context_start_lineno
int64 1
913
| line_no
int64 16
984
| repo
stringclasses 5
values | id
int64 0
416
| target_function_prompt
stringlengths 201
13.6k
| function_signature
stringlengths 201
13.6k
| solution_position
listlengths 2
2
| raw_solution
stringlengths 201
13.6k
| focal_code
stringlengths 201
13.6k
| function_name
stringlengths 2
38
| start_line
int64 1
913
| end_line
int64 16
984
| file_path
stringlengths 10
52
| context
stringlengths 4.52k
9.85k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
538
| 561
|
QuantEcon.jl
| 200
|
function qnwnorm(n::Vector{Int}, mu::Vector, sig2::Matrix = Matrix(I, length(n), length(n)))
n_n, n_mu = length(n), length(mu)
if !(n_n == n_mu)
error("n and mu must have same number of elements")
end
_nodes = Array{Vector{Float64}}(undef, n_n)
_weights = Array{Vector{Float64}}(undef, n_n)
for i in 1:n_n
_nodes[i], _weights[i] = qnwnorm(n[i])
end
weights = ckron(_weights[end:-1:1]...)
nodes = gridmake(_nodes...)::Matrix{Float64}
new_sig2 = cholesky(sig2).U
mul!(nodes, nodes, new_sig2)
broadcast!(+, nodes, nodes, mu')
return nodes, weights
end
|
function qnwnorm(n::Vector{Int}, mu::Vector, sig2::Matrix = Matrix(I, length(n), length(n)))
n_n, n_mu = length(n), length(mu)
if !(n_n == n_mu)
error("n and mu must have same number of elements")
end
_nodes = Array{Vector{Float64}}(undef, n_n)
_weights = Array{Vector{Float64}}(undef, n_n)
for i in 1:n_n
_nodes[i], _weights[i] = qnwnorm(n[i])
end
weights = ckron(_weights[end:-1:1]...)
nodes = gridmake(_nodes...)::Matrix{Float64}
new_sig2 = cholesky(sig2).U
mul!(nodes, nodes, new_sig2)
broadcast!(+, nodes, nodes, mu')
return nodes, weights
end
|
[
538,
561
] |
function qnwnorm(n::Vector{Int}, mu::Vector, sig2::Matrix = Matrix(I, length(n), length(n)))
n_n, n_mu = length(n), length(mu)
if !(n_n == n_mu)
error("n and mu must have same number of elements")
end
_nodes = Array{Vector{Float64}}(undef, n_n)
_weights = Array{Vector{Float64}}(undef, n_n)
for i in 1:n_n
_nodes[i], _weights[i] = qnwnorm(n[i])
end
weights = ckron(_weights[end:-1:1]...)
nodes = gridmake(_nodes...)::Matrix{Float64}
new_sig2 = cholesky(sig2).U
mul!(nodes, nodes, new_sig2)
broadcast!(+, nodes, nodes, mu')
return nodes, weights
end
|
function qnwnorm(n::Vector{Int}, mu::Vector, sig2::Matrix = Matrix(I, length(n), length(n)))
n_n, n_mu = length(n), length(mu)
if !(n_n == n_mu)
error("n and mu must have same number of elements")
end
_nodes = Array{Vector{Float64}}(undef, n_n)
_weights = Array{Vector{Float64}}(undef, n_n)
for i in 1:n_n
_nodes[i], _weights[i] = qnwnorm(n[i])
end
weights = ckron(_weights[end:-1:1]...)
nodes = gridmake(_nodes...)::Matrix{Float64}
new_sig2 = cholesky(sig2).U
mul!(nodes, nodes, new_sig2)
broadcast!(+, nodes, nodes, mu')
return nodes, weights
end
|
qnwnorm
| 538
| 561
|
src/quad.jl
|
#FILE: QuantEcon.jl/test/test_sampler.jl
##CHUNK 1
@test isapprox(mvns.Q * mvns.Q', mvns.Sigma)
end
@testset "check positive semi-definite zeros" begin
mvns = MVNSampler(mu, zeros(n, n))
@test rand(mvns) == mu
end
@testset "check positive semi-definite ones" begin
mvns = MVNSampler(mu, ones(n, n))
c = rand(mvns)-mvns.mu
@test all(broadcast(isapprox,c[1],c))
end
@testset "check positive semi-definite 1 and -1/(n-1)" begin
Sigma = -1/(n-1)*ones(n, n) + n/(n-1)*Matrix(Matrix(I, n, n))
mvns = MVNSampler(mu, Sigma)
@test isapprox(sum(rand(mvns)) , sum(mu), atol=1e-4, rtol=1e-4)
end
#FILE: QuantEcon.jl/src/lss.jl
##CHUNK 1
mu_0 = reshape([mu_0;], n)
dist = MVNSampler(mu_0,Sigma_0)
LSS(A, C, G, H, k, n, m, l, mu_0, Sigma_0, dist)
end
# make kwarg version
function LSS(A::ScalarOrArray, C::ScalarOrArray, G::ScalarOrArray;
H::ScalarOrArray=zeros(size(G, 1)),
mu_0::Vector=zeros(size(G, 2)),
Sigma_0::Matrix=zeros(size(G, 2), size(G, 2)))
return LSS(A, C, G, H, mu_0, Sigma_0)
end
function simulate(lss::LSS, ts_length=100)
x = Matrix{Float64}(undef, lss.n, ts_length)
x[:, 1] = rand(lss.dist)
w = randn(lss.m, ts_length - 1)
#CURRENT FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
nodes_out = gridmake(nodes...)::Matrix{Float64}
return nodes_out, weights
end
end # @eval
end
## Multidim version for qnworm
# other types of args
qnwnorm(n::Vector{Int}, mu::Vector, sig2::Real) =
qnwnorm(n, mu, Matrix(Diagonal(fill(convert(Float64, sig2), length(n)))))
qnwnorm(n::Vector{Int}, mu::Real, sig2::Matrix = Matrix{eltype(n)}(I, length(n), length(n))) =
qnwnorm(n, fill(mu, length(n)), sig2)
qnwnorm(n::Vector{Int}, mu::Real, sig2::Real) =
qnwnorm(n, fill(mu, length(n)), Matrix(Diagonal(fill(convert(Float64, sig2), length(n)))))
qnwnorm(n::Int, mu::Vector, sig2::Matrix = eye(length(mu))) =
qnwnorm(fill(n, length(mu)), mu, sig2)
##CHUNK 2
qnwnorm(n::Int, mu::Vector, sig2::Real) =
qnwnorm(fill(n, length(mu)), mu, Matrix(Diagonal(fill(convert(Float64, sig2), length(mu)))))
qnwnorm(n::Int, mu::Real, sig2::Matrix = eye(length(mu))) =
qnwnorm(fill(n, size(sig2, 1)), fill(mu, size(sig2, 1)), sig2)
function qnwnorm(n::Int, mu::Real, sig2::Real)
n, w = qnwnorm([n], [mu], fill(convert(Float64, sig2), 1, 1))
@assert size(n, 2) == 1
n[:, 1], w
end
qnwnorm(n::Vector{Int}, mu::Vector, sig2::Vector) =
qnwnorm(n, mu, Matrix(Diagonal(convert(Vector{Float64}, sig2))))
qnwnorm(n::Vector{Int}, mu::Real, sig2::Vector) =
qnwnorm(n, fill(mu, length(n)), Matrix(Diagonal(convert(Array{Float64}, sig2))))
qnwnorm(n::Int, mu::Vector, sig2::Vector) =
##CHUNK 3
n[:, 1], w
end
qnwnorm(n::Vector{Int}, mu::Vector, sig2::Vector) =
qnwnorm(n, mu, Matrix(Diagonal(convert(Vector{Float64}, sig2))))
qnwnorm(n::Vector{Int}, mu::Real, sig2::Vector) =
qnwnorm(n, fill(mu, length(n)), Matrix(Diagonal(convert(Array{Float64}, sig2))))
qnwnorm(n::Int, mu::Vector, sig2::Vector) =
qnwnorm(fill(n, length(mu)), mu, Matrix(Diagonal(convert(Array{Float64}, sig2))))
qnwnorm(n::Int, mu::Real, sig2::Vector) =
qnwnorm(fill(n, length(sig2)), fill(mu, length(sig2)), Matrix(Diagonal(convert(Array{Float64}, sig2))))
"""
Computes quadrature nodes and weights for multivariate uniform distribution.
##### Arguments
##CHUNK 4
qnwnorm(fill(n, length(mu)), mu, Matrix(Diagonal(convert(Array{Float64}, sig2))))
qnwnorm(n::Int, mu::Real, sig2::Vector) =
qnwnorm(fill(n, length(sig2)), fill(mu, length(sig2)), Matrix(Diagonal(convert(Array{Float64}, sig2))))
"""
Computes quadrature nodes and weights for multivariate uniform distribution.
##### Arguments
- `n::Union{Int, Vector{Int}}` : Number of desired nodes along each dimension
- `a::Union{Real, Vector{Real}}` : Lower endpoint along each dimension
- `b::Union{Real, Vector{Real}}` : Upper endpoint along each dimension
$(qnw_returns)
$(qnw_func_notes)
$(qnw_refs)
##CHUNK 5
end
return nodes .* b, weights
end
## Multidim versions
for f in [:qnwlege, :qnwcheb, :qnwsimp, :qnwtrap, :qnwbeta, :qnwgamma]
@eval begin
function ($f)(n::Vector{Int}, a::Real, b::Real)
n_n = length(n)
($f)(n, fill(a, n_n), fill(b, n_n))
end
function ($f)(n::Int, a::Vector, b::Real)
n_a = length(a)
($f)(fill(n, n_a), a, fill(b, n_a))
end
function ($f)(n::Int, a::Real, b::Vector)
##CHUNK 6
qnwnorm(n, mu, Matrix(Diagonal(fill(convert(Float64, sig2), length(n)))))
qnwnorm(n::Vector{Int}, mu::Real, sig2::Matrix = Matrix{eltype(n)}(I, length(n), length(n))) =
qnwnorm(n, fill(mu, length(n)), sig2)
qnwnorm(n::Vector{Int}, mu::Real, sig2::Real) =
qnwnorm(n, fill(mu, length(n)), Matrix(Diagonal(fill(convert(Float64, sig2), length(n)))))
qnwnorm(n::Int, mu::Vector, sig2::Matrix = eye(length(mu))) =
qnwnorm(fill(n, length(mu)), mu, sig2)
qnwnorm(n::Int, mu::Vector, sig2::Real) =
qnwnorm(fill(n, length(mu)), mu, Matrix(Diagonal(fill(convert(Float64, sig2), length(mu)))))
qnwnorm(n::Int, mu::Real, sig2::Matrix = eye(length(mu))) =
qnwnorm(fill(n, size(sig2, 1)), fill(mu, size(sig2, 1)), sig2)
function qnwnorm(n::Int, mu::Real, sig2::Real)
n, w = qnwnorm([n], [mu], fill(convert(Float64, sig2), 1, 1))
@assert size(n, 2) == 1
##CHUNK 7
end
($f)(fill(n, n_a), a, b)
end
function ($f)(n::Vector{Int}, a::Vector, b::Vector)
n_n, n_a, n_b = length(n), length(a), length(b)
if !(n_n == n_a == n_b)
error("n, a, and b must have same number of elements")
end
nodes = Vector{Float64}[]
weights = Vector{Float64}[]
for i = 1:n_n
_1d = $f(n[i], a[i], b[i])
push!(nodes, _1d[1])
push!(weights, _1d[2])
end
weights = ckron(weights[end:-1:1]...)
##CHUNK 8
"""
function qnwunif(n, a, b)
nodes, weights = qnwlege(n, a, b)
weights ./= prod(b .- a)
return nodes, weights
end
"""
Computes quadrature nodes and weights for multivariate uniform distribution
##### Arguments
- `n::Union{Int, Vector{Int}}` : Number of desired nodes along each dimension
- `mu::Union{Real, Vector{Real}}` : Mean along each dimension
- `sig2::Union{Real, Vector{Real}, Matrix{Real}}(eye(length(n)))` : Covariance
structure
$(qnw_returns)
|
673
| 709
|
QuantEcon.jl
| 201
|
function qnwequi(n::Int, a::Vector, b::Vector, kind::AbstractString = "N")
# error checking
n_a, n_b = length(a), length(b)
if !(n_a == n_b)
error("a and b must have same number of elements")
end
d = n_a
i = reshape(1:n, n, 1)
if kind == "N"
j = 2.0.^((1:d) / (d + 1))
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "W"
j = equidist_pp[1:d]
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "H"
j = equidist_pp[1:d]
nodes = (i .* (i .+ 1) ./ 2) * j'
nodes -= fix(nodes)
elseif kind == "R"
nodes = rand(n, d)
else
error("Unknown `kind` specified. Valid choices are N, W, H, R")
end
r = b - a
nodes = a' .+ nodes .* r' # use broadcasting here.
weights = fill((prod(r) / n), n)
return nodes, weights
end
|
function qnwequi(n::Int, a::Vector, b::Vector, kind::AbstractString = "N")
# error checking
n_a, n_b = length(a), length(b)
if !(n_a == n_b)
error("a and b must have same number of elements")
end
d = n_a
i = reshape(1:n, n, 1)
if kind == "N"
j = 2.0.^((1:d) / (d + 1))
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "W"
j = equidist_pp[1:d]
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "H"
j = equidist_pp[1:d]
nodes = (i .* (i .+ 1) ./ 2) * j'
nodes -= fix(nodes)
elseif kind == "R"
nodes = rand(n, d)
else
error("Unknown `kind` specified. Valid choices are N, W, H, R")
end
r = b - a
nodes = a' .+ nodes .* r' # use broadcasting here.
weights = fill((prod(r) / n), n)
return nodes, weights
end
|
[
673,
709
] |
function qnwequi(n::Int, a::Vector, b::Vector, kind::AbstractString = "N")
# error checking
n_a, n_b = length(a), length(b)
if !(n_a == n_b)
error("a and b must have same number of elements")
end
d = n_a
i = reshape(1:n, n, 1)
if kind == "N"
j = 2.0.^((1:d) / (d + 1))
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "W"
j = equidist_pp[1:d]
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "H"
j = equidist_pp[1:d]
nodes = (i .* (i .+ 1) ./ 2) * j'
nodes -= fix(nodes)
elseif kind == "R"
nodes = rand(n, d)
else
error("Unknown `kind` specified. Valid choices are N, W, H, R")
end
r = b - a
nodes = a' .+ nodes .* r' # use broadcasting here.
weights = fill((prod(r) / n), n)
return nodes, weights
end
|
function qnwequi(n::Int, a::Vector, b::Vector, kind::AbstractString = "N")
# error checking
n_a, n_b = length(a), length(b)
if !(n_a == n_b)
error("a and b must have same number of elements")
end
d = n_a
i = reshape(1:n, n, 1)
if kind == "N"
j = 2.0.^((1:d) / (d + 1))
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "W"
j = equidist_pp[1:d]
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "H"
j = equidist_pp[1:d]
nodes = (i .* (i .+ 1) ./ 2) * j'
nodes -= fix(nodes)
elseif kind == "R"
nodes = rand(n, d)
else
error("Unknown `kind` specified. Valid choices are N, W, H, R")
end
r = b - a
nodes = a' .+ nodes .* r' # use broadcasting here.
weights = fill((prod(r) / n), n)
return nodes, weights
end
|
qnwequi
| 673
| 709
|
src/quad.jl
|
#FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
Nm::Integer,
n_moments::Integer=2,
method::VAREstimationMethod=Even(),
n_sigmas::Real=sqrt(Nm-1))
# b = zeros(2)
# A = [0.9809 0.0028; 0.041 0.9648]
# Sigma = [7.569e-5 0.0; 0.0 0.00068644]
# N = 9
# n_moments = nMoments
# method = Quantile()
# b, B, Psi, Nm = (zeros(2), A, Sigma, N, nMoments, Quantile())
M, M_ = size(B, 1), size(B, 2)
# Check size restrictions on matrices
M == M_ || throw(ArgumentError("B must be a scalar or square matrix"))
M == length(b) || throw(ArgumentError("b must have the same number of rows as B"))
#% Check that Psi is a valid covariance matrix
isposdef(Psi) || throw(ArgumentError("Psi must be a positive definite matrix"))
#FILE: QuantEcon.jl/test/test_quad.jl
##CHUNK 1
# dim 1: num nodes, dim2: method, dim3:func
data1d = Array{Float64}(undef, 6, 6, 3)
kinds = ["trap", "simp", "lege", "N", "W", "H"]
n_nodes = [5, 11, 21, 51, 101, 401] # number of nodes
a, b = -1, 1
for (k_i, k) in enumerate(kinds)
for (n_i, n) in enumerate(n_nodes)
num_in = length(k) == 1 ? n^2 : n
for (f_i, f) in enumerate([f1, f2, f3])
data1d[n_i, k_i, f_i] = quadrect(f, num_in, a, b, k)
end
end
end
# NOTE: drop last column -- corresponds to "R" and we have different
# random numbers than Matlab.
ml_data_1d = m["int_1d"][:, 1:6, :]
##CHUNK 2
f2(x) = exp.(- x[:, 1] .* cos.(x[:, 2].^2))
a = ([0.0, 0.0], [-1.0, -1.0])
b = ([1.0, 2.0], [1.0, 1.0])
# dim 1: num nodes, dim2: method
data2d1 = Matrix{Float64}(undef, 6, 6)
kinds = ["lege", "trap", "simp", "N", "W", "H"]
n_nodes = [5, 11, 21, 51, 101, 401] # number of nodes
for (k_i, k) in enumerate(kinds)
for (n_i, n) in enumerate(n_nodes)
num_in = length(k) == 1 ? n^2 : n
data2d1[n_i, k_i] = quadrect(f1, num_in, a[1], b[1], k)
end
end
# NOTE: drop last column -- corresponds to "R" and we have different
# random numbers than Matlab.
ml_data_2d1 = m["int_2d1"][:, 1:6]
#CURRENT FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
##### Returns
- `out::Float64` : The scalar that approximates integral of `f` on the hypercube
formed by `[a, b]`
$(qnw_refs)
"""
function quadrect(f::Function, n, a, b, kind = "lege", args...; kwargs...)
if lowercase(kind)[1] == 'l'
nodes, weights = qnwlege(n, a, b)
elseif lowercase(kind)[1] == 'c'
nodes, weights = qnwcheb(n, a, b)
elseif lowercase(kind)[1] == 't'
nodes, weights = qnwtrap(n, a, b)
elseif lowercase(kind)[1] == 's'
nodes, weights = qnwsimp(n, a, b)
else
nodes, weights = qnwequi(n, a, b, kind)
end
##CHUNK 2
ϵj = z1 * R
ωj = ones(n_nodes) ./ n_nodes
ϵj, ωj
end
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
##CHUNK 3
qnwequi(n::Vector{Int}, a::Vector, b::Real, kind::AbstractString = "N") =
qnwequi(prod(n), a, fill(b, length(a)), kind)
qnwequi(n::Vector{Int}, a::Real, b::Real, kind::AbstractString = "N") =
qnwequi(prod(n), fill(a, length(n)), fill(b, length(n)), kind)
qnwequi(n::Int, a::Real, b::Vector, kind::AbstractString = "N") =
qnwequi(n, fill(a, length(b)), b, kind)
qnwequi(n::Int, a::Vector, b::Real, kind::AbstractString = "N") =
qnwequi(n, a, fill(b, length(a)), kind)
function qnwequi(n::Int, a::Real, b::Real, kind::AbstractString = "N")
n, w = qnwequi(n, [a], [b], kind)
@assert size(n, 2) == 1
n[:, 1], w
end
##CHUNK 4
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n + 2) .* sqrt_vcv
S = sqrt((n + 2) / 2) * sqrt_vcv
ϵj = [z0; z1 * R; z2 * S]
ωj = vcat(2 / (n + 2) * ones(size(z0, 1)),
(4 - n) / (2 * (n + 2)^2) * ones(size(z1, 1)),
1 / (n + 2)^2 * ones(size(z2, 1)))
return ϵj, ωj
end
function _quadnodes(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real, qN::Real, ::Union{Even,Type{Even}})
collect(range(quantile(d, q0), stop = quantile(d, qN), length = N))
end
##CHUNK 5
m = floor(Int, (n + 1) / 2)
nodes = zeros(n)
weights = zeros(n)
z = sqrt(2n + 1) - 1.85575 * ((2n + 1).^(-1 / 6))
for i = 1:m
# Reasonable starting values for root finding
if i == 1
z = sqrt(2n + 1) - 1.85575 * ((2n + 1).^(-1 / 6))
elseif i == 2
z = z - 1.14 * (n.^0.426) ./ z
elseif i == 3
z = 1.86z + 0.86nodes[1]
elseif i == 4
z = 1.91z + 0.91nodes[2]
else
z = 2z + nodes[i - 2]
end
##CHUNK 6
if abs(z - z1) < 1e-14
break
end
end
if it >= maxit
error("Failed to converge in qnwnorm")
end
nodes[n + 1 - i] = z
nodes[i] = -z
weights[i] = 2 ./ (pp .* pp)
weights[n + 1 - i] = weights[i]
end
weights = weights ./ sqrt(pi)
nodes = sqrt(2) .* nodes
return nodes, weights
end
##CHUNK 7
z = z - (x[1] - z) * r1 * r2 * r3
elseif i == n - 1
r1 = (1 + 0.235b) / (0.766 + 0.119b)
r2 = 1 / (1 + 0.639 * (n - 4) / (1 + 0.71 * (n - 4)))
r3 = 1 / (1 + 20a / ((7.5 + a ) * n * n))
z = z + (z - x[n - 3]) * r1 * r2 * r3
elseif i == n
r1 = (1 + 0.37b) / (1.67 + 0.28b)
r2 = 1 / (1 + 0.22 * (n - 8) / n)
r3 = 1 / (1 + 8 * a / ((6.28 + a ) * n * n))
z = z + (z - x[n - 2]) * r1 * r2 * r3
else
z = 3 * x[i - 1] - 3 * x[i - 2] + x[i - 3]
end
its = 1
temp = 0.0
|
795
| 809
|
QuantEcon.jl
| 202
|
function quadrect(f::Function, n, a, b, kind = "lege", args...; kwargs...)
if lowercase(kind)[1] == 'l'
nodes, weights = qnwlege(n, a, b)
elseif lowercase(kind)[1] == 'c'
nodes, weights = qnwcheb(n, a, b)
elseif lowercase(kind)[1] == 't'
nodes, weights = qnwtrap(n, a, b)
elseif lowercase(kind)[1] == 's'
nodes, weights = qnwsimp(n, a, b)
else
nodes, weights = qnwequi(n, a, b, kind)
end
return do_quad(f, nodes, weights, args...; kwargs...)
end
|
function quadrect(f::Function, n, a, b, kind = "lege", args...; kwargs...)
if lowercase(kind)[1] == 'l'
nodes, weights = qnwlege(n, a, b)
elseif lowercase(kind)[1] == 'c'
nodes, weights = qnwcheb(n, a, b)
elseif lowercase(kind)[1] == 't'
nodes, weights = qnwtrap(n, a, b)
elseif lowercase(kind)[1] == 's'
nodes, weights = qnwsimp(n, a, b)
else
nodes, weights = qnwequi(n, a, b, kind)
end
return do_quad(f, nodes, weights, args...; kwargs...)
end
|
[
795,
809
] |
function quadrect(f::Function, n, a, b, kind = "lege", args...; kwargs...)
if lowercase(kind)[1] == 'l'
nodes, weights = qnwlege(n, a, b)
elseif lowercase(kind)[1] == 'c'
nodes, weights = qnwcheb(n, a, b)
elseif lowercase(kind)[1] == 't'
nodes, weights = qnwtrap(n, a, b)
elseif lowercase(kind)[1] == 's'
nodes, weights = qnwsimp(n, a, b)
else
nodes, weights = qnwequi(n, a, b, kind)
end
return do_quad(f, nodes, weights, args...; kwargs...)
end
|
function quadrect(f::Function, n, a, b, kind = "lege", args...; kwargs...)
if lowercase(kind)[1] == 'l'
nodes, weights = qnwlege(n, a, b)
elseif lowercase(kind)[1] == 'c'
nodes, weights = qnwcheb(n, a, b)
elseif lowercase(kind)[1] == 't'
nodes, weights = qnwtrap(n, a, b)
elseif lowercase(kind)[1] == 's'
nodes, weights = qnwsimp(n, a, b)
else
nodes, weights = qnwequi(n, a, b, kind)
end
return do_quad(f, nodes, weights, args...; kwargs...)
end
|
quadrect
| 795
| 809
|
src/quad.jl
|
#FILE: QuantEcon.jl/test/test_quad.jl
##CHUNK 1
# dim 1: num nodes, dim2: method, dim3:func
data1d = Array{Float64}(undef, 6, 6, 3)
kinds = ["trap", "simp", "lege", "N", "W", "H"]
n_nodes = [5, 11, 21, 51, 101, 401] # number of nodes
a, b = -1, 1
for (k_i, k) in enumerate(kinds)
for (n_i, n) in enumerate(n_nodes)
num_in = length(k) == 1 ? n^2 : n
for (f_i, f) in enumerate([f1, f2, f3])
data1d[n_i, k_i, f_i] = quadrect(f, num_in, a, b, k)
end
end
end
# NOTE: drop last column -- corresponds to "R" and we have different
# random numbers than Matlab.
ml_data_1d = m["int_1d"][:, 1:6, :]
##CHUNK 2
f2(x) = exp.(- x[:, 1] .* cos.(x[:, 2].^2))
a = ([0.0, 0.0], [-1.0, -1.0])
b = ([1.0, 2.0], [1.0, 1.0])
# dim 1: num nodes, dim2: method
data2d1 = Matrix{Float64}(undef, 6, 6)
kinds = ["lege", "trap", "simp", "N", "W", "H"]
n_nodes = [5, 11, 21, 51, 101, 401] # number of nodes
for (k_i, k) in enumerate(kinds)
for (n_i, n) in enumerate(n_nodes)
num_in = length(k) == 1 ? n^2 : n
data2d1[n_i, k_i] = quadrect(f1, num_in, a[1], b[1], k)
end
end
# NOTE: drop last column -- corresponds to "R" and we have different
# random numbers than Matlab.
ml_data_2d1 = m["int_2d1"][:, 1:6]
##CHUNK 3
@test isapprox(data1d[:, 1, :], ml_data_1d[:, 1, :]) # trap
@test isapprox(data1d[:, 2, :], ml_data_1d[:, 2, :]) # simp
@test isapprox(data1d[:, 3, :], ml_data_1d[:, 3, :]) # lege
@test isapprox(data1d[:, 4, :], ml_data_1d[:, 4, :]) # N
@test isapprox(data1d[:, 5, :], ml_data_1d[:, 5, :]) # W
@test isapprox(data1d[:, 6, :], ml_data_1d[:, 6, :]) # H
end
@testset "testing quadrect 2d against Matlab" begin
f1(x) = exp.(x[:, 1] + x[:, 2])
f2(x) = exp.(- x[:, 1] .* cos.(x[:, 2].^2))
a = ([0.0, 0.0], [-1.0, -1.0])
b = ([1.0, 2.0], [1.0, 1.0])
# dim 1: num nodes, dim2: method
data2d1 = Matrix{Float64}(undef, 6, 6)
kinds = ["lege", "trap", "simp", "N", "W", "H"]
n_nodes = [5, 11, 21, 51, 101, 401] # number of nodes
#CURRENT FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
error("Unknown `kind` specified. Valid choices are N, W, H, R")
end
r = b - a
nodes = a' .+ nodes .* r' # use broadcasting here.
weights = fill((prod(r) / n), n)
return nodes, weights
end
# Other argument types
qnwequi(n::Vector{Int}, a::Vector, b::Vector, kind::AbstractString = "N") =
qnwequi(prod(n), a, b, kind)
qnwequi(n::Vector{Int}, a::Real, b::Vector, kind::AbstractString = "N") =
qnwequi(prod(n), fill(a, length(b)), b, kind)
qnwequi(n::Vector{Int}, a::Vector, b::Real, kind::AbstractString = "N") =
qnwequi(prod(n), a, fill(b, length(a)), kind)
##CHUNK 2
qnwequi(n::Vector{Int}, a::Real, b::Real, kind::AbstractString = "N") =
qnwequi(prod(n), fill(a, length(n)), fill(b, length(n)), kind)
qnwequi(n::Int, a::Real, b::Vector, kind::AbstractString = "N") =
qnwequi(n, fill(a, length(b)), b, kind)
qnwequi(n::Int, a::Vector, b::Real, kind::AbstractString = "N") =
qnwequi(n, a, fill(b, length(a)), kind)
function qnwequi(n::Int, a::Real, b::Real, kind::AbstractString = "N")
n, w = qnwequi(n, [a], [b], kind)
@assert size(n, 2) == 1
n[:, 1], w
end
## Doing the quadrature
"""
Approximate the integral of `f`, given quadrature `nodes` and `weights`
##CHUNK 3
$(qnw_refs)
"""
function qnwequi(n::Int, a::Vector, b::Vector, kind::AbstractString = "N")
# error checking
n_a, n_b = length(a), length(b)
if !(n_a == n_b)
error("a and b must have same number of elements")
end
d = n_a
i = reshape(1:n, n, 1)
if kind == "N"
j = 2.0.^((1:d) / (d + 1))
nodes = i * j'
nodes -= fix(nodes)
elseif kind == "W"
j = equidist_pp[1:d]
nodes = i * j'
nodes -= fix(nodes)
##CHUNK 4
elseif kind == "H"
j = equidist_pp[1:d]
nodes = (i .* (i .+ 1) ./ 2) * j'
nodes -= fix(nodes)
elseif kind == "R"
nodes = rand(n, d)
else
error("Unknown `kind` specified. Valid choices are N, W, H, R")
end
r = b - a
nodes = a' .+ nodes .* r' # use broadcasting here.
weights = fill((prod(r) / n), n)
return nodes, weights
end
##CHUNK 5
"""
Generates equidistributed sequences with property that averages
value of integrable function evaluated over the sequence converges
to the integral as n goes to infinity.
##### Arguments
- `n::Union{Int, Vector{Int}}` : Number of desired nodes along each dimension
- `a::Union{Real, Vector{Real}}` : Lower endpoint along each dimension
- `b::Union{Real, Vector{Real}}` : Upper endpoint along each dimension
- `kind::AbstractString("N")`: One of the following:
- N - Neiderreiter (default)
- W - Weyl
- H - Haber
- R - pseudo Random
$(qnw_returns)
$(qnw_func_notes)
##CHUNK 6
##### Arguments
- `f::Function`: A callable function that is to be approximated over the domain
spanned by `nodes`.
- `nodes::Array`: Quadrature nodes
- `weights::Array`: Quadrature nodes
- `args...(Void)`: additional positional arguments to pass to `f`
- `;kwargs...(Void)`: additional keyword arguments to pass to `f`
##### Returns
- `out::Float64` : The scalar that approximates integral of `f` on the hypercube
formed by `[a, b]`
"""
function do_quad(f::Function, nodes::Array, weights::Vector, args...;
kwargs...)
return dot(f(nodes, args...; kwargs...), weights)
end
do_quad(f::Function, nodes::Array, weights::Vector) = dot(f(nodes), weights)
##CHUNK 7
end
return nodes .* b, weights
end
## Multidim versions
for f in [:qnwlege, :qnwcheb, :qnwsimp, :qnwtrap, :qnwbeta, :qnwgamma]
@eval begin
function ($f)(n::Vector{Int}, a::Real, b::Real)
n_n = length(n)
($f)(n, fill(a, n_n), fill(b, n_n))
end
function ($f)(n::Int, a::Vector, b::Real)
n_a = length(a)
($f)(fill(n, n_a), a, fill(b, n_a))
end
function ($f)(n::Int, a::Real, b::Vector)
|
834
| 870
|
QuantEcon.jl
| 203
|
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n + 2) .* sqrt_vcv
S = sqrt((n + 2) / 2) * sqrt_vcv
ϵj = [z0; z1 * R; z2 * S]
ωj = vcat(2 / (n + 2) * ones(size(z0, 1)),
(4 - n) / (2 * (n + 2)^2) * ones(size(z1, 1)),
1 / (n + 2)^2 * ones(size(z2, 1)))
return ϵj, ωj
end
|
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n + 2) .* sqrt_vcv
S = sqrt((n + 2) / 2) * sqrt_vcv
ϵj = [z0; z1 * R; z2 * S]
ωj = vcat(2 / (n + 2) * ones(size(z0, 1)),
(4 - n) / (2 * (n + 2)^2) * ones(size(z1, 1)),
1 / (n + 2)^2 * ones(size(z2, 1)))
return ϵj, ωj
end
|
[
834,
870
] |
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n + 2) .* sqrt_vcv
S = sqrt((n + 2) / 2) * sqrt_vcv
ϵj = [z0; z1 * R; z2 * S]
ωj = vcat(2 / (n + 2) * ones(size(z0, 1)),
(4 - n) / (2 * (n + 2)^2) * ones(size(z1, 1)),
1 / (n + 2)^2 * ones(size(z2, 1)))
return ϵj, ωj
end
|
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n + 2) .* sqrt_vcv
S = sqrt((n + 2) / 2) * sqrt_vcv
ϵj = [z0; z1 * R; z2 * S]
ωj = vcat(2 / (n + 2) * ones(size(z0, 1)),
(4 - n) / (2 * (n + 2)^2) * ones(size(z1, 1)),
1 / (n + 2)^2 * ones(size(z2, 1)))
return ϵj, ωj
end
|
qnwmonomial2
| 834
| 870
|
src/quad.jl
|
#FILE: QuantEcon.jl/test/test_mc_tools.jl
##CHUNK 1
((i/(n-1) > p) + (i/(n-1) == p)/2))
P[i+1, i+1] = 1 - P[i+1, i] - P[i+1, i+2]
end
P[end, end-1], P[end, end] = ε/2, 1 - ε/2
return P
end
function Base.isapprox(x::Vector{Vector{<:Real}},
y::Vector{Vector{<:Real}})
length(x) == length(y) || return false
return all(xy -> isapprox(x, y), zip(x, y))
end
@testset "Testing mc_tools.jl" begin
# Matrix with two recurrent classes [1, 2] and [4, 5, 6],
# which have periods 2 and 3, respectively
Q = [0 1 0 0 0 0
1 0 0 0 0 0
##CHUNK 2
length(x) == length(y) || return false
return all(xy -> isapprox(x, y), zip(x, y))
end
@testset "Testing mc_tools.jl" begin
# Matrix with two recurrent classes [1, 2] and [4, 5, 6],
# which have periods 2 and 3, respectively
Q = [0 1 0 0 0 0
1 0 0 0 0 0
1//2 0 0 1//2 0 0
0 0 0 0 1 0
0 0 0 0 0 1
0 0 0 1 0 0]
Q_stationary_dists = Vector{Rational{Int}}[
[1//2, 1//2, 0, 0, 0, 0], [0, 0, 0, 1//3, 1//3, 1//3]
]
Q_dict_Rational = Dict(
"P" => Q,
"stationary_dists" => Q_stationary_dists,
#FILE: QuantEcon.jl/test/test_ddp.jl
##CHUNK 1
# From Puterman 2005, Section 3.2, Section 4.6.1
# "single-product stochastic inventory control"
#set up DDP constructor
s_indices = [1, 1, 1, 1, 2, 2, 2, 3, 3, 4]
a_indices = [1, 2, 3, 4, 1, 2, 3, 1, 2, 1]
R = [ 0//1, -1//1, -2//1, -5//1, 5//1, 0//1, -3//1, 6//1, -1//1, 5//1]
Q = [ 1//1 0//1 0//1 0//1;
3//4 1//4 0//1 0//1;
1//4 1//2 1//4 0//1;
0//1 1//4 1//2 1//4;
3//4 1//4 0//1 0//1;
1//4 1//2 1//4 0//1;
0//1 1//4 1//2 1//4;
1//4 1//2 1//4 0//1;
0//1 1//4 1//2 1//4;
0//1 1//4 1//2 1//4]
beta = 1
ddp_rational = DiscreteDP(R, Q, beta, s_indices, a_indices)
R = convert.(Float64, R)
##CHUNK 2
R = [5.0 10.0; -1.0 -Inf]
Q = Array{Float64}(undef, n, m, n)
Q[:, :, 1] = [0.5 0.0; 0.0 0.0]
Q[:, :, 2] = [0.5 1.0; 1.0 1.0]
ddp0 = DiscreteDP(R, Q, beta)
ddp0_b1 = DiscreteDP(R, Q, 1.0)
# Formulation with state-action pairs
L = 3 # Number of state-action pairs
s_indices = [1, 1, 2]
a_indices = [1, 2, 1]
R_sa = [R[1, 1], R[1, 2], R[2, 1]]
Q_sa = spzeros(L, n)
Q_sa[1, :] = Q[1, 1, :]
Q_sa[2, :] = Q[1, 2, :]
Q_sa[3, :] = Q[2, 1, :]
ddp0_sa = DiscreteDP(R_sa, Q_sa, beta, s_indices, a_indices)
ddp0_sa_b1 = DiscreteDP(R_sa, Q_sa, 1.0, s_indices, a_indices)
#FILE: QuantEcon.jl/other/quadrature.jl
##CHUNK 1
p2 = 1
for j=2:n
p3 = p2
p2 = p1
temp = 2 * j + ab
aa = 2 * j * (j + ab) * (temp - 2)
bb = (temp - 1) * (a * a - b * b + temp * (temp - 2) * z)
c = 2 * (j - 1 + a) * (j - 1 + b) * temp
p1 = (bb * p2 - c * p3) / aa
end
pp = (n * (a - b - temp * z) * p1 +
2 * (n + a) * (n + b) * p2) / (temp * (1 - z * z))
z1 = z
z = z1 - p1 ./ pp
if abs(z - z1) < 3e-14 break end
end
if its >= maxit
error("Failure to converge in qnwbeta1")
end
#CURRENT FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
function qnwmonomial1(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n
z1 = zeros(n_nodes, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n) .* sqrt_vcv
ϵj = z1 * R
ωj = ones(n_nodes) ./ n_nodes
ϵj, ωj
##CHUNK 2
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n) .* sqrt_vcv
ϵj = z1 * R
ωj = ones(n_nodes) ./ n_nodes
ϵj, ωj
end
function _quadnodes(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real, qN::Real, ::Union{Even,Type{Even}})
collect(range(quantile(d, q0), stop = quantile(d, qN), length = N))
end
##CHUNK 3
aa = 2 * j * (j + ab) * (temp - 2)
bb = (temp - 1) * (a * a - b * b + temp * (temp - 2) * z)
c = 2 * (j - 1 + a) * (j - 1 + b) * temp
p1 = (bb * p2 - c * p3) / aa
end
pp = (n * (a - b - temp * z) * p1 +
2 * (n + a) * (n + b) * p2) / (temp * (1 - z * z))
z1 = z
z = z1 - p1 ./ pp
if abs(z - z1) < 3e-14 break end
end
if its >= maxit
error("Failure to converge in qnwbeta")
end
x[i] = z
w[i] = temp / (pp * p2)
end
##CHUNK 4
z = z - (x[1] - z) * r1 * r2 * r3
elseif i == n - 1
r1 = (1 + 0.235b) / (0.766 + 0.119b)
r2 = 1 / (1 + 0.639 * (n - 4) / (1 + 0.71 * (n - 4)))
r3 = 1 / (1 + 20a / ((7.5 + a ) * n * n))
z = z + (z - x[n - 3]) * r1 * r2 * r3
elseif i == n
r1 = (1 + 0.37b) / (1.67 + 0.28b)
r2 = 1 / (1 + 0.22 * (n - 8) / n)
r3 = 1 / (1 + 8 * a / ((6.28 + a ) * n * n))
z = z + (z - x[n - 2]) * r1 * r2 * r3
else
z = 3 * x[i - 1] - 3 * x[i - 2] + x[i - 3]
end
its = 1
temp = 0.0
##CHUNK 5
elseif i == 2
r1 = (4.1 + a) / ((1 + a) * (1 + 0.156a))
r2 = 1 + 0.06 * (n - 8) * (1 + 0.12a) / n
r3 = 1 + 0.012b * (1 + 0.25 * abs(a)) / n
z = z - (1 - z) * r1 * r2 * r3
elseif i == 3
r1 = (1.67 + 0.28a) / (1 + 0.37a)
r2 = 1 + 0.22 * (n - 8) / n
r3 = 1 + 8 * b / ((6.28 + b) * n * n)
z = z - (x[1] - z) * r1 * r2 * r3
elseif i == n - 1
r1 = (1 + 0.235b) / (0.766 + 0.119b)
r2 = 1 / (1 + 0.639 * (n - 4) / (1 + 0.71 * (n - 4)))
r3 = 1 / (1 + 20a / ((7.5 + a ) * n * n))
z = z + (z - x[n - 3]) * r1 * r2 * r3
elseif i == n
r1 = (1 + 0.37b) / (1.67 + 0.28b)
|
911
| 923
|
QuantEcon.jl
| 204
|
function qnwdist(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real = 0.001, qN::Real = 0.999, method::Union{T,Type{T}} = Quantile) where T
z = _quadnodes(d, N, q0, qN, method)
zprob = zeros(N)
for i in 2:N - 1
zprob[i] = cdf(d, (z[i] + z[i + 1]) / 2) - cdf(d, (z[i] + z[i - 1]) / 2)
end
zprob[1] = cdf(d, (z[1] + z[2]) / 2)
zprob[end] = 1 - cdf(d, (z[end - 1] + z[end]) / 2)
return z, zprob
end
|
function qnwdist(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real = 0.001, qN::Real = 0.999, method::Union{T,Type{T}} = Quantile) where T
z = _quadnodes(d, N, q0, qN, method)
zprob = zeros(N)
for i in 2:N - 1
zprob[i] = cdf(d, (z[i] + z[i + 1]) / 2) - cdf(d, (z[i] + z[i - 1]) / 2)
end
zprob[1] = cdf(d, (z[1] + z[2]) / 2)
zprob[end] = 1 - cdf(d, (z[end - 1] + z[end]) / 2)
return z, zprob
end
|
[
911,
923
] |
function qnwdist(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real = 0.001, qN::Real = 0.999, method::Union{T,Type{T}} = Quantile) where T
z = _quadnodes(d, N, q0, qN, method)
zprob = zeros(N)
for i in 2:N - 1
zprob[i] = cdf(d, (z[i] + z[i + 1]) / 2) - cdf(d, (z[i] + z[i - 1]) / 2)
end
zprob[1] = cdf(d, (z[1] + z[2]) / 2)
zprob[end] = 1 - cdf(d, (z[end - 1] + z[end]) / 2)
return z, zprob
end
|
function qnwdist(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real = 0.001, qN::Real = 0.999, method::Union{T,Type{T}} = Quantile) where T
z = _quadnodes(d, N, q0, qN, method)
zprob = zeros(N)
for i in 2:N - 1
zprob[i] = cdf(d, (z[i] + z[i + 1]) / 2) - cdf(d, (z[i] + z[i - 1]) / 2)
end
zprob[1] = cdf(d, (z[1] + z[2]) / 2)
zprob[end] = 1 - cdf(d, (z[end - 1] + z[end]) / 2)
return z, zprob
end
|
qnwdist
| 911
| 923
|
src/quad.jl
|
#CURRENT FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
sqrt_vcv = cholesky(vcv).U
R = sqrt(n + 2) .* sqrt_vcv
S = sqrt((n + 2) / 2) * sqrt_vcv
ϵj = [z0; z1 * R; z2 * S]
ωj = vcat(2 / (n + 2) * ones(size(z0, 1)),
(4 - n) / (2 * (n + 2)^2) * ones(size(z1, 1)),
1 / (n + 2)^2 * ones(size(z2, 1)))
return ϵj, ωj
end
function _quadnodes(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real, qN::Real, ::Union{Even,Type{Even}})
collect(range(quantile(d, q0), stop = quantile(d, qN), length = N))
end
function _quadnodes(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real, qN::Real, ::Union{Quantile,Type{Quantile}})
quantiles = range(q0, stop = qN, length = N)
##CHUNK 2
quantile `q0` to the quantile `qN`. `method` can be one of:
- `Even`: nodes will be evenly spaced between the quantiles
- `Quantile`: nodes will be placed at evenly spaced quantile values
To construct the weights, consider splitting the nodes into cells centered at
each node. Specifically, let notation `z_i` mean the `i`th node and let
`z_{i-1/2}` be 1/2 between nodes `z_{i-1}` and `z_i`. Then, weights are
determined as follows:
- `weights[1] = cdf(d, z_{1+1/2})`
- `weights[N] = 1 - cdf(d, z_{N-1/2})`
- `weights[i] = cdf(d, z_{i+1/2}) - cdf(d, z_{i-1/2})` for all i in 2:N-1
In effect, this strategy assigns node `i` all the probability associated with a
random variable occuring within the node `i`s cell.
The weights always sum to 1, so they can be used as a proper probability
distribution. This means that `E[f(x) | x ~ d] ≈ dot(f.(nodes), weights)`.
"""
##CHUNK 3
function _quadnodes(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real, qN::Real, ::Union{Even,Type{Even}})
collect(range(quantile(d, q0), stop = quantile(d, qN), length = N))
end
function _quadnodes(d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real, qN::Real, ::Union{Quantile,Type{Quantile}})
quantiles = range(q0, stop = qN, length = N)
z = quantile.(d, quantiles)
end
"""
qnwdist(
d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real=0.001, qN::Real=0.999, method::Union{T,Type{T}}=Quantile
) where T
Construct `N` quadrature weights and nodes for distribution `d` from the
##CHUNK 4
z = quantile.(d, quantiles)
end
"""
qnwdist(
d::Distributions.ContinuousUnivariateDistribution, N::Int,
q0::Real=0.001, qN::Real=0.999, method::Union{T,Type{T}}=Quantile
) where T
Construct `N` quadrature weights and nodes for distribution `d` from the
quantile `q0` to the quantile `qN`. `method` can be one of:
- `Even`: nodes will be evenly spaced between the quantiles
- `Quantile`: nodes will be placed at evenly spaced quantile values
To construct the weights, consider splitting the nodes into cells centered at
each node. Specifically, let notation `z_i` mean the `i`th node and let
`z_{i-1/2}` be 1/2 between nodes `z_{i-1}` and `z_i`. Then, weights are
determined as follows:
##CHUNK 5
length of either `n` and/or `mu` (which ever is a vector).
If all 3 are scalars, then 1d nodes are computed. `mu` and `sig2` are treated as
the mean and variance of a 1d normal distribution
$(qnw_refs)
"""
function qnwnorm(n::Int)
maxit = 100
pim4 = 1 / pi^(0.25)
m = floor(Int, (n + 1) / 2)
nodes = zeros(n)
weights = zeros(n)
z = sqrt(2n + 1) - 1.85575 * ((2n + 1).^(-1 / 6))
for i = 1:m
# Reasonable starting values for root finding
if i == 1
z = sqrt(2n + 1) - 1.85575 * ((2n + 1).^(-1 / 6))
##CHUNK 6
- `a::Union{Real, Vector{Real}}` : Lower endpoint along each dimension
- `b::Union{Real, Vector{Real}}` : Upper endpoint along each dimension
$(qnw_returns)
$(qnw_func_notes)
$(qnw_refs)
"""
function qnwcheb(n::Int, a::Real, b::Real)
nodes = (b + a) / 2 .- (b - a) / 2 .* cos.(pi / n .* (0.5:(n - 0.5)))
weights = ((b - a) / n) .* (cos.(pi / n .* ((1:n) .- 0.5) * (2:2:n - 1)') *
(-2.0 ./ ((1:2:n - 2) .* (3:2:n))) .+ 1)
return nodes, weights
end
"""
Computes nodes and weights for multivariate normal distribution.
##### Arguments
##CHUNK 7
if abs(z - z1) < 1e-14
break
end
end
if it >= maxit
error("Failed to converge in qnwnorm")
end
nodes[n + 1 - i] = z
nodes[i] = -z
weights[i] = 2 ./ (pp .* pp)
weights[n + 1 - i] = weights[i]
end
weights = weights ./ sqrt(pi)
nodes = sqrt(2) .* nodes
return nodes, weights
end
##CHUNK 8
function qnwlogn(n, mu, sig2)
nodes, weights = qnwnorm(n, mu, sig2)
return exp.(nodes), weights
end
## qnwequi
const equidist_pp = sqrt.(primes(7920)) # good for d <= 1000
"""
Generates equidistributed sequences with property that averages
value of integrable function evaluated over the sequence converges
to the integral as n goes to infinity.
##### Arguments
- `n::Union{Int, Vector{Int}}` : Number of desired nodes along each dimension
- `a::Union{Real, Vector{Real}}` : Lower endpoint along each dimension
- `b::Union{Real, Vector{Real}}` : Upper endpoint along each dimension
##CHUNK 9
nodes = (b + a) / 2 .- (b - a) / 2 .* cos.(pi / n .* (0.5:(n - 0.5)))
weights = ((b - a) / n) .* (cos.(pi / n .* ((1:n) .- 0.5) * (2:2:n - 1)') *
(-2.0 ./ ((1:2:n - 2) .* (3:2:n))) .+ 1)
return nodes, weights
end
"""
Computes nodes and weights for multivariate normal distribution.
##### Arguments
- `n::Union{Int, Vector{Int}}` : Number of desired nodes along each dimension
- `mu::Union{Real, Vector{Real}}` : Mean along each dimension
- `sig2::Union{Real, Vector{Real}, Matrix{Real}}(eye(length(n)))` : Covariance
structure
$(qnw_returns)
##### Notes
##CHUNK 10
end
if its >= maxit
error("Failure to converge in qnwbeta")
end
x[i] = z
w[i] = temp / (pp * p2)
end
x = (1 .- x) ./ 2
w = w * exp((logabsgamma(a + n))[1] +
(logabsgamma(b + n))[1] -
(logabsgamma(n + 1))[1] -
(logabsgamma(n + ab + 1))[1] )
w = w / (2 * exp( (logabsgamma(a + 1))[1] +
(logabsgamma(b + 1))[1] -
(logabsgamma(ab + 2))[1] ))
return x, w
|
43
| 59
|
QuantEcon.jl
| 205
|
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
|
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
|
[
43,
59
] |
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
|
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
|
var_quadratic_sum
| 43
| 59
|
src/quadsums.jl
|
#FILE: QuantEcon.jl/src/matrix_eqn.jl
##CHUNK 1
- `gamma1::Matrix{Float64}` Represents the value ``X``
"""
function solve_discrete_lyapunov(A::ScalarOrArray,
B::ScalarOrArray,
max_it::Int=50)
# TODO: Implement Bartels-Stewardt
n = size(A, 2)
alpha0 = reshape([A;], n, n)
gamma0 = reshape([B;], n, n)
alpha1 = fill!(similar(alpha0), zero(eltype(alpha0)))
gamma1 = fill!(similar(gamma0), zero(eltype(gamma0)))
diff = 5
n_its = 1
while diff > 1e-15
#FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}` The policy function, a `k x n` array
- `K::Matrix{Float64}` The worst case matrix, a `j x n` array
- `x0::Vector{Float64}` : The initial condition for state
##### Returns
- `e::Float64` The deterministic entropy
"""
function compute_deterministic_entropy(rlq::RBLQ, F, K, x0)
B, C, bet = rlq.B, rlq.C, rlq.bet
H0 = K'*K
C0 = zeros(Float64, rlq.n, 1)
A0 = A - B*F + C*K
return var_quadratic_sum(A0, C0, H0, bet, x0)
end
##CHUNK 2
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
#FILE: QuantEcon.jl/test/test_quadsum.jl
##CHUNK 1
val = var_quadratic_sum(A, C, H, beta, x0)
@test isapprox(val, 20.0; rough_kwargs...)
end
@testset "test identity var sum" begin
beta = .95
A = Matrix(I, 3, 3)
C = zeros(3, 3)
H = Matrix(I, 3, 3)
x0 = ones(3)
val = var_quadratic_sum(A, C, H, beta, x0)
@test isapprox(val, 60.0; rough_kwargs...)
end
end # facts
#FILE: QuantEcon.jl/test/test_lqcontrol.jl
##CHUNK 1
c = .05
β = .95
n = 0.
capT = 1
lq_scalar = QuantEcon.LQ(q, r, a, b, c, n, bet=β, capT=capT, rf=rf)
Q = [0. 0.; 0. 1]
R = [1. 0.; 0. 0]
rf = I * 100
A = fill(0.95, 2, 2)
B = fill(-1.0, 2, 2)
lq_mat = QuantEcon.LQ(Q, R, A, B, bet=β, capT=capT, rf=rf)
@testset "Test scalar sequences with exact by hand solution" begin
x0 = 2.0
x_seq, u_seq, w_seq = compute_sequence(lq_scalar, x0)
# solve by hand
u_0 = (-2 .*lq_scalar.A*lq_scalar.B*lq_scalar.bet*lq_scalar.rf) /
(2 .*lq_scalar.Q+lq_scalar.bet*lq_scalar.rf*2lq_scalar.B^2)*x0
x_1 = lq_scalar.A * x0 + lq_scalar.B * u_0 + w_seq[end]
#FILE: QuantEcon.jl/src/lqcontrol.jl
##CHUNK 1
This function updates the `P`, `d`, and `F` fields on the `lq` instance in
addition to returning them
"""
function stationary_values!(lq::LQ)
# simplify notation
Q, R, A, B, N, C = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C
# solve Riccati equation, obtain P
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
#CURRENT FILE: QuantEcon.jl/src/quadsums.jl
##CHUNK 1
- `A::Union{Float64, Matrix{Float64}}` The `n x n` matrix described above (scalar)
if `n = 1`
- `C::Union{Float64, Matrix{Float64}}` The `n x n` matrix described above (scalar)
if `n = 1`
- `H::Union{Float64, Matrix{Float64}}` The `n x n` matrix described above (scalar)
if `n = 1`
- `beta::Float64`: Discount factor in `(0, 1)`
- `x_0::Union{Float64, Vector{Float64}}` The initial condtion. A conformable
array (of length `n`) or a scalar if `n = 1`
##### Returns
- `q0::Float64` : Represents the value ``q(x_0)``
##### Notes
The formula for computing ``q(x_0)`` is ``q(x_0) = x_0' Q x_0 + v`` where
- ``Q`` is the solution to ``Q = H + \beta A' Q A`` and
- ``v = \frac{trace(C' Q C) \beta}{1 - \beta}``
##CHUNK 2
##### Returns
- `q0::Float64` : Represents the value ``q(x_0)``
##### Notes
The formula for computing ``q(x_0)`` is ``q(x_0) = x_0' Q x_0 + v`` where
- ``Q`` is the solution to ``Q = H + \beta A' Q A`` and
- ``v = \frac{trace(C' Q C) \beta}{1 - \beta}``
"""
@doc doc"""
Computes the quadratic sum
```math
V = \sum_{j=0}^{\infty} A^j B A^{j'}
```
##CHUNK 3
```math
q(x_0) = \mathbb{E} \sum_{t=0}^{\infty} \beta^t x_t' H x_t
```
Here ``{x_t}`` is the VAR process ``x_{t+1} = A x_t + C w_t`` with ``{w_t}``
standard normal and ``x_0`` the initial condition.
##### Arguments
- `A::Union{Float64, Matrix{Float64}}` The `n x n` matrix described above (scalar)
if `n = 1`
- `C::Union{Float64, Matrix{Float64}}` The `n x n` matrix described above (scalar)
if `n = 1`
- `H::Union{Float64, Matrix{Float64}}` The `n x n` matrix described above (scalar)
if `n = 1`
- `beta::Float64`: Discount factor in `(0, 1)`
- `x_0::Union{Float64, Vector{Float64}}` The initial condtion. A conformable
array (of length `n`) or a scalar if `n = 1`
##CHUNK 4
``V`` is computed by solving the corresponding discrete lyapunov equation using the
doubling algorithm. See the documentation of `solve_discrete_lyapunov` for
more information.
##### Arguments
- `A::Matrix{Float64}` : An `n x n` matrix as described above. We assume in order
for convergence that the eigenvalues of ``A`` have moduli bounded by unity
- `B::Matrix{Float64}` : An `n x n` matrix as described above. We assume in order
for convergence that the eigenvalues of ``B`` have moduli bounded by unity
- `max_it::Int(50)` : Maximum number of iterations
##### Returns
- `gamma1::Matrix{Float64}` : Represents the value ``V``
"""
function m_quadratic_sum(A::Matrix, B::Matrix; max_it=50)
solve_discrete_lyapunov(A, B, max_it)
end
|
119
| 130
|
QuantEcon.jl
| 206
|
function b_operator(rlq::RBLQ, P::Matrix)
A, B, Q, R, bet = rlq.A, rlq.B, rlq.Q, rlq.R, rlq.bet
S1 = Q + bet.*B'*P*B
S2 = bet.*B'*P*A
S3 = bet.*A'*P*A
F = S1 \ S2
new_P = R - S2'*F + S3
return F, new_P
end
|
function b_operator(rlq::RBLQ, P::Matrix)
A, B, Q, R, bet = rlq.A, rlq.B, rlq.Q, rlq.R, rlq.bet
S1 = Q + bet.*B'*P*B
S2 = bet.*B'*P*A
S3 = bet.*A'*P*A
F = S1 \ S2
new_P = R - S2'*F + S3
return F, new_P
end
|
[
119,
130
] |
function b_operator(rlq::RBLQ, P::Matrix)
A, B, Q, R, bet = rlq.A, rlq.B, rlq.Q, rlq.R, rlq.bet
S1 = Q + bet.*B'*P*B
S2 = bet.*B'*P*A
S3 = bet.*A'*P*A
F = S1 \ S2
new_P = R - S2'*F + S3
return F, new_P
end
|
function b_operator(rlq::RBLQ, P::Matrix)
A, B, Q, R, bet = rlq.A, rlq.B, rlq.Q, rlq.R, rlq.bet
S1 = Q + bet.*B'*P*B
S2 = bet.*B'*P*A
S3 = bet.*A'*P*A
F = S1 \ S2
new_P = R - S2'*F + S3
return F, new_P
end
|
b_operator
| 119
| 130
|
src/robustlq.jl
|
#FILE: QuantEcon.jl/src/lqcontrol.jl
##CHUNK 1
This function updates the `P` and `d` fields on the `lq` instance in addition to
returning them
"""
function update_values!(lq::LQ)
# Simplify notation
Q, R, A, B, N, C, P, d = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C, lq.P, lq.d
# Some useful matrices
s1 = Q + lq.bet * (B'P*B)
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
##CHUNK 2
This function updates the `P`, `d`, and `F` fields on the `lq` instance in
addition to returning them
"""
function stationary_values!(lq::LQ)
# simplify notation
Q, R, A, B, N, C = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C
# solve Riccati equation, obtain P
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
##CHUNK 3
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
# Bind states
lq.P, lq.F, lq.d = P, F, d
end
"""
Non-mutating routine for solving for `P`, `d`, and `F` in infinite horizon model
See docstring for `stationary_values!` for more explanation
"""
##CHUNK 4
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
new_d = lq.bet * (d + tr(P * C * C'))
# Set new state
lq.P, lq.d = new_P, new_d
end
@doc doc"""
Computes value and policy functions in infinite horizon model.
##### Arguments
#CURRENT FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
"""
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
@doc doc"""
##CHUNK 2
##### Returns
- `dP::Matrix{Float64}` : The matrix ``P`` after applying the ``D`` operator
"""
function d_operator(rlq::RBLQ, P::Matrix)
C, theta, Im = rlq.C, rlq.theta, Matrix(I, rlq.j, rlq.j)
S1 = P*C
dP = P + S1*((theta.*Im - C'*S1) \ (S1'))
return dP
end
@doc doc"""
The ``D`` operator, mapping ``P`` into
```math
B(P) := R - \beta^2 A'PB(Q + \beta B'PB)^{-1}B'PA + \beta A'PA
```
##CHUNK 3
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
if iterate >= max_iter
@warn("Maximum iterations in robust_rul_simple")
##CHUNK 4
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
@doc doc"""
Given ``K`` and ``F``, compute the value of deterministic entropy, which is
``\sum_t \beta^t x_t' K'K x_t`` with ``x_{t+1} = (A - BF + CK) x_t``.
##### Arguments
##CHUNK 5
Compute agent 1's best cost-minimizing response ``K``, given ``F``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `K::Matrix{Float64}`: A `k x n` array representing the worst case matrix
##### Returns
- `F::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``K``
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
##CHUNK 6
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}`: A `k x n` array representing agent 1's policy
##### Returns
- `K::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``F``
- `P::Matrix{Float64}` : The value function corresponding to ``F``
"""
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
|
157
| 175
|
QuantEcon.jl
| 207
|
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
F = f[1:k, :]
K = -f[k+1:end, :]
return F, K, P
end
|
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
F = f[1:k, :]
K = -f[k+1:end, :]
return F, K, P
end
|
[
157,
175
] |
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
F = f[1:k, :]
K = -f[k+1:end, :]
return F, K, P
end
|
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
F = f[1:k, :]
K = -f[k+1:end, :]
return F, K, P
end
|
robust_rule
| 157
| 175
|
src/robustlq.jl
|
#FILE: QuantEcon.jl/src/lqcontrol.jl
##CHUNK 1
This function updates the `P`, `d`, and `F` fields on the `lq` instance in
addition to returning them
"""
function stationary_values!(lq::LQ)
# simplify notation
Q, R, A, B, N, C = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C
# solve Riccati equation, obtain P
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
##CHUNK 2
This function updates the `P` and `d` fields on the `lq` instance in addition to
returning them
"""
function update_values!(lq::LQ)
# Simplify notation
Q, R, A, B, N, C, P, d = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C, lq.P, lq.d
# Some useful matrices
s1 = Q + lq.bet * (B'P*B)
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
##CHUNK 3
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
# Bind states
lq.P, lq.F, lq.d = P, F, d
end
"""
Non-mutating routine for solving for `P`, `d`, and `F` in infinite horizon model
See docstring for `stationary_values!` for more explanation
"""
#FILE: QuantEcon.jl/test/test_robustlq.jl
##CHUNK 1
A = [1. 0. 0.
0. 1. 0.
0. 0. ρ]
B = [0.0 1.0 0.0]'
C = [0.0 0.0 sigma_d]'
rblq = RBLQ(Q, R, A, B, C, β, θ)
lq = QuantEcon.LQ(Q, R, A, B, C, β)
Fr, Kr, Pr = robust_rule(rblq)
# test stuff
@testset "test robust vs simple" begin
Fs, Ks, Ps = robust_rule_simple(rblq, Pr; tol=1e-12)
@test isapprox(Fr, Fs; rough_kwargs...)
@test isapprox(Kr, Ks; rough_kwargs...)
@test isapprox(Pr, Ps; rough_kwargs...)
#FILE: QuantEcon.jl/test/test_lqcontrol.jl
##CHUNK 1
r = 0.05
bet = 1 / (1 + r)
t = 45
c_bar = 2.0
sigma = 0.25
mu = 1.0
q = 1e6
# == Formulate as an LQ problem == #
Q = 1.0
R = zeros(2, 2)
Rf = zeros(2, 2); Rf[1, 1] = q
A = [1.0+r -c_bar+mu;
0.0 1.0]
B = [-1.0, 0.0]
C = [sigma, 0.0]
# == Compute solutions and simulate == #
lq = QuantEcon.LQ(Q, R, A, B, C; bet=bet, capT=t, rf=Rf)
x0 = [0.0, 1.0]
#CURRENT FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
##### Returns
- `F::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``K``
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
@doc doc"""
Given ``K`` and ``F``, compute the value of deterministic entropy, which is
##CHUNK 2
- `K::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``F``
- `P::Matrix{Float64}` : The value function corresponding to ``F``
"""
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
##CHUNK 3
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
@doc doc"""
Compute agent 1's best cost-minimizing response ``K``, given ``F``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `K::Matrix{Float64}`: A `k x n` array representing the worst case matrix
##CHUNK 4
@doc doc"""
Compute agent 2's best cost-minimizing response ``K``, given ``F``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}`: A `k x n` array representing agent 1's policy
##### Returns
- `K::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``F``
- `P::Matrix{Float64}` : The value function corresponding to ``F``
"""
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
##CHUNK 5
end
@doc doc"""
Compute agent 1's best cost-minimizing response ``K``, given ``F``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `K::Matrix{Float64}`: A `k x n` array representing the worst case matrix
##### Returns
- `F::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``K``
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
|
203
| 229
|
QuantEcon.jl
| 208
|
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
if iterate >= max_iter
@warn("Maximum iterations in robust_rul_simple")
end
# I = eye(j)
K = (theta.*I - C'*P*C)\(C'*P)*(A - B*F)
return F, K, P
end
|
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
if iterate >= max_iter
@warn("Maximum iterations in robust_rul_simple")
end
# I = eye(j)
K = (theta.*I - C'*P*C)\(C'*P)*(A - B*F)
return F, K, P
end
|
[
203,
229
] |
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
if iterate >= max_iter
@warn("Maximum iterations in robust_rul_simple")
end
# I = eye(j)
K = (theta.*I - C'*P*C)\(C'*P)*(A - B*F)
return F, K, P
end
|
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
if iterate >= max_iter
@warn("Maximum iterations in robust_rul_simple")
end
# I = eye(j)
K = (theta.*I - C'*P*C)\(C'*P)*(A - B*F)
return F, K, P
end
|
robust_rule_simple
| 203
| 229
|
src/robustlq.jl
|
#FILE: QuantEcon.jl/src/lqcontrol.jl
##CHUNK 1
This function updates the `P` and `d` fields on the `lq` instance in addition to
returning them
"""
function update_values!(lq::LQ)
# Simplify notation
Q, R, A, B, N, C, P, d = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C, lq.P, lq.d
# Some useful matrices
s1 = Q + lq.bet * (B'P*B)
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
##CHUNK 2
This function updates the `P`, `d`, and `F` fields on the `lq` instance in
addition to returning them
"""
function stationary_values!(lq::LQ)
# simplify notation
Q, R, A, B, N, C = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C
# solve Riccati equation, obtain P
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
##CHUNK 3
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
# Bind states
lq.P, lq.F, lq.d = P, F, d
end
"""
Non-mutating routine for solving for `P`, `d`, and `F` in infinite horizon model
See docstring for `stationary_values!` for more explanation
"""
#FILE: QuantEcon.jl/test/test_robustlq.jl
##CHUNK 1
A = [1. 0. 0.
0. 1. 0.
0. 0. ρ]
B = [0.0 1.0 0.0]'
C = [0.0 0.0 sigma_d]'
rblq = RBLQ(Q, R, A, B, C, β, θ)
lq = QuantEcon.LQ(Q, R, A, B, C, β)
Fr, Kr, Pr = robust_rule(rblq)
# test stuff
@testset "test robust vs simple" begin
Fs, Ks, Ps = robust_rule_simple(rblq, Pr; tol=1e-12)
@test isapprox(Fr, Fs; rough_kwargs...)
@test isapprox(Kr, Ks; rough_kwargs...)
@test isapprox(Pr, Ps; rough_kwargs...)
#CURRENT FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
```
And the value function is ``-x'Px``
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
##### Returns
- `F::Matrix{Float64}` : The optimal control matrix from above
- `P::Matrix{Float64}` : The positive semi-definite matrix defining the value function
- `K::Matrix{Float64}` : the worst-case shock matrix ``K``, where ``w_{t+1} = K x_t`` is the worst case shock
"""
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
##CHUNK 2
- `F::Matrix{Float64}` : The optimal control matrix from above
- `P::Matrix{Float64}` : The positive semi-definite matrix defining the value function
- `K::Matrix{Float64}` : the worst-case shock matrix ``K``, where ``w_{t+1} = K x_t`` is the worst case shock
"""
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
##CHUNK 3
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
F = f[1:k, :]
K = -f[k+1:end, :]
return F, K, P
end
@doc doc"""
Solve the robust LQ problem
##CHUNK 4
- `K::Matrix{Float64}`: A `k x n` array representing the worst case matrix
##### Returns
- `F::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``K``
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
##CHUNK 5
##### Returns
- `K::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``F``
- `P::Matrix{Float64}` : The value function corresponding to ``F``
"""
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
##CHUNK 6
- `P::Matrix{Float64}` : size is `n x n`
##### Returns
- `F::Matrix{Float64}` : The ``F`` matrix as defined above
- `new_p::Matrix{Float64}` : The matrix ``P`` after applying the ``B`` operator
"""
function b_operator(rlq::RBLQ, P::Matrix)
A, B, Q, R, bet = rlq.A, rlq.B, rlq.Q, rlq.R, rlq.bet
S1 = Q + bet.*B'*P*B
S2 = bet.*B'*P*A
S3 = bet.*A'*P*A
F = S1 \ S2
new_P = R - S2'*F + S3
return F, new_P
end
|
245
| 260
|
QuantEcon.jl
| 209
|
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
|
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
|
[
245,
260
] |
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
|
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
|
F_to_K
| 245
| 260
|
src/robustlq.jl
|
#FILE: QuantEcon.jl/src/lqcontrol.jl
##CHUNK 1
This function updates the `P`, `d`, and `F` fields on the `lq` instance in
addition to returning them
"""
function stationary_values!(lq::LQ)
# simplify notation
Q, R, A, B, N, C = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C
# solve Riccati equation, obtain P
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
##CHUNK 2
This function updates the `P` and `d` fields on the `lq` instance in addition to
returning them
"""
function update_values!(lq::LQ)
# Simplify notation
Q, R, A, B, N, C, P, d = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C, lq.P, lq.d
# Some useful matrices
s1 = Q + lq.bet * (B'P*B)
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
##CHUNK 3
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
# Bind states
lq.P, lq.F, lq.d = P, F, d
end
"""
Non-mutating routine for solving for `P`, `d`, and `F` in infinite horizon model
See docstring for `stationary_values!` for more explanation
"""
#CURRENT FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
- `rlq::RBLQ`: Instance of `RBLQ` type
- `K::Matrix{Float64}`: A `k x n` array representing the worst case matrix
##### Returns
- `F::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``K``
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
##CHUNK 2
- `F::Matrix{Float64}` : The optimal control matrix from above
- `P::Matrix{Float64}` : The positive semi-definite matrix defining the value function
- `K::Matrix{Float64}` : the worst-case shock matrix ``K``, where ``w_{t+1} = K x_t`` is the worst case shock
"""
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
##CHUNK 3
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
@doc doc"""
Given ``K`` and ``F``, compute the value of deterministic entropy, which is
``\sum_t \beta^t x_t' K'K x_t`` with ``x_{t+1} = (A - BF + CK) x_t``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}` The policy function, a `k x n` array
- `K::Matrix{Float64}` The worst case matrix, a `j x n` array
##CHUNK 4
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
F = f[1:k, :]
K = -f[k+1:end, :]
return F, K, P
end
@doc doc"""
Solve the robust LQ problem
##CHUNK 5
"""
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
##CHUNK 6
```
And the value function is ``-x'Px``
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
##### Returns
- `F::Matrix{Float64}` : The optimal control matrix from above
- `P::Matrix{Float64}` : The positive semi-definite matrix defining the value function
- `K::Matrix{Float64}` : the worst-case shock matrix ``K``, where ``w_{t+1} = K x_t`` is the worst case shock
"""
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
##CHUNK 7
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}` : The policy function, a `k x n` array
##### Returns
- `P_F::Matrix{Float64}` : Matrix for discounted cost
- `d_F::Float64` : Constant for discounted cost
- `K_F::Matrix{Float64}` : Worst case policy
- `O_F::Matrix{Float64}` : Matrix for discounted entropy
- `o_F::Float64` : Constant for discounted entropy
"""
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
|
276
| 286
|
QuantEcon.jl
| 210
|
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
|
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
|
[
276,
286
] |
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
|
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
|
K_to_F
| 276
| 286
|
src/robustlq.jl
|
#FILE: QuantEcon.jl/src/lqcontrol.jl
##CHUNK 1
This function updates the `P`, `d`, and `F` fields on the `lq` instance in
addition to returning them
"""
function stationary_values!(lq::LQ)
# simplify notation
Q, R, A, B, N, C = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C
# solve Riccati equation, obtain P
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
##CHUNK 2
This function updates the `P` and `d` fields on the `lq` instance in addition to
returning them
"""
function update_values!(lq::LQ)
# Simplify notation
Q, R, A, B, N, C, P, d = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C, lq.P, lq.d
# Some useful matrices
s1 = Q + lq.bet * (B'P*B)
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
##CHUNK 3
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
# Bind states
lq.P, lq.F, lq.d = P, F, d
end
"""
Non-mutating routine for solving for `P`, `d`, and `F` in infinite horizon model
See docstring for `stationary_values!` for more explanation
"""
#CURRENT FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
- `K::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``F``
- `P::Matrix{Float64}` : The value function corresponding to ``F``
"""
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
##CHUNK 2
- `F::Matrix{Float64}` : The optimal control matrix from above
- `P::Matrix{Float64}` : The positive semi-definite matrix defining the value function
- `K::Matrix{Float64}` : the worst-case shock matrix ``K``, where ``w_{t+1} = K x_t`` is the worst case shock
"""
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
##CHUNK 3
# Set up LQ version
# I = eye(j)
Z = zeros(k, j)
Ba = [B C]
Qa = [Q Z
Z' -bet.*I.*theta]
lq = QuantEcon.LQ(Qa, R, A, Ba, bet=bet)
# Solve and convert back to robust problem
P, f, d = stationary_values(lq)
F = f[1:k, :]
K = -f[k+1:end, :]
return F, K, P
end
@doc doc"""
Solve the robust LQ problem
##CHUNK 4
```
And the value function is ``-x'Px``
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
##### Returns
- `F::Matrix{Float64}` : The optimal control matrix from above
- `P::Matrix{Float64}` : The positive semi-definite matrix defining the value function
- `K::Matrix{Float64}` : the worst-case shock matrix ``K``, where ``w_{t+1} = K x_t`` is the worst case shock
"""
function robust_rule(rlq::RBLQ)
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
##CHUNK 5
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
@doc doc"""
Compute agent 1's best cost-minimizing response ``K``, given ``F``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `K::Matrix{Float64}`: A `k x n` array representing the worst case matrix
##### Returns
##CHUNK 6
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
##CHUNK 7
"""
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
|
331
| 350
|
QuantEcon.jl
| 211
|
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
|
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
|
[
331,
350
] |
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
|
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
|
evaluate_F
| 331
| 350
|
src/robustlq.jl
|
#FILE: QuantEcon.jl/src/lqcontrol.jl
##CHUNK 1
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
# Bind states
lq.P, lq.F, lq.d = P, F, d
end
"""
Non-mutating routine for solving for `P`, `d`, and `F` in infinite horizon model
See docstring for `stationary_values!` for more explanation
"""
##CHUNK 2
This function updates the `P`, `d`, and `F` fields on the `lq` instance in
addition to returning them
"""
function stationary_values!(lq::LQ)
# simplify notation
Q, R, A, B, N, C = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C
# solve Riccati equation, obtain P
A0, B0 = sqrt(lq.bet) * A, sqrt(lq.bet) * B
P = solve_discrete_riccati(A0, B0, R, Q, N)
# Compute F
s1 = Q .+ lq.bet * (B' * P * B)
s2 = lq.bet * (B' * P * A) .+ N
F = s1 \ s2
# Compute d
d = lq.bet * tr(P * C * C') / (1 - lq.bet)
##CHUNK 3
This function updates the `P` and `d` fields on the `lq` instance in addition to
returning them
"""
function update_values!(lq::LQ)
# Simplify notation
Q, R, A, B, N, C, P, d = lq.Q, lq.R, lq.A, lq.B, lq.N, lq.C, lq.P, lq.d
# Some useful matrices
s1 = Q + lq.bet * (B'P*B)
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
##CHUNK 4
s2 = lq.bet * (B'P*A) + N
s3 = lq.bet * (A'P*A)
# Compute F as (Q + B'PB)^{-1} (beta B'PA)
lq.F = s1 \ s2
# Shift P back in time one step
new_P = R - s2'lq.F + s3
# Recalling that tr(AB) = tr(BA)
new_d = lq.bet * (d + tr(P * C * C'))
# Set new state
lq.P, lq.d = new_P, new_d
end
@doc doc"""
Computes value and policy functions in infinite horizon model.
##### Arguments
#CURRENT FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
- `K::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``F``
- `P::Matrix{Float64}` : The value function corresponding to ``F``
"""
function F_to_K(rlq::RBLQ, F::Matrix)
# simplify notation
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
# set up lq
Q2 = bet * theta
R2 = - R - F'*Q*F
A2 = A - B*F
B2 = C
lq = QuantEcon.LQ(Q2, R2, A2, B2, bet=bet)
neg_P, neg_K, d = stationary_values(lq)
return -neg_K, -neg_P
end
##CHUNK 2
"""
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
##CHUNK 3
- `e::Float64` The deterministic entropy
"""
function compute_deterministic_entropy(rlq::RBLQ, F, K, x0)
B, C, bet = rlq.B, rlq.C, rlq.bet
H0 = K'*K
C0 = zeros(Float64, rlq.n, 1)
A0 = A - B*F + C*K
return var_quadratic_sum(A0, C0, H0, bet, x0)
end
@doc doc"""
Given a fixed policy ``F``, with the interpretation ``u = -F x``, this function
computes the matrix ``P_F`` and constant ``d_F`` associated with discounted cost
``J_F(x) = x' P_F x + d_F``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}` : The policy function, a `k x n` array
##CHUNK 4
- `F::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``K``
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
lq = QuantEcon.LQ(Q1, R1, A1, B1, bet=bet)
P, F, d = stationary_values(lq)
return F, P
end
@doc doc"""
Given ``K`` and ``F``, compute the value of deterministic entropy, which is
``\sum_t \beta^t x_t' K'K x_t`` with ``x_{t+1} = (A - BF + CK) x_t``.
##CHUNK 5
@doc doc"""
Compute agent 1's best cost-minimizing response ``K``, given ``F``.
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `K::Matrix{Float64}`: A `k x n` array representing the worst case matrix
##### Returns
- `F::Matrix{Float64}` : Agent's best cost minimizing response corresponding to ``K``
- `P::Matrix{Float64}` : The value function corresponding to ``K``
"""
function K_to_F(rlq::RBLQ, K::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta = rlq.bet, rlq.theta
A1, B1, Q1, R1 = A+C*K, B, Q, R-bet*theta.*K'*K
##CHUNK 6
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}` The policy function, a `k x n` array
- `K::Matrix{Float64}` The worst case matrix, a `j x n` array
- `x0::Vector{Float64}` : The initial condition for state
##### Returns
- `e::Float64` The deterministic entropy
"""
function compute_deterministic_entropy(rlq::RBLQ, F, K, x0)
B, C, bet = rlq.B, rlq.C, rlq.bet
H0 = K'*K
C0 = zeros(Float64, rlq.n, 1)
A0 = A - B*F + C*K
return var_quadratic_sum(A0, C0, H0, bet, x0)
end
|
17
| 55
|
QuantEcon.jl
| 212
|
function MVNSampler(mu::Vector{TM}, Sigma::Matrix{TS}) where {TM<:Real,TS<:Real}
ATOL1, RTOL1 = 1e-8, 1e-8
ATOL2, RTOL2 = 1e-8, 1e-14
n = length(mu)
if size(Sigma) != (n, n) # Check Sigma is n x n
throw(ArgumentError(
"Sigma must be 2 dimensional and square matrix of same length to mu"
))
end
issymmetric(Sigma) || throw(ArgumentError("Sigma must be symmetric"))
C = cholesky(Symmetric(Sigma, :L), Cholesky_RowMaximum, check=false)
A = C.factors
r = C.rank
p = invperm(C.piv)
if r == n # Positive definite
Q = tril!(A)[p, p]
return MVNSampler(mu, Sigma, Q)
end
non_PSD_msg = "Sigma must be positive semidefinite"
for i in r+1:n
A[i, i] >= -ATOL1 - RTOL1 * A[1, 1] ||
throw(ArgumentError(non_PSD_msg))
end
tril!(view(A, :, 1:r))
A[:, r+1:end] .= 0
Q = A[p, p]
isapprox(Q*Q', Sigma; rtol=RTOL2, atol=ATOL2) ||
throw(ArgumentError(non_PSD_msg))
return MVNSampler(mu, Sigma, Q)
end
|
function MVNSampler(mu::Vector{TM}, Sigma::Matrix{TS}) where {TM<:Real,TS<:Real}
ATOL1, RTOL1 = 1e-8, 1e-8
ATOL2, RTOL2 = 1e-8, 1e-14
n = length(mu)
if size(Sigma) != (n, n) # Check Sigma is n x n
throw(ArgumentError(
"Sigma must be 2 dimensional and square matrix of same length to mu"
))
end
issymmetric(Sigma) || throw(ArgumentError("Sigma must be symmetric"))
C = cholesky(Symmetric(Sigma, :L), Cholesky_RowMaximum, check=false)
A = C.factors
r = C.rank
p = invperm(C.piv)
if r == n # Positive definite
Q = tril!(A)[p, p]
return MVNSampler(mu, Sigma, Q)
end
non_PSD_msg = "Sigma must be positive semidefinite"
for i in r+1:n
A[i, i] >= -ATOL1 - RTOL1 * A[1, 1] ||
throw(ArgumentError(non_PSD_msg))
end
tril!(view(A, :, 1:r))
A[:, r+1:end] .= 0
Q = A[p, p]
isapprox(Q*Q', Sigma; rtol=RTOL2, atol=ATOL2) ||
throw(ArgumentError(non_PSD_msg))
return MVNSampler(mu, Sigma, Q)
end
|
[
17,
55
] |
function MVNSampler(mu::Vector{TM}, Sigma::Matrix{TS}) where {TM<:Real,TS<:Real}
ATOL1, RTOL1 = 1e-8, 1e-8
ATOL2, RTOL2 = 1e-8, 1e-14
n = length(mu)
if size(Sigma) != (n, n) # Check Sigma is n x n
throw(ArgumentError(
"Sigma must be 2 dimensional and square matrix of same length to mu"
))
end
issymmetric(Sigma) || throw(ArgumentError("Sigma must be symmetric"))
C = cholesky(Symmetric(Sigma, :L), Cholesky_RowMaximum, check=false)
A = C.factors
r = C.rank
p = invperm(C.piv)
if r == n # Positive definite
Q = tril!(A)[p, p]
return MVNSampler(mu, Sigma, Q)
end
non_PSD_msg = "Sigma must be positive semidefinite"
for i in r+1:n
A[i, i] >= -ATOL1 - RTOL1 * A[1, 1] ||
throw(ArgumentError(non_PSD_msg))
end
tril!(view(A, :, 1:r))
A[:, r+1:end] .= 0
Q = A[p, p]
isapprox(Q*Q', Sigma; rtol=RTOL2, atol=ATOL2) ||
throw(ArgumentError(non_PSD_msg))
return MVNSampler(mu, Sigma, Q)
end
|
function MVNSampler(mu::Vector{TM}, Sigma::Matrix{TS}) where {TM<:Real,TS<:Real}
ATOL1, RTOL1 = 1e-8, 1e-8
ATOL2, RTOL2 = 1e-8, 1e-14
n = length(mu)
if size(Sigma) != (n, n) # Check Sigma is n x n
throw(ArgumentError(
"Sigma must be 2 dimensional and square matrix of same length to mu"
))
end
issymmetric(Sigma) || throw(ArgumentError("Sigma must be symmetric"))
C = cholesky(Symmetric(Sigma, :L), Cholesky_RowMaximum, check=false)
A = C.factors
r = C.rank
p = invperm(C.piv)
if r == n # Positive definite
Q = tril!(A)[p, p]
return MVNSampler(mu, Sigma, Q)
end
non_PSD_msg = "Sigma must be positive semidefinite"
for i in r+1:n
A[i, i] >= -ATOL1 - RTOL1 * A[1, 1] ||
throw(ArgumentError(non_PSD_msg))
end
tril!(view(A, :, 1:r))
A[:, r+1:end] .= 0
Q = A[p, p]
isapprox(Q*Q', Sigma; rtol=RTOL2, atol=ATOL2) ||
throw(ArgumentError(non_PSD_msg))
return MVNSampler(mu, Sigma, Q)
end
|
MVNSampler
| 17
| 55
|
src/sampler.jl
|
#FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
Nm::Integer,
n_moments::Integer=2,
method::VAREstimationMethod=Even(),
n_sigmas::Real=sqrt(Nm-1))
# b = zeros(2)
# A = [0.9809 0.0028; 0.041 0.9648]
# Sigma = [7.569e-5 0.0; 0.0 0.00068644]
# N = 9
# n_moments = nMoments
# method = Quantile()
# b, B, Psi, Nm = (zeros(2), A, Sigma, N, nMoments, Quantile())
M, M_ = size(B, 1), size(B, 2)
# Check size restrictions on matrices
M == M_ || throw(ArgumentError("B must be a scalar or square matrix"))
M == length(b) || throw(ArgumentError("b must have the same number of rows as B"))
#% Check that Psi is a valid covariance matrix
isposdef(Psi) || throw(ArgumentError("Psi must be a positive definite matrix"))
##CHUNK 2
# b, B, Psi, Nm = (zeros(2), A, Sigma, N, nMoments, Quantile())
M, M_ = size(B, 1), size(B, 2)
# Check size restrictions on matrices
M == M_ || throw(ArgumentError("B must be a scalar or square matrix"))
M == length(b) || throw(ArgumentError("b must have the same number of rows as B"))
#% Check that Psi is a valid covariance matrix
isposdef(Psi) || throw(ArgumentError("Psi must be a positive definite matrix"))
# Check that Nm is a valid number of grid points
Nm >= 3 || throw(ArgumentError("Nm must be a positive interger greater than 3"))
# Check that n_moments is a valid number
if n_moments < 1 || !(n_moments % 2 == 0 || n_moments == 1)
error("n_moments must be either 1 or a positive even integer")
end
# warning about persistency
warn_persistency(B, method)
##CHUNK 3
function min_var_trace(A::AbstractMatrix)
==(size(A)...) || throw(ArgumentError("input matrix must be square"))
K = size(A, 1) # size of A
d = tr(A)/K # diagonal of U'*A*U should be closest to d
function obj(X, grad)
X = reshape(X, K, K)
return (norm(diag(X'*A*X) .- d))
end
function unitary_constraint(res, X, grad)
X = reshape(X, K, K)
res .= vec(X'*X - Matrix(I, K, K))
end
opt = NLopt.Opt(:LN_COBYLA, K^2)
NLopt.min_objective!(opt, obj)
NLopt.equality_constraint!(opt, unitary_constraint, zeros(K^2))
fval, U_vec, ret = NLopt.optimize(opt, vec(Matrix(I, K, K)))
#FILE: QuantEcon.jl/test/test_sampler.jl
##CHUNK 1
c = rand(mvns)-mvns.mu
@test all(broadcast(isapprox,c[1],c))
end
@testset "check positive semi-definite 1 and -1/(n-1)" begin
Sigma = -1/(n-1)*ones(n, n) + n/(n-1)*Matrix(Matrix(I, n, n))
mvns = MVNSampler(mu, Sigma)
@test isapprox(sum(rand(mvns)) , sum(mu), atol=1e-4, rtol=1e-4)
end
@testset "check non-positive definite" begin
Sigma = [2.0 1.0 3.0 1.0;
1.0 2.0 1.0 1.0;
3.0 1.0 2.0 1.0;
1.0 1.0 1.0 1.0]
@test_throws ArgumentError MVNSampler(mu, Sigma)
end
@testset "check availability of rank deficient matrix" begin
A = randn(n,n)
##CHUNK 2
@testset "check non-positive definite" begin
Sigma = [2.0 1.0 3.0 1.0;
1.0 2.0 1.0 1.0;
3.0 1.0 2.0 1.0;
1.0 1.0 1.0 1.0]
@test_throws ArgumentError MVNSampler(mu, Sigma)
end
@testset "check availability of rank deficient matrix" begin
A = randn(n,n)
for r=1:n-2
r=2
for i = 1:r
A[:, end+1-i] = sum(A[:, 1:end-r], dims = 2)
end
Sigma = A * A'
@test typeof(MVNSampler(mu,Sigma)) <: MVNSampler
end
end
##CHUNK 3
@test isapprox(mvns.Q * mvns.Q', mvns.Sigma)
end
@testset "check positive semi-definite zeros" begin
mvns = MVNSampler(mu, zeros(n, n))
@test rand(mvns) == mu
end
@testset "check positive semi-definite ones" begin
mvns = MVNSampler(mu, ones(n, n))
c = rand(mvns)-mvns.mu
@test all(broadcast(isapprox,c[1],c))
end
@testset "check positive semi-definite 1 and -1/(n-1)" begin
Sigma = -1/(n-1)*ones(n, n) + n/(n-1)*Matrix(Matrix(I, n, n))
mvns = MVNSampler(mu, Sigma)
@test isapprox(sum(rand(mvns)) , sum(mu), atol=1e-4, rtol=1e-4)
end
##CHUNK 4
@testset "Testing sampler.jl" begin
n = 4
mu = collect(range(0.2, stop=0.6, length=n))
@testset "check positive definite" begin
Sigma = [3.0 1.0 1.0 1.0;
1.0 2.0 1.0 1.0;
1.0 1.0 2.0 1.0;
1.0 1.0 1.0 1.0]
mvns = MVNSampler(mu, Sigma)
@test isapprox(mvns.Q * mvns.Q', mvns.Sigma)
end
@testset "check positive semi-definite zeros" begin
mvns = MVNSampler(mu, zeros(n, n))
@test rand(mvns) == mu
end
@testset "check positive semi-definite ones" begin
mvns = MVNSampler(mu, ones(n, n))
#FILE: QuantEcon.jl/other/regression.jl
##CHUNK 1
# B1 = inv(X1' * X1 + T / n1 * eye(n1) * 10.0^ penalty) * X1' * Y1
B1 = inv(X1' * X1 + T / n1 * I * 10.0^ penalty) * X1' * Y1
B = de_normalize(X, Y, B1)
return B
end
function RLS_TSVD(X, Y, penalty=7)
T, n = size(X)
n1 = n - 1
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1; thin=true)
r = sum((maximum(S)./ S) .<= 10.0^penalty)
Sr_inv = zeros(Float64, n1, n1)
Sr_inv[1:r, 1:r] = diagm(1./ S[1:r])
B1 = V*Sr_inv*U'*Y1
B = de_normalize(X, Y, B1)
return B
end
##CHUNK 2
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1; thin=true)
r = sum((maximum(S)./ S) .<= 10.0^penalty)
Sr_inv = zeros(Float64, n1, n1)
Sr_inv[1:r, 1:r] = diagm(1./ S[1:r])
B1 = V*Sr_inv*U'*Y1
B = de_normalize(X, Y, B1)
return B
end
function RLAD_PP(X, Y, penalty=7)
# TODO: There is a bug here. linprog returns wrong answer, even when
# MATLAB gets it right (lame)
T, n1 = size(X)
N = size(Y, 2)
n1 -= 1
X1, Y1 = normalize_data(X, Y)
#FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
"""
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
#CURRENT FILE: QuantEcon.jl/src/sampler.jl
|
40
| 68
|
QuantEcon.jl
| 213
|
function gridmake!(out, arrays::Union{AbstractVector,AbstractMatrix}...)
lens = Int[size(e, 1) for e in arrays]
n = sum(_i -> size(_i, 2), arrays)
l = prod(lens)
@assert size(out) == (l, n)
reverse!(lens)
repititions = cumprod(vcat(1, lens[1:end-1]))
reverse!(repititions)
reverse!(lens) # put lens back in correct order
col_base = 0
for i in 1:length(arrays)
arr = arrays[i]
ncol = size(arr, 2)
outer = repititions[i]
inner = floor(Int, l / (outer * lens[i]))
for col_plus in 1:ncol
row = 0
for _1 in 1:outer, ix in 1:lens[i], _2 in 1:inner
out[row+=1, col_base+col_plus] = arr[ix, col_plus]
end
end
col_base += ncol
end
return out
end
|
function gridmake!(out, arrays::Union{AbstractVector,AbstractMatrix}...)
lens = Int[size(e, 1) for e in arrays]
n = sum(_i -> size(_i, 2), arrays)
l = prod(lens)
@assert size(out) == (l, n)
reverse!(lens)
repititions = cumprod(vcat(1, lens[1:end-1]))
reverse!(repititions)
reverse!(lens) # put lens back in correct order
col_base = 0
for i in 1:length(arrays)
arr = arrays[i]
ncol = size(arr, 2)
outer = repititions[i]
inner = floor(Int, l / (outer * lens[i]))
for col_plus in 1:ncol
row = 0
for _1 in 1:outer, ix in 1:lens[i], _2 in 1:inner
out[row+=1, col_base+col_plus] = arr[ix, col_plus]
end
end
col_base += ncol
end
return out
end
|
[
40,
68
] |
function gridmake!(out, arrays::Union{AbstractVector,AbstractMatrix}...)
lens = Int[size(e, 1) for e in arrays]
n = sum(_i -> size(_i, 2), arrays)
l = prod(lens)
@assert size(out) == (l, n)
reverse!(lens)
repititions = cumprod(vcat(1, lens[1:end-1]))
reverse!(repititions)
reverse!(lens) # put lens back in correct order
col_base = 0
for i in 1:length(arrays)
arr = arrays[i]
ncol = size(arr, 2)
outer = repititions[i]
inner = floor(Int, l / (outer * lens[i]))
for col_plus in 1:ncol
row = 0
for _1 in 1:outer, ix in 1:lens[i], _2 in 1:inner
out[row+=1, col_base+col_plus] = arr[ix, col_plus]
end
end
col_base += ncol
end
return out
end
|
function gridmake!(out, arrays::Union{AbstractVector,AbstractMatrix}...)
lens = Int[size(e, 1) for e in arrays]
n = sum(_i -> size(_i, 2), arrays)
l = prod(lens)
@assert size(out) == (l, n)
reverse!(lens)
repititions = cumprod(vcat(1, lens[1:end-1]))
reverse!(repititions)
reverse!(lens) # put lens back in correct order
col_base = 0
for i in 1:length(arrays)
arr = arrays[i]
ncol = size(arr, 2)
outer = repititions[i]
inner = floor(Int, l / (outer * lens[i]))
for col_plus in 1:ncol
row = 0
for _1 in 1:outer, ix in 1:lens[i], _2 in 1:inner
out[row+=1, col_base+col_plus] = arr[ix, col_plus]
end
end
col_base += ncol
end
return out
end
|
gridmake!
| 40
| 68
|
src/util.jl
|
#FILE: QuantEcon.jl/src/markov/ddp.jl
##CHUNK 1
end
end
@doc doc"""
Define Matrix Multiplication between 3-dimensional matrix and a vector
Matrix multiplication over the last dimension of ``A``
"""
function *(A::AbstractArray{T,3}, v::AbstractVector) where T
shape = size(A)
size(v, 1) == shape[end] || error("wrong dimensions")
B = reshape(A, (prod(shape[1:end-1]), shape[end]))
out = B * v
return reshape(out, shape[1:end-1])
end
"""
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
random_probvec(k::Integer, m::Integer) = random_probvec(Random.GLOBAL_RNG, k, m)
#FILE: QuantEcon.jl/other/ddpsolve.jl
##CHUNK 1
ind = n * x + (1-n:0)
fstar = f[ind]
pstar = P[ind, :]
return pstar, fstar, ind
end
function expandg(g)
# Only need if I supply "transfunc". Not doing so
n, m = size(g)
P = sparse(1:n*m, g(:), 1, n*m, n)
return P
end
function diagmult(a::Vector{T}, b::Matrix{T}) where T <: Real
n = length(a)
return sparse(1:n, 1:n, a, n, n)*b
end
#FILE: QuantEcon.jl/src/interp.jl
##CHUNK 1
if col > li._ncol || col < 1
msg = "col must be beteween 1 and $(li._ncol), found $col"
throw(BoundsError(msg))
end
end
@inbounds begin
# handle corner cases
ix == 1 && return li.vals[1, col]
ix == li._n + 1 && return li.vals[end, col]
# now get on to the real work...
z = (li.breaks[ix] - xp)/(li.breaks[ix] - li.breaks[ix-1])
return (1-z) * li.vals[ix, col] + z * li.vals[ix-1, col]
end
end
_out_eltype(li::LinInterp{TV,TB}) where {TV,TB} = promote_type(eltype(TV), eltype(TB))
##CHUNK 2
end
return out
end
if ix == li._n + 1
for (ind, col) in enumerate(cols)
out[ind] = li.vals[end, col]
end
return out
end
# now get on to the real work...
z = (li.breaks[ix] - xp)/(li.breaks[ix] - li.breaks[ix-1])
for (ind, col) in enumerate(cols)
out[ind] = (1-z) * li.vals[ix, col] + z * li.vals[ix-1, col]
end
return out
end
##CHUNK 3
function LinInterp{TV,TB}(b::TB, v::TV) where {TB,TV}
if size(b, 1) != size(v, 1)
m = "breaks and vals must have same number of elements"
throw(DimensionMismatch(m))
end
if !issorted(b)
m = "breaks must be sorted"
throw(ArgumentError(m))
end
new{TV,TB}(b, v, length(b), size(v, 2))
end
end
function Base.:(==)(li1::LinInterp, li2::LinInterp)
all(getfield(li1, f) == getfield(li2, f) for f in fieldnames(typeof(li1)))
end
function LinInterp(b::TB, v::TV) where {TV<:AbstractArray,TB<:AbstractVector}
LinInterp{TV,TB}(b, v)
#FILE: QuantEcon.jl/test/test_mc_tools.jl
##CHUNK 1
((i/(n-1) > p) + (i/(n-1) == p)/2))
P[i+1, i+1] = 1 - P[i+1, i] - P[i+1, i+2]
end
P[end, end-1], P[end, end] = ε/2, 1 - ε/2
return P
end
function Base.isapprox(x::Vector{Vector{<:Real}},
y::Vector{Vector{<:Real}})
length(x) == length(y) || return false
return all(xy -> isapprox(x, y), zip(x, y))
end
@testset "Testing mc_tools.jl" begin
# Matrix with two recurrent classes [1, 2] and [4, 5, 6],
# which have periods 2 and 3, respectively
Q = [0 1 0 0 0 0
1 0 0 0 0 0
#CURRENT FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
gridmake!(out, arrays...)
out
end
end
function gridmake(t::Tuple)
all(map(x -> isa(x, Integer), t)) ||
error("gridmake(::Tuple) only valid when all elements are integers")
gridmake(map(x->1:x, t)...)::Matrix{Int}
end
"""
gridmake(arrays::Union{AbstractVector,AbstractMatrix}...)
Expand one or more vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the input
arrays will correspond to one column of the output matrix. The first array
varies the fastest (see example)
##CHUNK 2
```
# References
A. Nijenhuis and H. S. Wilf, Combinatorial Algorithms, Chapter 5,
Academic Press, 1978.
"""
function simplex_grid(m, n)
# Get number of elements in array and allocate
L = num_compositions(m, n)
out = Matrix{Int}(undef, m, L)
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
##CHUNK 3
out = Matrix{Int}(undef, m, L)
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
@doc raw"""
simplex_index(x, m, n)
Return the index of the point x in the lexicographic order of the
integer points of the (m-1)-dimensional simplex ``\{x \mid x_0 +
\cdots + x_{m-1} = n\}``.
# Arguments
|
138
| 148
|
QuantEcon.jl
| 214
|
function is_stable(A::AbstractMatrix)
# Check for stability by testing that eigenvalues are less than 1
stable = true
d = eigvals(A)
if maximum(abs, d) > 1.0
stable = false
end
return stable
end
|
function is_stable(A::AbstractMatrix)
# Check for stability by testing that eigenvalues are less than 1
stable = true
d = eigvals(A)
if maximum(abs, d) > 1.0
stable = false
end
return stable
end
|
[
138,
148
] |
function is_stable(A::AbstractMatrix)
# Check for stability by testing that eigenvalues are less than 1
stable = true
d = eigvals(A)
if maximum(abs, d) > 1.0
stable = false
end
return stable
end
|
function is_stable(A::AbstractMatrix)
# Check for stability by testing that eigenvalues are less than 1
stable = true
d = eigvals(A)
if maximum(abs, d) > 1.0
stable = false
end
return stable
end
|
is_stable
| 138
| 148
|
src/util.jl
|
#FILE: QuantEcon.jl/src/lss.jl
##CHUNK 1
#### Arguments
- `lss::LSS` The linear state space system
#### Returns
- `stable::Bool` Whether or not the system is stable
"""
function is_stable(lss::LSS)
# Get version of A without constant row/column
A = remove_constants(lss)
# Check for stability
stable = is_stable(A)
return stable
end
##CHUNK 2
function is_stable(lss::LSS)
# Get version of A without constant row/column
A = remove_constants(lss)
# Check for stability
stable = is_stable(A)
return stable
end
@doc doc"""
Finds the row and column, if any, that correspond to the constant
term in a `LSS` system and removes them to get the matrix that needs
to be checked for stability.
#### Arguments
- `lss::LSS` The linear state space system
##CHUNK 3
!is_stable(lss) ? error("Cannot compute geometric sum because the system is not stable.") : nothing
# I = eye(lss.n)
S_x = (I - bet .* lss.A) \ x_t
S_y = lss.G * S_x
return S_x, S_y
end
@doc doc"""
Test for stability of linear state space system.
First removes the constant row and column.
#### Arguments
- `lss::LSS` The linear state space system
#### Returns
- `stable::Bool` Whether or not the system is stable
"""
##CHUNK 4
mu_x, Sigma_x = mu_x1, Sigma_x1
if err < tol && i > 1
# return here because of how scoping works in loops.
return mu_x1, mu_y, Sigma_x1, Sigma_y
end
end
end
function geometric_sums(lss::LSS, bet, x_t)
!is_stable(lss) ? error("Cannot compute geometric sum because the system is not stable.") : nothing
# I = eye(lss.n)
S_x = (I - bet .* lss.A) \ x_t
S_y = lss.G * S_x
return S_x, S_y
end
@doc doc"""
Test for stability of linear state space system.
First removes the constant row and column.
#FILE: QuantEcon.jl/test/test_mc_tools.jl
##CHUNK 1
x = gth_solve(P_dict["P"])
@test isapprox(x, P_dict["stationary_dist"])
end
@testset "test MarkovChain with KMR matrices" begin
for P in kmr_matrices
mc = MarkovChain(P)
stationary_dists = stationary_distributions(mc)
for x in stationary_dists
# Check elements sum to one
@test isapprox(sum(x), 1; atol=tol)
# Check elements are nonnegative
for i in 1:length(x)
@test x[i] >= -tol
end
# Check x is a left eigenvector of P
@test isapprox(vec(x'*P), x; atol=tol)
end
##CHUNK 2
end
tol = 1e-15
kmr_matrices = (
kmr_markov_matrix_sequential(27, 1/3, 1e-2),
kmr_markov_matrix_sequential(3, 1/3, 1e-14)
)
@testset "test gth_solve with KMR matrices" begin
for P in kmr_matrices
x = gth_solve(P)
# Check elements sum to one
@test isapprox(sum(x), 1; atol=tol)
# Check elements are nonnegative
for i in 1:length(x)
@test x[i] >= -tol
end
##CHUNK 3
x = gth_solve(P)
# Check elements sum to one
@test isapprox(sum(x), 1; atol=tol)
# Check elements are nonnegative
for i in 1:length(x)
@test x[i] >= -tol
end
# Check x is a left eigenvector of P
@test isapprox(vec(x'*P), x; atol=tol)
end
end
@testset "test gth_solve with generator matrices" begin
P_dict_Int = Dict(
"P" => [-3 3; 4 -4],
"stationary_dist" => [4//7, 3//7],
)
#FILE: QuantEcon.jl/test/test_ddp.jl
##CHUNK 1
r2 = solve(ddp_rational, MPFI)
r3 = solve(ddp_rational, VFI)
@test maximum(abs, r1.v-v_star) < 1e-13
@test r1.sigma == r2.sigma
@test r1.sigma == r3.sigma
@test r1.mc.p == r2.mc.p
@test r1.mc.p == r3.mc.p
end
@testset "modified_policy_iteration" begin
for ddp_item in ddp0_collection
res = solve(ddp_item, MPFI)
v_init = [0.0, 1.0]
res_init = solve(ddp_item, v_init, MPFI)
# Check v is an epsilon/2-approxmation of v_star
@test maximum(abs, res.v - v_star) < epsilon/2
@test maximum(abs, res_init.v - v_star) < epsilon/2
# Check sigma == sigma_star
#CURRENT FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
gridmake
"""
is_stable(A)
General function for testing for stability of matrix ``A``. Just
checks that eigenvalues are less than 1 in absolute value.
# Arguments
- `A::Matrix` The matrix we want to check
# Returns
- `stable::Bool` Whether or not the matrix is stable
"""
"""
##CHUNK 2
2 20 100
3 20 100
1 10 200
2 10 200
3 10 200
1 20 200
2 20 200
3 20 200
```
"""
gridmake
"""
is_stable(A)
General function for testing for stability of matrix ``A``. Just
checks that eigenvalues are less than 1 in absolute value.
# Arguments
|
224
| 242
|
QuantEcon.jl
| 215
|
function Base.iterate(sg::SimplexGrid, state)
m = sg.m
x, h = state
x[1] >= sg.n && return nothing
h -= 1
val = x[h+1]
x[h+1] = 0
x[m] = val - 1
x[h] += 1
if val != 1
h = m
end
return x, (x, h)
end
|
function Base.iterate(sg::SimplexGrid, state)
m = sg.m
x, h = state
x[1] >= sg.n && return nothing
h -= 1
val = x[h+1]
x[h+1] = 0
x[m] = val - 1
x[h] += 1
if val != 1
h = m
end
return x, (x, h)
end
|
[
224,
242
] |
function Base.iterate(sg::SimplexGrid, state)
m = sg.m
x, h = state
x[1] >= sg.n && return nothing
h -= 1
val = x[h+1]
x[h+1] = 0
x[m] = val - 1
x[h] += 1
if val != 1
h = m
end
return x, (x, h)
end
|
function Base.iterate(sg::SimplexGrid, state)
m = sg.m
x, h = state
x[1] >= sg.n && return nothing
h -= 1
val = x[h+1]
x[h+1] = 0
x[m] = val - 1
x[h] += 1
if val != 1
h = m
end
return x, (x, h)
end
|
Base.iterate
| 224
| 242
|
src/util.jl
|
#FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
if any(!in(x, X) for x in states)
error("One of the states does not appear in history X")
end
# Count states and store in dictionary
nstates = length(states)
d = Dict{T, Int}(zip(states, 1:nstates))
# Counter matrix and dictionary mapping i -> states
cm = zeros(nstates, nstates)
# Compute conditional probabilities for each state
state_i = d[X[1]]
for t in 1:capT-1
# Find next period's state
state_j = d[X[t+1]]
cm[state_i, state_j] += 1.0
# Tomorrow's state is j
state_i = state_j
#FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
random_probvec(k::Integer, m::Integer) = random_probvec(Random.GLOBAL_RNG, k, m)
#CURRENT FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
m::Int
n::Int
end
Base.eltype(sg::SimplexGrid) = Vector{Int}
function Base.iterate(sg::SimplexGrid)
x = zeros(Int, sg.m)
x[end] = sg.n
h = sg.m
return x, (x, h)
end
@doc raw"""
simplex_grid(m, n)
Construct an array consisting of the integer points in the
(m-1)-dimensional simplex ``\{x \mid x_1 + \cdots + x_m = n, x_i \geq 0\}``,
##CHUNK 2
x = [1, 3, 0]
x = [2, 0, 2]
x = [2, 1, 1]
x = [2, 2, 0]
x = [3, 0, 1]
x = [3, 1, 0]
x = [4, 0, 0]
```
"""
struct SimplexGrid
m::Int
n::Int
end
Base.eltype(sg::SimplexGrid) = Vector{Int}
function Base.iterate(sg::SimplexGrid)
x = zeros(Int, sg.m)
x[end] = sg.n
h = sg.m
##CHUNK 3
out = Matrix{Int}(undef, m, L)
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
@doc raw"""
simplex_index(x, m, n)
Return the index of the point x in the lexicographic order of the
integer points of the (m-1)-dimensional simplex ``\{x \mid x_0 +
\cdots + x_{m-1} = n\}``.
# Arguments
##CHUNK 4
function simplex_index(x, m, n)
# If only one element then only one point in simplex
if m==1
return 1
end
decumsum = reverse(cumsum(reverse(x[2:end])))
idx = binomial(n+m-1, m-1)
for i in 1:m-1
if decumsum[i] == 0
break
end
idx -= num_compositions(m - (i-1), decumsum[i]-1)
end
return idx
end
"""
##CHUNK 5
- `x::Vector{Int}` : Integer point in the simplex, i.e., an array of
m nonnegative integers that sum to n.
- `m::Int` : Dimension of each point. Must be a positive integer.
- `n::Int` : Number which the coordinates of each point sum to. Must be a
nonnegative integer.
# Returns
- `idx::Int` : Index of x.
"""
function simplex_index(x, m, n)
# If only one element then only one point in simplex
if m==1
return 1
end
decumsum = reverse(cumsum(reverse(x[2:end])))
idx = binomial(n+m-1, m-1)
for i in 1:m-1
if decumsum[i] == 0
##CHUNK 6
@doc raw"""
SimplexGrid
Iterator version of `simplex_grid`, i.e., iterator that iterates over the
integer points in the (m-1)-dimensional simplex ``\{x \mid x_1 + \cdots + x_m =
n, x_i \geq 0\}``, or equivalently, the m-part compositions of n, in
lexicographic order.
# Fields
- `m::Int` : Dimension of each point. Must be a positive integer.
- `n::Int` : Number which the coordinates of each point sum to. Must
be a nonnegative integer.
# Examples
```julia
julia> sg = SimplexGrid(3, 4);
julia> for x in sg
##CHUNK 7
@doc raw"""
simplex_index(x, m, n)
Return the index of the point x in the lexicographic order of the
integer points of the (m-1)-dimensional simplex ``\{x \mid x_0 +
\cdots + x_{m-1} = n\}``.
# Arguments
- `x::Vector{Int}` : Integer point in the simplex, i.e., an array of
m nonnegative integers that sum to n.
- `m::Int` : Dimension of each point. Must be a positive integer.
- `n::Int` : Number which the coordinates of each point sum to. Must be a
nonnegative integer.
# Returns
- `idx::Int` : Index of x.
"""
|
287
| 298
|
QuantEcon.jl
| 216
|
function simplex_grid(m, n)
# Get number of elements in array and allocate
L = num_compositions(m, n)
out = Matrix{Int}(undef, m, L)
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
|
function simplex_grid(m, n)
# Get number of elements in array and allocate
L = num_compositions(m, n)
out = Matrix{Int}(undef, m, L)
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
|
[
287,
298
] |
function simplex_grid(m, n)
# Get number of elements in array and allocate
L = num_compositions(m, n)
out = Matrix{Int}(undef, m, L)
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
|
function simplex_grid(m, n)
# Get number of elements in array and allocate
L = num_compositions(m, n)
out = Matrix{Int}(undef, m, L)
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
|
simplex_grid
| 287
| 298
|
src/util.jl
|
#FILE: QuantEcon.jl/other/ddpsolve.jl
##CHUNK 1
ind = n * x + (1-n:0)
fstar = f[ind]
pstar = P[ind, :]
return pstar, fstar, ind
end
function expandg(g)
# Only need if I supply "transfunc". Not doing so
n, m = size(g)
P = sparse(1:n*m, g(:), 1, n*m, n)
return P
end
function diagmult(a::Vector{T}, b::Matrix{T}) where T <: Real
n = length(a)
return sparse(1:n, 1:n, a, n, n)*b
end
#FILE: QuantEcon.jl/test/test_util.jl
##CHUNK 1
0 1 0
4 2 0]
idx = Vector{Int}(undef, 3)
for i in 1:3
idx[i] = @inferred simplex_index(points[:, i], 3, 4)
end
@test all(@inferred simplex_grid(3, 4) .== grid_3_4)
@test all(grid_3_4[:, idx] .== points)
@test size(grid_3_4, 2) == num_compositions(3, 4)
sg = SimplexGrid(3, 4)
for (i, x) in enumerate(sg)
if i in idx
@test x == grid_3_4[:, i]
end
end
# Output from QuantEcon.py
#CURRENT FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
in lexicographic order. The total number of the points (hence the
length of the output array) is L = (n+m-1)!/(n!*(m-1)!) (i.e.,
(n+m-1) choose (m-1)).
# Arguments
- `m::Int` : Dimension of each point. Must be a positive integer.
- `n::Int` : Number which the coordinates of each point sum to. Must
be a nonnegative integer.
# Returns
- `out::Matrix{Int}` : Array of shape (m, L) containing the integer
points in the simplex, aligned in lexicographic
order.
# Notes
A grid of the (m-1)-dimensional *unit* simplex with n subdivisions
along each dimension can be obtained by `simplex_grid(m, n) / n`.
##CHUNK 2
for _1 in 1:outer, ix in 1:lens[i], _2 in 1:inner
out[row+=1, col_base+col_plus] = arr[ix, col_plus]
end
end
col_base += ncol
end
return out
end
@generated function gridmake(arrays::AbstractArray...)
T = reduce(promote_type, eltype(a) for a in arrays)
quote
l = 1
n = 0
for arr in arrays
l *= size(arr, 1)
n += size(arr, 2)
end
out = Matrix{$T}(undef, l, n)
gridmake!(out, arrays...)
##CHUNK 3
# Returns
- `::Int` : Total number of m-part compositions of n
"""
function num_compositions(m, n)
return binomial(n+m-1, m-1)
end
@doc raw"""
SimplexGrid
Iterator version of `simplex_grid`, i.e., iterator that iterates over the
integer points in the (m-1)-dimensional simplex ``\{x \mid x_1 + \cdots + x_m =
n, x_i \geq 0\}``, or equivalently, the m-part compositions of n, in
lexicographic order.
# Fields
##CHUNK 4
col_base = 0
for i in 1:length(arrays)
arr = arrays[i]
ncol = size(arr, 2)
outer = repititions[i]
inner = floor(Int, l / (outer * lens[i]))
for col_plus in 1:ncol
row = 0
for _1 in 1:outer, ix in 1:lens[i], _2 in 1:inner
out[row+=1, col_base+col_plus] = arr[ix, col_plus]
end
end
col_base += ncol
end
return out
end
@generated function gridmake(arrays::AbstractArray...)
##CHUNK 5
return x, (x, h)
end
@doc raw"""
simplex_grid(m, n)
Construct an array consisting of the integer points in the
(m-1)-dimensional simplex ``\{x \mid x_1 + \cdots + x_m = n, x_i \geq 0\}``,
or equivalently, the m-part compositions of n, which are listed
in lexicographic order. The total number of the points (hence the
length of the output array) is L = (n+m-1)!/(n!*(m-1)!) (i.e.,
(n+m-1) choose (m-1)).
# Arguments
- `m::Int` : Dimension of each point. Must be a positive integer.
- `n::Int` : Number which the coordinates of each point sum to. Must
be a nonnegative integer.
##CHUNK 6
T = reduce(promote_type, eltype(a) for a in arrays)
quote
l = 1
n = 0
for arr in arrays
l *= size(arr, 1)
n += size(arr, 2)
end
out = Matrix{$T}(undef, l, n)
gridmake!(out, arrays...)
out
end
end
function gridmake(t::Tuple)
all(map(x -> isa(x, Integer), t)) ||
error("gridmake(::Tuple) only valid when all elements are integers")
gridmake(map(x->1:x, t)...)::Matrix{Int}
end
##CHUNK 7
out
end
end
function gridmake(t::Tuple)
all(map(x -> isa(x, Integer), t)) ||
error("gridmake(::Tuple) only valid when all elements are integers")
gridmake(map(x->1:x, t)...)::Matrix{Int}
end
"""
gridmake(arrays::Union{AbstractVector,AbstractMatrix}...)
Expand one or more vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the input
arrays will correspond to one column of the output matrix. The first array
varies the fastest (see example)
# Example
##CHUNK 8
# Returns
- `out::Matrix{Int}` : Array of shape (m, L) containing the integer
points in the simplex, aligned in lexicographic
order.
# Notes
A grid of the (m-1)-dimensional *unit* simplex with n subdivisions
along each dimension can be obtained by `simplex_grid(m, n) / n`.
# Examples
```julia
julia> simplex_grid(3, 4)
3×15 Matrix{Int64}:
0 0 0 0 0 1 1 1 1 2 2 2 3 3 4
0 1 2 3 4 0 1 2 3 0 1 2 0 1 0
4 3 2 1 0 3 2 1 0 2 1 0 1 0 0
```
|
320
| 337
|
QuantEcon.jl
| 217
|
function simplex_index(x, m, n)
# If only one element then only one point in simplex
if m==1
return 1
end
decumsum = reverse(cumsum(reverse(x[2:end])))
idx = binomial(n+m-1, m-1)
for i in 1:m-1
if decumsum[i] == 0
break
end
idx -= num_compositions(m - (i-1), decumsum[i]-1)
end
return idx
end
|
function simplex_index(x, m, n)
# If only one element then only one point in simplex
if m==1
return 1
end
decumsum = reverse(cumsum(reverse(x[2:end])))
idx = binomial(n+m-1, m-1)
for i in 1:m-1
if decumsum[i] == 0
break
end
idx -= num_compositions(m - (i-1), decumsum[i]-1)
end
return idx
end
|
[
320,
337
] |
function simplex_index(x, m, n)
# If only one element then only one point in simplex
if m==1
return 1
end
decumsum = reverse(cumsum(reverse(x[2:end])))
idx = binomial(n+m-1, m-1)
for i in 1:m-1
if decumsum[i] == 0
break
end
idx -= num_compositions(m - (i-1), decumsum[i]-1)
end
return idx
end
|
function simplex_index(x, m, n)
# If only one element then only one point in simplex
if m==1
return 1
end
decumsum = reverse(cumsum(reverse(x[2:end])))
idx = binomial(n+m-1, m-1)
for i in 1:m-1
if decumsum[i] == 0
break
end
idx -= num_compositions(m - (i-1), decumsum[i]-1)
end
return idx
end
|
simplex_index
| 320
| 337
|
src/util.jl
|
#FILE: QuantEcon.jl/src/markov/mc_tools.jl
##CHUNK 1
@inbounds for k in 1:n-1
scale = sum(A[k, k+1:n])
if scale <= zero(T)
# There is one (and only one) recurrent class contained in
# {1, ..., k};
# compute the solution associated with that recurrent class.
n = k
break
end
A[k+1:n, k] /= scale
for j in k+1:n, i in k+1:n
A[i, j] += A[i, k] * A[k, j]
end
end
# backsubstitution
x[n] = 1
@inbounds for k in n-1:-1:1, i in k+1:n
x[k] += x[i] * A[i, k]
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
random_probvec(k::Integer, m::Integer) = random_probvec(Random.GLOBAL_RNG, k, m)
#CURRENT FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
# Returns
- `::Int` : Total number of m-part compositions of n
"""
function num_compositions(m, n)
return binomial(n+m-1, m-1)
end
@doc raw"""
SimplexGrid
Iterator version of `simplex_grid`, i.e., iterator that iterates over the
integer points in the (m-1)-dimensional simplex ``\{x \mid x_1 + \cdots + x_m =
n, x_i \geq 0\}``, or equivalently, the m-part compositions of n, in
lexicographic order.
# Fields
##CHUNK 2
val = x[h+1]
x[h+1] = 0
x[m] = val - 1
x[h] += 1
if val != 1
h = m
end
return x, (x, h)
end
@doc raw"""
simplex_grid(m, n)
Construct an array consisting of the integer points in the
(m-1)-dimensional simplex ``\{x \mid x_1 + \cdots + x_m = n, x_i \geq 0\}``,
or equivalently, the m-part compositions of n, which are listed
##CHUNK 3
"""
num_compositions(m, n)
The total number of m-part compositions of n, which is equal to (n + m - 1)
choose (m - 1).
# Arguments
- `m::Int` : Number of parts of composition
- `n::Int` : Integer to decompose
# Returns
- `::Int` : Total number of m-part compositions of n
"""
function num_compositions(m, n)
return binomial(n+m-1, m-1)
end
##CHUNK 4
sg = SimplexGrid(m, n)
for (i, x) in enumerate(sg)
copyto!(out, m*(i-1) + 1, x, 1, m)
end
return out
end
@doc raw"""
simplex_index(x, m, n)
Return the index of the point x in the lexicographic order of the
integer points of the (m-1)-dimensional simplex ``\{x \mid x_0 +
\cdots + x_{m-1} = n\}``.
# Arguments
- `x::Vector{Int}` : Integer point in the simplex, i.e., an array of
##CHUNK 5
return x, (x, h)
end
function Base.iterate(sg::SimplexGrid, state)
m = sg.m
x, h = state
x[1] >= sg.n && return nothing
h -= 1
val = x[h+1]
x[h+1] = 0
x[m] = val - 1
x[h] += 1
if val != 1
h = m
end
##CHUNK 6
m nonnegative integers that sum to n.
- `m::Int` : Dimension of each point. Must be a positive integer.
- `n::Int` : Number which the coordinates of each point sum to. Must be a
nonnegative integer.
# Returns
- `idx::Int` : Index of x.
"""
"""
next_k_array!(a)
Given an array `a` of k distinct positive integers, sorted in
ascending order, return the next k-array in the lexicographic
ordering of the descending sequences of the elements, following
[Combinatorial number system]
(https://en.wikipedia.org/wiki/Combinatorial_number_system). `a` is
modified in place.
##CHUNK 7
return x, (x, h)
end
@doc raw"""
simplex_grid(m, n)
Construct an array consisting of the integer points in the
(m-1)-dimensional simplex ``\{x \mid x_1 + \cdots + x_m = n, x_i \geq 0\}``,
or equivalently, the m-part compositions of n, which are listed
in lexicographic order. The total number of the points (hence the
length of the output array) is L = (n+m-1)!/(n!*(m-1)!) (i.e.,
(n+m-1) choose (m-1)).
# Arguments
- `m::Int` : Dimension of each point. Must be a positive integer.
- `n::Int` : Number which the coordinates of each point sum to. Must
be a nonnegative integer.
##CHUNK 8
m::Int
n::Int
end
Base.eltype(sg::SimplexGrid) = Vector{Int}
function Base.iterate(sg::SimplexGrid)
x = zeros(Int, sg.m)
x[end] = sg.n
h = sg.m
return x, (x, h)
end
function Base.iterate(sg::SimplexGrid, state)
m = sg.m
x, h = state
x[1] >= sg.n && return nothing
h -= 1
|
376
| 396
|
QuantEcon.jl
| 218
|
function next_k_array!(a::Vector{<:Integer})
k = length(a)
if k == 1 || a[1] + 1 < a[2]
a[1] += 1
return a
end
a[1] = 1
i = 2
x = a[i] + 1
while i < k && x == a[i+1]
i += 1
a[i-1] = i - 1
x = a[i] + 1
end
a[i] = x
return a
end
|
function next_k_array!(a::Vector{<:Integer})
k = length(a)
if k == 1 || a[1] + 1 < a[2]
a[1] += 1
return a
end
a[1] = 1
i = 2
x = a[i] + 1
while i < k && x == a[i+1]
i += 1
a[i-1] = i - 1
x = a[i] + 1
end
a[i] = x
return a
end
|
[
376,
396
] |
function next_k_array!(a::Vector{<:Integer})
k = length(a)
if k == 1 || a[1] + 1 < a[2]
a[1] += 1
return a
end
a[1] = 1
i = 2
x = a[i] + 1
while i < k && x == a[i+1]
i += 1
a[i-1] = i - 1
x = a[i] + 1
end
a[i] = x
return a
end
|
function next_k_array!(a::Vector{<:Integer})
k = length(a)
if k == 1 || a[1] + 1 < a[2]
a[1] += 1
return a
end
a[1] = 1
i = 2
x = a[i] + 1
while i < k && x == a[i+1]
i += 1
a[i-1] = i - 1
x = a[i] + 1
end
a[i] = x
return a
end
|
next_k_array!
| 376
| 396
|
src/util.jl
|
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
random_probvec(k::Integer, m::Integer) = random_probvec(Random.GLOBAL_RNG, k, m)
#FILE: QuantEcon.jl/test/test_util.jl
##CHUNK 1
1 2 6;
1 3 6;
2 3 6;
1 4 6;
2 4 6;
3 4 6;
1 5 6;
2 5 6;
3 5 6;
4 5 6]
L, k = size(k_arrays)
k_arrays_computed = similar(k_arrays)
k_arrays_computed[1, :] = collect(1:k)
@test k_array_rank(k_arrays_computed[1, :]) == 1
for i = 2:L
k_arrays_computed[i, :] = next_k_array!(k_arrays_computed[i-1, :])
@test k_array_rank(k_arrays_computed[i, :]) == i
end
#FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
##CHUNK 2
pp = n * (z .* p1 - p2) ./ (z .* z .- 1)
z1 = z
z = z1 - p1 ./ pp # newton's method
err = maximum(abs, z - z1)
if err < 1e-14
break
end
end
if its == maxit
error("Maximum iterations in _qnwlege1")
end
nodes[i] = xm .- xl * z
nodes[n + 1 .- i] = xm .+ xl * z
weights[i] = 2 * xl ./ ((1 .- z .* z) .* pp .* pp)
weights[n + 1 .- i] = weights[i]
##CHUNK 3
end
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
#FILE: QuantEcon.jl/src/zeros.jl
##CHUNK 1
function divide_bracket(f::Function, x1::T, x2::T, n::Int=50) where T<:Number
x1 <= x2 || throw(ArgumentError("x1 must be less than x2"))
xs = range(x1, stop=x2, length=n)
dx = xs[2] - xs[1]
x1b = T[]
x2b = T[]
f1 = f(x1)
for x in xs[2:end]
f2 = f(x)
if f1*f2 <= 0.0
push!(x1b, x-dx)
push!(x2b, x)
end
f1 = f2
end
#FILE: QuantEcon.jl/other/quadrature.jl
##CHUNK 1
else
z = 3 * x[i-1] - 3 * x[i-2] + x[i-3]
end
ab = a + b
for its = 1:maxit
temp = 2 + ab
p1 = (a - b + temp * z) / 2
p2 = 1
for j=2:n
p3 = p2
p2 = p1
temp = 2 * j + ab
aa = 2 * j * (j + ab) * (temp - 2)
bb = (temp - 1) * (a * a - b * b + temp * (temp - 2) * z)
c = 2 * (j - 1 + a) * (j - 1 + b) * temp
p1 = (bb * p2 - c * p3) / aa
end
#FILE: QuantEcon.jl/examples/finite_dp_og_example.jl
##CHUNK 1
R[s, a] = -Inf
end
end
end
end
function populate_Q!(m,Q,B)
for a in 1:m
Q[:, a, a:(a + B)] .= 1.0 / (B+1)
end
end
#FILE: QuantEcon.jl/other/ddpsolve.jl
##CHUNK 1
# TODO: the stdlib function findmax(arr, dim) should do this now
function indvalmax(a::Matrix{T}, dim::Integer=2) where T
out_size = dim == 2 ? size(a, 1) : size(a, 2)
out_v = Array(T, out_size)
out_i = Array(Int64, out_size)
if dim == 2
for i=1:out_size
out_v[i], out_i[i] = findmax(a[i, :])
end
elseif dim == 1
for i=1:out_size
out_v[i], out_i[i] = findmax(a[:, i])
end
else
error("dim must be 1 or 2. Received $dim")
end
return out_v, out_i
end
#CURRENT FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
return x, (x, h)
end
function Base.iterate(sg::SimplexGrid, state)
m = sg.m
x, h = state
x[1] >= sg.n && return nothing
h -= 1
val = x[h+1]
x[h+1] = 0
x[m] = val - 1
x[h] += 1
if val != 1
h = m
end
|
424
| 436
|
QuantEcon.jl
| 219
|
function k_array_rank(T::Type{<:Integer}, a::Vector{<:Integer})
if T != BigInt
binomial(BigInt(a[end]), BigInt(length(a))) ≤ typemax(T) ||
throw(InexactError(:Binomial, T, a[end]))
end
k = length(a)
idx = one(T)
for i = 1:k
idx += binomial(T(a[i])-one(T), T(i))
end
return idx
end
|
function k_array_rank(T::Type{<:Integer}, a::Vector{<:Integer})
if T != BigInt
binomial(BigInt(a[end]), BigInt(length(a))) ≤ typemax(T) ||
throw(InexactError(:Binomial, T, a[end]))
end
k = length(a)
idx = one(T)
for i = 1:k
idx += binomial(T(a[i])-one(T), T(i))
end
return idx
end
|
[
424,
436
] |
function k_array_rank(T::Type{<:Integer}, a::Vector{<:Integer})
if T != BigInt
binomial(BigInt(a[end]), BigInt(length(a))) ≤ typemax(T) ||
throw(InexactError(:Binomial, T, a[end]))
end
k = length(a)
idx = one(T)
for i = 1:k
idx += binomial(T(a[i])-one(T), T(i))
end
return idx
end
|
function k_array_rank(T::Type{<:Integer}, a::Vector{<:Integer})
if T != BigInt
binomial(BigInt(a[end]), BigInt(length(a))) ≤ typemax(T) ||
throw(InexactError(:Binomial, T, a[end]))
end
k = length(a)
idx = one(T)
for i = 1:k
idx += binomial(T(a[i])-one(T), T(i))
end
return idx
end
|
k_array_rank
| 424
| 436
|
src/util.jl
|
#FILE: QuantEcon.jl/other/ddpsolve.jl
##CHUNK 1
# TODO: the stdlib function findmax(arr, dim) should do this now
function indvalmax(a::Matrix{T}, dim::Integer=2) where T
out_size = dim == 2 ? size(a, 1) : size(a, 2)
out_v = Array(T, out_size)
out_i = Array(Int64, out_size)
if dim == 2
for i=1:out_size
out_v[i], out_i[i] = findmax(a[i, :])
end
elseif dim == 1
for i=1:out_size
out_v[i], out_i[i] = findmax(a[:, i])
end
else
error("dim must be 1 or 2. Received $dim")
end
return out_v, out_i
end
##CHUNK 2
# TODO: the stdlib function findmax(arr, dim) should do this now
function indvalmax(a::Matrix{T}, dim::Integer=2) where T
out_size = dim == 2 ? size(a, 1) : size(a, 2)
out_v = Array(T, out_size)
out_i = Array(Int64, out_size)
if dim == 2
for i=1:out_size
out_v[i], out_i[i] = findmax(a[i, :])
end
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `k::Integer` : Size of each probability vector.
- `m::Integer` : Number of probability vectors.
# Returns
- `a::Array` : Matrix of shape `(k, m)`, or Vector of shape `(k,)` if `m` is not
specified, containing probability vector(s) as column(s).
"""
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
#FILE: QuantEcon.jl/test/test_util.jl
##CHUNK 1
L, k = size(k_arrays)
k_arrays_computed = similar(k_arrays)
k_arrays_computed[1, :] = collect(1:k)
@test k_array_rank(k_arrays_computed[1, :]) == 1
for i = 2:L
k_arrays_computed[i, :] = next_k_array!(k_arrays_computed[i-1, :])
@test k_array_rank(k_arrays_computed[i, :]) == i
end
@test k_arrays_computed == k_arrays
# test InexactError when ranking is out of range
n, k = 100, 50
@test k_array_rank(BigInt, collect(n-k+1:n)) == binomial(BigInt(n), BigInt(k))
@test_throws InexactError k_array_rank(collect(n-k+1:n))
# test returning wrong value when ranking is out of range
n, k = 68, 29
@test k_array_rank(BigInt, collect(n-k+1:n)) == binomial(BigInt(n), BigInt(k))
##CHUNK 2
@test k_arrays_computed == k_arrays
# test InexactError when ranking is out of range
n, k = 100, 50
@test k_array_rank(BigInt, collect(n-k+1:n)) == binomial(BigInt(n), BigInt(k))
@test_throws InexactError k_array_rank(collect(n-k+1:n))
# test returning wrong value when ranking is out of range
n, k = 68, 29
@test k_array_rank(BigInt, collect(n-k+1:n)) == binomial(BigInt(n), BigInt(k))
# @test k_array_rank(collect(n-k+1:n)) != binomial(BigInt(n), BigInt(k)) # Change in the InexactError applied condition
end
end
#FILE: QuantEcon.jl/src/markov/ddp.jl
##CHUNK 1
end
end
@doc doc"""
Define Matrix Multiplication between 3-dimensional matrix and a vector
Matrix multiplication over the last dimension of ``A``
"""
function *(A::AbstractArray{T,3}, v::AbstractVector) where T
shape = size(A)
size(v, 1) == shape[end] || error("wrong dimensions")
B = reshape(A, (prod(shape[1:end-1]), shape[end]))
out = B * v
return reshape(out, shape[1:end-1])
end
"""
#CURRENT FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
It is the user's responsibility to ensure that the rank of the input
array fits within the range of `T`; a sufficient condition for it is
`binomial(BigInt(a[end]), BigInt(length(a))) <= typemax(T)`.
# Arguments
- `T::Type{<:Integer}`: The numeric type of ranking to be returned.
- `a::Vector{<:Integer}`: Array of length k.
# Returns
- `idx::T`: Ranking of `a`.
"""
k_array_rank(a::Vector{<:Integer}) = k_array_rank(Int, a)
##CHUNK 2
return a
end
a[1] = 1
i = 2
x = a[i] + 1
while i < k && x == a[i+1]
i += 1
a[i-1] = i - 1
x = a[i] + 1
end
a[i] = x
return a
end
"""
k_array_rank([T=Int], a)
##CHUNK 3
Given an array `a` of k distinct positive integers, sorted in
ascending order, return its ranking in the lexicographic ordering of
the descending sequences of the elements, following
[Combinatorial number system]
(https://en.wikipedia.org/wiki/Combinatorial_number_system).
# Notes
`InexactError` exception will be thrown, or an incorrect value will be
returned without warning if overflow occurs during the computation.
It is the user's responsibility to ensure that the rank of the input
array fits within the range of `T`; a sufficient condition for it is
`binomial(BigInt(a[end]), BigInt(length(a))) <= typemax(T)`.
# Arguments
- `T::Type{<:Integer}`: The numeric type of ranking to be returned.
- `a::Vector{<:Integer}`: Array of length k.
# Returns
##CHUNK 4
x = a[i] + 1
end
a[i] = x
return a
end
"""
k_array_rank([T=Int], a)
Given an array `a` of k distinct positive integers, sorted in
ascending order, return its ranking in the lexicographic ordering of
the descending sequences of the elements, following
[Combinatorial number system]
(https://en.wikipedia.org/wiki/Combinatorial_number_system).
# Notes
`InexactError` exception will be thrown, or an incorrect value will be
returned without warning if overflow occurs during the computation.
|
121
| 143
|
QuantEcon.jl
| 220
|
function divide_bracket(f::Function, x1::T, x2::T, n::Int=50) where T<:Number
x1 <= x2 || throw(ArgumentError("x1 must be less than x2"))
xs = range(x1, stop=x2, length=n)
dx = xs[2] - xs[1]
x1b = T[]
x2b = T[]
f1 = f(x1)
for x in xs[2:end]
f2 = f(x)
if f1*f2 <= 0.0
push!(x1b, x-dx)
push!(x2b, x)
end
f1 = f2
end
return x1b, x2b
end
|
function divide_bracket(f::Function, x1::T, x2::T, n::Int=50) where T<:Number
x1 <= x2 || throw(ArgumentError("x1 must be less than x2"))
xs = range(x1, stop=x2, length=n)
dx = xs[2] - xs[1]
x1b = T[]
x2b = T[]
f1 = f(x1)
for x in xs[2:end]
f2 = f(x)
if f1*f2 <= 0.0
push!(x1b, x-dx)
push!(x2b, x)
end
f1 = f2
end
return x1b, x2b
end
|
[
121,
143
] |
function divide_bracket(f::Function, x1::T, x2::T, n::Int=50) where T<:Number
x1 <= x2 || throw(ArgumentError("x1 must be less than x2"))
xs = range(x1, stop=x2, length=n)
dx = xs[2] - xs[1]
x1b = T[]
x2b = T[]
f1 = f(x1)
for x in xs[2:end]
f2 = f(x)
if f1*f2 <= 0.0
push!(x1b, x-dx)
push!(x2b, x)
end
f1 = f2
end
return x1b, x2b
end
|
function divide_bracket(f::Function, x1::T, x2::T, n::Int=50) where T<:Number
x1 <= x2 || throw(ArgumentError("x1 must be less than x2"))
xs = range(x1, stop=x2, length=n)
dx = xs[2] - xs[1]
x1b = T[]
x2b = T[]
f1 = f(x1)
for x in xs[2:end]
f2 = f(x)
if f1*f2 <= 0.0
push!(x1b, x-dx)
push!(x2b, x)
end
f1 = f2
end
return x1b, x2b
end
|
divide_bracket
| 121
| 143
|
src/zeros.jl
|
#CURRENT FILE: QuantEcon.jl/src/zeros.jl
##CHUNK 1
end
if abs(f1) < abs(f2)
x1 += fac*(x1 - x2)
f1 = f(x1)
else
x2 += fac*(x2 - x1)
f2 = f(x2)
end
end
throw(ConvergenceError("failed to find bracket in $ntry iterations"))
end
expand_bracket(f::Function, x1::T; ntry::Int=50, fac::Float64=1.6) where {T<:Number} =
expand_bracket(f, 0.9x1, 1.1x1; ntry=ntry, fac=fac)
"""
Given a function `f` defined on the interval `[x1, x2]`, subdivide the
interval into `n` equally spaced segments, and search for zero crossings of the
##CHUNK 2
f1, f2 = f(x1), f(x2)
if f1 * f2 > 0
throw(ArgumentError("Root must be bracketed by [x1, x2]"))
end
# maybe we got lucky and either x1 or x2 is a root
if f1 == 0.0
return x1
end
if f2 == 0.0
return x2
end
dm = x2 - x1
for i=1:maxiter
dm *= 0.5
##CHUNK 3
ntry::Int=50, fac::Float64=1.6) where T<:Number
# x1 <= x2 || throw(ArgumentError("x1 must be less than x2"))
f1 = f(x1)
f2 = f(x2)
for j=1:ntry
if f1*f2 < 0.0
return x1, x2
end
if abs(f1) < abs(f2)
x1 += fac*(x1 - x2)
f1 = f(x1)
else
x2 += fac*(x2 - x1)
f2 = f(x2)
end
end
##CHUNK 4
throw(ConvergenceError("failed to find bracket in $ntry iterations"))
end
expand_bracket(f::Function, x1::T; ntry::Int=50, fac::Float64=1.6) where {T<:Number} =
expand_bracket(f, 0.9x1, 1.1x1; ntry=ntry, fac=fac)
"""
Given a function `f` defined on the interval `[x1, x2]`, subdivide the
interval into `n` equally spaced segments, and search for zero crossings of the
function. `nroot` will be set to the number of bracketing pairs found. If it is
positive, the arrays `xb1[1..nroot]` and `xb2[1..nroot]` will be filled
sequentially with any bracketing pairs that are found.
##### Arguments
- `f::Function`: The function you want to bracket
- `x1::T`: Lower border for search interval
- `x2::T`: Upper border for search interval
- `n::Int(50)`: The number of sub-intervals to divide `[x1, x2]` into
##CHUNK 5
$__zero_docstr_arg_ret
##### References
Matches `bisect` function from scipy/scipy/optimize/Zeros/bisect.c
"""
function bisect(f::Function, x1::T, x2::T; maxiter::Int=500,
xtol::Float64=1e-12, rtol::Float64=2*eps()) where T<:AbstractFloat
tol = xtol + rtol*(abs(x1) + abs(x2))
f1, f2 = f(x1), f(x2)
if f1 * f2 > 0
throw(ArgumentError("Root must be bracketed by [x1, x2]"))
end
# maybe we got lucky and either x1 or x2 is a root
if f1 == 0.0
return x1
##CHUNK 6
function _brent_body(BE::BrentExtrapolation, f::Function,
xa::T, xb::T, maxiter::Int=500,
xtol::Float64=1e-12,
rtol::Float64=2*eps()) where T<:AbstractFloat
xpre, xcur = xa, xb
xblk = fblk = spre = scur = 0.0
fpre = f(xpre)
fcur = f(xcur)
if fpre*fcur > 0
throw(ArgumentError("Root must be bracketed by [xa, xb]"))
end
# maybe we got lucky and x1 or x2 is a root of f
if fpre == 0.0
return xpre
end
##CHUNK 7
##### References
This method is `zbrac` from numerical recipies in C++
##### Exceptions
- Throws a `ConvergenceError` if the maximum number of iterations is exceeded
"""
function expand_bracket(f::Function, x1::T, x2::T;
ntry::Int=50, fac::Float64=1.6) where T<:Number
# x1 <= x2 || throw(ArgumentError("x1 must be less than x2"))
f1 = f(x1)
f2 = f(x2)
for j=1:ntry
if f1*f2 < 0.0
return x1, x2
##CHUNK 8
Matches `ridder` function from scipy/scipy/optimize/Zeros/ridder.c
"""
function ridder(f::Function, xa::T, xb::T; maxiter::Int=500,
xtol::Float64=1e-12, rtol::Float64=2*eps()) where T<:AbstractFloat
tol = xtol + rtol*(abs(xa) + abs(xb))
fa, fb = f(xa), f(xb)
if fa * fb > 0
throw(ArgumentError("Root must be bracketed by [xa, xb]"))
end
# maybe we got lucky and either xa or xb is a root
if fa == 0.0
return xa
end
if fb == 0.0
return xb
##CHUNK 9
end
if f2 == 0.0
return x2
end
dm = x2 - x1
for i=1:maxiter
dm *= 0.5
xm = x1 + dm
fm = f(xm)
# move bracketing interval up if sign(f(xm)) == sign(f(x1))
if fm*f1 >= 0.0
x1 = xm
end
if fm == 0.0 || abs(dm) < tol
return xm
##CHUNK 10
function. `nroot` will be set to the number of bracketing pairs found. If it is
positive, the arrays `xb1[1..nroot]` and `xb2[1..nroot]` will be filled
sequentially with any bracketing pairs that are found.
##### Arguments
- `f::Function`: The function you want to bracket
- `x1::T`: Lower border for search interval
- `x2::T`: Upper border for search interval
- `n::Int(50)`: The number of sub-intervals to divide `[x1, x2]` into
##### Returns
- `x1b::Vector{T}`: `Vector` of lower borders of bracketing intervals
- `x2b::Vector{T}`: `Vector` of upper borders of bracketing intervals
##### References
This is `zbrack` from Numerical Recepies Recepies in C++
"""
|
550
| 562
|
QuantEcon.jl
| 221
|
function backward_induction(ddp::DiscreteDP{T}, J::Integer,
v_term::AbstractVector{<:Real}=
zeros(num_states(ddp))) where {T}
n = num_states(ddp)
S = typeof(zero(T)/one(T))
vs = Matrix{S}(undef, n, J+1)
vs[:,end] = v_term
sigmas = Matrix{Int}(undef, n, J)
@inbounds for j in J+1: -1: 2
@views bellman_operator!(ddp, vs[:,j], vs[:,j-1], sigmas[:,j-1])
end
return vs, sigmas
end
|
function backward_induction(ddp::DiscreteDP{T}, J::Integer,
v_term::AbstractVector{<:Real}=
zeros(num_states(ddp))) where {T}
n = num_states(ddp)
S = typeof(zero(T)/one(T))
vs = Matrix{S}(undef, n, J+1)
vs[:,end] = v_term
sigmas = Matrix{Int}(undef, n, J)
@inbounds for j in J+1: -1: 2
@views bellman_operator!(ddp, vs[:,j], vs[:,j-1], sigmas[:,j-1])
end
return vs, sigmas
end
|
[
550,
562
] |
function backward_induction(ddp::DiscreteDP{T}, J::Integer,
v_term::AbstractVector{<:Real}=
zeros(num_states(ddp))) where {T}
n = num_states(ddp)
S = typeof(zero(T)/one(T))
vs = Matrix{S}(undef, n, J+1)
vs[:,end] = v_term
sigmas = Matrix{Int}(undef, n, J)
@inbounds for j in J+1: -1: 2
@views bellman_operator!(ddp, vs[:,j], vs[:,j-1], sigmas[:,j-1])
end
return vs, sigmas
end
|
function backward_induction(ddp::DiscreteDP{T}, J::Integer,
v_term::AbstractVector{<:Real}=
zeros(num_states(ddp))) where {T}
n = num_states(ddp)
S = typeof(zero(T)/one(T))
vs = Matrix{S}(undef, n, J+1)
vs[:,end] = v_term
sigmas = Matrix{Int}(undef, n, J)
@inbounds for j in J+1: -1: 2
@views bellman_operator!(ddp, vs[:,j], vs[:,j-1], sigmas[:,j-1])
end
return vs, sigmas
end
|
backward_induction
| 550
| 562
|
src/markov/ddp.jl
|
#CURRENT FILE: QuantEcon.jl/src/markov/ddp.jl
##CHUNK 1
```
for ``j= J, \\ldots, 1``, where the terminal value function ``v_{J+1}`` is
exogenously given by `v_term`.
# Parameters
- `ddp::DiscreteDP{T}` : Object that contains the Model Parameters
- `J::Integer`: Number of decision periods
- `v_term::AbstractVector{<:Real}=zeros(num_states(ddp))`: Terminal value
function of length equal to n (the number of states)
# Returns
- `vs::Matrix{S}`: Array of shape (n, J+1) where `vs[:,j]` contains the
optimal value function at period j = 1, ..., J+1.
- `sigmas::Matrix{Int}`: Array of shape (n, J) where `sigmas[:,j]` contains the
optimal policy function at period j = 1, ..., J.
"""
##CHUNK 2
```math
v^{\\ast}_j(s) = \\max_{a \\in A(s)} r(s, a) +
\\beta \\sum_{s' \\in S} q(s'|s, a) v^{\\ast}_{j+1}(s')
\\quad (s \\in S)
```
and
```math
\\sigma^{\\ast}_j(s) \\in \\operatorname*{arg\\,max}_{a \\in A(s)}
r(s, a) + \\beta \\sum_{s' \\in S} q(s'|s, a) v^*_{j+1}(s')
\\quad (s \\in S)
```
for ``j= J, \\ldots, 1``, where the terminal value function ``v_{J+1}`` is
exogenously given by `v_term`.
# Parameters
- `ddp::DiscreteDP{T}` : Object that contains the Model Parameters
- `J::Integer`: Number of decision periods
- `v_term::AbstractVector{<:Real}=zeros(num_states(ddp))`: Terminal value
##CHUNK 3
function solve(ddp::DiscreteDP{T}, v_init::AbstractVector{T},
method::Type{Algo}=VFI; max_iter::Integer=250,
epsilon::Real=1e-3, k::Integer=20) where {Algo<:DDPAlgorithm,T}
ddpr = DPSolveResult{Algo,T}(ddp, v_init)
_solve!(ddp, ddpr, max_iter, epsilon, k)
ddpr.mc = MarkovChain(ddp, ddpr)
ddpr
end
"""
backward_induction(ddp, J[, v_term=zeros(num_states(ddp))])
Solve by backward induction a ``J``-period finite horizon discrete dynamic
program with stationary reward ``r`` and transition probability functions ``q``
and discount factor ``\\beta \\in [0, 1]``.
The optimal value functions ``v^{\\ast}_1, \\ldots, v^{\\ast}_{J+1}`` and
policy functions ``\\sigma^{\\ast}_1, \\ldots, \\sigma^{\\ast}_J`` are obtained
by ``v^{\\ast}_{J+1} = v_{J+1}``, and
##CHUNK 4
SA formulation
- `a_indptr::Vector{Tind}`: Action Index Pointers. Empty unless using
SA formulation
##### Returns
- `ddp::DiscreteDP` : Constructor for DiscreteDP object
"""
function DiscreteDP(R::AbstractArray{T,NR},
Q::AbstractArray{T,NQ},
beta::Tbeta, s_indices::Vector{Tind},
a_indices::Vector{Tind}) where {T,NQ,NR,Tbeta,Tind}
DiscreteDP{T,NQ,NR,Tbeta,Tind,typeof(Q)}(R, Q, beta, s_indices, a_indices)
end
#--------------#
#-Type Aliases-#
#--------------#
##CHUNK 5
# TODO: express it in a similar way as above to exploit Julia's column major order
function RQ_sigma(ddp::DDPsa, sigma::AbstractVector{T}) where T<:Integer
sigma_indices = Array{T}(undef, num_states(ddp))
_find_indices!(ddp.a_indices, ddp.a_indptr, sigma, sigma_indices)
R_sigma = ddp.R[sigma_indices]
Q_sigma = ddp.Q[sigma_indices, :]
return R_sigma, Q_sigma
end
# ---------------- #
# Internal methods #
# ---------------- #
## s_wise_max for DDP
s_wise_max(ddp::DiscreteDP, vals::AbstractMatrix) = s_wise_max(vals)
function s_wise_max!(
ddp::DiscreteDP, vals::AbstractMatrix, out::AbstractVector,
out_argmax::AbstractVector
##CHUNK 6
Tv::Array{Tval}
num_iter::Int
sigma::Array{Int,1}
mc::MarkovChain
function DPSolveResult{Algo,Tval}(
ddp::DiscreteDP
) where {Algo,Tval}
v = s_wise_max(ddp, ddp.R) # Initialise v with v_init
ddpr = new{Algo,Tval}(v, similar(v), 0, similar(v, Int))
# fill in sigma with proper values
compute_greedy!(ddp, ddpr)
ddpr
end
# method to pass initial value function (skip the s-wise max)
function DPSolveResult{Algo,Tval}(
ddp::DiscreteDP, v::Vector
) where {Algo,Tval}
##CHUNK 7
- `a_indptr::Vector{Tind}`: Action Index Pointers. Empty unless using
SA formulation
##### Returns
- `ddp::DiscreteDP` : DiscreteDP object
"""
mutable struct DiscreteDP{T<:Real,NQ,NR,Tbeta<:Real,Tind,TQ<:AbstractArray{T,NQ}}
R::Array{T,NR} # Reward Array
Q::TQ # Transition Probability Array
beta::Tbeta # Discount Factor
a_indices::Vector{Tind} # Action Indices
a_indptr::Vector{Tind} # Action Index Pointers
function DiscreteDP{T,NQ,NR,Tbeta,Tind,TQ}(
R::Array, Q::TQ, beta::Real
) where {T,NQ,NR,Tbeta,Tind,TQ}
# verify input integrity 1
if NQ != 3
##CHUNK 8
end
# check feasibility
aptr_diff = diff(a_indptr)
if any(aptr_diff .== 0.0)
# First state index such that no action is available
s = findall(aptr_diff .== 0.0) # Only Gives True
throw(ArgumentError("for every state at least one action
must be available: violated for state $s"))
end
# indices
_a_indices = Vector{Tind}(_a_indices)
a_indptr = Vector{Tind}(a_indptr)
new{T,NQ,NR,Tbeta,Tind,typeof(Q)}(R, Q, beta, _a_indices, a_indptr)
end
end
"""
##CHUNK 9
s_wise_max!(ddp.a_indices, ddp.a_indptr, vals,
Array{Float64}(undef, num_states(ddp)))
end
function s_wise_max!(
ddp::DDPsa, vals::AbstractVector, out::AbstractVector,
out_argmax::AbstractVector
)
s_wise_max!(ddp.a_indices, ddp.a_indptr, vals, out, out_argmax)
end
"""
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`Vector` of size `(num_sa_pairs,)`.
"""
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector,
vals::AbstractVector, out::AbstractVector
)
n = length(out)
##CHUNK 10
throw(ArgumentError("for every state the reward must be finite for
some action: violated for state $s"))
end
# here the indices and indptr are empty.
_a_indices = Vector{Int}()
a_indptr = Vector{Int}()
new{T,NQ,NR,Tbeta,Tind,typeof(Q)}(R, Q, beta, _a_indices, a_indptr)
end
# Note: We left R, Q as type Array to produce more helpful error message with regards to shape.
# R and Q are dense Arrays
function DiscreteDP{T,NQ,NR,Tbeta,Tind,TQ}(
R::AbstractArray, Q::TQ, beta::Real, s_indices::Vector,
a_indices::Vector
) where {T,NQ,NR,Tbeta,Tind,TQ}
# verify input integrity 1
if NQ != 2
|
699
| 716
|
QuantEcon.jl
| 222
|
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector,
vals::AbstractVector, out::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
end
end
return out
end
|
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector,
vals::AbstractVector, out::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
end
end
return out
end
|
[
699,
716
] |
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector,
vals::AbstractVector, out::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
end
end
return out
end
|
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector,
vals::AbstractVector, out::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
end
end
return out
end
|
s_wise_max!
| 699
| 716
|
src/markov/ddp.jl
|
#FILE: QuantEcon.jl/other/ddpsolve.jl
##CHUNK 1
# TODO: the stdlib function findmax(arr, dim) should do this now
function indvalmax(a::Matrix{T}, dim::Integer=2) where T
out_size = dim == 2 ? size(a, 1) : size(a, 2)
out_v = Array(T, out_size)
out_i = Array(Int64, out_size)
if dim == 2
for i=1:out_size
out_v[i], out_i[i] = findmax(a[i, :])
end
elseif dim == 1
for i=1:out_size
out_v[i], out_i[i] = findmax(a[:, i])
end
else
error("dim must be 1 or 2. Received $dim")
end
return out_v, out_i
end
##CHUNK 2
# TODO: the stdlib function findmax(arr, dim) should do this now
function indvalmax(a::Matrix{T}, dim::Integer=2) where T
out_size = dim == 2 ? size(a, 1) : size(a, 2)
out_v = Array(T, out_size)
out_i = Array(Int64, out_size)
if dim == 2
for i=1:out_size
out_v[i], out_i[i] = findmax(a[i, :])
end
#CURRENT FILE: QuantEcon.jl/src/markov/ddp.jl
##CHUNK 1
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`Vector` of size `(num_sa_pairs,)`.
Also fills `out_argmax` with the cartesiean index associated with the `indmax` in
each row
"""
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector, vals::AbstractVector,
out::AbstractVector, out_argmax::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
##CHUNK 2
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
out_argmax[i] = a_indices[m]
end
end
out, out_argmax
end
"""
Check whether `s_indices` and `a_indices` are sorted in lexicographic order.
##CHUNK 3
)
s_wise_max!(ddp.a_indices, ddp.a_indptr, vals, out, out_argmax)
end
"""
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`Vector` of size `(num_sa_pairs,)`.
"""
"""
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`Vector` of size `(num_sa_pairs,)`.
Also fills `out_argmax` with the cartesiean index associated with the `indmax` in
each row
"""
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector, vals::AbstractVector,
out::AbstractVector, out_argmax::AbstractVector
)
##CHUNK 4
out
end
function _find_indices!(
a_indices::AbstractVector, a_indptr::AbstractVector, sigma::AbstractVector,
out::AbstractVector
)
n = length(sigma)
for i in 1:n, j in a_indptr[i]:(a_indptr[i+1]-1)
if sigma[i] == a_indices[j]
out[i] = j
end
end
end
@doc doc"""
Define Matrix Multiplication between 3-dimensional matrix and a vector
Matrix multiplication over the last dimension of ``A``
##CHUNK 5
## s_wise_max for DDPsa
function s_wise_max(ddp::DDPsa, vals::AbstractVector)
s_wise_max!(ddp.a_indices, ddp.a_indptr, vals,
Array{Float64}(undef, num_states(ddp)))
end
function s_wise_max!(
ddp::DDPsa, vals::AbstractVector, out::AbstractVector,
out_argmax::AbstractVector
)
s_wise_max!(ddp.a_indices, ddp.a_indptr, vals, out, out_argmax)
end
"""
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`Vector` of size `(num_sa_pairs,)`.
"""
"""
##CHUNK 6
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`AbstractMatrix` of size `(num_states, num_actions)`.
Also fills `out_argmax` with the column number associated with the `indmax` in
each row
"""
function s_wise_max!(
vals::AbstractMatrix, out::AbstractVector, out_argmax::AbstractVector
)
# naive implementation where I just iterate over the rows
nr, nc = size(vals)
for i_r in 1:nr
# reset temporaries
cur_max = -Inf
out_argmax[i_r] = 1
for i_c in 1:nc
@inbounds v_rc = vals[i_r, i_c]
if v_rc > cur_max
out[i_r] = v_rc
##CHUNK 7
out_argmax[i_r] = i_c
cur_max = v_rc
end
end
end
out, out_argmax
end
## s_wise_max for DDPsa
function s_wise_max(ddp::DDPsa, vals::AbstractVector)
s_wise_max!(ddp.a_indices, ddp.a_indptr, vals,
Array{Float64}(undef, num_states(ddp)))
end
function s_wise_max!(
ddp::DDPsa, vals::AbstractVector, out::AbstractVector,
out_argmax::AbstractVector
##CHUNK 8
)
L = length(s_indices)
for i in 1:L-1
if s_indices[i] > s_indices[i+1]
return false
end
if s_indices[i] == s_indices[i+1]
if a_indices[i] >= a_indices[i+1]
return false
end
end
end
return true
end
"""
Generate `a_indptr`; stored in `out`. `s_indices` is assumed to be
in sorted order.
Parameters
|
757
| 772
|
QuantEcon.jl
| 223
|
function _has_sorted_sa_indices(
s_indices::AbstractVector, a_indices::AbstractVector
)
L = length(s_indices)
for i in 1:L-1
if s_indices[i] > s_indices[i+1]
return false
end
if s_indices[i] == s_indices[i+1]
if a_indices[i] >= a_indices[i+1]
return false
end
end
end
return true
end
|
function _has_sorted_sa_indices(
s_indices::AbstractVector, a_indices::AbstractVector
)
L = length(s_indices)
for i in 1:L-1
if s_indices[i] > s_indices[i+1]
return false
end
if s_indices[i] == s_indices[i+1]
if a_indices[i] >= a_indices[i+1]
return false
end
end
end
return true
end
|
[
757,
772
] |
function _has_sorted_sa_indices(
s_indices::AbstractVector, a_indices::AbstractVector
)
L = length(s_indices)
for i in 1:L-1
if s_indices[i] > s_indices[i+1]
return false
end
if s_indices[i] == s_indices[i+1]
if a_indices[i] >= a_indices[i+1]
return false
end
end
end
return true
end
|
function _has_sorted_sa_indices(
s_indices::AbstractVector, a_indices::AbstractVector
)
L = length(s_indices)
for i in 1:L-1
if s_indices[i] > s_indices[i+1]
return false
end
if s_indices[i] == s_indices[i+1]
if a_indices[i] >= a_indices[i+1]
return false
end
end
end
return true
end
|
_has_sorted_sa_indices
| 757
| 772
|
src/markov/ddp.jl
|
#CURRENT FILE: QuantEcon.jl/src/markov/ddp.jl
##CHUNK 1
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
out_argmax[i] = a_indices[m]
end
end
out, out_argmax
end
"""
Check whether `s_indices` and `a_indices` are sorted in lexicographic order.
Parameters
----------
##CHUNK 2
`s_indices`, `a_indices` : Vectors
Returns
-------
bool: Whether `s_indices` and `a_indices` are sorted.
"""
"""
Generate `a_indptr`; stored in `out`. `s_indices` is assumed to be
in sorted order.
Parameters
----------
- `num_states::Integer`
- `s_indices::AbstractVector{T}`
- `out::AbstractVector{T}` : with length = `num_states` + 1
"""
function _generate_a_indptr!(
num_states::Int, s_indices::AbstractVector, out::AbstractVector
##CHUNK 3
end
out, out_argmax
end
"""
Check whether `s_indices` and `a_indices` are sorted in lexicographic order.
Parameters
----------
`s_indices`, `a_indices` : Vectors
Returns
-------
bool: Whether `s_indices` and `a_indices` are sorted.
"""
"""
Generate `a_indptr`; stored in `out`. `s_indices` is assumed to be
in sorted order.
##CHUNK 4
throw(ArgumentError(msg))
end
if _has_sorted_sa_indices(s_indices, a_indices)
a_indptr = Array{Int64}(undef, num_states + 1)
_a_indices = copy(a_indices)
_generate_a_indptr!(num_states, s_indices, a_indptr)
else
# transpose matrix to use Julia's CSC; now rows are actions and
# columns are states (this is why it's called as_ptr not sa_ptr)
m = maximum(a_indices)
n = maximum(s_indices)
msg = "Duplicate s-a pair found"
as_ptr = sparse(a_indices, s_indices, 1:num_sa_pairs, m, n,
(x,y)->throw(ArgumentError(msg)))
_a_indices = as_ptr.rowval
a_indptr = as_ptr.colptr
R = R[as_ptr.nzval]
Q = Q[as_ptr.nzval, :]
##CHUNK 5
num_sa_pairs, num_states = size(Q)
if length(R) != num_sa_pairs
throw(ArgumentError("shapes of R and Q must be (L,) and (L,n)"))
end
if length(s_indices) != num_sa_pairs
msg = "length of s_indices must be equal to the number of s-a pairs"
throw(ArgumentError(msg))
end
if length(a_indices) != num_sa_pairs
msg = "length of a_indices must be equal to the number of s-a pairs"
throw(ArgumentError(msg))
end
if _has_sorted_sa_indices(s_indices, a_indices)
a_indptr = Array{Int64}(undef, num_states + 1)
_a_indices = copy(a_indices)
_generate_a_indptr!(num_states, s_indices, a_indptr)
else
# transpose matrix to use Julia's CSC; now rows are actions and
# columns are states (this is why it's called as_ptr not sa_ptr)
##CHUNK 6
)
s_wise_max!(ddp.a_indices, ddp.a_indptr, vals, out, out_argmax)
end
"""
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`Vector` of size `(num_sa_pairs,)`.
"""
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector,
vals::AbstractVector, out::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
##CHUNK 7
vals::AbstractVector, out::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
end
end
return out
end
"""
Populate `out` with `max_a vals(s, a)`, where `vals` is represented as a
`Vector` of size `(num_sa_pairs,)`.
##CHUNK 8
m = maximum(a_indices)
n = maximum(s_indices)
msg = "Duplicate s-a pair found"
as_ptr = sparse(a_indices, s_indices, 1:num_sa_pairs, m, n,
(x,y)->throw(ArgumentError(msg)))
_a_indices = as_ptr.rowval
a_indptr = as_ptr.colptr
R = R[as_ptr.nzval]
Q = Q[as_ptr.nzval, :]
end
# check feasibility
aptr_diff = diff(a_indptr)
if any(aptr_diff .== 0.0)
# First state index such that no action is available
s = findall(aptr_diff .== 0.0) # Only Gives True
throw(ArgumentError("for every state at least one action
must be available: violated for state $s"))
end
##CHUNK 9
Also fills `out_argmax` with the cartesiean index associated with the `indmax` in
each row
"""
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector, vals::AbstractVector,
out::AbstractVector, out_argmax::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
out_argmax[i] = a_indices[m]
end
##CHUNK 10
)
idx = 1
out[1] = 1
for s in 1:num_states-1
while(s_indices[idx] == s)
idx += 1
end
out[s+1] = idx
end
# need this +1 to be consistent with Julia's sparse pointers:
# colptr[i]:(colptr[i+1]-1)
out[num_states+1] = length(s_indices)+1
out
end
function _find_indices!(
a_indices::AbstractVector, a_indptr::AbstractVector, sigma::AbstractVector,
out::AbstractVector
)
n = length(sigma)
|
185
| 217
|
QuantEcon.jl
| 224
|
function estimate_mc_discrete(X::Vector{T}, states::Vector{T}) where T
# Get length of simulation
capT = length(X)
# Make sure all of the passed in states appear in X... If not
# throw an error
if any(!in(x, X) for x in states)
error("One of the states does not appear in history X")
end
# Count states and store in dictionary
nstates = length(states)
d = Dict{T, Int}(zip(states, 1:nstates))
# Counter matrix and dictionary mapping i -> states
cm = zeros(nstates, nstates)
# Compute conditional probabilities for each state
state_i = d[X[1]]
for t in 1:capT-1
# Find next period's state
state_j = d[X[t+1]]
cm[state_i, state_j] += 1.0
# Tomorrow's state is j
state_i = state_j
end
# Compute probabilities using counted elements
P = cm ./ sum(cm, dims = 2)
return MarkovChain(P, states)
end
|
function estimate_mc_discrete(X::Vector{T}, states::Vector{T}) where T
# Get length of simulation
capT = length(X)
# Make sure all of the passed in states appear in X... If not
# throw an error
if any(!in(x, X) for x in states)
error("One of the states does not appear in history X")
end
# Count states and store in dictionary
nstates = length(states)
d = Dict{T, Int}(zip(states, 1:nstates))
# Counter matrix and dictionary mapping i -> states
cm = zeros(nstates, nstates)
# Compute conditional probabilities for each state
state_i = d[X[1]]
for t in 1:capT-1
# Find next period's state
state_j = d[X[t+1]]
cm[state_i, state_j] += 1.0
# Tomorrow's state is j
state_i = state_j
end
# Compute probabilities using counted elements
P = cm ./ sum(cm, dims = 2)
return MarkovChain(P, states)
end
|
[
185,
217
] |
function estimate_mc_discrete(X::Vector{T}, states::Vector{T}) where T
# Get length of simulation
capT = length(X)
# Make sure all of the passed in states appear in X... If not
# throw an error
if any(!in(x, X) for x in states)
error("One of the states does not appear in history X")
end
# Count states and store in dictionary
nstates = length(states)
d = Dict{T, Int}(zip(states, 1:nstates))
# Counter matrix and dictionary mapping i -> states
cm = zeros(nstates, nstates)
# Compute conditional probabilities for each state
state_i = d[X[1]]
for t in 1:capT-1
# Find next period's state
state_j = d[X[t+1]]
cm[state_i, state_j] += 1.0
# Tomorrow's state is j
state_i = state_j
end
# Compute probabilities using counted elements
P = cm ./ sum(cm, dims = 2)
return MarkovChain(P, states)
end
|
function estimate_mc_discrete(X::Vector{T}, states::Vector{T}) where T
# Get length of simulation
capT = length(X)
# Make sure all of the passed in states appear in X... If not
# throw an error
if any(!in(x, X) for x in states)
error("One of the states does not appear in history X")
end
# Count states and store in dictionary
nstates = length(states)
d = Dict{T, Int}(zip(states, 1:nstates))
# Counter matrix and dictionary mapping i -> states
cm = zeros(nstates, nstates)
# Compute conditional probabilities for each state
state_i = d[X[1]]
for t in 1:capT-1
# Find next period's state
state_j = d[X[t+1]]
cm[state_i, state_j] += 1.0
# Tomorrow's state is j
state_i = state_j
end
# Compute probabilities using counted elements
P = cm ./ sum(cm, dims = 2)
return MarkovChain(P, states)
end
|
estimate_mc_discrete
| 185
| 217
|
src/markov/markov_approx.jl
|
#FILE: QuantEcon.jl/src/markov/ddp.jl
##CHUNK 1
num_states, num_actions = size(R)
if size(Q) != (num_states, num_actions, num_states)
throw(ArgumentError("shapes of R and Q must be (n,m) and (n,m,n)"))
end
# check feasibility
R_max = s_wise_max(R)
if any(R_max .== -Inf)
# First state index such that all actions yield -Inf
s = findall(R_max .== -Inf) #-Only Gives True
throw(ArgumentError("for every state the reward must be finite for
some action: violated for state $s"))
end
# here the indices and indptr are empty.
_a_indices = Vector{Int}()
a_indptr = Vector{Int}()
new{T,NQ,NR,Tbeta,Tind,typeof(Q)}(R, Q, beta, _a_indices, a_indptr)
end
#FILE: QuantEcon.jl/src/markov/mc_tools.jl
##CHUNK 1
Methods are available that provide useful information such as the stationary
distributions, and communication and recurrent classes, and allow simulation of
state transitions.
##### Fields
- `p::AbstractMatrix` : The transition matrix. Must be square, all elements must be nonnegative, and all rows must sum to unity.
- `state_values::AbstractVector` : Vector containing the values associated with the states.
"""
mutable struct MarkovChain{T, TM<:AbstractMatrix{T}, TV<:AbstractVector}
p::TM # valid stochastic matrix
state_values::TV
function MarkovChain{T,TM,TV}(p::AbstractMatrix, state_values) where {T,TM,TV}
n, m = size(p)
n != m &&
throw(DimensionMismatch("stochastic matrix must be square"))
##CHUNK 2
simulate_indices!(X, mc; init=init)
end
"""
Fill `X` with sample paths of the Markov chain `mc` as columns.
The resulting matrix has the indices of the state values of `mc` as elements.
### Arguments
- `X::Matrix{Int}` : Preallocated matrix to be filled with indices
of the sample paths of the Markov chain `mc`.
- `mc::MarkovChain` : MarkovChain instance.
- `;init=rand(1:n_states(mc))` : Can be one of the following
- blank: random initial condition for each chain
- scalar: same initial condition for each chain
- vector: cycle through the elements, applying each as an
initial condition until all columns have an initial condition
(allows for more columns than initial conditions)
"""
#FILE: QuantEcon.jl/test/test_mc_tools.jl
##CHUNK 1
num_sims = 5
X = Array{eltype(mc.state_values)}(undef, ts_length, num_sims)
simulate!(X, mc; init=init)
@test vec(X[1, :]) == mc.state_values[init .* ones(Int, num_sims)]
init = [2, 1]
simulate!(X, mc; init=init)
@test vec(X[1, :]) == mc.state_values[collect(take(cycle(init), num_sims))]
end
end # testset
end
@testset "simulate iterators" begin
p = [0.0 1.0 0.0 0.0
0.0 0.0 1.0 0.0
0.0 0.0 0.0 1.0
1.0 0.0 0.0 0.0]
mc = MarkovChain(p, [10.0, 20.0, 30.0, 40.0])
mcis = MCIndSimulator(mc, 50, 1)
#FILE: QuantEcon.jl/test/test_ddp.jl
##CHUNK 1
@testset "Testing markov/dpp.jl" begin
#-Setup-#
# Example from Puterman 2005, Section 3.1
beta = 0.95
# Formulation with Dense Matrices R: n x m, Q: n x m x n
n, m = 2, 2 # number of states, number of actions
R = [5.0 10.0; -1.0 -Inf]
Q = Array{Float64}(undef, n, m, n)
Q[:, :, 1] = [0.5 0.0; 0.0 0.0]
Q[:, :, 2] = [0.5 1.0; 1.0 1.0]
ddp0 = DiscreteDP(R, Q, beta)
ddp0_b1 = DiscreteDP(R, Q, 1.0)
# Formulation with state-action pairs
L = 3 # Number of state-action pairs
##CHUNK 2
R = [5.0 10.0; -1.0 -Inf]
Q = Array{Float64}(undef, n, m, n)
Q[:, :, 1] = [0.5 0.0; 0.0 0.0]
Q[:, :, 2] = [0.5 1.0; 1.0 1.0]
ddp0 = DiscreteDP(R, Q, beta)
ddp0_b1 = DiscreteDP(R, Q, 1.0)
# Formulation with state-action pairs
L = 3 # Number of state-action pairs
s_indices = [1, 1, 2]
a_indices = [1, 2, 1]
R_sa = [R[1, 1], R[1, 2], R[2, 1]]
Q_sa = spzeros(L, n)
Q_sa[1, :] = Q[1, 1, :]
Q_sa[2, :] = Q[1, 2, :]
Q_sa[3, :] = Q[2, 1, :]
ddp0_sa = DiscreteDP(R_sa, Q_sa, beta, s_indices, a_indices)
ddp0_sa_b1 = DiscreteDP(R_sa, Q_sa, 1.0, s_indices, a_indices)
#CURRENT FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
y1D, y1Dhelper = construct_1D_grid(Sigma, Nm, M, n_sigmas, method)
# Construct all possible combinations of elements of the 1-D grids
D = allcomb3(y1D')'
# Construct finite-state Markov chain approximation
# conditional mean of the VAR process at each grid point
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
# normalizing constant for maximum entropy computations
scaling_factor = y1D[:, end]
# used to store some intermediate calculations
temp = Matrix{Float64}(undef, Nm, M)
# store optimized values of lambda (2 moments) to improve initial guesses
lambda_bar = zeros(2*M, Nm^M)
# small positive constant for numerical stability
kappa = 1e-8
for ii = 1:(Nm^M)
##CHUNK 2
c = 1
for k=1:floor(Int, n_moments/2)
c = (2*k-1)*c
gaussian_moment[2*k] = c
end
# Compute standardized VAR(1) representation
# (zero mean and diagonal covariance matrix)
A, C, mu, Sigma = standardize_var(b, B, Psi, M)
# Construct 1-D grids
y1D, y1Dhelper = construct_1D_grid(Sigma, Nm, M, n_sigmas, method)
# Construct all possible combinations of elements of the 1-D grids
D = allcomb3(y1D')'
# Construct finite-state Markov chain approximation
# conditional mean of the VAR process at each grid point
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
##CHUNK 3
(1-q)*[zeros(1, n); θ_nm1 zeros(n-1, 1)]
θN[2:end-1, :] ./= 2
return range(m-Δ, stop=m+Δ, length=n), θN
end
end
# These are to help me order types other than vectors
@inline _emcd_lt(a::T, b::T) where {T} = isless(a, b)
# @inline _emcd_lt(a::Vector{T}, b::Vector{T}) where {T} = Base.lt(Base.Order.Lexicographic, a, b)
@doc doc"""
Accepts the simulation of a discrete state Markov chain and estimates
the transition probabilities
Let ``S = s_1, s_2, \ldots, s_N`` with ``s_1 < s_2 < \ldots < s_N`` be the discrete
states of a Markov chain. Furthermore, let ``P`` be the corresponding
stochastic transition matrix.
##CHUNK 4
#
# It is ok to do after the fact because adding this constant to each
# term effectively shifts the entire distribution. Because the
# normal distribution is symmetric and we just care about relative
# distances between points, the probabilities will be the same.
#
# I could have shifted it before, but then I would need to evaluate
# the cdf with a function that allows the distribution of input
# arguments to be [μ/(1 - ρ), 1] instead of [0, 1]
yy = y .+ μ / (1 - ρ) # center process around its mean (wbar / (1 - rho)) in new variable
# renormalize. In some test cases the rows sum to something that is 2e-15
# away from 1.0, which caused problems in the MarkovChain constructor
Π = Π./sum(Π, dims = 2)
MarkovChain(Π, yy)
end
|
298
| 425
|
QuantEcon.jl
| 225
|
function discrete_var(b::Union{Real, AbstractVector},
B::Union{Real, AbstractMatrix},
Psi::Union{Real, AbstractMatrix},
Nm::Integer,
n_moments::Integer=2,
method::VAREstimationMethod=Even(),
n_sigmas::Real=sqrt(Nm-1))
# b = zeros(2)
# A = [0.9809 0.0028; 0.041 0.9648]
# Sigma = [7.569e-5 0.0; 0.0 0.00068644]
# N = 9
# n_moments = nMoments
# method = Quantile()
# b, B, Psi, Nm = (zeros(2), A, Sigma, N, nMoments, Quantile())
M, M_ = size(B, 1), size(B, 2)
# Check size restrictions on matrices
M == M_ || throw(ArgumentError("B must be a scalar or square matrix"))
M == length(b) || throw(ArgumentError("b must have the same number of rows as B"))
#% Check that Psi is a valid covariance matrix
isposdef(Psi) || throw(ArgumentError("Psi must be a positive definite matrix"))
# Check that Nm is a valid number of grid points
Nm >= 3 || throw(ArgumentError("Nm must be a positive interger greater than 3"))
# Check that n_moments is a valid number
if n_moments < 1 || !(n_moments % 2 == 0 || n_moments == 1)
error("n_moments must be either 1 or a positive even integer")
end
# warning about persistency
warn_persistency(B, method)
# Compute polynomial moments of standard normal distribution
gaussian_moment = zeros(n_moments)
c = 1
for k=1:floor(Int, n_moments/2)
c = (2*k-1)*c
gaussian_moment[2*k] = c
end
# Compute standardized VAR(1) representation
# (zero mean and diagonal covariance matrix)
A, C, mu, Sigma = standardize_var(b, B, Psi, M)
# Construct 1-D grids
y1D, y1Dhelper = construct_1D_grid(Sigma, Nm, M, n_sigmas, method)
# Construct all possible combinations of elements of the 1-D grids
D = allcomb3(y1D')'
# Construct finite-state Markov chain approximation
# conditional mean of the VAR process at each grid point
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
# normalizing constant for maximum entropy computations
scaling_factor = y1D[:, end]
# used to store some intermediate calculations
temp = Matrix{Float64}(undef, Nm, M)
# store optimized values of lambda (2 moments) to improve initial guesses
lambda_bar = zeros(2*M, Nm^M)
# small positive constant for numerical stability
kappa = 1e-8
for ii = 1:(Nm^M)
# Construct prior guesses for maximum entropy optimizations
q = construct_prior_guess(cond_mean[:, ii], Nm, y1D, y1Dhelper, method)
# Make sure all elements of the prior are stricly positive
q[q.<kappa] .= kappa
for jj = 1:M
# Try to use intelligent initial guesses
if ii == 1
lambda_guess = zeros(2)
else
lambda_guess = lambda_bar[(jj-1)*2+1:jj*2, ii-1]
end
# Maximum entropy optimization
if n_moments == 1 # match only 1 moment
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
else # match 2 moments first
p, lambda, moment_error = discrete_approximation(y1D[jj, :],
X -> polynomial_moment(X, cond_mean[jj, ii], scaling_factor[jj], 2),
[0; 1]./(scaling_factor[jj].^(1:2)), q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5) # if 2 moments fail, just match 1 moment
@warn("Failed to match first 2 moments. Just matching 1.")
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
lambda_bar[(jj-1)*2+1:jj*2, ii] = zeros(2,1)
elseif n_moments == 2
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
temp[:, jj] = p
else # solve maximum entropy problem sequentially from low order moments
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
for mm = 4:2:n_moments
lambda_guess = vcat(lambda, 0.0, 0.0) # add 0 to previous lambda
pnew, lambda, moment_error = discrete_approximation(y1D[jj,:],
X -> polynomial_moment(X, cond_mean[jj,ii],
scaling_factor[jj], mm),
gaussian_moment[1:mm]./(scaling_factor[jj].^(1:mm)),
q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5)
@warn(
"Failed to match first $mm moments. Just matching $(mm-2).")
break
else
p = pnew
end
end
temp[:, jj] = p
end
end
end
P[ii, :] .= vec(prod(allcomb3(temp), dims = 2))
end
X = C*D .+ mu # map grids back to original space
M != 1 || (return MarkovChain(P, vec(X)))
return MarkovChain(P, [X[:, i] for i in 1:Nm^M])
end
|
function discrete_var(b::Union{Real, AbstractVector},
B::Union{Real, AbstractMatrix},
Psi::Union{Real, AbstractMatrix},
Nm::Integer,
n_moments::Integer=2,
method::VAREstimationMethod=Even(),
n_sigmas::Real=sqrt(Nm-1))
# b = zeros(2)
# A = [0.9809 0.0028; 0.041 0.9648]
# Sigma = [7.569e-5 0.0; 0.0 0.00068644]
# N = 9
# n_moments = nMoments
# method = Quantile()
# b, B, Psi, Nm = (zeros(2), A, Sigma, N, nMoments, Quantile())
M, M_ = size(B, 1), size(B, 2)
# Check size restrictions on matrices
M == M_ || throw(ArgumentError("B must be a scalar or square matrix"))
M == length(b) || throw(ArgumentError("b must have the same number of rows as B"))
#% Check that Psi is a valid covariance matrix
isposdef(Psi) || throw(ArgumentError("Psi must be a positive definite matrix"))
# Check that Nm is a valid number of grid points
Nm >= 3 || throw(ArgumentError("Nm must be a positive interger greater than 3"))
# Check that n_moments is a valid number
if n_moments < 1 || !(n_moments % 2 == 0 || n_moments == 1)
error("n_moments must be either 1 or a positive even integer")
end
# warning about persistency
warn_persistency(B, method)
# Compute polynomial moments of standard normal distribution
gaussian_moment = zeros(n_moments)
c = 1
for k=1:floor(Int, n_moments/2)
c = (2*k-1)*c
gaussian_moment[2*k] = c
end
# Compute standardized VAR(1) representation
# (zero mean and diagonal covariance matrix)
A, C, mu, Sigma = standardize_var(b, B, Psi, M)
# Construct 1-D grids
y1D, y1Dhelper = construct_1D_grid(Sigma, Nm, M, n_sigmas, method)
# Construct all possible combinations of elements of the 1-D grids
D = allcomb3(y1D')'
# Construct finite-state Markov chain approximation
# conditional mean of the VAR process at each grid point
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
# normalizing constant for maximum entropy computations
scaling_factor = y1D[:, end]
# used to store some intermediate calculations
temp = Matrix{Float64}(undef, Nm, M)
# store optimized values of lambda (2 moments) to improve initial guesses
lambda_bar = zeros(2*M, Nm^M)
# small positive constant for numerical stability
kappa = 1e-8
for ii = 1:(Nm^M)
# Construct prior guesses for maximum entropy optimizations
q = construct_prior_guess(cond_mean[:, ii], Nm, y1D, y1Dhelper, method)
# Make sure all elements of the prior are stricly positive
q[q.<kappa] .= kappa
for jj = 1:M
# Try to use intelligent initial guesses
if ii == 1
lambda_guess = zeros(2)
else
lambda_guess = lambda_bar[(jj-1)*2+1:jj*2, ii-1]
end
# Maximum entropy optimization
if n_moments == 1 # match only 1 moment
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
else # match 2 moments first
p, lambda, moment_error = discrete_approximation(y1D[jj, :],
X -> polynomial_moment(X, cond_mean[jj, ii], scaling_factor[jj], 2),
[0; 1]./(scaling_factor[jj].^(1:2)), q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5) # if 2 moments fail, just match 1 moment
@warn("Failed to match first 2 moments. Just matching 1.")
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
lambda_bar[(jj-1)*2+1:jj*2, ii] = zeros(2,1)
elseif n_moments == 2
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
temp[:, jj] = p
else # solve maximum entropy problem sequentially from low order moments
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
for mm = 4:2:n_moments
lambda_guess = vcat(lambda, 0.0, 0.0) # add 0 to previous lambda
pnew, lambda, moment_error = discrete_approximation(y1D[jj,:],
X -> polynomial_moment(X, cond_mean[jj,ii],
scaling_factor[jj], mm),
gaussian_moment[1:mm]./(scaling_factor[jj].^(1:mm)),
q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5)
@warn(
"Failed to match first $mm moments. Just matching $(mm-2).")
break
else
p = pnew
end
end
temp[:, jj] = p
end
end
end
P[ii, :] .= vec(prod(allcomb3(temp), dims = 2))
end
X = C*D .+ mu # map grids back to original space
M != 1 || (return MarkovChain(P, vec(X)))
return MarkovChain(P, [X[:, i] for i in 1:Nm^M])
end
|
[
298,
425
] |
function discrete_var(b::Union{Real, AbstractVector},
B::Union{Real, AbstractMatrix},
Psi::Union{Real, AbstractMatrix},
Nm::Integer,
n_moments::Integer=2,
method::VAREstimationMethod=Even(),
n_sigmas::Real=sqrt(Nm-1))
# b = zeros(2)
# A = [0.9809 0.0028; 0.041 0.9648]
# Sigma = [7.569e-5 0.0; 0.0 0.00068644]
# N = 9
# n_moments = nMoments
# method = Quantile()
# b, B, Psi, Nm = (zeros(2), A, Sigma, N, nMoments, Quantile())
M, M_ = size(B, 1), size(B, 2)
# Check size restrictions on matrices
M == M_ || throw(ArgumentError("B must be a scalar or square matrix"))
M == length(b) || throw(ArgumentError("b must have the same number of rows as B"))
#% Check that Psi is a valid covariance matrix
isposdef(Psi) || throw(ArgumentError("Psi must be a positive definite matrix"))
# Check that Nm is a valid number of grid points
Nm >= 3 || throw(ArgumentError("Nm must be a positive interger greater than 3"))
# Check that n_moments is a valid number
if n_moments < 1 || !(n_moments % 2 == 0 || n_moments == 1)
error("n_moments must be either 1 or a positive even integer")
end
# warning about persistency
warn_persistency(B, method)
# Compute polynomial moments of standard normal distribution
gaussian_moment = zeros(n_moments)
c = 1
for k=1:floor(Int, n_moments/2)
c = (2*k-1)*c
gaussian_moment[2*k] = c
end
# Compute standardized VAR(1) representation
# (zero mean and diagonal covariance matrix)
A, C, mu, Sigma = standardize_var(b, B, Psi, M)
# Construct 1-D grids
y1D, y1Dhelper = construct_1D_grid(Sigma, Nm, M, n_sigmas, method)
# Construct all possible combinations of elements of the 1-D grids
D = allcomb3(y1D')'
# Construct finite-state Markov chain approximation
# conditional mean of the VAR process at each grid point
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
# normalizing constant for maximum entropy computations
scaling_factor = y1D[:, end]
# used to store some intermediate calculations
temp = Matrix{Float64}(undef, Nm, M)
# store optimized values of lambda (2 moments) to improve initial guesses
lambda_bar = zeros(2*M, Nm^M)
# small positive constant for numerical stability
kappa = 1e-8
for ii = 1:(Nm^M)
# Construct prior guesses for maximum entropy optimizations
q = construct_prior_guess(cond_mean[:, ii], Nm, y1D, y1Dhelper, method)
# Make sure all elements of the prior are stricly positive
q[q.<kappa] .= kappa
for jj = 1:M
# Try to use intelligent initial guesses
if ii == 1
lambda_guess = zeros(2)
else
lambda_guess = lambda_bar[(jj-1)*2+1:jj*2, ii-1]
end
# Maximum entropy optimization
if n_moments == 1 # match only 1 moment
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
else # match 2 moments first
p, lambda, moment_error = discrete_approximation(y1D[jj, :],
X -> polynomial_moment(X, cond_mean[jj, ii], scaling_factor[jj], 2),
[0; 1]./(scaling_factor[jj].^(1:2)), q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5) # if 2 moments fail, just match 1 moment
@warn("Failed to match first 2 moments. Just matching 1.")
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
lambda_bar[(jj-1)*2+1:jj*2, ii] = zeros(2,1)
elseif n_moments == 2
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
temp[:, jj] = p
else # solve maximum entropy problem sequentially from low order moments
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
for mm = 4:2:n_moments
lambda_guess = vcat(lambda, 0.0, 0.0) # add 0 to previous lambda
pnew, lambda, moment_error = discrete_approximation(y1D[jj,:],
X -> polynomial_moment(X, cond_mean[jj,ii],
scaling_factor[jj], mm),
gaussian_moment[1:mm]./(scaling_factor[jj].^(1:mm)),
q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5)
@warn(
"Failed to match first $mm moments. Just matching $(mm-2).")
break
else
p = pnew
end
end
temp[:, jj] = p
end
end
end
P[ii, :] .= vec(prod(allcomb3(temp), dims = 2))
end
X = C*D .+ mu # map grids back to original space
M != 1 || (return MarkovChain(P, vec(X)))
return MarkovChain(P, [X[:, i] for i in 1:Nm^M])
end
|
function discrete_var(b::Union{Real, AbstractVector},
B::Union{Real, AbstractMatrix},
Psi::Union{Real, AbstractMatrix},
Nm::Integer,
n_moments::Integer=2,
method::VAREstimationMethod=Even(),
n_sigmas::Real=sqrt(Nm-1))
# b = zeros(2)
# A = [0.9809 0.0028; 0.041 0.9648]
# Sigma = [7.569e-5 0.0; 0.0 0.00068644]
# N = 9
# n_moments = nMoments
# method = Quantile()
# b, B, Psi, Nm = (zeros(2), A, Sigma, N, nMoments, Quantile())
M, M_ = size(B, 1), size(B, 2)
# Check size restrictions on matrices
M == M_ || throw(ArgumentError("B must be a scalar or square matrix"))
M == length(b) || throw(ArgumentError("b must have the same number of rows as B"))
#% Check that Psi is a valid covariance matrix
isposdef(Psi) || throw(ArgumentError("Psi must be a positive definite matrix"))
# Check that Nm is a valid number of grid points
Nm >= 3 || throw(ArgumentError("Nm must be a positive interger greater than 3"))
# Check that n_moments is a valid number
if n_moments < 1 || !(n_moments % 2 == 0 || n_moments == 1)
error("n_moments must be either 1 or a positive even integer")
end
# warning about persistency
warn_persistency(B, method)
# Compute polynomial moments of standard normal distribution
gaussian_moment = zeros(n_moments)
c = 1
for k=1:floor(Int, n_moments/2)
c = (2*k-1)*c
gaussian_moment[2*k] = c
end
# Compute standardized VAR(1) representation
# (zero mean and diagonal covariance matrix)
A, C, mu, Sigma = standardize_var(b, B, Psi, M)
# Construct 1-D grids
y1D, y1Dhelper = construct_1D_grid(Sigma, Nm, M, n_sigmas, method)
# Construct all possible combinations of elements of the 1-D grids
D = allcomb3(y1D')'
# Construct finite-state Markov chain approximation
# conditional mean of the VAR process at each grid point
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
# normalizing constant for maximum entropy computations
scaling_factor = y1D[:, end]
# used to store some intermediate calculations
temp = Matrix{Float64}(undef, Nm, M)
# store optimized values of lambda (2 moments) to improve initial guesses
lambda_bar = zeros(2*M, Nm^M)
# small positive constant for numerical stability
kappa = 1e-8
for ii = 1:(Nm^M)
# Construct prior guesses for maximum entropy optimizations
q = construct_prior_guess(cond_mean[:, ii], Nm, y1D, y1Dhelper, method)
# Make sure all elements of the prior are stricly positive
q[q.<kappa] .= kappa
for jj = 1:M
# Try to use intelligent initial guesses
if ii == 1
lambda_guess = zeros(2)
else
lambda_guess = lambda_bar[(jj-1)*2+1:jj*2, ii-1]
end
# Maximum entropy optimization
if n_moments == 1 # match only 1 moment
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
else # match 2 moments first
p, lambda, moment_error = discrete_approximation(y1D[jj, :],
X -> polynomial_moment(X, cond_mean[jj, ii], scaling_factor[jj], 2),
[0; 1]./(scaling_factor[jj].^(1:2)), q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5) # if 2 moments fail, just match 1 moment
@warn("Failed to match first 2 moments. Just matching 1.")
temp[:, jj], _, _ = discrete_approximation(y1D[jj, :],
X -> (X'.-cond_mean[jj, ii])/scaling_factor[jj],
[0.0], q[jj, :], [0.0])
lambda_bar[(jj-1)*2+1:jj*2, ii] = zeros(2,1)
elseif n_moments == 2
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
temp[:, jj] = p
else # solve maximum entropy problem sequentially from low order moments
lambda_bar[(jj-1)*2+1:jj*2, ii] = lambda
for mm = 4:2:n_moments
lambda_guess = vcat(lambda, 0.0, 0.0) # add 0 to previous lambda
pnew, lambda, moment_error = discrete_approximation(y1D[jj,:],
X -> polynomial_moment(X, cond_mean[jj,ii],
scaling_factor[jj], mm),
gaussian_moment[1:mm]./(scaling_factor[jj].^(1:mm)),
q[jj, :], lambda_guess)
if !(norm(moment_error) < 1e-5)
@warn(
"Failed to match first $mm moments. Just matching $(mm-2).")
break
else
p = pnew
end
end
temp[:, jj] = p
end
end
end
P[ii, :] .= vec(prod(allcomb3(temp), dims = 2))
end
X = C*D .+ mu # map grids back to original space
M != 1 || (return MarkovChain(P, vec(X)))
return MarkovChain(P, [X[:, i] for i in 1:Nm^M])
end
|
discrete_var
| 298
| 425
|
src/markov/markov_approx.jl
|
#FILE: QuantEcon.jl/src/sampler.jl
##CHUNK 1
struct MVNSampler{TM<:Real,TS<:Real,TQ<:BlasReal}
mu::Vector{TM}
Sigma::Matrix{TS}
Q::Matrix{TQ}
end
function MVNSampler(mu::Vector{TM}, Sigma::Matrix{TS}) where {TM<:Real,TS<:Real}
ATOL1, RTOL1 = 1e-8, 1e-8
ATOL2, RTOL2 = 1e-8, 1e-14
n = length(mu)
if size(Sigma) != (n, n) # Check Sigma is n x n
throw(ArgumentError(
"Sigma must be 2 dimensional and square matrix of same length to mu"
))
end
issymmetric(Sigma) || throw(ArgumentError("Sigma must be symmetric"))
#FILE: QuantEcon.jl/src/arma.jl
##CHUNK 1
theta = [0.0, -0.8]
sigma = 1.0
lp = ARMA(phi, theta, sigma)
require(joinpath(dirname(@__FILE__),"..", "examples", "arma_plots.jl"))
quad_plot(lp)
```
"""
mutable struct ARMA
phi::Vector # AR parameters phi_1, ..., phi_p
theta::Vector # MA parameters theta_1, ..., theta_q
p::Integer # Number of AR coefficients
q::Integer # Number of MA coefficients
sigma::Real # Variance of white noise
ma_poly::Vector # MA polynomial --- filtering representatoin
ar_poly::Vector # AR polynomial --- filtering representation
end
# constructors to coerce phi/theta to vectors
ARMA(phi::Real, theta::Real, sigma::Real) = ARMA([phi;], [theta;], sigma)
ARMA(phi::Real, theta::Vector, sigma::Real) = ARMA([phi;], theta, sigma)
#FILE: QuantEcon.jl/test/test_markov_approx.jl
##CHUNK 1
-4.17854136278901
-2.00057722402480
0
2.00057722402480
4.17854136278901
6.85786965529225
11.2940287556214]
# test transition matrix
@test isapprox(mc.p, P1_matlab, atol = 1e-7, rtol = 1e-7)
# test state values
@test isapprox(mc.state_values, D1_matlab)
# test invalod nMoments
@test_throws ErrorException discrete_var(0, rho, sigma2, N, 3, Even())
# test to match only first moment
nMoments = 1 # number of moments to match
mc = discrete_var(0, rho, sigma2, N, nMoments, Even())
#FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
bet::Real
theta::Real
end
function RBLQ(Q::ScalarOrArray, R::ScalarOrArray, A::ScalarOrArray,
B::ScalarOrArray, C::ScalarOrArray, bet::Real, theta::Real)
k = size(Q, 1)
n = size(R, 1)
j = size(C, 2)
# coerce sizes
A = reshape([A;], n, n)
B = reshape([B;], n, k)
C = reshape([C;], n, j)
R = reshape([R;], n, n)
Q = reshape([Q;], k, k)
RBLQ(A, B, C, Q, R, k, n, j, bet, theta)
end
@doc doc"""
#FILE: QuantEcon.jl/test/test_sampler.jl
##CHUNK 1
@testset "Testing sampler.jl" begin
n = 4
mu = collect(range(0.2, stop=0.6, length=n))
@testset "check positive definite" begin
Sigma = [3.0 1.0 1.0 1.0;
1.0 2.0 1.0 1.0;
1.0 1.0 2.0 1.0;
1.0 1.0 1.0 1.0]
mvns = MVNSampler(mu, Sigma)
@test isapprox(mvns.Q * mvns.Q', mvns.Sigma)
end
@testset "check positive semi-definite zeros" begin
mvns = MVNSampler(mu, zeros(n, n))
@test rand(mvns) == mu
end
@testset "check positive semi-definite ones" begin
mvns = MVNSampler(mu, ones(n, n))
#FILE: QuantEcon.jl/src/quadsums.jl
##CHUNK 1
"""
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
#FILE: QuantEcon.jl/src/lss.jl
##CHUNK 1
mu_0 = reshape([mu_0;], n)
dist = MVNSampler(mu_0,Sigma_0)
LSS(A, C, G, H, k, n, m, l, mu_0, Sigma_0, dist)
end
# make kwarg version
function LSS(A::ScalarOrArray, C::ScalarOrArray, G::ScalarOrArray;
H::ScalarOrArray=zeros(size(G, 1)),
mu_0::Vector=zeros(size(G, 2)),
Sigma_0::Matrix=zeros(size(G, 2), size(G, 2)))
return LSS(A, C, G, H, mu_0, Sigma_0)
end
function simulate(lss::LSS, ts_length=100)
x = Matrix{Float64}(undef, lss.n, ts_length)
x[:, 1] = rand(lss.dist)
w = randn(lss.m, ts_length - 1)
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `k::Integer=n` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Stochastic matrix.
"""
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
#FILE: QuantEcon.jl/test/test_mc_tools.jl
##CHUNK 1
length(x) == length(y) || return false
return all(xy -> isapprox(x, y), zip(x, y))
end
@testset "Testing mc_tools.jl" begin
# Matrix with two recurrent classes [1, 2] and [4, 5, 6],
# which have periods 2 and 3, respectively
Q = [0 1 0 0 0 0
1 0 0 0 0 0
1//2 0 0 1//2 0 0
0 0 0 0 1 0
0 0 0 0 0 1
0 0 0 1 0 0]
Q_stationary_dists = Vector{Rational{Int}}[
[1//2, 1//2, 0, 0, 0, 0], [0, 0, 0, 1//3, 1//3, 1//3]
]
Q_dict_Rational = Dict(
"P" => Q,
"stationary_dists" => Q_stationary_dists,
#CURRENT FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
##### Argument
- `Sigma::ScalarOrArray` : variance-covariance matrix of the standardized process
- `Nm::Integer` : number of grid points
- `M::Integer` : number of variables (`M=1` corresponds to AR(1))
- `n_sigmas::Real` : number of standard error determining end points of grid
- `method::Even` : method for grid making
##### Return
- `y1D` : `M x Nm` matrix of variable grid
- `nothing` : `nothing` of type `Void`
"""
function construct_1D_grid(Sigma::Union{Real, AbstractMatrix}, Nm::Integer,
M::Integer, n_sigmas::Real, method::Even)
min_sigmas = sqrt(minimum(eigen(Sigma).values))
y1Drow = collect(range(-min_sigmas*n_sigmas, stop=min_sigmas*n_sigmas, length=Nm))'
y1D = repeat(y1Drow, M, 1)
|
494
| 506
|
QuantEcon.jl
| 226
|
function standardize_var(b::AbstractVector, B::AbstractMatrix,
Psi::AbstractMatrix, M::Integer)
C1 = cholesky(Psi).L
mu = ((I - B)\ I)*b
A1 = C1\(B*C1)
# unconditional variance
Sigma1 = reshape(((I-kron(A1,A1))\I)*vec(Matrix(I, M, M)),M,M)
U, _ = min_var_trace(Sigma1)
A = U'*A1*U
Sigma = U'*Sigma1*U
C = C1*U
return A, C, mu, Sigma
end
|
function standardize_var(b::AbstractVector, B::AbstractMatrix,
Psi::AbstractMatrix, M::Integer)
C1 = cholesky(Psi).L
mu = ((I - B)\ I)*b
A1 = C1\(B*C1)
# unconditional variance
Sigma1 = reshape(((I-kron(A1,A1))\I)*vec(Matrix(I, M, M)),M,M)
U, _ = min_var_trace(Sigma1)
A = U'*A1*U
Sigma = U'*Sigma1*U
C = C1*U
return A, C, mu, Sigma
end
|
[
494,
506
] |
function standardize_var(b::AbstractVector, B::AbstractMatrix,
Psi::AbstractMatrix, M::Integer)
C1 = cholesky(Psi).L
mu = ((I - B)\ I)*b
A1 = C1\(B*C1)
# unconditional variance
Sigma1 = reshape(((I-kron(A1,A1))\I)*vec(Matrix(I, M, M)),M,M)
U, _ = min_var_trace(Sigma1)
A = U'*A1*U
Sigma = U'*Sigma1*U
C = C1*U
return A, C, mu, Sigma
end
|
function standardize_var(b::AbstractVector, B::AbstractMatrix,
Psi::AbstractMatrix, M::Integer)
C1 = cholesky(Psi).L
mu = ((I - B)\ I)*b
A1 = C1\(B*C1)
# unconditional variance
Sigma1 = reshape(((I-kron(A1,A1))\I)*vec(Matrix(I, M, M)),M,M)
U, _ = min_var_trace(Sigma1)
A = U'*A1*U
Sigma = U'*Sigma1*U
C = C1*U
return A, C, mu, Sigma
end
|
standardize_var
| 494
| 506
|
src/markov/markov_approx.jl
|
#FILE: QuantEcon.jl/other/regression.jl
##CHUNK 1
U, S, V = svd(X1, thin=true)
S_inv = diagm(0 => 1.0 ./ S)
B1 = V*S_inv * U' * Y1
B = de_normalize(X, Y, B1)
else
U, S, V = svd(X, thin=true)
S_inv = diagm(1.0 ./ S)
B = V * S_inv * U' * Y
end
return B
end
function LAD_PP(X, Y, normalize=true)
T, n1 = size(X)
N = size(Y, 2)
if normalize
n1 -= 1
X1, Y1 = normalize_data(X, Y)
##CHUNK 2
# B1 = inv(X1' * X1 + T / n1 * eye(n1) * 10.0^ penalty) * X1' * Y1
B1 = inv(X1' * X1 + T / n1 * I * 10.0^ penalty) * X1' * Y1
B = de_normalize(X, Y, B1)
return B
end
function RLS_TSVD(X, Y, penalty=7)
T, n = size(X)
n1 = n - 1
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1; thin=true)
r = sum((maximum(S)./ S) .<= 10.0^penalty)
Sr_inv = zeros(Float64, n1, n1)
Sr_inv[1:r, 1:r] = diagm(1./ S[1:r])
B1 = V*Sr_inv*U'*Y1
B = de_normalize(X, Y, B1)
return B
end
##CHUNK 3
return B
end
function LS_SVD(X, Y, normalize=true)
# OLS using singular value decomposition
# Verified on 11-2-13
if normalize
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1, thin=true)
S_inv = diagm(0 => 1.0 ./ S)
B1 = V*S_inv * U' * Y1
B = de_normalize(X, Y, B1)
else
U, S, V = svd(X, thin=true)
S_inv = diagm(1.0 ./ S)
B = V * S_inv * U' * Y
end
return B
##CHUNK 4
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1; thin=true)
r = sum((maximum(S)./ S) .<= 10.0^penalty)
Sr_inv = zeros(Float64, n1, n1)
Sr_inv[1:r, 1:r] = diagm(1./ S[1:r])
B1 = V*Sr_inv*U'*Y1
B = de_normalize(X, Y, B1)
return B
end
function RLAD_PP(X, Y, penalty=7)
# TODO: There is a bug here. linprog returns wrong answer, even when
# MATLAB gets it right (lame)
T, n1 = size(X)
N = size(Y, 2)
n1 -= 1
X1, Y1 = normalize_data(X, Y)
#FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n) .* sqrt_vcv
ϵj = z1 * R
ωj = ones(n_nodes) ./ n_nodes
ϵj, ωj
end
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
#FILE: QuantEcon.jl/src/quadsums.jl
##CHUNK 1
"""
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
#FILE: QuantEcon.jl/src/lss.jl
##CHUNK 1
struct LSSMoments
lss::LSS
end
function Base.iterate(L::LSSMoments, state=(copy(L.lss.mu_0),
copy(L.lss.Sigma_0)))
A, C, G, H = L.lss.A, L.lss.C, L.lss.G, L.lss.H
mu_x, Sigma_x = state
mu_y, Sigma_y = G * mu_x, G * Sigma_x * G' + H * H'
# Update moments of x
mu_x2 = A * mu_x
Sigma_x2 = A * Sigma_x * A' + C * C'
return ((mu_x, mu_y, Sigma_x, Sigma_y), (mu_x2, Sigma_x2))
end
#CURRENT FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
A, C, mu, Sigma = standardize_var(b, B, Psi, M)
# Construct 1-D grids
y1D, y1Dhelper = construct_1D_grid(Sigma, Nm, M, n_sigmas, method)
# Construct all possible combinations of elements of the 1-D grids
D = allcomb3(y1D')'
# Construct finite-state Markov chain approximation
# conditional mean of the VAR process at each grid point
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
# normalizing constant for maximum entropy computations
scaling_factor = y1D[:, end]
# used to store some intermediate calculations
temp = Matrix{Float64}(undef, Nm, M)
# store optimized values of lambda (2 moments) to improve initial guesses
lambda_bar = zeros(2*M, Nm^M)
# small positive constant for numerical stability
##CHUNK 2
```math
y_{t+1} = b + By_{t} + \Psi^{\frac{1}{2}}\epsilon_{t+1}
```
where ``\epsilon_{t+1}`` is an vector of independent standard normal
innovations of length `M`
```julia
P, X = discrete_var(b, B, Psi, Nm, n_moments, method, n_sigmas)
```
##### Arguments
- `b::Union{Real, AbstractVector}` : constant vector of length `M`.
`M=1` corresponds scalar case
- `B::Union{Real, AbstractMatrix}` : `M x M` matrix of impact coefficients
- `Psi::Union{Real, AbstractMatrix}` : `M x M` variance-covariance matrix of
the innovations
- `discrete_var` only accepts non-singular variance-covariance matrices, `Psi`.
- `Nm::Integer > 3` : Desired number of discrete points in each dimension
##CHUNK 3
- `A::Real` : impact coefficient of standardized AR(1) process
- `C::Real` : standard deviation of the innovation
- `mu::Real` : mean of the standardized AR(1) process
- `Sigma::Real` : variance of the standardized AR(1) process
"""
function standardize_var(b::Real, B::Real, Psi::Real, M::Integer)
C = sqrt(Psi)
A = B
mu = [b/(1-B)] # mean of the process
Sigma = 1/(1-B^2) #
return A, C, mu, Sigma
end
"""
return standerdized VAR(1) representation
##### Arguments
|
720
| 759
|
QuantEcon.jl
| 227
|
function discrete_approximation(D::AbstractVector, T::Function, Tbar::AbstractVector,
q::AbstractVector=ones(length(D))/length(D), # Default prior weights
lambda0::AbstractVector=zeros(Tbar))
# Input error checking
N = length(D)
Tx = T(D)
L, N2 = size(Tx)
if N2 != N || length(Tbar) != L || length(lambda0) != L || length(q) != N2
error("Dimension mismatch")
end
# Compute maximum entropy discrete distribution
options = Optim.Options(f_tol=1e-16, x_tol=1e-16)
obj(lambda) = entropy_obj(lambda, Tx, Tbar, q)
grad!(grad, lambda) = entropy_grad!(grad, lambda, Tx, Tbar, q)
hess!(hess, lambda) = entropy_hess!(hess, lambda, Tx, Tbar, q)
res = Optim.optimize(obj, grad!, hess!, lambda0, Optim.Newton(), options)
# Sometimes the algorithm fails to converge if the initial guess is too far
# away from the truth. If this occurs, the program tries an initial guess
# of all zeros.
if !Optim.converged(res) && all(lambda0 .!= 0.0)
@warn("Failed to find a solution from provided initial guess. Trying new initial guess.")
res = Optim.optimize(obj, grad!, hess!, zeros(lambda0), Optim.Newton(), options)
# check convergence
Optim.converged(res) || error("Failed to find a solution.")
end
# Compute final probability weights and moment errors
lambda_bar = Optim.minimizer(res)
minimum_value = Optim.minimum(res)
Tdiff = Tx .- Tbar
p = (q'.*exp.(lambda_bar'*Tdiff))/minimum_value
grad = similar(lambda0)
grad!(grad, Optim.minimizer(res))
moment_error = grad/minimum_value
return p, lambda_bar, moment_error
end
|
function discrete_approximation(D::AbstractVector, T::Function, Tbar::AbstractVector,
q::AbstractVector=ones(length(D))/length(D), # Default prior weights
lambda0::AbstractVector=zeros(Tbar))
# Input error checking
N = length(D)
Tx = T(D)
L, N2 = size(Tx)
if N2 != N || length(Tbar) != L || length(lambda0) != L || length(q) != N2
error("Dimension mismatch")
end
# Compute maximum entropy discrete distribution
options = Optim.Options(f_tol=1e-16, x_tol=1e-16)
obj(lambda) = entropy_obj(lambda, Tx, Tbar, q)
grad!(grad, lambda) = entropy_grad!(grad, lambda, Tx, Tbar, q)
hess!(hess, lambda) = entropy_hess!(hess, lambda, Tx, Tbar, q)
res = Optim.optimize(obj, grad!, hess!, lambda0, Optim.Newton(), options)
# Sometimes the algorithm fails to converge if the initial guess is too far
# away from the truth. If this occurs, the program tries an initial guess
# of all zeros.
if !Optim.converged(res) && all(lambda0 .!= 0.0)
@warn("Failed to find a solution from provided initial guess. Trying new initial guess.")
res = Optim.optimize(obj, grad!, hess!, zeros(lambda0), Optim.Newton(), options)
# check convergence
Optim.converged(res) || error("Failed to find a solution.")
end
# Compute final probability weights and moment errors
lambda_bar = Optim.minimizer(res)
minimum_value = Optim.minimum(res)
Tdiff = Tx .- Tbar
p = (q'.*exp.(lambda_bar'*Tdiff))/minimum_value
grad = similar(lambda0)
grad!(grad, Optim.minimizer(res))
moment_error = grad/minimum_value
return p, lambda_bar, moment_error
end
|
[
720,
759
] |
function discrete_approximation(D::AbstractVector, T::Function, Tbar::AbstractVector,
q::AbstractVector=ones(length(D))/length(D), # Default prior weights
lambda0::AbstractVector=zeros(Tbar))
# Input error checking
N = length(D)
Tx = T(D)
L, N2 = size(Tx)
if N2 != N || length(Tbar) != L || length(lambda0) != L || length(q) != N2
error("Dimension mismatch")
end
# Compute maximum entropy discrete distribution
options = Optim.Options(f_tol=1e-16, x_tol=1e-16)
obj(lambda) = entropy_obj(lambda, Tx, Tbar, q)
grad!(grad, lambda) = entropy_grad!(grad, lambda, Tx, Tbar, q)
hess!(hess, lambda) = entropy_hess!(hess, lambda, Tx, Tbar, q)
res = Optim.optimize(obj, grad!, hess!, lambda0, Optim.Newton(), options)
# Sometimes the algorithm fails to converge if the initial guess is too far
# away from the truth. If this occurs, the program tries an initial guess
# of all zeros.
if !Optim.converged(res) && all(lambda0 .!= 0.0)
@warn("Failed to find a solution from provided initial guess. Trying new initial guess.")
res = Optim.optimize(obj, grad!, hess!, zeros(lambda0), Optim.Newton(), options)
# check convergence
Optim.converged(res) || error("Failed to find a solution.")
end
# Compute final probability weights and moment errors
lambda_bar = Optim.minimizer(res)
minimum_value = Optim.minimum(res)
Tdiff = Tx .- Tbar
p = (q'.*exp.(lambda_bar'*Tdiff))/minimum_value
grad = similar(lambda0)
grad!(grad, Optim.minimizer(res))
moment_error = grad/minimum_value
return p, lambda_bar, moment_error
end
|
function discrete_approximation(D::AbstractVector, T::Function, Tbar::AbstractVector,
q::AbstractVector=ones(length(D))/length(D), # Default prior weights
lambda0::AbstractVector=zeros(Tbar))
# Input error checking
N = length(D)
Tx = T(D)
L, N2 = size(Tx)
if N2 != N || length(Tbar) != L || length(lambda0) != L || length(q) != N2
error("Dimension mismatch")
end
# Compute maximum entropy discrete distribution
options = Optim.Options(f_tol=1e-16, x_tol=1e-16)
obj(lambda) = entropy_obj(lambda, Tx, Tbar, q)
grad!(grad, lambda) = entropy_grad!(grad, lambda, Tx, Tbar, q)
hess!(hess, lambda) = entropy_hess!(hess, lambda, Tx, Tbar, q)
res = Optim.optimize(obj, grad!, hess!, lambda0, Optim.Newton(), options)
# Sometimes the algorithm fails to converge if the initial guess is too far
# away from the truth. If this occurs, the program tries an initial guess
# of all zeros.
if !Optim.converged(res) && all(lambda0 .!= 0.0)
@warn("Failed to find a solution from provided initial guess. Trying new initial guess.")
res = Optim.optimize(obj, grad!, hess!, zeros(lambda0), Optim.Newton(), options)
# check convergence
Optim.converged(res) || error("Failed to find a solution.")
end
# Compute final probability weights and moment errors
lambda_bar = Optim.minimizer(res)
minimum_value = Optim.minimum(res)
Tdiff = Tx .- Tbar
p = (q'.*exp.(lambda_bar'*Tdiff))/minimum_value
grad = similar(lambda0)
grad!(grad, Optim.minimizer(res))
moment_error = grad/minimum_value
return p, lambda_bar, moment_error
end
|
discrete_approximation
| 720
| 759
|
src/markov/markov_approx.jl
|
#FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
"""
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
##CHUNK 2
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
##CHUNK 3
##### Arguments
- `rlq::RBLQ`: Instance of `RBLQ` type
- `F::Matrix{Float64}` The policy function, a `k x n` array
- `K::Matrix{Float64}` The worst case matrix, a `j x n` array
- `x0::Vector{Float64}` : The initial condition for state
##### Returns
- `e::Float64` The deterministic entropy
"""
function compute_deterministic_entropy(rlq::RBLQ, F, K, x0)
B, C, bet = rlq.B, rlq.C, rlq.bet
H0 = K'*K
C0 = zeros(Float64, rlq.n, 1)
A0 = A - B*F + C*K
return var_quadratic_sum(A0, C0, H0, bet, x0)
end
##CHUNK 4
##### Returns
- `P_F::Matrix{Float64}` : Matrix for discounted cost
- `d_F::Float64` : Constant for discounted cost
- `K_F::Matrix{Float64}` : Worst case policy
- `O_F::Matrix{Float64}` : Matrix for discounted entropy
- `o_F::Float64` : Constant for discounted entropy
"""
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
#CURRENT FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
cond_mean = A*D
# probability transition matrix
P = ones(Nm^M, Nm^M)
# normalizing constant for maximum entropy computations
scaling_factor = y1D[:, end]
# used to store some intermediate calculations
temp = Matrix{Float64}(undef, Nm, M)
# store optimized values of lambda (2 moments) to improve initial guesses
lambda_bar = zeros(2*M, Nm^M)
# small positive constant for numerical stability
kappa = 1e-8
for ii = 1:(Nm^M)
# Construct prior guesses for maximum entropy optimizations
q = construct_prior_guess(cond_mean[:, ii], Nm, y1D, y1Dhelper, method)
# Make sure all elements of the prior are stricly positive
q[q.<kappa] .= kappa
##CHUNK 2
"""
function entropy_grad!(grad::AbstractVector, lambda::AbstractVector,
Tx::AbstractMatrix, Tbar::AbstractVector, q::AbstractVector)
Tdiff = Tx .- Tbar
temp = q'.*exp.(lambda'*Tdiff)
temp2 = temp.*Tdiff
grad .= vec(sum(temp2, dims = 2))
end
"""
Compute hessian of objective function
##### Returns
- `hess` : `L x L` hessian matrix of the objective function evaluated at `lambda`
"""
function entropy_hess!(hess::AbstractMatrix, lambda::AbstractVector,
Tx::AbstractMatrix, Tbar::AbstractVector, q::AbstractVector)
Tdiff = Tx .- Tbar
##CHUNK 3
function min_var_trace(A::AbstractMatrix)
==(size(A)...) || throw(ArgumentError("input matrix must be square"))
K = size(A, 1) # size of A
d = tr(A)/K # diagonal of U'*A*U should be closest to d
function obj(X, grad)
X = reshape(X, K, K)
return (norm(diag(X'*A*X) .- d))
end
function unitary_constraint(res, X, grad)
X = reshape(X, K, K)
res .= vec(X'*X - Matrix(I, K, K))
end
opt = NLopt.Opt(:LN_COBYLA, K^2)
NLopt.min_objective!(opt, obj)
NLopt.equality_constraint!(opt, unitary_constraint, zeros(K^2))
fval, U_vec, ret = NLopt.optimize(opt, vec(Matrix(I, K, K)))
##CHUNK 4
Compute hessian of objective function
##### Returns
- `hess` : `L x L` hessian matrix of the objective function evaluated at `lambda`
"""
function entropy_hess!(hess::AbstractMatrix, lambda::AbstractVector,
Tx::AbstractMatrix, Tbar::AbstractVector, q::AbstractVector)
Tdiff = Tx .- Tbar
temp = q'.*exp.(lambda'*Tdiff)
temp2 = temp.*Tdiff
hess .= temp2*Tdiff'
end
@doc doc"""
find a unitary matrix `U` such that the diagonal components of `U'AU` is as
close to a multiple of identity matrix as possible
##CHUNK 5
- `obj` : scalar value of objective function evaluated at `lambda`
"""
function entropy_obj(lambda::AbstractVector, Tx::AbstractMatrix,
Tbar::AbstractVector, q::AbstractVector)
# Compute objective function
Tdiff = Tx .- Tbar
temp = q' .* exp.(lambda'*Tdiff)
obj = sum(temp)
return obj
end
"""
Compute gradient of objective function
##### Returns
- `grad` : length `L` gradient vector of the objective function evaluated at `lambda`
##CHUNK 6
return obj
end
"""
Compute gradient of objective function
##### Returns
- `grad` : length `L` gradient vector of the objective function evaluated at `lambda`
"""
function entropy_grad!(grad::AbstractVector, lambda::AbstractVector,
Tx::AbstractMatrix, Tbar::AbstractVector, q::AbstractVector)
Tdiff = Tx .- Tbar
temp = q'.*exp.(lambda'*Tdiff)
temp2 = temp.*Tdiff
grad .= vec(sum(temp2, dims = 2))
end
"""
|
871
| 892
|
QuantEcon.jl
| 228
|
function min_var_trace(A::AbstractMatrix)
==(size(A)...) || throw(ArgumentError("input matrix must be square"))
K = size(A, 1) # size of A
d = tr(A)/K # diagonal of U'*A*U should be closest to d
function obj(X, grad)
X = reshape(X, K, K)
return (norm(diag(X'*A*X) .- d))
end
function unitary_constraint(res, X, grad)
X = reshape(X, K, K)
res .= vec(X'*X - Matrix(I, K, K))
end
opt = NLopt.Opt(:LN_COBYLA, K^2)
NLopt.min_objective!(opt, obj)
NLopt.equality_constraint!(opt, unitary_constraint, zeros(K^2))
fval, U_vec, ret = NLopt.optimize(opt, vec(Matrix(I, K, K)))
return reshape(U_vec, K, K), fval
end
|
function min_var_trace(A::AbstractMatrix)
==(size(A)...) || throw(ArgumentError("input matrix must be square"))
K = size(A, 1) # size of A
d = tr(A)/K # diagonal of U'*A*U should be closest to d
function obj(X, grad)
X = reshape(X, K, K)
return (norm(diag(X'*A*X) .- d))
end
function unitary_constraint(res, X, grad)
X = reshape(X, K, K)
res .= vec(X'*X - Matrix(I, K, K))
end
opt = NLopt.Opt(:LN_COBYLA, K^2)
NLopt.min_objective!(opt, obj)
NLopt.equality_constraint!(opt, unitary_constraint, zeros(K^2))
fval, U_vec, ret = NLopt.optimize(opt, vec(Matrix(I, K, K)))
return reshape(U_vec, K, K), fval
end
|
[
871,
892
] |
function min_var_trace(A::AbstractMatrix)
==(size(A)...) || throw(ArgumentError("input matrix must be square"))
K = size(A, 1) # size of A
d = tr(A)/K # diagonal of U'*A*U should be closest to d
function obj(X, grad)
X = reshape(X, K, K)
return (norm(diag(X'*A*X) .- d))
end
function unitary_constraint(res, X, grad)
X = reshape(X, K, K)
res .= vec(X'*X - Matrix(I, K, K))
end
opt = NLopt.Opt(:LN_COBYLA, K^2)
NLopt.min_objective!(opt, obj)
NLopt.equality_constraint!(opt, unitary_constraint, zeros(K^2))
fval, U_vec, ret = NLopt.optimize(opt, vec(Matrix(I, K, K)))
return reshape(U_vec, K, K), fval
end
|
function min_var_trace(A::AbstractMatrix)
==(size(A)...) || throw(ArgumentError("input matrix must be square"))
K = size(A, 1) # size of A
d = tr(A)/K # diagonal of U'*A*U should be closest to d
function obj(X, grad)
X = reshape(X, K, K)
return (norm(diag(X'*A*X) .- d))
end
function unitary_constraint(res, X, grad)
X = reshape(X, K, K)
res .= vec(X'*X - Matrix(I, K, K))
end
opt = NLopt.Opt(:LN_COBYLA, K^2)
NLopt.min_objective!(opt, obj)
NLopt.equality_constraint!(opt, unitary_constraint, zeros(K^2))
fval, U_vec, ret = NLopt.optimize(opt, vec(Matrix(I, K, K)))
return reshape(U_vec, K, K), fval
end
|
min_var_trace
| 871
| 892
|
src/markov/markov_approx.jl
|
#FILE: QuantEcon.jl/src/quadsums.jl
##CHUNK 1
"""
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
#FILE: QuantEcon.jl/other/regression.jl
##CHUNK 1
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1; thin=true)
r = sum((maximum(S)./ S) .<= 10.0^penalty)
Sr_inv = zeros(Float64, n1, n1)
Sr_inv[1:r, 1:r] = diagm(1./ S[1:r])
B1 = V*Sr_inv*U'*Y1
B = de_normalize(X, Y, B1)
return B
end
function RLAD_PP(X, Y, penalty=7)
# TODO: There is a bug here. linprog returns wrong answer, even when
# MATLAB gets it right (lame)
T, n1 = size(X)
N = size(Y, 2)
n1 -= 1
X1, Y1 = normalize_data(X, Y)
##CHUNK 2
# B1 = inv(X1' * X1 + T / n1 * eye(n1) * 10.0^ penalty) * X1' * Y1
B1 = inv(X1' * X1 + T / n1 * I * 10.0^ penalty) * X1' * Y1
B = de_normalize(X, Y, B1)
return B
end
function RLS_TSVD(X, Y, penalty=7)
T, n = size(X)
n1 = n - 1
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1; thin=true)
r = sum((maximum(S)./ S) .<= 10.0^penalty)
Sr_inv = zeros(Float64, n1, n1)
Sr_inv[1:r, 1:r] = diagm(1./ S[1:r])
B1 = V*Sr_inv*U'*Y1
B = de_normalize(X, Y, B1)
return B
end
##CHUNK 3
Aeq = [X1 Matrix(I, T, T) -Matrix(I, T, T)]
B1 = zeros(size(X1, 2), N)
for j = 1:N
beq = Y1[:, j]
sol = linprog(f, Aeq, '=', beq, LB, UB)
B1[:, j] = sol.sol[1:n1]
end
B = normalize ? de_normalize(X, Y, B1) : B1
return B
end
function LAD_DP(X, Y, normalize=true)
T, n1 = size(X)
N = size(Y, 2)
if normalize
##CHUNK 4
else
X1 = X
Y1 = Y
end
#lower and upper bound
LB = [zeros(n1)-100; zeros(2*T)]
UB = [zeros(n1)+100; fill(Inf, 2T)]
f = [zeros(n1); ones(2*T)]
Aeq = [X1 Matrix(I, T, T) -Matrix(I, T, T)]
B1 = zeros(size(X1, 2), N)
for j = 1:N
beq = Y1[:, j]
sol = linprog(f, Aeq, '=', beq, LB, UB)
B1[:, j] = sol.sol[1:n1]
end
B = normalize ? de_normalize(X, Y, B1) : B1
#FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
function evaluate_F(rlq::RBLQ, F::Matrix)
R, Q, A, B, C = rlq.R, rlq.Q, rlq.A, rlq.B, rlq.C
bet, theta, j = rlq.bet, rlq.theta, rlq.j
# Solve for policies and costs using agent 2's problem
K_F, P_F = F_to_K(rlq, F)
# I = eye(j)
H = inv(I - C'*P_F*C./theta)
d_F = log(det(H))
# compute O_F and o_F
sig = -1.0 / theta
AO = sqrt(bet) .* (A - B*F + C*K_F)
O_F = solve_discrete_lyapunov(AO', bet*K_F'*K_F)
ho = (tr(H .- 1) - d_F) / 2.0
trace = tr(O_F*C*H*C')
o_F = (ho + bet*trace) / (1 - bet)
return K_F, P_F, d_F, O_F, o_F
end
##CHUNK 2
"""
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
##CHUNK 3
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
if iterate >= max_iter
@warn("Maximum iterations in robust_rul_simple")
end
# I = eye(j)
K = (theta.*I - C'*P*C)\(C'*P)*(A - B*F)
return F, K, P
end
#CURRENT FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
- `Sigma::AbstractMatrix` : variance-covariance matrix of the standardized VAR(1) process
"""
function standardize_var(b::AbstractVector, B::AbstractMatrix,
Psi::AbstractMatrix, M::Integer)
C1 = cholesky(Psi).L
mu = ((I - B)\ I)*b
A1 = C1\(B*C1)
# unconditional variance
Sigma1 = reshape(((I-kron(A1,A1))\I)*vec(Matrix(I, M, M)),M,M)
U, _ = min_var_trace(Sigma1)
A = U'*A1*U
Sigma = U'*Sigma1*U
C = C1*U
return A, C, mu, Sigma
end
"""
construct prior guess for evenly spaced grid method
##CHUNK 2
q::AbstractVector=ones(length(D))/length(D), # Default prior weights
lambda0::AbstractVector=zeros(Tbar))
# Input error checking
N = length(D)
Tx = T(D)
L, N2 = size(Tx)
if N2 != N || length(Tbar) != L || length(lambda0) != L || length(q) != N2
error("Dimension mismatch")
end
# Compute maximum entropy discrete distribution
options = Optim.Options(f_tol=1e-16, x_tol=1e-16)
obj(lambda) = entropy_obj(lambda, Tx, Tbar, q)
grad!(grad, lambda) = entropy_grad!(grad, lambda, Tx, Tbar, q)
hess!(hess, lambda) = entropy_hess!(hess, lambda, Tx, Tbar, q)
res = Optim.optimize(obj, grad!, hess!, lambda0, Optim.Newton(), options)
# Sometimes the algorithm fails to converge if the initial guess is too far
|
35
| 51
|
QuantEcon.jl
| 229
|
function MarkovChain{T,TM,TV}(p::AbstractMatrix, state_values) where {T,TM,TV}
n, m = size(p)
n != m &&
throw(DimensionMismatch("stochastic matrix must be square"))
minimum(p) <0 &&
throw(ArgumentError("stochastic matrix must have nonnegative elements"))
!check_stochastic_matrix(p) &&
throw(ArgumentError("stochastic matrix rows must sum to 1"))
length(state_values) != n &&
throw(DimensionMismatch("state_values should have $n elements"))
return new{T,TM,TV}(p, state_values)
end
|
function MarkovChain{T,TM,TV}(p::AbstractMatrix, state_values) where {T,TM,TV}
n, m = size(p)
n != m &&
throw(DimensionMismatch("stochastic matrix must be square"))
minimum(p) <0 &&
throw(ArgumentError("stochastic matrix must have nonnegative elements"))
!check_stochastic_matrix(p) &&
throw(ArgumentError("stochastic matrix rows must sum to 1"))
length(state_values) != n &&
throw(DimensionMismatch("state_values should have $n elements"))
return new{T,TM,TV}(p, state_values)
end
|
[
35,
51
] |
function MarkovChain{T,TM,TV}(p::AbstractMatrix, state_values) where {T,TM,TV}
n, m = size(p)
n != m &&
throw(DimensionMismatch("stochastic matrix must be square"))
minimum(p) <0 &&
throw(ArgumentError("stochastic matrix must have nonnegative elements"))
!check_stochastic_matrix(p) &&
throw(ArgumentError("stochastic matrix rows must sum to 1"))
length(state_values) != n &&
throw(DimensionMismatch("state_values should have $n elements"))
return new{T,TM,TV}(p, state_values)
end
|
function MarkovChain{T,TM,TV}(p::AbstractMatrix, state_values) where {T,TM,TV}
n, m = size(p)
n != m &&
throw(DimensionMismatch("stochastic matrix must be square"))
minimum(p) <0 &&
throw(ArgumentError("stochastic matrix must have nonnegative elements"))
!check_stochastic_matrix(p) &&
throw(ArgumentError("stochastic matrix rows must sum to 1"))
length(state_values) != n &&
throw(DimensionMismatch("state_values should have $n elements"))
return new{T,TM,TV}(p, state_values)
end
|
MarkovChain{T,TM,TV}
| 35
| 51
|
src/markov/mc_tools.jl
|
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
return transpose(p)
end
random_stochastic_matrix(n::Integer, k::Integer=n) =
random_stochastic_matrix(Random.GLOBAL_RNG, n, k)
"""
_random_stochastic_matrix([rng], n, m; k=n)
Generate a "non-square column stochstic matrix" of shape `(n, m)`, which contains
as columns `m` probability vectors of length `n` with `k` nonzero entries.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `m::Integer` : Number of probability vectors.
- `;k::Integer(n)` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
##CHUNK 2
Generate a "non-square column stochstic matrix" of shape `(n, m)`, which contains
as columns `m` probability vectors of length `n` with `k` nonzero entries.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `m::Integer` : Number of probability vectors.
- `;k::Integer(n)` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Array of shape `(n, m)` containing `m` probability vectors of
length `n` as columns.
"""
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
##CHUNK 3
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `k::Integer=n` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Stochastic matrix.
"""
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
##CHUNK 4
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
return transpose(p)
end
random_stochastic_matrix(n::Integer, k::Integer=n) =
random_stochastic_matrix(Random.GLOBAL_RNG, n, k)
"""
_random_stochastic_matrix([rng], n, m; k=n)
#FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
- `mc::MarkovChain{T}` : A Markov chain holding the state values and
transition matrix
"""
function estimate_mc_discrete(X::Vector{T}, states::Vector{T}) where T
# Get length of simulation
capT = length(X)
# Make sure all of the passed in states appear in X... If not
# throw an error
if any(!in(x, X) for x in states)
error("One of the states does not appear in history X")
end
# Count states and store in dictionary
nstates = length(states)
d = Dict{T, Int}(zip(states, 1:nstates))
# Counter matrix and dictionary mapping i -> states
cm = zeros(nstates, nstates)
#FILE: QuantEcon.jl/test/test_random_mc.jl
##CHUNK 1
@testset "Test random_stochastic_matrix" begin
n, k = 5, 3
Ps = (random_stochastic_matrix(n), random_stochastic_matrix(n, k))
for P in Ps
@test all(P .>= 0) == true
@test all(x->isapprox(sum(x), 1),
[P[i, :] for i in 1:size(P)[1]]) == true
end
seed = 1234
rngs = [MersenneTwister(seed) for i in 1:2]
Ps = random_stochastic_matrix.(rngs, n, k)
@test Ps[2] == Ps[1]
end
@testset "Test random_stochastic_matrix with k=1" begin
n, k = 3, 1
P = random_stochastic_matrix(n, k)
@test all((P .== 0) .| (P .== 1))
@test all(x->isequal(sum(x), 1),
#CURRENT FILE: QuantEcon.jl/src/markov/mc_tools.jl
##CHUNK 1
Methods are available that provide useful information such as the stationary
distributions, and communication and recurrent classes, and allow simulation of
state transitions.
##### Fields
- `p::AbstractMatrix` : The transition matrix. Must be square, all elements must be nonnegative, and all rows must sum to unity.
- `state_values::AbstractVector` : Vector containing the values associated with the states.
"""
mutable struct MarkovChain{T, TM<:AbstractMatrix{T}, TV<:AbstractVector}
p::TM # valid stochastic matrix
state_values::TV
end
# Provide constructor that infers T from eltype of matrix
MarkovChain(p::AbstractMatrix, state_values=1:size(p, 1)) =
MarkovChain{eltype(p), typeof(p), typeof(state_values)}(p, state_values)
##CHUNK 2
mutable struct MarkovChain{T, TM<:AbstractMatrix{T}, TV<:AbstractVector}
p::TM # valid stochastic matrix
state_values::TV
end
# Provide constructor that infers T from eltype of matrix
MarkovChain(p::AbstractMatrix, state_values=1:size(p, 1)) =
MarkovChain{eltype(p), typeof(p), typeof(state_values)}(p, state_values)
Base.eltype(mc::MarkovChain{T,TM,TV}) where {T,TM,TV} = eltype(TV)
"Number of states in the Markov chain `mc`"
n_states(mc::MarkovChain) = size(mc.p, 1)
function Base.show(io::IO, mc::MarkovChain{T,TM}) where {T,TM}
println(io, "Discrete Markov Chain")
println(io, "stochastic matrix of type $TM:")
print(io, mc.p)
end
##CHUNK 3
Base.eltype(mc::MarkovChain{T,TM,TV}) where {T,TM,TV} = eltype(TV)
"Number of states in the Markov chain `mc`"
n_states(mc::MarkovChain) = size(mc.p, 1)
function Base.show(io::IO, mc::MarkovChain{T,TM}) where {T,TM}
println(io, "Discrete Markov Chain")
println(io, "stochastic matrix of type $TM:")
print(io, mc.p)
end
@doc doc"""
This routine computes the stationary distribution of an irreducible Markov
transition matrix (stochastic matrix) or transition rate matrix (generator
matrix) ``A``.
More generally, given a Metzler matrix (square matrix whose off-diagonal
entries are all nonnegative) ``A``, this routine solves for a nonzero solution
``x`` to ``x (A - D) = 0``, where ``D`` is the diagonal matrix for which the rows of
``A - D`` sum to zero (i.e., ``D_{ii} = \sum_j A_{ij}`` for all ``i``). One (and only
##CHUNK 4
https://lectures.quantecon.org/jl/finite_markov.html
=#
import Graphs: DiGraph, period, attracting_components,
strongly_connected_components, is_strongly_connected
@inline check_stochastic_matrix(P) = maximum(abs, sum(P, dims = 2) .- 1) < 5e-15 ? true : false
"""
Finite-state discrete-time Markov chain.
Methods are available that provide useful information such as the stationary
distributions, and communication and recurrent classes, and allow simulation of
state transitions.
##### Fields
- `p::AbstractMatrix` : The transition matrix. Must be square, all elements must be nonnegative, and all rows must sum to unity.
- `state_values::AbstractVector` : Vector containing the values associated with the states.
"""
|
117
| 147
|
QuantEcon.jl
| 230
|
function gth_solve!(A::Matrix{T}) where T<:Real
n = size(A, 1)
x = zeros(T, n)
@inbounds for k in 1:n-1
scale = sum(A[k, k+1:n])
if scale <= zero(T)
# There is one (and only one) recurrent class contained in
# {1, ..., k};
# compute the solution associated with that recurrent class.
n = k
break
end
A[k+1:n, k] /= scale
for j in k+1:n, i in k+1:n
A[i, j] += A[i, k] * A[k, j]
end
end
# backsubstitution
x[n] = 1
@inbounds for k in n-1:-1:1, i in k+1:n
x[k] += x[i] * A[i, k]
end
# normalisation
x /= sum(x)
return x
end
|
function gth_solve!(A::Matrix{T}) where T<:Real
n = size(A, 1)
x = zeros(T, n)
@inbounds for k in 1:n-1
scale = sum(A[k, k+1:n])
if scale <= zero(T)
# There is one (and only one) recurrent class contained in
# {1, ..., k};
# compute the solution associated with that recurrent class.
n = k
break
end
A[k+1:n, k] /= scale
for j in k+1:n, i in k+1:n
A[i, j] += A[i, k] * A[k, j]
end
end
# backsubstitution
x[n] = 1
@inbounds for k in n-1:-1:1, i in k+1:n
x[k] += x[i] * A[i, k]
end
# normalisation
x /= sum(x)
return x
end
|
[
117,
147
] |
function gth_solve!(A::Matrix{T}) where T<:Real
n = size(A, 1)
x = zeros(T, n)
@inbounds for k in 1:n-1
scale = sum(A[k, k+1:n])
if scale <= zero(T)
# There is one (and only one) recurrent class contained in
# {1, ..., k};
# compute the solution associated with that recurrent class.
n = k
break
end
A[k+1:n, k] /= scale
for j in k+1:n, i in k+1:n
A[i, j] += A[i, k] * A[k, j]
end
end
# backsubstitution
x[n] = 1
@inbounds for k in n-1:-1:1, i in k+1:n
x[k] += x[i] * A[i, k]
end
# normalisation
x /= sum(x)
return x
end
|
function gth_solve!(A::Matrix{T}) where T<:Real
n = size(A, 1)
x = zeros(T, n)
@inbounds for k in 1:n-1
scale = sum(A[k, k+1:n])
if scale <= zero(T)
# There is one (and only one) recurrent class contained in
# {1, ..., k};
# compute the solution associated with that recurrent class.
n = k
break
end
A[k+1:n, k] /= scale
for j in k+1:n, i in k+1:n
A[i, j] += A[i, k] * A[k, j]
end
end
# backsubstitution
x[n] = 1
@inbounds for k in n-1:-1:1, i in k+1:n
x[k] += x[i] * A[i, k]
end
# normalisation
x /= sum(x)
return x
end
|
gth_solve!
| 117
| 147
|
src/markov/mc_tools.jl
|
#FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
random_probvec(k::Integer, m::Integer) = random_probvec(Random.GLOBAL_RNG, k, m)
#FILE: QuantEcon.jl/test/test_mc_tools.jl
##CHUNK 1
((i/(n-1) > p) + (i/(n-1) == p)/2))
P[i+1, i+1] = 1 - P[i+1, i] - P[i+1, i+2]
end
P[end, end-1], P[end, end] = ε/2, 1 - ε/2
return P
end
function Base.isapprox(x::Vector{Vector{<:Real}},
y::Vector{Vector{<:Real}})
length(x) == length(y) || return false
return all(xy -> isapprox(x, y), zip(x, y))
end
@testset "Testing mc_tools.jl" begin
# Matrix with two recurrent classes [1, 2] and [4, 5, 6],
# which have periods 2 and 3, respectively
Q = [0 1 0 0 0 0
1 0 0 0 0 0
#FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n + 2) .* sqrt_vcv
S = sqrt((n + 2) / 2) * sqrt_vcv
ϵj = [z0; z1 * R; z2 * S]
ωj = vcat(2 / (n + 2) * ones(size(z0, 1)),
(4 - n) / (2 * (n + 2)^2) * ones(size(z1, 1)),
1 / (n + 2)^2 * ones(size(z2, 1)))
return ϵj, ωj
end
#FILE: QuantEcon.jl/src/quadsums.jl
##CHUNK 1
"""
function var_quadratic_sum(A::ScalarOrArray, C::ScalarOrArray, H::ScalarOrArray,
bet::Real, x0::ScalarOrArray)
n = size(A, 1)
# coerce shapes
A = reshape([A;], n, n)
C = reshape([C;], n, n)
H = reshape([H;], n, n)
x0 = reshape([x0;], n)
# solve system
Q = solve_discrete_lyapunov(sqrt(bet) .* A', H)
cq = C'*Q*C
v = tr(cq) * bet / (1 - bet)
q0 = x0'*Q*x0 + v
return q0[1]
end
#FILE: QuantEcon.jl/other/ddpsolve.jl
##CHUNK 1
ind = n * x + (1-n:0)
fstar = f[ind]
pstar = P[ind, :]
return pstar, fstar, ind
end
function expandg(g)
# Only need if I supply "transfunc". Not doing so
n, m = size(g)
P = sparse(1:n*m, g(:), 1, n*m, n)
return P
end
function diagmult(a::Vector{T}, b::Matrix{T}) where T <: Real
n = length(a)
return sparse(1:n, 1:n, a, n, n)*b
end
#FILE: QuantEcon.jl/src/robustlq.jl
##CHUNK 1
"""
function robust_rule_simple(rlq::RBLQ,
P::Matrix=zeros(Float64, rlq.n, rlq.n);
max_iter=80,
tol=1e-8)
# Simplify notation
A, B, C, Q, R = rlq.A, rlq.B, rlq.C, rlq.Q, rlq.R
bet, theta, k, j = rlq.bet, rlq.theta, rlq.k, rlq.j
iterate, e = 0, tol + 1.0
F = similar(P) # instantiate so available after loop
while iterate <= max_iter && e > tol
F, new_P = b_operator(rlq, d_operator(rlq, P))
e = sqrt(sum((new_P - P).^2))
iterate += 1
copyto!(P, new_P)
end
#FILE: QuantEcon.jl/src/markov/markov_approx.jl
##CHUNK 1
end
function _rouwenhorst(p::Real, q::Real, m::Real, Δ::Real, n::Integer)
if n == 2
return [m-Δ, m+Δ], [p 1-p; 1-q q]
else
_, θ_nm1 = _rouwenhorst(p, q, m, Δ, n-1)
θN = p *[θ_nm1 zeros(n-1, 1); zeros(1, n)] +
(1-p)*[zeros(n-1, 1) θ_nm1; zeros(1, n)] +
q *[zeros(1, n); zeros(n-1, 1) θ_nm1] +
(1-q)*[zeros(1, n); θ_nm1 zeros(n-1, 1)]
θN[2:end-1, :] ./= 2
return range(m-Δ, stop=m+Δ, length=n), θN
end
end
# These are to help me order types other than vectors
##CHUNK 2
function min_var_trace(A::AbstractMatrix)
==(size(A)...) || throw(ArgumentError("input matrix must be square"))
K = size(A, 1) # size of A
d = tr(A)/K # diagonal of U'*A*U should be closest to d
function obj(X, grad)
X = reshape(X, K, K)
return (norm(diag(X'*A*X) .- d))
end
function unitary_constraint(res, X, grad)
X = reshape(X, K, K)
res .= vec(X'*X - Matrix(I, K, K))
end
opt = NLopt.Opt(:LN_COBYLA, K^2)
NLopt.min_objective!(opt, obj)
NLopt.equality_constraint!(opt, unitary_constraint, zeros(K^2))
fval, U_vec, ret = NLopt.optimize(opt, vec(Matrix(I, K, K)))
#FILE: QuantEcon.jl/src/lss.jl
##CHUNK 1
- `x::Matrix` An `n x num_reps` matrix, where the j-th column is the j_th
observation of ``x_T``
- `y::Matrix` An `k x num_reps` matrix, where the j-th column is the j_th
observation of ``y_T``
"""
function replicate(lss::LSS, t::Integer, num_reps::Integer=100)
x = Matrix{Float64}(undef, lss.n, num_reps)
v = randn(lss.l, num_reps)
for j=1:num_reps
x_t, _ = simulate(lss, t+1)
x[:, j] = x_t[:, end]
end
y = lss.G * x + lss.H * v
return x, y
end
replicate(lss::LSS; t::Integer=10, num_reps::Integer=100) =
replicate(lss, t, num_reps)
#FILE: QuantEcon.jl/other/regression.jl
##CHUNK 1
# B1 = inv(X1' * X1 + T / n1 * eye(n1) * 10.0^ penalty) * X1' * Y1
B1 = inv(X1' * X1 + T / n1 * I * 10.0^ penalty) * X1' * Y1
B = de_normalize(X, Y, B1)
return B
end
function RLS_TSVD(X, Y, penalty=7)
T, n = size(X)
n1 = n - 1
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1; thin=true)
r = sum((maximum(S)./ S) .<= 10.0^penalty)
Sr_inv = zeros(Float64, n1, n1)
Sr_inv[1:r, 1:r] = diagm(1./ S[1:r])
B1 = V*Sr_inv*U'*Y1
B = de_normalize(X, Y, B1)
return B
end
#CURRENT FILE: QuantEcon.jl/src/markov/mc_tools.jl
|
219
| 230
|
QuantEcon.jl
| 231
|
function period(mc::MarkovChain)
g = DiGraph(mc.p)
recurrent = attracting_components(g)
d = 1
for r in recurrent
pd = period(g[r])
d *= div(pd, gcd(pd, d))
end
return d
end
|
function period(mc::MarkovChain)
g = DiGraph(mc.p)
recurrent = attracting_components(g)
d = 1
for r in recurrent
pd = period(g[r])
d *= div(pd, gcd(pd, d))
end
return d
end
|
[
219,
230
] |
function period(mc::MarkovChain)
g = DiGraph(mc.p)
recurrent = attracting_components(g)
d = 1
for r in recurrent
pd = period(g[r])
d *= div(pd, gcd(pd, d))
end
return d
end
|
function period(mc::MarkovChain)
g = DiGraph(mc.p)
recurrent = attracting_components(g)
d = 1
for r in recurrent
pd = period(g[r])
d *= div(pd, gcd(pd, d))
end
return d
end
|
period
| 219
| 230
|
src/markov/mc_tools.jl
|
#FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
# Recurrance relation for Laguerre polynomials
p3 = p2
p2 = p1
p1 = ((2j - 1 + a - z) * p2 - (j - 1 + a) * p3) ./ j
end
pp = (n * p1 - (n + a) * p2) ./ z
z1 = z
z = z1 - p1 ./ pp
err = abs(z - z1)
if err < 3e-14
break
end
end
if err > 3e-14
error("failure to converge.")
end
nodes[i] = z
weights[i] = fact / (pp * n * p2)
##CHUNK 2
r2 = 1 / (1 + 0.22 * (n - 8) / n)
r3 = 1 / (1 + 8 * a / ((6.28 + a ) * n * n))
z = z + (z - x[n - 2]) * r1 * r2 * r3
else
z = 3 * x[i - 1] - 3 * x[i - 2] + x[i - 3]
end
its = 1
temp = 0.0
pp, p2 = 0.0, 0.0
for its = 1:maxit
# recurrance relation for Jacboi polynomials
temp = 2 + ab
p1 = (a - b + temp * z) / 2
p2 = 1
for j = 2:n
p3 = p2
p2 = p1
temp = 2 * j + ab
#CURRENT FILE: QuantEcon.jl/src/markov/mc_tools.jl
##CHUNK 1
##### Arguments
- `mc::MarkovChain` : MarkovChain instance.
##### Returns
- `::Vector{Vector{Int}}` : Vector of vectors that describe the recurrent
classes of `mc`.
"""
recurrent_classes(mc::MarkovChain) = attracting_components(DiGraph(mc.p))
"""
Find the communication classes of the Markov chain `mc`.
#### Arguments
- `mc::MarkovChain` : MarkovChain instance.
##CHUNK 2
is_irreducible(mc::MarkovChain) = is_strongly_connected(DiGraph(mc.p))
"""
Indicate whether the Markov chain `mc` is aperiodic.
##### Arguments
- `mc::MarkovChain` : MarkovChain instance.
##### Returns
- `::Bool`
"""
is_aperiodic(mc::MarkovChain) = period(mc) == 1
"""
Return the period of the Markov chain `mc`.
##### Arguments
##CHUNK 3
end
# normalisation
x /= sum(x)
return x
end
"""
Find the recurrent classes of the Markov chain `mc`.
##### Arguments
- `mc::MarkovChain` : MarkovChain instance.
##### Returns
- `::Vector{Vector{Int}}` : Vector of vectors that describe the recurrent
classes of `mc`.
##CHUNK 4
- `mc::MarkovChain` : MarkovChain instance.
##### Returns
- `::Int` : Period of `mc`.
"""
for (S, ex_T, ex_gth) in ((Real, :(T), :(gth_solve!)),
(Integer, :(Rational{T}), :(gth_solve)))
@eval function stationary_distributions(mc::MarkovChain{T}) where T<:$S
n = n_states(mc)
rec_classes = recurrent_classes(mc)
T1 = $ex_T
stationary_dists = Vector{Vector{T1}}(undef, length(rec_classes))
for (i, rec_class) in enumerate(rec_classes)
dist = zeros(T1, n)
##CHUNK 5
- `::Bool`
"""
is_aperiodic(mc::MarkovChain) = period(mc) == 1
"""
Return the period of the Markov chain `mc`.
##### Arguments
- `mc::MarkovChain` : MarkovChain instance.
##### Returns
- `::Int` : Period of `mc`.
"""
##CHUNK 6
#=
Tools for working with Markov Chains
@author : Spencer Lyon, Zac Cranko
@date: 07/10/2014
References
----------
https://lectures.quantecon.org/jl/finite_markov.html
=#
import Graphs: DiGraph, period, attracting_components,
strongly_connected_components, is_strongly_connected
@inline check_stochastic_matrix(P) = maximum(abs, sum(P, dims = 2) .- 1) < 5e-15 ? true : false
"""
Finite-state discrete-time Markov chain.
##CHUNK 7
"""
recurrent_classes(mc::MarkovChain) = attracting_components(DiGraph(mc.p))
"""
Find the communication classes of the Markov chain `mc`.
#### Arguments
- `mc::MarkovChain` : MarkovChain instance.
### Returns
- `::Vector{Vector{Int}}` : Vector of vectors that describe the communication
classes of `mc`.
"""
communication_classes(mc::MarkovChain) = strongly_connected_components(DiGraph(mc.p))
"""
Indicate whether the Markov chain `mc` is irreducible.
##CHUNK 8
https://lectures.quantecon.org/jl/finite_markov.html
=#
import Graphs: DiGraph, period, attracting_components,
strongly_connected_components, is_strongly_connected
@inline check_stochastic_matrix(P) = maximum(abs, sum(P, dims = 2) .- 1) < 5e-15 ? true : false
"""
Finite-state discrete-time Markov chain.
Methods are available that provide useful information such as the stationary
distributions, and communication and recurrent classes, and allow simulation of
state transitions.
##### Fields
- `p::AbstractMatrix` : The transition matrix. Must be square, all elements must be nonnegative, and all rows must sum to unity.
- `state_values::AbstractVector` : Vector containing the values associated with the states.
"""
|
275
| 285
|
QuantEcon.jl
| 232
|
function todense(T::Type, S::SparseMatrixCSC)
A = zeros(T, S.m, S.n)
for Sj in 1:S.n
for Sk in nzrange(S, Sj)
Si = S.rowval[Sk]
Sv = S.nzval[Sk]
A[Si, Sj] = Sv
end
end
return A
end
|
function todense(T::Type, S::SparseMatrixCSC)
A = zeros(T, S.m, S.n)
for Sj in 1:S.n
for Sk in nzrange(S, Sj)
Si = S.rowval[Sk]
Sv = S.nzval[Sk]
A[Si, Sj] = Sv
end
end
return A
end
|
[
275,
285
] |
function todense(T::Type, S::SparseMatrixCSC)
A = zeros(T, S.m, S.n)
for Sj in 1:S.n
for Sk in nzrange(S, Sj)
Si = S.rowval[Sk]
Sv = S.nzval[Sk]
A[Si, Sj] = Sv
end
end
return A
end
|
function todense(T::Type, S::SparseMatrixCSC)
A = zeros(T, S.m, S.n)
for Sj in 1:S.n
for Sk in nzrange(S, Sj)
Si = S.rowval[Sk]
Sv = S.nzval[Sk]
A[Si, Sj] = Sv
end
end
return A
end
|
todense
| 275
| 285
|
src/markov/mc_tools.jl
|
#FILE: QuantEcon.jl/src/markov/ddp.jl
##CHUNK 1
while(s_indices[idx] == s)
idx += 1
end
out[s+1] = idx
end
# need this +1 to be consistent with Julia's sparse pointers:
# colptr[i]:(colptr[i+1]-1)
out[num_states+1] = length(s_indices)+1
out
end
function _find_indices!(
a_indices::AbstractVector, a_indptr::AbstractVector, sigma::AbstractVector,
out::AbstractVector
)
n = length(sigma)
for i in 1:n, j in a_indptr[i]:(a_indptr[i+1]-1)
if sigma[i] == a_indices[j]
out[i] = j
end
##CHUNK 2
throw(ArgumentError(msg))
end
if _has_sorted_sa_indices(s_indices, a_indices)
a_indptr = Array{Int64}(undef, num_states + 1)
_a_indices = copy(a_indices)
_generate_a_indptr!(num_states, s_indices, a_indptr)
else
# transpose matrix to use Julia's CSC; now rows are actions and
# columns are states (this is why it's called as_ptr not sa_ptr)
m = maximum(a_indices)
n = maximum(s_indices)
msg = "Duplicate s-a pair found"
as_ptr = sparse(a_indices, s_indices, 1:num_sa_pairs, m, n,
(x,y)->throw(ArgumentError(msg)))
_a_indices = as_ptr.rowval
a_indptr = as_ptr.colptr
R = R[as_ptr.nzval]
Q = Q[as_ptr.nzval, :]
##CHUNK 3
- `s_indices::AbstractVector{T}`
- `out::AbstractVector{T}` : with length = `num_states` + 1
"""
function _generate_a_indptr!(
num_states::Int, s_indices::AbstractVector, out::AbstractVector
)
idx = 1
out[1] = 1
for s in 1:num_states-1
while(s_indices[idx] == s)
idx += 1
end
out[s+1] = idx
end
# need this +1 to be consistent with Julia's sparse pointers:
# colptr[i]:(colptr[i+1]-1)
out[num_states+1] = length(s_indices)+1
out
end
##CHUNK 4
function _find_indices!(
a_indices::AbstractVector, a_indptr::AbstractVector, sigma::AbstractVector,
out::AbstractVector
)
n = length(sigma)
for i in 1:n, j in a_indptr[i]:(a_indptr[i+1]-1)
if sigma[i] == a_indices[j]
out[i] = j
end
end
end
@doc doc"""
Define Matrix Multiplication between 3-dimensional matrix and a vector
Matrix multiplication over the last dimension of ``A``
"""
function *(A::AbstractArray{T,3}, v::AbstractVector) where T
##CHUNK 5
Also fills `out_argmax` with the cartesiean index associated with the `indmax` in
each row
"""
function s_wise_max!(
a_indices::AbstractVector, a_indptr::AbstractVector, vals::AbstractVector,
out::AbstractVector, out_argmax::AbstractVector
)
n = length(out)
for i in 1:n
if a_indptr[i] != a_indptr[i+1]
m = a_indptr[i]
for j in a_indptr[i]+1:(a_indptr[i+1]-1)
if vals[j] > vals[m]
m = j
end
end
out[i] = vals[m]
out_argmax[i] = a_indices[m]
end
##CHUNK 6
m = maximum(a_indices)
n = maximum(s_indices)
msg = "Duplicate s-a pair found"
as_ptr = sparse(a_indices, s_indices, 1:num_sa_pairs, m, n,
(x,y)->throw(ArgumentError(msg)))
_a_indices = as_ptr.rowval
a_indptr = as_ptr.colptr
R = R[as_ptr.nzval]
Q = Q[as_ptr.nzval, :]
end
# check feasibility
aptr_diff = diff(a_indptr)
if any(aptr_diff .== 0.0)
# First state index such that no action is available
s = findall(aptr_diff .== 0.0) # Only Gives True
throw(ArgumentError("for every state at least one action
must be available: violated for state $s"))
end
#FILE: QuantEcon.jl/other/ddpsolve.jl
##CHUNK 1
ind = n * x + (1-n:0)
fstar = f[ind]
pstar = P[ind, :]
return pstar, fstar, ind
end
function expandg(g)
# Only need if I supply "transfunc". Not doing so
n, m = size(g)
P = sparse(1:n*m, g(:), 1, n*m, n)
return P
end
function diagmult(a::Vector{T}, b::Matrix{T}) where T <: Real
n = length(a)
return sparse(1:n, 1:n, a, n, n)*b
end
#FILE: QuantEcon.jl/other/regression.jl
##CHUNK 1
return B
end
function LS_SVD(X, Y, normalize=true)
# OLS using singular value decomposition
# Verified on 11-2-13
if normalize
X1, Y1 = normalize_data(X, Y)
U, S, V = svd(X1, thin=true)
S_inv = diagm(0 => 1.0 ./ S)
B1 = V*S_inv * U' * Y1
B = de_normalize(X, Y, B1)
else
U, S, V = svd(X, thin=true)
S_inv = diagm(1.0 ./ S)
B = V * S_inv * U' * Y
end
return B
#FILE: QuantEcon.jl/src/sampler.jl
##CHUNK 1
C = cholesky(Symmetric(Sigma, :L), Cholesky_RowMaximum, check=false)
A = C.factors
r = C.rank
p = invperm(C.piv)
if r == n # Positive definite
Q = tril!(A)[p, p]
return MVNSampler(mu, Sigma, Q)
end
non_PSD_msg = "Sigma must be positive semidefinite"
for i in r+1:n
A[i, i] >= -ATOL1 - RTOL1 * A[1, 1] ||
throw(ArgumentError(non_PSD_msg))
end
tril!(view(A, :, 1:r))
A[:, r+1:end] .= 0
Q = A[p, p]
#CURRENT FILE: QuantEcon.jl/src/markov/mc_tools.jl
##CHUNK 1
gth_solve!(convert(Matrix{Rational{T}}, A))
"""
Same as `gth_solve`, but overwrite the input `A`, instead of creating a copy.
"""
function gth_solve!(A::Matrix{T}) where T<:Real
n = size(A, 1)
x = zeros(T, n)
@inbounds for k in 1:n-1
scale = sum(A[k, k+1:n])
if scale <= zero(T)
# There is one (and only one) recurrent class contained in
# {1, ..., k};
# compute the solution associated with that recurrent class.
n = k
break
end
A[k+1:n, k] /= scale
|
81
| 92
|
QuantEcon.jl
| 233
|
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
return transpose(p)
end
|
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
return transpose(p)
end
|
[
81,
92
] |
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
return transpose(p)
end
|
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
return transpose(p)
end
|
random_stochastic_matrix
| 81
| 92
|
src/markov/random_mc.jl
|
#FILE: QuantEcon.jl/test/test_random_mc.jl
##CHUNK 1
@testset "Test random_stochastic_matrix" begin
n, k = 5, 3
Ps = (random_stochastic_matrix(n), random_stochastic_matrix(n, k))
for P in Ps
@test all(P .>= 0) == true
@test all(x->isapprox(sum(x), 1),
[P[i, :] for i in 1:size(P)[1]]) == true
end
seed = 1234
rngs = [MersenneTwister(seed) for i in 1:2]
Ps = random_stochastic_matrix.(rngs, n, k)
@test Ps[2] == Ps[1]
end
@testset "Test random_stochastic_matrix with k=1" begin
n, k = 3, 1
P = random_stochastic_matrix(n, k)
@test all((P .== 0) .| (P .== 1))
@test all(x->isequal(sum(x), 1),
#CURRENT FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
random_stochastic_matrix(n::Integer, k::Integer=n) =
random_stochastic_matrix(Random.GLOBAL_RNG, n, k)
"""
_random_stochastic_matrix([rng], n, m; k=n)
Generate a "non-square column stochstic matrix" of shape `(n, m)`, which contains
as columns `m` probability vectors of length `n` with `k` nonzero entries.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `m::Integer` : Number of probability vectors.
- `;k::Integer(n)` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
##CHUNK 2
# random_stochastic_matrix
"""
random_stochastic_matrix([rng], n[, k])
Return a randomly sampled `n x n` stochastic matrix with `k` nonzero entries for
each row.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `k::Integer=n` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Stochastic matrix.
"""
##CHUNK 3
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `k::Integer=n` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Stochastic matrix.
"""
random_stochastic_matrix(n::Integer, k::Integer=n) =
random_stochastic_matrix(Random.GLOBAL_RNG, n, k)
"""
_random_stochastic_matrix([rng], n, m; k=n)
Generate a "non-square column stochstic matrix" of shape `(n, m)`, which contains
as columns `m` probability vectors of length `n` with `k` nonzero entries.
##CHUNK 4
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `m::Integer` : Number of probability vectors.
- `;k::Integer(n)` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Array of shape `(n, m)` containing `m` probability vectors of
length `n` as columns.
"""
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
k == n && return probvecs
##CHUNK 5
- `p::Array` : Array of shape `(n, m)` containing `m` probability vectors of
length `n` as columns.
"""
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
k == n && return probvecs
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
for j in 1:m
for i in 1:k
##CHUNK 6
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
_random_stochastic_matrix(n::Integer, m::Integer; k::Integer=n) =
_random_stochastic_matrix(Random.GLOBAL_RNG, n, m, k=k)
##CHUNK 7
"""
random_probvec([rng], k[, m])
Return `m` randomly sampled probability vectors of size `k`.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `k::Integer` : Size of each probability vector.
- `m::Integer` : Number of probability vectors.
# Returns
- `a::Array` : Matrix of shape `(k, m)`, or Vector of shape `(k,)` if `m` is not
specified, containing probability vector(s) as column(s).
"""
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
##CHUNK 8
"""
random_markov_chain([rng], n[, k])
Return a randomly sampled MarkovChain instance with `n` states, where each state
has `k` states with positive transition probability.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `k::Integer=n` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `mc::MarkovChain` : MarkovChain instance.
# Examples
```julia
##CHUNK 9
- `m::Integer` : Number of probability vectors.
# Returns
- `a::Array` : Matrix of shape `(k, m)`, or Vector of shape `(k,)` if `m` is not
specified, containing probability vector(s) as column(s).
"""
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
|
117
| 138
|
QuantEcon.jl
| 234
|
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
k == n && return probvecs
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
|
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
k == n && return probvecs
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
|
[
117,
138
] |
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
k == n && return probvecs
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
|
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
k == n && return probvecs
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
|
_random_stochastic_matrix
| 117
| 138
|
src/markov/random_mc.jl
|
#CURRENT FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
- `m::Integer` : Number of probability vectors.
# Returns
- `a::Array` : Matrix of shape `(k, m)`, or Vector of shape `(k,)` if `m` is not
specified, containing probability vector(s) as column(s).
"""
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
##CHUNK 2
Generate a "non-square column stochstic matrix" of shape `(n, m)`, which contains
as columns `m` probability vectors of length `n` with `k` nonzero entries.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `m::Integer` : Number of probability vectors.
- `;k::Integer(n)` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Array of shape `(n, m)` containing `m` probability vectors of
length `n` as columns.
"""
_random_stochastic_matrix(n::Integer, m::Integer; k::Integer=n) =
_random_stochastic_matrix(Random.GLOBAL_RNG, n, m, k=k)
##CHUNK 3
return transpose(p)
end
random_stochastic_matrix(n::Integer, k::Integer=n) =
random_stochastic_matrix(Random.GLOBAL_RNG, n, k)
"""
_random_stochastic_matrix([rng], n, m; k=n)
Generate a "non-square column stochstic matrix" of shape `(n, m)`, which contains
as columns `m` probability vectors of length `n` with `k` nonzero entries.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `m::Integer` : Number of probability vectors.
- `;k::Integer(n)` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
##CHUNK 4
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
return transpose(p)
end
random_stochastic_matrix(n::Integer, k::Integer=n) =
random_stochastic_matrix(Random.GLOBAL_RNG, n, k)
"""
_random_stochastic_matrix([rng], n, m; k=n)
##CHUNK 5
"""
random_probvec([rng], k[, m])
Return `m` randomly sampled probability vectors of size `k`.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `k::Integer` : Size of each probability vector.
- `m::Integer` : Number of probability vectors.
# Returns
- `a::Array` : Matrix of shape `(k, m)`, or Vector of shape `(k,)` if `m` is not
specified, containing probability vector(s) as column(s).
"""
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
##CHUNK 6
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
random_probvec(k::Integer, m::Integer) = random_probvec(Random.GLOBAL_RNG, k, m)
random_probvec(rng::AbstractRNG, k::Integer) = vec(random_probvec(rng, k, 1))
random_probvec(k::Integer) = random_probvec(Random.GLOBAL_RNG, k)
##CHUNK 7
# random_stochastic_matrix
"""
random_stochastic_matrix([rng], n[, k])
Return a randomly sampled `n x n` stochastic matrix with `k` nonzero entries for
each row.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `k::Integer=n` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Stochastic matrix.
"""
##CHUNK 8
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
- `k::Integer=n` : Number of nonzero entries in each column of the matrix. Set
to `n` if none specified.
# Returns
- `p::Array` : Stochastic matrix.
"""
function random_stochastic_matrix(rng::AbstractRNG, n::Integer, k::Integer=n)
if !(n > 0)
throw(ArgumentError("n must be a positive integer"))
end
if !(k > 0 && k <= n)
throw(ArgumentError("k must be an integer with 0 < k <= n"))
end
p = _random_stochastic_matrix(rng, n, n, k=k)
##CHUNK 9
# Returns
- `p::Array` : Array of shape `(n, m)` containing `m` probability vectors of
length `n` as columns.
"""
_random_stochastic_matrix(n::Integer, m::Integer; k::Integer=n) =
_random_stochastic_matrix(Random.GLOBAL_RNG, n, m, k=k)
# random_discrete_dp
"""
random_discrete_dp([rng], num_states, num_actions[, beta];
k=num_states, scale=1)
Generate a DiscreteDP randomly. The reward values are drawn from the normal
distribution with mean 0 and standard deviation `scale`.
##CHUNK 10
return ddp
end
random_discrete_dp(num_states::Integer, num_actions::Integer,
beta::Real=rand(); k::Integer=num_states, scale::Real=1) =
random_discrete_dp(Random.GLOBAL_RNG, num_states, num_actions, beta,
k=k, scale=scale)
# random_probvec
"""
random_probvec([rng], k[, m])
Return `m` randomly sampled probability vectors of size `k`.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `k::Integer` : Size of each probability vector.
|
169
| 184
|
QuantEcon.jl
| 235
|
function random_discrete_dp(rng::AbstractRNG,
num_states::Integer,
num_actions::Integer,
beta::Real=rand(rng);
k::Integer=num_states,
scale::Real=1)
L = num_states * num_actions
R = scale * randn(rng, L)
Q = _random_stochastic_matrix(rng, num_states, L; k=k)
R = reshape(R, num_states, num_actions)
Q = reshape(transpose(Q), num_states, num_actions, num_states)
ddp = DiscreteDP(R, Q, beta)
return ddp
end
|
function random_discrete_dp(rng::AbstractRNG,
num_states::Integer,
num_actions::Integer,
beta::Real=rand(rng);
k::Integer=num_states,
scale::Real=1)
L = num_states * num_actions
R = scale * randn(rng, L)
Q = _random_stochastic_matrix(rng, num_states, L; k=k)
R = reshape(R, num_states, num_actions)
Q = reshape(transpose(Q), num_states, num_actions, num_states)
ddp = DiscreteDP(R, Q, beta)
return ddp
end
|
[
169,
184
] |
function random_discrete_dp(rng::AbstractRNG,
num_states::Integer,
num_actions::Integer,
beta::Real=rand(rng);
k::Integer=num_states,
scale::Real=1)
L = num_states * num_actions
R = scale * randn(rng, L)
Q = _random_stochastic_matrix(rng, num_states, L; k=k)
R = reshape(R, num_states, num_actions)
Q = reshape(transpose(Q), num_states, num_actions, num_states)
ddp = DiscreteDP(R, Q, beta)
return ddp
end
|
function random_discrete_dp(rng::AbstractRNG,
num_states::Integer,
num_actions::Integer,
beta::Real=rand(rng);
k::Integer=num_states,
scale::Real=1)
L = num_states * num_actions
R = scale * randn(rng, L)
Q = _random_stochastic_matrix(rng, num_states, L; k=k)
R = reshape(R, num_states, num_actions)
Q = reshape(transpose(Q), num_states, num_actions, num_states)
ddp = DiscreteDP(R, Q, beta)
return ddp
end
|
random_discrete_dp
| 169
| 184
|
src/markov/random_mc.jl
|
#FILE: QuantEcon.jl/test/test_random_mc.jl
##CHUNK 1
# k > n
@test_throws ArgumentError random_markov_chain(2, 3)
end
@testset "Test random_discrete_dp" begin
num_states, num_actions = 5, 4
num_sa = num_states * num_actions
k = 3
ddp = random_discrete_dp(num_states, num_actions; k=k)
# Check shapes
@test size(ddp.R) == (num_states, num_actions)
@test size(ddp.Q) == (num_states, num_actions, num_states)
# Check ddp.Q[:, a, :] is a stochastic matrix for all actions `a`
@test all(ddp.Q .>= 0) == true
for a in 1:num_actions
P = reshape(ddp.Q[:, a, :], (num_states, num_states))
@test all(x->isapprox(sum(x), 1),
[P[i, :] for i in 1:size(P)[1]]) == true
##CHUNK 2
# Check shapes
@test size(ddp.R) == (num_states, num_actions)
@test size(ddp.Q) == (num_states, num_actions, num_states)
# Check ddp.Q[:, a, :] is a stochastic matrix for all actions `a`
@test all(ddp.Q .>= 0) == true
for a in 1:num_actions
P = reshape(ddp.Q[:, a, :], (num_states, num_states))
@test all(x->isapprox(sum(x), 1),
[P[i, :] for i in 1:size(P)[1]]) == true
end
# Check number of nonzero entries for each state-action pair
@test sum(ddp.Q .> 0, dims = 3) ==
ones(Int, (num_states, num_actions, 1)) * k
seed = 1234
rngs = [MersenneTwister(seed) for i in 1:2]
ddps = random_discrete_dp.(rngs, num_states, num_actions)
@test ddps[2].R == ddps[1].R
##CHUNK 3
end
# Check number of nonzero entries for each state-action pair
@test sum(ddp.Q .> 0, dims = 3) ==
ones(Int, (num_states, num_actions, 1)) * k
seed = 1234
rngs = [MersenneTwister(seed) for i in 1:2]
ddps = random_discrete_dp.(rngs, num_states, num_actions)
@test ddps[2].R == ddps[1].R
@test ddps[2].Q == ddps[1].Q
@test ddps[2].beta == ddps[1].beta
@testset "Issue #296" begin
ddp = random_discrete_dp(5, 2)
@test ddp isa DiscreteDP
end
end
@testset "Test random_probvec" begin
#FILE: QuantEcon.jl/test/test_ddp.jl
##CHUNK 1
beta = 0.95
@test_throws ArgumentError DiscreteDP(R, Q, beta)
# State-Action Pair Formulation
#
# s_indices = [0, 0, 1, 1, 2, 2]
# a_indices = [0, 1, 0, 1, 0, 1]
# R_sa = reshape(R, n*m)
# Q_sa_dense = reshape(Q, n*m, n) #TODO: @sglyon Not sure how to reshape in Julia
#
# @test_throws ArgumentError DiscreteDP(R_sa, Q_sa, beta, s_indices, a_indices)
end
end # end @testset
#CURRENT FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
_random_stochastic_matrix(Random.GLOBAL_RNG, n, m, k=k)
# random_discrete_dp
"""
random_discrete_dp([rng], num_states, num_actions[, beta];
k=num_states, scale=1)
Generate a DiscreteDP randomly. The reward values are drawn from the normal
distribution with mean 0 and standard deviation `scale`.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `num_states::Integer` : Number of states.
- `num_actions::Integer` : Number of actions.
- `beta::Real=rand(rng)` : Discount factor. Randomly chosen from
`[0, 1)` if not specified.
- `;k::Integer(num_states)` : Number of possible next states for each
##CHUNK 2
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
_random_stochastic_matrix(n::Integer, m::Integer; k::Integer=n) =
_random_stochastic_matrix(Random.GLOBAL_RNG, n, m, k=k)
# random_discrete_dp
"""
random_discrete_dp([rng], num_states, num_actions[, beta];
k=num_states, scale=1)
Generate a DiscreteDP randomly. The reward values are drawn from the normal
##CHUNK 3
distribution with mean 0 and standard deviation `scale`.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `num_states::Integer` : Number of states.
- `num_actions::Integer` : Number of actions.
- `beta::Real=rand(rng)` : Discount factor. Randomly chosen from
`[0, 1)` if not specified.
- `;k::Integer(num_states)` : Number of possible next states for each
state-action pair. Equal to `num_states` if not specified.
- `scale::Real(1)` : Standard deviation of the normal distribution for the
reward values.
# Returns
- `ddp::DiscreteDP` : An instance of DiscreteDP.
"""
random_discrete_dp(num_states::Integer, num_actions::Integer,
##CHUNK 4
state-action pair. Equal to `num_states` if not specified.
- `scale::Real(1)` : Standard deviation of the normal distribution for the
reward values.
# Returns
- `ddp::DiscreteDP` : An instance of DiscreteDP.
"""
random_discrete_dp(num_states::Integer, num_actions::Integer,
beta::Real=rand(); k::Integer=num_states, scale::Real=1) =
random_discrete_dp(Random.GLOBAL_RNG, num_states, num_actions, beta,
k=k, scale=scale)
# random_probvec
"""
random_probvec([rng], k[, m])
##CHUNK 5
beta::Real=rand(); k::Integer=num_states, scale::Real=1) =
random_discrete_dp(Random.GLOBAL_RNG, num_states, num_actions, beta,
k=k, scale=scale)
# random_probvec
"""
random_probvec([rng], k[, m])
Return `m` randomly sampled probability vectors of size `k`.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `k::Integer` : Size of each probability vector.
- `m::Integer` : Number of probability vectors.
# Returns
##CHUNK 6
#=
Generate MarkovChain and DiscreteDP instances randomly.
@author : Daisuke Oyama
=#
import StatsBase: sample
import QuantEcon: MarkovChain, DiscreteDP
# random_markov_chain
"""
random_markov_chain([rng], n[, k])
Return a randomly sampled MarkovChain instance with `n` states, where each state
has `k` states with positive transition probability.
# Arguments
- `rng::AbstractRNG=GLOBAL_RNG` : Random number generator.
- `n::Integer` : Number of states.
|
210
| 227
|
QuantEcon.jl
| 236
|
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
|
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
|
[
210,
227
] |
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
|
function random_probvec(rng::AbstractRNG, k::Integer, m::Integer)
k == 1 && return ones((k, m))
# if k >= 2
x = Matrix{Float64}(undef, k, m)
r = rand(rng, k-1, m)
x[1:end .- 1, :] = sort(r, dims = 1)
for j in 1:m
x[end, j] = 1 - x[end-1, j]
for i in k-1:-1:2
x[i, j] -= x[i-1, j]
end
end
return x
end
|
random_probvec
| 210
| 227
|
src/markov/random_mc.jl
|
#FILE: QuantEcon.jl/src/lqnash.jl
##CHUNK 1
function nnash(a, b1, b2, r1, r2, q1, q2, s1, s2, w1, w2, m1, m2;
beta::Float64=1.0, tol::Float64=1e-8, max_iter::Int=1000)
# Apply discounting
a, b1, b2 = map(x->sqrt(beta) * x, Any[a, b1, b2])
dd = 10
its = 0
n = size(a, 1)
# NOTE: if b1/b2 has 2 dimensions, this is exactly what we want.
# if b1/b2 has 1 dimension size(b, 2) returns a 1, so it is also
# what we want
k_1 = size(b1, 2)
k_2 = size(b2, 2)
# initial values
v1 = Matrix(I, k_1, k_1)
v2 = Matrix(I, k_2, k_2)
p1 = zeros(n, n)
p2 = zeros(n, n)
#FILE: QuantEcon.jl/src/util.jl
##CHUNK 1
# If only one element then only one point in simplex
if m==1
return 1
end
decumsum = reverse(cumsum(reverse(x[2:end])))
idx = binomial(n+m-1, m-1)
for i in 1:m-1
if decumsum[i] == 0
break
end
idx -= num_compositions(m - (i-1), decumsum[i]-1)
end
return idx
end
"""
next_k_array!(a)
##CHUNK 2
return a
end
a[1] = 1
i = 2
x = a[i] + 1
while i < k && x == a[i+1]
i += 1
a[i-1] = i - 1
x = a[i] + 1
end
a[i] = x
return a
end
"""
k_array_rank([T=Int], a)
##CHUNK 3
- `idx::T`: Ranking of `a`.
"""
function k_array_rank(T::Type{<:Integer}, a::Vector{<:Integer})
if T != BigInt
binomial(BigInt(a[end]), BigInt(length(a))) ≤ typemax(T) ||
throw(InexactError(:Binomial, T, a[end]))
end
k = length(a)
idx = one(T)
for i = 1:k
idx += binomial(T(a[i])-one(T), T(i))
end
return idx
end
k_array_rank(a::Vector{<:Integer}) = k_array_rank(Int, a)
#FILE: QuantEcon.jl/src/quad.jl
##CHUNK 1
end
function qnwmonomial2(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n^2 + 1
z0 = zeros(1, n)
z1 = zeros(2n, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
##CHUNK 2
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
z2 = zeros(2n * (n - 1), n)
i = 0
# In each node, a pair of random variables (p,q) takes either values
# (1,1) or (1,-1) or (-1,1) or (-1,-1), and all other variables take
# value 0. For example, for N = 2, `z2 = [1 1; 1 -1; -1 1; -1 1]`
for p = 1:n - 1
for q = p + 1:n
i += 1
z2[4 * (i - 1) + 1:4 * i, p] = [1, -1, 1, -1]
z2[4 * (i - 1) + 1:4 * i, q] = [1, 1, -1, -1]
end
end
##CHUNK 3
function qnwmonomial1(vcv::AbstractMatrix)
n = size(vcv, 1)
@assert n == size(vcv, 2) "Variance covariance matrix must be square"
n_nodes = 2n
z1 = zeros(n_nodes, n)
# In each node, random variable i takes value either 1 or -1, and
# all other variables take value 0. For example, for N = 2,
# z1 = [1 0; -1 0; 0 1; 0 -1]
for i = 1:n
z1[2 * (i - 1) + 1:2 * i, i] = [1, -1]
end
sqrt_vcv = cholesky(vcv).U
R = sqrt(n) .* sqrt_vcv
ϵj = z1 * R
ωj = ones(n_nodes) ./ n_nodes
ϵj, ωj
#CURRENT FILE: QuantEcon.jl/src/markov/random_mc.jl
##CHUNK 1
k == n && return probvecs
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
_random_stochastic_matrix(n::Integer, m::Integer; k::Integer=n) =
##CHUNK 2
# Returns
- `p::Array` : Array of shape `(n, m)` containing `m` probability vectors of
length `n` as columns.
"""
function _random_stochastic_matrix(rng::AbstractRNG, n::Integer, m::Integer;
k::Integer=n)
probvecs = random_probvec(rng, k, m)
k == n && return probvecs
# if k < n
# Randomly sample row indices for each column for nonzero values
row_indices = Vector{Int}(undef, k*m)
for j in 1:m
row_indices[(j-1)*k+1:j*k] = sample(rng, 1:n, k, replace=false)
end
p = zeros(n, m)
##CHUNK 3
for j in 1:m
for i in 1:k
p[row_indices[(j-1)*k+i], j] = probvecs[i, j]
end
end
return p
end
_random_stochastic_matrix(n::Integer, m::Integer; k::Integer=n) =
_random_stochastic_matrix(Random.GLOBAL_RNG, n, m, k=k)
# random_discrete_dp
"""
random_discrete_dp([rng], num_states, num_actions[, beta];
k=num_states, scale=1)
Generate a DiscreteDP randomly. The reward values are drawn from the normal
|
25
| 40
|
StatsBase.jl
| 237
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
|
[
25,
40
] |
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
|
addcounts!
| 25
| 40
|
src/counts.jl
|
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
Return all modes (most common numbers) of an array, optionally over a
specified range `r` or weighted via vector `wv`.
"""
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
#FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = i
end
end
return d
end
"""
levelsmap(a)
Construct a dictionary that maps each of the `n` unique values
in `a` to a number between 1 and `n`.
"""
function levelsmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
index = 1
for i = 1 : length(a)
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
"""
##CHUNK 2
proportions(x::AbstractArray{<:Integer}, wv::AbstractWeights) = proportions(x, span(x), wv)
#### functions for counting a single list of integers (2D)
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
# add counts of pairs from zip(x,y) to r
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
##CHUNK 3
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
##CHUNK 4
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
##CHUNK 5
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
##CHUNK 6
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
# facet functions
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
addcounts!(zeros(Int, length(levels[1]), length(levels[2])), x, y, levels)
end
##CHUNK 7
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
##CHUNK 8
than the proportion of raw counts.
The output is a vector of length `length(levels)`.
"""
function counts end
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}) =
addcounts!(zeros(Int, length(levels)), x, levels)
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights) =
addcounts!(zeros(eltype(wv), length(levels)), x, levels, wv)
counts(x::AbstractArray{<:Integer}, k::Integer) = counts(x, 1:k)
counts(x::AbstractArray{<:Integer}, k::Integer, wv::AbstractWeights) = counts(x, 1:k, wv)
counts(x::AbstractArray{<:Integer}) = counts(x, span(x))
counts(x::AbstractArray{<:Integer}, wv::AbstractWeights) = counts(x, span(x), wv)
"""
proportions(x, levels=span(x), [wv::AbstractWeights])
Return the proportion of values in the range `levels` that occur in `x`.
|
42
| 63
|
StatsBase.jl
| 238
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
|
[
42,
63
] |
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
|
addcounts!
| 42
| 63
|
src/counts.jl
|
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 2
function addcounts_radixsort!(cm::Dict{T}, x) where T
cx = vec(collect(x))
sx = sort!(cx, alg = Base.DEFAULT_UNSTABLE)
return _addcounts_radix_sort_loop!(cm, sx)
end
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
##CHUNK 3
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
##CHUNK 4
else
using Base: ht_keyindex2!
end
#### functions for counting a single list of integers (1D)
"""
addcounts!(r, x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
Add the number of occurrences in `x` of each value in `levels` to an existing
array `r`. For each `xi ∈ x`, if `xi == levels[j]`, then we increment `r[j]`.
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
##CHUNK 5
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
Count the number of times each value in `x` occurs. If `levels` is provided, only values
falling in that range will be considered (the others will be ignored without
##CHUNK 6
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
##CHUNK 7
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
"""
countmap(x; alg = :auto)
countmap(x::AbstractVector, wv::AbstractVector{<:Real})
Return a dictionary mapping each unique value in `x` to its number of occurrences.
##CHUNK 8
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}) =
addcounts!(zeros(Int, length(levels)), x, levels)
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights) =
addcounts!(zeros(eltype(wv), length(levels)), x, levels, wv)
counts(x::AbstractArray{<:Integer}, k::Integer) = counts(x, 1:k)
counts(x::AbstractArray{<:Integer}, k::Integer, wv::AbstractWeights) = counts(x, 1:k, wv)
counts(x::AbstractArray{<:Integer}) = counts(x, span(x))
counts(x::AbstractArray{<:Integer}, wv::AbstractWeights) = counts(x, span(x), wv)
"""
proportions(x, levels=span(x), [wv::AbstractWeights])
Return the proportion of values in the range `levels` that occur in `x`.
Equivalent to `counts(x, levels) / length(x)`.
If a vector of weights `wv` is provided, the proportion of weights is computed rather
than the proportion of raw counts.
"""
proportions(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}) = counts(x, levels) / length(x)
##CHUNK 9
raising an error or a warning). If an integer `k` is provided, only values in the
range `1:k` will be considered.
If a vector of weights `wv` is provided, the proportion of weights is computed rather
than the proportion of raw counts.
The output is a vector of length `length(levels)`.
"""
function counts end
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}) =
addcounts!(zeros(Int, length(levels)), x, levels)
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights) =
addcounts!(zeros(eltype(wv), length(levels)), x, levels, wv)
counts(x::AbstractArray{<:Integer}, k::Integer) = counts(x, 1:k)
counts(x::AbstractArray{<:Integer}, k::Integer, wv::AbstractWeights) = counts(x, 1:k, wv)
counts(x::AbstractArray{<:Integer}) = counts(x, span(x))
counts(x::AbstractArray{<:Integer}, wv::AbstractWeights) = counts(x, span(x), wv)
##CHUNK 10
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
# facet functions
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
addcounts!(zeros(Int, length(levels[1]), length(levels[2])), x, y, levels)
end
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
addcounts!(zeros(eltype(wv), length(levels[1]), length(levels[2])), x, y, levels, wv)
end
counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}) =
counts(x, y, (levels, levels))
|
121
| 145
|
StatsBase.jl
| 239
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
# add counts of pairs from zip(x,y) to r
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
# add counts of pairs from zip(x,y) to r
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
|
[
121,
145
] |
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
# add counts of pairs from zip(x,y) to r
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
# add counts of pairs from zip(x,y) to r
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
|
addcounts!
| 121
| 145
|
src/counts.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
##CHUNK 2
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
@inbounds for i = 2:n
if x[i - 1] == x[i]
k += 1
elseif k > 0
# Sort the corresponding chunk of y, so the rows of hcat(x,y) are
# sorted first on x, then (where x values are tied) on y. Hence
# double ties can be counted by calling countties.
sort!(view(y, (i - k - 1):(i - 1)))
ntiesx += div(widen(k) * (k + 1), 2) # Must use wide integers here
ndoubleties += countties(y, i - k - 1, i - 1)
k = 0
end
end
if k > 0
sort!(view(y, (n - k):n))
ntiesx += div(widen(k) * (k + 1), 2)
ndoubleties += countties(y, n - k, n)
end
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
# facet functions
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
addcounts!(zeros(Int, length(levels[1]), length(levels[2])), x, y, levels)
end
##CHUNK 2
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
##CHUNK 3
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 4
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
##CHUNK 5
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
##CHUNK 6
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
##CHUNK 7
else
using Base: ht_keyindex2!
end
#### functions for counting a single list of integers (1D)
"""
addcounts!(r, x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
Add the number of occurrences in `x` of each value in `levels` to an existing
array `r`. For each `xi ∈ x`, if `xi == levels[j]`, then we increment `r[j]`.
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
|
147
| 179
|
StatsBase.jl
| 240
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
|
[
147,
179
] |
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
|
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
|
addcounts!
| 147
| 179
|
src/counts.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
##CHUNK 2
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
# add counts of pairs from zip(x,y) to r
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
##CHUNK 2
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
##CHUNK 3
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 4
cx = vec(collect(x))
sx = sort!(cx, alg = Base.DEFAULT_UNSTABLE)
return _addcounts_radix_sort_loop!(cm, sx)
end
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
##CHUNK 5
If a vector of weights `wv` is provided, the proportion of weights is computed rather
than the proportion of raw counts.
"""
proportions(x::AbstractArray{<:Integer}, k::Integer) = proportions(x, 1:k)
proportions(x::AbstractArray{<:Integer}, k::Integer, wv::AbstractWeights) = proportions(x, 1:k, wv)
proportions(x::AbstractArray{<:Integer}) = proportions(x, span(x))
proportions(x::AbstractArray{<:Integer}, wv::AbstractWeights) = proportions(x, span(x), wv)
#### functions for counting a single list of integers (2D)
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
# add counts of pairs from zip(x,y) to r
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
##CHUNK 6
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
##CHUNK 7
r[xi - bx, yi - by] += 1
end
end
return r
end
# facet functions
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
addcounts!(zeros(Int, length(levels[1]), length(levels[2])), x, y, levels)
end
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
addcounts!(zeros(eltype(wv), length(levels[1]), length(levels[2])), x, y, levels, wv)
end
counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}) =
counts(x, y, (levels, levels))
counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights) =
##CHUNK 8
else
using Base: ht_keyindex2!
end
#### functions for counting a single list of integers (1D)
"""
addcounts!(r, x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
Add the number of occurrences in `x` of each value in `levels` to an existing
array `r`. For each `xi ∈ x`, if `xi == levels[j]`, then we increment `r[j]`.
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
|
272
| 283
|
StatsBase.jl
| 241
|
function _addcounts!(::Type{T}, cm::Dict, x; alg = :auto) where T
# if it's safe to be sorted using radixsort then it should be faster
# albeit using more RAM
if radixsort_safe(T) && (alg == :auto || alg == :radixsort)
addcounts_radixsort!(cm, x)
elseif alg == :radixsort
throw(ArgumentError("`alg = :radixsort` is chosen but type `radixsort_safe($T)` did not return `true`; use `alg = :auto` or `alg = :dict` instead"))
else
addcounts_dict!(cm,x)
end
return cm
end
|
function _addcounts!(::Type{T}, cm::Dict, x; alg = :auto) where T
# if it's safe to be sorted using radixsort then it should be faster
# albeit using more RAM
if radixsort_safe(T) && (alg == :auto || alg == :radixsort)
addcounts_radixsort!(cm, x)
elseif alg == :radixsort
throw(ArgumentError("`alg = :radixsort` is chosen but type `radixsort_safe($T)` did not return `true`; use `alg = :auto` or `alg = :dict` instead"))
else
addcounts_dict!(cm,x)
end
return cm
end
|
[
272,
283
] |
function _addcounts!(::Type{T}, cm::Dict, x; alg = :auto) where T
# if it's safe to be sorted using radixsort then it should be faster
# albeit using more RAM
if radixsort_safe(T) && (alg == :auto || alg == :radixsort)
addcounts_radixsort!(cm, x)
elseif alg == :radixsort
throw(ArgumentError("`alg = :radixsort` is chosen but type `radixsort_safe($T)` did not return `true`; use `alg = :auto` or `alg = :dict` instead"))
else
addcounts_dict!(cm,x)
end
return cm
end
|
function _addcounts!(::Type{T}, cm::Dict, x; alg = :auto) where T
# if it's safe to be sorted using radixsort then it should be faster
# albeit using more RAM
if radixsort_safe(T) && (alg == :auto || alg == :radixsort)
addcounts_radixsort!(cm, x)
elseif alg == :radixsort
throw(ArgumentError("`alg = :radixsort` is chosen but type `radixsort_safe($T)` did not return `true`; use `alg = :auto` or `alg = :dict` instead"))
else
addcounts_dict!(cm,x)
end
return cm
end
|
_addcounts!
| 272
| 283
|
src/counts.jl
|
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
If a weighting vector `wv` is specified, the sum of the weights is used rather than the
raw counts.
`alg` is only allowed for unweighted counting and can be one of:
- `:auto` (default): if `StatsBase.radixsort_safe(eltype(x)) == true` then use
`:radixsort`, otherwise use `:dict`.
- `:radixsort`: if `radixsort_safe(eltype(x)) == true` then use the
[radix sort](https://en.wikipedia.org/wiki/Radix_sort)
algorithm to sort the input vector which will generally lead to
shorter running time for large `x` with many duplicates. However
the radix sort algorithm creates a copy of the input vector and
hence uses more RAM. Choose `:dict` if the amount of available
RAM is a limitation.
- `:dict`: use `Dict`-based method which is generally slower but uses less
RAM, is safe for any data type, is faster for small arrays, and
is faster when there are not many duplicates.
"""
addcounts!(cm::Dict, x; alg = :auto) = _addcounts!(eltype(x), cm, x, alg = alg)
##CHUNK 2
## 1D
"""
addcounts!(dict, x; alg = :auto)
addcounts!(dict, x, wv)
Add counts based on `x` to a count map. New entries will be added if new values come up.
If a weighting vector `wv` is specified, the sum of the weights is used rather than the
raw counts.
`alg` is only allowed for unweighted counting and can be one of:
- `:auto` (default): if `StatsBase.radixsort_safe(eltype(x)) == true` then use
`:radixsort`, otherwise use `:dict`.
- `:radixsort`: if `radixsort_safe(eltype(x)) == true` then use the
[radix sort](https://en.wikipedia.org/wiki/Radix_sort)
algorithm to sort the input vector which will generally lead to
##CHUNK 3
end
return cm
end
"""
countmap(x; alg = :auto)
countmap(x::AbstractVector, wv::AbstractVector{<:Real})
Return a dictionary mapping each unique value in `x` to its number of occurrences.
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
`alg` is only allowed for unweighted counting and can be one of:
- `:auto` (default): if `StatsBase.radixsort_safe(eltype(x)) == true` then use
`:radixsort`, otherwise use `:dict`.
- `:radixsort`: if `radixsort_safe(eltype(x)) == true` then use the
[radix sort](https://en.wikipedia.org/wiki/Radix_sort)
##CHUNK 4
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
`alg` is only allowed for unweighted counting and can be one of:
- `:auto` (default): if `StatsBase.radixsort_safe(eltype(x)) == true` then use
`:radixsort`, otherwise use `:dict`.
- `:radixsort`: if `radixsort_safe(eltype(x)) == true` then use the
[radix sort](https://en.wikipedia.org/wiki/Radix_sort)
algorithm to sort the input vector which will generally lead to
shorter running time for large `x` with many duplicates. However
the radix sort algorithm creates a copy of the input vector and
hence uses more RAM. Choose `:dict` if the amount of available
RAM is a limitation.
- `:dict`: use `Dict`-based method which is generally slower but uses less
RAM, is safe for any data type, is faster for small arrays, and
is faster when there are not many duplicates.
"""
##CHUNK 5
shorter running time for large `x` with many duplicates. However
the radix sort algorithm creates a copy of the input vector and
hence uses more RAM. Choose `:dict` if the amount of available
RAM is a limitation.
- `:dict`: use `Dict`-based method which is generally slower but uses less
RAM, is safe for any data type, is faster for small arrays, and
is faster when there are not many duplicates.
"""
addcounts!(cm::Dict, x; alg = :auto) = _addcounts!(eltype(x), cm, x, alg = alg)
"""Dict-based addcounts method"""
function addcounts_dict!(cm::Dict{T}, x) where T
for v in x
index = ht_keyindex2!(cm, v)
if index > 0
@inbounds cm.vals[index] += 1
else
@inbounds Base._setindex!(cm, 1, v, -index)
##CHUNK 6
# sort the x using radixsort
sx = sort(vec(x), alg=Base.DEFAULT_UNSTABLE)
# Delegate the loop to a separate function since sort might not
# be inferred in Julia 0.6 after SortingAlgorithms is loaded.
# It seems that sort is inferred in Julia 0.7.
return _addcounts_radix_sort_loop!(cm, sx)
end
# fall-back for `x` an iterator
function addcounts_radixsort!(cm::Dict{T}, x) where T
cx = vec(collect(x))
sx = sort!(cx, alg = Base.DEFAULT_UNSTABLE)
return _addcounts_radix_sort_loop!(cm, sx)
end
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
##CHUNK 7
"""Dict-based addcounts method"""
function addcounts_dict!(cm::Dict{T}, x) where T
for v in x
index = ht_keyindex2!(cm, v)
if index > 0
@inbounds cm.vals[index] += 1
else
@inbounds Base._setindex!(cm, 1, v, -index)
end
end
return cm
end
# If the bits type is of small size i.e. it can have up to 65536 distinct values
# then it is always better to apply a counting-sort like reduce algorithm for
# faster results and less memory usage. However we still wish to enable others
# to write generic algorithms, therefore the methods below still accept the
# `alg` argument but it is ignored.
##CHUNK 8
end
end
last_sx = last(sx)
cm[last_sx] = get(cm, last_sx, 0) + lastindex(sx) + 1 - start_i
return cm
end
function addcounts_radixsort!(cm::Dict{T}, x::AbstractArray{T}) where T
# sort the x using radixsort
sx = sort(vec(x), alg=Base.DEFAULT_UNSTABLE)
# Delegate the loop to a separate function since sort might not
# be inferred in Julia 0.6 after SortingAlgorithms is loaded.
# It seems that sort is inferred in Julia 0.7.
return _addcounts_radix_sort_loop!(cm, sx)
end
# fall-back for `x` an iterator
##CHUNK 9
const BaseRadixSortSafeTypes = Union{Int8, Int16, Int32, Int64, Int128,
UInt8, UInt16, UInt32, UInt64, UInt128,
Float32, Float64}
"Can the type be safely sorted by radixsort"
radixsort_safe(::Type{T}) where T = T<:BaseRadixSortSafeTypes
function _addcounts_radix_sort_loop!(cm::Dict{T}, sx::AbstractVector{T}) where T
isempty(sx) && return cm
last_sx = first(sx)
start_i = firstindex(sx)
# now the data is sorted: can just run through and accumulate values before
# adding into the Dict
@inbounds for i in start_i+1:lastindex(sx)
sxi = sx[i]
if !isequal(last_sx, sxi)
cm[last_sx] = get(cm, last_sx, 0) + i - start_i
last_sx = sxi
start_i = i
##CHUNK 10
if index > 0
@inbounds cm.vals[index] += c
else
@inbounds Base._setindex!(cm, c, i, -index)
end
end
end
cm
end
const BaseRadixSortSafeTypes = Union{Int8, Int16, Int32, Int64, Int128,
UInt8, UInt16, UInt32, UInt64, UInt128,
Float32, Float64}
"Can the type be safely sorted by radixsort"
radixsort_safe(::Type{T}) where T = T<:BaseRadixSortSafeTypes
function _addcounts_radix_sort_loop!(cm::Dict{T}, sx::AbstractVector{T}) where T
isempty(sx) && return cm
last_sx = first(sx)
|
286
| 296
|
StatsBase.jl
| 242
|
function addcounts_dict!(cm::Dict{T}, x) where T
for v in x
index = ht_keyindex2!(cm, v)
if index > 0
@inbounds cm.vals[index] += 1
else
@inbounds Base._setindex!(cm, 1, v, -index)
end
end
return cm
end
|
function addcounts_dict!(cm::Dict{T}, x) where T
for v in x
index = ht_keyindex2!(cm, v)
if index > 0
@inbounds cm.vals[index] += 1
else
@inbounds Base._setindex!(cm, 1, v, -index)
end
end
return cm
end
|
[
286,
296
] |
function addcounts_dict!(cm::Dict{T}, x) where T
for v in x
index = ht_keyindex2!(cm, v)
if index > 0
@inbounds cm.vals[index] += 1
else
@inbounds Base._setindex!(cm, 1, v, -index)
end
end
return cm
end
|
function addcounts_dict!(cm::Dict{T}, x) where T
for v in x
index = ht_keyindex2!(cm, v)
if index > 0
@inbounds cm.vals[index] += 1
else
@inbounds Base._setindex!(cm, 1, v, -index)
end
end
return cm
end
|
addcounts_dict!
| 286
| 296
|
src/counts.jl
|
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
#FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
"""
indexmap(a)
Construct a dictionary that maps each unique value in `a` to
the index of its first occurrence in `a`.
"""
function indexmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = i
end
end
return d
end
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
if c != 0
index = ht_keyindex2!(cm, i)
if index > 0
@inbounds cm.vals[index] += c
else
@inbounds Base._setindex!(cm, c, i, -index)
end
end
end
cm
end
##CHUNK 2
sumx = 0
len = 0
for i in x
sumx += i
len += 1
end
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + len - sumx
cm
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
if c != 0
##CHUNK 3
## auxiliary functions
function _normalize_countmap(cm::Dict{T}, s::Real) where T
r = Dict{T,Float64}()
for (k, c) in cm
r[k] = c / s
end
return r
end
## 1D
"""
addcounts!(dict, x; alg = :auto)
addcounts!(dict, x, wv)
Add counts based on `x` to a count map. New entries will be added if new values come up.
##CHUNK 4
index = ht_keyindex2!(cm, i)
if index > 0
@inbounds cm.vals[index] += c
else
@inbounds Base._setindex!(cm, c, i, -index)
end
end
end
cm
end
const BaseRadixSortSafeTypes = Union{Int8, Int16, Int32, Int64, Int128,
UInt8, UInt16, UInt32, UInt64, UInt128,
Float32, Float64}
"Can the type be safely sorted by radixsort"
radixsort_safe(::Type{T}) where T = T<:BaseRadixSortSafeTypes
function _addcounts_radix_sort_loop!(cm::Dict{T}, sx::AbstractVector{T}) where T
isempty(sx) && return cm
##CHUNK 5
# Counts of discrete values
#################################################
#
# counts on given levels
#
#################################################
if isdefined(Base, :ht_keyindex2)
const ht_keyindex2! = Base.ht_keyindex2
else
using Base: ht_keyindex2!
end
#### functions for counting a single list of integers (1D)
"""
addcounts!(r, x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
Add the number of occurrences in `x` of each value in `levels` to an existing
array `r`. For each `xi ∈ x`, if `xi == levels[j]`, then we increment `r[j]`.
##CHUNK 6
# `alg` argument but it is ignored.
function _addcounts!(::Type{Bool}, cm::Dict{Bool}, x::AbstractArray{Bool}; alg = :ignored)
sumx = sum(x)
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + length(x) - sumx
cm
end
# specialized for `Bool` iterator
function _addcounts!(::Type{Bool}, cm::Dict{Bool}, x; alg = :ignored)
sumx = 0
len = 0
for i in x
sumx += i
len += 1
end
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + len - sumx
cm
end
##CHUNK 7
else
using Base: ht_keyindex2!
end
#### functions for counting a single list of integers (1D)
"""
addcounts!(r, x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
Add the number of occurrences in `x` of each value in `levels` to an existing
array `r`. For each `xi ∈ x`, if `xi == levels[j]`, then we increment `r[j]`.
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
##CHUNK 8
# fall-back for `x` an iterator
function addcounts_radixsort!(cm::Dict{T}, x) where T
cx = vec(collect(x))
sx = sort!(cx, alg = Base.DEFAULT_UNSTABLE)
return _addcounts_radix_sort_loop!(cm, sx)
end
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
|
350
| 370
|
StatsBase.jl
| 243
|
function _addcounts_radix_sort_loop!(cm::Dict{T}, sx::AbstractVector{T}) where T
isempty(sx) && return cm
last_sx = first(sx)
start_i = firstindex(sx)
# now the data is sorted: can just run through and accumulate values before
# adding into the Dict
@inbounds for i in start_i+1:lastindex(sx)
sxi = sx[i]
if !isequal(last_sx, sxi)
cm[last_sx] = get(cm, last_sx, 0) + i - start_i
last_sx = sxi
start_i = i
end
end
last_sx = last(sx)
cm[last_sx] = get(cm, last_sx, 0) + lastindex(sx) + 1 - start_i
return cm
end
|
function _addcounts_radix_sort_loop!(cm::Dict{T}, sx::AbstractVector{T}) where T
isempty(sx) && return cm
last_sx = first(sx)
start_i = firstindex(sx)
# now the data is sorted: can just run through and accumulate values before
# adding into the Dict
@inbounds for i in start_i+1:lastindex(sx)
sxi = sx[i]
if !isequal(last_sx, sxi)
cm[last_sx] = get(cm, last_sx, 0) + i - start_i
last_sx = sxi
start_i = i
end
end
last_sx = last(sx)
cm[last_sx] = get(cm, last_sx, 0) + lastindex(sx) + 1 - start_i
return cm
end
|
[
350,
370
] |
function _addcounts_radix_sort_loop!(cm::Dict{T}, sx::AbstractVector{T}) where T
isempty(sx) && return cm
last_sx = first(sx)
start_i = firstindex(sx)
# now the data is sorted: can just run through and accumulate values before
# adding into the Dict
@inbounds for i in start_i+1:lastindex(sx)
sxi = sx[i]
if !isequal(last_sx, sxi)
cm[last_sx] = get(cm, last_sx, 0) + i - start_i
last_sx = sxi
start_i = i
end
end
last_sx = last(sx)
cm[last_sx] = get(cm, last_sx, 0) + lastindex(sx) + 1 - start_i
return cm
end
|
function _addcounts_radix_sort_loop!(cm::Dict{T}, sx::AbstractVector{T}) where T
isempty(sx) && return cm
last_sx = first(sx)
start_i = firstindex(sx)
# now the data is sorted: can just run through and accumulate values before
# adding into the Dict
@inbounds for i in start_i+1:lastindex(sx)
sxi = sx[i]
if !isequal(last_sx, sxi)
cm[last_sx] = get(cm, last_sx, 0) + i - start_i
last_sx = sxi
start_i = i
end
end
last_sx = last(sx)
cm[last_sx] = get(cm, last_sx, 0) + lastindex(sx) + 1 - start_i
return cm
end
|
_addcounts_radix_sort_loop!
| 350
| 370
|
src/counts.jl
|
#FILE: StatsBase.jl/src/ranking.jl
##CHUNK 1
for e in 2:n # e is pass-by-end index of current range
cx = x[p[e]]
if cx != v
# fill average rank to s : e-1
ar = (s + e - 1) / 2
for i = s : e-1
rks[p[i]] = ar
end
# switch to next range
s = e
v = cx
end
end
# the last range
ar = (s + n) / 2
for i = s : n
rks[p[i]] = ar
end
end
#FILE: StatsBase.jl/test/counts.jl
##CHUNK 1
cm_missing = countmap(skipmissing(xx))
@test cm_missing isa Dict{Int, Int}
@test cm_missing == cm
cm_any_itr = countmap((i for i in xx))
@test cm_any_itr isa Dict{Any,Int} # no knowledge about type
@test cm_any_itr == cm
# with multidimensional array
@test countmap(reshape(xx, 20, 100, 20, 10); alg=:radixsort) == cm
@test countmap(reshape(xx, 20, 100, 20, 10); alg=:dict) == cm
# with empty array
@test countmap(Int[]) == Dict{Int, Int}()
# testing the radixsort-based addcounts
xx = repeat([6, 1, 3, 1], outer=100_000)
cm = Dict{Int, Int}()
StatsBase.addcounts_radixsort!(cm,xx)
@test cm == Dict(1 => 200_000, 3 => 100_000, 6 => 100_000)
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
# fall-back for `x` an iterator
function addcounts_radixsort!(cm::Dict{T}, x) where T
cx = vec(collect(x))
sx = sort!(cx, alg = Base.DEFAULT_UNSTABLE)
return _addcounts_radix_sort_loop!(cm, sx)
end
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
##CHUNK 2
function addcounts_radixsort!(cm::Dict{T}, x::AbstractArray{T}) where T
# sort the x using radixsort
sx = sort(vec(x), alg=Base.DEFAULT_UNSTABLE)
# Delegate the loop to a separate function since sort might not
# be inferred in Julia 0.6 after SortingAlgorithms is loaded.
# It seems that sort is inferred in Julia 0.7.
return _addcounts_radix_sort_loop!(cm, sx)
end
# fall-back for `x` an iterator
function addcounts_radixsort!(cm::Dict{T}, x) where T
cx = vec(collect(x))
sx = sort!(cx, alg = Base.DEFAULT_UNSTABLE)
return _addcounts_radix_sort_loop!(cm, sx)
end
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
##CHUNK 3
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
if c != 0
index = ht_keyindex2!(cm, i)
if index > 0
@inbounds cm.vals[index] += c
else
@inbounds Base._setindex!(cm, c, i, -index)
end
end
end
cm
##CHUNK 4
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
##CHUNK 5
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 6
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
##CHUNK 7
function _addcounts!(::Type{Bool}, cm::Dict{Bool}, x; alg = :ignored)
sumx = 0
len = 0
for i in x
sumx += i
len += 1
end
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + len - sumx
cm
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
##CHUNK 8
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
|
389
| 405
|
StatsBase.jl
| 244
|
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
|
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
|
[
389,
405
] |
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
|
function addcounts!(cm::Dict{T}, x::AbstractArray{T}, wv::AbstractVector{W}) where {T,W<:Real}
# add wv weighted counts of integers from x to cm
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
|
addcounts!
| 389
| 405
|
src/counts.jl
|
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
end) @inbounds (@nref $N R j) += f((@nref $N A i) - (@nref $N means j)) * wi
end
return R
end
end
_wsum!(R::AbstractArray, A::AbstractArray, w::AbstractVector, dim::Int, init::Bool) =
_wsum_general!(R, identity, A, w, dim, init)
## wsum! and wsum
wsumtype(::Type{T}, ::Type{W}) where {T,W} = typeof(zero(T) * zero(W) + zero(T) * zero(W))
"""
wsum!(R::AbstractArray, A::AbstractArray,
w::AbstractVector, dim::Int;
init::Bool=true)
Compute the weighted sum of `A` with weights `w` over the dimension `dim` and store
the result in `R`. If `init=false`, the sum is added to `R` rather than starting
#CURRENT FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
##CHUNK 2
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
##CHUNK 3
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 4
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
##CHUNK 5
else
using Base: ht_keyindex2!
end
#### functions for counting a single list of integers (1D)
"""
addcounts!(r, x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
Add the number of occurrences in `x` of each value in `levels` to an existing
array `r`. For each `xi ∈ x`, if `xi == levels[j]`, then we increment `r[j]`.
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
##CHUNK 6
function counts end
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}) =
addcounts!(zeros(Int, length(levels)), x, levels)
counts(x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights) =
addcounts!(zeros(eltype(wv), length(levels)), x, levels, wv)
counts(x::AbstractArray{<:Integer}, k::Integer) = counts(x, 1:k)
counts(x::AbstractArray{<:Integer}, k::Integer, wv::AbstractWeights) = counts(x, 1:k, wv)
counts(x::AbstractArray{<:Integer}) = counts(x, span(x))
counts(x::AbstractArray{<:Integer}, wv::AbstractWeights) = counts(x, span(x), wv)
"""
proportions(x, levels=span(x), [wv::AbstractWeights])
Return the proportion of values in the range `levels` that occur in `x`.
Equivalent to `counts(x, levels) / length(x)`.
If a vector of weights `wv` is provided, the proportion of weights is computed rather
than the proportion of raw counts.
##CHUNK 7
r[xi - bx, yi - by] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
##CHUNK 8
function _addcounts!(::Type{Bool}, cm::Dict{Bool}, x; alg = :ignored)
sumx = 0
len = 0
for i in x
sumx += i
len += 1
end
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + len - sumx
cm
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
##CHUNK 9
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
# facet functions
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}})
addcounts!(zeros(Int, length(levels[1]), length(levels[2])), x, y, levels)
end
function counts(x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer}, levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
addcounts!(zeros(eltype(wv), length(levels[1]), length(levels[2])), x, y, levels, wv)
end
|
5
| 16
|
StatsBase.jl
| 245
|
function _symmetrize!(a::DenseMatrix)
m, n = size(a)
m == n || error("a must be a square matrix.")
for j = 1:n
@inbounds for i = j+1:n
vl = a[i,j]
vr = a[j,i]
a[i,j] = a[j,i] = middle(vl, vr)
end
end
return a
end
|
function _symmetrize!(a::DenseMatrix)
m, n = size(a)
m == n || error("a must be a square matrix.")
for j = 1:n
@inbounds for i = j+1:n
vl = a[i,j]
vr = a[j,i]
a[i,j] = a[j,i] = middle(vl, vr)
end
end
return a
end
|
[
5,
16
] |
function _symmetrize!(a::DenseMatrix)
m, n = size(a)
m == n || error("a must be a square matrix.")
for j = 1:n
@inbounds for i = j+1:n
vl = a[i,j]
vr = a[j,i]
a[i,j] = a[j,i] = middle(vl, vr)
end
end
return a
end
|
function _symmetrize!(a::DenseMatrix)
m, n = size(a)
m == n || error("a must be a square matrix.")
for j = 1:n
@inbounds for i = j+1:n
vl = a[i,j]
vr = a[j,i]
a[i,j] = a[j,i] = middle(vl, vr)
end
end
return a
end
|
_symmetrize!
| 5
| 16
|
src/cov.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
##CHUNK 2
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
##CHUNK 3
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
##CHUNK 4
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function check_vectors(x, y, skipmissing::Symbol)
m = length(x)
n = length(y)
if !(all(xi -> xi isa AbstractVector, x) && all(yi -> yi isa AbstractVector, y))
throw(ArgumentError("All entries in x and y must be vectors " *
"when skipmissing=:$skipmissing"))
end
if m > 1
indsx = keys(first(x))
##CHUNK 5
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
##CHUNK 6
end
end
if m > 1 && n > 1
indsx == indsy ||
throw(ArgumentError("All input vectors must have the same indices"))
end
end
function _pairwise!(::Val{:pairwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :pairwise)
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
#FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
i = 2
@inbounds while i <= n
vi = v[i]
if isequal(vi, cv)
cl += 1
else
push!(vals, cv)
push!(lens, cl)
cv = vi
cl = 1
end
i += 1
end
# the last section
push!(vals, cv)
push!(lens, cl)
return (vals, lens)
end
#CURRENT FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
##CHUNK 2
"""
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
# Preserve structure of Symmetric and Hermitian correlation matrices
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
|
142
| 157
|
StatsBase.jl
| 246
|
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
|
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
|
[
142,
157
] |
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
|
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
|
cov2cor!
| 142
| 157
|
src/cov.jl
|
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
##CHUNK 2
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
##CHUNK 3
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
##CHUNK 4
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
#CURRENT FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
cor2cov!(C, s)
Convert the correlation matrix `C` to a covariance matrix in-place using a vector of
standard deviations `s`.
"""
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
##CHUNK 2
"""
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
# Preserve structure of Symmetric and Hermitian covariance matrices
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
##CHUNK 3
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
end
end
return C
end
"""
##CHUNK 4
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
# Preserve structure of Symmetric and Hermitian correlation matrices
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
##CHUNK 5
end
return C
end
# Preserve structure of Symmetric and Hermitian correlation matrices
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
|
162
| 184
|
StatsBase.jl
| 247
|
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
end
end
return C
end
|
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
end
end
return C
end
|
[
162,
184
] |
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
end
end
return C
end
|
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
end
end
return C
end
|
cov2cor!
| 162
| 184
|
src/cov.jl
|
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
#FILE: StatsBase.jl/src/toeplitzsolvers.jl
##CHUNK 1
# Symmetric Toeplitz solver
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
##CHUNK 2
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
y[k+1] = α
end
return y
end
durbin(r::AbstractVector{T}) where {T<:BlasReal} = durbin!(r, zeros(T, length(r)))
function levinson!(r::AbstractVector{T}, b::AbstractVector{T}, x::AbstractVector{T}) where T<:BlasReal
n = length(b)
n == length(r) || throw(DimensionMismatch("Vectors must have same length"))
n <= length(x) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
##CHUNK 2
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for i = 1 : length(lags)
l = lags[i]
sX = view(tmpX, 1+l:lx, 1:l+1)
r[i,j] = l == 0 ? 1 : (cholesky!(sX'sX, Val(false)) \ (sX'view(X, 1+l:lx, j)))[end]
end
end
r
end
function pacf_yulewalker!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}, mk::Integer) where T<:Union{Float32, Float64}
tmp = Vector{T}(undef, mk)
for j = 1 : size(X,2)
acfs = autocor(X[:,j], 1:mk)
for i = 1 : length(lags)
l = lags[i]
r[i,j] = l == 0 ? 1 : l == 1 ? acfs[i] : -durbin!(view(acfs, 1:l), tmp)[l]
end
end
end
#CURRENT FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
##CHUNK 2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
# Preserve structure of Symmetric and Hermitian correlation matrices
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
##CHUNK 3
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
return C
##CHUNK 4
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
# Preserve structure of Symmetric and Hermitian correlation matrices
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
|
204
| 219
|
StatsBase.jl
| 248
|
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
|
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
|
[
204,
219
] |
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
|
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
|
cor2cov!
| 204
| 219
|
src/cov.jl
|
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
#CURRENT FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
##CHUNK 2
# Preserve structure of Symmetric and Hermitian covariance matrices
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
##CHUNK 3
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
# Preserve structure of Symmetric and Hermitian covariance matrices
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
##CHUNK 4
Convert the correlation matrix `C` to a covariance matrix in-place using a vector of
standard deviations `s`.
"""
# Preserve structure of Symmetric and Hermitian correlation matrices
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
##CHUNK 5
T = typeof(zero(eltype(C)) / (zs * zs))
return cov2cor!(copyto!(similar(C, T), C), s)
end
# Original implementation: https://github.com/JuliaStats/Statistics.jl/blob/22dee82f9824d6045e87aa4b97e1d64fe6f01d8d/src/Statistics.jl#L633-L657
"""
cov2cor!(C::AbstractMatrix, [s::AbstractArray])
Convert the covariance matrix `C` to a correlation matrix in-place, optionally using a vector of
standard deviations `s`.
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
##CHUNK 6
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
return C
end
"""
##CHUNK 7
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
return C
end
"""
CovarianceEstimator
Abstract type for covariance estimators.
"""
abstract type CovarianceEstimator end
"""
cov(ce::CovarianceEstimator, x::AbstractVector; mean=nothing)
Compute a variance estimate from the observation vector `x` using the estimator `ce`.
##CHUNK 8
## extended methods for computing covariance and scatter matrix
# auxiliary functions
function _symmetrize!(a::DenseMatrix)
m, n = size(a)
m == n || error("a must be a square matrix.")
for j = 1:n
@inbounds for i = j+1:n
vl = a[i,j]
vr = a[j,i]
a[i,j] = a[j,i] = middle(vl, vr)
end
end
return a
end
function _scalevars(x::DenseMatrix, s::AbstractWeights, dims::Int)
dims == 1 ? Diagonal(s) * x :
dims == 2 ? x * Diagonal(s) :
##CHUNK 9
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
end
end
return C
end
"""
cor2cov(C, s)
Compute the covariance matrix from the correlation matrix `C` and a vector of standard
deviations `s`. Use [`StatsBase.cor2cov!`](@ref) for an in-place version.
|
222
| 244
|
StatsBase.jl
| 249
|
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
return C
end
|
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
return C
end
|
[
222,
244
] |
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
return C
end
|
function cor2cov!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j in 1:n
sj = s[j]
for i in 1:(j-1)
A[i,j] *= s[i] * sj
end
A[j,j] = sj^2
end
else
for j in 1:n
sj = s[j]
A[j,j] = sj^2
for i in (j+1):n
A[i,j] *= s[i] * sj
end
end
end
return C
end
|
cor2cov!
| 222
| 244
|
src/cov.jl
|
#FILE: StatsBase.jl/src/toeplitzsolvers.jl
##CHUNK 1
# Symmetric Toeplitz solver
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
##CHUNK 2
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
return x
end
levinson(r::AbstractVector{T}, b::AbstractVector{T}) where {T<:BlasReal} = levinson!(r, copy(b), zeros(T, length(b)))
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
#CURRENT FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
# Preserve structure of Symmetric and Hermitian covariance matrices
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
A[j,j] = oneunit(A[j,j])
end
else
for j = 1:n
sj = s[j]
A[j,j] = oneunit(A[j,j])
for i = (j+1):n
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
end
##CHUNK 2
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
# Preserve structure of Symmetric and Hermitian covariance matrices
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
##CHUNK 3
Convert the correlation matrix `C` to a covariance matrix in-place using a vector of
standard deviations `s`.
"""
function cor2cov!(C::AbstractMatrix, s::AbstractArray)
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j in 1:n
sj = s[j]
for i in 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
##CHUNK 4
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
##CHUNK 5
C[i,j] = adjoint(C[j,i])
end
C[j,j] = sj^2
for i in (j+1):n
C[i,j] *= s[i] * sj
end
end
return C
end
# Preserve structure of Symmetric and Hermitian correlation matrices
"""
CovarianceEstimator
Abstract type for covariance estimators.
"""
abstract type CovarianceEstimator end
"""
##CHUNK 6
T = typeof(zero(eltype(C)) / (zs * zs))
return cov2cor!(copyto!(similar(C, T), C), s)
end
# Original implementation: https://github.com/JuliaStats/Statistics.jl/blob/22dee82f9824d6045e87aa4b97e1d64fe6f01d8d/src/Statistics.jl#L633-L657
"""
cov2cor!(C::AbstractMatrix, [s::AbstractArray])
Convert the covariance matrix `C` to a correlation matrix in-place, optionally using a vector of
standard deviations `s`.
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
|
11
| 21
|
StatsBase.jl
| 250
|
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
end
|
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
end
|
[
11,
21
] |
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
end
|
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
end
|
counteq
| 11
| 21
|
src/deviation.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
##CHUNK 2
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
#FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
m = length(vals)
mlens = length(lens)
mlens == m || throw(DimensionMismatch(
"number of vals ($m) does not match the number of lens ($mlens)"))
n = sum(lens)
n >= 0 || throw(ArgumentError("lengths must be non-negative"))
r = Vector{T}(undef, n)
p = 0
@inbounds for i = 1 : m
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
#CURRENT FILE: StatsBase.jl/src/deviation.jl
##CHUNK 1
"""
countne(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are not equal.
"""
function countne(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
##CHUNK 2
# Computing deviation in a variety of ways
## count the number of equal/non-equal pairs
"""
counteq(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are equal.
"""
"""
countne(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are not equal.
"""
function countne(a::AbstractArray, b::AbstractArray)
n = length(a)
##CHUNK 3
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
"""
sqL2dist(a, b)
Compute the squared L2 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|^2``.
Efficient equivalent of `sum(abs2, a - b)`.
"""
function sqL2dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
##CHUNK 4
# Computing deviation in a variety of ways
## count the number of equal/non-equal pairs
"""
counteq(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are equal.
"""
##CHUNK 5
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
Compute the L∞ distance, also called the Chebyshev distance, between
two arrays: ``\\max_{1≤i≤n} |a_i - b_i|``.
Efficient equivalent of `maxabs(a - b)`.
"""
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds v = abs(a[i] - b[i])
##CHUNK 6
L1dist(a, b)
Compute the L1 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|``.
Efficient equivalent of `sum(abs, a - b)`.
"""
function L1dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
##CHUNK 7
"""
sqL2dist(a, b)
Compute the squared L2 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|^2``.
Efficient equivalent of `sum(abs2, a - b)`.
"""
function sqL2dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds r += abs2(a[i] - b[i])
end
return r
end
# L2 distance
"""
|
30
| 40
|
StatsBase.jl
| 251
|
function countne(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
|
function countne(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
|
[
30,
40
] |
function countne(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
|
function countne(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
|
countne
| 30
| 40
|
src/deviation.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
##CHUNK 2
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
#FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
m = length(vals)
mlens = length(lens)
mlens == m || throw(DimensionMismatch(
"number of vals ($m) does not match the number of lens ($mlens)"))
n = sum(lens)
n >= 0 || throw(ArgumentError("lengths must be non-negative"))
r = Vector{T}(undef, n)
p = 0
@inbounds for i = 1 : m
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
"""
countmap(x; alg = :auto)
countmap(x::AbstractVector, wv::AbstractVector{<:Real})
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function check_vectors(x, y, skipmissing::Symbol)
m = length(x)
n = length(y)
if !(all(xi -> xi isa AbstractVector, x) && all(yi -> yi isa AbstractVector, y))
throw(ArgumentError("All entries in x and y must be vectors " *
"when skipmissing=:$skipmissing"))
end
if m > 1
indsx = keys(first(x))
#CURRENT FILE: StatsBase.jl/src/deviation.jl
##CHUNK 1
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
end
"""
countne(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are not equal.
"""
##CHUNK 2
# Computing deviation in a variety of ways
## count the number of equal/non-equal pairs
"""
counteq(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are equal.
"""
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
##CHUNK 3
end
"""
countne(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are not equal.
"""
"""
sqL2dist(a, b)
Compute the squared L2 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|^2``.
Efficient equivalent of `sum(abs2, a - b)`.
"""
function sqL2dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
##CHUNK 4
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
Compute the L∞ distance, also called the Chebyshev distance, between
two arrays: ``\\max_{1≤i≤n} |a_i - b_i|``.
Efficient equivalent of `maxabs(a - b)`.
"""
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds v = abs(a[i] - b[i])
##CHUNK 5
L1dist(a, b)
Compute the L1 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|``.
Efficient equivalent of `sum(abs, a - b)`.
"""
function L1dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
|
96
| 107
|
StatsBase.jl
| 252
|
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds v = abs(a[i] - b[i])
if r < v
r = v
end
end
return r
end
|
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds v = abs(a[i] - b[i])
if r < v
r = v
end
end
return r
end
|
[
96,
107
] |
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds v = abs(a[i] - b[i])
if r < v
r = v
end
end
return r
end
|
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds v = abs(a[i] - b[i])
if r < v
r = v
end
end
return r
end
|
Linfdist
| 96
| 107
|
src/deviation.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
#CURRENT FILE: StatsBase.jl/src/deviation.jl
##CHUNK 1
"""
L1dist(a, b)
Compute the L1 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|``.
Efficient equivalent of `sum(abs, a - b)`.
"""
function L1dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
##CHUNK 2
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
"""
sqL2dist(a, b)
Compute the squared L2 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|^2``.
Efficient equivalent of `sum(abs2, a - b)`.
"""
function sqL2dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
##CHUNK 3
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds r += abs2(a[i] - b[i])
end
return r
end
# L2 distance
"""
L2dist(a, b)
Compute the L2 distance between two arrays: ``\\sqrt{\\sum_{i=1}^n |a_i - b_i|^2}``.
Efficient equivalent of `sqrt(sum(abs2, a - b))`.
"""
L2dist(a::AbstractArray{T}, b::AbstractArray{T}) where {T<:Number} = sqrt(sqL2dist(a, b))
# L1 distance
##CHUNK 4
"""
sqL2dist(a, b)
Compute the squared L2 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|^2``.
Efficient equivalent of `sum(abs2, a - b)`.
"""
function sqL2dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds r += abs2(a[i] - b[i])
end
return r
end
# L2 distance
##CHUNK 5
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
end
"""
countne(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are not equal.
"""
function countne(a::AbstractArray, b::AbstractArray)
##CHUNK 6
end
"""
countne(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are not equal.
"""
function countne(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] != b[i]
c += 1
end
end
return c
end
##CHUNK 7
for i in eachindex(a, b)
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
Compute the L∞ distance, also called the Chebyshev distance, between
two arrays: ``\\max_{1≤i≤n} |a_i - b_i|``.
Efficient equivalent of `maxabs(a - b)`.
"""
# Generalized KL-divergence
"""
gkldiv(a, b)
##CHUNK 8
@inbounds bi = b[i]
if ai > 0
r += (ai * log(ai / bi) - ai + bi)
else
r += bi
end
end
return r::Float64
end
# MeanAD: mean absolute deviation
"""
meanad(a, b)
Return the mean absolute deviation between two arrays: `mean(abs, a - b)`.
"""
meanad(a::AbstractArray{T}, b::AbstractArray{T}) where {T<:Number} =
L1dist(a, b) / length(a)
##CHUNK 9
# Computing deviation in a variety of ways
## count the number of equal/non-equal pairs
"""
counteq(a, b)
Count the number of indices at which the elements of the arrays
`a` and `b` are equal.
"""
function counteq(a::AbstractArray, b::AbstractArray)
n = length(a)
length(b) == n || throw(DimensionMismatch("Inconsistent lengths."))
c = 0
for i in eachindex(a, b)
@inbounds if a[i] == b[i]
c += 1
end
end
return c
|
118
| 131
|
StatsBase.jl
| 253
|
function gkldiv(a::AbstractArray{T}, b::AbstractArray{T}) where T<:AbstractFloat
n = length(a)
r = 0.0
for i in eachindex(a, b)
@inbounds ai = a[i]
@inbounds bi = b[i]
if ai > 0
r += (ai * log(ai / bi) - ai + bi)
else
r += bi
end
end
return r::Float64
end
|
function gkldiv(a::AbstractArray{T}, b::AbstractArray{T}) where T<:AbstractFloat
n = length(a)
r = 0.0
for i in eachindex(a, b)
@inbounds ai = a[i]
@inbounds bi = b[i]
if ai > 0
r += (ai * log(ai / bi) - ai + bi)
else
r += bi
end
end
return r::Float64
end
|
[
118,
131
] |
function gkldiv(a::AbstractArray{T}, b::AbstractArray{T}) where T<:AbstractFloat
n = length(a)
r = 0.0
for i in eachindex(a, b)
@inbounds ai = a[i]
@inbounds bi = b[i]
if ai > 0
r += (ai * log(ai / bi) - ai + bi)
else
r += bi
end
end
return r::Float64
end
|
function gkldiv(a::AbstractArray{T}, b::AbstractArray{T}) where T<:AbstractFloat
n = length(a)
r = 0.0
for i in eachindex(a, b)
@inbounds ai = a[i]
@inbounds bi = b[i]
if ai > 0
r += (ai * log(ai / bi) - ai + bi)
else
r += bi
end
end
return r::Float64
end
|
gkldiv
| 118
| 131
|
src/deviation.jl
|
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
piqi = iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi
return xlogy(pi, piqi)
end
return sum(Broadcast.instantiate(broadcasted))
end
kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
kldivergence(p,q) / log(b)
#############################
##CHUNK 2
"""
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not "*
"represent proper probability distributions",
:kldivergence,
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
##CHUNK 3
# return zero for empty arrays
return xlogy(zero(eltype(p)), zero(eltype(q)))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(xlogy, vec(p), vec(q))
return - sum(Broadcast.instantiate(broadcasted))
end
crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
crossentropy(p,q) / log(b)
"""
kldivergence(p, q, [b])
Compute the Kullback-Leibler divergence from `q` to `p`,
also called the relative entropy of `p` with respect to `q`,
that is the sum `pᵢ * log(pᵢ / qᵢ)`. Optionally a real number `b`
can be specified such that the divergence is scaled by `1/log(b)`.
##CHUNK 4
crossentropy(p,q) / log(b)
"""
kldivergence(p, q, [b])
Compute the Kullback-Leibler divergence from `q` to `p`,
also called the relative entropy of `p` with respect to `q`,
that is the sum `pᵢ * log(pᵢ / qᵢ)`. Optionally a real number `b`
can be specified such that the divergence is scaled by `1/log(b)`.
"""
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not "*
"represent proper probability distributions",
:kldivergence,
##CHUNK 5
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
"""
crossentropy(p, q, [b])
Compute the cross entropy between `p` and `q`, optionally specifying a real
number `b` such that the result is scaled by `1/log(b)`.
"""
#FILE: StatsBase.jl/src/toeplitzsolvers.jl
##CHUNK 1
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
return x
end
levinson(r::AbstractVector{T}, b::AbstractVector{T}) where {T<:BlasReal} = levinson!(r, copy(b), zeros(T, length(b)))
#CURRENT FILE: StatsBase.jl/src/deviation.jl
##CHUNK 1
"""
L1dist(a, b)
Compute the L1 distance between two arrays: ``\\sum_{i=1}^n |a_i - b_i|``.
Efficient equivalent of `sum(abs, a - b)`.
"""
function L1dist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
##CHUNK 2
for i in eachindex(a, b)
@inbounds r += abs(a[i] - b[i])
end
return r
end
# Linf distance
"""
Linfdist(a, b)
Compute the L∞ distance, also called the Chebyshev distance, between
two arrays: ``\\max_{1≤i≤n} |a_i - b_i|``.
Efficient equivalent of `maxabs(a - b)`.
"""
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
##CHUNK 3
Compute the L∞ distance, also called the Chebyshev distance, between
two arrays: ``\\max_{1≤i≤n} |a_i - b_i|``.
Efficient equivalent of `maxabs(a - b)`.
"""
function Linfdist(a::AbstractArray{T}, b::AbstractArray{T}) where T<:Number
n = length(a)
length(b) == n || throw(DimensionMismatch("Input dimension mismatch"))
r = 0.0
for i in eachindex(a, b)
@inbounds v = abs(a[i] - b[i])
if r < v
r = v
end
end
return r
end
# Generalized KL-divergence
##CHUNK 4
"""
gkldiv(a, b)
Compute the generalized Kullback-Leibler divergence between two arrays:
``\\sum_{i=1}^n (a_i \\log(a_i/b_i) - a_i + b_i)``.
Efficient equivalent of `sum(a*log(a/b)-a+b)`.
"""
# MeanAD: mean absolute deviation
"""
meanad(a, b)
Return the mean absolute deviation between two arrays: `mean(abs, a - b)`.
"""
meanad(a::AbstractArray{T}, b::AbstractArray{T}) where {T<:Number} =
L1dist(a, b) / length(a)
# MaxAD: maximum absolute deviation
|
19
| 42
|
StatsBase.jl
| 254
|
function (ecdf::ECDF)(v::AbstractVector{<:Real})
evenweights = isempty(ecdf.weights)
weightsum = evenweights ? length(ecdf.sorted_values) : sum(ecdf.weights)
ord = sortperm(v)
m = length(v)
r = similar(ecdf.sorted_values, m)
r0 = zero(weightsum)
i = 1
for (j, x) in enumerate(ecdf.sorted_values)
while i <= m && x > v[ord[i]]
r[ord[i]] = r0
i += 1
end
r0 += evenweights ? 1 : ecdf.weights[j]
if i > m
break
end
end
while i <= m
r[ord[i]] = weightsum
i += 1
end
return r / weightsum
end
|
function (ecdf::ECDF)(v::AbstractVector{<:Real})
evenweights = isempty(ecdf.weights)
weightsum = evenweights ? length(ecdf.sorted_values) : sum(ecdf.weights)
ord = sortperm(v)
m = length(v)
r = similar(ecdf.sorted_values, m)
r0 = zero(weightsum)
i = 1
for (j, x) in enumerate(ecdf.sorted_values)
while i <= m && x > v[ord[i]]
r[ord[i]] = r0
i += 1
end
r0 += evenweights ? 1 : ecdf.weights[j]
if i > m
break
end
end
while i <= m
r[ord[i]] = weightsum
i += 1
end
return r / weightsum
end
|
[
19,
42
] |
function (ecdf::ECDF)(v::AbstractVector{<:Real})
evenweights = isempty(ecdf.weights)
weightsum = evenweights ? length(ecdf.sorted_values) : sum(ecdf.weights)
ord = sortperm(v)
m = length(v)
r = similar(ecdf.sorted_values, m)
r0 = zero(weightsum)
i = 1
for (j, x) in enumerate(ecdf.sorted_values)
while i <= m && x > v[ord[i]]
r[ord[i]] = r0
i += 1
end
r0 += evenweights ? 1 : ecdf.weights[j]
if i > m
break
end
end
while i <= m
r[ord[i]] = weightsum
i += 1
end
return r / weightsum
end
|
function (ecdf::ECDF)(v::AbstractVector{<:Real})
evenweights = isempty(ecdf.weights)
weightsum = evenweights ? length(ecdf.sorted_values) : sum(ecdf.weights)
ord = sortperm(v)
m = length(v)
r = similar(ecdf.sorted_values, m)
r0 = zero(weightsum)
i = 1
for (j, x) in enumerate(ecdf.sorted_values)
while i <= m && x > v[ord[i]]
r[ord[i]] = r0
i += 1
end
r0 += evenweights ? 1 : ecdf.weights[j]
if i > m
break
end
end
while i <= m
r[ord[i]] = weightsum
i += 1
end
return r / weightsum
end
|
unknown_function
| 19
| 42
|
src/empirical.jl
|
#FILE: StatsBase.jl/test/empirical.jl
##CHUNK 1
@test extrema(fnecdf) == (minimum(fnecdf), maximum(fnecdf)) == extrema(x)
fnecdf = ecdf([0.5])
@test fnecdf([zeros(5000); ones(5000)]) == [zeros(5000); ones(5000)]
@test extrema(fnecdf) == (minimum(fnecdf), maximum(fnecdf)) == (0.5, 0.5)
@test isnan(ecdf([1,2,3])(NaN))
@test_throws ArgumentError ecdf([1, NaN])
end
@testset "Weighted ECDF" begin
x = randn(10000000)
w1 = rand(10000000)
w2 = weights(w1)
fnecdf = ecdf(x, weights=w1)
fnecdfalt = ecdf(x, weights=w2)
@test fnecdf.sorted_values == fnecdfalt.sorted_values
@test fnecdf.weights == fnecdfalt.weights
@test fnecdf.weights != w1 # check that w wasn't accidentally modified in place
@test fnecdfalt.weights != w2
y = [-1.96, -1.644854, -1.281552, -0.6744898, 0, 0.6744898, 1.281552, 1.644854, 1.96]
@test isapprox(fnecdf(y), [0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975], atol=1e-3)
##CHUNK 2
@test isapprox(fnecdf(1.96), 0.975, atol=1e-3)
@test fnecdf(y) ≈ map(fnecdf, y)
@test extrema(fnecdf) == (minimum(fnecdf), maximum(fnecdf)) == extrema(x)
fnecdf = ecdf([1.0, 0.5], weights=weights([3, 1]))
@test fnecdf(0.75) == 0.25
@test extrema(fnecdf) == (minimum(fnecdf), maximum(fnecdf)) == (0.5, 1.0)
@test_throws ArgumentError ecdf(rand(8), weights=weights(rand(10)))
# Check frequency weights
v = randn(100)
r = rand(1:100, 100)
vv = vcat(fill.(v, r)...) # repeat elements of v according to r
fw = fweights(r)
frecdf1 = ecdf(v, weights=fw)
frecdf2 = ecdf(vv)
@test frecdf1(y) ≈ frecdf2(y)
# Check probability weights
a = randn(100)
b = rand(100)
b̃ = abs(10randn()) * b
bw1 = pweights(b)
##CHUNK 3
w1 = rand(10000000)
w2 = weights(w1)
fnecdf = ecdf(x, weights=w1)
fnecdfalt = ecdf(x, weights=w2)
@test fnecdf.sorted_values == fnecdfalt.sorted_values
@test fnecdf.weights == fnecdfalt.weights
@test fnecdf.weights != w1 # check that w wasn't accidentally modified in place
@test fnecdfalt.weights != w2
y = [-1.96, -1.644854, -1.281552, -0.6744898, 0, 0.6744898, 1.281552, 1.644854, 1.96]
@test isapprox(fnecdf(y), [0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975], atol=1e-3)
@test isapprox(fnecdf(1.96), 0.975, atol=1e-3)
@test fnecdf(y) ≈ map(fnecdf, y)
@test extrema(fnecdf) == (minimum(fnecdf), maximum(fnecdf)) == extrema(x)
fnecdf = ecdf([1.0, 0.5], weights=weights([3, 1]))
@test fnecdf(0.75) == 0.25
@test extrema(fnecdf) == (minimum(fnecdf), maximum(fnecdf)) == (0.5, 1.0)
@test_throws ArgumentError ecdf(rand(8), weights=weights(rand(10)))
# Check frequency weights
v = randn(100)
r = rand(1:100, 100)
#FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
function cumulant(v::AbstractArray{<:Real}, krange::Union{Integer, AbstractRange{<:Integer}}, wv::AbstractWeights,
m::Real=mean(v, wv))
if minimum(krange) <= 0
throw(ArgumentError("Cumulant orders must be strictly positive."))
end
k = maximum(krange)
cmoms = zeros(typeof(m), k)
cumls = zeros(typeof(m), k)
cmoms[1] = 0
cumls[1] = m
for i = 2:k
kn = wv isa UnitWeights ? moment(v, i, m) : moment(v, i, wv, m)
cmoms[i] = kn
for j = 2:i-2
kn -= binomial(i-1, j)*cmoms[j]*cumls[i-j]
end
cumls[i] = kn
end
return cumls[krange]
end
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
end
return x
end
efraimidis_a_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
efraimidis_a_wsample_norep!(default_rng(), a, wv, x)
# Weighted sampling without replacement
# Instead of keys u^(1/w) where u = random(0,1) keys w/v where v = randexp(1) are used.
"""
efraimidis_ares_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
##CHUNK 2
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
#CURRENT FILE: StatsBase.jl/src/empirical.jl
##CHUNK 1
# Empirical estimation of CDF and PDF
## Empirical CDF
struct ECDF{T <: AbstractVector{<:Real}, W <: AbstractWeights{<:Real}}
sorted_values::T
weights::W
end
function (ecdf::ECDF)(x::Real)
isnan(x) && return NaN
n = searchsortedlast(ecdf.sorted_values, x)
evenweights = isempty(ecdf.weights)
weightsum = evenweights ? length(ecdf.sorted_values) : sum(ecdf.weights)
partialsum = evenweights ? n : sum(view(ecdf.weights, 1:n))
partialsum / weightsum
end
"""
##CHUNK 2
"""
function ecdf(X::AbstractVector{<:Real}; weights::AbstractVector{<:Real}=Weights(Float64[]))
any(isnan, X) && throw(ArgumentError("ecdf can not include NaN values"))
isempty(weights) || length(X) == length(weights) || throw(ArgumentError("data and weight vectors must be the same size," *
"got $(length(X)) and $(length(weights))"))
ord = sortperm(X)
ECDF(X[ord], isempty(weights) ? weights : Weights(weights[ord]))
end
minimum(ecdf::ECDF) = first(ecdf.sorted_values)
maximum(ecdf::ECDF) = last(ecdf.sorted_values)
extrema(ecdf::ECDF) = (minimum(ecdf), maximum(ecdf))
##CHUNK 3
isnan(x) && return NaN
n = searchsortedlast(ecdf.sorted_values, x)
evenweights = isempty(ecdf.weights)
weightsum = evenweights ? length(ecdf.sorted_values) : sum(ecdf.weights)
partialsum = evenweights ? n : sum(view(ecdf.weights, 1:n))
partialsum / weightsum
end
"""
ecdf(X; weights::AbstractWeights)
Return an empirical cumulative distribution function (ECDF) based on a vector of samples
given in `X`. Optionally providing `weights` returns a weighted ECDF.
Note: this function that returns a callable composite type, which can then be applied to
evaluate CDF values on other samples.
`extrema`, `minimum`, and `maximum` are supported to for obtaining the range over which
function is inside the interval ``(0,1)``; the function is defined for the whole real line.
##CHUNK 4
ecdf(X; weights::AbstractWeights)
Return an empirical cumulative distribution function (ECDF) based on a vector of samples
given in `X`. Optionally providing `weights` returns a weighted ECDF.
Note: this function that returns a callable composite type, which can then be applied to
evaluate CDF values on other samples.
`extrema`, `minimum`, and `maximum` are supported to for obtaining the range over which
function is inside the interval ``(0,1)``; the function is defined for the whole real line.
"""
function ecdf(X::AbstractVector{<:Real}; weights::AbstractVector{<:Real}=Weights(Float64[]))
any(isnan, X) && throw(ArgumentError("ecdf can not include NaN values"))
isempty(weights) || length(X) == length(weights) || throw(ArgumentError("data and weight vectors must be the same size," *
"got $(length(X)) and $(length(weights))"))
ord = sortperm(X)
ECDF(X[ord], isempty(weights) ? weights : Weights(weights[ord]))
end
minimum(ecdf::ECDF) = first(ecdf.sorted_values)
|
21
| 50
|
StatsBase.jl
| 255
|
function rle(v::AbstractVector{T}) where T
n = length(v)
vals = T[]
lens = Int[]
n>0 || return (vals,lens)
cv = v[1]
cl = 1
i = 2
@inbounds while i <= n
vi = v[i]
if isequal(vi, cv)
cl += 1
else
push!(vals, cv)
push!(lens, cl)
cv = vi
cl = 1
end
i += 1
end
# the last section
push!(vals, cv)
push!(lens, cl)
return (vals, lens)
end
|
function rle(v::AbstractVector{T}) where T
n = length(v)
vals = T[]
lens = Int[]
n>0 || return (vals,lens)
cv = v[1]
cl = 1
i = 2
@inbounds while i <= n
vi = v[i]
if isequal(vi, cv)
cl += 1
else
push!(vals, cv)
push!(lens, cl)
cv = vi
cl = 1
end
i += 1
end
# the last section
push!(vals, cv)
push!(lens, cl)
return (vals, lens)
end
|
[
21,
50
] |
function rle(v::AbstractVector{T}) where T
n = length(v)
vals = T[]
lens = Int[]
n>0 || return (vals,lens)
cv = v[1]
cl = 1
i = 2
@inbounds while i <= n
vi = v[i]
if isequal(vi, cv)
cl += 1
else
push!(vals, cv)
push!(lens, cl)
cv = vi
cl = 1
end
i += 1
end
# the last section
push!(vals, cv)
push!(lens, cl)
return (vals, lens)
end
|
function rle(v::AbstractVector{T}) where T
n = length(v)
vals = T[]
lens = Int[]
n>0 || return (vals,lens)
cv = v[1]
cl = 1
i = 2
@inbounds while i <= n
vi = v[i]
if isequal(vi, cv)
cl += 1
else
push!(vals, cv)
push!(lens, cl)
cv = vi
cl = 1
end
i += 1
end
# the last section
push!(vals, cv)
push!(lens, cl)
return (vals, lens)
end
|
rle
| 21
| 50
|
src/misc.jl
|
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(xv, yv, wv)
xi = xv[i]
yi = yv[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += wv[i]
end
end
return r
end
##CHUNK 2
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
#FILE: StatsBase.jl/test/misc.jl
##CHUNK 1
using StatsBase
using SparseArrays, Test
# rle & inverse_rle
z = [1, 1, 2, 2, 2, 3, 1, 2, 2, 3, 3, 3, 3]
(vals, lens) = rle(z)
@test vals == [1, 2, 3, 1, 2, 3]
@test lens == [2, 3, 1, 1, 2, 4]
@test inverse_rle(vals, lens) == z
@test_throws ArgumentError inverse_rle(vals, fill(-1, length(lens)))
@test_throws DimensionMismatch inverse_rle(vals, [1])
z = [true, true, false, false, true, false, true, true, true]
vals, lens = rle(z)
@test vals == [true, false, true, false, true]
@test lens == [2, 2, 1, 1, 3]
@test inverse_rle(vals, lens) == z
z = BitArray([true, true, false, false, true])
##CHUNK 2
(vals, lens) = rle(z)
@test vals == [true, false, true]
@test lens == [2, 2, 1]
z = [1, 1, 2, missing, 2, 3, 1, missing, missing, 3, 3, 3, 3]
vals, lens = rle(z)
@test isequal(vals, [1, 2, missing, 2, 3, 1, missing, 3])
@test lens == [2, 1, 1, 1, 1, 1, 2, 4]
@test isequal(inverse_rle(vals, lens), z)
# levelsmap
a = [1, 1, 2, 2, 2, 3, 1, 2, 2, 3, 3, 3, 3, 2]
b = [true, false, false, true, false, true, true, false]
@test levelsmap(a) == Dict(2=>2, 3=>3, 1=>1)
@test levelsmap(b) == Dict(false=>2, true=>1)
# indicatormat
II = [false true false false false;
##CHUNK 3
@test_throws ArgumentError inverse_rle(vals, fill(-1, length(lens)))
@test_throws DimensionMismatch inverse_rle(vals, [1])
z = [true, true, false, false, true, false, true, true, true]
vals, lens = rle(z)
@test vals == [true, false, true, false, true]
@test lens == [2, 2, 1, 1, 3]
@test inverse_rle(vals, lens) == z
z = BitArray([true, true, false, false, true])
(vals, lens) = rle(z)
@test vals == [true, false, true]
@test lens == [2, 2, 1]
z = [1, 1, 2, missing, 2, 3, 1, missing, missing, 3, 3, 3, 3]
vals, lens = rle(z)
@test isequal(vals, [1, 2, missing, 2, 3, 1, missing, 3])
@test lens == [2, 1, 1, 1, 1, 1, 2, 4]
@test isequal(inverse_rle(vals, lens), z)
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
#FILE: StatsBase.jl/src/deprecates.jl
##CHUNK 1
ac = n / wsum
for i = 1:n
@inbounds a[i] = w[i] * ac
end
larges = Vector{Int}(undef, n)
smalls = Vector{Int}(undef, n)
kl = 0 # actual number of larges
ks = 0 # actual number of smalls
for i = 1:n
@inbounds ai = a[i]
if ai > 1.0
larges[kl+=1] = i # push to larges
elseif ai < 1.0
smalls[ks+=1] = i # push to smalls
end
end
#CURRENT FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
m = length(vals)
mlens = length(lens)
mlens == m || throw(DimensionMismatch(
"number of vals ($m) does not match the number of lens ($mlens)"))
n = sum(lens)
n >= 0 || throw(ArgumentError("lengths must be non-negative"))
r = Vector{T}(undef, n)
p = 0
@inbounds for i = 1 : m
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
|
60
| 80
|
StatsBase.jl
| 256
|
function inverse_rle(vals::AbstractVector{T}, lens::AbstractVector{<:Integer}) where T
m = length(vals)
mlens = length(lens)
mlens == m || throw(DimensionMismatch(
"number of vals ($m) does not match the number of lens ($mlens)"))
n = sum(lens)
n >= 0 || throw(ArgumentError("lengths must be non-negative"))
r = Vector{T}(undef, n)
p = 0
@inbounds for i = 1 : m
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
|
function inverse_rle(vals::AbstractVector{T}, lens::AbstractVector{<:Integer}) where T
m = length(vals)
mlens = length(lens)
mlens == m || throw(DimensionMismatch(
"number of vals ($m) does not match the number of lens ($mlens)"))
n = sum(lens)
n >= 0 || throw(ArgumentError("lengths must be non-negative"))
r = Vector{T}(undef, n)
p = 0
@inbounds for i = 1 : m
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
|
[
60,
80
] |
function inverse_rle(vals::AbstractVector{T}, lens::AbstractVector{<:Integer}) where T
m = length(vals)
mlens = length(lens)
mlens == m || throw(DimensionMismatch(
"number of vals ($m) does not match the number of lens ($mlens)"))
n = sum(lens)
n >= 0 || throw(ArgumentError("lengths must be non-negative"))
r = Vector{T}(undef, n)
p = 0
@inbounds for i = 1 : m
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
|
function inverse_rle(vals::AbstractVector{T}, lens::AbstractVector{<:Integer}) where T
m = length(vals)
mlens = length(lens)
mlens == m || throw(DimensionMismatch(
"number of vals ($m) does not match the number of lens ($mlens)"))
n = sum(lens)
n >= 0 || throw(ArgumentError("lengths must be non-negative"))
r = Vector{T}(undef, n)
p = 0
@inbounds for i = 1 : m
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
|
inverse_rle
| 60
| 80
|
src/misc.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 2
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
##CHUNK 3
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 4
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 5
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
#FILE: StatsBase.jl/test/misc.jl
##CHUNK 1
using StatsBase
using SparseArrays, Test
# rle & inverse_rle
z = [1, 1, 2, 2, 2, 3, 1, 2, 2, 3, 3, 3, 3]
(vals, lens) = rle(z)
@test vals == [1, 2, 3, 1, 2, 3]
@test lens == [2, 3, 1, 1, 2, 4]
@test inverse_rle(vals, lens) == z
##CHUNK 2
@test_throws ArgumentError inverse_rle(vals, fill(-1, length(lens)))
@test_throws DimensionMismatch inverse_rle(vals, [1])
z = [true, true, false, false, true, false, true, true, true]
vals, lens = rle(z)
@test vals == [true, false, true, false, true]
@test lens == [2, 2, 1, 1, 3]
@test inverse_rle(vals, lens) == z
z = BitArray([true, true, false, false, true])
(vals, lens) = rle(z)
@test vals == [true, false, true]
@test lens == [2, 2, 1]
z = [1, 1, 2, missing, 2, 3, 1, missing, missing, 3, 3, 3, 3]
vals, lens = rle(z)
@test isequal(vals, [1, 2, missing, 2, 3, 1, missing, 3])
@test lens == [2, 1, 1, 1, 1, 1, 2, 4]
@test isequal(inverse_rle(vals, lens), z)
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
##CHUNK 2
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
checkbounds(r, axes(xlevels, 1), axes(ylevels, 1))
mx0 = first(xlevels)
mx1 = last(xlevels)
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
#CURRENT FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
function rle(v::AbstractVector{T}) where T
n = length(v)
vals = T[]
lens = Int[]
n>0 || return (vals,lens)
cv = v[1]
cl = 1
i = 2
@inbounds while i <= n
vi = v[i]
if isequal(vi, cv)
cl += 1
else
push!(vals, cv)
push!(lens, cl)
cv = vi
cl = 1
|
107
| 118
|
StatsBase.jl
| 257
|
function levelsmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
index = 1
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = index
index += 1
end
end
return d
end
|
function levelsmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
index = 1
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = index
index += 1
end
end
return d
end
|
[
107,
118
] |
function levelsmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
index = 1
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = index
index += 1
end
end
return d
end
|
function levelsmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
index = 1
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = index
index += 1
end
end
return d
end
|
levelsmap
| 107
| 118
|
src/misc.jl
|
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
if c != 0
index = ht_keyindex2!(cm, i)
if index > 0
@inbounds cm.vals[index] += c
else
@inbounds Base._setindex!(cm, c, i, -index)
end
end
end
cm
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
##CHUNK 2
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
##CHUNK 3
faster than Knuth's algorithm especially when `n` is greater than `k`.
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
##CHUNK 4
memory space. Suitable for the case where memory is tight.
"""
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function check_vectors(x, y, skipmissing::Symbol)
m = length(x)
n = length(y)
if !(all(xi -> xi isa AbstractVector, x) && all(yi -> yi isa AbstractVector, y))
throw(ArgumentError("All entries in x and y must be vectors " *
"when skipmissing=:$skipmissing"))
end
if m > 1
indsx = keys(first(x))
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
@generated function _wsum_general!(R::AbstractArray{RT}, f::supertype(typeof(abs)),
A::AbstractArray{T,N}, w::AbstractVector{WT}, dim::Int, init::Bool) where {T,RT,WT,N}
quote
init && fill!(R, zero(RT))
wi = zero(WT)
if dim == 1
@nextract $N sizeR d->size(R,d)
sizA1 = size(A, 1)
@nloops $N i d->(d>1 ? (1:size(A,d)) : (1:1)) d->(j_d = sizeR_d==1 ? 1 : i_d) begin
@inbounds r = (@nref $N R j)
for i_1 = 1:sizA1
@inbounds r += f(@nref $N A i) * w[i_1]
end
@inbounds (@nref $N R j) = r
end
else
@nloops $N i A d->(if d == dim
wi = w[i_d]
j_d = 1
else
#CURRENT FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
"""
indexmap(a)
Construct a dictionary that maps each unique value in `a` to
the index of its first occurrence in `a`.
"""
function indexmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = i
end
end
return d
end
##CHUNK 2
j = lens[i]
j >= 0 || throw(ArgumentError("lengths must be non-negative"))
v = vals[i]
while j > 0
r[p+=1] = v
j -=1
end
end
return r
end
"""
indexmap(a)
Construct a dictionary that maps each unique value in `a` to
the index of its first occurrence in `a`.
"""
function indexmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
##CHUNK 3
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = i
end
end
return d
end
"""
levelsmap(a)
Construct a dictionary that maps each of the `n` unique values
in `a` to a number between 1 and `n`.
"""
"""
indicatormat(x, k::Integer; sparse=false)
|
168
| 180
|
StatsBase.jl
| 258
|
function _indicatormat_dense(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
r = zeros(Bool, m, n)
o = 0
@inbounds for i = 1 : n
xi = x[i]
r[o + d[xi]] = true
o += m
end
return r
end
|
function _indicatormat_dense(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
r = zeros(Bool, m, n)
o = 0
@inbounds for i = 1 : n
xi = x[i]
r[o + d[xi]] = true
o += m
end
return r
end
|
[
168,
180
] |
function _indicatormat_dense(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
r = zeros(Bool, m, n)
o = 0
@inbounds for i = 1 : n
xi = x[i]
r[o + d[xi]] = true
o += m
end
return r
end
|
function _indicatormat_dense(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
r = zeros(Bool, m, n)
o = 0
@inbounds for i = 1 : n
xi = x[i]
r[o + d[xi]] = true
o += m
end
return r
end
|
_indicatormat_dense
| 168
| 180
|
src/misc.jl
|
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
j_d = i_d
end) @inbounds (@nref $N R j) += f(@nref $N A i) * wi
end
return R
end
end
@generated function _wsum_centralize!(R::AbstractArray{RT}, f::supertype(typeof(abs)),
A::AbstractArray{T,N}, w::AbstractVector{WT}, means,
dim::Int, init::Bool) where {T,RT,WT,N}
quote
init && fill!(R, zero(RT))
wi = zero(WT)
if dim == 1
@nextract $N sizeR d->size(R,d)
sizA1 = size(A, 1)
@nloops $N i d->(d>1 ? (1:size(A,d)) : (1:1)) d->(j_d = sizeR_d==1 ? 1 : i_d) begin
@inbounds r = (@nref $N R j)
@inbounds m = (@nref $N means j)
for i_1 = 1:sizA1
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function check_vectors(x, y, skipmissing::Symbol)
m = length(x)
n = length(y)
if !(all(xi -> xi isa AbstractVector, x) && all(yi -> yi isa AbstractVector, y))
throw(ArgumentError("All entries in x and y must be vectors " *
"when skipmissing=:$skipmissing"))
end
if m > 1
indsx = keys(first(x))
#CURRENT FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
function _indicatormat_sparse(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
rinds = Vector{Int}(undef, n)
@inbounds for i = 1 : n
rinds[i] = d[x[i]]
end
return sparse(rinds, 1:n, true, m, n)
end
##CHUNK 2
r = zeros(Bool, k, n)
for i = 1 : n
r[x[i], i] = true
end
return r
end
_indicatormat_sparse(x::AbstractArray{<:Integer}, k::Integer) = (n = length(x); sparse(x, 1:n, true, k, n))
function _indicatormat_sparse(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
rinds = Vector{Int}(undef, n)
@inbounds for i = 1 : n
rinds[i] = d[x[i]]
end
return sparse(rinds, 1:n, true, m, n)
##CHUNK 3
@inbounds k = a[i]
if !haskey(d, k)
d[k] = index
index += 1
end
end
return d
end
"""
indicatormat(x, k::Integer; sparse=false)
Construct a boolean matrix `I` of size `(k, length(x))` such that
`I[x[i], i] = true` and all other elements are set to `false`.
If `sparse` is `true`, the output will be a sparse matrix, otherwise
it will be dense (default).
# Examples
```jldoctest
##CHUNK 4
end
"""
indicatormat(x, c=sort(unique(x)); sparse=false)
Construct a boolean matrix `I` of size `(length(c), length(x))`.
Let `ci` be the index of `x[i]` in `c`. Then `I[ci, i] = true` and
all other elements are `false`.
"""
function indicatormat(x::AbstractArray, c::AbstractArray; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, c) : _indicatormat_dense(x, c)
end
indicatormat(x::AbstractArray; sparse::Bool=false) =
indicatormat(x, sort!(unique(x)); sparse=sparse)
function _indicatormat_dense(x::AbstractArray{<:Integer}, k::Integer)
n = length(x)
##CHUNK 5
indicatormat(x, k::Integer; sparse=false)
Construct a boolean matrix `I` of size `(k, length(x))` such that
`I[x[i], i] = true` and all other elements are set to `false`.
If `sparse` is `true`, the output will be a sparse matrix, otherwise
it will be dense (default).
# Examples
```jldoctest
julia> using StatsBase
julia> indicatormat([1 2 2], 2)
2×3 Matrix{Bool}:
1 0 0
0 1 1
```
"""
function indicatormat(x::AbstractArray{<:Integer}, k::Integer; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, k) : _indicatormat_dense(x, k)
##CHUNK 6
julia> using StatsBase
julia> indicatormat([1 2 2], 2)
2×3 Matrix{Bool}:
1 0 0
0 1 1
```
"""
function indicatormat(x::AbstractArray{<:Integer}, k::Integer; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, k) : _indicatormat_dense(x, k)
end
"""
indicatormat(x, c=sort(unique(x)); sparse=false)
Construct a boolean matrix `I` of size `(length(c), length(x))`.
Let `ci` be the index of `x[i]` in `c`. Then `I[ci, i] = true` and
all other elements are `false`.
"""
##CHUNK 7
function indicatormat(x::AbstractArray, c::AbstractArray; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, c) : _indicatormat_dense(x, c)
end
indicatormat(x::AbstractArray; sparse::Bool=false) =
indicatormat(x, sort!(unique(x)); sparse=sparse)
function _indicatormat_dense(x::AbstractArray{<:Integer}, k::Integer)
n = length(x)
r = zeros(Bool, k, n)
for i = 1 : n
r[x[i], i] = true
end
return r
end
_indicatormat_sparse(x::AbstractArray{<:Integer}, k::Integer) = (n = length(x); sparse(x, 1:n, true, k, n))
##CHUNK 8
for i = 1 : length(a)
@inbounds k = a[i]
if !haskey(d, k)
d[k] = i
end
end
return d
end
"""
levelsmap(a)
Construct a dictionary that maps each of the `n` unique values
in `a` to a number between 1 and `n`.
"""
function levelsmap(a::AbstractArray{T}) where T
d = Dict{T,Int}()
index = 1
for i = 1 : length(a)
|
184
| 194
|
StatsBase.jl
| 259
|
function _indicatormat_sparse(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
rinds = Vector{Int}(undef, n)
@inbounds for i = 1 : n
rinds[i] = d[x[i]]
end
return sparse(rinds, 1:n, true, m, n)
end
|
function _indicatormat_sparse(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
rinds = Vector{Int}(undef, n)
@inbounds for i = 1 : n
rinds[i] = d[x[i]]
end
return sparse(rinds, 1:n, true, m, n)
end
|
[
184,
194
] |
function _indicatormat_sparse(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
rinds = Vector{Int}(undef, n)
@inbounds for i = 1 : n
rinds[i] = d[x[i]]
end
return sparse(rinds, 1:n, true, m, n)
end
|
function _indicatormat_sparse(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
rinds = Vector{Int}(undef, n)
@inbounds for i = 1 : n
rinds[i] = d[x[i]]
end
return sparse(rinds, 1:n, true, m, n)
end
|
_indicatormat_sparse
| 184
| 194
|
src/misc.jl
|
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
j_d = i_d
end) @inbounds (@nref $N R j) += f(@nref $N A i) * wi
end
return R
end
end
@generated function _wsum_centralize!(R::AbstractArray{RT}, f::supertype(typeof(abs)),
A::AbstractArray{T,N}, w::AbstractVector{WT}, means,
dim::Int, init::Bool) where {T,RT,WT,N}
quote
init && fill!(R, zero(RT))
wi = zero(WT)
if dim == 1
@nextract $N sizeR d->size(R,d)
sizA1 = size(A, 1)
@nloops $N i d->(d>1 ? (1:size(A,d)) : (1:1)) d->(j_d = sizeR_d==1 ? 1 : i_d) begin
@inbounds r = (@nref $N R j)
@inbounds m = (@nref $N means j)
for i_1 = 1:sizA1
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function check_vectors(x, y, skipmissing::Symbol)
m = length(x)
n = length(y)
if !(all(xi -> xi isa AbstractVector, x) && all(yi -> yi isa AbstractVector, y))
throw(ArgumentError("All entries in x and y must be vectors " *
"when skipmissing=:$skipmissing"))
end
if m > 1
indsx = keys(first(x))
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
#CURRENT FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
r = zeros(Bool, k, n)
for i = 1 : n
r[x[i], i] = true
end
return r
end
function _indicatormat_dense(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
n = length(x)
r = zeros(Bool, m, n)
o = 0
@inbounds for i = 1 : n
xi = x[i]
r[o + d[xi]] = true
o += m
end
return r
end
##CHUNK 2
n = length(x)
r = zeros(Bool, m, n)
o = 0
@inbounds for i = 1 : n
xi = x[i]
r[o + d[xi]] = true
o += m
end
return r
end
_indicatormat_sparse(x::AbstractArray{<:Integer}, k::Integer) = (n = length(x); sparse(x, 1:n, true, k, n))
##CHUNK 3
function indicatormat(x::AbstractArray, c::AbstractArray; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, c) : _indicatormat_dense(x, c)
end
indicatormat(x::AbstractArray; sparse::Bool=false) =
indicatormat(x, sort!(unique(x)); sparse=sparse)
function _indicatormat_dense(x::AbstractArray{<:Integer}, k::Integer)
n = length(x)
r = zeros(Bool, k, n)
for i = 1 : n
r[x[i], i] = true
end
return r
end
function _indicatormat_dense(x::AbstractArray{T}, c::AbstractArray{T}) where T
d = indexmap(c)
m = length(c)
##CHUNK 4
end
"""
indicatormat(x, c=sort(unique(x)); sparse=false)
Construct a boolean matrix `I` of size `(length(c), length(x))`.
Let `ci` be the index of `x[i]` in `c`. Then `I[ci, i] = true` and
all other elements are `false`.
"""
function indicatormat(x::AbstractArray, c::AbstractArray; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, c) : _indicatormat_dense(x, c)
end
indicatormat(x::AbstractArray; sparse::Bool=false) =
indicatormat(x, sort!(unique(x)); sparse=sparse)
function _indicatormat_dense(x::AbstractArray{<:Integer}, k::Integer)
n = length(x)
##CHUNK 5
@inbounds k = a[i]
if !haskey(d, k)
d[k] = index
index += 1
end
end
return d
end
"""
indicatormat(x, k::Integer; sparse=false)
Construct a boolean matrix `I` of size `(k, length(x))` such that
`I[x[i], i] = true` and all other elements are set to `false`.
If `sparse` is `true`, the output will be a sparse matrix, otherwise
it will be dense (default).
# Examples
```jldoctest
##CHUNK 6
indicatormat(x, k::Integer; sparse=false)
Construct a boolean matrix `I` of size `(k, length(x))` such that
`I[x[i], i] = true` and all other elements are set to `false`.
If `sparse` is `true`, the output will be a sparse matrix, otherwise
it will be dense (default).
# Examples
```jldoctest
julia> using StatsBase
julia> indicatormat([1 2 2], 2)
2×3 Matrix{Bool}:
1 0 0
0 1 1
```
"""
function indicatormat(x::AbstractArray{<:Integer}, k::Integer; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, k) : _indicatormat_dense(x, k)
##CHUNK 7
julia> using StatsBase
julia> indicatormat([1 2 2], 2)
2×3 Matrix{Bool}:
1 0 0
0 1 1
```
"""
function indicatormat(x::AbstractArray{<:Integer}, k::Integer; sparse::Bool=false)
sparse ? _indicatormat_sparse(x, k) : _indicatormat_dense(x, k)
end
"""
indicatormat(x, c=sort(unique(x)); sparse=false)
Construct a boolean matrix `I` of size `(length(c), length(x))`.
Let `ci` be the index of `x[i]` in `c`. Then `I[ci, i] = true` and
all other elements are `false`.
"""
|
34
| 56
|
StatsBase.jl
| 260
|
function var!(R::AbstractArray, A::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
mean=nothing, corrected::Union{Bool, Nothing}=nothing)
corrected = depcheck(:var!, :corrected, corrected)
if mean == 0
mean = Base.reducedim_initarray(A, dims, 0, eltype(R))
elseif mean === nothing
mean = Statistics.mean(A, w, dims=dims)
else
# check size of mean
for i = 1:ndims(A)
dA = size(A,i)
dM = size(mean,i)
if i == dims
dM == 1 || throw(DimensionMismatch("Incorrect size of mean."))
else
dM == dA || throw(DimensionMismatch("Incorrect size of mean."))
end
end
end
return rmul!(_wsum_centralize!(R, abs2, A, convert(Vector, w), mean, dims, true),
varcorrection(w, corrected))
end
|
function var!(R::AbstractArray, A::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
mean=nothing, corrected::Union{Bool, Nothing}=nothing)
corrected = depcheck(:var!, :corrected, corrected)
if mean == 0
mean = Base.reducedim_initarray(A, dims, 0, eltype(R))
elseif mean === nothing
mean = Statistics.mean(A, w, dims=dims)
else
# check size of mean
for i = 1:ndims(A)
dA = size(A,i)
dM = size(mean,i)
if i == dims
dM == 1 || throw(DimensionMismatch("Incorrect size of mean."))
else
dM == dA || throw(DimensionMismatch("Incorrect size of mean."))
end
end
end
return rmul!(_wsum_centralize!(R, abs2, A, convert(Vector, w), mean, dims, true),
varcorrection(w, corrected))
end
|
[
34,
56
] |
function var!(R::AbstractArray, A::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
mean=nothing, corrected::Union{Bool, Nothing}=nothing)
corrected = depcheck(:var!, :corrected, corrected)
if mean == 0
mean = Base.reducedim_initarray(A, dims, 0, eltype(R))
elseif mean === nothing
mean = Statistics.mean(A, w, dims=dims)
else
# check size of mean
for i = 1:ndims(A)
dA = size(A,i)
dM = size(mean,i)
if i == dims
dM == 1 || throw(DimensionMismatch("Incorrect size of mean."))
else
dM == dA || throw(DimensionMismatch("Incorrect size of mean."))
end
end
end
return rmul!(_wsum_centralize!(R, abs2, A, convert(Vector, w), mean, dims, true),
varcorrection(w, corrected))
end
|
function var!(R::AbstractArray, A::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
mean=nothing, corrected::Union{Bool, Nothing}=nothing)
corrected = depcheck(:var!, :corrected, corrected)
if mean == 0
mean = Base.reducedim_initarray(A, dims, 0, eltype(R))
elseif mean === nothing
mean = Statistics.mean(A, w, dims=dims)
else
# check size of mean
for i = 1:ndims(A)
dA = size(A,i)
dM = size(mean,i)
if i == dims
dM == 1 || throw(DimensionMismatch("Incorrect size of mean."))
else
dM == dA || throw(DimensionMismatch("Incorrect size of mean."))
end
end
end
return rmul!(_wsum_centralize!(R, abs2, A, convert(Vector, w), mean, dims, true),
varcorrection(w, corrected))
end
|
var!
| 34
| 56
|
src/moments.jl
|
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
function cov(sc::SimpleCovariance, X::AbstractMatrix; dims::Int=1, mean=nothing)
dims ∈ (1, 2) || throw(ArgumentError("Argument dims can only be 1 or 2 (given: $dims)"))
if mean === nothing
return cov(X; dims=dims, corrected=sc.corrected)
else
return covm(X, mean, dims, corrected=sc.corrected)
end
end
function cov(sc::SimpleCovariance, X::AbstractMatrix, w::AbstractWeights; dims::Int=1, mean=nothing)
dims ∈ (1, 2) || throw(ArgumentError("Argument dims can only be 1 or 2 (given: $dims)"))
if mean === nothing
return cov(X, w, dims, corrected=sc.corrected)
else
return covm(X, mean, w, dims, corrected=sc.corrected)
end
end
##CHUNK 2
end
"""
cor(X, w::AbstractWeights, dims=1)
Compute the Pearson correlation matrix of `X` along the dimension
`dims` with a weighting `w` .
"""
cor(x::DenseMatrix, w::AbstractWeights, dims::Int=1) =
corm(x, mean(x, w, dims=dims), w, dims)
function mean_and_cov(x::DenseMatrix, dims::Int=1; corrected::Bool=true)
m = mean(x, dims=dims)
return m, covm(x, m, dims, corrected=corrected)
end
function mean_and_cov(x::DenseMatrix, wv::AbstractWeights, dims::Int=1;
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, wv, dims=dims)
return m, cov(x, wv, dims; corrected=depcheck(:mean_and_cov, :corrected, corrected))
end
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
wsumtype(::Type{T}, ::Type{W}) where {T,W} = typeof(zero(T) * zero(W) + zero(T) * zero(W))
"""
wsum!(R::AbstractArray, A::AbstractArray,
w::AbstractVector, dim::Int;
init::Bool=true)
Compute the weighted sum of `A` with weights `w` over the dimension `dim` and store
the result in `R`. If `init=false`, the sum is added to `R` rather than starting
from zero.
"""
function wsum!(R::AbstractArray, A::AbstractArray{T,N}, w::AbstractVector, dim::Int; init::Bool=true) where {T,N}
1 <= dim <= N || error("dim should be within [1, $N]")
ndims(R) <= N || error("ndims(R) should not exceed $N")
length(w) == size(A,dim) || throw(DimensionMismatch("Inconsistent array dimension."))
# TODO: more careful examination of R's size
_wsum!(R, A, w, dim, init)
end
##CHUNK 2
from zero.
"""
function wsum!(R::AbstractArray, A::AbstractArray{T,N}, w::AbstractVector, dim::Int; init::Bool=true) where {T,N}
1 <= dim <= N || error("dim should be within [1, $N]")
ndims(R) <= N || error("ndims(R) should not exceed $N")
length(w) == size(A,dim) || throw(DimensionMismatch("Inconsistent array dimension."))
# TODO: more careful examination of R's size
_wsum!(R, A, w, dim, init)
end
function wsum(A::AbstractArray{T}, w::AbstractVector{W}, dim::Int) where {T<:Number,W<:Real}
length(w) == size(A,dim) || throw(DimensionMismatch("Inconsistent array dimension."))
_wsum!(similar(A, wsumtype(T,W), Base.reduced_indices(axes(A), dim)), A, w, dim, true)
end
function wsum(A::AbstractArray{<:Number}, w::UnitWeights, dim::Int)
size(A, dim) != length(w) && throw(DimensionMismatch("Inconsistent array dimension."))
return sum(A, dims=dim)
end
#FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
##CHUNK 2
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
#CURRENT FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
end
function mean_and_var(x::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, w, dims=dims)
v = var(x, w, dims, mean=m, corrected=depcheck(:mean_and_var, :corrected, corrected))
m, v
end
function mean_and_std(x::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, w, dims=dims)
s = std(x, w, dims, mean=m, corrected=depcheck(:mean_and_std, :corrected, corrected))
m, s
end
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
##CHUNK 2
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, w, dims=dims)
s = std(x, w, dims, mean=m, corrected=depcheck(:mean_and_std, :corrected, corrected))
m, s
end
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += z * z
end
varcorrection(n, corrected) * s
end
function _moment2(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real; corrected=false)
##CHUNK 3
function mean_and_var(x::AbstractArray{<:Real}, dim::Int; corrected::Bool=true)
m = mean(x, dims=dim)
v = var(x, dims=dim, mean=m, corrected=corrected)
m, v
end
function mean_and_std(x::AbstractArray{<:Real}, dim::Int; corrected::Bool=true)
m = mean(x, dims=dim)
s = std(x, dims=dim, mean=m, corrected=corrected)
m, s
end
function mean_and_var(x::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, w, dims=dims)
v = var(x, w, dims, mean=m, corrected=depcheck(:mean_and_var, :corrected, corrected))
m, v
end
function mean_and_std(x::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
##CHUNK 4
function var(v::AbstractArray{<:Real}, w::AbstractWeights; mean=nothing,
corrected::Union{Bool, Nothing}=nothing)
corrected = depcheck(:var, :corrected, corrected)
if mean == nothing
_moment2(v, w, Statistics.mean(v, w); corrected=corrected)
else
_moment2(v, w, mean; corrected=corrected)
end
end
## var along dim
function var(A::AbstractArray{<:Real}, w::AbstractWeights, dim::Int; mean=nothing,
corrected::Union{Bool, Nothing}=nothing)
corrected = depcheck(:var, :corrected, corrected)
var!(similar(A, Float64, Base.reduced_indices(axes(A), dim)), A, w, dim;
mean=mean, corrected=corrected)
end
|
281
| 295
|
StatsBase.jl
| 261
|
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
[
281,
295
] |
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
skewness
| 281
| 295
|
src/moments.jl
|
#CURRENT FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
##CHUNK 2
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights) = kurtosis(v, wv, mean(v, wv))
##CHUNK 3
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
##CHUNK 4
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
##CHUNK 5
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
##CHUNK 6
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
##CHUNK 7
##### Skewness and Kurtosis
# Skewness
# This is Type 1 definition according to Joanes and Gill (1998)
"""
skewness(v, [wv::AbstractWeights], m=mean(v))
Compute the standardized skewness of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
##CHUNK 8
m, v
end
function mean_and_std(x::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, w, dims=dims)
s = std(x, w, dims, mean=m, corrected=depcheck(:mean_and_std, :corrected, corrected))
m, s
end
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += z * z
end
varcorrection(n, corrected) * s
##CHUNK 9
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights) = kurtosis(v, wv, mean(v, wv))
"""
cumulant(v, k, [wv::AbstractWeights], m=mean(v))
Return the `k`th order cumulant of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a pre-computed mean `m`.
If `k` is a range of `Integer`s, then return all the cumulants of orders in this range as a vector.
This quantity is calculated using a recursive definition on lower-order cumulants and central moments.
##CHUNK 10
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += z * z
end
varcorrection(n, corrected) * s
end
function _moment2(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
@inbounds s += (z * z) * wv[i]
end
|
297
| 315
|
StatsBase.jl
| 262
|
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
[
297,
315
] |
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
|
skewness
| 297
| 315
|
src/moments.jl
|
#FILE: StatsBase.jl/test/cov.jl
##CHUNK 1
X = randn(3, 8)
Z1 = X .- mean(X, dims = 1)
Z2 = X .- mean(X, dims = 2)
w1 = rand(3)
w2 = rand(8)
# varcorrection is negative if sum of weights is smaller than 1
if f === fweights
w1[1] += 1
w2[1] += 1
end
wv1 = f(w1)
wv2 = f(w2)
Z1w = X .- mean(X, wv1, dims=1)
Z2w = X .- mean(X, wv2, dims=2)
#FILE: StatsBase.jl/test/moments.jl
##CHUNK 1
@test_throws ArgumentError mean_and_std(x, wv; corrected=true)
else
(m, s) = mean_and_std(x, wv; corrected=true)
@test m == mean(x, wv)
@test s == std(x, wv; corrected=true)
end
end
end
x = rand(5, 6)
w1 = [0.57, 5.10, 0.91, 1.72, 0.0]
w2 = [3.84, 2.70, 8.29, 8.91, 9.71, 0.0]
@testset "Uncorrected with $f" for f in weight_funcs
wv1 = f(w1)
wv2 = f(w2)
m1 = mean(x, wv1, dims=1)
m2 = mean(x, wv2, dims=2)
expected_var1 = sum(abs2.(x .- m1) .* w1, dims = 1) ./ sum(wv1)
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
function sem(x::AbstractArray, weights::ProbabilityWeights; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
return var(x, weights; mean=mean, corrected=true) / 0
else
_mean = mean === nothing ? Statistics.mean(x, weights) : mean
# sum of squared errors = sse
sse = sum(Broadcast.instantiate(Broadcast.broadcasted(x, weights) do x_i, w
return abs2(w * (x_i - _mean))
end))
n = count(!iszero, weights)
return sqrt(sse * n / (n - 1)) / sum(weights)
end
end
# Median absolute deviation
@irrational mad_constant 1.4826022185056018 BigFloat("1.482602218505601860547076529360423431326703202590312896536266275245674447622701")
"""
mad(x; center=median(x), normalize=true)
#CURRENT FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
##CHUNK 2
return (cm4 / (cm2 * cm2)) - 3.0
end
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
##CHUNK 3
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights) = kurtosis(v, wv, mean(v, wv))
"""
cumulant(v, k, [wv::AbstractWeights], m=mean(v))
##CHUNK 4
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
##CHUNK 5
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
##CHUNK 6
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += z * z
end
varcorrection(n, corrected) * s
end
function _moment2(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
@inbounds s += (z * z) * wv[i]
end
##CHUNK 7
##### Skewness and Kurtosis
# Skewness
# This is Type 1 definition according to Joanes and Gill (1998)
"""
skewness(v, [wv::AbstractWeights], m=mean(v))
Compute the standardized skewness of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
|
328
| 341
|
StatsBase.jl
| 263
|
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
|
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
|
[
328,
341
] |
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
|
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
|
kurtosis
| 328
| 341
|
src/moments.jl
|
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
elseif normalize
m * mad_constant
else
m
end
end
# Interquartile range
"""
iqr(x)
Compute the interquartile range (IQR) of collection `x`, i.e. the 75th percentile
minus the 25th percentile.
"""
iqr(x) = (q = quantile(x, [.25, .75]); q[2] - q[1])
# Generalized variance
"""
genvar(X)
#CURRENT FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
##CHUNK 2
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
##CHUNK 3
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
##CHUNK 4
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
##CHUNK 5
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
##CHUNK 6
##### Skewness and Kurtosis
# Skewness
# This is Type 1 definition according to Joanes and Gill (1998)
"""
skewness(v, [wv::AbstractWeights], m=mean(v))
Compute the standardized skewness of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
##CHUNK 7
m, v
end
function mean_and_std(x::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, w, dims=dims)
s = std(x, w, dims, mean=m, corrected=depcheck(:mean_and_std, :corrected, corrected))
m, s
end
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += z * z
end
varcorrection(n, corrected) * s
##CHUNK 8
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights) = kurtosis(v, wv, mean(v, wv))
"""
cumulant(v, k, [wv::AbstractWeights], m=mean(v))
Return the `k`th order cumulant of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a pre-computed mean `m`.
If `k` is a range of `Integer`s, then return all the cumulants of orders in this range as a vector.
##CHUNK 9
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
|
343
| 362
|
StatsBase.jl
| 264
|
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
|
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
|
[
343,
362
] |
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
|
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
|
kurtosis
| 343
| 362
|
src/moments.jl
|
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
function sem(x::AbstractArray, weights::ProbabilityWeights; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
return var(x, weights; mean=mean, corrected=true) / 0
else
_mean = mean === nothing ? Statistics.mean(x, weights) : mean
# sum of squared errors = sse
sse = sum(Broadcast.instantiate(Broadcast.broadcasted(x, weights) do x_i, w
return abs2(w * (x_i - _mean))
end))
n = count(!iszero, weights)
return sqrt(sse * n / (n - 1)) / sum(weights)
end
end
# Median absolute deviation
@irrational mad_constant 1.4826022185056018 BigFloat("1.482602218505601860547076529360423431326703202590312896536266275245674447622701")
"""
mad(x; center=median(x), normalize=true)
#CURRENT FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
##CHUNK 2
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
end
cm3 /= n
cm2 /= n
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
function skewness(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
##CHUNK 3
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
##CHUNK 4
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights) = kurtosis(v, wv, mean(v, wv))
"""
cumulant(v, k, [wv::AbstractWeights], m=mean(v))
Return the `k`th order cumulant of a real-valued array `v`, optionally
##CHUNK 5
cm3 = 0.0 # empirical 3rd centered moment
@inbounds for i = 1:n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2w = z * z * w_i
cm2 += z2w
cm3 += z2w * z
end
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
##CHUNK 6
##### Skewness and Kurtosis
# Skewness
# This is Type 1 definition according to Joanes and Gill (1998)
"""
skewness(v, [wv::AbstractWeights], m=mean(v))
Compute the standardized skewness of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function skewness(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm3 = 0.0 # empirical 3rd centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm3 += z2 * z
##CHUNK 7
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
##CHUNK 8
k == 4 ? _moment4(v, wv, m) :
_momentk(v, k, wv, m)
end
moment(v::AbstractArray{<:Real}, k::Int) = moment(v, k, mean(v))
function moment(v::AbstractArray{<:Real}, k::Int, wv::AbstractWeights)
moment(v, k, wv, mean(v, wv))
end
##### Skewness and Kurtosis
# Skewness
# This is Type 1 definition according to Joanes and Gill (1998)
"""
skewness(v, [wv::AbstractWeights], m=mean(v))
Compute the standardized skewness of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
##CHUNK 9
m, v
end
function mean_and_std(x::AbstractArray{<:Real}, w::AbstractWeights, dims::Int;
corrected::Union{Bool, Nothing}=nothing)
m = mean(x, w, dims=dims)
s = std(x, w, dims, mean=m, corrected=depcheck(:mean_and_std, :corrected, corrected))
m, s
end
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += z * z
end
varcorrection(n, corrected) * s
|
381
| 400
|
StatsBase.jl
| 265
|
function cumulant(v::AbstractArray{<:Real}, krange::Union{Integer, AbstractRange{<:Integer}}, wv::AbstractWeights,
m::Real=mean(v, wv))
if minimum(krange) <= 0
throw(ArgumentError("Cumulant orders must be strictly positive."))
end
k = maximum(krange)
cmoms = zeros(typeof(m), k)
cumls = zeros(typeof(m), k)
cmoms[1] = 0
cumls[1] = m
for i = 2:k
kn = wv isa UnitWeights ? moment(v, i, m) : moment(v, i, wv, m)
cmoms[i] = kn
for j = 2:i-2
kn -= binomial(i-1, j)*cmoms[j]*cumls[i-j]
end
cumls[i] = kn
end
return cumls[krange]
end
|
function cumulant(v::AbstractArray{<:Real}, krange::Union{Integer, AbstractRange{<:Integer}}, wv::AbstractWeights,
m::Real=mean(v, wv))
if minimum(krange) <= 0
throw(ArgumentError("Cumulant orders must be strictly positive."))
end
k = maximum(krange)
cmoms = zeros(typeof(m), k)
cumls = zeros(typeof(m), k)
cmoms[1] = 0
cumls[1] = m
for i = 2:k
kn = wv isa UnitWeights ? moment(v, i, m) : moment(v, i, wv, m)
cmoms[i] = kn
for j = 2:i-2
kn -= binomial(i-1, j)*cmoms[j]*cumls[i-j]
end
cumls[i] = kn
end
return cumls[krange]
end
|
[
381,
400
] |
function cumulant(v::AbstractArray{<:Real}, krange::Union{Integer, AbstractRange{<:Integer}}, wv::AbstractWeights,
m::Real=mean(v, wv))
if minimum(krange) <= 0
throw(ArgumentError("Cumulant orders must be strictly positive."))
end
k = maximum(krange)
cmoms = zeros(typeof(m), k)
cumls = zeros(typeof(m), k)
cmoms[1] = 0
cumls[1] = m
for i = 2:k
kn = wv isa UnitWeights ? moment(v, i, m) : moment(v, i, wv, m)
cmoms[i] = kn
for j = 2:i-2
kn -= binomial(i-1, j)*cmoms[j]*cumls[i-j]
end
cumls[i] = kn
end
return cumls[krange]
end
|
function cumulant(v::AbstractArray{<:Real}, krange::Union{Integer, AbstractRange{<:Integer}}, wv::AbstractWeights,
m::Real=mean(v, wv))
if minimum(krange) <= 0
throw(ArgumentError("Cumulant orders must be strictly positive."))
end
k = maximum(krange)
cmoms = zeros(typeof(m), k)
cumls = zeros(typeof(m), k)
cmoms[1] = 0
cumls[1] = m
for i = 2:k
kn = wv isa UnitWeights ? moment(v, i, m) : moment(v, i, wv, m)
cmoms[i] = kn
for j = 2:i-2
kn -= binomial(i-1, j)*cmoms[j]*cumls[i-j]
end
cumls[i] = kn
end
return cumls[krange]
end
|
cumulant
| 381
| 400
|
src/moments.jl
|
#CURRENT FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights) = kurtosis(v, wv, mean(v, wv))
"""
cumulant(v, k, [wv::AbstractWeights], m=mean(v))
Return the `k`th order cumulant of a real-valued array `v`, optionally
##CHUNK 2
return (cm4 / (cm2 * cm2)) - 3.0
end
kurtosis(v::AbstractArray{<:Real}) = kurtosis(v, mean(v))
kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights) = kurtosis(v, wv, mean(v, wv))
"""
cumulant(v, k, [wv::AbstractWeights], m=mean(v))
Return the `k`th order cumulant of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a pre-computed mean `m`.
If `k` is a range of `Integer`s, then return all the cumulants of orders in this range as a vector.
This quantity is calculated using a recursive definition on lower-order cumulants and central moments.
Reference: Smith, P. J. 1995. A Recursive Formulation of the Old Problem of Obtaining
Moments from Cumulants and Vice Versa. The American Statistician, 49(2), 217–218.
https://doi.org/10.2307/2684642
"""
##CHUNK 3
s / sum(wv)
end
function _momentk(v::AbstractArray{<:Real}, k::Int, m::Real)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += (z ^ k)
end
s / n
end
function _momentk(v::AbstractArray{<:Real}, k::Int, wv::AbstractWeights, m::Real)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
@inbounds s += (z ^ k) * wv[i]
end
##CHUNK 4
specifying a weighting vector `wv` and a pre-computed mean `m`.
If `k` is a range of `Integer`s, then return all the cumulants of orders in this range as a vector.
This quantity is calculated using a recursive definition on lower-order cumulants and central moments.
Reference: Smith, P. J. 1995. A Recursive Formulation of the Old Problem of Obtaining
Moments from Cumulants and Vice Versa. The American Statistician, 49(2), 217–218.
https://doi.org/10.2307/2684642
"""
cumulant(v::AbstractArray{<:Real}, krange::Union{Integer, AbstractRange{<:Integer}}, m::Real=mean(v)) =
cumulant(v, krange, uweights(length(v)), m)
##CHUNK 5
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
##CHUNK 6
s / n
end
function _moment4(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
@inbounds s += abs2(z * z) * wv[i]
end
s / sum(wv)
end
function _momentk(v::AbstractArray{<:Real}, k::Int, m::Real)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += (z ^ k)
end
##CHUNK 7
function moment(v::AbstractArray{<:Real}, k::Int, m::Real)
k == 2 ? _moment2(v, m) :
k == 3 ? _moment3(v, m) :
k == 4 ? _moment4(v, m) :
_momentk(v, k, m)
end
function moment(v::AbstractArray{<:Real}, k::Int, wv::AbstractWeights, m::Real)
k == 2 ? _moment2(v, wv, m) :
k == 3 ? _moment3(v, wv, m) :
k == 4 ? _moment4(v, wv, m) :
_momentk(v, k, wv, m)
end
moment(v::AbstractArray{<:Real}, k::Int) = moment(v, k, mean(v))
function moment(v::AbstractArray{<:Real}, k::Int, wv::AbstractWeights)
moment(v, k, wv, mean(v, wv))
end
##CHUNK 8
##### General central moment
function _moment2(v::AbstractArray{<:Real}, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
s += z * z
end
varcorrection(n, corrected) * s
end
function _moment2(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real; corrected=false)
n = length(v)
s = 0.0
for i = 1:n
@inbounds z = v[i] - m
@inbounds s += (z * z) * wv[i]
end
##CHUNK 9
end
function kurtosis(v::AbstractArray{<:Real}, wv::AbstractWeights, m::Real)
n = length(v)
length(wv) == n || throw(DimensionMismatch("Inconsistent array lengths."))
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
@inbounds for i = 1 : n
x_i = v[i]
w_i = wv[i]
z = x_i - m
z2 = z * z
z2w = z2 * w_i
cm2 += z2w
cm4 += z2w * z2
end
sw = sum(wv)
cm4 /= sw
cm2 /= sw
##CHUNK 10
sw = sum(wv)
cm3 /= sw
cm2 /= sw
return cm3 / sqrt(cm2 * cm2 * cm2) # this is much faster than cm2^1.5
end
skewness(v::AbstractArray{<:Real}) = skewness(v, mean(v))
skewness(v::AbstractArray{<:Real}, wv::AbstractWeights) = skewness(v, wv, mean(v, wv))
# (excessive) Kurtosis
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
|
1
| 20
|
StatsBase.jl
| 266
|
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
[
1,
20
] |
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
_pairwise!
| 1
| 20
|
src/pairwise.jl
|
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
#FILE: StatsBase.jl/test/pairwise.jl
##CHUNK 1
Symmetric(pairwise(f, x, x), :U)
res = zeros(4, 4)
res2 = zeros(4, 4)
@test pairwise!(f, res, x, x, symmetric=true) === res
@test pairwise!(f, res2, x, symmetric=true) === res2
@test res == res2 == Symmetric(pairwise(f, x, x), :U)
@test_throws ArgumentError pairwise(f, x, y, symmetric=true)
@test_throws ArgumentError pairwise!(f, res, x, y, symmetric=true)
end
@testset "cor corner cases" begin
# Integer inputs must give a Float64 output
res = pairwise(cor, [[1, 2, 3], [1, 5, 2]])
@test res isa Matrix{Float64}
@test res == [cor(xi, yi) for xi in ([1, 2, 3], [1, 5, 2]),
yi in ([1, 2, 3], [1, 5, 2])]
# NaNs are ignored for the diagonal
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
#CURRENT FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
end
end
if m > 1 && n > 1
indsx == indsy ||
throw(ArgumentError("All input vectors must have the same indices"))
end
end
function _pairwise!(::Val{:pairwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :pairwise)
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
##CHUNK 2
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
##CHUNK 3
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
##CHUNK 4
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
##CHUNK 5
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
end
end
# Computing integer indices once for all vectors is faster
nminds′ = findall(nminds)
# TODO: check whether wrapping views in a custom array type which asserts
# that entries cannot be `missing` (similar to `skipmissing`)
# could offer better performance
return _pairwise!(Val(:none), f, dest,
[view(xi, nminds′) for xi in x],
##CHUNK 6
end
end
# Computing integer indices once for all vectors is faster
nminds′ = findall(nminds)
# TODO: check whether wrapping views in a custom array type which asserts
# that entries cannot be `missing` (similar to `skipmissing`)
# could offer better performance
return _pairwise!(Val(:none), f, dest,
[view(xi, nminds′) for xi in x],
[view(yi, nminds′) for yi in y],
symmetric)
end
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
##CHUNK 7
# V is inferred (contrary to U), but it only gives an upper bound for U
V = promote_type_union(Union{T, Tsm})
return convert(Matrix{U}, dest)::Matrix{<:V}
end
end
"""
pairwise!(f, dest::AbstractMatrix, x[, y];
symmetric::Bool=false, skipmissing::Symbol=:none)
Store in matrix `dest` the result of applying `f` to all possible pairs
of entries in iterators `x` and `y`, and return it. Rows correspond to
entries in `x` and columns to entries in `y`, and `dest` must therefore
be of size `length(x) × length(y)`.
If `y` is omitted then `x` is crossed with itself.
As a special case, if `f` is `cor`, diagonal cells for which entries
from `x` and `y` are identical (according to `===`) are set to one even
in the presence `missing`, `NaN` or `Inf` entries.
|
49
| 80
|
StatsBase.jl
| 267
|
function _pairwise!(::Val{:pairwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :pairwise)
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
function _pairwise!(::Val{:pairwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :pairwise)
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
[
49,
80
] |
function _pairwise!(::Val{:pairwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :pairwise)
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
function _pairwise!(::Val{:pairwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :pairwise)
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
|
_pairwise!
| 49
| 80
|
src/pairwise.jl
|
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
##CHUNK 2
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
#CURRENT FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
##CHUNK 2
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
end
end
# Computing integer indices once for all vectors is faster
nminds′ = findall(nminds)
# TODO: check whether wrapping views in a custom array type which asserts
# that entries cannot be `missing` (similar to `skipmissing`)
# could offer better performance
return _pairwise!(Val(:none), f, dest,
[view(xi, nminds′) for xi in x],
[view(yi, nminds′) for yi in y],
symmetric)
##CHUNK 3
end
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
size(dest) != (m, n) &&
throw(DimensionMismatch("dest has dimensions $(size(dest)) but expected ($m, $n)"))
Base.has_offset_axes(dest) && throw("dest indices must start at 1")
return _pairwise!(Val(skipmissing), f, dest, x′, y′, symmetric)
end
##CHUNK 4
_pairwise!(f, dest, x′, y′, symmetric=symmetric, skipmissing=skipmissing)
if isconcretetype(eltype(dest))
return dest
else
# Final eltype depends on actual contents (consistent with `map` and `broadcast`
# but using `promote_type` rather than `promote_typejoin`)
U = mapreduce(typeof, promote_type, dest)
# V is inferred (contrary to U), but it only gives an upper bound for U
V = promote_type_union(Union{T, Tsm})
return convert(Matrix{U}, dest)::Matrix{<:V}
end
end
"""
pairwise!(f, dest::AbstractMatrix, x[, y];
symmetric::Bool=false, skipmissing::Symbol=:none)
Store in matrix `dest` the result of applying `f` to all possible pairs
of entries in iterators `x` and `y`, and return it. Rows correspond to
##CHUNK 5
end
end
if m > 1 && n > 1
indsx == indsy ||
throw(ArgumentError("All input vectors must have the same indices"))
end
end
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
end
end
##CHUNK 6
julia> pairwise(cor, eachcol(y), skipmissing=:pairwise)
3×3 Matrix{Float64}:
1.0 0.928571 -0.866025
0.928571 1.0 -1.0
-0.866025 -1.0 1.0
```
"""
function pairwise(f, x, y=x; symmetric::Bool=false, skipmissing::Symbol=:none)
if symmetric && x !== y
throw(ArgumentError("symmetric=true only makes sense passing " *
"a single set of variables (x === y)"))
end
return _pairwise(Val(skipmissing), f, x, y, symmetric)
end
# cov(x) is faster than cov(x, x)
_cov(x, y) = x === y ? cov(x) : cov(x, y)
pairwise!(::typeof(cov), dest::AbstractMatrix, x, y;
|
82
| 103
|
StatsBase.jl
| 268
|
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
end
end
# Computing integer indices once for all vectors is faster
nminds′ = findall(nminds)
# TODO: check whether wrapping views in a custom array type which asserts
# that entries cannot be `missing` (similar to `skipmissing`)
# could offer better performance
return _pairwise!(Val(:none), f, dest,
[view(xi, nminds′) for xi in x],
[view(yi, nminds′) for yi in y],
symmetric)
end
|
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
end
end
# Computing integer indices once for all vectors is faster
nminds′ = findall(nminds)
# TODO: check whether wrapping views in a custom array type which asserts
# that entries cannot be `missing` (similar to `skipmissing`)
# could offer better performance
return _pairwise!(Val(:none), f, dest,
[view(xi, nminds′) for xi in x],
[view(yi, nminds′) for yi in y],
symmetric)
end
|
[
82,
103
] |
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
end
end
# Computing integer indices once for all vectors is faster
nminds′ = findall(nminds)
# TODO: check whether wrapping views in a custom array type which asserts
# that entries cannot be `missing` (similar to `skipmissing`)
# could offer better performance
return _pairwise!(Val(:none), f, dest,
[view(xi, nminds′) for xi in x],
[view(yi, nminds′) for yi in y],
symmetric)
end
|
function _pairwise!(::Val{:listwise}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
check_vectors(x, y, :listwise)
nminds = .!ismissing.(first(x))
@inbounds for xi in Iterators.drop(x, 1)
nminds .&= .!ismissing.(xi)
end
if x !== y
@inbounds for yj in y
nminds .&= .!ismissing.(yj)
end
end
# Computing integer indices once for all vectors is faster
nminds′ = findall(nminds)
# TODO: check whether wrapping views in a custom array type which asserts
# that entries cannot be `missing` (similar to `skipmissing`)
# could offer better performance
return _pairwise!(Val(:none), f, dest,
[view(xi, nminds′) for xi in x],
[view(yi, nminds′) for yi in y],
symmetric)
end
|
_pairwise!
| 82
| 103
|
src/pairwise.jl
|
#CURRENT FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
size(dest) != (m, n) &&
throw(DimensionMismatch("dest has dimensions $(size(dest)) but expected ($m, $n)"))
Base.has_offset_axes(dest) && throw("dest indices must start at 1")
return _pairwise!(Val(skipmissing), f, dest, x′, y′, symmetric)
end
##CHUNK 2
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
##CHUNK 3
_pairwise!(f, dest, x′, y′, symmetric=symmetric, skipmissing=skipmissing)
if isconcretetype(eltype(dest))
return dest
else
# Final eltype depends on actual contents (consistent with `map` and `broadcast`
# but using `promote_type` rather than `promote_typejoin`)
U = mapreduce(typeof, promote_type, dest)
# V is inferred (contrary to U), but it only gives an upper bound for U
V = promote_type_union(Union{T, Tsm})
return convert(Matrix{U}, dest)::Matrix{<:V}
end
end
"""
pairwise!(f, dest::AbstractMatrix, x[, y];
symmetric::Bool=false, skipmissing::Symbol=:none)
Store in matrix `dest` the result of applying `f` to all possible pairs
of entries in iterators `x` and `y`, and return it. Rows correspond to
##CHUNK 4
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
##CHUNK 5
0.928571 1.0 -1.0
-0.866025 -1.0 1.0
```
"""
function pairwise!(f, dest::AbstractMatrix, x, y=x;
symmetric::Bool=false, skipmissing::Symbol=:none)
if symmetric && x !== y
throw(ArgumentError("symmetric=true only makes sense passing " *
"a single set of variables (x === y)"))
end
return _pairwise!(f, dest, x, y, symmetric=symmetric, skipmissing=skipmissing)
end
"""
pairwise(f, x[, y];
symmetric::Bool=false, skipmissing::Symbol=:none)
Return a matrix holding the result of applying `f` to all possible pairs
of entries in iterators `x` and `y`. Rows correspond to
##CHUNK 6
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
##CHUNK 7
return convert(Matrix{U}, dest)::Matrix{<:V}
end
end
"""
pairwise!(f, dest::AbstractMatrix, x[, y];
symmetric::Bool=false, skipmissing::Symbol=:none)
Store in matrix `dest` the result of applying `f` to all possible pairs
of entries in iterators `x` and `y`, and return it. Rows correspond to
entries in `x` and columns to entries in `y`, and `dest` must therefore
be of size `length(x) × length(y)`.
If `y` is omitted then `x` is crossed with itself.
As a special case, if `f` is `cor`, diagonal cells for which entries
from `x` and `y` are identical (according to `===`) are set to one even
in the presence `missing`, `NaN` or `Inf` entries.
# Keyword arguments
- `symmetric::Bool=false`: If `true`, `f` is only called to compute
##CHUNK 8
return _pairwise!(f, dest, x, y, symmetric=symmetric, skipmissing=skipmissing)
end
"""
pairwise(f, x[, y];
symmetric::Bool=false, skipmissing::Symbol=:none)
Return a matrix holding the result of applying `f` to all possible pairs
of entries in iterators `x` and `y`. Rows correspond to
entries in `x` and columns to entries in `y`. If `y` is omitted then a
square matrix crossing `x` with itself is returned.
As a special case, if `f` is `cor`, diagonal cells for which entries
from `x` and `y` are identical (according to `===`) are set to one even
in the presence `missing`, `NaN` or `Inf` entries.
# Keyword arguments
- `symmetric::Bool=false`: If `true`, `f` is only called to compute
for the lower triangle of the matrix, and these values are copied
##CHUNK 9
to fill the upper triangle. Only allowed when `y` is omitted.
Defaults to `true` when `f` is `cor` or `cov`.
- `skipmissing::Symbol=:none`: If `:none` (the default), missing values
in inputs are passed to `f` without any modification.
Use `:pairwise` to skip entries with a `missing` value in either
of the two vectors passed to `f` for a given pair of vectors in `x` and `y`.
Use `:listwise` to skip entries with a `missing` value in any of the
vectors in `x` or `y`; note that this might drop a large part of entries.
Only allowed when entries in `x` and `y` are vectors.
# Examples
```jldoctest
julia> using StatsBase, Statistics
julia> x = [1 3 7
2 5 6
3 8 4
4 6 2];
julia> pairwise(cor, eachcol(x))
##CHUNK 10
function check_vectors(x, y, skipmissing::Symbol)
m = length(x)
n = length(y)
if !(all(xi -> xi isa AbstractVector, x) && all(yi -> yi isa AbstractVector, y))
throw(ArgumentError("All entries in x and y must be vectors " *
"when skipmissing=:$skipmissing"))
end
if m > 1
indsx = keys(first(x))
for i in 2:m
keys(x[i]) == indsx ||
throw(ArgumentError("All input vectors must have the same indices"))
end
end
if n > 1
indsy = keys(first(y))
for j in 2:n
keys(y[j]) == indsy ||
throw(ArgumentError("All input vectors must have the same indices"))
|
105
| 122
|
StatsBase.jl
| 269
|
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
size(dest) != (m, n) &&
throw(DimensionMismatch("dest has dimensions $(size(dest)) but expected ($m, $n)"))
Base.has_offset_axes(dest) && throw("dest indices must start at 1")
return _pairwise!(Val(skipmissing), f, dest, x′, y′, symmetric)
end
|
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
size(dest) != (m, n) &&
throw(DimensionMismatch("dest has dimensions $(size(dest)) but expected ($m, $n)"))
Base.has_offset_axes(dest) && throw("dest indices must start at 1")
return _pairwise!(Val(skipmissing), f, dest, x′, y′, symmetric)
end
|
[
105,
122
] |
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
size(dest) != (m, n) &&
throw(DimensionMismatch("dest has dimensions $(size(dest)) but expected ($m, $n)"))
Base.has_offset_axes(dest) && throw("dest indices must start at 1")
return _pairwise!(Val(skipmissing), f, dest, x′, y′, symmetric)
end
|
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
size(dest) != (m, n) &&
throw(DimensionMismatch("dest has dimensions $(size(dest)) but expected ($m, $n)"))
Base.has_offset_axes(dest) && throw("dest indices must start at 1")
return _pairwise!(Val(skipmissing), f, dest, x′, y′, symmetric)
end
|
_pairwise!
| 105
| 122
|
src/pairwise.jl
|
#FILE: StatsBase.jl/test/pairwise.jl
##CHUNK 1
length(xm), length(ym)), xm, ym,
skipmissing=:something)
# variable with only missings
xm = [fill(missing, 10), rand(10)]
ym = [rand(10), rand(10)]
res = pairwise(f, xm, ym)
@test res isa Matrix{Union{Float64, Missing}}
res2 = zeros(Union{Float64, Missing}, size(res))
@test pairwise!(f, res2, xm, ym) === res2
@test res ≅ res2 ≅ [f(xi, yi) for xi in xm, yi in ym]
@test_throws Union{ArgumentError,MethodError} pairwise(f, xm, ym, skipmissing=:pairwise)
@test_throws Union{ArgumentError,MethodError} pairwise(f, xm, ym, skipmissing=:listwise)
res = zeros(Union{Float64, Missing}, length(xm), length(ym))
@test_throws Union{ArgumentError,MethodError} pairwise!(f, res, xm, ym, skipmissing=:pairwise)
@test_throws Union{ArgumentError,MethodError} pairwise!(f, res, xm, ym, skipmissing=:listwise)
##CHUNK 2
if skipmissing in (:pairwise, :listwise)
@test_broken Core.Compiler.return_type(g, Tuple{Vector{Vector{Union{Float64, Missing}}}}) ==
Core.Compiler.return_type(g, Tuple{Vector{Vector{Union{Float64, Missing}}},
Vector{Vector{Union{Float64, Missing}}}}) ==
Matrix{Float64}
end
end
@test_throws ArgumentError pairwise(f, xm, ym, skipmissing=:something)
@test_throws ArgumentError pairwise!(f, zeros(Union{Float64, Missing},
length(xm), length(ym)), xm, ym,
skipmissing=:something)
# variable with only missings
xm = [fill(missing, 10), rand(10)]
ym = [rand(10), rand(10)]
res = pairwise(f, xm, ym)
@test res isa Matrix{Union{Float64, Missing}}
res2 = zeros(Union{Float64, Missing}, size(res))
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
r[xi - bx, yi - by] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
#FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
#CURRENT FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
"""
function pairwise(f, x, y=x; symmetric::Bool=false, skipmissing::Symbol=:none)
if symmetric && x !== y
throw(ArgumentError("symmetric=true only makes sense passing " *
"a single set of variables (x === y)"))
end
return _pairwise(Val(skipmissing), f, x, y, symmetric)
end
# cov(x) is faster than cov(x, x)
_cov(x, y) = x === y ? cov(x) : cov(x, y)
pairwise!(::typeof(cov), dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none) =
pairwise!(_cov, dest, x, y, symmetric=symmetric, skipmissing=skipmissing)
pairwise(::typeof(cov), x, y; symmetric::Bool=false, skipmissing::Symbol=:none) =
pairwise(_cov, x, y, symmetric=symmetric, skipmissing=skipmissing)
##CHUNK 2
end
end
function _pairwise(::Val{skipmissing}, f, x, y, symmetric::Bool) where {skipmissing}
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
T = Core.Compiler.return_type(f, Tuple{eltype(x′), eltype(y′)})
Tsm = Core.Compiler.return_type((x, y) -> f(disallowmissing(x), disallowmissing(y)),
Tuple{eltype(x′), eltype(y′)})
if skipmissing === :none
dest = Matrix{T}(undef, m, n)
elseif skipmissing in (:pairwise, :listwise)
dest = Matrix{Tsm}(undef, m, n)
else
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
##CHUNK 3
pairwise!(::typeof(cov), dest::AbstractMatrix, x;
symmetric::Bool=true, skipmissing::Symbol=:none) =
pairwise!(_cov, dest, x, x, symmetric=symmetric, skipmissing=skipmissing)
pairwise(::typeof(cov), x; symmetric::Bool=true, skipmissing::Symbol=:none) =
pairwise(_cov, x, x, symmetric=symmetric, skipmissing=skipmissing)
pairwise!(::typeof(cor), dest::AbstractMatrix, x;
symmetric::Bool=true, skipmissing::Symbol=:none) =
pairwise!(cor, dest, x, x, symmetric=symmetric, skipmissing=skipmissing)
pairwise(::typeof(cor), x; symmetric::Bool=true, skipmissing::Symbol=:none) =
pairwise(cor, x, x, symmetric=symmetric, skipmissing=skipmissing)
##CHUNK 4
# cov(x) is faster than cov(x, x)
_cov(x, y) = x === y ? cov(x) : cov(x, y)
pairwise!(::typeof(cov), dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none) =
pairwise!(_cov, dest, x, y, symmetric=symmetric, skipmissing=skipmissing)
pairwise(::typeof(cov), x, y; symmetric::Bool=false, skipmissing::Symbol=:none) =
pairwise(_cov, x, y, symmetric=symmetric, skipmissing=skipmissing)
pairwise!(::typeof(cov), dest::AbstractMatrix, x;
symmetric::Bool=true, skipmissing::Symbol=:none) =
pairwise!(_cov, dest, x, x, symmetric=symmetric, skipmissing=skipmissing)
pairwise(::typeof(cov), x; symmetric::Bool=true, skipmissing::Symbol=:none) =
pairwise(_cov, x, x, symmetric=symmetric, skipmissing=skipmissing)
pairwise!(::typeof(cor), dest::AbstractMatrix, x;
symmetric::Bool=true, skipmissing::Symbol=:none) =
pairwise!(cor, dest, x, x, symmetric=symmetric, skipmissing=skipmissing)
##CHUNK 5
if symmetric && x !== y
throw(ArgumentError("symmetric=true only makes sense passing " *
"a single set of variables (x === y)"))
end
return _pairwise!(f, dest, x, y, symmetric=symmetric, skipmissing=skipmissing)
end
"""
pairwise(f, x[, y];
symmetric::Bool=false, skipmissing::Symbol=:none)
Return a matrix holding the result of applying `f` to all possible pairs
of entries in iterators `x` and `y`. Rows correspond to
entries in `x` and columns to entries in `y`. If `y` is omitted then a
square matrix crossing `x` with itself is returned.
As a special case, if `f` is `cor`, diagonal cells for which entries
from `x` and `y` are identical (according to `===`) are set to one even
in the presence `missing`, `NaN` or `Inf` entries.
|
16
| 26
|
StatsBase.jl
| 270
|
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, Z::AbstractMatrix)
p = size(Z, 2)
p == 1 && return _partialcor(x, μx, y, μy, vec(Z))
z₀ = view(Z, :, 1)
Zmz₀ = view(Z, :, 2:p)
μz₀ = mean(z₀)
rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, Z::AbstractMatrix)
p = size(Z, 2)
p == 1 && return _partialcor(x, μx, y, μy, vec(Z))
z₀ = view(Z, :, 1)
Zmz₀ = view(Z, :, 2:p)
μz₀ = mean(z₀)
rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
[
16,
26
] |
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, Z::AbstractMatrix)
p = size(Z, 2)
p == 1 && return _partialcor(x, μx, y, μy, vec(Z))
z₀ = view(Z, :, 1)
Zmz₀ = view(Z, :, 2:p)
μz₀ = mean(z₀)
rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, Z::AbstractMatrix)
p = size(Z, 2)
p == 1 && return _partialcor(x, μx, y, μy, vec(Z))
z₀ = view(Z, :, 1)
Zmz₀ = view(Z, :, 2:p)
μz₀ = mean(z₀)
rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
_partialcor
| 16
| 26
|
src/partialcor.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
##CHUNK 2
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
##CHUNK 3
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 4
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
##CHUNK 5
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
_zscore!(Z, X, μ, σ)
end
function zscore!(Z::AbstractArray{<:AbstractFloat}, X::AbstractArray{<:Real},
μ::AbstractArray{<:Real}, σ::AbstractArray{<:Real})
size(Z) == size(X) || throw(DimensionMismatch("Z and X must have the same size."))
_zscore_chksize(X, μ, σ)
_zscore!(Z, X, μ, σ)
end
zscore!(X::AbstractArray{<:AbstractFloat}, μ::Real, σ::Real) = _zscore!(X, X, μ, σ)
zscore!(X::AbstractArray{<:AbstractFloat}, μ::AbstractArray{<:Real}, σ::AbstractArray{<:Real}) =
(_zscore_chksize(X, μ, σ); _zscore!(X, X, μ, σ))
"""
zscore(X, [μ, σ])
Compute the z-scores of `X`, optionally specifying a precomputed mean `μ` and
##CHUNK 2
zscore!(X::AbstractArray{<:AbstractFloat}, μ::Real, σ::Real) = _zscore!(X, X, μ, σ)
zscore!(X::AbstractArray{<:AbstractFloat}, μ::AbstractArray{<:Real}, σ::AbstractArray{<:Real}) =
(_zscore_chksize(X, μ, σ); _zscore!(X, X, μ, σ))
"""
zscore(X, [μ, σ])
Compute the z-scores of `X`, optionally specifying a precomputed mean `μ` and
standard deviation `σ`. z-scores are the signed number of standard deviations
above the mean that an observation lies, i.e. ``(x - μ) / σ``.
`μ` and `σ` should be both scalars or both arrays. The computation is broadcasting.
In particular, when `μ` and `σ` are arrays, they should have the same size, and
`size(μ, i) == 1 || size(μ, i) == size(X, i)` for each dimension.
"""
function zscore(X::AbstractArray{T}, μ::Real, σ::Real) where T<:Real
ZT = typeof((zero(T) - zero(μ)) / one(σ))
_zscore!(Array{ZT}(undef, size(X)), X, μ, σ)
#CURRENT FILE: StatsBase.jl/src/partialcor.jl
##CHUNK 1
xi = x[i] - μx
yi = y[i] - μy
zi = z[i] - μz
Σxx += abs2(xi)
Σyy += abs2(yi)
Σzz += abs2(zi)
Σxy += xi * yi
Σxz += xi * zi
Σzy += zi * yi
end
end
# Individual pairwise correlations
rxy = Σxy / sqrt(Σxx * Σyy)
rxz = Σxz / sqrt(Σxx * Σzz)
rzy = Σzy / sqrt(Σzz * Σyy)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
##CHUNK 2
Σxx = abs2(zero(eltype(x)) - zero(μx))
Σyy = abs2(zero(eltype(y)) - zero(μy))
Σzz = abs2(zero(eltype(z)) - zero(μz))
Σxy = zero(Σxx * Σyy)
Σxz = zero(Σxx * Σzz)
Σzy = zero(Σzz * Σyy)
# We only want to make one pass over all of the arrays
@inbounds begin
@simd for i in eachindex(x, y, z)
xi = x[i] - μx
yi = y[i] - μy
zi = z[i] - μz
Σxx += abs2(xi)
Σyy += abs2(yi)
Σzz += abs2(zi)
Σxy += xi * yi
Σxz += xi * zi
##CHUNK 3
throw(DimensionMismatch("Inputs must have the same number of observations"))
length(x) > 0 || throw(ArgumentError("Inputs must be non-empty"))
return Statistics.clampcor(_partialcor(x, mean(x), y, mean(y), Z))
end
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
# Initialize all of the accumulators to 0 of the appropriate types
Σxx = abs2(zero(eltype(x)) - zero(μx))
Σyy = abs2(zero(eltype(y)) - zero(μy))
Σzz = abs2(zero(eltype(z)) - zero(μz))
Σxy = zero(Σxx * Σyy)
Σxz = zero(Σxx * Σzz)
Σzy = zero(Σzz * Σyy)
# We only want to make one pass over all of the arrays
@inbounds begin
@simd for i in eachindex(x, y, z)
|
28
| 62
|
StatsBase.jl
| 271
|
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
# Initialize all of the accumulators to 0 of the appropriate types
Σxx = abs2(zero(eltype(x)) - zero(μx))
Σyy = abs2(zero(eltype(y)) - zero(μy))
Σzz = abs2(zero(eltype(z)) - zero(μz))
Σxy = zero(Σxx * Σyy)
Σxz = zero(Σxx * Σzz)
Σzy = zero(Σzz * Σyy)
# We only want to make one pass over all of the arrays
@inbounds begin
@simd for i in eachindex(x, y, z)
xi = x[i] - μx
yi = y[i] - μy
zi = z[i] - μz
Σxx += abs2(xi)
Σyy += abs2(yi)
Σzz += abs2(zi)
Σxy += xi * yi
Σxz += xi * zi
Σzy += zi * yi
end
end
# Individual pairwise correlations
rxy = Σxy / sqrt(Σxx * Σyy)
rxz = Σxz / sqrt(Σxx * Σzz)
rzy = Σzy / sqrt(Σzz * Σyy)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
# Initialize all of the accumulators to 0 of the appropriate types
Σxx = abs2(zero(eltype(x)) - zero(μx))
Σyy = abs2(zero(eltype(y)) - zero(μy))
Σzz = abs2(zero(eltype(z)) - zero(μz))
Σxy = zero(Σxx * Σyy)
Σxz = zero(Σxx * Σzz)
Σzy = zero(Σzz * Σyy)
# We only want to make one pass over all of the arrays
@inbounds begin
@simd for i in eachindex(x, y, z)
xi = x[i] - μx
yi = y[i] - μy
zi = z[i] - μz
Σxx += abs2(xi)
Σyy += abs2(yi)
Σzz += abs2(zi)
Σxy += xi * yi
Σxz += xi * zi
Σzy += zi * yi
end
end
# Individual pairwise correlations
rxy = Σxy / sqrt(Σxx * Σyy)
rxz = Σxz / sqrt(Σxx * Σzz)
rzy = Σzy / sqrt(Σzz * Σyy)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
[
28,
62
] |
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
# Initialize all of the accumulators to 0 of the appropriate types
Σxx = abs2(zero(eltype(x)) - zero(μx))
Σyy = abs2(zero(eltype(y)) - zero(μy))
Σzz = abs2(zero(eltype(z)) - zero(μz))
Σxy = zero(Σxx * Σyy)
Σxz = zero(Σxx * Σzz)
Σzy = zero(Σzz * Σyy)
# We only want to make one pass over all of the arrays
@inbounds begin
@simd for i in eachindex(x, y, z)
xi = x[i] - μx
yi = y[i] - μy
zi = z[i] - μz
Σxx += abs2(xi)
Σyy += abs2(yi)
Σzz += abs2(zi)
Σxy += xi * yi
Σxz += xi * zi
Σzy += zi * yi
end
end
# Individual pairwise correlations
rxy = Σxy / sqrt(Σxx * Σyy)
rxz = Σxz / sqrt(Σxx * Σzz)
rzy = Σzy / sqrt(Σzz * Σyy)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
# Initialize all of the accumulators to 0 of the appropriate types
Σxx = abs2(zero(eltype(x)) - zero(μx))
Σyy = abs2(zero(eltype(y)) - zero(μy))
Σzz = abs2(zero(eltype(z)) - zero(μz))
Σxy = zero(Σxx * Σyy)
Σxz = zero(Σxx * Σzz)
Σzy = zero(Σzz * Σyy)
# We only want to make one pass over all of the arrays
@inbounds begin
@simd for i in eachindex(x, y, z)
xi = x[i] - μx
yi = y[i] - μy
zi = z[i] - μz
Σxx += abs2(xi)
Σyy += abs2(yi)
Σzz += abs2(zi)
Σxy += xi * yi
Σxz += xi * zi
Σzy += zi * yi
end
end
# Individual pairwise correlations
rxy = Σxy / sqrt(Σxx * Σyy)
rxz = Σxz / sqrt(Σxx * Σzz)
rzy = Σzy / sqrt(Σzz * Σyy)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
_partialcor
| 28
| 62
|
src/partialcor.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
##CHUNK 2
end
end
push!(zxs, xj)
xxs[j] = dot(xj, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
yy = dot(zy, zy)
for i = 1 : nx
zx = zxs[i]
sc = sqrt(xxs[i] * yy)
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
end
return r
##CHUNK 3
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
##CHUNK 4
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
##CHUNK 5
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 6
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
##CHUNK 7
yy = dot(zy, zy)
for i = 1 : nx
zx = zxs[i]
sc = sqrt(xxs[i] * yy)
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
end
return r
end
"""
crosscor(x, y, [lags]; demean=true)
Compute the cross correlation between real-valued vectors or matrices `x` and `y`,
optionally specifying the `lags`. `demean` specifies whether the respective means of
`x` and `y` should be subtracted from them before computing their cross correlation.
##CHUNK 8
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 9
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
#CURRENT FILE: StatsBase.jl/src/partialcor.jl
##CHUNK 1
throw(DimensionMismatch("Inputs must have the same number of observations"))
length(x) > 0 || throw(ArgumentError("Inputs must be non-empty"))
return Statistics.clampcor(_partialcor(x, mean(x), y, mean(y), Z))
end
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, Z::AbstractMatrix)
p = size(Z, 2)
p == 1 && return _partialcor(x, μx, y, μy, vec(Z))
z₀ = view(Z, :, 1)
Zmz₀ = view(Z, :, 2:p)
μz₀ = mean(z₀)
rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
|
27
| 44
|
StatsBase.jl
| 272
|
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
|
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
|
[
27,
44
] |
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
|
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
|
corspearman
| 27
| 44
|
src/rankcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
##CHUNK 2
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
##CHUNK 3
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
##CHUNK 4
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
##CHUNK 5
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
##CHUNK 6
#######################################
"""
corspearman(x, y=x)
Compute Spearman's rank correlation coefficient. If `x` and `y` are vectors, the
output is a float, otherwise it's a matrix corresponding to the pairwise correlations
of the columns of `x` and `y`.
"""
function corspearman(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
##CHUNK 7
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
C[j,i] = cor(Xjrank, Yirank)
end
end
end
return C
end
##CHUNK 8
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
##CHUNK 9
return C
end
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
# Auxiliary functions for Kendall's rank correlation
"""
countties(x::AbstractVector{<:Real}, lo::Integer, hi::Integer)
##CHUNK 10
# Rank-based correlations
#
# - Spearman's correlation
# - Kendall's correlation
#
#######################################
#
# Spearman correlation
#
#######################################
"""
corspearman(x, y=x)
Compute Spearman's rank correlation coefficient. If `x` and `y` are vectors, the
output is a float, otherwise it's a matrix corresponding to the pairwise correlations
of the columns of `x` and `y`.
"""
function corspearman(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
|
46
| 63
|
StatsBase.jl
| 273
|
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
|
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
|
[
46,
63
] |
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
|
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
|
corspearman
| 46
| 63
|
src/rankcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
##CHUNK 2
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
##CHUNK 3
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
##CHUNK 4
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
##CHUNK 5
#######################################
"""
corspearman(x, y=x)
Compute Spearman's rank correlation coefficient. If `x` and `y` are vectors, the
output is a float, otherwise it's a matrix corresponding to the pairwise correlations
of the columns of `x` and `y`.
"""
function corspearman(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
##CHUNK 6
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
C[j,i] = cor(Xjrank, Yirank)
end
end
end
return C
end
##CHUNK 7
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
##CHUNK 8
return C
end
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
# Auxiliary functions for Kendall's rank correlation
"""
countties(x::AbstractVector{<:Real}, lo::Integer, hi::Integer)
##CHUNK 9
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
##CHUNK 10
# Rank-based correlations
#
# - Spearman's correlation
# - Kendall's correlation
#
#######################################
#
# Spearman correlation
#
#######################################
"""
corspearman(x, y=x)
Compute Spearman's rank correlation coefficient. If `x` and `y` are vectors, the
output is a float, otherwise it's a matrix corresponding to the pairwise correlations
of the columns of `x` and `y`.
"""
function corspearman(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
|
65
| 90
|
StatsBase.jl
| 274
|
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
|
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
|
[
65,
90
] |
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
|
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
|
corspearman
| 65
| 90
|
src/rankcorr.jl
|
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for i = 1 : length(lags)
l = lags[i]
sX = view(tmpX, 1+l:lx, 1:l+1)
r[i,j] = l == 0 ? 1 : (cholesky!(sX'sX, Val(false)) \ (sX'view(X, 1+l:lx, j)))[end]
end
end
r
end
function pacf_yulewalker!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}, mk::Integer) where T<:Union{Float32, Float64}
tmp = Vector{T}(undef, mk)
for j = 1 : size(X,2)
acfs = autocor(X[:,j], 1:mk)
for i = 1 : length(lags)
l = lags[i]
r[i,j] = l == 0 ? 1 : l == 1 ? acfs[i] : -durbin!(view(acfs, 1:l), tmp)[l]
end
end
end
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
##CHUNK 2
@inbounds for (j, yj) in enumerate(y)
ynminds = .!ismissing.(yj)
@inbounds for (i, xi) in enumerate(x)
symmetric && i > j && continue
if xi === yj
ynm = view(yj, ynminds)
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(ynm, ynm)
end
else
nminds = .!ismissing.(xi) .& ynminds
xnm = view(xi, nminds)
ynm = view(yj, nminds)
dest[i, j] = f(xnm, ynm)
end
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
##CHUNK 2
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
C[j,i] = cor(Xjrank, Yirank)
end
end
end
return C
end
##CHUNK 3
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
##CHUNK 4
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
##CHUNK 5
end
end
return C
end
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
# Auxiliary functions for Kendall's rank correlation
##CHUNK 6
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
|
92
| 116
|
StatsBase.jl
| 275
|
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
C[j,i] = cor(Xjrank, Yirank)
end
end
end
return C
end
|
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
C[j,i] = cor(Xjrank, Yirank)
end
end
end
return C
end
|
[
92,
116
] |
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
C[j,i] = cor(Xjrank, Yirank)
end
end
end
return C
end
|
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
C[j,i] = cor(Xjrank, Yirank)
end
end
end
return C
end
|
corspearman
| 92
| 116
|
src/rankcorr.jl
|
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
function _pairwise!(::Val{:none}, f, dest::AbstractMatrix, x, y, symmetric::Bool)
@inbounds for (i, xi) in enumerate(x), (j, yj) in enumerate(y)
symmetric && i > j && continue
# For performance, diagonal is special-cased
if f === cor && eltype(dest) !== Union{} && i == j && xi === yj
# TODO: float() will not be needed after JuliaLang/Statistics.jl#61
dest[i, j] = float(cor(xi))
else
dest[i, j] = f(xi, yj)
end
end
if symmetric
m, n = size(dest)
@inbounds for j in 1:n, i in (j+1):m
dest[i, j] = dest[j, i]
end
end
return dest
end
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
##CHUNK 2
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
##CHUNK 3
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
##CHUNK 4
anynan[j] = any(isnan, Xj)
if anynan[j]
C[:,j] .= NaN
C[j,:] .= NaN
C[j,j] = 1
continue
end
Xjrank = tiedrank(Xj)
for i = 1:(j-1)
Xi = view(X, :, i)
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
##CHUNK 5
#######################################
"""
corspearman(x, y=x)
Compute Spearman's rank correlation coefficient. If `x` and `y` are vectors, the
output is a float, otherwise it's a matrix corresponding to the pairwise correlations
of the columns of `x` and `y`.
"""
function corspearman(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
##CHUNK 6
return(reshape([corkendall!(copy(x), Y[:,i], permx) for i in 1:n], 1, n))
end
function corkendall(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
for j = 2:n
permx = sortperm(X[:,j])
for i = 1:j - 1
C[j,i] = corkendall!(X[:,j], X[:,i], permx)
C[i,j] = C[j,i]
end
end
return C
end
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
##CHUNK 7
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
|
189
| 200
|
StatsBase.jl
| 276
|
function corkendall(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
for j = 2:n
permx = sortperm(X[:,j])
for i = 1:j - 1
C[j,i] = corkendall!(X[:,j], X[:,i], permx)
C[i,j] = C[j,i]
end
end
return C
end
|
function corkendall(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
for j = 2:n
permx = sortperm(X[:,j])
for i = 1:j - 1
C[j,i] = corkendall!(X[:,j], X[:,i], permx)
C[i,j] = C[j,i]
end
end
return C
end
|
[
189,
200
] |
function corkendall(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
for j = 2:n
permx = sortperm(X[:,j])
for i = 1:j - 1
C[j,i] = corkendall!(X[:,j], X[:,i], permx)
C[i,j] = C[j,i]
end
end
return C
end
|
function corkendall(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
for j = 2:n
permx = sortperm(X[:,j])
for i = 1:j - 1
C[j,i] = corkendall!(X[:,j], X[:,i], permx)
C[i,j] = C[j,i]
end
end
return C
end
|
corkendall
| 189
| 200
|
src/rankcorr.jl
|
#FILE: StatsBase.jl/test/rankcorr.jl
##CHUNK 1
# AbstractMatrix{<:Real}
@test corkendall(X) ≈ [c11 c12; c12 c22]
@test c11 == 1.0
@test c22 == 1.0
@test c12 == 3/sqrt(20)
# Finished testing for overflow, so redefine n for speedier tests
n = 100
@test corkendall(repeat(X, n), repeat(X, n)) ≈ [c11 c12; c12 c22]
@test corkendall(repeat(X, n)) ≈ [c11 c12; c12 c22]
# All eight three-element permutations
z = [1 1 1;
1 1 2;
1 2 2;
1 2 2;
1 2 1;
2 1 2;
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
end
function corkendall(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
n = size(Y, 2)
permx = sortperm(x)
return(reshape([corkendall!(copy(x), Y[:,i], permx) for i in 1:n], 1, n))
end
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
##CHUNK 2
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
# Auxiliary functions for Kendall's rank correlation
"""
countties(x::AbstractVector{<:Real}, lo::Integer, hi::Integer)
Return the number of ties within `x[lo:hi]`. Assumes `x` is sorted.
"""
function countties(x::AbstractVector, lo::Integer, hi::Integer)
##CHUNK 3
corkendall(x, y=x)
Compute Kendall's rank correlation coefficient, τ. `x` and `y` must both be either
matrices or vectors.
"""
corkendall(x::AbstractVector{<:Real}, y::AbstractVector{<:Real}) = corkendall!(copy(x), copy(y))
function corkendall(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
permy = sortperm(y)
return([corkendall!(copy(y), X[:,i], permy) for i in 1:size(X, 2)])
end
function corkendall(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
n = size(Y, 2)
permx = sortperm(x)
return(reshape([corkendall!(copy(x), Y[:,i], permx) for i in 1:n], 1, n))
end
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
##CHUNK 4
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
##CHUNK 5
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
##CHUNK 6
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
##CHUNK 7
# Kendall correlation
#
#######################################
# Knight, William R. “A Computer Method for Calculating Kendall's Tau with Ungrouped Data.”
# Journal of the American Statistical Association, vol. 61, no. 314, 1966, pp. 436–439.
# JSTOR, www.jstor.org/stable/2282833.
function corkendall!(x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, permx::AbstractArray{<:Integer}=sortperm(x))
if any(isnan, x) || any(isnan, y) return NaN end
n = length(x)
if n != length(y) error("Vectors must have same length") end
# Initial sorting
permute!(x, permx)
permute!(y, permx)
# Use widen to avoid overflows on both 32bit and 64bit
npairs = div(widen(n) * (n - 1), 2)
ntiesx = ndoubleties = nswaps = widen(0)
k = 0
##CHUNK 8
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
##CHUNK 9
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
|
202
| 213
|
StatsBase.jl
| 277
|
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
|
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
|
[
202,
213
] |
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
|
function corkendall(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
permx = sortperm(X[:,j])
for i = 1:nc
C[j,i] = corkendall!(X[:,j], Y[:,i], permx)
end
end
return C
end
|
corkendall
| 202
| 213
|
src/rankcorr.jl
|
#FILE: StatsBase.jl/test/rankcorr.jl
##CHUNK 1
# AbstractMatrix{<:Real}
@test corkendall(X) ≈ [c11 c12; c12 c22]
@test c11 == 1.0
@test c22 == 1.0
@test c12 == 3/sqrt(20)
# Finished testing for overflow, so redefine n for speedier tests
n = 100
@test corkendall(repeat(X, n), repeat(X, n)) ≈ [c11 c12; c12 c22]
@test corkendall(repeat(X, n)) ≈ [c11 c12; c12 c22]
# All eight three-element permutations
z = [1 1 1;
1 1 2;
1 2 2;
1 2 2;
1 2 1;
2 1 2;
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
end
function corkendall(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
n = size(Y, 2)
permx = sortperm(x)
return(reshape([corkendall!(copy(x), Y[:,i], permx) for i in 1:n], 1, n))
end
function corkendall(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
for j = 2:n
permx = sortperm(X[:,j])
for i = 1:j - 1
C[j,i] = corkendall!(X[:,j], X[:,i], permx)
C[i,j] = C[j,i]
end
end
return C
end
##CHUNK 2
if anynan[i]
C[i,j] = C[j,i] = NaN
else
Xirank = tiedrank(Xi)
C[i,j] = C[j,i] = cor(Xjrank, Xirank)
end
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
##CHUNK 3
C = Matrix{Float64}(I, n, n)
for j = 2:n
permx = sortperm(X[:,j])
for i = 1:j - 1
C[j,i] = corkendall!(X[:,j], X[:,i], permx)
C[i,j] = C[j,i]
end
end
return C
end
# Auxiliary functions for Kendall's rank correlation
"""
countties(x::AbstractVector{<:Real}, lo::Integer, hi::Integer)
Return the number of ties within `x[lo:hi]`. Assumes `x` is sorted.
"""
function countties(x::AbstractVector, lo::Integer, hi::Integer)
##CHUNK 4
corkendall(x, y=x)
Compute Kendall's rank correlation coefficient, τ. `x` and `y` must both be either
matrices or vectors.
"""
corkendall(x::AbstractVector{<:Real}, y::AbstractVector{<:Real}) = corkendall!(copy(x), copy(y))
function corkendall(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
permy = sortperm(y)
return([corkendall!(copy(y), X[:,i], permy) for i in 1:size(X, 2)])
end
function corkendall(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
n = size(Y, 2)
permx = sortperm(x)
return(reshape([corkendall!(copy(x), Y[:,i], permx) for i in 1:n], 1, n))
end
function corkendall(X::AbstractMatrix{<:Real})
n = size(X, 2)
##CHUNK 5
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
##CHUNK 6
function corspearman(X::AbstractMatrix{<:Real}, Y::AbstractMatrix{<:Real})
size(X, 1) == size(Y, 1) ||
throw(ArgumentError("number of rows in each array must match"))
nr = size(X, 2)
nc = size(Y, 2)
C = Matrix{Float64}(undef, nr, nc)
for j = 1:nr
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,:] .= NaN
continue
end
Xjrank = tiedrank(Xj)
for i = 1:nc
Yi = view(Y, :, i)
if any(isnan, Yi)
C[j,i] = NaN
else
Yirank = tiedrank(Yi)
##CHUNK 7
end
end
return C
end
function corspearman(x::AbstractVector{<:Real}, Y::AbstractMatrix{<:Real})
size(Y, 1) == length(x) ||
throw(DimensionMismatch("x and Y have inconsistent dimensions"))
n = size(Y, 2)
C = Matrix{Float64}(I, 1, n)
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
##CHUNK 8
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
##CHUNK 9
any(isnan, x) && return fill!(C, NaN)
xrank = tiedrank(x)
for j = 1:n
Yj = view(Y, :, j)
if any(isnan, Yj)
C[1,j] = NaN
else
Yjrank = tiedrank(Yj)
C[1,j] = cor(xrank, Yjrank)
end
end
return C
end
function corspearman(X::AbstractMatrix{<:Real})
n = size(X, 2)
C = Matrix{Float64}(I, n, n)
anynan = Vector{Bool}(undef, n)
for j = 1:n
Xj = view(X, :, j)
|
254
| 293
|
StatsBase.jl
| 278
|
function merge_sort!(v::AbstractVector, lo::Integer, hi::Integer, t::AbstractVector=similar(v, 0))
# Use of widen below prevents possible overflow errors when
# length(v) exceeds 2^16 (32 bit) or 2^32 (64 bit)
nswaps = widen(0)
@inbounds if lo < hi
hi - lo <= SMALL_THRESHOLD && return insertion_sort!(v, lo, hi)
m = midpoint(lo, hi)
(length(t) < m - lo + 1) && resize!(t, m - lo + 1)
nswaps = merge_sort!(v, lo, m, t)
nswaps += merge_sort!(v, m + 1, hi, t)
i, j = 1, lo
while j <= m
t[i] = v[j]
i += 1
j += 1
end
i, k = 1, lo
while k < j <= hi
if v[j] < t[i]
v[k] = v[j]
j += 1
nswaps += m - lo + 1 - (i - 1)
else
v[k] = t[i]
i += 1
end
k += 1
end
while k < j
v[k] = t[i]
k += 1
i += 1
end
end
return nswaps
end
|
function merge_sort!(v::AbstractVector, lo::Integer, hi::Integer, t::AbstractVector=similar(v, 0))
# Use of widen below prevents possible overflow errors when
# length(v) exceeds 2^16 (32 bit) or 2^32 (64 bit)
nswaps = widen(0)
@inbounds if lo < hi
hi - lo <= SMALL_THRESHOLD && return insertion_sort!(v, lo, hi)
m = midpoint(lo, hi)
(length(t) < m - lo + 1) && resize!(t, m - lo + 1)
nswaps = merge_sort!(v, lo, m, t)
nswaps += merge_sort!(v, m + 1, hi, t)
i, j = 1, lo
while j <= m
t[i] = v[j]
i += 1
j += 1
end
i, k = 1, lo
while k < j <= hi
if v[j] < t[i]
v[k] = v[j]
j += 1
nswaps += m - lo + 1 - (i - 1)
else
v[k] = t[i]
i += 1
end
k += 1
end
while k < j
v[k] = t[i]
k += 1
i += 1
end
end
return nswaps
end
|
[
254,
293
] |
function merge_sort!(v::AbstractVector, lo::Integer, hi::Integer, t::AbstractVector=similar(v, 0))
# Use of widen below prevents possible overflow errors when
# length(v) exceeds 2^16 (32 bit) or 2^32 (64 bit)
nswaps = widen(0)
@inbounds if lo < hi
hi - lo <= SMALL_THRESHOLD && return insertion_sort!(v, lo, hi)
m = midpoint(lo, hi)
(length(t) < m - lo + 1) && resize!(t, m - lo + 1)
nswaps = merge_sort!(v, lo, m, t)
nswaps += merge_sort!(v, m + 1, hi, t)
i, j = 1, lo
while j <= m
t[i] = v[j]
i += 1
j += 1
end
i, k = 1, lo
while k < j <= hi
if v[j] < t[i]
v[k] = v[j]
j += 1
nswaps += m - lo + 1 - (i - 1)
else
v[k] = t[i]
i += 1
end
k += 1
end
while k < j
v[k] = t[i]
k += 1
i += 1
end
end
return nswaps
end
|
function merge_sort!(v::AbstractVector, lo::Integer, hi::Integer, t::AbstractVector=similar(v, 0))
# Use of widen below prevents possible overflow errors when
# length(v) exceeds 2^16 (32 bit) or 2^32 (64 bit)
nswaps = widen(0)
@inbounds if lo < hi
hi - lo <= SMALL_THRESHOLD && return insertion_sort!(v, lo, hi)
m = midpoint(lo, hi)
(length(t) < m - lo + 1) && resize!(t, m - lo + 1)
nswaps = merge_sort!(v, lo, m, t)
nswaps += merge_sort!(v, m + 1, hi, t)
i, j = 1, lo
while j <= m
t[i] = v[j]
i += 1
j += 1
end
i, k = 1, lo
while k < j <= hi
if v[j] < t[i]
v[k] = v[j]
j += 1
nswaps += m - lo + 1 - (i - 1)
else
v[k] = t[i]
i += 1
end
k += 1
end
while k < j
v[k] = t[i]
k += 1
i += 1
end
end
return nswaps
end
|
merge_sort!
| 254
| 293
|
src/rankcorr.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
if key > threshold
# update priority queue
pq[1] = (key => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
end
end
##CHUNK 2
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
##CHUNK 3
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
#FILE: StatsBase.jl/test/wsampling.jl
##CHUNK 1
for rev in (true, false), T in (Int, Int16, Float64, Float16, BigInt, ComplexF64, Rational{Int})
r = rev ? reverse(4:7) : (4:7)
r = T===Int ? r : T.(r)
aa = Int.(sample(r, wv, n; ordered=true))
check_wsample_wrep(aa, (4, 7), wv, 5.0e-3; ordered=true, rev=rev)
aa = Int.(sample(r, wv, 10; ordered=true))
check_wsample_wrep(aa, (4, 7), wv, -1; ordered=true, rev=rev)
end
#### weighted sampling without replacement
function check_wsample_norep(a::AbstractArray, vrgn, wv::AbstractWeights, ptol::Real;
ordered::Bool=false, rev::Bool=false)
# each column of a for one run
vmin, vmax = vrgn
(amin, amax) = extrema(a)
@test vmin <= amin <= amax <= vmax
n = vmax - vmin + 1
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
@inbounds for i = 2:n
if x[i - 1] == x[i]
k += 1
elseif k > 0
# Sort the corresponding chunk of y, so the rows of hcat(x,y) are
# sorted first on x, then (where x values are tied) on y. Hence
# double ties can be counted by calling countties.
sort!(view(y, (i - k - 1):(i - 1)))
ntiesx += div(widen(k) * (k + 1), 2) # Must use wide integers here
ndoubleties += countties(y, i - k - 1, i - 1)
k = 0
end
end
if k > 0
sort!(view(y, (n - k):n))
ntiesx += div(widen(k) * (k + 1), 2)
ndoubleties += countties(y, n - k, n)
end
##CHUNK 2
ndoubleties += countties(y, i - k - 1, i - 1)
k = 0
end
end
if k > 0
sort!(view(y, (n - k):n))
ntiesx += div(widen(k) * (k + 1), 2)
ndoubleties += countties(y, n - k, n)
end
nswaps = merge_sort!(y, 1, n)
ntiesy = countties(y, 1, n)
# Calls to float below prevent possible overflow errors when
# length(x) exceeds 77_936 (32 bit) or 5_107_605_667 (64 bit)
(npairs + ndoubleties - ntiesx - ntiesy - 2 * nswaps) /
sqrt(float(npairs - ntiesx) * float(npairs - ntiesy))
end
"""
##CHUNK 3
x = v[i]
while j > lo
if x < v[j - 1]
nswaps += 1
v[j] = v[j - 1]
j -= 1
continue
end
break
end
v[j] = x
end
return nswaps
end
##CHUNK 4
result += div(thistiecount * (thistiecount + 1), 2)
thistiecount = widen(0)
end
end
if thistiecount > 0
result += div(thistiecount * (thistiecount + 1), 2)
end
result
end
# Tests appear to show that a value of 64 is optimal,
# but note that the equivalent constant in base/sort.jl is 20.
const SMALL_THRESHOLD = 64
# merge_sort! copied from Julia Base
# (commit 28330a2fef4d9d149ba0fd3ffa06347b50067647, dated 20 Sep 2020)
"""
merge_sort!(v::AbstractVector, lo::Integer, hi::Integer, t::AbstractVector=similar(v, 0))
##CHUNK 5
if n != length(y) error("Vectors must have same length") end
# Initial sorting
permute!(x, permx)
permute!(y, permx)
# Use widen to avoid overflows on both 32bit and 64bit
npairs = div(widen(n) * (n - 1), 2)
ntiesx = ndoubleties = nswaps = widen(0)
k = 0
@inbounds for i = 2:n
if x[i - 1] == x[i]
k += 1
elseif k > 0
# Sort the corresponding chunk of y, so the rows of hcat(x,y) are
# sorted first on x, then (where x values are tied) on y. Hence
# double ties can be counted by calling countties.
sort!(view(y, (i - k - 1):(i - 1)))
ntiesx += div(widen(k) * (k + 1), 2) # Must use wide integers here
##CHUNK 6
insertion_sort!(v::AbstractVector, lo::Integer, hi::Integer)
Mutates `v` by sorting elements `x[lo:hi]` using the insertion sort algorithm.
This method is a copy-paste-edit of sort! in base/sort.jl, amended to return the bubblesort distance.
"""
function insertion_sort!(v::AbstractVector, lo::Integer, hi::Integer)
if lo == hi return widen(0) end
nswaps = widen(0)
@inbounds for i = lo + 1:hi
j = i
x = v[i]
while j > lo
if x < v[j - 1]
nswaps += 1
v[j] = v[j - 1]
j -= 1
continue
end
break
end
|
306
| 324
|
StatsBase.jl
| 279
|
function insertion_sort!(v::AbstractVector, lo::Integer, hi::Integer)
if lo == hi return widen(0) end
nswaps = widen(0)
@inbounds for i = lo + 1:hi
j = i
x = v[i]
while j > lo
if x < v[j - 1]
nswaps += 1
v[j] = v[j - 1]
j -= 1
continue
end
break
end
v[j] = x
end
return nswaps
end
|
function insertion_sort!(v::AbstractVector, lo::Integer, hi::Integer)
if lo == hi return widen(0) end
nswaps = widen(0)
@inbounds for i = lo + 1:hi
j = i
x = v[i]
while j > lo
if x < v[j - 1]
nswaps += 1
v[j] = v[j - 1]
j -= 1
continue
end
break
end
v[j] = x
end
return nswaps
end
|
[
306,
324
] |
function insertion_sort!(v::AbstractVector, lo::Integer, hi::Integer)
if lo == hi return widen(0) end
nswaps = widen(0)
@inbounds for i = lo + 1:hi
j = i
x = v[i]
while j > lo
if x < v[j - 1]
nswaps += 1
v[j] = v[j - 1]
j -= 1
continue
end
break
end
v[j] = x
end
return nswaps
end
|
function insertion_sort!(v::AbstractVector, lo::Integer, hi::Integer)
if lo == hi return widen(0) end
nswaps = widen(0)
@inbounds for i = lo + 1:hi
j = i
x = v[i]
while j > lo
if x < v[j - 1]
nswaps += 1
v[j] = v[j - 1]
j -= 1
continue
end
break
end
v[j] = x
end
return nswaps
end
|
insertion_sort!
| 306
| 324
|
src/rankcorr.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
##CHUNK 2
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
##CHUNK 3
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
##CHUNK 4
# update threshold
threshold = pq[1].first
X = threshold * randexp(rng)
end
if ordered
# fill output array with items sorted as in a
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
##CHUNK 5
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
X = threshold * randexp(rng)
end
if ordered
# fill output array with items sorted as in a
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
#FILE: StatsBase.jl/src/misc.jl
##CHUNK 1
i = 2
@inbounds while i <= n
vi = v[i]
if isequal(vi, cv)
cl += 1
else
push!(vals, cv)
push!(lens, cl)
cv = vi
cl = 1
end
i += 1
end
# the last section
push!(vals, cv)
push!(lens, cl)
return (vals, lens)
end
#CURRENT FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
m = midpoint(lo, hi)
(length(t) < m - lo + 1) && resize!(t, m - lo + 1)
nswaps = merge_sort!(v, lo, m, t)
nswaps += merge_sort!(v, m + 1, hi, t)
i, j = 1, lo
while j <= m
t[i] = v[j]
i += 1
j += 1
end
i, k = 1, lo
while k < j <= hi
if v[j] < t[i]
v[k] = v[j]
j += 1
nswaps += m - lo + 1 - (i - 1)
else
##CHUNK 2
Mutates `v` by sorting elements `x[lo:hi]` using the merge sort algorithm.
This method is a copy-paste-edit of sort! in base/sort.jl, amended to return the bubblesort distance.
"""
function merge_sort!(v::AbstractVector, lo::Integer, hi::Integer, t::AbstractVector=similar(v, 0))
# Use of widen below prevents possible overflow errors when
# length(v) exceeds 2^16 (32 bit) or 2^32 (64 bit)
nswaps = widen(0)
@inbounds if lo < hi
hi - lo <= SMALL_THRESHOLD && return insertion_sort!(v, lo, hi)
m = midpoint(lo, hi)
(length(t) < m - lo + 1) && resize!(t, m - lo + 1)
nswaps = merge_sort!(v, lo, m, t)
nswaps += merge_sort!(v, m + 1, hi, t)
i, j = 1, lo
while j <= m
t[i] = v[j]
i += 1
##CHUNK 3
j += 1
end
i, k = 1, lo
while k < j <= hi
if v[j] < t[i]
v[k] = v[j]
j += 1
nswaps += m - lo + 1 - (i - 1)
else
v[k] = t[i]
i += 1
end
k += 1
end
while k < j
v[k] = t[i]
k += 1
i += 1
end
##CHUNK 4
if n != length(y) error("Vectors must have same length") end
# Initial sorting
permute!(x, permx)
permute!(y, permx)
# Use widen to avoid overflows on both 32bit and 64bit
npairs = div(widen(n) * (n - 1), 2)
ntiesx = ndoubleties = nswaps = widen(0)
k = 0
@inbounds for i = 2:n
if x[i - 1] == x[i]
k += 1
elseif k > 0
# Sort the corresponding chunk of y, so the rows of hcat(x,y) are
# sorted first on x, then (where x values are tied) on y. Hence
# double ties can be counted by calling countties.
sort!(view(y, (i - k - 1):(i - 1)))
ntiesx += div(widen(k) * (k + 1), 2) # Must use wide integers here
|
60
| 80
|
StatsBase.jl
| 280
|
function _competerank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k = i
end
rks[pi] = k
end
end
return rks
end
|
function _competerank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k = i
end
rks[pi] = k
end
end
return rks
end
|
[
60,
80
] |
function _competerank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k = i
end
rks[pi] = k
end
end
return rks
end
|
function _competerank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k = i
end
rks[pi] = k
end
end
return rks
end
|
_competerank!
| 60
| 80
|
src/ranking.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
##CHUNK 2
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
##CHUNK 3
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
knuths_sample!(a::AbstractArray, x::AbstractArray; initshuffle::Bool=true) =
knuths_sample!(default_rng(), a, x; initshuffle=initshuffle)
"""
##CHUNK 4
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
##CHUNK 5
faster than Knuth's algorithm especially when `n` is greater than `k`.
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
##CHUNK 6
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
##CHUNK 7
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_a!(a::AbstractArray, x::AbstractArray) = seqsample_a!(default_rng(), a, x)
"""
seqsample_c!([rng], a::AbstractArray, x::AbstractArray)
#CURRENT FILE: StatsBase.jl/src/ranking.jl
##CHUNK 1
competerank(x::AbstractArray; sortkwargs...) =
_rank(_competerank!, x; sortkwargs...)
# Dense ranking ("1223" ranking) -- resolve tied ranks using min
function _denserank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k += 1
end
##CHUNK 2
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k += 1
end
rks[pi] = k
end
end
return rks
end
"""
denserank(x; lt=isless, by=identity, rev::Bool=false, ...)
##CHUNK 3
# Tied ranking ("1 2.5 2.5 4" ranking) -- resolve tied ranks using average
function _tiedrank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
v = x[p[1]]
s = 1 # starting index of current range
for e in 2:n # e is pass-by-end index of current range
cx = x[p[e]]
if cx != v
# fill average rank to s : e-1
ar = (s + e - 1) / 2
for i = s : e-1
rks[p[i]] = ar
end
# switch to next range
s = e
v = cx
|
97
| 117
|
StatsBase.jl
| 281
|
function _denserank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k += 1
end
rks[pi] = k
end
end
return rks
end
|
function _denserank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k += 1
end
rks[pi] = k
end
end
return rks
end
|
[
97,
117
] |
function _denserank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k += 1
end
rks[pi] = k
end
end
return rks
end
|
function _denserank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k += 1
end
rks[pi] = k
end
end
return rks
end
|
_denserank!
| 97
| 117
|
src/ranking.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
##CHUNK 2
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
##CHUNK 3
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
knuths_sample!(a::AbstractArray, x::AbstractArray; initshuffle::Bool=true) =
knuths_sample!(default_rng(), a, x; initshuffle=initshuffle)
"""
##CHUNK 4
faster than Knuth's algorithm especially when `n` is greater than `k`.
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
##CHUNK 5
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
##CHUNK 6
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
##CHUNK 7
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_a!(a::AbstractArray, x::AbstractArray) = seqsample_a!(default_rng(), a, x)
"""
seqsample_c!([rng], a::AbstractArray, x::AbstractArray)
#CURRENT FILE: StatsBase.jl/src/ranking.jl
##CHUNK 1
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k = i
end
rks[pi] = k
end
end
return rks
end
##CHUNK 2
All items in `x` are given distinct, successive ranks based on their position
in the sorted vector.
Missing values are assigned rank `missing`.
"""
ordinalrank(x::AbstractArray; sortkwargs...) =
_rank(_ordinalrank!, x; sortkwargs...)
# Competition ranking ("1224" ranking) -- resolve tied ranks using min
function _competerank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
##CHUNK 3
# Tied ranking ("1 2.5 2.5 4" ranking) -- resolve tied ranks using average
function _tiedrank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
v = x[p[1]]
s = 1 # starting index of current range
for e in 2:n # e is pass-by-end index of current range
cx = x[p[e]]
if cx != v
# fill average rank to s : e-1
ar = (s + e - 1) / 2
for i = s : e-1
rks[p[i]] = ar
end
# switch to next range
s = e
v = cx
|
134
| 163
|
StatsBase.jl
| 282
|
function _tiedrank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
v = x[p[1]]
s = 1 # starting index of current range
for e in 2:n # e is pass-by-end index of current range
cx = x[p[e]]
if cx != v
# fill average rank to s : e-1
ar = (s + e - 1) / 2
for i = s : e-1
rks[p[i]] = ar
end
# switch to next range
s = e
v = cx
end
end
# the last range
ar = (s + n) / 2
for i = s : n
rks[p[i]] = ar
end
end
return rks
end
|
function _tiedrank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
v = x[p[1]]
s = 1 # starting index of current range
for e in 2:n # e is pass-by-end index of current range
cx = x[p[e]]
if cx != v
# fill average rank to s : e-1
ar = (s + e - 1) / 2
for i = s : e-1
rks[p[i]] = ar
end
# switch to next range
s = e
v = cx
end
end
# the last range
ar = (s + n) / 2
for i = s : n
rks[p[i]] = ar
end
end
return rks
end
|
[
134,
163
] |
function _tiedrank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
v = x[p[1]]
s = 1 # starting index of current range
for e in 2:n # e is pass-by-end index of current range
cx = x[p[e]]
if cx != v
# fill average rank to s : e-1
ar = (s + e - 1) / 2
for i = s : e-1
rks[p[i]] = ar
end
# switch to next range
s = e
v = cx
end
end
# the last range
ar = (s + n) / 2
for i = s : n
rks[p[i]] = ar
end
end
return rks
end
|
function _tiedrank!(rks::AbstractArray, x::AbstractArray, p::AbstractArray{<:Integer})
n = _check_randparams(rks, x, p)
@inbounds if n > 0
v = x[p[1]]
s = 1 # starting index of current range
for e in 2:n # e is pass-by-end index of current range
cx = x[p[e]]
if cx != v
# fill average rank to s : e-1
ar = (s + e - 1) / 2
for i = s : e-1
rks[p[i]] = ar
end
# switch to next range
s = e
v = cx
end
end
# the last range
ar = (s + n) / 2
for i = s : n
rks[p[i]] = ar
end
end
return rks
end
|
_tiedrank!
| 134
| 163
|
src/ranking.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
if key > threshold
# update priority queue
pq[1] = (key => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
end
end
##CHUNK 2
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
##CHUNK 3
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
##CHUNK 4
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_a!(a::AbstractArray, x::AbstractArray) = seqsample_a!(default_rng(), a, x)
"""
seqsample_c!([rng], a::AbstractArray, x::AbstractArray)
##CHUNK 5
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
##CHUNK 6
end
else
if k == 1
@inbounds x[1] = sample(rng, a)
elseif k == 2
@inbounds (x[1], x[2]) = samplepair(rng, a)
elseif n < k * 24
fisher_yates_sample!(rng, a, x)
else
self_avoid_sample!(rng, a, x)
end
end
end
return x
end
sample!(a::AbstractArray, x::AbstractArray; replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, x; replace=replace, ordered=ordered)
"""
##CHUNK 7
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
knuths_sample!(a::AbstractArray, x::AbstractArray; initshuffle::Bool=true) =
knuths_sample!(default_rng(), a, x; initshuffle=initshuffle)
"""
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
@inbounds for i = 2:n
if x[i - 1] == x[i]
k += 1
elseif k > 0
# Sort the corresponding chunk of y, so the rows of hcat(x,y) are
# sorted first on x, then (where x values are tied) on y. Hence
# double ties can be counted by calling countties.
sort!(view(y, (i - k - 1):(i - 1)))
ntiesx += div(widen(k) * (k + 1), 2) # Must use wide integers here
ndoubleties += countties(y, i - k - 1, i - 1)
k = 0
end
end
if k > 0
sort!(view(y, (n - k):n))
ntiesx += div(widen(k) * (k + 1), 2)
ndoubleties += countties(y, n - k, n)
end
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
if corrected
n = count(!iszero, w)
n / (s * (n - 1))
else
1 / s
end
end
"""
eweights(t::AbstractArray{<:Integer}, λ::Real; scale=false)
eweights(t::AbstractVector{T}, r::StepRange{T}, λ::Real; scale=false) where T
eweights(n::Integer, λ::Real; scale=false)
Construct a [`Weights`](@ref) vector which assigns exponentially decreasing weights to past
observations (larger integer values `i` in `t`).
The integer value `n` represents the number of past observations to consider.
`n` defaults to `maximum(t) - minimum(t) + 1` if only `t` is passed in
and the elements are integers, and to `length(r)` if a superset range `r` is also passed in.
If `n` is explicitly passed instead of `t`, `t` defaults to `1:n`.
#CURRENT FILE: StatsBase.jl/src/ranking.jl
##CHUNK 1
n = _check_randparams(rks, x, p)
@inbounds if n > 0
p1 = p[1]
v = x[p1]
rks[p1] = k = 1
for i in 2:n
pi = p[i]
xi = x[pi]
if xi != v
v = xi
k = i
end
rks[pi] = k
end
end
return rks
end
|
53
| 78
|
StatsBase.jl
| 283
|
function cronbachalpha(covmatrix::AbstractMatrix{<:Real})
if !isposdef(covmatrix)
throw(ArgumentError("Covariance matrix must be positive definite. " *
"Maybe you passed the data matrix instead of its covariance matrix? " *
"If so, call `cronbachalpha(cov(...))` instead."))
end
k = size(covmatrix, 2)
k > 1 || throw(ArgumentError("Covariance matrix must have more than one column."))
v = vec(sum(covmatrix, dims=1))
σ = sum(v)
for i in axes(v, 1)
v[i] -= covmatrix[i, i]
end
σ_diag = sum(i -> covmatrix[i, i], 1:k)
alpha = k * (1 - σ_diag / σ) / (k - 1)
if k > 2
dropped = typeof(alpha)[(k - 1) * (1 - (σ_diag - covmatrix[i, i]) / (σ - 2*v[i] - covmatrix[i, i])) / (k - 2)
for i in 1:k]
else
# if k = 2 do not produce dropped; this has to be also
# correctly handled in show
dropped = Vector{typeof(alpha)}()
end
return CronbachAlpha(alpha, dropped)
end
|
function cronbachalpha(covmatrix::AbstractMatrix{<:Real})
if !isposdef(covmatrix)
throw(ArgumentError("Covariance matrix must be positive definite. " *
"Maybe you passed the data matrix instead of its covariance matrix? " *
"If so, call `cronbachalpha(cov(...))` instead."))
end
k = size(covmatrix, 2)
k > 1 || throw(ArgumentError("Covariance matrix must have more than one column."))
v = vec(sum(covmatrix, dims=1))
σ = sum(v)
for i in axes(v, 1)
v[i] -= covmatrix[i, i]
end
σ_diag = sum(i -> covmatrix[i, i], 1:k)
alpha = k * (1 - σ_diag / σ) / (k - 1)
if k > 2
dropped = typeof(alpha)[(k - 1) * (1 - (σ_diag - covmatrix[i, i]) / (σ - 2*v[i] - covmatrix[i, i])) / (k - 2)
for i in 1:k]
else
# if k = 2 do not produce dropped; this has to be also
# correctly handled in show
dropped = Vector{typeof(alpha)}()
end
return CronbachAlpha(alpha, dropped)
end
|
[
53,
78
] |
function cronbachalpha(covmatrix::AbstractMatrix{<:Real})
if !isposdef(covmatrix)
throw(ArgumentError("Covariance matrix must be positive definite. " *
"Maybe you passed the data matrix instead of its covariance matrix? " *
"If so, call `cronbachalpha(cov(...))` instead."))
end
k = size(covmatrix, 2)
k > 1 || throw(ArgumentError("Covariance matrix must have more than one column."))
v = vec(sum(covmatrix, dims=1))
σ = sum(v)
for i in axes(v, 1)
v[i] -= covmatrix[i, i]
end
σ_diag = sum(i -> covmatrix[i, i], 1:k)
alpha = k * (1 - σ_diag / σ) / (k - 1)
if k > 2
dropped = typeof(alpha)[(k - 1) * (1 - (σ_diag - covmatrix[i, i]) / (σ - 2*v[i] - covmatrix[i, i])) / (k - 2)
for i in 1:k]
else
# if k = 2 do not produce dropped; this has to be also
# correctly handled in show
dropped = Vector{typeof(alpha)}()
end
return CronbachAlpha(alpha, dropped)
end
|
function cronbachalpha(covmatrix::AbstractMatrix{<:Real})
if !isposdef(covmatrix)
throw(ArgumentError("Covariance matrix must be positive definite. " *
"Maybe you passed the data matrix instead of its covariance matrix? " *
"If so, call `cronbachalpha(cov(...))` instead."))
end
k = size(covmatrix, 2)
k > 1 || throw(ArgumentError("Covariance matrix must have more than one column."))
v = vec(sum(covmatrix, dims=1))
σ = sum(v)
for i in axes(v, 1)
v[i] -= covmatrix[i, i]
end
σ_diag = sum(i -> covmatrix[i, i], 1:k)
alpha = k * (1 - σ_diag / σ) / (k - 1)
if k > 2
dropped = typeof(alpha)[(k - 1) * (1 - (σ_diag - covmatrix[i, i]) / (σ - 2*v[i] - covmatrix[i, i])) / (k - 2)
for i in 1:k]
else
# if k = 2 do not produce dropped; this has to be also
# correctly handled in show
dropped = Vector{typeof(alpha)}()
end
return CronbachAlpha(alpha, dropped)
end
|
cronbachalpha
| 53
| 78
|
src/reliability.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 2
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 3
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 4
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
vr = a[j,i]
a[i,j] = a[j,i] = middle(vl, vr)
end
end
return a
end
function _scalevars(x::DenseMatrix, s::AbstractWeights, dims::Int)
dims == 1 ? Diagonal(s) * x :
dims == 2 ? x * Diagonal(s) :
error("dims should be either 1 or 2.")
end
## scatter matrix
_unscaled_covzm(x::DenseMatrix, dims::Integer) = unscaled_covzm(x, dims)
_unscaled_covzm(x::DenseMatrix, wv::AbstractWeights, dims::Integer) =
_symmetrize!(unscaled_covzm(x, _scalevars(x, wv, dims), dims))
"""
##CHUNK 2
function cov(sc::SimpleCovariance, X::AbstractMatrix; dims::Int=1, mean=nothing)
dims ∈ (1, 2) || throw(ArgumentError("Argument dims can only be 1 or 2 (given: $dims)"))
if mean === nothing
return cov(X; dims=dims, corrected=sc.corrected)
else
return covm(X, mean, dims, corrected=sc.corrected)
end
end
function cov(sc::SimpleCovariance, X::AbstractMatrix, w::AbstractWeights; dims::Int=1, mean=nothing)
dims ∈ (1, 2) || throw(ArgumentError("Argument dims can only be 1 or 2 (given: $dims)"))
if mean === nothing
return cov(X, w, dims, corrected=sc.corrected)
else
return covm(X, mean, w, dims, corrected=sc.corrected)
end
end
#FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
#FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
#CURRENT FILE: StatsBase.jl/src/reliability.jl
##CHUNK 1
```math
ρ = \\frac{k}{k-1} \\left(1 - \\frac{\\sum^k_{i=1} σ^2_i}{\\sum_{i=1}^k \\sum_{j=1}^k σ_{ij}}\\right)
```
where ``k`` is the number of items, i.e. columns, ``σ_i^2`` the item variance,
and ``σ_{ij}`` the inter-item covariance.
Returns a `CronbachAlpha` object that holds:
* `alpha`: the Cronbach's alpha score for all items, i.e. columns, in `covmatrix`; and
* `dropped`: a vector giving Cronbach's alpha scores if a specific item,
i.e. column, is dropped from `covmatrix`.
# Example
```jldoctest
julia> using StatsBase
julia> cov_X = [10 6 6 6;
6 11 6 6;
6 6 12 6;
##CHUNK 2
struct CronbachAlpha{T <: Real}
alpha::T
dropped::Vector{T}
end
function Base.show(io::IO, x::CronbachAlpha)
@printf(io, "Cronbach's alpha for all items: %.4f\n", x.alpha)
isempty(x.dropped) && return
println(io, "\nCronbach's alpha if an item is dropped:")
for (idx, val) in enumerate(x.dropped)
@printf(io, "item %i: %.4f\n", idx, val)
end
end
"""
cronbachalpha(covmatrix::AbstractMatrix{<:Real})
Calculate Cronbach's alpha (1951) from a covariance matrix `covmatrix` according to
the [formula](https://en.wikipedia.org/wiki/Cronbach%27s_alpha):
|
123
| 136
|
StatsBase.jl
| 284
|
function trimvar(x::AbstractVector; prop::Real=0.0, count::Integer=0)
n = length(x)
n > 0 || throw(ArgumentError("x can not be empty."))
if count == 0
0 <= prop < 0.5 || throw(ArgumentError("prop must satisfy 0 ≤ prop < 0.5."))
count = floor(Int, n * prop)
else
0 <= count < n/2 || throw(ArgumentError("count must satisfy 0 ≤ count < length(x)/2."))
prop = count/n
end
return var(winsor(x, count=count)) / (n * (1 - 2prop)^2)
end
|
function trimvar(x::AbstractVector; prop::Real=0.0, count::Integer=0)
n = length(x)
n > 0 || throw(ArgumentError("x can not be empty."))
if count == 0
0 <= prop < 0.5 || throw(ArgumentError("prop must satisfy 0 ≤ prop < 0.5."))
count = floor(Int, n * prop)
else
0 <= count < n/2 || throw(ArgumentError("count must satisfy 0 ≤ count < length(x)/2."))
prop = count/n
end
return var(winsor(x, count=count)) / (n * (1 - 2prop)^2)
end
|
[
123,
136
] |
function trimvar(x::AbstractVector; prop::Real=0.0, count::Integer=0)
n = length(x)
n > 0 || throw(ArgumentError("x can not be empty."))
if count == 0
0 <= prop < 0.5 || throw(ArgumentError("prop must satisfy 0 ≤ prop < 0.5."))
count = floor(Int, n * prop)
else
0 <= count < n/2 || throw(ArgumentError("count must satisfy 0 ≤ count < length(x)/2."))
prop = count/n
end
return var(winsor(x, count=count)) / (n * (1 - 2prop)^2)
end
|
function trimvar(x::AbstractVector; prop::Real=0.0, count::Integer=0)
n = length(x)
n > 0 || throw(ArgumentError("x can not be empty."))
if count == 0
0 <= prop < 0.5 || throw(ArgumentError("prop must satisfy 0 ≤ prop < 0.5."))
count = floor(Int, n * prop)
else
0 <= count < n/2 || throw(ArgumentError("count must satisfy 0 ≤ count < length(x)/2."))
prop = count/n
end
return var(winsor(x, count=count)) / (n * (1 - 2prop)^2)
end
|
trimvar
| 123
| 136
|
src/robust.jl
|
#FILE: StatsBase.jl/test/counts.jl
##CHUNK 1
@test addcounts!(fill(0.0, 1, 5), reshape(x, 10, 50, 10), 1:5, w) ≈ c0 # Perhaps this should not be allowed
@test x == x0
@test w == w0
end
@testset "2D integer counts" begin
x = rand(1:4, n)
y = rand(1:5, n)
w = weights(rand(n))
x0 = deepcopy(x)
y0 = deepcopy(y)
w0 = deepcopy(w)
c0 = Int[count(t->t != 0, (x .== i) .& (y .== j)) for i in 1:4, j in 1:5]
@test counts(x, y, (4, 5)) == c0
@test counts(x .+ 2, y .+ 3, (3:6, 4:8)) == c0
@test proportions(x, y, (1:4, 1:5)) ≈ (c0 ./ n)
@test counts(reshape(x, 10, 50, 10), reshape(y, 10, 50, 10), (4, 5)) == c0
##CHUNK 2
@test counts(reshape(x, 10, 50, 10), 5, w) ≈ c0 # Perhaps this should not be allowed
@test counts(x, w) ≈ c0
@test counts(x .+ 1, 2:6, w) ≈ c0
@test proportions(x, w) ≈ (c0 ./ sum(w))
@test counts(reshape(x, 10, 50, 10), w) ≈ c0 # Perhaps this should not be allowed
#addcounts! to row matrix
c0 = reshape(c0, 1, 5)
@test addcounts!(fill(0.0, 1, 5), x, 1:5, w) ≈ c0
@test addcounts!(fill(0.0, 1, 5), reshape(x, 10, 50, 10), 1:5, w) ≈ c0 # Perhaps this should not be allowed
@test x == x0
@test w == w0
end
@testset "2D integer counts" begin
x = rand(1:4, n)
y = rand(1:5, n)
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
return lower + ratio * (upper - lower)
end
elseif method == :compete
if value > maximum(itr)
return 1.0
elseif value ≤ minimum(itr)
return 0.0
else
value ∈ itr && (count_less += 1)
return (count_less - 1) / (n - 1)
end
elseif method == :tied
return (count_less + count_equal/2) / n
elseif method == :strict
return count_less / n
elseif method == :weak
return (count_less + count_equal) / n
else
throw(ArgumentError("method=:$method is not valid. Pass :inc, :exc, :compete, :tied, :strict or :weak."))
end
#CURRENT FILE: StatsBase.jl/src/robust.jl
##CHUNK 1
function uplo(x::AbstractVector; prop::Real=0.0, count::Integer=0)
n = length(x)
n > 0 || throw(ArgumentError("x can not be empty."))
if count == 0
0 <= prop < 0.5 || throw(ArgumentError("prop must satisfy 0 ≤ prop < 0.5."))
count = floor(Int, n * prop)
else
prop == 0 || throw(ArgumentError("prop and count can not both be > 0."))
0 <= count < n/2 || throw(ArgumentError("count must satisfy 0 ≤ count < length(x)/2."))
end
# indices for lowest count values
x2 = Base.copymutable(x)
lo = partialsort!(x2, count+1)
up = partialsort!(x2, n-count)
up, lo
end
##CHUNK 2
# Robust Statistics
#############################
#
# Trimming outliers
#
#############################
# Trimmed set
"Return the upper and lower bound elements used by `trim` and `winsor`"
function uplo(x::AbstractVector; prop::Real=0.0, count::Integer=0)
n = length(x)
n > 0 || throw(ArgumentError("x can not be empty."))
if count == 0
0 <= prop < 0.5 || throw(ArgumentError("prop must satisfy 0 ≤ prop < 0.5."))
count = floor(Int, n * prop)
else
prop == 0 || throw(ArgumentError("prop and count can not both be > 0."))
0 <= count < n/2 || throw(ArgumentError("count must satisfy 0 ≤ count < length(x)/2."))
##CHUNK 3
```
"""
function winsor(x::AbstractVector; prop::Real=0.0, count::Integer=0)
up, lo = uplo(x; prop=prop, count=count)
(clamp(xi, lo, up) for xi in x)
end
"""
winsor!(x::AbstractVector; prop=0.0, count=0)
A variant of [`winsor`](@ref) that modifies vector `x` in place.
"""
function winsor!(x::AbstractVector; prop::Real=0.0, count::Integer=0)
copyto!(x, winsor(x; prop=prop, count=count))
return x
end
#############################
##CHUNK 4
# Example
```jldoctest
julia> collect(winsor([5,2,3,4,1], prop=0.2))
5-element Vector{Int64}:
4
2
3
4
2
```
"""
function winsor(x::AbstractVector; prop::Real=0.0, count::Integer=0)
up, lo = uplo(x; prop=prop, count=count)
(clamp(xi, lo, up) for xi in x)
end
"""
winsor!(x::AbstractVector; prop=0.0, count=0)
##CHUNK 5
winsor(x::AbstractVector; prop=0.0, count=0)
Return an iterator of all elements of `x` that replaces either `count` or
proportion `prop` of the highest elements with the previous-highest element
and an equal number of the lowest elements with the next-lowest element.
The number of replaced elements could be smaller than specified if several
elements equal the lower or upper bound.
To compute the Winsorized mean of `x` use `mean(winsor(x))`.
# Example
```jldoctest
julia> collect(winsor([5,2,3,4,1], prop=0.2))
5-element Vector{Int64}:
4
2
3
4
2
##CHUNK 6
A variant of [`trim`](@ref) that modifies `x` in place.
"""
function trim!(x::AbstractVector; prop::Real=0.0, count::Integer=0)
up, lo = uplo(x; prop=prop, count=count)
ix = (i for (i,xi) in enumerate(x) if lo > xi || xi > up)
deleteat!(x, ix)
return x
end
"""
winsor(x::AbstractVector; prop=0.0, count=0)
Return an iterator of all elements of `x` that replaces either `count` or
proportion `prop` of the highest elements with the previous-highest element
and an equal number of the lowest elements with the next-lowest element.
The number of replaced elements could be smaller than specified if several
elements equal the lower or upper bound.
To compute the Winsorized mean of `x` use `mean(winsor(x))`.
##CHUNK 7
"""
trim(x::AbstractVector; prop=0.0, count=0)
Return an iterator of all elements of `x` that omits either `count` or proportion
`prop` of the highest and lowest elements.
The number of trimmed elements could be smaller than specified if several
elements equal the lower or upper bound.
To compute the trimmed mean of `x` use `mean(trim(x))`;
to compute the variance use `trimvar(x)` (see [`trimvar`](@ref)).
# Example
```jldoctest
julia> collect(trim([5,2,4,3,1], prop=0.2))
3-element Vector{Int64}:
2
4
3
```
|
14
| 29
|
StatsBase.jl
| 285
|
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
|
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
|
[
14,
29
] |
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
|
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
|
direct_sample!
| 14
| 29
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
##CHUNK 2
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_a!(a::AbstractArray, x::AbstractArray) = seqsample_a!(default_rng(), a, x)
##CHUNK 3
### Algorithms for sampling with replacement
direct_sample!(a::UnitRange, x::AbstractArray) = direct_sample!(default_rng(), a, x)
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
##CHUNK 4
"""
function seqsample_c!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
l = n - k + 1
minv = l
u = n
while u >= l
v = u * rand(rng)
if v < minv
minv = v
##CHUNK 5
Reference: D. Knuth. *The Art of Computer Programming*. Vol 2, 3.4.2, p.142.
This algorithm consumes `length(a)` random numbers. It requires no additional
memory space. Suitable for the case where memory is tight.
"""
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
##CHUNK 6
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
direct_sample!(a::AbstractArray, x::AbstractArray) = direct_sample!(default_rng(), a, x)
# check whether we can use T to store indices 1:n exactly, and
# use some heuristics to decide whether it is beneficial for k samples
# (true for a subset of hardware-supported numeric types)
_storeindices(n, k, ::Type{T}) where {T<:Integer} = n ≤ typemax(T)
##CHUNK 7
return a[i1], a[i2]
end
samplepair(a::AbstractArray) = samplepair(default_rng(), a)
### Algorithm for sampling without replacement
"""
knuths_sample!([rng], a, x)
*Knuth's Algorithm S* for random sampling without replacement.
Reference: D. Knuth. *The Art of Computer Programming*. Vol 2, 3.4.2, p.142.
This algorithm consumes `length(a)` random numbers. It requires no additional
memory space. Suitable for the case where memory is tight.
"""
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
##CHUNK 8
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
##CHUNK 9
```
This algorithm consumes `k=length(x)` random numbers. It uses an integer array of
length `n=length(a)` internally to maintain the shuffled indices. It is considerably
faster than Knuth's algorithm especially when `n` is greater than `k`.
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
##CHUNK 10
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
idx = rand(rng, rgen)
while idx in s
idx = rand(rng, rgen)
|
40
| 50
|
StatsBase.jl
| 286
|
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
|
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
|
[
40,
50
] |
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
|
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
|
direct_sample!
| 40
| 50
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
"""
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
t = x[j]
##CHUNK 2
### Algorithms for sampling with replacement
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
direct_sample!(a::UnitRange, x::AbstractArray) = direct_sample!(default_rng(), a, x)
##CHUNK 3
drastically, resulting in poorer performance.
"""
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
##CHUNK 4
This algorithm consumes ``O(n)`` random numbers, with `n=length(a)`.
The outputs are ordered.
"""
function seqsample_a!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
##CHUNK 5
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
##CHUNK 6
for i = 1:k
# swap element `i` with another random element in inds[i:n]
# set element `i` in `x`
end
```
This algorithm consumes `k=length(x)` random numbers. It uses an integer array of
length `n=length(a)` internally to maintain the shuffled indices. It is considerably
faster than Knuth's algorithm especially when `n` is greater than `k`.
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
##CHUNK 7
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
##CHUNK 8
Noting `k=length(x)` and `n=length(a)`, this algorithm:
* consumes `k` random numbers
* has time complexity ``O(n k)``, as scanning the weight vector each time takes ``O(n)``
* requires no additional memory space.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
##CHUNK 9
###########################################################
#
# (non-weighted) sampling
#
###########################################################
using AliasTables
using Random: Sampler
using Random: default_rng
### Algorithms for sampling with replacement
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
##CHUNK 10
storeindices(n, k, T) = false
# order results of a sampler that does not order automatically
function sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n, k = length(a), length(x)
# todo: if eltype(x) <: Real && eltype(a) <: Real,
# in some cases it might be faster to check
# issorted(a) to see if we can just sort x
if storeindices(n, k, eltype(x))
sort!(sampler!(rng, Base.OneTo(n), x), by=real, lt=<)
@inbounds for i = 1:k
x[i] = a[Int(x[i])]
end
else
indices = Array{Int}(undef, k)
sort!(sampler!(rng, Base.OneTo(n), indices))
|
65
| 87
|
StatsBase.jl
| 287
|
function sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n, k = length(a), length(x)
# todo: if eltype(x) <: Real && eltype(a) <: Real,
# in some cases it might be faster to check
# issorted(a) to see if we can just sort x
if storeindices(n, k, eltype(x))
sort!(sampler!(rng, Base.OneTo(n), x), by=real, lt=<)
@inbounds for i = 1:k
x[i] = a[Int(x[i])]
end
else
indices = Array{Int}(undef, k)
sort!(sampler!(rng, Base.OneTo(n), indices))
@inbounds for i = 1:k
x[i] = a[indices[i]]
end
end
return x
end
|
function sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n, k = length(a), length(x)
# todo: if eltype(x) <: Real && eltype(a) <: Real,
# in some cases it might be faster to check
# issorted(a) to see if we can just sort x
if storeindices(n, k, eltype(x))
sort!(sampler!(rng, Base.OneTo(n), x), by=real, lt=<)
@inbounds for i = 1:k
x[i] = a[Int(x[i])]
end
else
indices = Array{Int}(undef, k)
sort!(sampler!(rng, Base.OneTo(n), indices))
@inbounds for i = 1:k
x[i] = a[indices[i]]
end
end
return x
end
|
[
65,
87
] |
function sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n, k = length(a), length(x)
# todo: if eltype(x) <: Real && eltype(a) <: Real,
# in some cases it might be faster to check
# issorted(a) to see if we can just sort x
if storeindices(n, k, eltype(x))
sort!(sampler!(rng, Base.OneTo(n), x), by=real, lt=<)
@inbounds for i = 1:k
x[i] = a[Int(x[i])]
end
else
indices = Array{Int}(undef, k)
sort!(sampler!(rng, Base.OneTo(n), indices))
@inbounds for i = 1:k
x[i] = a[indices[i]]
end
end
return x
end
|
function sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n, k = length(a), length(x)
# todo: if eltype(x) <: Real && eltype(a) <: Real,
# in some cases it might be faster to check
# issorted(a) to see if we can just sort x
if storeindices(n, k, eltype(x))
sort!(sampler!(rng, Base.OneTo(n), x), by=real, lt=<)
@inbounds for i = 1:k
x[i] = a[Int(x[i])]
end
else
indices = Array{Int}(undef, k)
sort!(sampler!(rng, Base.OneTo(n), indices))
@inbounds for i = 1:k
x[i] = a[indices[i]]
end
end
return x
end
|
sample_ordered!
| 65
| 87
|
src/sampling.jl
|
#FILE: StatsBase.jl/src/pairwise.jl
##CHUNK 1
[view(yi, nminds′) for yi in y],
symmetric)
end
function _pairwise!(f, dest::AbstractMatrix, x, y;
symmetric::Bool=false, skipmissing::Symbol=:none)
if !(skipmissing in (:none, :pairwise, :listwise))
throw(ArgumentError("skipmissing must be one of :none, :pairwise or :listwise"))
end
x′ = x isa Union{AbstractArray, Tuple, NamedTuple} ? x : collect(x)
y′ = y isa Union{AbstractArray, Tuple, NamedTuple} ? y : collect(y)
m = length(x′)
n = length(y′)
size(dest) != (m, n) &&
throw(DimensionMismatch("dest has dimensions $(size(dest)) but expected ($m, $n)"))
Base.has_offset_axes(dest) && throw("dest indices must start at 1")
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
x[i] = a[heappop!(pq).second]
end
end
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
##CHUNK 2
storeindices(n, k, ::Type{T}) where {T<:Base.HWNumber} = _storeindices(n, k, T)
storeindices(n, k, T) = false
# order results of a sampler that does not order automatically
# special case of a range can be done more efficiently
sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractRange, x::AbstractArray) =
sort!(sampler!(rng, a, x), rev=step(a)<0)
# weighted case:
sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray) =
sample_ordered!(rng, a, x) do rng, a, x
sampler!(rng, a, wv, x)
end
### draw a pair of distinct integers in [1:n]
"""
samplepair([rng], n)
##CHUNK 3
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
end
return x
end
##CHUNK 4
direct_sample!(a::AbstractArray, x::AbstractArray) = direct_sample!(default_rng(), a, x)
# check whether we can use T to store indices 1:n exactly, and
# use some heuristics to decide whether it is beneficial for k samples
# (true for a subset of hardware-supported numeric types)
_storeindices(n, k, ::Type{T}) where {T<:Integer} = n ≤ typemax(T)
_storeindices(n, k, ::Type{T}) where {T<:Union{Float32,Float64}} = k < 22 && n ≤ maxintfloat(T)
_storeindices(n, k, ::Type{Complex{T}}) where {T} = _storeindices(n, k, T)
_storeindices(n, k, ::Type{Rational{T}}) where {T} = k < 16 && _storeindices(n, k, T)
_storeindices(n, k, T) = false
storeindices(n, k, ::Type{T}) where {T<:Base.HWNumber} = _storeindices(n, k, T)
storeindices(n, k, T) = false
# order results of a sampler that does not order automatically
# special case of a range can be done more efficiently
sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractRange, x::AbstractArray) =
sort!(sampler!(rng, a, x), rev=step(a)<0)
# weighted case:
##CHUNK 5
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
##CHUNK 6
end
if ordered
# fill output array with items sorted as in a
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
##CHUNK 7
v = u * rand(rng)
if v < minv
minv = v
end
u -= 1
end
s = trunc(Int, minv) + 1
x[j+=1] = a[i+=s]
n -= s
k -= 1
end
if k > 0
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_c!(a::AbstractArray, x::AbstractArray) = seqsample_c!(default_rng(), a, x)
##CHUNK 8
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
##CHUNK 9
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
direct_sample!(a::AbstractArray, x::AbstractArray) = direct_sample!(default_rng(), a, x)
# check whether we can use T to store indices 1:n exactly, and
# use some heuristics to decide whether it is beneficial for k samples
# (true for a subset of hardware-supported numeric types)
_storeindices(n, k, ::Type{T}) where {T<:Integer} = n ≤ typemax(T)
_storeindices(n, k, ::Type{T}) where {T<:Union{Float32,Float64}} = k < 22 && n ≤ maxintfloat(T)
_storeindices(n, k, ::Type{Complex{T}}) where {T} = _storeindices(n, k, T)
_storeindices(n, k, ::Type{Rational{T}}) where {T} = k < 16 && _storeindices(n, k, T)
_storeindices(n, k, T) = false
|
143
| 176
|
StatsBase.jl
| 288
|
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
|
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
|
[
143,
176
] |
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
|
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
|
knuths_sample!
| 143
| 176
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
##CHUNK 2
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
idx = rand(rng, rgen)
##CHUNK 3
### Algorithms for sampling with replacement
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
direct_sample!(a::UnitRange, x::AbstractArray) = direct_sample!(default_rng(), a, x)
##CHUNK 4
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
##CHUNK 5
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
else # without replacement
k <= n || error("Cannot draw more samples without replacement.")
##CHUNK 6
* has time complexity ``O(n k)``, as scanning the weight vector each time takes ``O(n)``
* requires no additional memory space.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
##CHUNK 7
redraw until it draws an unsampled one.
This algorithm consumes about (or slightly more than) `k=length(x)` random numbers,
and requires ``O(k)`` memory to store the set of sampled indices.
Very fast when ``n >> k``, with `n=length(a)`.
However, if `k` is large and approaches ``n``, the rejection rate would increase
drastically, resulting in poorer performance.
"""
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
##CHUNK 8
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
idx = rand(rng, rgen)
while idx in s
idx = rand(rng, rgen)
end
x[i] = a[idx]
push!(s, idx)
end
return x
end
self_avoid_sample!(a::AbstractArray, x::AbstractArray) =
self_avoid_sample!(default_rng(), a, x)
##CHUNK 9
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(n)`` time
for building the alias table, and then ``O(1)`` to draw each sample. It consumes ``k`` random numbers.
"""
function alias_sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
##CHUNK 10
"""
function seqsample_a!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
|
204
| 226
|
StatsBase.jl
| 289
|
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
|
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
|
[
204,
226
] |
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
|
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
|
fisher_yates_sample!
| 204
| 226
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
##CHUNK 2
### Algorithms for sampling with replacement
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
direct_sample!(a::UnitRange, x::AbstractArray) = direct_sample!(default_rng(), a, x)
##CHUNK 3
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
knuths_sample!(a::AbstractArray, x::AbstractArray; initshuffle::Bool=true) =
knuths_sample!(default_rng(), a, x; initshuffle=initshuffle)
"""
##CHUNK 4
v = u * rand(rng)
if v < minv
minv = v
end
u -= 1
end
s = trunc(Int, minv) + 1
x[j+=1] = a[i+=s]
n -= s
k -= 1
end
if k > 0
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_c!(a::AbstractArray, x::AbstractArray) = seqsample_c!(default_rng(), a, x)
##CHUNK 5
Each time draw a new index, if the index has already been sampled,
redraw until it draws an unsampled one.
This algorithm consumes about (or slightly more than) `k=length(x)` random numbers,
and requires ``O(k)`` memory to store the set of sampled indices.
Very fast when ``n >> k``, with `n=length(a)`.
However, if `k` is large and approaches ``n``, the rejection rate would increase
drastically, resulting in poorer performance.
"""
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
##CHUNK 6
# todo: if eltype(x) <: Real && eltype(a) <: Real,
# in some cases it might be faster to check
# issorted(a) to see if we can just sort x
if storeindices(n, k, eltype(x))
sort!(sampler!(rng, Base.OneTo(n), x), by=real, lt=<)
@inbounds for i = 1:k
x[i] = a[Int(x[i])]
end
else
indices = Array{Int}(undef, k)
sort!(sampler!(rng, Base.OneTo(n), indices))
@inbounds for i = 1:k
x[i] = a[indices[i]]
end
end
return x
end
# special case of a range can be done more efficiently
sample_ordered!(sampler!, rng::AbstractRNG, a::AbstractRange, x::AbstractArray) =
##CHUNK 7
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
##CHUNK 8
The outputs are ordered.
"""
function seqsample_a!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
##CHUNK 9
x[i] = a[heappop!(pq).second]
end
end
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
##CHUNK 10
memory space. Suitable for the case where memory is tight.
"""
function knuths_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
initshuffle::Bool=true)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
# initialize
for i = 1:k
@inbounds x[i] = a[i]
end
if initshuffle
@inbounds for j = 1:k
l = rand(rng, j:k)
if l != j
|
244
| 272
|
StatsBase.jl
| 290
|
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
idx = rand(rng, rgen)
while idx in s
idx = rand(rng, rgen)
end
x[i] = a[idx]
push!(s, idx)
end
return x
end
|
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
idx = rand(rng, rgen)
while idx in s
idx = rand(rng, rgen)
end
x[i] = a[idx]
push!(s, idx)
end
return x
end
|
[
244,
272
] |
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
idx = rand(rng, rgen)
while idx in s
idx = rand(rng, rgen)
end
x[i] = a[idx]
push!(s, idx)
end
return x
end
|
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
# remaining
for i = 2:k
idx = rand(rng, rgen)
while idx in s
idx = rand(rng, rgen)
end
x[i] = a[idx]
push!(s, idx)
end
return x
end
|
self_avoid_sample!
| 244
| 272
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
##CHUNK 2
### Algorithms for sampling with replacement
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
direct_sample!(a::UnitRange, x::AbstractArray) = direct_sample!(default_rng(), a, x)
##CHUNK 3
items appear in the same order as in `a`) should be taken.
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
##CHUNK 4
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
##CHUNK 5
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
##CHUNK 6
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
self_avoid_sample!([rng], a::AbstractArray, x::AbstractArray)
Self-avoid sampling: use a set to maintain the index that has been sampled.
Each time draw a new index, if the index has already been sampled,
redraw until it draws an unsampled one.
This algorithm consumes about (or slightly more than) `k=length(x)` random numbers,
and requires ``O(k)`` memory to store the set of sampled indices.
Very fast when ``n >> k``, with `n=length(a)`.
##CHUNK 7
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
t = ifelse(n < 500, 64, 32)
##CHUNK 8
faster than Knuth's algorithm especially when `n` is greater than `k`.
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
##CHUNK 9
seqsample_a!(rng, a, x)
end
else
if k == 1
@inbounds x[1] = sample(rng, a)
elseif k == 2
@inbounds (x[1], x[2]) = samplepair(rng, a)
elseif n < k * 24
fisher_yates_sample!(rng, a, x)
else
self_avoid_sample!(rng, a, x)
end
end
end
return x
end
sample!(a::AbstractArray, x::AbstractArray; replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, x; replace=replace, ordered=ordered)
##CHUNK 10
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
knuths_sample!(a::AbstractArray, x::AbstractArray; initshuffle::Bool=true) =
knuths_sample!(default_rng(), a, x; initshuffle=initshuffle)
"""
|
286
| 315
|
StatsBase.jl
| 291
|
function seqsample_a!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
function seqsample_a!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
[
286,
315
] |
function seqsample_a!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
function seqsample_a!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
seqsample_a!
| 286
| 315
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
However, if `k` is large and approaches ``n``, the rejection rate would increase
drastically, resulting in poorer performance.
"""
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
##CHUNK 2
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
##CHUNK 3
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
##CHUNK 4
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
##CHUNK 5
### Algorithms for sampling with replacement
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
direct_sample!(a::UnitRange, x::AbstractArray) = direct_sample!(default_rng(), a, x)
##CHUNK 6
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 7
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
##CHUNK 8
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
knuths_sample!(a::AbstractArray, x::AbstractArray; initshuffle::Bool=true) =
knuths_sample!(default_rng(), a, x; initshuffle=initshuffle)
"""
##CHUNK 9
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
t = ifelse(n < 500, 64, 32)
if k < t
##CHUNK 10
faster than Knuth's algorithm especially when `n` is greater than `k`.
It is ``O(n)`` for initialization, plus ``O(k)`` for random shuffling
"""
function fisher_yates_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
|
328
| 361
|
StatsBase.jl
| 292
|
function seqsample_c!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
l = n - k + 1
minv = l
u = n
while u >= l
v = u * rand(rng)
if v < minv
minv = v
end
u -= 1
end
s = trunc(Int, minv) + 1
x[j+=1] = a[i+=s]
n -= s
k -= 1
end
if k > 0
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
function seqsample_c!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
l = n - k + 1
minv = l
u = n
while u >= l
v = u * rand(rng)
if v < minv
minv = v
end
u -= 1
end
s = trunc(Int, minv) + 1
x[j+=1] = a[i+=s]
n -= s
k -= 1
end
if k > 0
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
[
328,
361
] |
function seqsample_c!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
l = n - k + 1
minv = l
u = n
while u >= l
v = u * rand(rng)
if v < minv
minv = v
end
u -= 1
end
s = trunc(Int, minv) + 1
x[j+=1] = a[i+=s]
n -= s
k -= 1
end
if k > 0
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
function seqsample_c!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
l = n - k + 1
minv = l
u = n
while u >= l
v = u * rand(rng)
if v < minv
minv = v
end
u -= 1
end
s = trunc(Int, minv) + 1
x[j+=1] = a[i+=s]
n -= s
k -= 1
end
if k > 0
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
|
seqsample_c!
| 328
| 361
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
However, if `k` is large and approaches ``n``, the rejection rate would increase
drastically, resulting in poorer performance.
"""
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
##CHUNK 2
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_a!(a::AbstractArray, x::AbstractArray) = seqsample_a!(default_rng(), a, x)
"""
seqsample_c!([rng], a::AbstractArray, x::AbstractArray)
##CHUNK 3
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
##CHUNK 4
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
i = 0
j = 0
while k > 1
u = rand(rng)
q = (n - k) / n
while q > u # skip
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
##CHUNK 5
### Algorithms for sampling with replacement
function direct_sample!(rng::AbstractRNG, a::UnitRange, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
s = Sampler(rng, 1:length(a))
b = a[1] - 1
if b == 0
for i = 1:length(x)
@inbounds x[i] = rand(rng, s)
end
else
for i = 1:length(x)
@inbounds x[i] = b + rand(rng, s)
end
end
return x
end
direct_sample!(a::UnitRange, x::AbstractArray) = direct_sample!(default_rng(), a, x)
##CHUNK 6
k <= n || error("length(x) should not exceed length(a)")
inds = Vector{Int}(undef, n)
for i = 1:n
@inbounds inds[i] = i
end
@inbounds for i = 1:k
j = rand(rng, i:n)
t = inds[j]
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
##CHUNK 7
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
else # without replacement
k <= n || error("Cannot draw more samples without replacement.")
##CHUNK 8
j += 1
i += s+1
@inbounds x[j] = a[i]
N = N - s - 1
n -= 1
q1 -= s
q2 = q1 / N
threshold -= alpha
end
if n > 1
seqsample_a!(rng, a[i+1:end], @view x[j+1:end])
else
s = trunc(Int, N * vprime)
@inbounds x[j+=1] = a[i+=s+1]
end
end
seqsample_d!(a::AbstractArray, x::AbstractArray) = seqsample_d!(default_rng(), a, x)
##CHUNK 9
* has time complexity ``O(n k)``, as scanning the weight vector each time takes ``O(n)``
* requires no additional memory space.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
##CHUNK 10
t = x[j]
x[j] = x[l]
x[l] = t
end
end
end
# scan remaining
s = Sampler(rng, 1:k)
for i = k+1:n
if rand(rng) * i < k # keep it with probability k / i
@inbounds x[rand(rng, s)] = a[i]
end
end
return x
end
knuths_sample!(a::AbstractArray, x::AbstractArray; initshuffle::Bool=true) =
knuths_sample!(default_rng(), a, x; initshuffle=initshuffle)
"""
|
488
| 525
|
StatsBase.jl
| 293
|
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
else # without replacement
k <= n || error("Cannot draw more samples without replacement.")
if ordered
if n > 10 * k * k
seqsample_c!(rng, a, x)
else
seqsample_a!(rng, a, x)
end
else
if k == 1
@inbounds x[1] = sample(rng, a)
elseif k == 2
@inbounds (x[1], x[2]) = samplepair(rng, a)
elseif n < k * 24
fisher_yates_sample!(rng, a, x)
else
self_avoid_sample!(rng, a, x)
end
end
end
return x
end
|
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
else # without replacement
k <= n || error("Cannot draw more samples without replacement.")
if ordered
if n > 10 * k * k
seqsample_c!(rng, a, x)
else
seqsample_a!(rng, a, x)
end
else
if k == 1
@inbounds x[1] = sample(rng, a)
elseif k == 2
@inbounds (x[1], x[2]) = samplepair(rng, a)
elseif n < k * 24
fisher_yates_sample!(rng, a, x)
else
self_avoid_sample!(rng, a, x)
end
end
end
return x
end
|
[
488,
525
] |
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
else # without replacement
k <= n || error("Cannot draw more samples without replacement.")
if ordered
if n > 10 * k * k
seqsample_c!(rng, a, x)
else
seqsample_a!(rng, a, x)
end
else
if k == 1
@inbounds x[1] = sample(rng, a)
elseif k == 2
@inbounds (x[1], x[2]) = samplepair(rng, a)
elseif n < k * 24
fisher_yates_sample!(rng, a, x)
else
self_avoid_sample!(rng, a, x)
end
end
end
return x
end
|
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
else # without replacement
k <= n || error("Cannot draw more samples without replacement.")
if ordered
if n > 10 * k * k
seqsample_c!(rng, a, x)
else
seqsample_a!(rng, a, x)
end
else
if k == 1
@inbounds x[1] = sample(rng, a)
elseif k == 2
@inbounds (x[1], x[2]) = samplepair(rng, a)
elseif n < k * 24
fisher_yates_sample!(rng, a, x)
else
self_avoid_sample!(rng, a, x)
end
end
end
return x
end
|
sample!
| 488
| 525
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
t = ifelse(n < 500, 64, 32)
if k < t
direct_sample!(rng, a, wv, x)
else
alias_sample!(rng, a, wv, x)
end
end
end
else
k <= n || error("Cannot draw $k samples from $n samples without replacement.")
##CHUNK 2
t = ifelse(n < 500, 64, 32)
if k < t
direct_sample!(rng, a, wv, x)
else
alias_sample!(rng, a, wv, x)
end
end
end
else
k <= n || error("Cannot draw $k samples from $n samples without replacement.")
efraimidis_aexpj_wsample_norep!(rng, a, wv, x; ordered=ordered)
end
return x
end
sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, wv, x; replace=replace, ordered=ordered)
sample(rng::AbstractRNG, a::AbstractArray{T}, wv::AbstractWeights, n::Integer;
replace::Bool=true, ordered::Bool=false) where {T} =
##CHUNK 3
"""
sample!([rng], a, [wv::AbstractWeights], x; replace=true, ordered=false)
Draw a random sample of `length(x)` elements from an array `a`
and store the result in `x`. A polyalgorithm is used for sampling.
Sampling probabilities are proportional to the weights given in `wv`,
if provided. `replace` dictates whether sampling is performed with
replacement. `ordered` dictates whether
an ordered sample (also called a sequential sample, i.e. a sample where
items appear in the same order as in `a`) should be taken.
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
sample!(a::AbstractArray, x::AbstractArray; replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, x; replace=replace, ordered=ordered)
##CHUNK 4
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
##CHUNK 5
inds[j] = inds[i]
inds[i] = t
x[i] = a[t]
end
return x
end
fisher_yates_sample!(a::AbstractArray, x::AbstractArray) =
fisher_yates_sample!(default_rng(), a, x)
"""
self_avoid_sample!([rng], a::AbstractArray, x::AbstractArray)
Self-avoid sampling: use a set to maintain the index that has been sampled.
Each time draw a new index, if the index has already been sampled,
redraw until it draws an unsampled one.
This algorithm consumes about (or slightly more than) `k=length(x)` random numbers,
and requires ``O(k)`` memory to store the set of sampled indices.
Very fast when ``n >> k``, with `n=length(a)`.
##CHUNK 6
i += 1
n -= 1
q *= (n - k) / n
end
@inbounds x[j+=1] = a[i+=1]
n -= 1
k -= 1
end
if k > 0 # checking k > 0 is necessary: x can be empty
s = trunc(Int, n * rand(rng))
x[j+1] = a[i+(s+1)]
end
return x
end
seqsample_a!(a::AbstractArray, x::AbstractArray) = seqsample_a!(default_rng(), a, x)
"""
seqsample_c!([rng], a::AbstractArray, x::AbstractArray)
##CHUNK 7
function alias_sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
##CHUNK 8
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
sample!(a::AbstractArray, x::AbstractArray; replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, x; replace=replace, ordered=ordered)
"""
sample([rng], a, [wv::AbstractWeights], n::Integer; replace=true, ordered=false)
Select a random, optionally weighted sample of size `n` from an array `a`
using a polyalgorithm. Sampling probabilities are proportional to the weights
given in `wv`, if provided. `replace` dictates whether sampling is performed
with replacement. `ordered` dictates whether
an ordered sample (also called a sequential sample, i.e. a sample where
items appear in the same order as in `a`) should be taken.
##CHUNK 9
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
##CHUNK 10
However, if `k` is large and approaches ``n``, the rejection rate would increase
drastically, resulting in poorer performance.
"""
function self_avoid_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
n = length(a)
k = length(x)
k <= n || error("length(x) should not exceed length(a)")
s = Set{Int}()
sizehint!(s, k)
rgen = Sampler(rng, 1:n)
# first one
idx = rand(rng, rgen)
x[1] = a[idx]
push!(s, idx)
|
586
| 600
|
StatsBase.jl
| 294
|
function sample(rng::AbstractRNG, wv::AbstractWeights)
1 == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
t = rand(rng) * wsum
n = length(wv)
i = 1
cw = wv[1]
while cw < t && i < n
i += 1
@inbounds cw += wv[i]
end
return i
end
|
function sample(rng::AbstractRNG, wv::AbstractWeights)
1 == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
t = rand(rng) * wsum
n = length(wv)
i = 1
cw = wv[1]
while cw < t && i < n
i += 1
@inbounds cw += wv[i]
end
return i
end
|
[
586,
600
] |
function sample(rng::AbstractRNG, wv::AbstractWeights)
1 == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
t = rand(rng) * wsum
n = length(wv)
i = 1
cw = wv[1]
while cw < t && i < n
i += 1
@inbounds cw += wv[i]
end
return i
end
|
function sample(rng::AbstractRNG, wv::AbstractWeights)
1 == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
t = rand(rng) * wsum
n = length(wv)
i = 1
cw = wv[1]
while cw < t && i < n
i += 1
@inbounds cw += wv[i]
end
return i
end
|
sample
| 586
| 600
|
src/sampling.jl
|
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
@propagate_inbounds function Base.getindex(wv::W, i::AbstractArray) where W <: AbstractWeights
@boundscheck checkbounds(wv, i)
@inbounds v = wv.values[i]
W(v, sum(v))
end
Base.getindex(wv::W, ::Colon) where {W <: AbstractWeights} = W(copy(wv.values), sum(wv))
@propagate_inbounds function Base.setindex!(wv::AbstractWeights, v::Real, i::Int)
s = v - wv[i]
sum = wv.sum + s
isfinite(sum) || throw(ArgumentError("weights cannot contain Inf or NaN values"))
wv.values[i] = v
wv.sum = sum
v
end
"""
varcorrection(n::Integer, corrected=false)
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
#FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
end
return mv
end
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
##CHUNK 2
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
end
return mv
end
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
##CHUNK 3
end
# Weighted mode of arbitrary vectors of values
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
##CHUNK 2
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
##CHUNK 3
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
end
naive_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
naive_wsample_norep!(default_rng(), a, wv, x)
# Weighted sampling without replacement
##CHUNK 4
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
##CHUNK 5
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
|
618
| 632
|
StatsBase.jl
| 295
|
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
|
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
|
[
618,
632
] |
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
|
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
|
direct_sample!
| 618
| 632
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
##CHUNK 2
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
##CHUNK 3
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
##CHUNK 4
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(n)`` time
for building the alias table, and then ``O(1)`` to draw each sample. It consumes ``k`` random numbers.
"""
function alias_sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
##CHUNK 5
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
##CHUNK 6
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(k \\log(k) \\log(n / k))``
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_ares_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
##CHUNK 7
Reference: Efraimidis, P. S., Spirakis, P. G. "Weighted random sampling with a reservoir."
*Information Processing Letters*, 97 (5), 181-185, 2006. doi:10.1016/j.ipl.2005.11.003.
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(k \\log(k) \\log(n / k))``
processing time to draw ``k`` elements. It consumes ``O(k \\log(n / k))`` random numbers.
"""
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
##CHUNK 8
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
##CHUNK 9
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
##CHUNK 10
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
|
649
| 666
|
StatsBase.jl
| 296
|
function alias_sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
|
function alias_sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
|
[
649,
666
] |
function alias_sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
|
function alias_sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
|
alias_sample!
| 649
| 666
|
src/sampling.jl
|
#FILE: StatsBase.jl/src/deprecates.jl
##CHUNK 1
a::AbstractVector{Float64},
alias::AbstractVector{Int})
Base.depwarn("make_alias_table! is both internal and deprecated, use AliasTables.jl instead", :make_alias_table!)
# Arguments:
#
# w [in]: input weights
# wsum [in]: pre-computed sum(w)
#
# a [out]: acceptance probabilities
# alias [out]: alias table
#
# Note: a and w can be the same array, then that array will be
# overwritten inplace by acceptance probabilities
#
# Returns nothing
#
n = length(w)
length(a) == length(alias) == n ||
throw(DimensionMismatch("Inconsistent array lengths."))
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
##CHUNK 2
"""
function efraimidis_ares_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
##CHUNK 3
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 4
Draw each sample by scanning the weight vector.
Noting `k=length(x)` and `n=length(a)`, this algorithm:
* consumes `k` random numbers
* has time complexity ``O(n k)``, as scanning the weight vector each time takes ``O(n)``
* requires no additional memory space.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
##CHUNK 5
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
##CHUNK 6
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
##CHUNK 7
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
##CHUNK 8
"""
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
##CHUNK 9
"""
direct_sample!([rng], a::AbstractArray, x::AbstractArray)
Direct sampling: for each `j` in `1:k`, randomly pick `i` from `1:n`,
and set `x[j] = a[i]`, with `n=length(a)` and `k=length(x)`.
This algorithm consumes `k` random numbers.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
s = Sampler(rng, 1:length(a))
for i = 1:length(x)
@inbounds x[i] = a[rand(rng, s)]
end
return x
end
|
681
| 711
|
StatsBase.jl
| 297
|
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
end
|
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
end
|
[
681,
711
] |
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
end
|
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
end
|
naive_wsample_norep!
| 681
| 711
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
##CHUNK 2
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 3
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
end
##CHUNK 4
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
##CHUNK 5
"""
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
##CHUNK 6
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
##CHUNK 7
t = rand(rng) * wsum
n = length(wv)
i = 1
cw = wv[1]
while cw < t && i < n
i += 1
@inbounds cw += wv[i]
end
return i
end
sample(wv::AbstractWeights) = sample(default_rng(), wv)
sample(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights) = a[sample(rng, wv)]
sample(a::AbstractArray, wv::AbstractWeights) = sample(default_rng(), a, wv)
"""
direct_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Direct sampling.
##CHUNK 8
Reference: Efraimidis, P. S., Spirakis, P. G. "Weighted random sampling with a reservoir."
*Information Processing Letters*, 97 (5), 181-185, 2006. doi:10.1016/j.ipl.2005.11.003.
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(n + k \\log k)``
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
##CHUNK 9
Draw each sample by scanning the weight vector.
Noting `k=length(x)` and `n=length(a)`, this algorithm:
* consumes `k` random numbers
* has time complexity ``O(n k)``, as scanning the weight vector each time takes ``O(n)``
* requires no additional memory space.
"""
function direct_sample!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
##CHUNK 10
"""
efraimidis_aexpj_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Implementation of weighted sampling without replacement using Efraimidis-Spirakis A-ExpJ algorithm.
Reference: Efraimidis, P. S., Spirakis, P. G. "Weighted random sampling with a reservoir."
*Information Processing Letters*, 97 (5), 181-185, 2006. doi:10.1016/j.ipl.2005.11.003.
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(k \\log(k) \\log(n / k))``
processing time to draw ``k`` elements. It consumes ``O(k \\log(n / k))`` random numbers.
"""
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
|
728
| 753
|
StatsBase.jl
| 298
|
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
end
return x
end
|
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
end
return x
end
|
[
728,
753
] |
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
end
return x
end
|
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
end
return x
end
|
efraimidis_a_wsample_norep!
| 728
| 753
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 2
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
##CHUNK 3
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
##CHUNK 4
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
##CHUNK 5
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
##CHUNK 6
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
##CHUNK 7
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
##CHUNK 8
c = w[1]
while c < u && j < n
@inbounds c += w[j+=1]
end
@inbounds x[i] = a[j]
@inbounds wsum -= w[j]
@inbounds w[j] = 0.0
end
return x
end
naive_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
naive_wsample_norep!(default_rng(), a, wv, x)
# Weighted sampling without replacement
# Instead of keys u^(1/w) where u = random(0,1) keys w/v where v = randexp(1) are used.
"""
efraimidis_a_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Weighted sampling without replacement using Efraimidis-Spirakis A algorithm.
##CHUNK 9
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
##CHUNK 10
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
|
770
| 826
|
StatsBase.jl
| 299
|
function efraimidis_ares_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
if key > threshold
# update priority queue
pq[1] = (key => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
end
end
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
return x
end
|
function efraimidis_ares_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
if key > threshold
# update priority queue
pq[1] = (key => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
end
end
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
return x
end
|
[
770,
826
] |
function efraimidis_ares_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
if key > threshold
# update priority queue
pq[1] = (key => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
end
end
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
return x
end
|
function efraimidis_ares_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
if key > threshold
# update priority queue
pq[1] = (key => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
end
end
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
return x
end
|
efraimidis_ares_wsample_norep!
| 770
| 826
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
##CHUNK 2
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
##CHUNK 3
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
##CHUNK 4
Reference: Efraimidis, P. S., Spirakis, P. G. "Weighted random sampling with a reservoir."
*Information Processing Letters*, 97 (5), 181-185, 2006. doi:10.1016/j.ipl.2005.11.003.
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(n + k \\log k)``
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
##CHUNK 5
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 6
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false) =
efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
##CHUNK 7
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
# Convenience method without an explicit RNG: forwards to the rng-first
# alias-table sampler using the process-wide default random number generator.
function alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
    return alias_sample!(default_rng(), a, wv, x)
end
"""
##CHUNK 8
end
# Convenience method without an explicit RNG: delegates to the rng-first
# method, passing through the `ordered` keyword unchanged.
function efraimidis_aexpj_wsample_norep!(a::AbstractArray, wv::AbstractWeights,
                                         x::AbstractArray; ordered::Bool=false)
    return efraimidis_aexpj_wsample_norep!(default_rng(), a, wv, x; ordered=ordered)
end
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
##CHUNK 9
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
##CHUNK 10
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.