context_start_lineno
int64 1
913
| line_no
int64 16
984
| repo
stringclasses 5
values | id
int64 0
416
| target_function_prompt
stringlengths 201
13.6k
| function_signature
stringlengths 201
13.6k
| solution_position
listlengths 2
2
| raw_solution
stringlengths 201
13.6k
| focal_code
stringlengths 201
13.6k
| function_name
stringlengths 2
38
| start_line
int64 1
913
| end_line
int64 16
984
| file_path
stringlengths 10
52
| context
stringlengths 4.52k
9.85k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
843
| 908
|
StatsBase.jl
| 300
|
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
X = threshold * randexp(rng)
end
if ordered
# fill output array with items sorted as in a
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
|
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
X = threshold * randexp(rng)
end
if ordered
# fill output array with items sorted as in a
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
|
[
843,
908
] |
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
X = threshold * randexp(rng)
end
if ordered
# fill output array with items sorted as in a
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
|
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
# update threshold
threshold = pq[1].first
X = threshold * randexp(rng)
end
if ordered
# fill output array with items sorted as in a
sort!(pq, by=last)
@inbounds for i in 1:k
x[i] = a[pq[i].second]
end
else
# fill output array with items in descending order
@inbounds for i in k:-1:1
x[i] = a[heappop!(pq).second]
end
end
return x
end
|
efraimidis_aexpj_wsample_norep!
| 843
| 908
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
##CHUNK 2
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
##CHUNK 3
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
##CHUNK 4
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
##CHUNK 5
w < 0 && error("Negative weight found in weight vector at index $s")
if w > 0
i += 1
pq[i] = (w/randexp(rng) => s)
end
i >= k && break
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
key = w/randexp(rng)
# if key is larger than the threshold
##CHUNK 6
Reference: Efraimidis, P. S., Spirakis, P. G. "Weighted random sampling with a reservoir."
*Information Processing Letters*, 97 (5), 181-185, 2006. doi:10.1016/j.ipl.2005.11.003.
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(n + k \\log k)``
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
##CHUNK 7
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
##CHUNK 8
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
##CHUNK 9
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 10
Implementation of weighted sampling without replacement using Efraimidis-Spirakis A-Res algorithm.
Reference: Efraimidis, P. S., Spirakis, P. G. "Weighted random sampling with a reservoir."
*Information Processing Letters*, 97 (5), 181-185, 2006. doi:10.1016/j.ipl.2005.11.003.
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(k \\log(k) \\log(n / k))``
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_ares_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
|
913
| 942
|
StatsBase.jl
| 301
|
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
t = ifelse(n < 500, 64, 32)
if k < t
direct_sample!(rng, a, wv, x)
else
alias_sample!(rng, a, wv, x)
end
end
end
else
k <= n || error("Cannot draw $k samples from $n samples without replacement.")
efraimidis_aexpj_wsample_norep!(rng, a, wv, x; ordered=ordered)
end
return x
end
|
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
t = ifelse(n < 500, 64, 32)
if k < t
direct_sample!(rng, a, wv, x)
else
alias_sample!(rng, a, wv, x)
end
end
end
else
k <= n || error("Cannot draw $k samples from $n samples without replacement.")
efraimidis_aexpj_wsample_norep!(rng, a, wv, x; ordered=ordered)
end
return x
end
|
[
913,
942
] |
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
t = ifelse(n < 500, 64, 32)
if k < t
direct_sample!(rng, a, wv, x)
else
alias_sample!(rng, a, wv, x)
end
end
end
else
k <= n || error("Cannot draw $k samples from $n samples without replacement.")
efraimidis_aexpj_wsample_norep!(rng, a, wv, x; ordered=ordered)
end
return x
end
|
function sample!(rng::AbstractRNG, a::AbstractArray, wv::AbstractWeights, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
if replace
if ordered
sample_ordered!(rng, a, wv, x) do rng, a, wv, x
sample!(rng, a, wv, x; replace=true, ordered=false)
end
else
if n < 40
direct_sample!(rng, a, wv, x)
else
t = ifelse(n < 500, 64, 32)
if k < t
direct_sample!(rng, a, wv, x)
else
alias_sample!(rng, a, wv, x)
end
end
end
else
k <= n || error("Cannot draw $k samples from $n samples without replacement.")
efraimidis_aexpj_wsample_norep!(rng, a, wv, x; ordered=ordered)
end
return x
end
|
sample!
| 913
| 942
|
src/sampling.jl
|
#CURRENT FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
##CHUNK 2
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
##CHUNK 3
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
k = length(x)
k == 0 && return x
if replace # with replacement
if ordered
sample_ordered!(direct_sample!, rng, a, x)
else
direct_sample!(rng, a, x)
end
else # without replacement
k <= n || error("Cannot draw more samples without replacement.")
if ordered
if n > 10 * k * k
seqsample_c!(rng, a, x)
else
seqsample_a!(rng, a, x)
##CHUNK 4
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 5
"""
sample!([rng], a, [wv::AbstractWeights], x; replace=true, ordered=false)
Draw a random sample of `length(x)` elements from an array `a`
and store the result in `x`. A polyalgorithm is used for sampling.
Sampling probabilities are proportional to the weights given in `wv`,
if provided. `replace` dictates whether sampling is performed with
replacement. `ordered` dictates whether
an ordered sample (also called a sequential sample, i.e. a sample where
items appear in the same order as in `a`) should be taken.
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
Output array `a` must not be the same object as `x` or `wv`
nor share memory with them, or the result may be incorrect.
"""
function sample!(rng::AbstractRNG, a::AbstractArray, x::AbstractArray;
replace::Bool=true, ordered::Bool=false)
1 == firstindex(a) == firstindex(x) ||
##CHUNK 6
end
end
end
return x
end
sample!(a::AbstractArray, x::AbstractArray; replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, x; replace=replace, ordered=ordered)
"""
sample([rng], a, [wv::AbstractWeights], n::Integer; replace=true, ordered=false)
Select a random, optionally weighted sample of size `n` from an array `a`
using a polyalgorithm. Sampling probabilities are proportional to the weights
given in `wv`, if provided. `replace` dictates whether sampling is performed
with replacement. `ordered` dictates whether
an ordered sample (also called a sequential sample, i.e. a sample where
items appear in the same order as in `a`) should be taken.
Optionally specify a random number generator `rng` as the first argument
##CHUNK 7
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
##CHUNK 8
# wsample interface
"""
wsample!([rng], a, w, x; replace=true, ordered=false)
Select a weighted sample from an array `a` and store the result in `x`. Sampling
probabilities are proportional to the weights given in `w`. `replace` dictates
whether sampling is performed with replacement. `ordered` dictates whether
an ordered sample (also called a sequential sample, i.e. a sample where
items appear in the same order as in `a`) should be taken.
Optionally specify a random number generator `rng` as the first argument
(defaults to `Random.default_rng()`).
"""
wsample!(rng::AbstractRNG, a::AbstractArray, w::AbstractVector{<:Real}, x::AbstractArray;
replace::Bool=true, ordered::Bool=false) =
sample!(rng, a, weights(w), x; replace=replace, ordered=ordered)
wsample!(a::AbstractArray, w::AbstractVector{<:Real}, x::AbstractArray;
replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, weights(w), x; replace=replace, ordered=ordered)
##CHUNK 9
end
else
if k == 1
@inbounds x[1] = sample(rng, a)
elseif k == 2
@inbounds (x[1], x[2]) = samplepair(rng, a)
elseif n < k * 24
fisher_yates_sample!(rng, a, x)
else
self_avoid_sample!(rng, a, x)
end
end
end
return x
end
sample!(a::AbstractArray, x::AbstractArray; replace::Bool=true, ordered::Bool=false) =
sample!(default_rng(), a, x; replace=replace, ordered=ordered)
"""
##CHUNK 10
replace::Bool=true, ordered::Bool=false) =
sample(default_rng(), a, wv, n; replace=replace, ordered=ordered)
sample(rng::AbstractRNG, a::AbstractArray{T}, wv::AbstractWeights, dims::Dims;
replace::Bool=true, ordered::Bool=false) where {T} =
sample!(rng, a, wv, Array{T}(undef, dims); replace=replace, ordered=ordered)
sample(a::AbstractArray, wv::AbstractWeights, dims::Dims;
replace::Bool=true, ordered::Bool=false) =
sample(default_rng(), a, wv, dims; replace=replace, ordered=ordered)
# wsample interface
"""
wsample!([rng], a, w, x; replace=true, ordered=false)
Select a weighted sample from an array `a` and store the result in `x`. Sampling
probabilities are proportional to the weights given in `w`. `replace` dictates
whether sampling is performed with replacement. `ordered` dictates whether
an ordered sample (also called a sequential sample, i.e. a sample where
items appear in the same order as in `a`) should be taken.
|
34
| 45
|
StatsBase.jl
| 302
|
function genmean(a, p::Real)
if p == 0
return geomean(a)
end
# At least one of `x` or `p` must not be an int to avoid domain errors when `p` is a negative int.
# We choose `x` in order to exploit exponentiation by squaring when `p` is an int.
r = mean(a) do x
float(x)^p
end
return r^inv(p)
end
|
function genmean(a, p::Real)
if p == 0
return geomean(a)
end
# At least one of `x` or `p` must not be an int to avoid domain errors when `p` is a negative int.
# We choose `x` in order to exploit exponentiation by squaring when `p` is an int.
r = mean(a) do x
float(x)^p
end
return r^inv(p)
end
|
[
34,
45
] |
function genmean(a, p::Real)
if p == 0
return geomean(a)
end
# At least one of `x` or `p` must not be an int to avoid domain errors when `p` is a negative int.
# We choose `x` in order to exploit exponentiation by squaring when `p` is an int.
r = mean(a) do x
float(x)^p
end
return r^inv(p)
end
|
function genmean(a, p::Real)
if p == 0
return geomean(a)
end
# At least one of `x` or `p` must not be an int to avoid domain errors when `p` is a negative int.
# We choose `x` in order to exploit exponentiation by squaring when `p` is an int.
r = mean(a) do x
float(x)^p
end
return r^inv(p)
end
|
genmean
| 34
| 45
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/deviation.jl
##CHUNK 1
for i in eachindex(a, b)
@inbounds ai = a[i]
@inbounds bi = b[i]
if ai > 0
r += (ai * log(ai / bi) - ai + bi)
else
r += bi
end
end
return r::Float64
end
# MeanAD: mean absolute deviation
"""
meanad(a, b)
Return the mean absolute deviation between two arrays: `mean(abs, a - b)`.
"""
meanad(a::AbstractArray{T}, b::AbstractArray{T}) where {T<:Number} =
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
scale = sum(p)
if α ≈ 0
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += 1
end
end
s = log(s / scale)
elseif α ≈ 1
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
##CHUNK 2
"""
geomean(a)
Return the geometric mean of a collection.
"""
geomean(a) = exp(mean(log, a))
# Harmonic mean
"""
harmmean(a)
Return the harmonic mean of a collection.
"""
harmmean(a) = inv(mean(inv, a))
# Generalized mean
"""
genmean(a, p)
Return the generalized/power mean with exponent `p` of a real-valued array,
##CHUNK 3
Compute the entropy of a collection of probabilities `p`,
optionally specifying a real number `b` such that the entropy is scaled by `1/log(b)`.
Elements with probability 0 or 1 add 0 to the entropy.
"""
function entropy(p)
if isempty(p)
throw(ArgumentError("empty collections are not supported since they do not " *
"represent proper probability distributions"))
end
return -sum(xlogx, p)
end
function entropy(p, b::Real)
e = entropy(p)
# Promote explicitly before applying `log` to avoid undesired promotions
# with `log(b)::Float64` arising from `b::Int` (ref: #924)
_b = first(promote(b, e))
return e / log(_b)
end
##CHUNK 4
A pre-computed `mean` may be provided.
When not using weights, this is the (sample) standard deviation
divided by the square root of the sample size. If weights are used, the
variance of the sample mean is calculated as follows:
* `AnalyticWeights`: Not implemented.
* `FrequencyWeights`: ``\\frac{\\sum_{i=1}^n w_i (x_i - \\bar{x_i})^2}{(\\sum w_i) (\\sum w_i - 1)}``
* `ProbabilityWeights`: ``\\frac{n}{n-1} \\frac{\\sum_{i=1}^n w_i^2 (x_i - \\bar{x_i})^2}{\\left( \\sum w_i \\right)^2}``
The standard error is then the square root of the above quantities.
# References
Carl-Erik Särndal, Bengt Swensson, Jan Wretman (1992). Model Assisted Survey Sampling.
New York: Springer. pp. 51-53.
"""
function sem(x; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
##CHUNK 5
Return the harmonic mean of a collection.
"""
harmmean(a) = inv(mean(inv, a))
# Generalized mean
"""
genmean(a, p)
Return the generalized/power mean with exponent `p` of a real-valued array,
i.e. ``\\left( \\frac{1}{n} \\sum_{i=1}^n a_i^p \\right)^{\\frac{1}{p}}``, where `n = length(a)`.
It is taken to be the geometric mean when `p == 0`.
"""
# compute mode, given the range of integer values
"""
mode(a, [r])
mode(a::AbstractArray, wv::AbstractWeights)
Return the mode (most common number) of an array, optionally
##CHUNK 6
#############################
#
# entropy and friends
#
#############################
"""
entropy(p, [b])
Compute the entropy of a collection of probabilities `p`,
optionally specifying a real number `b` such that the entropy is scaled by `1/log(b)`.
Elements with probability 0 or 1 add 0 to the entropy.
"""
function entropy(p)
if isempty(p)
throw(ArgumentError("empty collections are not supported since they do not " *
"represent proper probability distributions"))
end
return -sum(xlogx, p)
##CHUNK 7
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
"""
crossentropy(p, q, [b])
Compute the cross entropy between `p` and `q`, optionally specifying a real
number `b` such that the result is scaled by `1/log(b)`.
"""
function crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
##CHUNK 8
elseif α ≈ 1
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
##CHUNK 9
"""
StatsBase.mad!(x; center=median!(x), normalize=true)
Compute the median absolute deviation (MAD) of array `x` around `center`
(by default, around the median), overwriting `x` in the process.
If `normalize` is set to `true`, the MAD is multiplied by
`1 / quantile(Normal(), 3/4) ≈ 1.4826`, in order to obtain a consistent estimator
of the standard deviation under the assumption that the data is normally distributed.
"""
function mad!(x::AbstractArray;
center=median!(x),
normalize::Union{Bool,Nothing}=true,
constant=nothing)
isempty(x) && throw(ArgumentError("mad is not defined for empty arrays"))
c = center === nothing ? median!(x) : center
T = promote_type(typeof(c), eltype(x))
U = eltype(x)
x2 = U == T ? x : isconcretetype(U) && isconcretetype(T) && sizeof(U) == sizeof(T) ? reinterpret(T, x) : similar(x, T)
x2 .= abs.(x .- c)
|
56
| 75
|
StatsBase.jl
| 303
|
function mode(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
len = length(a)
r0 = r[1]
r1 = r[end]
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
end
end
end
return mv
end
|
function mode(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
len = length(a)
r0 = r[1]
r1 = r[end]
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
end
end
end
return mv
end
|
[
56,
75
] |
function mode(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
len = length(a)
r0 = r[1]
r1 = r[end]
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
end
end
end
return mv
end
|
function mode(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
len = length(a)
r0 = r[1]
r1 = r[end]
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
end
end
end
return mv
end
|
mode
| 56
| 75
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 2
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
##CHUNK 3
function _addcounts!(::Type{Bool}, cm::Dict{Bool}, x; alg = :ignored)
sumx = 0
len = 0
for i in x
sumx += i
len += 1
end
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + len - sumx
cm
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
##CHUNK 4
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
##CHUNK 2
Return all modes (most common numbers) of an array, optionally over a
specified range `r` or weighted via vector `wv`.
"""
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
##CHUNK 3
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
##CHUNK 4
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
mv = x
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
##CHUNK 5
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
##CHUNK 6
end
y = iterate(a, st)
end
return mv
end
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
|
84
| 108
|
StatsBase.jl
| 304
|
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
|
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
|
[
84,
108
] |
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
|
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
|
modes
| 84
| 108
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 2
m0 = first(levels)
m1 = last(levels)
b = m0 - 1
@inbounds for i in eachindex(xv, wv)
xi = xv[i]
if m0 <= xi <= m1
r[xi - b] += wv[i]
end
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
##CHUNK 3
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
if c != 0
index = ht_keyindex2!(cm, i)
if index > 0
@inbounds cm.vals[index] += c
else
@inbounds Base._setindex!(cm, c, i, -index)
end
end
end
cm
##CHUNK 4
function _addcounts!(::Type{Bool}, cm::Dict{Bool}, x; alg = :ignored)
sumx = 0
len = 0
for i in x
sumx += i
len += 1
end
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + len - sumx
cm
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
##CHUNK 5
my0 = first(ylevels)
my1 = last(ylevels)
bx = mx0 - 1
by = my0 - 1
for i in eachindex(vec(x), vec(y))
xi = x[i]
yi = y[i]
if (mx0 <= xi <= mx1) && (my0 <= yi <= my1)
r[xi - bx, yi - by] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
##CHUNK 6
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
end
end
end
return mv
end
"""
modes(a, [r])::Vector
mode(a::AbstractArray, wv::AbstractWeights)::Vector
##CHUNK 2
Return the mode (most common number) of an array, optionally
over a specified range `r` or weighted via a vector `wv`.
If several modes exist, the first one (in order of appearance) is returned.
"""
function mode(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
len = length(a)
r0 = r[1]
r1 = r[end]
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
##CHUNK 3
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
# Weighted mode of arbitrary vectors of values
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
##CHUNK 4
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
|
111
| 135
|
StatsBase.jl
| 305
|
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
mv = x
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
return mv
end
|
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
mv = x
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
return mv
end
|
[
111,
135
] |
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
mv = x
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
return mv
end
|
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
mv = x
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
return mv
end
|
mode
| 111
| 135
|
src/scalarstats.jl
|
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
##CHUNK 2
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
##CHUNK 3
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
# Weighted mode of arbitrary vectors of values
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
##CHUNK 4
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
end
end
end
return mv
end
"""
modes(a, [r])::Vector
mode(a::AbstractArray, wv::AbstractWeights)::Vector
##CHUNK 5
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
##CHUNK 6
Return the mode (most common number) of an array, optionally
over a specified range `r` or weighted via a vector `wv`.
If several modes exist, the first one (in order of appearance) is returned.
"""
function mode(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
len = length(a)
r0 = r[1]
r1 = r[end]
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
##CHUNK 7
Return all modes (most common numbers) of an array, optionally over a
specified range `r` or weighted via vector `wv`.
"""
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
##CHUNK 8
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
# Weighted mode of arbitrary vectors of values
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
##CHUNK 9
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
# find values corresponding to maximum counts
return [x for (x, w) in weights if w == mw]
end
##CHUNK 10
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
end
return mv
end
|
137
| 161
|
StatsBase.jl
| 306
|
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
|
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
|
[
137,
161
] |
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
|
function modes(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
x, st = iterate(a)
cnts[x] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
|
modes
| 137
| 161
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
@inbounds for i = 2:n
if x[i - 1] == x[i]
k += 1
elseif k > 0
# Sort the corresponding chunk of y, so the rows of hcat(x,y) are
# sorted first on x, then (where x values are tied) on y. Hence
# double ties can be counted by calling countties.
sort!(view(y, (i - k - 1):(i - 1)))
ntiesx += div(widen(k) * (k + 1), 2) # Must use wide integers here
ndoubleties += countties(y, i - k - 1, i - 1)
k = 0
end
end
if k > 0
sort!(view(y, (n - k):n))
ntiesx += div(widen(k) * (k + 1), 2)
ndoubleties += countties(y, n - k, n)
end
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
function _addcounts!(::Type{Bool}, cm::Dict{Bool}, x; alg = :ignored)
sumx = 0
len = 0
for i in x
sumx += i
len += 1
end
cm[true] = get(cm, true, 0) + sumx
cm[false] = get(cm, false, 0) + len - sumx
cm
end
function _addcounts!(::Type{T}, cm::Dict{T}, x; alg = :ignored) where T <: Union{UInt8, UInt16, Int8, Int16}
counts = zeros(Int, 2^(8sizeof(T)))
@inbounds for xi in x
counts[Int(xi) - typemin(T) + 1] += 1
end
for (i, c) in zip(typemin(T):typemax(T), counts)
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
mv = x
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
##CHUNK 2
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
function mode(a)
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
cnts = Dict{eltype(a),Int}()
# first element
mc = 1
mv, st = iterate(a)
cnts[mv] = 1
# find the mode along with table construction
y = iterate(a, st)
while y !== nothing
##CHUNK 3
x, st = y
if haskey(cnts, x)
c = (cnts[x] += 1)
if c > mc
mc = c
mv = x
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
return mv
end
# Weighted mode of arbitrary vectors of values
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
##CHUNK 4
cnts = zeros(Int, length(r))
mc = 0 # maximum count
mv = r0 # a value corresponding to maximum count
for i = 1:len
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
mv = x
end
end
end
return mv
end
"""
modes(a, [r])::Vector
mode(a::AbstractArray, wv::AbstractWeights)::Vector
##CHUNK 5
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
ms = T[]
for i = 1:n
@inbounds if cnts[i] == mc
push!(ms, r[i])
end
end
return ms
end
# compute mode over arbitrary iterable
##CHUNK 6
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
# find values corresponding to maximum counts
return [x for (x, w) in weights if w == mw]
end
##CHUNK 7
Return all modes (most common numbers) of an array, optionally over a
specified range `r` or weighted via vector `wv`.
"""
function modes(a::AbstractArray{T}, r::UnitRange{T}) where T<:Integer
r0 = r[1]
r1 = r[end]
n = length(r)
cnts = zeros(Int, n)
# find the maximum count
mc = 0
for i = 1:length(a)
@inbounds x = a[i]
if r0 <= x <= r1
@inbounds c = (cnts[x - r0 + 1] += 1)
if c > mc
mc = c
end
end
end
# find all values corresponding to maximum count
##CHUNK 8
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
end
return mv
end
|
164
| 184
|
StatsBase.jl
| 307
|
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
end
return mv
end
|
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
end
return mv
end
|
[
164,
184
] |
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
end
return mv
end
|
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
end
return mv
end
|
mode
| 164
| 184
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
processing time to draw ``k`` elements. It consumes ``O(k \\log(n / k))`` random numbers.
"""
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
##CHUNK 2
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
k = length(x)
w = Vector{Float64}(undef, n)
copyto!(w, wv)
for i = 1:k
u = rand(rng) * wsum
j = 1
##CHUNK 3
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
##CHUNK 4
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
##CHUNK 5
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
s = 0
@inbounds for _s in 1:n
s = _s
w = wv.values[s]
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
"""
countmap(x; alg = :auto)
countmap(x::AbstractVector, wv::AbstractVector{<:Real})
##CHUNK 2
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
end
# Weighted mode of arbitrary vectors of values
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
##CHUNK 2
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
# Weighted mode of arbitrary vectors of values
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
##CHUNK 3
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
# find values corresponding to maximum counts
return [x for (x, w) in weights if w == mw]
end
#############################
#
# quantile and friends
#
#############################
|
186
| 205
|
StatsBase.jl
| 308
|
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
# find values corresponding to maximum counts
return [x for (x, w) in weights if w == mw]
end
|
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
# find values corresponding to maximum counts
return [x for (x, w) in weights if w == mw]
end
|
[
186,
205
] |
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
# find values corresponding to maximum counts
return [x for (x, w) in weights if w == mw]
end
|
function modes(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mw = _w
end
weights[x] = _w
end
# find values corresponding to maximum counts
return [x for (x, w) in weights if w == mw]
end
|
modes
| 186
| 205
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/counts.jl
##CHUNK 1
If a weighting vector `wv` is specified, the sum of weights is used rather than the
raw counts.
"""
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer})
# add counts of integers from x that fall within levels to r
checkbounds(r, axes(levels)...)
m0 = first(levels)
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
##CHUNK 2
m1 = last(levels)
b = m0 - firstindex(levels) # firstindex(levels) == 1 because levels::UnitRange{<:Integer}
@inbounds for xi in x
if m0 <= xi <= m1
r[xi - b] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, levels::UnitRange{<:Integer}, wv::AbstractWeights)
# add wv weighted counts of integers from x that fall within levels to r
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
checkbounds(r, axes(levels)...)
##CHUNK 3
end
return r
end
"""
counts(x, [wv::AbstractWeights])
counts(x, levels::UnitRange{<:Integer}, [wv::AbstractWeights])
counts(x, k::Integer, [wv::AbstractWeights])
Count the number of times each value in `x` occurs. If `levels` is provided, only values
falling in that range will be considered (the others will be ignored without
raising an error or a warning). If an integer `k` is provided, only values in the
range `1:k` will be considered.
If a vector of weights `wv` is provided, the proportion of weights is computed rather
than the proportion of raw counts.
The output is a vector of length `length(levels)`.
"""
##CHUNK 4
r[xi - bx, yi - by] += 1
end
end
return r
end
function addcounts!(r::AbstractArray, x::AbstractArray{<:Integer}, y::AbstractArray{<:Integer},
levels::NTuple{2,UnitRange{<:Integer}}, wv::AbstractWeights)
# add counts of pairs from zip(x,y) to r
length(x) == length(y) == length(wv) ||
throw(DimensionMismatch("x, y, and wv must have the same length, but got $(length(x)), $(length(y)), and $(length(wv))"))
axes(x) == axes(y) ||
throw(DimensionMismatch("x and y must have the same axes, but got $(axes(x)) and $(axes(y))"))
xv, yv = vec(x), vec(y) # discard shape because weights() discards shape
xlevels, ylevels = levels
##CHUNK 5
length(x) == length(wv) ||
throw(DimensionMismatch("x and wv must have the same length, got $(length(x)) and $(length(wv))"))
xv = vec(x) # discard shape because weights() discards shape
z = zero(W)
for i in eachindex(xv, wv)
@inbounds xi = xv[i]
@inbounds wi = wv[i]
cm[xi] = get(cm, xi, z) + wi
end
return cm
end
"""
countmap(x; alg = :auto)
countmap(x::AbstractVector, wv::AbstractVector{<:Real})
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
end) @inbounds (@nref $N R j) += f((@nref $N A i) - (@nref $N means j)) * wi
end
return R
end
end
_wsum!(R::AbstractArray, A::AbstractArray, w::AbstractVector, dim::Int, init::Bool) =
_wsum_general!(R, identity, A, w, dim, init)
## wsum! and wsum
wsumtype(::Type{T}, ::Type{W}) where {T,W} = typeof(zero(T) * zero(W) + zero(T) * zero(W))
"""
wsum!(R::AbstractArray, A::AbstractArray,
w::AbstractVector, dim::Int;
init::Bool=true)
Compute the weighted sum of `A` with weights `w` over the dimension `dim` and store
the result in `R`. If `init=false`, the sum is added to `R` rather than starting
##CHUNK 2
if corrected
n = count(!iszero, w)
n / (s * (n - 1))
else
1 / s
end
end
"""
eweights(t::AbstractArray{<:Integer}, λ::Real; scale=false)
eweights(t::AbstractVector{T}, r::StepRange{T}, λ::Real; scale=false) where T
eweights(n::Integer, λ::Real; scale=false)
Construct a [`Weights`](@ref) vector which assigns exponentially decreasing weights to past
observations (larger integer values `i` in `t`).
The integer value `n` represents the number of past observations to consider.
`n` defaults to `maximum(t) - minimum(t) + 1` if only `t` is passed in
and the elements are integers, and to `length(r)` if a superset range `r` is also passed in.
If `n` is explicitly passed instead of `t`, `t` defaults to `1:n`.
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
processing time to draw ``k`` elements. It consumes ``O(k \\log(n / k))`` random numbers.
"""
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
end
# Weighted mode of arbitrary vectors of values
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
mv = first(a)
mw = first(wv)
weights = Dict{eltype(a), T}()
for (x, w) in zip(a, wv)
_w = get!(weights, x, zero(T)) + w
if _w > mw
mv = x
mw = _w
end
weights[x] = _w
##CHUNK 2
mc = c
end
else
cnts[x] = 1
# in this case: c = 1, and thus c > mc won't happen
end
y = iterate(a, st)
end
# find values corresponding to maximum counts
return [x for (x, c) in cnts if c == mc]
end
# Weighted mode of arbitrary vectors of values
function mode(a::AbstractVector, wv::AbstractWeights{T}) where T <: Real
isempty(a) && throw(ArgumentError("mode is not defined for empty collections"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(a) == length(wv) ||
throw(ArgumentError("data and weight vectors must be the same size, got $(length(a)) and $(length(wv))"))
# Iterate through the data
|
450
| 486
|
StatsBase.jl
| 309
|
function sem(x; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
T = eltype(x)
_mean = mean === nothing ? zero(T) / 1 : mean
z = abs2(zero(T) - _mean)
return oftype((z + z) / 2, NaN)
elseif mean === nothing
n = 0
y = iterate(x)
value, state = y
# Use Welford algorithm as seen in (among other places)
# Knuth's TAOCP, Vol 2, page 232, 3rd edition.
_mean = value / 1
sse = real(zero(_mean))
while y !== nothing
value, state = y
y = iterate(x, state)
n += 1
new_mean = _mean + (value - _mean) / n
sse += realXcY(value - _mean, value - new_mean)
_mean = new_mean
end
else
n = 1
y = iterate(x)
value, state = y
sse = abs2(value - mean)
while (y = iterate(x, state)) !== nothing
value, state = y
n += 1
sse += abs2(value - mean)
end
end
variance = sse / (n - 1)
return sqrt(variance / n)
end
|
function sem(x; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
T = eltype(x)
_mean = mean === nothing ? zero(T) / 1 : mean
z = abs2(zero(T) - _mean)
return oftype((z + z) / 2, NaN)
elseif mean === nothing
n = 0
y = iterate(x)
value, state = y
# Use Welford algorithm as seen in (among other places)
# Knuth's TAOCP, Vol 2, page 232, 3rd edition.
_mean = value / 1
sse = real(zero(_mean))
while y !== nothing
value, state = y
y = iterate(x, state)
n += 1
new_mean = _mean + (value - _mean) / n
sse += realXcY(value - _mean, value - new_mean)
_mean = new_mean
end
else
n = 1
y = iterate(x)
value, state = y
sse = abs2(value - mean)
while (y = iterate(x, state)) !== nothing
value, state = y
n += 1
sse += abs2(value - mean)
end
end
variance = sse / (n - 1)
return sqrt(variance / n)
end
|
[
450,
486
] |
function sem(x; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
T = eltype(x)
_mean = mean === nothing ? zero(T) / 1 : mean
z = abs2(zero(T) - _mean)
return oftype((z + z) / 2, NaN)
elseif mean === nothing
n = 0
y = iterate(x)
value, state = y
# Use Welford algorithm as seen in (among other places)
# Knuth's TAOCP, Vol 2, page 232, 3rd edition.
_mean = value / 1
sse = real(zero(_mean))
while y !== nothing
value, state = y
y = iterate(x, state)
n += 1
new_mean = _mean + (value - _mean) / n
sse += realXcY(value - _mean, value - new_mean)
_mean = new_mean
end
else
n = 1
y = iterate(x)
value, state = y
sse = abs2(value - mean)
while (y = iterate(x, state)) !== nothing
value, state = y
n += 1
sse += abs2(value - mean)
end
end
variance = sse / (n - 1)
return sqrt(variance / n)
end
|
function sem(x; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
T = eltype(x)
_mean = mean === nothing ? zero(T) / 1 : mean
z = abs2(zero(T) - _mean)
return oftype((z + z) / 2, NaN)
elseif mean === nothing
n = 0
y = iterate(x)
value, state = y
# Use Welford algorithm as seen in (among other places)
# Knuth's TAOCP, Vol 2, page 232, 3rd edition.
_mean = value / 1
sse = real(zero(_mean))
while y !== nothing
value, state = y
y = iterate(x, state)
n += 1
new_mean = _mean + (value - _mean) / n
sse += realXcY(value - _mean, value - new_mean)
_mean = new_mean
end
else
n = 1
y = iterate(x)
value, state = y
sse = abs2(value - mean)
while (y = iterate(x, state)) !== nothing
value, state = y
n += 1
sse += abs2(value - mean)
end
end
variance = sse / (n - 1)
return sqrt(variance / n)
end
|
sem
| 450
| 486
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
Compute the standard deviation of the vector `x` using the estimator `ce`.
"""
std(ce::CovarianceEstimator, x::AbstractVector; kwargs...) = sqrt(var(ce, x; kwargs...))
"""
cor(ce::CovarianceEstimator, x::AbstractVector, y::AbstractVector)
Compute the correlation of the vectors `x` and `y` using estimator `ce`.
"""
function cor(ce::CovarianceEstimator, x::AbstractVector, y::AbstractVector)
# Here we allow `ce` to see both `x` and `y` simultaneously, and allow it to compute
# a full covariance matrix, from which we will extract the correlation.
#
# Whilst in some cases it might be equivalent (and possibly more efficient) to use:
# cov(ce, x, y) / (std(ce, x) * std(ce, y)),
# this need not apply in general.
return cor(ce, hcat(x, y))[1, 2]
end
"""
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
#FILE: StatsBase.jl/src/moments.jl
##CHUNK 1
# This is Type 1 definition according to Joanes and Gill (1998)
"""
kurtosis(v, [wv::AbstractWeights], m=mean(v))
Compute the excess kurtosis of a real-valued array `v`, optionally
specifying a weighting vector `wv` and a center `m`.
"""
function kurtosis(v::AbstractArray{<:Real}, m::Real)
n = length(v)
cm2 = 0.0 # empirical 2nd centered moment (variance)
cm4 = 0.0 # empirical 4th centered moment
for i = 1:n
@inbounds z = v[i] - m
z2 = z * z
cm2 += z2
cm4 += z2 * z2
end
cm4 /= n
cm2 /= n
return (cm4 / (cm2 * cm2)) - 3.0
#FILE: StatsBase.jl/test/cov.jl
##CHUNK 1
X = randn(3, 8)
Z1 = X .- mean(X, dims = 1)
Z2 = X .- mean(X, dims = 2)
w1 = rand(3)
w2 = rand(8)
# varcorrection is negative if sum of weights is smaller than 1
if f === fweights
w1[1] += 1
w2[1] += 1
end
wv1 = f(w1)
wv2 = f(w2)
Z1w = X .- mean(X, wv1, dims=1)
Z2w = X .- mean(X, wv2, dims=2)
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
sem(x::AbstractArray, weights::FrequencyWeights; mean=nothing) =
sqrt(var(x, weights; mean=mean, corrected=true) / sum(weights))
function sem(x::AbstractArray, weights::ProbabilityWeights; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
return var(x, weights; mean=mean, corrected=true) / 0
else
_mean = mean === nothing ? Statistics.mean(x, weights) : mean
# sum of squared errors = sse
sse = sum(Broadcast.instantiate(Broadcast.broadcasted(x, weights) do x_i, w
return abs2(w * (x_i - _mean))
end))
n = count(!iszero, weights)
return sqrt(sse * n / (n - 1)) / sum(weights)
end
end
# Median absolute deviation
@irrational mad_constant 1.4826022185056018 BigFloat("1.482602218505601860547076529360423431326703202590312896536266275245674447622701")
##CHUNK 2
sse = sum(Broadcast.instantiate(Broadcast.broadcasted(x, weights) do x_i, w
return abs2(w * (x_i - _mean))
end))
n = count(!iszero, weights)
return sqrt(sse * n / (n - 1)) / sum(weights)
end
end
# Median absolute deviation
@irrational mad_constant 1.4826022185056018 BigFloat("1.482602218505601860547076529360423431326703202590312896536266275245674447622701")
"""
mad(x; center=median(x), normalize=true)
Compute the median absolute deviation (MAD) of collection `x` around `center`
(by default, around the median).
If `normalize` is set to `true`, the MAD is multiplied by
`1 / quantile(Normal(), 3/4) ≈ 1.4826`, in order to obtain a consistent estimator
of the standard deviation under the assumption that the data is normally distributed.
##CHUNK 3
function sem(x::AbstractArray; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
T = eltype(x)
_mean = mean === nothing ? zero(T) / 1 : mean
z = abs2(zero(T) - _mean)
return oftype((z + z) / 2, NaN)
end
return sqrt(var(x; mean=mean, corrected=true) / length(x))
end
function sem(x::AbstractArray, weights::UnitWeights; mean=nothing)
if length(x) ≠ length(weights)
throw(DimensionMismatch("array and weights do not have the same length"))
end
return sem(x; mean=mean)
end
# Weighted methods for the above
##CHUNK 4
c = center === nothing ? median!(x) : center
T = promote_type(typeof(c), eltype(x))
U = eltype(x)
x2 = U == T ? x : isconcretetype(U) && isconcretetype(T) && sizeof(U) == sizeof(T) ? reinterpret(T, x) : similar(x, T)
x2 .= abs.(x .- c)
m = median!(x2)
if normalize isa Nothing
Base.depwarn("the `normalize` keyword argument will be false by default in future releases: set it explicitly to silence this deprecation", :mad)
normalize = true
end
if !isa(constant, Nothing)
Base.depwarn("keyword argument `constant` is deprecated, use `normalize` instead or apply the multiplication directly", :mad)
m * constant
elseif normalize
m * mad_constant
else
m
end
end
##CHUNK 5
"""
function mad(x; center=nothing, normalize::Union{Bool, Nothing}=nothing, constant=nothing)
mad!(Base.copymutable(x); center=center, normalize=normalize, constant=constant)
end
"""
StatsBase.mad!(x; center=median!(x), normalize=true)
Compute the median absolute deviation (MAD) of array `x` around `center`
(by default, around the median), overwriting `x` in the process.
If `normalize` is set to `true`, the MAD is multiplied by
`1 / quantile(Normal(), 3/4) ≈ 1.4826`, in order to obtain a consistent estimator
of the standard deviation under the assumption that the data is normally distributed.
"""
function mad!(x::AbstractArray;
center=median!(x),
normalize::Union{Bool,Nothing}=true,
constant=nothing)
isempty(x) && throw(ArgumentError("mad is not defined for empty arrays"))
##CHUNK 6
# `mean` doesn't fail on empty input but rather returns `NaN`, so we can use the
# return type to populate the `SummaryStats` structure.
s = T >: Missing ? collect(skipmissing(a)) : a
m = mean(s)
stdev = std(s, mean=m)
R = typeof(m)
n = length(a)
ns = length(s)
qs = if ns == 0
R[NaN, NaN, NaN, NaN, NaN]
elseif T >: Missing
quantile!(s, [0.00, 0.25, 0.50, 0.75, 1.00])
else
quantile(s, [0.00, 0.25, 0.50, 0.75, 1.00])
end
SummaryStats{R}(m, stdev, qs..., n, n - ns)
end
function Base.show(io::IO, ss::SummaryStats)
println(io, "Summary Stats:")
|
621
| 634
|
StatsBase.jl
| 310
|
function _zscore!(Z::AbstractArray, X::AbstractArray, μ::Real, σ::Real)
# Z and X are assumed to have the same size
iσ = inv(σ)
if μ == zero(μ)
for i = 1 : length(X)
@inbounds Z[i] = X[i] * iσ
end
else
for i = 1 : length(X)
@inbounds Z[i] = (X[i] - μ) * iσ
end
end
return Z
end
|
function _zscore!(Z::AbstractArray, X::AbstractArray, μ::Real, σ::Real)
# Z and X are assumed to have the same size
iσ = inv(σ)
if μ == zero(μ)
for i = 1 : length(X)
@inbounds Z[i] = X[i] * iσ
end
else
for i = 1 : length(X)
@inbounds Z[i] = (X[i] - μ) * iσ
end
end
return Z
end
|
[
621,
634
] |
function _zscore!(Z::AbstractArray, X::AbstractArray, μ::Real, σ::Real)
# Z and X are assumed to have the same size
iσ = inv(σ)
if μ == zero(μ)
for i = 1 : length(X)
@inbounds Z[i] = X[i] * iσ
end
else
for i = 1 : length(X)
@inbounds Z[i] = (X[i] - μ) * iσ
end
end
return Z
end
|
function _zscore!(Z::AbstractArray, X::AbstractArray, μ::Real, σ::Real)
# Z and X are assumed to have the same size
iσ = inv(σ)
if μ == zero(μ)
for i = 1 : length(X)
@inbounds Z[i] = X[i] * iσ
end
else
for i = 1 : length(X)
@inbounds Z[i] = (X[i] - μ) * iσ
end
end
return Z
end
|
_zscore!
| 621
| 634
|
src/scalarstats.jl
|
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
end
return Z
end
end
function _zscore_chksize(X::AbstractArray, μ::AbstractArray, σ::AbstractArray)
size(μ) == size(σ) || throw(DimensionMismatch("μ and σ should have the same size."))
for i=1:ndims(X)
dμ_i = size(μ,i)
(dμ_i == 1 || dμ_i == size(X,i)) || throw(DimensionMismatch("X and μ have incompatible sizes."))
end
end
"""
zscore!([Z], X, μ, σ)
Compute the z-scores of an array `X` with mean `μ` and standard deviation `σ`.
z-scores are the signed number of standard deviations above the mean that an
observation lies, i.e. ``(x - μ) / σ``.
##CHUNK 2
"""
zscore(X, [μ, σ])
Compute the z-scores of `X`, optionally specifying a precomputed mean `μ` and
standard deviation `σ`. z-scores are the signed number of standard deviations
above the mean that an observation lies, i.e. ``(x - μ) / σ``.
`μ` and `σ` should be both scalars or both arrays. The computation is broadcasting.
In particular, when `μ` and `σ` are arrays, they should have the same size, and
`size(μ, i) == 1 || size(μ, i) == size(X, i)` for each dimension.
"""
function zscore(X::AbstractArray{T}, μ::Real, σ::Real) where T<:Real
ZT = typeof((zero(T) - zero(μ)) / one(σ))
_zscore!(Array{ZT}(undef, size(X)), X, μ, σ)
end
function zscore(X::AbstractArray{T}, μ::AbstractArray{U}, σ::AbstractArray{S}) where {T<:Real,U<:Real,S<:Real}
_zscore_chksize(X, μ, σ)
##CHUNK 3
μ::AbstractArray{<:Real}, σ::AbstractArray{<:Real})
size(Z) == size(X) || throw(DimensionMismatch("Z and X must have the same size."))
_zscore_chksize(X, μ, σ)
_zscore!(Z, X, μ, σ)
end
zscore!(X::AbstractArray{<:AbstractFloat}, μ::Real, σ::Real) = _zscore!(X, X, μ, σ)
zscore!(X::AbstractArray{<:AbstractFloat}, μ::AbstractArray{<:Real}, σ::AbstractArray{<:Real}) =
(_zscore_chksize(X, μ, σ); _zscore!(X, X, μ, σ))
"""
zscore(X, [μ, σ])
Compute the z-scores of `X`, optionally specifying a precomputed mean `μ` and
standard deviation `σ`. z-scores are the signed number of standard deviations
above the mean that an observation lies, i.e. ``(x - μ) / σ``.
`μ` and `σ` should be both scalars or both arrays. The computation is broadcasting.
##CHUNK 4
In particular, when `μ` and `σ` are arrays, they should have the same size, and
`size(μ, i) == 1 || size(μ, i) == size(X, i)` for each dimension.
"""
function zscore(X::AbstractArray{T}, μ::Real, σ::Real) where T<:Real
ZT = typeof((zero(T) - zero(μ)) / one(σ))
_zscore!(Array{ZT}(undef, size(X)), X, μ, σ)
end
function zscore(X::AbstractArray{T}, μ::AbstractArray{U}, σ::AbstractArray{S}) where {T<:Real,U<:Real,S<:Real}
_zscore_chksize(X, μ, σ)
ZT = typeof((zero(T) - zero(U)) / one(S))
_zscore!(Array{ZT}(undef, size(X)), X, μ, σ)
end
zscore(X::AbstractArray{<:Real}) = ((μ, σ) = mean_and_std(X); zscore(X, μ, σ))
zscore(X::AbstractArray{<:Real}, dim::Int) = ((μ, σ) = mean_and_std(X, dim); zscore(X, μ, σ))
#############################
##CHUNK 5
@generated function _zscore!(Z::AbstractArray{S,N}, X::AbstractArray{T,N},
μ::AbstractArray, σ::AbstractArray) where {S,T,N}
quote
# Z and X are assumed to have the same size
# μ and σ are assumed to have the same size, that is compatible with size(X)
siz1 = size(X, 1)
@nextract $N ud d->size(μ, d)
if size(μ, 1) == 1 && siz1 > 1
@nloops $N i d->(d>1 ? (1:size(X,d)) : (1:1)) d->(j_d = ud_d ==1 ? 1 : i_d) begin
v = (@nref $N μ j)
c = inv(@nref $N σ j)
for i_1 = 1:siz1
(@nref $N Z i) = ((@nref $N X i) - v) * c
end
end
else
@nloops $N i X d->(j_d = ud_d ==1 ? 1 : i_d) begin
(@nref $N Z i) = ((@nref $N X i) - (@nref $N μ j)) / (@nref $N σ j)
end
##CHUNK 6
v = (@nref $N μ j)
c = inv(@nref $N σ j)
for i_1 = 1:siz1
(@nref $N Z i) = ((@nref $N X i) - v) * c
end
end
else
@nloops $N i X d->(j_d = ud_d ==1 ? 1 : i_d) begin
(@nref $N Z i) = ((@nref $N X i) - (@nref $N μ j)) / (@nref $N σ j)
end
end
return Z
end
end
function _zscore_chksize(X::AbstractArray, μ::AbstractArray, σ::AbstractArray)
size(μ) == size(σ) || throw(DimensionMismatch("μ and σ should have the same size."))
for i=1:ndims(X)
dμ_i = size(μ,i)
(dμ_i == 1 || dμ_i == size(X,i)) || throw(DimensionMismatch("X and μ have incompatible sizes."))
##CHUNK 7
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
##CHUNK 8
ZT = typeof((zero(T) - zero(U)) / one(S))
_zscore!(Array{ZT}(undef, size(X)), X, μ, σ)
end
zscore(X::AbstractArray{<:Real}) = ((μ, σ) = mean_and_std(X); zscore(X, μ, σ))
zscore(X::AbstractArray{<:Real}, dim::Int) = ((μ, σ) = mean_and_std(X, dim); zscore(X, μ, σ))
#############################
#
# entropy and friends
#
#############################
"""
entropy(p, [b])
Compute the entropy of a collection of probabilities `p`,
optionally specifying a real number `b` such that the entropy is scaled by `1/log(b)`.
##CHUNK 9
if α ≈ 0
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += 1
end
end
s = log(s / scale)
elseif α ≈ 1
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
##CHUNK 10
end
end
"""
zscore!([Z], X, μ, σ)
Compute the z-scores of an array `X` with mean `μ` and standard deviation `σ`.
z-scores are the signed number of standard deviations above the mean that an
observation lies, i.e. ``(x - μ) / σ``.
If a destination array `Z` is provided, the scores are stored
in `Z` and it must have the same shape as `X`. Otherwise `X` is overwritten.
"""
function zscore!(Z::AbstractArray{ZT}, X::AbstractArray{T}, μ::Real, σ::Real) where {ZT<:AbstractFloat,T<:Real}
size(Z) == size(X) || throw(DimensionMismatch("Z and X must have the same size."))
_zscore!(Z, X, μ, σ)
end
function zscore!(Z::AbstractArray{<:AbstractFloat}, X::AbstractArray{<:Real},
|
758
| 793
|
StatsBase.jl
| 311
|
function renyientropy(p::AbstractArray{T}, α::Real) where T<:Real
α < 0 && throw(ArgumentError("Order of Rényi entropy not legal, $(α) < 0."))
s = zero(T)
z = zero(T)
scale = sum(p)
if α ≈ 0
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += 1
end
end
s = log(s / scale)
elseif α ≈ 1
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
|
function renyientropy(p::AbstractArray{T}, α::Real) where T<:Real
α < 0 && throw(ArgumentError("Order of Rényi entropy not legal, $(α) < 0."))
s = zero(T)
z = zero(T)
scale = sum(p)
if α ≈ 0
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += 1
end
end
s = log(s / scale)
elseif α ≈ 1
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
|
[
758,
793
] |
function renyientropy(p::AbstractArray{T}, α::Real) where T<:Real
α < 0 && throw(ArgumentError("Order of Rényi entropy not legal, $(α) < 0."))
s = zero(T)
z = zero(T)
scale = sum(p)
if α ≈ 0
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += 1
end
end
s = log(s / scale)
elseif α ≈ 1
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
|
function renyientropy(p::AbstractArray{T}, α::Real) where T<:Real
α < 0 && throw(ArgumentError("Order of Rényi entropy not legal, $(α) < 0."))
s = zero(T)
z = zero(T)
scale = sum(p)
if α ≈ 0
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += 1
end
end
s = log(s / scale)
elseif α ≈ 1
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s -= pi * log(pi)
end
end
s = s / scale
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
|
renyientropy
| 758
| 793
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
eweights(n::Integer, λ::Real; kwargs...) = _eweights(1:n, λ, n; kwargs...)
eweights(t::AbstractVector, r::AbstractRange, λ::Real; kwargs...) =
_eweights(something.(indexin(t, r)), λ, length(r); kwargs...)
function _eweights(t::AbstractArray{<:Integer}, λ::Real, n::Integer; scale::Union{Bool, Nothing}=nothing)
0 < λ <= 1 || throw(ArgumentError("Smoothing factor must be between 0 and 1"))
f = depcheck(:eweights, :scale, scale) ? _scaled_eweight : _unscaled_eweight
w0 = map(t) do i
i > 0 || throw(ArgumentError("Time indices must be non-zero positive integers"))
f(i, λ, n)
end
s = sum(w0)
Weights(w0, s)
end
_unscaled_eweight(i, λ, n) = λ * (1 - λ)^(1 - i)
_scaled_eweight(i, λ, n) = (1 - λ)^(n - i)
##CHUNK 2
if corrected
n = count(!iszero, w)
n / (s * (n - 1))
else
1 / s
end
end
"""
eweights(t::AbstractArray{<:Integer}, λ::Real; scale=false)
eweights(t::AbstractVector{T}, r::StepRange{T}, λ::Real; scale=false) where T
eweights(n::Integer, λ::Real; scale=false)
Construct a [`Weights`](@ref) vector which assigns exponentially decreasing weights to past
observations (larger integer values `i` in `t`).
The integer value `n` represents the number of past observations to consider.
`n` defaults to `maximum(t) - minimum(t) + 1` if only `t` is passed in
and the elements are integers, and to `length(r)` if a superset range `r` is also passed in.
If `n` is explicitly passed instead of `t`, `t` defaults to `1:n`.
#FILE: StatsBase.jl/src/toeplitzsolvers.jl
##CHUNK 1
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
return x
end
levinson(r::AbstractVector{T}, b::AbstractVector{T}) where {T<:BlasReal} = levinson!(r, copy(b), zeros(T, length(b)))
##CHUNK 2
# Symmetric Toeplitz solver
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
#FILE: StatsBase.jl/src/reliability.jl
##CHUNK 1
v = vec(sum(covmatrix, dims=1))
σ = sum(v)
for i in axes(v, 1)
v[i] -= covmatrix[i, i]
end
σ_diag = sum(i -> covmatrix[i, i], 1:k)
alpha = k * (1 - σ_diag / σ) / (k - 1)
if k > 2
dropped = typeof(alpha)[(k - 1) * (1 - (σ_diag - covmatrix[i, i]) / (σ - 2*v[i] - covmatrix[i, i])) / (k - 2)
for i in 1:k]
else
# if k = 2 do not produce dropped; this has to be also
# correctly handled in show
dropped = Vector{typeof(alpha)}()
end
return CronbachAlpha(alpha, dropped)
end
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
end
i < k && throw(DimensionMismatch("wv must have at least $k strictly positive entries (got $i)"))
heapify!(pq)
# set threshold
@inbounds threshold = pq[1].first
X = threshold*randexp(rng)
@inbounds for i in s+1:n
w = wv.values[i]
w < 0 && error("Negative weight found in weight vector at index $i")
w > 0 || continue
X -= w
X <= 0 || continue
# update priority queue
t = exp(-w/threshold)
pq[1] = (-w/log(t+rand(rng)*(1-t)) => i)
percolate_down!(pq, 1)
#FILE: StatsBase.jl/test/scalarstats.jl
##CHUNK 1
##### Renyi entropies
# Generate a random probability distribution
nindiv = 50
dist = rand(nindiv)
dist /= sum(dist)
# Check Shannon entropy against Renyi entropy of order 1
@test entropy(dist) ≈ renyientropy(dist, 1)
@test renyientropy(dist, 1) ≈ renyientropy(dist, 1.0)
# Check Renyi entropy of order 0 is the natural log of the count of non-zeros
@test renyientropy(dist, 0) ≈ log(mapreduce(x -> x > 0 ? 1 : 0, +, dist))
# And is therefore not affected by the addition of non-zeros
zdist = dist
zdist = append!(dist, zeros(50))
@test renyientropy(dist, 0) ≈ renyientropy(zdist, 0)
# Indeed no Renyi entropy should be
loworder = rand() # Between 0 and 1
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
T = eltype(x)
_mean = mean === nothing ? zero(T) / 1 : mean
z = abs2(zero(T) - _mean)
return oftype((z + z) / 2, NaN)
elseif mean === nothing
n = 0
y = iterate(x)
value, state = y
# Use Welford algorithm as seen in (among other places)
# Knuth's TAOCP, Vol 2, page 232, 3rd edition.
_mean = value / 1
sse = real(zero(_mean))
while y !== nothing
value, state = y
y = iterate(x, state)
n += 1
new_mean = _mean + (value - _mean) / n
sse += realXcY(value - _mean, value - new_mean)
##CHUNK 2
end
return -sum(xlogx, p)
end
# Base-`b` entropy: the natural-log entropy divided by log(b).
function entropy(p, b::Real)
    ent = entropy(p)
    # Promote explicitly before applying `log` to avoid undesired promotions
    # with `log(b)::Float64` arising from `b::Int` (ref: #924).
    base, _ = promote(b, ent)
    return ent / log(base)
end
"""
renyientropy(p, α)
Compute the Rényi (generalized) entropy of order `α` of an array `p`.
"""
"""
crossentropy(p, q, [b])
##CHUNK 3
Base.depwarn(
"support for empty collections will be removed since they do not "*
"represent proper probability distributions",
:kldivergence,
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
piqi = iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi
return xlogy(pi, piqi)
end
return sum(Broadcast.instantiate(broadcasted))
end
|
801
| 818
|
StatsBase.jl
| 312
|
# Cross entropy between probability collections `p` and `q`: -Σ pᵢ log(qᵢ),
# computed via `xlogy` so that pᵢ = 0 terms contribute zero.
function crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
    length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
    if isempty(p)
        # Deprecated: empty inputs are not proper distributions.
        Base.depwarn(
            "support for empty collections will be removed since they do not " *
            "represent proper probability distributions",
            :crossentropy,
        )
        return xlogy(zero(eltype(p)), zero(eltype(q)))
    end
    # Lazily broadcast xlogy over the flattened inputs so `sum` can use
    # pairwise summation (https://github.com/JuliaLang/julia/pull/31020).
    terms = Broadcast.instantiate(Broadcast.broadcasted(xlogy, vec(p), vec(q)))
    return -sum(terms)
end
|
function crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not " *
"represent proper probability distributions",
:crossentropy,
)
# return zero for empty arrays
return xlogy(zero(eltype(p)), zero(eltype(q)))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(xlogy, vec(p), vec(q))
return - sum(Broadcast.instantiate(broadcasted))
end
|
[
801,
818
] |
function crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not " *
"represent proper probability distributions",
:crossentropy,
)
# return zero for empty arrays
return xlogy(zero(eltype(p)), zero(eltype(q)))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(xlogy, vec(p), vec(q))
return - sum(Broadcast.instantiate(broadcasted))
end
|
function crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not " *
"represent proper probability distributions",
:crossentropy,
)
# return zero for empty arrays
return xlogy(zero(eltype(p)), zero(eltype(q)))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(xlogy, vec(p), vec(q))
return - sum(Broadcast.instantiate(broadcasted))
end
|
crossentropy
| 801
| 818
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
#######################################
"""
corspearman(x, y=x)
Compute Spearman's rank correlation coefficient. If `x` and `y` are vectors, the
output is a float, otherwise it's a matrix corresponding to the pairwise correlations
of the columns of `x` and `y`.
"""
# Spearman rank correlation of two equal-length real vectors: the Pearson
# correlation of their tied ranks. Any NaN makes the result NaN.
function corspearman(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
    length(x) == length(y) || throw(DimensionMismatch("vectors must have same length"))
    if any(isnan, x) || any(isnan, y)
        # Ranks (and hence the correlation) are undefined in the presence of NaN.
        return NaN
    end
    return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
that is the sum `pᵢ * log(pᵢ / qᵢ)`. Optionally a real number `b`
can be specified such that the divergence is scaled by `1/log(b)`.
"""
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not "*
"represent proper probability distributions",
:kldivergence,
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
##CHUNK 2
"represent proper probability distributions",
:kldivergence,
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
piqi = iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi
return xlogy(pi, piqi)
end
return sum(Broadcast.instantiate(broadcasted))
end
# Base-`b` variant: KL divergence scaled by 1/log(b)
# (e.g. `b = 2` expresses the divergence in bits).
kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
kldivergence(p,q) / log(b)
##CHUNK 3
crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
crossentropy(p,q) / log(b)
"""
kldivergence(p, q, [b])
Compute the Kullback-Leibler divergence from `q` to `p`,
also called the relative entropy of `p` with respect to `q`,
that is the sum `pᵢ * log(pᵢ / qᵢ)`. Optionally a real number `b`
can be specified such that the divergence is scaled by `1/log(b)`.
"""
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not "*
##CHUNK 4
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
piqi = iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi
return xlogy(pi, piqi)
end
return sum(Broadcast.instantiate(broadcasted))
end
kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
kldivergence(p,q) / log(b)
#############################
#
# summary
#
#############################
struct SummaryStats{T<:Union{AbstractFloat,Missing}}
mean::T
sd::T
##CHUNK 5
#############################
#
# entropy and friends
#
#############################
"""
entropy(p, [b])
Compute the entropy of a collection of probabilities `p`,
optionally specifying a real number `b` such that the entropy is scaled by `1/log(b)`.
Elements with probability 0 or 1 add 0 to the entropy.
"""
function entropy(p)
if isempty(p)
throw(ArgumentError("empty collections are not supported since they do not " *
"represent proper probability distributions"))
##CHUNK 6
end
return s
end
"""
crossentropy(p, q, [b])
Compute the cross entropy between `p` and `q`, optionally specifying a real
number `b` such that the result is scaled by `1/log(b)`.
"""
# Base-`b` variant: cross entropy scaled by 1/log(b)
# (e.g. `b = 2` expresses the result in bits).
crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
crossentropy(p,q) / log(b)
"""
kldivergence(p, q, [b])
Compute the Kullback-Leibler divergence from `q` to `p`,
also called the relative entropy of `p` with respect to `q`,
##CHUNK 7
entropy(p, [b])
Compute the entropy of a collection of probabilities `p`,
optionally specifying a real number `b` such that the entropy is scaled by `1/log(b)`.
Elements with probability 0 or 1 add 0 to the entropy.
"""
# Shannon entropy of a collection of probabilities `p` (natural log):
# -Σ pᵢ log(pᵢ), with `xlogx` mapping 0 to 0 so zero probabilities add nothing.
function entropy(p)
    isempty(p) &&
        throw(ArgumentError("empty collections are not supported since they do not " *
                            "represent proper probability distributions"))
    return -sum(xlogx, p)
end
function entropy(p, b::Real)
e = entropy(p)
# Promote explicitly before applying `log` to avoid undesired promotions
# with `log(b)::Float64` arising from `b::Int` (ref: #924)
_b = first(promote(b, e))
return e / log(_b)
##CHUNK 8
throw(DimensionMismatch("array and weights do not have the same length"))
end
return sem(x; mean=mean)
end
# Weighted methods for the above
sem(x::AbstractArray, weights::FrequencyWeights; mean=nothing) =
sqrt(var(x, weights; mean=mean, corrected=true) / sum(weights))
function sem(x::AbstractArray, weights::ProbabilityWeights; mean=nothing)
if isempty(x)
# Return the NaN of the type that we would get for a nonempty x
return var(x, weights; mean=mean, corrected=true) / 0
else
_mean = mean === nothing ? Statistics.mean(x, weights) : mean
# sum of squared errors = sse
sse = sum(Broadcast.instantiate(Broadcast.broadcasted(x, weights) do x_i, w
return abs2(w * (x_i - _mean))
end))
##CHUNK 9
elseif (isinf(α))
s = -log(maximum(p))
else # a normal Rényi entropy
for i = 1:length(p)
@inbounds pi = p[i]
if pi > z
s += pi ^ α
end
end
s = log(s / scale) / (1 - α)
end
return s
end
"""
crossentropy(p, q, [b])
Compute the cross entropy between `p` and `q`, optionally specifying a real
number `b` such that the result is scaled by `1/log(b)`.
"""
|
832
| 855
|
StatsBase.jl
| 313
|
# Kullback-Leibler divergence from `q` to `p`: Σ pᵢ log(pᵢ/qᵢ).
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
    length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
    if isempty(p)
        # Deprecated: empty inputs are not proper distributions.
        Base.depwarn(
            "support for empty collections will be removed since they do not " *
            "represent proper probability distributions",
            :kldivergence,
        )
        pzero = zero(eltype(p))
        qzero = zero(eltype(q))
        return xlogy(pzero, zero(pzero / qzero))
    end
    # Per-element term pᵢ log(pᵢ/qᵢ); the pᵢ = qᵢ = 0 case is forced to zero
    # (a naive pᵢ/qᵢ would produce NaN there).
    term(pi, qi) = xlogy(pi, iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi)
    # Lazy broadcast + instantiate lets `sum` use pairwise summation
    # (https://github.com/JuliaLang/julia/pull/31020).
    return sum(Broadcast.instantiate(Broadcast.broadcasted(term, vec(p), vec(q))))
end
|
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not "*
"represent proper probability distributions",
:kldivergence,
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
piqi = iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi
return xlogy(pi, piqi)
end
return sum(Broadcast.instantiate(broadcasted))
end
|
[
832,
855
] |
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not "*
"represent proper probability distributions",
:kldivergence,
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
piqi = iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi
return xlogy(pi, piqi)
end
return sum(Broadcast.instantiate(broadcasted))
end
|
function kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not "*
"represent proper probability distributions",
:kldivergence,
)
# return zero for empty arrays
pzero = zero(eltype(p))
qzero = zero(eltype(q))
return xlogy(pzero, zero(pzero / qzero))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(vec(p), vec(q)) do pi, qi
# handle pi = qi = 0, otherwise `NaN` is returned
piqi = iszero(pi) && iszero(qi) ? zero(pi / qi) : pi / qi
return xlogy(pi, piqi)
end
return sum(Broadcast.instantiate(broadcasted))
end
|
kldivergence
| 832
| 855
|
src/scalarstats.jl
|
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
##CHUNK 2
#######################################
"""
corspearman(x, y=x)
Compute Spearman's rank correlation coefficient. If `x` and `y` are vectors, the
output is a float, otherwise it's a matrix corresponding to the pairwise correlations
of the columns of `x` and `y`.
"""
function corspearman(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 2
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
##CHUNK 3
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 4
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 5
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
#CURRENT FILE: StatsBase.jl/src/scalarstats.jl
##CHUNK 1
function crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real})
length(p) == length(q) || throw(DimensionMismatch("Inconsistent array length."))
# handle empty collections
if isempty(p)
Base.depwarn(
"support for empty collections will be removed since they do not " *
"represent proper probability distributions",
:crossentropy,
)
# return zero for empty arrays
return xlogy(zero(eltype(p)), zero(eltype(q)))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(xlogy, vec(p), vec(q))
return - sum(Broadcast.instantiate(broadcasted))
end
crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
##CHUNK 2
# return zero for empty arrays
return xlogy(zero(eltype(p)), zero(eltype(q)))
end
# use pairwise summation (https://github.com/JuliaLang/julia/pull/31020)
broadcasted = Broadcast.broadcasted(xlogy, vec(p), vec(q))
return - sum(Broadcast.instantiate(broadcasted))
end
crossentropy(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
crossentropy(p,q) / log(b)
"""
kldivergence(p, q, [b])
Compute the Kullback-Leibler divergence from `q` to `p`,
also called the relative entropy of `p` with respect to `q`,
that is the sum `pᵢ * log(pᵢ / qᵢ)`. Optionally a real number `b`
can be specified such that the divergence is scaled by `1/log(b)`.
##CHUNK 3
crossentropy(p,q) / log(b)
"""
kldivergence(p, q, [b])
Compute the Kullback-Leibler divergence from `q` to `p`,
also called the relative entropy of `p` with respect to `q`,
that is the sum `pᵢ * log(pᵢ / qᵢ)`. Optionally a real number `b`
can be specified such that the divergence is scaled by `1/log(b)`.
"""
kldivergence(p::AbstractArray{<:Real}, q::AbstractArray{<:Real}, b::Real) =
kldivergence(p,q) / log(b)
#############################
#
# summary
#
#############################
|
64
| 76
|
StatsBase.jl
| 314
|
# Autocovariance of vector `x` at each lag in `lags`, written into `r`
# (which must have `length(lags)` entries). When `demean` is true the mean
# of `x` is subtracted first. Each value is normalized by `length(x)`.
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
    n = length(x)
    length(r) == length(lags) || throw(DimensionMismatch())
    check_lags(n, lags)
    # Work in a division-closed element type so integer inputs behave correctly.
    T = typeof(zero(eltype(x)) / 1)
    centered::Vector{T} = demean ? x .- mean(x) : x
    for (k, lag) in enumerate(lags)
        r[k] = _autodot(centered, n, lag) / n
    end
    return r
end
|
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
|
[
64,
76
] |
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
|
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
|
autocov!
| 64
| 76
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
end
end
return r
##CHUNK 2
The output is not normalized. See [`autocor!`](@ref) for a method with normalization.
"""
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
end
end
return r
##CHUNK 3
If `x` is a vector, `r` must be a vector of the same length as `lags`.
If `x` is a matrix, `r` must be a matrix of size `(length(lags), size(x,2))`, and
where each column in the result will correspond to a column in `x`.
The output is normalized by the variance of `x`, i.e. so that the lag 0
autocorrelation is 1. See [`autocov!`](@ref) for the unnormalized form.
"""
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
##CHUNK 4
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
##CHUNK 5
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
##CHUNK 6
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
##CHUNK 7
of size `(length(lags), size(y, 2))`. If both `x` and `y` are matrices, `r` must be a
three-dimensional array of size `(length(lags), size(x, 2), size(y, 2))`.
The output is not normalized. See [`crosscor!`](@ref) for a function with normalization.
"""
# Cross-covariance of vectors `x` and `y` at each lag in `lags`, written into
# `r` (length `length(lags)`). When `demean` is true each series has its mean
# subtracted first. Each value is normalized by `length(x)`.
function crosscov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
    n = length(x)
    length(y) == n || throw(DimensionMismatch())
    length(r) == length(lags) || throw(DimensionMismatch())
    check_lags(n, lags)
    # Division-closed element types so integer inputs behave correctly.
    Tx = typeof(zero(eltype(x)) / 1)
    Ty = typeof(zero(eltype(y)) / 1)
    cx::Vector{Tx} = demean ? x .- mean(x) : x
    cy::Vector{Ty} = demean ? y .- mean(y) : y
    for (k, lag) in enumerate(lags)
        r[k] = _crossdot(cx, cy, n, lag) / n
    end
    return r
end
##CHUNK 8
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 9
"""
autocov!(r, x, lags; demean=true)
Compute the autocovariance of a vector or matrix `x` at `lags` and store the result
in `r`. `demean` denotes whether the mean of `x` should be subtracted from `x`
before computing the autocovariance.
If `x` is a vector, `r` must be a vector of the same length as `lags`.
If `x` is a matrix, `r` must be a matrix of size `(length(lags), size(x,2))`, and
where each column in the result will correspond to a column in `x`.
The output is not normalized. See [`autocor!`](@ref) for a method with normalization.
"""
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 10
#
#######################################
default_autolags(lx::Int) = 0 : default_laglen(lx)
_autodot(x::AbstractVector{<:Union{Float32, Float64}}, lx::Int, l::Int) = dot(x, 1:(lx-l), x, (1+l):lx)
_autodot(x::AbstractVector{<:Real}, lx::Int, l::Int) = dot(view(x, 1:(lx-l)), view(x, (1+l):lx))
## autocov
"""
autocov!(r, x, lags; demean=true)
Compute the autocovariance of a vector or matrix `x` at `lags` and store the result
in `r`. `demean` denotes whether the mean of `x` should be subtracted from `x`
before computing the autocovariance.
If `x` is a vector, `r` must be a vector of the same length as `lags`.
If `x` is a matrix, `r` must be a matrix of size `(length(lags), size(x,2))`, and
where each column in the result will correspond to a column in `x`.
|
78
| 94
|
StatsBase.jl
| 315
|
# Column-wise autocovariance of matrix `x`: column `j` of `r` holds the
# autocovariances of `x[:, j]` at each lag in `lags`, so `r` must be
# `(length(lags), size(x, 2))`. Each value is normalized by `size(x, 1)`.
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
    n = size(x, 1)
    ncols = size(x, 2)
    nlags = length(lags)
    size(r) == (nlags, ncols) || throw(DimensionMismatch())
    check_lags(n, lags)
    T = typeof(zero(eltype(x)) / 1)
    # Single reusable buffer for the (optionally demeaned) current column.
    buf = Vector{T}(undef, n)
    for j = 1:ncols
        demean_col!(buf, x, j, demean)
        for k = 1:nlags
            r[k, j] = _autodot(buf, n, lags[k]) / n
        end
    end
    return r
end
|
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
end
end
return r
end
|
[
78,
94
] |
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
end
end
return r
end
|
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
end
end
return r
end
|
autocov!
| 78
| 94
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
##CHUNK 2
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
##CHUNK 3
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
##CHUNK 4
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
##CHUNK 5
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
##CHUNK 6
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
##CHUNK 7
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
end
end
return r
end
"""
##CHUNK 8
"""
function crosscov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
##CHUNK 9
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
##CHUNK 10
The output is not normalized. See [`autocor!`](@ref) for a method with normalization.
"""
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
"""
|
142
| 155
|
StatsBase.jl
| 316
|
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
|
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
|
[
142,
155
] |
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
|
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
|
autocor!
| 142
| 155
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
"""
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
end
end
return r
end
##CHUNK 2
The output is not normalized. See [`autocor!`](@ref) for a method with normalization.
"""
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
##CHUNK 3
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 4
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
##CHUNK 5
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
##CHUNK 6
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 7
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
##CHUNK 8
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
##CHUNK 9
Compute the autocorrelation function (ACF) of a vector or matrix `x` at `lags`
and store the result in `r`. `demean` denotes whether the mean of `x` should
be subtracted from `x` before computing the ACF.
If `x` is a vector, `r` must be a vector of the same length as `lags`.
If `x` is a matrix, `r` must be a matrix of size `(length(lags), size(x,2))`, and
where each column in the result will correspond to a column in `x`.
The output is normalized by the variance of `x`, i.e. so that the lag 0
autocorrelation is 1. See [`autocov!`](@ref) for the unnormalized form.
"""
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
##CHUNK 10
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
|
157
| 174
|
StatsBase.jl
| 317
|
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
end
end
return r
end
|
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
end
end
return r
end
|
[
157,
174
] |
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
end
end
return r
end
|
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
end
end
return r
end
|
autocor!
| 157
| 174
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
##CHUNK 2
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 3
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
##CHUNK 4
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
##CHUNK 5
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
##CHUNK 6
"""
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
"""
autocor(x, [lags]; demean=true)
##CHUNK 7
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
##CHUNK 8
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
##CHUNK 9
The output is not normalized. See [`autocor!`](@ref) for a method with normalization.
"""
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
##CHUNK 10
function crosscov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
|
249
| 263
|
StatsBase.jl
| 318
|
function crosscov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
|
function crosscov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
|
[
249,
263
] |
function crosscov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
|
function crosscov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
|
crosscov!
| 249
| 263
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
##CHUNK 2
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
##CHUNK 3
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
##CHUNK 4
The output is normalized by `sqrt(var(x)*var(y))`. See [`crosscov!`](@ref) for the
unnormalized form.
"""
function crosscor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
##CHUNK 5
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
##CHUNK 6
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
##CHUNK 7
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 8
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
##CHUNK 9
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 10
"""
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
|
265
| 283
|
StatsBase.jl
| 319
|
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
[
265,
283
] |
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
crosscov!
| 265
| 283
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
##CHUNK 2
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
##CHUNK 3
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
##CHUNK 4
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
##CHUNK 5
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
##CHUNK 6
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
##CHUNK 7
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 8
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
##CHUNK 9
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
##CHUNK 10
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
|
285
| 303
|
StatsBase.jl
| 320
|
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
[
285,
303
] |
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
|
crosscov!
| 285
| 303
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 2
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
##CHUNK 3
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
##CHUNK 4
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 5
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
##CHUNK 6
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
##CHUNK 7
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
##CHUNK 8
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 9
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
##CHUNK 10
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
|
305
| 340
|
StatsBase.jl
| 321
|
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
for i = 1 : nx
zx = zxs[i]
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
end
return r
end
|
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
for i = 1 : nx
zx = zxs[i]
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
end
return r
end
|
[
305,
340
] |
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
for i = 1 : nx
zx = zxs[i]
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
end
return r
end
|
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
for i = 1 : nx
zx = zxs[i]
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
end
return r
end
|
crosscov!
| 305
| 340
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 2
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 3
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
##CHUNK 4
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
##CHUNK 5
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
##CHUNK 6
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 7
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 8
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / lx
##CHUNK 9
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
##CHUNK 10
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
|
400
| 415
|
StatsBase.jl
| 322
|
function crosscor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
|
function crosscor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
|
[
400,
415
] |
function crosscor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
|
function crosscor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
|
crosscor!
| 400
| 415
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 2
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
##CHUNK 3
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
##CHUNK 4
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 5
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
##CHUNK 6
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 7
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 8
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
##CHUNK 9
"""
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
##CHUNK 10
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
|
417
| 437
|
StatsBase.jl
| 323
|
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
[
417,
437
] |
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
crosscor!
| 417
| 437
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 2
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
##CHUNK 3
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 4
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 5
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 6
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
##CHUNK 7
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
##CHUNK 8
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
##CHUNK 9
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
##CHUNK 10
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
|
439
| 459
|
StatsBase.jl
| 324
|
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
[
439,
459
] |
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
|
crosscor!
| 439
| 459
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 2
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 3
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 4
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
##CHUNK 5
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 6
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
##CHUNK 7
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
##CHUNK 8
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
##CHUNK 9
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
##CHUNK 10
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
|
461
| 501
|
StatsBase.jl
| 325
|
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
xxs[j] = dot(xj, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
yy = dot(zy, zy)
for i = 1 : nx
zx = zxs[i]
sc = sqrt(xxs[i] * yy)
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
end
return r
end
|
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
xxs[j] = dot(xj, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
yy = dot(zy, zy)
for i = 1 : nx
zx = zxs[i]
sc = sqrt(xxs[i] * yy)
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
end
return r
end
|
[
461,
501
] |
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
xxs[j] = dot(xj, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
yy = dot(zy, zy)
for i = 1 : nx
zx = zxs[i]
sc = sqrt(xxs[i] * yy)
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
end
return r
end
|
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
xxs = Vector{T}(undef, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
for i = 1 : lx
xj[i] -= mv
end
end
push!(zxs, xj)
xxs[j] = dot(xj, xj)
end
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ny
demean_col!(zy, y, j, demean)
yy = dot(zy, zy)
for i = 1 : nx
zx = zxs[i]
sc = sqrt(xxs[i] * yy)
for k = 1 : m
r[k,i,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
end
return r
end
|
crosscor!
| 461
| 501
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
##CHUNK 2
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 3
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
##CHUNK 4
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
##CHUNK 5
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 6
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
##CHUNK 7
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for j = 1 : ns
demean_col!(zx, x, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 8
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
##CHUNK 9
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / lx
end
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
##CHUNK 10
end
return r
end
function crosscov!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
zxs = Vector{T}[]
sizehint!(zxs, nx)
for j = 1 : nx
xj = x[:,j]
if demean
mv = mean(xj)
|
593
| 608
|
StatsBase.jl
| 326
|
function pacf!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}; method::Symbol=:regression) where T<:Union{Float32, Float64}
lx = size(X, 1)
m = length(lags)
minlag, maxlag = extrema(lags)
(0 <= minlag && 2maxlag < lx) || error("Invalid lag value.")
size(r) == (m, size(X,2)) || throw(DimensionMismatch())
if method == :regression
pacf_regress!(r, X, lags, maxlag)
elseif method == :yulewalker
pacf_yulewalker!(r, X, lags, maxlag)
else
error("Invalid method: $method")
end
return r
end
|
function pacf!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}; method::Symbol=:regression) where T<:Union{Float32, Float64}
lx = size(X, 1)
m = length(lags)
minlag, maxlag = extrema(lags)
(0 <= minlag && 2maxlag < lx) || error("Invalid lag value.")
size(r) == (m, size(X,2)) || throw(DimensionMismatch())
if method == :regression
pacf_regress!(r, X, lags, maxlag)
elseif method == :yulewalker
pacf_yulewalker!(r, X, lags, maxlag)
else
error("Invalid method: $method")
end
return r
end
|
[
593,
608
] |
function pacf!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}; method::Symbol=:regression) where T<:Union{Float32, Float64}
lx = size(X, 1)
m = length(lags)
minlag, maxlag = extrema(lags)
(0 <= minlag && 2maxlag < lx) || error("Invalid lag value.")
size(r) == (m, size(X,2)) || throw(DimensionMismatch())
if method == :regression
pacf_regress!(r, X, lags, maxlag)
elseif method == :yulewalker
pacf_yulewalker!(r, X, lags, maxlag)
else
error("Invalid method: $method")
end
return r
end
|
function pacf!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}; method::Symbol=:regression) where T<:Union{Float32, Float64}
lx = size(X, 1)
m = length(lags)
minlag, maxlag = extrema(lags)
(0 <= minlag && 2maxlag < lx) || error("Invalid lag value.")
size(r) == (m, size(X,2)) || throw(DimensionMismatch())
if method == :regression
pacf_regress!(r, X, lags, maxlag)
elseif method == :yulewalker
pacf_yulewalker!(r, X, lags, maxlag)
else
error("Invalid method: $method")
end
return r
end
|
pacf!
| 593
| 608
|
src/signalcorr.jl
|
#CURRENT FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
regression models, and `:yulewalker`, which computes the partial autocorrelations
using the Yule-Walker equations.
If `x` is a vector, return a vector of the same length as `lags`.
If `x` is a matrix, return a matrix of size `(length(lags), size(x, 2))`,
where each column in the result corresponds to a column in `x`.
"""
function pacf(X::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; method::Symbol=:regression)
out = Matrix{float(eltype(X))}(undef, length(lags), size(X,2))
pacf!(out, float(X), lags; method=method)
end
function pacf(x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; method::Symbol=:regression)
vec(pacf(reshape(x, length(x), 1), lags, method=method))
end
##CHUNK 2
"""
function autocor!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
zz = dot(z, z)
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
##CHUNK 3
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
##CHUNK 4
The output is not normalized. See [`autocor!`](@ref) for a method with normalization.
"""
function autocov!(r::AbstractVector{<:Real}, x::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
m = length(lags)
length(r) == m || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z::Vector{T} = demean ? x .- mean(x) : x
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / lx
end
return r
end
function autocov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
##CHUNK 5
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx = Vector{T}(undef, lx)
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
yy = dot(zy, zy)
for j = 1 : ns
demean_col!(zx, x, j, demean)
##CHUNK 6
for k = 1 : m # foreach lag value
r[k] = _autodot(z, lx, lags[k]) / zz
end
return r
end
function autocor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
size(r) == (m, ns) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
z = Vector{T}(undef, lx)
for j = 1 : ns
demean_col!(z, x, j, demean)
zz = dot(z, z)
for k = 1 : m
r[k,j] = _autodot(z, lx, lags[k]) / zz
##CHUNK 7
function pacf_regress!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}, mk::Integer)
lx = size(X, 1)
tmpX = ones(eltype(X), lx, mk + 1)
for j = 1 : size(X,2)
for l = 1 : mk
for i = 1+l:lx
tmpX[i,l+1] = X[i-l,j]
end
end
for i = 1 : length(lags)
l = lags[i]
sX = view(tmpX, 1+l:lx, 1:l+1)
r[i,j] = l == 0 ? 1 : (cholesky!(sX'sX, Val(false)) \ (sX'view(X, 1+l:lx, j)))[end]
end
end
r
end
function pacf_yulewalker!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}, mk::Integer) where T<:Union{Float32, Float64}
##CHUNK 8
lx = length(x)
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
sc = sqrt(dot(zx, zx) * dot(zy, zy))
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / sc
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
##CHUNK 9
sc = sqrt(dot(zx, zx) * yy)
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractMatrix{<:Real}, x::AbstractVector{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = length(x)
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
##CHUNK 10
tmp = Vector{T}(undef, mk)
for j = 1 : size(X,2)
acfs = autocor(X[:,j], 1:mk)
for i = 1 : length(lags)
l = lags[i]
r[i,j] = l == 0 ? 1 : l == 1 ? acfs[i] : -durbin!(view(acfs, 1:l), tmp)[l]
end
end
end
"""
pacf!(r, X, lags; method=:regression)
Compute the partial autocorrelation function (PACF) of a matrix `X` at `lags` and
store the result in `r`. `method` designates the estimation method. Recognized values
are `:regression`, which computes the partial autocorrelations via successive
regression models, and `:yulewalker`, which computes the partial autocorrelations
using the Yule-Walker equations.
|
2
| 24
|
StatsBase.jl
| 327
|
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
y[k+1] = α
end
return y
end
|
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
y[k+1] = α
end
return y
end
|
[
2,
24
] |
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
y[k+1] = α
end
return y
end
|
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
y[k+1] = α
end
return y
end
|
durbin!
| 2
| 24
|
src/toeplitzsolvers.jl
|
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
# In-place conversion of a covariance matrix `C` to a correlation matrix,
# dividing by the standard deviations `s` (default: square roots of the
# diagonal of `C`).  Proceeds column by column: the strict lower triangle of
# each column is scaled and clamped via `_clampcor`, the diagonal is set to
# one, and the upper triangle is filled by (conjugate) symmetry from entries
# already converted in earlier columns.  Returns `C`.
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
    Base.require_one_based_indexing(C, s)
    n = length(s)
    size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
    for j = 1:n
        sj = s[j]
        for i = 1:(j-1)
            # C[j,i] lies in the lower triangle of an earlier column and was
            # already scaled, so only the adjoint is needed here.
            C[i,j] = adjoint(C[j,i])
        end
        C[j,j] = oneunit(C[j,j])
        for i = (j+1):n
            C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
        end
    end
    return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
##CHUNK 2
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
# Preserve structure of Symmetric and Hermitian covariance matrices
function cov2cor!(C::Union{Symmetric{<:Real},Hermitian}, s::AbstractArray)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
A = parent(C)
if C.uplo === 'U'
for j = 1:n
sj = s[j]
for i = 1:(j-1)
A[i,j] = _clampcor(A[i,j] / (s[i] * sj))
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for i = 1 : length(lags)
l = lags[i]
sX = view(tmpX, 1+l:lx, 1:l+1)
r[i,j] = l == 0 ? 1 : (cholesky!(sX'sX, Val(false)) \ (sX'view(X, 1+l:lx, j)))[end]
end
end
r
end
# Yule-Walker estimate of the partial autocorrelation function.
# Fills `r[i,j]` with the lag-`lags[i]` PACF of column `j` of `X`.  For lag
# l >= 2 this is the negated last Levinson-Durbin coefficient of the first
# l autocorrelations; lag 0 is 1 by definition.  `tmp` is a scratch buffer
# of length `mk` (the maximum lag) reused across durbin! calls.
# NOTE(review): the l == 1 branch reads `acfs[i]` (indexed by position in
# `lags`), which matches the lag-1 autocorrelation only when lags[i] == i —
# confirm against callers that pass contiguous lags.
function pacf_yulewalker!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}, mk::Integer) where T<:Union{Float32, Float64}
    tmp = Vector{T}(undef, mk)
    for j = 1 : size(X,2)
        acfs = autocor(X[:,j], 1:mk)
        for i = 1 : length(lags)
            l = lags[i]
            r[i,j] = l == 0 ? 1 : l == 1 ? acfs[i] : -durbin!(view(acfs, 1:l), tmp)[l]
        end
    end
end
##CHUNK 2
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
#FILE: StatsBase.jl/src/reliability.jl
##CHUNK 1
v = vec(sum(covmatrix, dims=1))
σ = sum(v)
for i in axes(v, 1)
v[i] -= covmatrix[i, i]
end
σ_diag = sum(i -> covmatrix[i, i], 1:k)
alpha = k * (1 - σ_diag / σ) / (k - 1)
if k > 2
dropped = typeof(alpha)[(k - 1) * (1 - (σ_diag - covmatrix[i, i]) / (σ - 2*v[i] - covmatrix[i, i])) / (k - 2)
for i in 1:k]
else
# if k = 2 do not produce dropped; this has to be also
# correctly handled in show
dropped = Vector{typeof(alpha)}()
end
return CronbachAlpha(alpha, dropped)
end
#FILE: StatsBase.jl/src/partialcor.jl
##CHUNK 1
throw(DimensionMismatch("Inputs must have the same number of observations"))
length(x) > 0 || throw(ArgumentError("Inputs must be non-empty"))
return Statistics.clampcor(_partialcor(x, mean(x), y, mean(y), Z))
end
# Partial correlation of `x` and `y` controlling for the columns of `Z`,
# computed recursively: peel off the first control variable z₀ and apply the
# first-order recursion
#   r(xy·{z₀,Z'}) = (r(xy·Z') - r(xz₀·Z')·r(z₀y·Z'))
#                   / sqrt((1 - r(xz₀·Z')²)(1 - r(z₀y·Z')²)).
# `μx`/`μy` are precomputed means of `x`/`y`; the single-control base case is
# handled by the vector method of `_partialcor`.
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, Z::AbstractMatrix)
    p = size(Z, 2)
    p == 1 && return _partialcor(x, μx, y, μy, vec(Z))  # base case: one control left
    z₀ = view(Z, :, 1)
    Zmz₀ = view(Z, :, 2:p)  # remaining controls
    μz₀ = mean(z₀)
    rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
    rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
    # Type assertion keeps the recursion type-stable across branches.
    rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
    return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
#CURRENT FILE: StatsBase.jl/src/toeplitzsolvers.jl
##CHUNK 1
end
x[k+1] = μ
if k < n - 1
α = -r[k+2]
for j = 2:k+1
α -= r[j]*b[k-j+2]
end
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
##CHUNK 2
# Symmetric Toeplitz solver
durbin(r::AbstractVector{T}) where {T<:BlasReal} = durbin!(r, zeros(T, length(r)))
function levinson!(r::AbstractVector{T}, b::AbstractVector{T}, x::AbstractVector{T}) where T<:BlasReal
n = length(b)
n == length(r) || throw(DimensionMismatch("Vectors must have same length"))
n <= length(x) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
x[1] = b[1]
b[1] = -r[2]/r[1]
β = one(T)
α = -r[2]/r[1]
for k = 1:n-1
β *= one(T) - α*α
μ = b[k+1]
for j = 2:k+1
μ -= r[j]/r[1]*x[k-j+2]
end
μ /= β
for j = 1:k
x[j] += μ*b[k-j+1]
##CHUNK 3
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
return x
end
levinson(r::AbstractVector{T}, b::AbstractVector{T}) where {T<:BlasReal} = levinson!(r, copy(b), zeros(T, length(b)))
##CHUNK 4
α = -r[2]/r[1]
for k = 1:n-1
β *= one(T) - α*α
μ = b[k+1]
for j = 2:k+1
μ -= r[j]/r[1]*x[k-j+2]
end
μ /= β
for j = 1:k
x[j] += μ*b[k-j+1]
end
x[k+1] = μ
if k < n - 1
α = -r[k+2]
for j = 2:k+1
α -= r[j]*b[k-j+2]
end
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
|
27
| 65
|
StatsBase.jl
| 328
|
# Levinson recursion: solve the symmetric Toeplitz system T(r) * x = b,
# where T(r) has first row `r` (r[1] on the diagonal).  `b` is overwritten
# with the backward-vector coefficients during the sweep; `x` receives the
# solution and must be at least as long as `b`.  Returns `x`.
function levinson!(r::AbstractVector{T}, b::AbstractVector{T}, x::AbstractVector{T}) where T<:BlasReal
    n = length(b)
    n == length(r) || throw(DimensionMismatch("Vectors must have same length"))
    n <= length(x) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
    x[1] = b[1]
    b[1] = -r[2]/r[1]  # first backward-vector coefficient (b now reused as workspace)
    β = one(T)         # prediction-error factor
    α = -r[2]/r[1]     # reflection coefficient
    for k = 1:n-1
        β *= one(T) - α*α
        μ = b[k+1]
        for j = 2:k+1
            μ -= r[j]/r[1]*x[k-j+2]
        end
        μ /= β
        for j = 1:k
            x[j] += μ*b[k-j+1]
        end
        x[k+1] = μ
        if k < n - 1
            # Extend the backward vector to the next order.
            α = -r[k+2]
            for j = 2:k+1
                α -= r[j]*b[k-j+2]
            end
            α /= β*r[1]
            # In-place symmetric update of b[1:k], done pairwise from both
            # ends; the middle element (odd k) pairs with itself.
            for j = 1:div(k,2)
                tmp = b[j]
                b[j] += α*b[k-j+1]
                b[k-j+1] += α*tmp
            end
            if isodd(k) b[div(k,2)+1] *= one(T) + α end
            b[k+1] = α
        end
    end
    # Undo the r[1] normalization used throughout the recursion.
    for i = 1:n
        x[i] /= r[1]
    end
    return x
end
|
function levinson!(r::AbstractVector{T}, b::AbstractVector{T}, x::AbstractVector{T}) where T<:BlasReal
n = length(b)
n == length(r) || throw(DimensionMismatch("Vectors must have same length"))
n <= length(x) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
x[1] = b[1]
b[1] = -r[2]/r[1]
β = one(T)
α = -r[2]/r[1]
for k = 1:n-1
β *= one(T) - α*α
μ = b[k+1]
for j = 2:k+1
μ -= r[j]/r[1]*x[k-j+2]
end
μ /= β
for j = 1:k
x[j] += μ*b[k-j+1]
end
x[k+1] = μ
if k < n - 1
α = -r[k+2]
for j = 2:k+1
α -= r[j]*b[k-j+2]
end
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
return x
end
|
[
27,
65
] |
function levinson!(r::AbstractVector{T}, b::AbstractVector{T}, x::AbstractVector{T}) where T<:BlasReal
n = length(b)
n == length(r) || throw(DimensionMismatch("Vectors must have same length"))
n <= length(x) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
x[1] = b[1]
b[1] = -r[2]/r[1]
β = one(T)
α = -r[2]/r[1]
for k = 1:n-1
β *= one(T) - α*α
μ = b[k+1]
for j = 2:k+1
μ -= r[j]/r[1]*x[k-j+2]
end
μ /= β
for j = 1:k
x[j] += μ*b[k-j+1]
end
x[k+1] = μ
if k < n - 1
α = -r[k+2]
for j = 2:k+1
α -= r[j]*b[k-j+2]
end
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
return x
end
|
function levinson!(r::AbstractVector{T}, b::AbstractVector{T}, x::AbstractVector{T}) where T<:BlasReal
n = length(b)
n == length(r) || throw(DimensionMismatch("Vectors must have same length"))
n <= length(x) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
x[1] = b[1]
b[1] = -r[2]/r[1]
β = one(T)
α = -r[2]/r[1]
for k = 1:n-1
β *= one(T) - α*α
μ = b[k+1]
for j = 2:k+1
μ -= r[j]/r[1]*x[k-j+2]
end
μ /= β
for j = 1:k
x[j] += μ*b[k-j+1]
end
x[k+1] = μ
if k < n - 1
α = -r[k+2]
for j = 2:k+1
α -= r[j]*b[k-j+2]
end
α /= β*r[1]
for j = 1:div(k,2)
tmp = b[j]
b[j] += α*b[k-j+1]
b[k-j+1] += α*tmp
end
if isodd(k) b[div(k,2)+1] *= one(T) + α end
b[k+1] = α
end
end
for i = 1:n
x[i] /= r[1]
end
return x
end
|
levinson!
| 27
| 65
|
src/toeplitzsolvers.jl
|
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
"""
function cov2cor!(C::AbstractMatrix, s::AbstractArray = map(sqrt, view(C, diagind(C))))
Base.require_one_based_indexing(C, s)
n = length(s)
size(C) == (n, n) || throw(DimensionMismatch("inconsistent dimensions"))
for j = 1:n
sj = s[j]
for i = 1:(j-1)
C[i,j] = adjoint(C[j,i])
end
C[j,j] = oneunit(C[j,j])
for i = (j+1):n
C[i,j] = _clampcor(C[i,j] / (s[i] * sj))
end
end
return C
end
_clampcor(x::Real) = clamp(x, -1, 1)
_clampcor(x) = x
#FILE: StatsBase.jl/src/partialcor.jl
##CHUNK 1
throw(DimensionMismatch("Inputs must have the same number of observations"))
length(x) > 0 || throw(ArgumentError("Inputs must be non-empty"))
return Statistics.clampcor(_partialcor(x, mean(x), y, mean(y), Z))
end
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, Z::AbstractMatrix)
p = size(Z, 2)
p == 1 && return _partialcor(x, μx, y, μy, vec(Z))
z₀ = view(Z, :, 1)
Zmz₀ = view(Z, :, 2:p)
μz₀ = mean(z₀)
rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
##CHUNK 2
μz₀ = mean(z₀)
rxz = _partialcor(x, μx, z₀, μz₀, Zmz₀)
rzy = _partialcor(z₀, μz₀, y, μy, Zmz₀)
rxy = _partialcor(x, μx, y, μy, Zmz₀)::typeof(rxz)
return (rxy - rxz * rzy) / (sqrt(1 - rxz^2) * sqrt(1 - rzy^2))
end
function _partialcor(x::AbstractVector, μx, y::AbstractVector, μy, z::AbstractVector)
μz = mean(z)
# Initialize all of the accumulators to 0 of the appropriate types
Σxx = abs2(zero(eltype(x)) - zero(μx))
Σyy = abs2(zero(eltype(y)) - zero(μy))
Σzz = abs2(zero(eltype(z)) - zero(μz))
Σxy = zero(Σxx * Σyy)
Σxz = zero(Σxx * Σzz)
Σzy = zero(Σzz * Σyy)
# We only want to make one pass over all of the arrays
@inbounds begin
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for i = 1 : length(lags)
l = lags[i]
sX = view(tmpX, 1+l:lx, 1:l+1)
r[i,j] = l == 0 ? 1 : (cholesky!(sX'sX, Val(false)) \ (sX'view(X, 1+l:lx, j)))[end]
end
end
r
end
function pacf_yulewalker!(r::AbstractMatrix{<:Real}, X::AbstractMatrix{T}, lags::AbstractVector{<:Integer}, mk::Integer) where T<:Union{Float32, Float64}
tmp = Vector{T}(undef, mk)
for j = 1 : size(X,2)
acfs = autocor(X[:,j], 1:mk)
for i = 1 : length(lags)
l = lags[i]
r[i,j] = l == 0 ? 1 : l == 1 ? acfs[i] : -durbin!(view(acfs, 1:l), tmp)[l]
end
end
end
##CHUNK 2
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
#FILE: StatsBase.jl/test/cov.jl
##CHUNK 1
X = randn(3, 8)
Z1 = X .- mean(X, dims = 1)
Z2 = X .- mean(X, dims = 2)
w1 = rand(3)
w2 = rand(8)
# varcorrection is negative if sum of weights is smaller than 1
if f === fweights
w1[1] += 1
w2[1] += 1
end
wv1 = f(w1)
wv2 = f(w2)
Z1w = X .- mean(X, wv1, dims=1)
Z2w = X .- mean(X, wv2, dims=2)
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
nswaps = merge_sort!(y, 1, n)
ntiesy = countties(y, 1, n)
# Calls to float below prevent possible overflow errors when
# length(x) exceeds 77_936 (32 bit) or 5_107_605_667 (64 bit)
(npairs + ndoubleties - ntiesx - ntiesy - 2 * nswaps) /
sqrt(float(npairs - ntiesx) * float(npairs - ntiesy))
end
"""
corkendall(x, y=x)
Compute Kendall's rank correlation coefficient, τ. `x` and `y` must both be either
matrices or vectors.
"""
corkendall(x::AbstractVector{<:Real}, y::AbstractVector{<:Real}) = corkendall!(copy(x), copy(y))
function corkendall(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
permy = sortperm(y)
return([corkendall!(copy(y), X[:,i], permy) for i in 1:size(X, 2)])
#FILE: StatsBase.jl/test/weights.jl
##CHUNK 1
@test r ≈ sum(x .* reshape(w3, (1, 1, 5)), dims=3) .+ 1.0
end
## sum, mean and quantile
a = reshape(1.0:27.0, 3, 3, 3)
@testset "Sum $f" for f in weight_funcs
@test sum([1.0, 2.0, 3.0], f([1.0, 0.5, 0.5])) ≈ 3.5
@test sum(1:3, f([1.0, 1.0, 0.5])) ≈ 4.5
@test sum([1 + 2im, 2 + 3im], f([1.0, 0.5])) ≈ 2 + 3.5im
@test sum([[1, 2], [3, 4]], f([2, 3])) == [11, 16]
for wt in ([1.0, 1.0, 1.0], [1.0, 0.2, 0.0], [0.2, 0.0, 1.0])
@test sum(a, f(wt), dims=1) ≈ sum(a.*reshape(wt, length(wt), 1, 1), dims=1)
@test sum(a, f(wt), dims=2) ≈ sum(a.*reshape(wt, 1, length(wt), 1), dims=2)
@test sum(a, f(wt), dims=3) ≈ sum(a.*reshape(wt, 1, 1, length(wt)), dims=3)
end
end
#CURRENT FILE: StatsBase.jl/src/toeplitzsolvers.jl
##CHUNK 1
# Symmetric Toeplitz solver
function durbin!(r::AbstractVector{T}, y::AbstractVector{T}) where T<:BlasReal
n = length(r)
n <= length(y) || throw(DimensionMismatch("Auxiliary vector cannot be shorter than data vector"))
y[1] = -r[1]
β = one(T)
α = -r[1]
for k = 1:n-1
β *= one(T) - α*α
α = -r[k+1]
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
##CHUNK 2
for j = 1:k
α -= r[k-j+1]*y[j]
end
α /= β
for j = 1:div(k,2)
tmp = y[j]
y[j] += α*y[k-j+1]
y[k-j+1] += α*tmp
end
if isodd(k) y[div(k,2)+1] *= one(T) + α end
y[k+1] = α
end
return y
end
durbin(r::AbstractVector{T}) where {T<:BlasReal} = durbin!(r, zeros(T, length(r)))
levinson(r::AbstractVector{T}, b::AbstractVector{T}) where {T<:BlasReal} = levinson!(r, copy(b), zeros(T, length(b)))
|
111
| 130
|
StatsBase.jl
| 329
|
# Fit a ZScoreTransform to a matrix: compute mean and standard deviation per
# column (dims=1, rows are observations) or per row (dims=2).
# `dims === nothing` is a deprecated alias for dims=2.  When `center` or
# `scale` is false, the corresponding vector is left empty (length 0) so the
# transform step can be skipped downstream (transform! checks isempty).
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
             dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
    if dims === nothing
        Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
        dims = 2
    end
    if dims == 1
        n, l = size(X)  # n observations, l variables
        n >= 2 || error("X must contain at least two rows.")
        m, s = mean_and_std(X, 1)
    elseif dims == 2
        l, n = size(X)
        n >= 2 || error("X must contain at least two columns.")
        m, s = mean_and_std(X, 2)
    else
        throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
    end
    return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
                                    (scale ? vec(s) : similar(s, 0)))
end
|
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
|
[
111,
130
] |
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
|
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
|
fit
| 111
| 130
|
src/transformations.jl
|
#FILE: StatsBase.jl/src/cov.jl
##CHUNK 1
function cov(sc::SimpleCovariance, X::AbstractMatrix; dims::Int=1, mean=nothing)
dims ∈ (1, 2) || throw(ArgumentError("Argument dims can only be 1 or 2 (given: $dims)"))
if mean === nothing
return cov(X; dims=dims, corrected=sc.corrected)
else
return covm(X, mean, dims, corrected=sc.corrected)
end
end
function cov(sc::SimpleCovariance, X::AbstractMatrix, w::AbstractWeights; dims::Int=1, mean=nothing)
dims ∈ (1, 2) || throw(ArgumentError("Argument dims can only be 1 or 2 (given: $dims)"))
if mean === nothing
return cov(X, w, dims, corrected=sc.corrected)
else
return covm(X, mean, w, dims, corrected=sc.corrected)
end
end
##CHUNK 2
corrected::Bool
SimpleCovariance(;corrected::Bool=false) = new(corrected)
end
cov(sc::SimpleCovariance, x::AbstractVector) =
cov(x; corrected=sc.corrected)
cov(sc::SimpleCovariance, x::AbstractVector, y::AbstractVector) =
cov(x, y; corrected=sc.corrected)
function cov(sc::SimpleCovariance, X::AbstractMatrix; dims::Int=1, mean=nothing)
dims ∈ (1, 2) || throw(ArgumentError("Argument dims can only be 1 or 2 (given: $dims)"))
if mean === nothing
return cov(X; dims=dims, corrected=sc.corrected)
else
return covm(X, mean, dims, corrected=sc.corrected)
end
end
function cov(sc::SimpleCovariance, X::AbstractMatrix, w::AbstractWeights; dims::Int=1, mean=nothing)
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
function crosscor!(r::AbstractArray{<:Real,3}, x::AbstractMatrix{<:Real}, y::AbstractMatrix{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
nx = size(x, 2)
ny = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, nx, ny)) || throw(DimensionMismatch())
check_lags(lx, lags)
# cached (centered) columns of x
T = typeof(zero(eltype(x)) / 1)
#CURRENT FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
# Fit a ZScoreTransform to a vector by delegating to the matrix method on a
# one-column reshape.  Only `dims = 1` is meaningful for vector input; any
# other value raises a DomainError.
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
             dims::Integer=1, center::Bool=true, scale::Bool=true)
    dims == 1 ||
        throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
    colmat = reshape(X, :, 1)
    return fit(ZScoreTransform, colmat; dims=dims, center=center, scale=scale)
end
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
##CHUNK 2
julia> StatsBase.transform(dt, X)
2×3 Matrix{Float64}:
0.5 0.0 1.0
0.0 0.5 1.0
```
"""
# Fit a UnitRangeTransform to a matrix: record per-variable minimum and the
# reciprocal of the range, per column (dims=1) or per row (dims=2).
# `dims === nothing` is a deprecated alias for dims=2.
function fit(::Type{UnitRangeTransform}, X::AbstractMatrix{<:Real};
             dims::Union{Integer,Nothing}=nothing, unit::Bool=true)
    if dims === nothing
        Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
        dims = 2
    end
    dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
    tmin, tmax = _compute_extrema(X, dims)
    @. tmax = 1 / (tmax - tmin)  # reuse tmax buffer to store 1/range (the scale)
    l = length(tmin)
    return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
function _compute_extrema(X::AbstractMatrix, dims::Integer)
##CHUNK 3
# Fit a UnitRangeTransform to a vector: store its minimum and the reciprocal
# of its range as single-element vectors.  Only `dims = 1` is valid for
# vector input; any other value raises a DomainError.
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
             dims::Integer=1, unit::Bool=true)
    dims == 1 ||
        throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
    lo, hi = extrema(X)
    invrange = 1 / (hi - lo)
    return UnitRangeTransform(1, dims, unit, [lo], [invrange])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
##CHUNK 4
julia> dt = fit(ZScoreTransform, X, dims=2)
ZScoreTransform{Float64, Vector{Float64}}(2, 2, [0.0, 1.0], [0.5, 1.0])
julia> StatsBase.transform(dt, X)
2×3 Matrix{Float64}:
0.0 -1.0 1.0
-1.0 0.0 1.0
```
"""
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
##CHUNK 5
dims == 2 && return _compute_extrema(X', 1)
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
##CHUNK 6
return t.len
else
return getfield(t, p)
end
end
"""
fit(ZScoreTransform, X; dims=nothing, center=true, scale=true)
Fit standardization parameters to vector or matrix `X`
and return a `ZScoreTransform` transformation object.
# Keyword arguments
* `dims`: if `1` fit standardization parameters in column-wise fashion;
if `2` fit in row-wise fashion. The default is `nothing`, which is equivalent to `dims=2` with a deprecation warning.
* `center`: if `true` (the default) center data so that its mean is zero.
* `scale`: if `true` (the default) scale the data so that its variance is equal to one.
##CHUNK 7
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
# Per-variable minima and maxima of `X`, returned as a pair of vectors
# `(tmin, tmax)`: column-wise when `dims == 1`, row-wise when `dims == 2`
# (handled by recursing on the lazy transpose).
function _compute_extrema(X::AbstractMatrix, dims::Integer)
    dims == 2 && return _compute_extrema(X', 1)
    ncols = size(X, 2)
    lo = similar(X, ncols)
    hi = similar(X, ncols)
    @inbounds for j in 1:ncols
        lo[j], hi[j] = extrema(view(X, :, j))
    end
    return lo, hi
end
|
141
| 171
|
StatsBase.jl
| 330
|
# Apply a fitted ZScoreTransform `t` to `x`, writing standardized values into
# `y` (which may alias `x`).  With `t.dims == 1` variables are columns of
# width `t.len`; an empty `t.mean` / `t.scale` means centering / scaling was
# disabled at fit time, so that step is skipped.  `t.dims == 2` is handled by
# recursing on the transposed views with a dims=1 copy of the transform.
# Returns `y`.
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
    if t.dims == 1
        l = t.len
        size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
        n = size(y,1)
        size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
        m = t.mean
        s = t.scale
        if isempty(m)
            if isempty(s)
                if x !== y
                    copyto!(y, x)  # identity transform; skip if y aliases x
                end
            else
                broadcast!(/, y, x, s')                      # scale only
            end
        else
            if isempty(s)
                broadcast!(-, y, x, m')                      # center only
            else
                broadcast!((x,m,s)->(x-m)/s, y, x, m', s')   # center and scale, fused
            end
        end
    elseif t.dims == 2
        t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
        transform!(y', t_, x')
    end
    return y
end
|
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if x !== y
copyto!(y, x)
end
else
broadcast!(/, y, x, s')
end
else
if isempty(s)
broadcast!(-, y, x, m')
else
broadcast!((x,m,s)->(x-m)/s, y, x, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
transform!(y', t_, x')
end
return y
end
|
[
141,
171
] |
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if x !== y
copyto!(y, x)
end
else
broadcast!(/, y, x, s')
end
else
if isempty(s)
broadcast!(-, y, x, m')
else
broadcast!((x,m,s)->(x-m)/s, y, x, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
transform!(y', t_, x')
end
return y
end
|
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if x !== y
copyto!(y, x)
end
else
broadcast!(/, y, x, s')
end
else
if isempty(s)
broadcast!(-, y, x, m')
else
broadcast!((x,m,s)->(x-m)/s, y, x, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
transform!(y', t_, x')
end
return y
end
|
transform!
| 141
| 171
|
src/transformations.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
#CURRENT FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
if isempty(m)
if isempty(s)
if y !== x
copyto!(x, y)
end
else
broadcast!(*, x, y, s')
end
else
if isempty(s)
broadcast!(+, x, y, m')
else
broadcast!((y,m,s)->y*s+m, x, y, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
reconstruct!(x', t_, y')
end
##CHUNK 2
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
##CHUNK 3
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if y !== x
copyto!(x, y)
end
else
broadcast!(*, x, y, s')
end
else
##CHUNK 4
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
##CHUNK 5
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
##CHUNK 6
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
##CHUNK 7
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
##CHUNK 8
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
##CHUNK 9
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
|
173
| 203
|
StatsBase.jl
| 331
|
# Invert a ZScoreTransform: map standardized values `y` back to the original
# scale, writing into `x` (which may alias `y`).  Mirrors transform!: empty
# `t.mean` / `t.scale` means the corresponding step was disabled at fit time
# and is skipped; `t.dims == 2` recurses on the transposed views with a
# dims=1 copy of the transform.  Returns `x`.
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
    if t.dims == 1
        l = t.len
        size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
        n = size(y,1)
        size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
        m = t.mean
        s = t.scale
        if isempty(m)
            if isempty(s)
                if y !== x
                    copyto!(x, y)  # identity transform; skip if x aliases y
                end
            else
                broadcast!(*, x, y, s')                    # undo scaling only
            end
        else
            if isempty(s)
                broadcast!(+, x, y, m')                    # undo centering only
            else
                broadcast!((y,m,s)->y*s+m, x, y, m', s')   # undo scale then shift, fused
            end
        end
    elseif t.dims == 2
        t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
        reconstruct!(x', t_, y')
    end
    return x
end
|
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if y !== x
copyto!(x, y)
end
else
broadcast!(*, x, y, s')
end
else
if isempty(s)
broadcast!(+, x, y, m')
else
broadcast!((y,m,s)->y*s+m, x, y, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
reconstruct!(x', t_, y')
end
return x
end
|
[
173,
203
] |
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if y !== x
copyto!(x, y)
end
else
broadcast!(*, x, y, s')
end
else
if isempty(s)
broadcast!(+, x, y, m')
else
broadcast!((y,m,s)->y*s+m, x, y, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
reconstruct!(x', t_, y')
end
return x
end
|
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if y !== x
copyto!(x, y)
end
else
broadcast!(*, x, y, s')
end
else
if isempty(s)
broadcast!(+, x, y, m')
else
broadcast!((y,m,s)->y*s+m, x, y, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
reconstruct!(x', t_, y')
end
return x
end
|
reconstruct!
| 173
| 203
|
src/transformations.jl
|
#FILE: StatsBase.jl/src/signalcorr.jl
##CHUNK 1
ns = size(y, 2)
m = length(lags)
(size(y, 1) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy = Vector{S}(undef, lx)
xx = dot(zx, zx)
for j = 1 : ns
demean_col!(zy, y, j, demean)
sc = sqrt(xx * dot(zy, zy))
for k = 1 : m
r[k,j] = _crossdot(zx, zy, lx, lags[k]) / sc
end
end
return r
end
##CHUNK 2
m = length(lags)
(length(y) == lx && length(r) == m) || throw(DimensionMismatch())
check_lags(lx, lags)
T = typeof(zero(eltype(x)) / 1)
zx::Vector{T} = demean ? x .- mean(x) : x
S = typeof(zero(eltype(y)) / 1)
zy::Vector{S} = demean ? y .- mean(y) : y
for k = 1 : m # foreach lag value
r[k] = _crossdot(zx, zy, lx, lags[k]) / lx
end
return r
end
function crosscov!(r::AbstractMatrix{<:Real}, x::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, lags::AbstractVector{<:Integer}; demean::Bool=true)
lx = size(x, 1)
ns = size(x, 2)
m = length(lags)
(length(y) == lx && size(r) == (m, ns)) || throw(DimensionMismatch())
check_lags(lx, lags)
#FILE: StatsBase.jl/src/rankcorr.jl
##CHUNK 1
n = length(x)
n == length(y) || throw(DimensionMismatch("vectors must have same length"))
(any(isnan, x) || any(isnan, y)) && return NaN
return cor(tiedrank(x), tiedrank(y))
end
function corspearman(X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real})
size(X, 1) == length(y) ||
throw(DimensionMismatch("X and y have inconsistent dimensions"))
n = size(X, 2)
C = Matrix{Float64}(I, n, 1)
any(isnan, y) && return fill!(C, NaN)
yrank = tiedrank(y)
for j = 1:n
Xj = view(X, :, j)
if any(isnan, Xj)
C[j,1] = NaN
else
Xjrank = tiedrank(Xj)
C[j,1] = cor(Xjrank, yrank)
#CURRENT FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
##CHUNK 2
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
##CHUNK 3
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
##CHUNK 4
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if x !== y
copyto!(y, x)
end
else
broadcast!(/, y, x, s')
end
else
if isempty(s)
##CHUNK 5
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
##CHUNK 6
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
##CHUNK 7
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
|
267
| 278
|
StatsBase.jl
| 332
|
function fit(::Type{UnitRangeTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, unit::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
|
function fit(::Type{UnitRangeTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, unit::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
|
[
267,
278
] |
function fit(::Type{UnitRangeTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, unit::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
|
function fit(::Type{UnitRangeTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, unit::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
|
fit
| 267
| 278
|
src/transformations.jl
|
#CURRENT FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
##CHUNK 2
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
##CHUNK 3
julia> StatsBase.transform(dt, X)
2×3 Matrix{Float64}:
0.5 0.0 1.0
0.0 0.5 1.0
```
"""
function _compute_extrema(X::AbstractMatrix, dims::Integer)
dims == 2 && return _compute_extrema(X', 1)
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
##CHUNK 4
lenmax == l || lenmax == 0 || throw(DimensionMismatch("Inconsistent dimensions."))
new{T, U}(l, dims, unit, min, max)
end
end
function Base.getproperty(t::UnitRangeTransform, p::Symbol)
if p === :indim || p === :outdim
return t.len
else
return getfield(t, p)
end
end
# fit a unit transform
"""
fit(UnitRangeTransform, X; dims=nothing, unit=true)
Fit a scaling parameters to vector or matrix `X`
and return a `UnitRangeTransform` transformation object.
##CHUNK 5
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
return x
##CHUNK 6
function fit(::Type{ZScoreTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, center::Bool=true, scale::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
if dims == 1
n, l = size(X)
n >= 2 || error("X must contain at least two rows.")
m, s = mean_and_std(X, 1)
elseif dims == 2
l, n = size(X)
n >= 2 || error("X must contain at least two columns.")
m, s = mean_and_std(X, 2)
else
throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
end
return ZScoreTransform(l, dims, (center ? vec(m) : similar(m, 0)),
(scale ? vec(s) : similar(s, 0)))
end
##CHUNK 7
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
##CHUNK 8
len::Int
dims::Int
unit::Bool
min::U
scale::U
function UnitRangeTransform(l::Int, dims::Int, unit::Bool, min::U, max::U) where {T, U<:AbstractVector{T}}
lenmin = length(min)
lenmax = length(max)
lenmin == l || lenmin == 0 || throw(DimensionMismatch("Inconsistent dimensions."))
lenmax == l || lenmax == 0 || throw(DimensionMismatch("Inconsistent dimensions."))
new{T, U}(l, dims, unit, min, max)
end
end
function Base.getproperty(t::UnitRangeTransform, p::Symbol)
if p === :indim || p === :outdim
return t.len
else
return getfield(t, p)
##CHUNK 9
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
##CHUNK 10
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
|
301
| 321
|
StatsBase.jl
| 333
|
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
|
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
|
[
301,
321
] |
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
|
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
|
transform!
| 301
| 321
|
src/transformations.jl
|
#CURRENT FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
##CHUNK 2
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
##CHUNK 3
end
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if y !== x
copyto!(x, y)
end
else
broadcast!(*, x, y, s')
end
##CHUNK 4
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
##CHUNK 5
dims == 2 && return _compute_extrema(X', 1)
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
##CHUNK 6
julia> StatsBase.transform(dt, X)
2×3 Matrix{Float64}:
0.5 0.0 1.0
0.0 0.5 1.0
```
"""
function fit(::Type{UnitRangeTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, unit::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
function _compute_extrema(X::AbstractMatrix, dims::Integer)
##CHUNK 7
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
function _compute_extrema(X::AbstractMatrix, dims::Integer)
dims == 2 && return _compute_extrema(X', 1)
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
##CHUNK 8
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
return x
end
"""
standardize(DT, X; dims=nothing, kwargs...)
Return a standardized copy of vector or matrix `X` along dimensions `dims`
using transformation `DT` which is a subtype of `AbstractDataTransform`:
- `ZScoreTransform`
##CHUNK 9
else
if isempty(s)
broadcast!(+, x, y, m')
else
broadcast!((y,m,s)->y*s+m, x, y, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
reconstruct!(x', t_, y')
end
return x
end
"""
UnitRangeTransform <: AbstractDataTransform
Unit range normalization
"""
struct UnitRangeTransform{T<:Real, U<:AbstractVector} <: AbstractDataTransform
##CHUNK 10
len::Int
dims::Int
unit::Bool
min::U
scale::U
function UnitRangeTransform(l::Int, dims::Int, unit::Bool, min::U, max::U) where {T, U<:AbstractVector{T}}
lenmin = length(min)
lenmax = length(max)
lenmin == l || lenmin == 0 || throw(DimensionMismatch("Inconsistent dimensions."))
lenmax == l || lenmax == 0 || throw(DimensionMismatch("Inconsistent dimensions."))
new{T, U}(l, dims, unit, min, max)
end
end
function Base.getproperty(t::UnitRangeTransform, p::Symbol)
if p === :indim || p === :outdim
return t.len
else
return getfield(t, p)
|
323
| 343
|
StatsBase.jl
| 334
|
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
return x
end
|
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
return x
end
|
[
323,
343
] |
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
return x
end
|
function reconstruct!(x::AbstractMatrix{<:Real}, t::UnitRangeTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((y,s,m)->y/s+m, x, y, tscale', tmin')
else
broadcast!(/, x, y, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
reconstruct!(x', t_, y')
end
return x
end
|
reconstruct!
| 323
| 343
|
src/transformations.jl
|
#CURRENT FILE: StatsBase.jl/src/transformations.jl
##CHUNK 1
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
##CHUNK 2
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
function transform!(y::AbstractMatrix{<:Real}, t::UnitRangeTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(x,1)
size(y,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
tmin = t.min
tscale = t.scale
##CHUNK 3
end
function reconstruct!(x::AbstractMatrix{<:Real}, t::ZScoreTransform, y::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
if isempty(m)
if isempty(s)
if y !== x
copyto!(x, y)
end
else
broadcast!(*, x, y, s')
end
##CHUNK 4
function fit(::Type{ZScoreTransform}, X::AbstractVector{<:Real};
dims::Integer=1, center::Bool=true, scale::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accepts dims=1 over a vector. Try fit(t, x, dims=1)."))
end
return fit(ZScoreTransform, reshape(X, :, 1); dims=dims, center=center, scale=scale)
end
function transform!(y::AbstractMatrix{<:Real}, t::ZScoreTransform, x::AbstractMatrix{<:Real})
if t.dims == 1
l = t.len
size(x,2) == size(y,2) == l || throw(DimensionMismatch("Inconsistent dimensions."))
n = size(y,1)
size(x,1) == n || throw(DimensionMismatch("Inconsistent dimensions."))
m = t.mean
s = t.scale
##CHUNK 5
julia> StatsBase.transform(dt, X)
2×3 Matrix{Float64}:
0.5 0.0 1.0
0.0 0.5 1.0
```
"""
function fit(::Type{UnitRangeTransform}, X::AbstractMatrix{<:Real};
dims::Union{Integer,Nothing}=nothing, unit::Bool=true)
if dims === nothing
Base.depwarn("fit(t, x) is deprecated: use fit(t, x, dims=2) instead", :fit)
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
function _compute_extrema(X::AbstractMatrix, dims::Integer)
##CHUNK 6
dims == 2 && return _compute_extrema(X', 1)
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
function fit(::Type{UnitRangeTransform}, X::AbstractVector{<:Real};
dims::Integer=1, unit::Bool=true)
if dims != 1
throw(DomainError(dims, "fit only accept dims=1 over a vector. Try fit(t, x, dims=1)."))
end
tmin, tmax = extrema(X)
tmax = 1 / (tmax - tmin)
return UnitRangeTransform(1, dims, unit, [tmin], [tmax])
end
##CHUNK 7
if t.unit
broadcast!((x,s,m)->(x-m)*s, y, x, tscale', tmin')
else
broadcast!(*, y, x, tscale')
end
elseif t.dims == 2
t_ = UnitRangeTransform(t.len, 1, t.unit, t.min, t.scale)
transform!(y', t_, x')
end
return y
end
"""
standardize(DT, X; dims=nothing, kwargs...)
Return a standardized copy of vector or matrix `X` along dimensions `dims`
using transformation `DT` which is a subtype of `AbstractDataTransform`:
- `ZScoreTransform`
##CHUNK 8
dims = 2
end
dims ∈ (1, 2) || throw(DomainError(dims, "fit only accept dims to be 1 or 2."))
tmin, tmax = _compute_extrema(X, dims)
@. tmax = 1 / (tmax - tmin)
l = length(tmin)
return UnitRangeTransform(l, dims, unit, tmin, tmax)
end
function _compute_extrema(X::AbstractMatrix, dims::Integer)
dims == 2 && return _compute_extrema(X', 1)
l = size(X, 2)
tmin = similar(X, l)
tmax = similar(X, l)
for i in 1:l
@inbounds tmin[i], tmax[i] = extrema(@view(X[:, i]))
end
return tmin, tmax
end
##CHUNK 9
else
if isempty(s)
broadcast!(+, x, y, m')
else
broadcast!((y,m,s)->y*s+m, x, y, m', s')
end
end
elseif t.dims == 2
t_ = ZScoreTransform(t.len, 1, t.mean, t.scale)
reconstruct!(x', t_, y')
end
return x
end
"""
UnitRangeTransform <: AbstractDataTransform
Unit range normalization
"""
struct UnitRangeTransform{T<:Real, U<:AbstractVector} <: AbstractDataTransform
##CHUNK 10
len::Int
dims::Int
unit::Bool
min::U
scale::U
function UnitRangeTransform(l::Int, dims::Int, unit::Bool, min::U, max::U) where {T, U<:AbstractVector{T}}
lenmin = length(min)
lenmax = length(max)
lenmin == l || lenmin == 0 || throw(DimensionMismatch("Inconsistent dimensions."))
lenmax == l || lenmax == 0 || throw(DimensionMismatch("Inconsistent dimensions."))
new{T, U}(l, dims, unit, min, max)
end
end
function Base.getproperty(t::UnitRangeTransform, p::Symbol)
if p === :indim || p === :outdim
return t.len
else
return getfield(t, p)
|
626
| 691
|
StatsBase.jl
| 335
|
function quantile(v::AbstractVector{<:Real}{V}, w::AbstractWeights{W}, p::AbstractVector{<:Real}) where {V,W<:Real}
# checks
isempty(v) && throw(ArgumentError("quantile of an empty array is undefined"))
isempty(p) && throw(ArgumentError("empty quantile array"))
isfinite(sum(w)) || throw(ArgumentError("only finite weights are supported"))
all(x -> 0 <= x <= 1, p) || throw(ArgumentError("input probability out of [0,1] range"))
w.sum == 0 && throw(ArgumentError("weight vector cannot sum to zero"))
length(v) == length(w) || throw(ArgumentError("data and weight vectors must be the same size," *
"got $(length(v)) and $(length(w))"))
for x in w.values
x < 0 && throw(ArgumentError("weight vector cannot contain negative entries"))
end
isa(w, FrequencyWeights) && !(eltype(w) <: Integer) && any(!isinteger, w) &&
throw(ArgumentError("The values of the vector of `FrequencyWeights` must be numerically" *
"equal to integers. Use `ProbabilityWeights` or `AnalyticWeights` instead."))
# remove zeros weights and sort
wsum = sum(w)
nz = .!iszero.(w)
vw = sort!(collect(zip(view(v, nz), view(w, nz))))
N = length(vw)
# prepare percentiles
ppermute = sortperm(p)
p = p[ppermute]
# prepare out vector
out = Vector{typeof(zero(V)/1)}(undef, length(p))
fill!(out, vw[end][1])
@inbounds for x in v
isnan(x) && return fill!(out, x)
end
# loop on quantiles
Sk, Skold = zero(W), zero(W)
vk, vkold = zero(V), zero(V)
k = 0
w1 = vw[1][2]
for i in 1:length(p)
if isa(w, FrequencyWeights)
h = p[i] * (wsum - 1) + 1
else
h = p[i] * (wsum - w1) + w1
end
while Sk <= h
k += 1
if k > N
# out was initialized with maximum v
return out
end
Skold, vkold = Sk, vk
vk, wk = vw[k]
Sk += wk
end
if isa(w, FrequencyWeights)
out[ppermute[i]] = vkold + min(h - Skold, 1) * (vk - vkold)
else
out[ppermute[i]] = vkold + (h - Skold) / (Sk - Skold) * (vk - vkold)
end
end
return out
end
|
function quantile(v::AbstractVector{<:Real}{V}, w::AbstractWeights{W}, p::AbstractVector{<:Real}) where {V,W<:Real}
# checks
isempty(v) && throw(ArgumentError("quantile of an empty array is undefined"))
isempty(p) && throw(ArgumentError("empty quantile array"))
isfinite(sum(w)) || throw(ArgumentError("only finite weights are supported"))
all(x -> 0 <= x <= 1, p) || throw(ArgumentError("input probability out of [0,1] range"))
w.sum == 0 && throw(ArgumentError("weight vector cannot sum to zero"))
length(v) == length(w) || throw(ArgumentError("data and weight vectors must be the same size," *
"got $(length(v)) and $(length(w))"))
for x in w.values
x < 0 && throw(ArgumentError("weight vector cannot contain negative entries"))
end
isa(w, FrequencyWeights) && !(eltype(w) <: Integer) && any(!isinteger, w) &&
throw(ArgumentError("The values of the vector of `FrequencyWeights` must be numerically" *
"equal to integers. Use `ProbabilityWeights` or `AnalyticWeights` instead."))
# remove zeros weights and sort
wsum = sum(w)
nz = .!iszero.(w)
vw = sort!(collect(zip(view(v, nz), view(w, nz))))
N = length(vw)
# prepare percentiles
ppermute = sortperm(p)
p = p[ppermute]
# prepare out vector
out = Vector{typeof(zero(V)/1)}(undef, length(p))
fill!(out, vw[end][1])
@inbounds for x in v
isnan(x) && return fill!(out, x)
end
# loop on quantiles
Sk, Skold = zero(W), zero(W)
vk, vkold = zero(V), zero(V)
k = 0
w1 = vw[1][2]
for i in 1:length(p)
if isa(w, FrequencyWeights)
h = p[i] * (wsum - 1) + 1
else
h = p[i] * (wsum - w1) + w1
end
while Sk <= h
k += 1
if k > N
# out was initialized with maximum v
return out
end
Skold, vkold = Sk, vk
vk, wk = vw[k]
Sk += wk
end
if isa(w, FrequencyWeights)
out[ppermute[i]] = vkold + min(h - Skold, 1) * (vk - vkold)
else
out[ppermute[i]] = vkold + (h - Skold) / (Sk - Skold) * (vk - vkold)
end
end
return out
end
|
[
626,
691
] |
function quantile(v::AbstractVector{<:Real}{V}, w::AbstractWeights{W}, p::AbstractVector{<:Real}) where {V,W<:Real}
# checks
isempty(v) && throw(ArgumentError("quantile of an empty array is undefined"))
isempty(p) && throw(ArgumentError("empty quantile array"))
isfinite(sum(w)) || throw(ArgumentError("only finite weights are supported"))
all(x -> 0 <= x <= 1, p) || throw(ArgumentError("input probability out of [0,1] range"))
w.sum == 0 && throw(ArgumentError("weight vector cannot sum to zero"))
length(v) == length(w) || throw(ArgumentError("data and weight vectors must be the same size," *
"got $(length(v)) and $(length(w))"))
for x in w.values
x < 0 && throw(ArgumentError("weight vector cannot contain negative entries"))
end
isa(w, FrequencyWeights) && !(eltype(w) <: Integer) && any(!isinteger, w) &&
throw(ArgumentError("The values of the vector of `FrequencyWeights` must be numerically" *
"equal to integers. Use `ProbabilityWeights` or `AnalyticWeights` instead."))
# remove zeros weights and sort
wsum = sum(w)
nz = .!iszero.(w)
vw = sort!(collect(zip(view(v, nz), view(w, nz))))
N = length(vw)
# prepare percentiles
ppermute = sortperm(p)
p = p[ppermute]
# prepare out vector
out = Vector{typeof(zero(V)/1)}(undef, length(p))
fill!(out, vw[end][1])
@inbounds for x in v
isnan(x) && return fill!(out, x)
end
# loop on quantiles
Sk, Skold = zero(W), zero(W)
vk, vkold = zero(V), zero(V)
k = 0
w1 = vw[1][2]
for i in 1:length(p)
if isa(w, FrequencyWeights)
h = p[i] * (wsum - 1) + 1
else
h = p[i] * (wsum - w1) + w1
end
while Sk <= h
k += 1
if k > N
# out was initialized with maximum v
return out
end
Skold, vkold = Sk, vk
vk, wk = vw[k]
Sk += wk
end
if isa(w, FrequencyWeights)
out[ppermute[i]] = vkold + min(h - Skold, 1) * (vk - vkold)
else
out[ppermute[i]] = vkold + (h - Skold) / (Sk - Skold) * (vk - vkold)
end
end
return out
end
|
function quantile(v::AbstractVector{<:Real}{V}, w::AbstractWeights{W}, p::AbstractVector{<:Real}) where {V,W<:Real}
# checks
isempty(v) && throw(ArgumentError("quantile of an empty array is undefined"))
isempty(p) && throw(ArgumentError("empty quantile array"))
isfinite(sum(w)) || throw(ArgumentError("only finite weights are supported"))
all(x -> 0 <= x <= 1, p) || throw(ArgumentError("input probability out of [0,1] range"))
w.sum == 0 && throw(ArgumentError("weight vector cannot sum to zero"))
length(v) == length(w) || throw(ArgumentError("data and weight vectors must be the same size," *
"got $(length(v)) and $(length(w))"))
for x in w.values
x < 0 && throw(ArgumentError("weight vector cannot contain negative entries"))
end
isa(w, FrequencyWeights) && !(eltype(w) <: Integer) && any(!isinteger, w) &&
throw(ArgumentError("The values of the vector of `FrequencyWeights` must be numerically" *
"equal to integers. Use `ProbabilityWeights` or `AnalyticWeights` instead."))
# remove zeros weights and sort
wsum = sum(w)
nz = .!iszero.(w)
vw = sort!(collect(zip(view(v, nz), view(w, nz))))
N = length(vw)
# prepare percentiles
ppermute = sortperm(p)
p = p[ppermute]
# prepare out vector
out = Vector{typeof(zero(V)/1)}(undef, length(p))
fill!(out, vw[end][1])
@inbounds for x in v
isnan(x) && return fill!(out, x)
end
# loop on quantiles
Sk, Skold = zero(W), zero(W)
vk, vkold = zero(V), zero(V)
k = 0
w1 = vw[1][2]
for i in 1:length(p)
if isa(w, FrequencyWeights)
h = p[i] * (wsum - 1) + 1
else
h = p[i] * (wsum - w1) + w1
end
while Sk <= h
k += 1
if k > N
# out was initialized with maximum v
return out
end
Skold, vkold = Sk, vk
vk, wk = vw[k]
Sk += wk
end
if isa(w, FrequencyWeights)
out[ppermute[i]] = vkold + min(h - Skold, 1) * (vk - vkold)
else
out[ppermute[i]] = vkold + (h - Skold) / (Sk - Skold) * (vk - vkold)
end
end
return out
end
|
quantile
| 626
| 691
|
src/weights.jl
|
#FILE: StatsBase.jl/src/sampling.jl
##CHUNK 1
processing time to draw ``k`` elements. It consumes ``O(k \\log(n / k))`` random numbers.
"""
function efraimidis_aexpj_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray;
ordered::Bool=false)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
k > 0 || return x
# initialize priority queue
pq = Vector{Pair{Float64,Int}}(undef, k)
i = 0
##CHUNK 2
naive_wsample_norep!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Naive implementation of weighted sampling without replacement.
It makes a copy of the weight vector at initialization, and sets the weight to zero
when the corresponding sample is picked.
Noting `k=length(x)` and `n=length(a)`, this algorithm consumes ``O(k)`` random numbers,
and has overall time complexity ``O(n k)``.
"""
function naive_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
wsum = sum(wv)
isfinite(wsum) || throw(ArgumentError("only finite weights are supported"))
##CHUNK 3
Reference: Efraimidis, P. S., Spirakis, P. G. "Weighted random sampling with a reservoir."
*Information Processing Letters*, 97 (5), 181-185, 2006. doi:10.1016/j.ipl.2005.11.003.
Noting `k=length(x)` and `n=length(a)`, this algorithm takes ``O(n + k \\log k)``
processing time to draw ``k`` elements. It consumes ``n`` random numbers.
"""
function efraimidis_a_wsample_norep!(rng::AbstractRNG, a::AbstractArray,
wv::AbstractWeights, x::AbstractArray)
Base.mightalias(a, x) &&
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
##CHUNK 4
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("Inconsistent lengths."))
for i = 1:length(x)
x[i] = a[sample(rng, wv)]
end
return x
end
direct_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
direct_sample!(default_rng(), a, wv, x)
"""
alias_sample!([rng], a::AbstractArray, wv::AbstractWeights, x::AbstractArray)
Alias method.
##CHUNK 5
throw(ArgumentError("output array x must not share memory with input array a"))
1 == firstindex(a) == firstindex(wv) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
length(wv) == length(a) || throw(DimensionMismatch("Inconsistent lengths."))
# create alias table
at = AliasTable(wv)
# sampling
for i in eachindex(x)
j = rand(rng, at)
x[i] = a[j]
end
return x
end
alias_sample!(a::AbstractArray, wv::AbstractWeights, x::AbstractArray) =
alias_sample!(default_rng(), a, wv, x)
"""
##CHUNK 6
throw(ArgumentError("output array x must not share memory with input array a"))
Base.mightalias(x, wv) &&
throw(ArgumentError("output array x must not share memory with weights array wv"))
1 == firstindex(a) == firstindex(wv) == firstindex(x) ||
throw(ArgumentError("non 1-based arrays are not supported"))
isfinite(sum(wv)) || throw(ArgumentError("only finite weights are supported"))
n = length(a)
length(wv) == n || throw(DimensionMismatch("a and wv must be of same length (got $n and $(length(wv)))."))
k = length(x)
# calculate keys for all items
keys = randexp(rng, n)
for i in 1:n
@inbounds keys[i] = wv.values[i]/keys[i]
end
# return items with largest keys
index = sortperm(keys; alg = PartialQuickSort(k), rev = true)
for i in 1:k
@inbounds x[i] = a[index[i]]
#FILE: StatsBase.jl/test/wsampling.jl
##CHUNK 1
for rev in (true, false), T in (Int, Int16, Float64, Float16, BigInt, ComplexF64, Rational{Int})
r = rev ? reverse(4:7) : (4:7)
r = T===Int ? r : T.(r)
aa = Int.(sample(r, wv, n; ordered=true))
check_wsample_wrep(aa, (4, 7), wv, 5.0e-3; ordered=true, rev=rev)
aa = Int.(sample(r, wv, 10; ordered=true))
check_wsample_wrep(aa, (4, 7), wv, -1; ordered=true, rev=rev)
end
#### weighted sampling without replacement
function check_wsample_norep(a::AbstractArray, vrgn, wv::AbstractWeights, ptol::Real;
ordered::Bool=false, rev::Bool=false)
# each column of a for one run
vmin, vmax = vrgn
(amin, amax) = extrema(a)
@test vmin <= amin <= amax <= vmax
n = vmax - vmin + 1
#CURRENT FILE: StatsBase.jl/src/weights.jl
##CHUNK 1
Weights must not be negative. The weights and data vectors must have the same length.
`NaN` is returned if `x` contains any `NaN` values. An error is raised if `w` contains
any `NaN` values.
With [`FrequencyWeights`](@ref), the function returns the same result as
`quantile` for a vector with repeated values. Weights must be integers.
With non `FrequencyWeights`, denote ``N`` the length of the vector, ``w`` the vector of weights,
``h = p (\\sum_{i ≤ N} w_i - w_1) + w_1`` the cumulative weight corresponding to the
probability ``p`` and ``S_k = \\sum_{i \\leq k} w_i`` the cumulative weight for each
observation, define ``v_{k+1}`` the smallest element of `v` such that ``S_{k+1}``
is strictly superior to ``h``. The weighted ``p`` quantile is given by ``v_k + γ (v_{k+1} - v_k)``
with ``γ = (h - S_k)/(S_{k+1} - S_k)``. In particular, when all weights are equal,
the function returns the same result as the unweighted `quantile`.
"""
function quantile(v::AbstractVector{<:Real}, w::UnitWeights, p::AbstractVector{<:Real})
length(v) != length(w) && throw(DimensionMismatch("Inconsistent array dimension."))
return quantile(v, p)
end
##CHUNK 2
return mean(A, dims=dims)
end
##### Weighted quantile #####
"""
quantile(v, w::AbstractWeights, p)
Compute the weighted quantiles of a vector `v` at a specified set of probability
values `p`, using weights given by a weight vector `w` (of type `AbstractWeights`).
Weights must not be negative. The weights and data vectors must have the same length.
`NaN` is returned if `x` contains any `NaN` values. An error is raised if `w` contains
any `NaN` values.
With [`FrequencyWeights`](@ref), the function returns the same result as
`quantile` for a vector with repeated values. Weights must be integers.
With non `FrequencyWeights`, denote ``N`` the length of the vector, ``w`` the vector of weights,
``h = p (\\sum_{i ≤ N} w_i - w_1) + w_1`` the cumulative weight corresponding to the
probability ``p`` and ``S_k = \\sum_{i \\leq k} w_i`` the cumulative weight for each
##CHUNK 3
observation, define ``v_{k+1}`` the smallest element of `v` such that ``S_{k+1}``
is strictly superior to ``h``. The weighted ``p`` quantile is given by ``v_k + γ (v_{k+1} - v_k)``
with ``γ = (h - S_k)/(S_{k+1} - S_k)``. In particular, when all weights are equal,
the function returns the same result as the unweighted `quantile`.
"""
function quantile(v::AbstractVector{<:Real}, w::UnitWeights, p::AbstractVector{<:Real})
length(v) != length(w) && throw(DimensionMismatch("Inconsistent array dimension."))
return quantile(v, p)
end
quantile(v::AbstractVector{<:Real}, w::AbstractWeights{<:Real}, p::Number) = quantile(v, w, [p])[1]
##### Weighted median #####
"""
median(v::AbstractVector{<:Real}, w::AbstractWeights)
Compute the weighted median of `v` with weights `w`
(of type `AbstractWeights`). See the documentation for [`quantile`](@ref) for more details.
|
20
| 30
|
Turing.jl
| 336
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
|
[
20,
30
] |
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
|
AbstractMCMC.sample
| 20
| 30
|
src/mcmc/abstractmcmc.jl
|
#FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
else
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type,
initial_state,
progress=progress,
nparticles=N,
kwargs...,
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
##CHUNK 2
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type=chain_type,
progress=progress,
nparticles=N,
kwargs...,
)
else
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type,
initial_state,
progress=progress,
nparticles=N,
#FILE: Turing.jl/src/mcmc/repeat_sampler.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#CURRENT FILE: Turing.jl/src/mcmc/abstractmcmc.jl
##CHUNK 1
)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
##CHUNK 2
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
##CHUNK 3
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
##CHUNK 4
# TODO: Implement additional checks for certain samplers, e.g.
# HMC not supporting discrete parameters.
function _check_model(model::DynamicPPL.Model)
return DynamicPPL.check_model(model; error_on_failure=true)
end
function _check_model(model::DynamicPPL.Model, alg::InferenceAlgorithm)
return _check_model(model)
end
#########################################
# Default definitions for the interface #
#########################################
function AbstractMCMC.sample(
model::AbstractModel, alg::InferenceAlgorithm, N::Integer; kwargs...
)
return AbstractMCMC.sample(Random.default_rng(), model, alg, N; kwargs...)
end
##CHUNK 5
# Default definitions for the interface #
#########################################
function AbstractMCMC.sample(
model::AbstractModel, alg::InferenceAlgorithm, N::Integer; kwargs...
)
return AbstractMCMC.sample(Random.default_rng(), model, alg, N; kwargs...)
end
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
##CHUNK 6
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
end
|
32
| 43
|
Turing.jl
| 337
|
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
|
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
|
[
32,
43
] |
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
|
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
|
AbstractMCMC.sample
| 32
| 43
|
src/mcmc/abstractmcmc.jl
|
#FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
else
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type,
initial_state,
progress=progress,
nparticles=N,
kwargs...,
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
##CHUNK 2
model::DynamicPPL.Model,
sampler::Sampler{<:SMC},
N::Integer;
chain_type=DynamicPPL.default_chain_type(sampler),
resume_from=nothing,
initial_state=DynamicPPL.loadstate(resume_from),
progress=PROGRESS[],
kwargs...,
)
if resume_from === nothing
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type=chain_type,
progress=progress,
nparticles=N,
kwargs...,
)
##CHUNK 3
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type=chain_type,
progress=progress,
nparticles=N,
kwargs...,
)
else
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type,
initial_state,
progress=progress,
nparticles=N,
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#CURRENT FILE: Turing.jl/src/mcmc/abstractmcmc.jl
##CHUNK 1
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
##CHUNK 2
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
##CHUNK 3
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
end
##CHUNK 4
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
##CHUNK 5
# Default definitions for the interface #
#########################################
function AbstractMCMC.sample(
model::AbstractModel, alg::InferenceAlgorithm, N::Integer; kwargs...
)
return AbstractMCMC.sample(Random.default_rng(), model, alg, N; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
##CHUNK 6
# TODO: Implement additional checks for certain samplers, e.g.
# HMC not supporting discrete parameters.
function _check_model(model::DynamicPPL.Model)
return DynamicPPL.check_model(model; error_on_failure=true)
end
function _check_model(model::DynamicPPL.Model, alg::InferenceAlgorithm)
return _check_model(model)
end
#########################################
# Default definitions for the interface #
#########################################
function AbstractMCMC.sample(
model::AbstractModel, alg::InferenceAlgorithm, N::Integer; kwargs...
)
return AbstractMCMC.sample(Random.default_rng(), model, alg, N; kwargs...)
end
function AbstractMCMC.sample(
|
45
| 57
|
Turing.jl
| 338
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
|
[
45,
57
] |
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
|
AbstractMCMC.sample
| 45
| 57
|
src/mcmc/abstractmcmc.jl
|
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#FILE: Turing.jl/src/mcmc/repeat_sampler.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
#FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
else
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type,
initial_state,
progress=progress,
nparticles=N,
kwargs...,
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
##CHUNK 2
model::DynamicPPL.Model,
sampler::Sampler{<:SMC},
N::Integer;
chain_type=DynamicPPL.default_chain_type(sampler),
resume_from=nothing,
initial_state=DynamicPPL.loadstate(resume_from),
progress=PROGRESS[],
kwargs...,
)
if resume_from === nothing
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type=chain_type,
progress=progress,
nparticles=N,
kwargs...,
)
#CURRENT FILE: Turing.jl/src/mcmc/abstractmcmc.jl
##CHUNK 1
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
##CHUNK 2
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
##CHUNK 3
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
##CHUNK 4
# Default definitions for the interface #
#########################################
function AbstractMCMC.sample(
model::AbstractModel, alg::InferenceAlgorithm, N::Integer; kwargs...
)
return AbstractMCMC.sample(Random.default_rng(), model, alg, N; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
##CHUNK 5
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
end
##CHUNK 6
# TODO: Implement additional checks for certain samplers, e.g.
# HMC not supporting discrete parameters.
function _check_model(model::DynamicPPL.Model)
return DynamicPPL.check_model(model; error_on_failure=true)
end
function _check_model(model::DynamicPPL.Model, alg::InferenceAlgorithm)
return _check_model(model)
end
#########################################
# Default definitions for the interface #
#########################################
function AbstractMCMC.sample(
model::AbstractModel, alg::InferenceAlgorithm, N::Integer; kwargs...
)
return AbstractMCMC.sample(Random.default_rng(), model, alg, N; kwargs...)
end
function AbstractMCMC.sample(
|
59
| 81
|
Turing.jl
| 339
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
end
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
end
|
[
59,
81
] |
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
end
|
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
end
|
AbstractMCMC.sample
| 59
| 81
|
src/mcmc/abstractmcmc.jl
|
#FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
model::DynamicPPL.Model,
sampler::Sampler{<:SMC},
N::Integer;
chain_type=DynamicPPL.default_chain_type(sampler),
resume_from=nothing,
initial_state=DynamicPPL.loadstate(resume_from),
progress=PROGRESS[],
kwargs...,
)
if resume_from === nothing
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type=chain_type,
progress=progress,
nparticles=N,
kwargs...,
)
#FILE: Turing.jl/src/mcmc/repeat_sampler.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
##CHUNK 2
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step_warmup(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
#FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
end
DynamicPPL.initialsampler(::Sampler{<:Hamiltonian}) = SampleFromUniform()
# Handle setting `nadapts` and `discard_initial`
function AbstractMCMC.sample(
rng::AbstractRNG,
model::DynamicPPL.Model,
sampler::Sampler{<:AdaptiveHamiltonian},
N::Integer;
chain_type=DynamicPPL.default_chain_type(sampler),
resume_from=nothing,
initial_state=DynamicPPL.loadstate(resume_from),
progress=PROGRESS[],
nadapts=sampler.alg.n_adapts,
discard_adapt=true,
discard_initial=-1,
kwargs...,
)
if resume_from === nothing
##CHUNK 2
N;
chain_type=chain_type,
progress=progress,
nadapts=_nadapts,
discard_initial=_discard_initial,
kwargs...,
)
else
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type=chain_type,
initial_state=initial_state,
progress=progress,
nadapts=0,
discard_adapt=false,
discard_initial=0,
kwargs...,
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
# Default MCMCChains.Chains constructor.
# This is type piracy (at least for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
#CURRENT FILE: Turing.jl/src/mcmc/abstractmcmc.jl
##CHUNK 1
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
##CHUNK 2
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
##CHUNK 3
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
function AbstractMCMC.sample(
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
kwargs...,
)
return AbstractMCMC.sample(
##CHUNK 4
# Default definitions for the interface #
#########################################
function AbstractMCMC.sample(
model::AbstractModel, alg::InferenceAlgorithm, N::Integer; kwargs...
)
return AbstractMCMC.sample(Random.default_rng(), model, alg, N; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
N::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), N; kwargs...)
end
|
34
| 76
|
Turing.jl
| 340
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
vi = DynamicPPL.initialize_parameters!!(vi, init, model)
# Update log joint probability.
last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromPrior()))
end
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
vi = DynamicPPL.initialize_parameters!!(vi, init, model)
# Update log joint probability.
last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromPrior()))
end
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
|
[
34,
76
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
vi = DynamicPPL.initialize_parameters!!(vi, init, model)
# Update log joint probability.
last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromPrior()))
end
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
vi = DynamicPPL.initialize_parameters!!(vi, init, model)
# Update log joint probability.
last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromPrior()))
end
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
|
length
| 34
| 76
|
src/mcmc/emcee.jl
|
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
return Transition(model, vi), vi
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:MH}, vi::AbstractVarInfo; kwargs...
)
# Cases:
# 1. A covariance proposal matrix
# 2. A bunch of NamedTuples that specify the proposal space
vi = propose!!(rng, vi, model, spl, spl.alg.proposals)
##CHUNK 2
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
##CHUNK 3
return Transition(model, vi), vi
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:MH}, vi::AbstractVarInfo; kwargs...
)
# Cases:
# 1. A covariance proposal matrix
# 2. A bunch of NamedTuples that specify the proposal space
vi = propose!!(rng, vi, model, spl, spl.alg.proposals)
return Transition(model, vi), vi
end
####
#### Compiler interface, i.e. tilde operators.
####
function DynamicPPL.assume(
rng::Random.AbstractRNG, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi
)
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
##CHUNK 2
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 2
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
#FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
#CURRENT FILE: Turing.jl/src/mcmc/emcee.jl
|
78
| 99
|
Turing.jl
| 341
|
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs...
)
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
transition = map(states) do _state
vi = DynamicPPL.unflatten(vi, _state.params)
t = Transition(getparams(model, vi), _state.lp)
return t
end
newstate = EmceeState(vi, states)
return transition, newstate
end
|
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs...
)
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
transition = map(states) do _state
vi = DynamicPPL.unflatten(vi, _state.params)
t = Transition(getparams(model, vi), _state.lp)
return t
end
newstate = EmceeState(vi, states)
return transition, newstate
end
|
[
78,
99
] |
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs...
)
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
transition = map(states) do _state
vi = DynamicPPL.unflatten(vi, _state.params)
t = Transition(getparams(model, vi), _state.lp)
return t
end
newstate = EmceeState(vi, states)
return transition, newstate
end
|
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs...
)
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
transition = map(states) do _state
vi = DynamicPPL.unflatten(vi, _state.params)
t = Transition(getparams(model, vi), _state.lp)
return t
end
newstate = EmceeState(vi, states)
return transition, newstate
end
|
AbstractMCMC.step
| 78
| 99
|
src/mcmc/emcee.jl
|
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 2
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 3
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
return sample, newstate
end
end
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
##CHUNK 2
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
##CHUNK 2
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
##CHUNK 3
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
#CURRENT FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
function AbstractMCMC.bundle_samples(
samples::Vector{<:Vector},
|
101
| 163
|
Turing.jl
| 342
|
function AbstractMCMC.bundle_samples(
samples::Vector{<:Vector},
model::AbstractModel,
spl::Sampler{<:Emcee},
state::EmceeState,
chain_type::Type{MCMCChains.Chains};
save_state=false,
sort_chain=false,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
# `hcat` first to ensure we get the right `eltype`.
x = hcat(first(vals_vec), first(extra_values_vec))
# Pre-allocate to minimize memory usage.
parray = Array{eltype(x),3}(undef, length(vals_vec), size(x, 2), size(x, 1))
for (i, (vals, extras)) in enumerate(zip(vals_vec, extra_values_vec))
parray[i, :, :] = transpose(hcat(vals, extras))
end
# Get the average or final log evidence, if it exists.
le = getlogevidence(samples, state, spl)
# Set up the info tuple.
info = (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),)
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Concretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
|
function AbstractMCMC.bundle_samples(
samples::Vector{<:Vector},
model::AbstractModel,
spl::Sampler{<:Emcee},
state::EmceeState,
chain_type::Type{MCMCChains.Chains};
save_state=false,
sort_chain=false,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
# `hcat` first to ensure we get the right `eltype`.
x = hcat(first(vals_vec), first(extra_values_vec))
# Pre-allocate to minimize memory usage.
parray = Array{eltype(x),3}(undef, length(vals_vec), size(x, 2), size(x, 1))
for (i, (vals, extras)) in enumerate(zip(vals_vec, extra_values_vec))
parray[i, :, :] = transpose(hcat(vals, extras))
end
# Get the average or final log evidence, if it exists.
le = getlogevidence(samples, state, spl)
# Set up the info tuple.
info = (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),)
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Concretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
|
[
101,
163
] |
function AbstractMCMC.bundle_samples(
samples::Vector{<:Vector},
model::AbstractModel,
spl::Sampler{<:Emcee},
state::EmceeState,
chain_type::Type{MCMCChains.Chains};
save_state=false,
sort_chain=false,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
# `hcat` first to ensure we get the right `eltype`.
x = hcat(first(vals_vec), first(extra_values_vec))
# Pre-allocate to minimize memory usage.
parray = Array{eltype(x),3}(undef, length(vals_vec), size(x, 2), size(x, 1))
for (i, (vals, extras)) in enumerate(zip(vals_vec, extra_values_vec))
parray[i, :, :] = transpose(hcat(vals, extras))
end
# Get the average or final log evidence, if it exists.
le = getlogevidence(samples, state, spl)
# Set up the info tuple.
info = (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),)
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Concretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
|
function AbstractMCMC.bundle_samples(
samples::Vector{<:Vector},
model::AbstractModel,
spl::Sampler{<:Emcee},
state::EmceeState,
chain_type::Type{MCMCChains.Chains};
save_state=false,
sort_chain=false,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
# `hcat` first to ensure we get the right `eltype`.
x = hcat(first(vals_vec), first(extra_values_vec))
# Pre-allocate to minimize memory usage.
parray = Array{eltype(x),3}(undef, length(vals_vec), size(x, 2), size(x, 1))
for (i, (vals, extras)) in enumerate(zip(vals_vec, extra_values_vec))
parray[i, :, :] = transpose(hcat(vals, extras))
end
# Get the average or final log evidence, if it exists.
le = getlogevidence(samples, state, spl)
# Set up the info tuple.
info = (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),)
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Concretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
|
AbstractMCMC.bundle_samples
| 101
| 163
|
src/mcmc/emcee.jl
|
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
##CHUNK 2
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
# Set up the info tuple.
info = NamedTuple()
if include_varname_to_symbol
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
##CHUNK 3
# Default MCMCChains.Chains constructor.
# This is type piracy (at least for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
##CHUNK 4
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
# This is type piracy (for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
##CHUNK 5
# Set up the info tuple.
info = NamedTuple()
if include_varname_to_symbol
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Merge in the timing info, if available
if !ismissing(stats)
info = merge(info, (start_time=stats.start, stop_time=stats.stop))
end
# Conretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
##CHUNK 6
return sort_chain ? sort(chain) : chain
end
# This is type piracy (for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
##CHUNK 7
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
return names_unique, values
end
getlogevidence(transitions, sampler, state) = missing
# Default MCMCChains.Chains constructor.
# This is type piracy (at least for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
##CHUNK 8
return getparams(model, DynamicPPL.typed_varinfo(untyped_vi))
end
function getparams(::DynamicPPL.Model, ::DynamicPPL.VarInfo{NamedTuple{(),Tuple{}}})
return Dict{VarName,Any}()
end
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
##CHUNK 9
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
##CHUNK 10
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
#CURRENT FILE: Turing.jl/src/mcmc/emcee.jl
|
37
| 65
|
Turing.jl
| 343
|
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs...
)
# obtain previous sample
f = vi[:]
# define previous sampler state
# (do not use cache to avoid in-place sampling from prior)
oldstate = EllipticalSliceSampling.ESSState(f, getlogp(vi), nothing)
# compute next state
sample, state = AbstractMCMC.step(
rng,
EllipticalSliceSampling.ESSModel(
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
|
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs...
)
# obtain previous sample
f = vi[:]
# define previous sampler state
# (do not use cache to avoid in-place sampling from prior)
oldstate = EllipticalSliceSampling.ESSState(f, getlogp(vi), nothing)
# compute next state
sample, state = AbstractMCMC.step(
rng,
EllipticalSliceSampling.ESSModel(
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
|
[
37,
65
] |
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs...
)
# obtain previous sample
f = vi[:]
# define previous sampler state
# (do not use cache to avoid in-place sampling from prior)
oldstate = EllipticalSliceSampling.ESSState(f, getlogp(vi), nothing)
# compute next state
sample, state = AbstractMCMC.step(
rng,
EllipticalSliceSampling.ESSModel(
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
|
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs...
)
# obtain previous sample
f = vi[:]
# define previous sampler state
# (do not use cache to avoid in-place sampling from prior)
oldstate = EllipticalSliceSampling.ESSState(f, getlogp(vi), nothing)
# compute next state
sample, state = AbstractMCMC.step(
rng,
EllipticalSliceSampling.ESSModel(
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
|
AbstractMCMC.step
| 37
| 65
|
src/mcmc/ess.jl
|
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
##CHUNK 2
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
##CHUNK 3
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
##CHUNK 4
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
function AbstractMCMC.step(
##CHUNK 5
struct SGLDState{L,V<:AbstractVarInfo}
logdensity::L
vi::V
step::Int
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 2
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#FILE: Turing.jl/src/mcmc/is.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::AbstractRNG, model::Model, spl::Sampler{<:IS}, vi::AbstractVarInfo; kwargs...
)
return Transition(model, vi), nothing
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:IS}, ::Nothing; kwargs...
)
vi = VarInfo(rng, model, spl)
return Transition(model, vi), nothing
end
# Calculate evidence.
function getlogevidence(samples::Vector{<:Transition}, ::Sampler{<:IS}, state)
return logsumexp(map(x -> x.lp, samples)) - log(length(samples))
end
function DynamicPPL.assume(rng, ::Sampler{<:IS}, dist::Distribution, vn::VarName, vi)
if haskey(vi, vn)
#CURRENT FILE: Turing.jl/src/mcmc/ess.jl
|
74
| 85
|
Turing.jl
| 344
|
function ESSPrior{M,S,V}(
model::M, sampler::S, varinfo::V
) where {M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo}
vns = keys(varinfo)
μ = mapreduce(vcat, vns) do vn
dist = getdist(varinfo, vn)
EllipticalSliceSampling.isgaussian(typeof(dist)) ||
error("[ESS] only supports Gaussian prior distributions")
DynamicPPL.tovec(mean(dist))
end
return new{M,S,V,typeof(μ)}(model, sampler, varinfo, μ)
end
|
function ESSPrior{M,S,V}(
model::M, sampler::S, varinfo::V
) where {M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo}
vns = keys(varinfo)
μ = mapreduce(vcat, vns) do vn
dist = getdist(varinfo, vn)
EllipticalSliceSampling.isgaussian(typeof(dist)) ||
error("[ESS] only supports Gaussian prior distributions")
DynamicPPL.tovec(mean(dist))
end
return new{M,S,V,typeof(μ)}(model, sampler, varinfo, μ)
end
|
[
74,
85
] |
function ESSPrior{M,S,V}(
model::M, sampler::S, varinfo::V
) where {M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo}
vns = keys(varinfo)
μ = mapreduce(vcat, vns) do vn
dist = getdist(varinfo, vn)
EllipticalSliceSampling.isgaussian(typeof(dist)) ||
error("[ESS] only supports Gaussian prior distributions")
DynamicPPL.tovec(mean(dist))
end
return new{M,S,V,typeof(μ)}(model, sampler, varinfo, μ)
end
|
function ESSPrior{M,S,V}(
model::M, sampler::S, varinfo::V
) where {M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo}
vns = keys(varinfo)
μ = mapreduce(vcat, vns) do vn
dist = getdist(varinfo, vn)
EllipticalSliceSampling.isgaussian(typeof(dist)) ||
error("[ESS] only supports Gaussian prior distributions")
DynamicPPL.tovec(mean(dist))
end
return new{M,S,V,typeof(μ)}(model, sampler, varinfo, μ)
end
|
ESSPrior{M,S,V}
| 74
| 85
|
src/mcmc/ess.jl
|
#FILE: Turing.jl/src/mcmc/is.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::AbstractRNG, model::Model, spl::Sampler{<:IS}, vi::AbstractVarInfo; kwargs...
)
return Transition(model, vi), nothing
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:IS}, ::Nothing; kwargs...
)
vi = VarInfo(rng, model, spl)
return Transition(model, vi), nothing
end
# Calculate evidence.
function getlogevidence(samples::Vector{<:Transition}, ::Sampler{<:IS}, state)
return logsumexp(map(x -> x.lp, samples)) - log(length(samples))
end
function DynamicPPL.assume(rng, ::Sampler{<:IS}, dist::Distribution, vn::VarName, vi)
if haskey(vi, vn)
#FILE: Turing.jl/src/mcmc/prior.jl
##CHUNK 1
"""
Prior()
Algorithm for sampling from the prior.
"""
struct Prior <: InferenceAlgorithm end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler::DynamicPPL.Sampler{<:Prior},
state=nothing;
kwargs...,
)
vi = last(
DynamicPPL.evaluate!!(
model,
VarInfo(),
SamplingContext(rng, DynamicPPL.SampleFromPrior(), DynamicPPL.PriorContext()),
),
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
const MHLogDensityFunction{M<:Model,S<:Sampler{<:MH},V<:AbstractVarInfo} =
DynamicPPL.LogDensityFunction{M,V,<:DynamicPPL.SamplingContext{<:S},AD} where {AD}
function LogDensityProblems.logdensity(f::MHLogDensityFunction, x::NamedTuple)
vi = deepcopy(f.varinfo)
set_namedtuple!(vi, x)
vi_new = last(DynamicPPL.evaluate!!(f.model, vi, f.context))
lj = getlogp(vi_new)
return lj
end
# unpack a vector if possible
unvectorize(dists::AbstractVector) = length(dists) == 1 ? first(dists) : dists
# possibly unpack and reshape samples according to the prior distribution
function reconstruct(dist::Distribution, val::AbstractVector)
return DynamicPPL.from_vec_transform(dist)(val)
end
reconstruct(dist::AbstractVector{<:UnivariateDistribution}, val::AbstractVector) = val
function reconstruct(dist::AbstractVector{<:MultivariateDistribution}, val::AbstractVector)
#CURRENT FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
EllipticalSliceSampling.isgaussian(::Type{<:ESSPrior}) = true
# Only define out-of-place sampling
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
sampler = p.sampler
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
end
p.model(rng, varinfo, sampler)
return varinfo[:]
end
# Mean of prior distribution
Distributions.mean(p::ESSPrior) = p.μ
# Evaluate log-likelihood of proposals
const ESSLogLikelihood{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo} =
##CHUNK 2
varinfo::V
μ::T
end
function ESSPrior(model::Model, sampler::Sampler{<:ESS}, varinfo::AbstractVarInfo)
return ESSPrior{typeof(model),typeof(sampler),typeof(varinfo)}(model, sampler, varinfo)
end
# Ensure that the prior is a Gaussian distribution (checked in the constructor)
EllipticalSliceSampling.isgaussian(::Type{<:ESSPrior}) = true
# Only define out-of-place sampling
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
sampler = p.sampler
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
##CHUNK 3
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
varinfo::V
μ::T
end
function ESSPrior(model::Model, sampler::Sampler{<:ESS}, varinfo::AbstractVarInfo)
return ESSPrior{typeof(model),typeof(sampler),typeof(varinfo)}(model, sampler, varinfo)
end
# Ensure that the prior is a Gaussian distribution (checked in the constructor)
##CHUNK 4
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
##CHUNK 5
```
"""
struct ESS <: InferenceAlgorithm end
# always accept in the first step
function DynamicPPL.initialstep(
rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs...
)
for vn in keys(vi)
dist = getdist(vi, vn)
EllipticalSliceSampling.isgaussian(typeof(dist)) ||
error("ESS only supports Gaussian prior distributions")
end
return Transition(model, vi), vi
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs...
)
# obtain previous sample
##CHUNK 6
EllipticalSliceSampling.isgaussian(typeof(dist)) ||
error("ESS only supports Gaussian prior distributions")
end
return Transition(model, vi), vi
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:ESS}, vi::AbstractVarInfo; kwargs...
)
# obtain previous sample
f = vi[:]
# define previous sampler state
# (do not use cache to avoid in-place sampling from prior)
oldstate = EllipticalSliceSampling.ESSState(f, getlogp(vi), nothing)
# compute next state
sample, state = AbstractMCMC.step(
rng,
EllipticalSliceSampling.ESSModel(
##CHUNK 7
end
p.model(rng, varinfo, sampler)
return varinfo[:]
end
# Mean of prior distribution
Distributions.mean(p::ESSPrior) = p.μ
# Evaluate log-likelihood of proposals
const ESSLogLikelihood{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo} =
DynamicPPL.LogDensityFunction{M,V,<:DynamicPPL.SamplingContext{<:S},AD} where {AD}
(ℓ::ESSLogLikelihood)(f::AbstractVector) = LogDensityProblems.logdensity(ℓ, f)
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, ::DefaultContext, ::Sampler{<:ESS}, right, vn, vi
)
return DynamicPPL.tilde_assume(
rng, LikelihoodContext(), SampleFromPrior(), right, vn, vi
)
|
96
| 106
|
Turing.jl
| 345
|
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
sampler = p.sampler
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
end
p.model(rng, varinfo, sampler)
return varinfo[:]
end
|
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
sampler = p.sampler
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
end
p.model(rng, varinfo, sampler)
return varinfo[:]
end
|
[
96,
106
] |
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
sampler = p.sampler
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
end
p.model(rng, varinfo, sampler)
return varinfo[:]
end
|
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
sampler = p.sampler
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
end
p.model(rng, varinfo, sampler)
return varinfo[:]
end
|
Base.rand
| 96
| 106
|
src/mcmc/ess.jl
|
#FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
if e == KeyError(:task_variable)
return rng
else
rethrow(e)
end
end
end
function DynamicPPL.assume(
rng, ::Sampler{<:Union{PG,SMC}}, dist::Distribution, vn::VarName, _vi::AbstractVarInfo
)
vi = trace_local_varinfo_maybe(_vi)
trng = trace_local_rng_maybe(rng)
if ~haskey(vi, vn)
r = rand(trng, dist)
push!!(vi, vn, r, dist)
elseif DynamicPPL.is_flagged(vi, vn, "del")
DynamicPPL.unset_flag!(vi, vn, "del") # Reference particle parent
r = rand(trng, dist)
##CHUNK 2
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
##CHUNK 3
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
function DynamicPPL.use_threadsafe_eval(
::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, ::AbstractVarInfo
)
return false
end
##CHUNK 4
kwargs...,
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
DynamicPPL.empty!!(vi)
# Create a new set of particles.
##CHUNK 5
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
##CHUNK 6
DynamicPPL.resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
struct ModelDistribution{M<:DynamicPPL.Model,V<:DynamicPPL.VarInfo} <:
ContinuousMultivariateDistribution
model::M
varinfo::V
end
function ModelDistribution(model::DynamicPPL.Model)
return ModelDistribution(model, DynamicPPL.VarInfo(model))
end
Base.length(d::ModelDistribution) = length(d.varinfo[:])
function Distributions._logpdf(d::ModelDistribution, x::AbstractVector)
return logprior(d.model, DynamicPPL.unflatten(d.varinfo, x))
end
function Distributions._rand!(
rng::Random.AbstractRNG, d::ModelDistribution, x::AbstractVector{<:Real}
)
model = d.model
varinfo = deepcopy(d.varinfo)
for vn in keys(varinfo)
DynamicPPL.set_flag!(varinfo, vn, "del")
##CHUNK 2
function Distributions._logpdf(d::ModelDistribution, x::AbstractVector)
return logprior(d.model, DynamicPPL.unflatten(d.varinfo, x))
end
function Distributions._rand!(
rng::Random.AbstractRNG, d::ModelDistribution, x::AbstractVector{<:Real}
)
model = d.model
varinfo = deepcopy(d.varinfo)
for vn in keys(varinfo)
DynamicPPL.set_flag!(varinfo, vn, "del")
end
DynamicPPL.evaluate!!(model, varinfo, DynamicPPL.SamplingContext(rng))
x .= varinfo[:]
return x
end
function initialize_mh_with_prior_proposal(model)
return AdvancedMH.MetropolisHastings(
AdvancedMH.StaticProposal(ModelDistribution(model))
)
#FILE: Turing.jl/test/mcmc/ess.jl
##CHUNK 1
DynamicPPL.TestUtils.test_sampler(
models_conditioned,
DynamicPPL.Sampler(ESS()),
2000;
# Filter out the varnames we've conditioned on.
varnames_filter=vn -> DynamicPPL.getsym(vn) != :s,
)
end
end
# Test that ESS can sample multiple variables regardless of whether they are under the
# same symbol or not.
@testset "Multiple variables" begin
@model function xy()
z ~ Beta(2.0, 2.0)
x ~ Normal(z, 2.0)
return y ~ Normal(-3.0, 3.0)
end
@model function x12()
#CURRENT FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
varinfo::V
μ::T
function ESSPrior{M,S,V}(
model::M, sampler::S, varinfo::V
) where {M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo}
vns = keys(varinfo)
μ = mapreduce(vcat, vns) do vn
dist = getdist(varinfo, vn)
EllipticalSliceSampling.isgaussian(typeof(dist)) ||
|
28
| 39
|
Turing.jl
| 346
|
function ExternalSampler(
sampler::AbstractSampler,
adtype::ADTypes.AbstractADType,
::Val{unconstrained}=Val(true),
) where {unconstrained}
if !(unconstrained isa Bool)
throw(
ArgumentError("Expected Val{true} or Val{false}, got Val{$unconstrained}")
)
end
return new{typeof(sampler),typeof(adtype),unconstrained}(sampler, adtype)
end
|
function ExternalSampler(
sampler::AbstractSampler,
adtype::ADTypes.AbstractADType,
::Val{unconstrained}=Val(true),
) where {unconstrained}
if !(unconstrained isa Bool)
throw(
ArgumentError("Expected Val{true} or Val{false}, got Val{$unconstrained}")
)
end
return new{typeof(sampler),typeof(adtype),unconstrained}(sampler, adtype)
end
|
[
28,
39
] |
function ExternalSampler(
sampler::AbstractSampler,
adtype::ADTypes.AbstractADType,
::Val{unconstrained}=Val(true),
) where {unconstrained}
if !(unconstrained isa Bool)
throw(
ArgumentError("Expected Val{true} or Val{false}, got Val{$unconstrained}")
)
end
return new{typeof(sampler),typeof(adtype),unconstrained}(sampler, adtype)
end
|
function ExternalSampler(
sampler::AbstractSampler,
adtype::ADTypes.AbstractADType,
::Val{unconstrained}=Val(true),
) where {unconstrained}
if !(unconstrained isa Bool)
throw(
ArgumentError("Expected Val{true} or Val{false}, got Val{$unconstrained}")
)
end
return new{typeof(sampler),typeof(adtype),unconstrained}(sampler, adtype)
end
|
ExternalSampler
| 28
| 39
|
src/mcmc/external_sampler.jl
|
#FILE: Turing.jl/test/ad.jl
##CHUNK 1
adtypes = (
AutoForwardDiff(),
AutoReverseDiff(),
# Don't need to test Mooncake as it doesn't use tracer types
)
for actual_adtype in adtypes
sampler = HMC(0.1, 5; adtype=actual_adtype)
for expected_adtype in adtypes
contextualised_tm = DynamicPPL.contextualize(
tm, ADTypeCheckContext(expected_adtype, tm.context)
)
@testset "Expected: $expected_adtype, Actual: $actual_adtype" begin
if actual_adtype == expected_adtype
# Check that this does not throw an error.
sample(contextualised_tm, sampler, 2)
else
@test_throws AbstractWrongADBackendError sample(
contextualised_tm, sampler, 2
)
end
##CHUNK 2
)
@testset "Expected: $expected_adtype, Actual: $actual_adtype" begin
if actual_adtype == expected_adtype
# Check that this does not throw an error.
sample(contextualised_tm, sampler, 2)
else
@test_throws AbstractWrongADBackendError sample(
contextualised_tm, sampler, 2
)
end
end
end
end
end
@testset verbose = true "AD / ADTypeCheckContext" begin
# This testset ensures that samplers or optimisers don't accidentally
# override the AD backend set in it.
@testset "adtype=$adtype" for adtype in ADTYPES
seed = 123
##CHUNK 3
"""
ADTYPES = [AutoForwardDiff(), AutoReverseDiff(; compile=false)]
if INCLUDE_MOONCAKE
push!(ADTYPES, AutoMooncake(; config=nothing))
end
# Check that ADTypeCheckContext itself works as expected.
@testset "ADTypeCheckContext" begin
@model test_model() = x ~ Normal(0, 1)
tm = test_model()
adtypes = (
AutoForwardDiff(),
AutoReverseDiff(),
# Don't need to test Mooncake as it doesn't use tracer types
)
for actual_adtype in adtypes
sampler = HMC(0.1, 5; adtype=actual_adtype)
for expected_adtype in adtypes
contextualised_tm = DynamicPPL.contextualize(
tm, ADTypeCheckContext(expected_adtype, tm.context)
##CHUNK 4
struct IncompatibleADTypeError <: AbstractWrongADBackendError
valtype::Type
adtype::Type
end
function Base.showerror(io::IO, e::IncompatibleADTypeError)
return print(
io,
"Incompatible ADType: Did not expect element of type $(e.valtype) with $(e.adtype)",
)
end
"""
ADTypeCheckContext{ADType,ChildContext}
A context for checking that the expected ADType is being used.
Evaluating a model with this context will check that the types of values in a `VarInfo` are
compatible with the ADType of the context. If the check fails, an `IncompatibleADTypeError`
is thrown.
#CURRENT FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
) where {unconstrained}
if !(unconstrained isa Bool)
throw(
ArgumentError("Expected Val{true} or Val{false}, got Val{$unconstrained}")
)
end
return new{typeof(sampler),typeof(adtype),unconstrained}(sampler, adtype)
end
end
"""
requires_unconstrained_space(sampler::ExternalSampler)
Return `true` if the sampler requires unconstrained space, and `false` otherwise.
"""
function requires_unconstrained_space(
::ExternalSampler{<:Any,<:Any,Unconstrained}
) where {Unconstrained}
return Unconstrained
end
##CHUNK 2
# Arguments
- `sampler::AbstractSampler`: The sampler to wrap.
- `adtype::ADTypes.AbstractADType`: The automatic differentiation (AD) backend to use.
- `unconstrained::Val=Val{true}()`: Value type containing a boolean indicating whether the sampler requires unconstrained space.
"""
function ExternalSampler(
sampler::AbstractSampler,
adtype::ADTypes.AbstractADType,
(::Val{unconstrained})=Val(true),
) where {unconstrained}
if !(unconstrained isa Bool)
throw(
ArgumentError("Expected Val{true} or Val{false}, got Val{$unconstrained}")
)
end
return new{typeof(sampler),typeof(adtype),unconstrained}(sampler, adtype)
end
end
##CHUNK 3
"""
requires_unconstrained_space(sampler::ExternalSampler)
Return `true` if the sampler requires unconstrained space, and `false` otherwise.
"""
function requires_unconstrained_space(
::ExternalSampler{<:Any,<:Any,Unconstrained}
) where {Unconstrained}
return Unconstrained
end
"""
externalsampler(sampler::AbstractSampler; adtype=AutoForwardDiff(), unconstrained=true)
Wrap a sampler so it can be used as an inference algorithm.
# Arguments
- `sampler::AbstractSampler`: The sampler to wrap.
# Keyword Arguments
##CHUNK 4
"""
ExternalSampler{S<:AbstractSampler,AD<:ADTypes.AbstractADType,Unconstrained}
Represents a sampler that is not an implementation of `InferenceAlgorithm`.
The `Unconstrained` type-parameter is to indicate whether the sampler requires unconstrained space.
# Fields
$(TYPEDFIELDS)
##CHUNK 5
"""
externalsampler(sampler::AbstractSampler; adtype=AutoForwardDiff(), unconstrained=true)
Wrap a sampler so it can be used as an inference algorithm.
# Arguments
- `sampler::AbstractSampler`: The sampler to wrap.
# Keyword Arguments
- `adtype::ADTypes.AbstractADType=ADTypes.AutoForwardDiff()`: The automatic differentiation (AD) backend to use.
- `unconstrained::Bool=true`: Whether the sampler requires unconstrained space.
"""
function externalsampler(
sampler::AbstractSampler; adtype=Turing.DEFAULT_ADTYPE, unconstrained::Bool=true
)
return ExternalSampler(sampler, adtype, Val(unconstrained))
end
"""
##CHUNK 6
!!! note
In a future breaking release of Turing, this is likely to change to `AbstractMCMC.getparams(::DynamicPPL.Model, external_state)`, with no default method. `Turing.Inference.getparams` is technically an internal method, so the aim here is to unify the interface for samplers at a higher level.
There are a few more optional functions which you can implement to improve the integration with Turing.jl:
- `Turing.Inference.isgibbscomponent(::MySampler)`: If you want your sampler to function as a component in Turing's Gibbs sampler, you should make this evaluate to `true`.
ExternalSampler(sampler::AbstractSampler, adtype::ADTypes.AbstractADType, ::Val{unconstrained})
Wrap a sampler so it can be used as an inference algorithm.
# Arguments
- `sampler::AbstractSampler`: The sampler to wrap.
- `adtype::ADTypes.AbstractADType`: The automatic differentiation (AD) backend to use.
- `unconstrained::Val=Val{true}()`: Value type containing a boolean indicating whether the sampler requires unconstrained space.
"""
function ExternalSampler(
sampler::AbstractSampler,
adtype::ADTypes.AbstractADType,
(::Val{unconstrained})=Val(true),
|
102
| 146
|
Turing.jl
| 347
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
varinfo = DynamicPPL.link(varinfo, model)
end
end
# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
)
end
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
varinfo = DynamicPPL.link(varinfo, model)
end
end
# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
)
end
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
[
102,
146
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
varinfo = DynamicPPL.link(varinfo, model)
end
end
# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
)
end
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
varinfo = DynamicPPL.link(varinfo, model)
end
end
# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
)
end
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
AbstractMCMC.step
| 102
| 146
|
src/mcmc/external_sampler.jl
|
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, params, logdensity)
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
##CHUNK 2
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
##CHUNK 3
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 2
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#CURRENT FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
##CHUNK 2
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
varinfo = DynamicPPL.link(varinfo, model)
end
end
# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
|
148
| 165
|
Turing.jl
| 348
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedHMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedHMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
[
148,
165
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedHMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedHMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Update the `state`
return transition_to_turing(f, transition_inner), state_to_turing(f, state_inner)
end
|
AbstractMCMC.step
| 148
| 165
|
src/mcmc/external_sampler.jl
|
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, params, logdensity)
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
#FILE: Turing.jl/src/mcmc/repeat_sampler.jl
##CHUNK 1
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step_warmup(rng, model, sampler.sampler; kwargs...)
end
##CHUNK 2
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
##CHUNK 3
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
end
return transition, state
end
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
#CURRENT FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
##CHUNK 2
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
)
end
##CHUNK 3
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
##CHUNK 4
initial_params,
kwargs...,
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
|
142
| 199
|
Turing.jl
| 349
|
function DynamicPPL.tilde_assume(context::GibbsContext, right, vn, vi)
child_context = DynamicPPL.childcontext(context)
# Note that `child_context` may contain `PrefixContext`s -- in which case
# we need to make sure that vn is appropriately prefixed before we handle
# the `GibbsContext` behaviour below. For example, consider the following:
# @model inner() = x ~ Normal()
# @model outer() = a ~ to_submodel(inner())
# If we run this with `Gibbs(@varname(a.x) => MH())`, then when we are
# executing the submodel, the `context` will contain the `@varname(a.x)`
# variable; `child_context` will contain `PrefixContext(@varname(a))`; and
# `vn` will just be `@varname(x)`. If we just simply run
# `is_target_varname(context, vn)`, it will return false, and everything
# will be messed up.
# TODO(penelopeysm): This 'problem' could be solved if we made GibbsContext a
# leaf context and wrapped the PrefixContext _above_ the GibbsContext, so
# that the prefixing would be handled by tilde_assume(::PrefixContext, ...)
# _before_ we hit this method.
# In the current state of GibbsContext, doing this would require
# special-casing the way PrefixContext is used to wrap the leaf context.
# This is very inconvenient because PrefixContext's behaviour is defined in
# DynamicPPL, and we would basically have to create a new method in Turing
# and override it for GibbsContext. Indeed, a better way to do this would
# be to make GibbsContext a leaf context. In this case, we would be able to
# rely on the existing behaviour of DynamicPPL.make_evaluate_args_and_kwargs
# to correctly wrap the PrefixContext around the GibbsContext. This is very
# tricky to correctly do now, but once we remove the other leaf contexts
# (i.e. PriorContext and LikelihoodContext), we should be able to do this.
# This is already implemented in
# https://github.com/TuringLang/DynamicPPL.jl/pull/885/ but not yet
# released. Exciting!
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
# Fall back to the default behavior.
DynamicPPL.tilde_assume(child_context, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
# Short-circuit the tilde assume if `vn` is present in `context`.
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
# If the varname has not been conditioned on, nor is it a target variable, its
# presumably a new variable that should be sampled from its prior. We need to add
# this new variable to the global `varinfo` of the context, but not to the local one
# being used by the current sampler.
value, lp, new_global_vi = DynamicPPL.tilde_assume(
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
function DynamicPPL.tilde_assume(context::GibbsContext, right, vn, vi)
child_context = DynamicPPL.childcontext(context)
# Note that `child_context` may contain `PrefixContext`s -- in which case
# we need to make sure that vn is appropriately prefixed before we handle
# the `GibbsContext` behaviour below. For example, consider the following:
# @model inner() = x ~ Normal()
# @model outer() = a ~ to_submodel(inner())
# If we run this with `Gibbs(@varname(a.x) => MH())`, then when we are
# executing the submodel, the `context` will contain the `@varname(a.x)`
# variable; `child_context` will contain `PrefixContext(@varname(a))`; and
# `vn` will just be `@varname(x)`. If we just simply run
# `is_target_varname(context, vn)`, it will return false, and everything
# will be messed up.
# TODO(penelopeysm): This 'problem' could be solved if we made GibbsContext a
# leaf context and wrapped the PrefixContext _above_ the GibbsContext, so
# that the prefixing would be handled by tilde_assume(::PrefixContext, ...)
# _before_ we hit this method.
# In the current state of GibbsContext, doing this would require
# special-casing the way PrefixContext is used to wrap the leaf context.
# This is very inconvenient because PrefixContext's behaviour is defined in
# DynamicPPL, and we would basically have to create a new method in Turing
# and override it for GibbsContext. Indeed, a better way to do this would
# be to make GibbsContext a leaf context. In this case, we would be able to
# rely on the existing behaviour of DynamicPPL.make_evaluate_args_and_kwargs
# to correctly wrap the PrefixContext around the GibbsContext. This is very
# tricky to correctly do now, but once we remove the other leaf contexts
# (i.e. PriorContext and LikelihoodContext), we should be able to do this.
# This is already implemented in
# https://github.com/TuringLang/DynamicPPL.jl/pull/885/ but not yet
# released. Exciting!
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
# Fall back to the default behavior.
DynamicPPL.tilde_assume(child_context, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
# Short-circuit the tilde assume if `vn` is present in `context`.
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
# If the varname has not been conditioned on, nor is it a target variable, its
# presumably a new variable that should be sampled from its prior. We need to add
# this new variable to the global `varinfo` of the context, but not to the local one
# being used by the current sampler.
value, lp, new_global_vi = DynamicPPL.tilde_assume(
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
[
142,
199
] |
function DynamicPPL.tilde_assume(context::GibbsContext, right, vn, vi)
child_context = DynamicPPL.childcontext(context)
# Note that `child_context` may contain `PrefixContext`s -- in which case
# we need to make sure that vn is appropriately prefixed before we handle
# the `GibbsContext` behaviour below. For example, consider the following:
# @model inner() = x ~ Normal()
# @model outer() = a ~ to_submodel(inner())
# If we run this with `Gibbs(@varname(a.x) => MH())`, then when we are
# executing the submodel, the `context` will contain the `@varname(a.x)`
# variable; `child_context` will contain `PrefixContext(@varname(a))`; and
# `vn` will just be `@varname(x)`. If we just simply run
# `is_target_varname(context, vn)`, it will return false, and everything
# will be messed up.
# TODO(penelopeysm): This 'problem' could be solved if we made GibbsContext a
# leaf context and wrapped the PrefixContext _above_ the GibbsContext, so
# that the prefixing would be handled by tilde_assume(::PrefixContext, ...)
# _before_ we hit this method.
# In the current state of GibbsContext, doing this would require
# special-casing the way PrefixContext is used to wrap the leaf context.
# This is very inconvenient because PrefixContext's behaviour is defined in
# DynamicPPL, and we would basically have to create a new method in Turing
# and override it for GibbsContext. Indeed, a better way to do this would
# be to make GibbsContext a leaf context. In this case, we would be able to
# rely on the existing behaviour of DynamicPPL.make_evaluate_args_and_kwargs
# to correctly wrap the PrefixContext around the GibbsContext. This is very
# tricky to correctly do now, but once we remove the other leaf contexts
# (i.e. PriorContext and LikelihoodContext), we should be able to do this.
# This is already implemented in
# https://github.com/TuringLang/DynamicPPL.jl/pull/885/ but not yet
# released. Exciting!
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
# Fall back to the default behavior.
DynamicPPL.tilde_assume(child_context, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
# Short-circuit the tilde assume if `vn` is present in `context`.
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
# If the varname has not been conditioned on, nor is it a target variable, its
# presumably a new variable that should be sampled from its prior. We need to add
# this new variable to the global `varinfo` of the context, but not to the local one
# being used by the current sampler.
value, lp, new_global_vi = DynamicPPL.tilde_assume(
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
function DynamicPPL.tilde_assume(context::GibbsContext, right, vn, vi)
child_context = DynamicPPL.childcontext(context)
# Note that `child_context` may contain `PrefixContext`s -- in which case
# we need to make sure that vn is appropriately prefixed before we handle
# the `GibbsContext` behaviour below. For example, consider the following:
# @model inner() = x ~ Normal()
# @model outer() = a ~ to_submodel(inner())
# If we run this with `Gibbs(@varname(a.x) => MH())`, then when we are
# executing the submodel, the `context` will contain the `@varname(a.x)`
# variable; `child_context` will contain `PrefixContext(@varname(a))`; and
# `vn` will just be `@varname(x)`. If we just simply run
# `is_target_varname(context, vn)`, it will return false, and everything
# will be messed up.
# TODO(penelopeysm): This 'problem' could be solved if we made GibbsContext a
# leaf context and wrapped the PrefixContext _above_ the GibbsContext, so
# that the prefixing would be handled by tilde_assume(::PrefixContext, ...)
# _before_ we hit this method.
# In the current state of GibbsContext, doing this would require
# special-casing the way PrefixContext is used to wrap the leaf context.
# This is very inconvenient because PrefixContext's behaviour is defined in
# DynamicPPL, and we would basically have to create a new method in Turing
# and override it for GibbsContext. Indeed, a better way to do this would
# be to make GibbsContext a leaf context. In this case, we would be able to
# rely on the existing behaviour of DynamicPPL.make_evaluate_args_and_kwargs
# to correctly wrap the PrefixContext around the GibbsContext. This is very
# tricky to correctly do now, but once we remove the other leaf contexts
# (i.e. PriorContext and LikelihoodContext), we should be able to do this.
# This is already implemented in
# https://github.com/TuringLang/DynamicPPL.jl/pull/885/ but not yet
# released. Exciting!
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
# Fall back to the default behavior.
DynamicPPL.tilde_assume(child_context, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
# Short-circuit the tilde assume if `vn` is present in `context`.
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
# If the varname has not been conditioned on, nor is it a target variable, its
# presumably a new variable that should be sampled from its prior. We need to add
# this new variable to the global `varinfo` of the context, but not to the local one
# being used by the current sampler.
value, lp, new_global_vi = DynamicPPL.tilde_assume(
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
inner
| 142
| 199
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/test/optimisation/Optimisation.jl
##CHUNK 1
# The `stats` field is populated only in newer versions of OptimizationOptimJL and
# similar packages. Hence we end up doing this check a lot
hasstats(result) = result.optim_result.stats !== nothing
# Issue: https://discourse.julialang.org/t/two-equivalent-conditioning-syntaxes-giving-different-likelihood-values/100320
@testset "OptimizationContext" begin
# Used for testing how well it works with nested contexts.
struct OverrideContext{C,T1,T2} <: DynamicPPL.AbstractContext
context::C
logprior_weight::T1
loglikelihood_weight::T2
end
DynamicPPL.NodeTrait(::OverrideContext) = DynamicPPL.IsParent()
DynamicPPL.childcontext(parent::OverrideContext) = parent.context
DynamicPPL.setchildcontext(parent::OverrideContext, child) =
OverrideContext(child, parent.logprior_weight, parent.loglikelihood_weight)
# Only implement what we need for the models above.
function DynamicPPL.tilde_assume(context::OverrideContext, right, vn, vi)
#FILE: Turing.jl/test/ad.jl
##CHUNK 1
end
# A bunch of tilde_assume/tilde_observe methods that just call the same method on the child
# context, and then call check_adtype on the result before returning the results from the
# child context.
function DynamicPPL.tilde_assume(context::ADTypeCheckContext, right, vn, vi)
value, logp, vi = DynamicPPL.tilde_assume(
DynamicPPL.childcontext(context), right, vn, vi
)
check_adtype(context, vi)
return value, logp, vi
end
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::ADTypeCheckContext, sampler, right, vn, vi
)
value, logp, vi = DynamicPPL.tilde_assume(
rng, DynamicPPL.childcontext(context), sampler, right, vn, vi
)
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
function is_target_varname(context::GibbsContext, vns::AbstractArray{<:VarName})
num_target = count(Iterators.map(Base.Fix1(is_target_varname, context), vns))
if (num_target != 0) && (num_target != length(vns))
error(
"Some but not all of the variables in `vns` are target variables. " *
"Having mixed targeting like this is not supported in GibbsContext.",
)
end
return num_target > 0
end
end
# As above but with an RNG.
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::GibbsContext, sampler, right, vn, vi
)
# See comment in the above, rng-less version of this method for an explanation.
child_context = DynamicPPL.childcontext(context)
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
##CHUNK 2
the child context that tilde calls will eventually be passed onto.
"""
context::Ctx
function GibbsContext(target_varnames, global_varinfo, context)
if !can_be_wrapped(context)
error("GibbsContext can only wrap a leaf or prefix context, not a $(context).")
end
target_varnames = tuple(target_varnames...) # Allow vectors.
return new{typeof(target_varnames),typeof(global_varinfo),typeof(context)}(
target_varnames, global_varinfo, context
)
end
end
function GibbsContext(target_varnames, global_varinfo)
return GibbsContext(target_varnames, global_varinfo, DynamicPPL.DefaultContext())
end
DynamicPPL.NodeTrait(::GibbsContext) = DynamicPPL.IsParent()
##CHUNK 3
end
# As above but with an RNG.
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::GibbsContext, sampler, right, vn, vi
)
# See comment in the above, rng-less version of this method for an explanation.
child_context = DynamicPPL.childcontext(context)
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
DynamicPPL.tilde_assume(rng, child_context, sampler, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
value, lp, new_global_vi = DynamicPPL.tilde_assume(
##CHUNK 4
sampler. These will _not_ be conditioned.
- `varinfo::DynamicPPL.AbstractVarInfo`: Values for all variables in the model. All the
values in `varinfo` but not in `target_variables` will be conditioned to the values they
have in `varinfo`.
# Returns
- A new model with the variables _not_ in `target_variables` conditioned.
- The `GibbsContext` object that will be used to condition the variables. This is necessary
because evaluation can mutate its `global_varinfo` field, which we need to access later.
"""
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
)
# Insert the `GibbsContext` just before the leaf.
# 1. Extract the `leafcontext` from `model` and wrap in `GibbsContext`.
gibbs_context_inner = GibbsContext(
target_variables, Ref(varinfo), DynamicPPL.leafcontext(model.context)
)
# 2. Set the leaf context to be the `GibbsContext` wrapping `leafcontext(model.context)`.
gibbs_context = DynamicPPL.setleafcontext(model.context, gibbs_context_inner)
##CHUNK 5
# rather than the `observe` pipeline for the conditioned variables.
"""
GibbsContext(target_varnames, global_varinfo, context)
A context used in the implementation of the Turing.jl Gibbs sampler.
There will be one `GibbsContext` for each iteration of a component sampler.
`target_varnames` is a a tuple of `VarName`s that the current component sampler
is sampling. For those `VarName`s, `GibbsContext` will just pass `tilde_assume`
calls to its child context. For other variables, their values will be fixed to
the values they have in `global_varinfo`.
# Fields
$(FIELDS)
"""
struct GibbsContext{
VNs<:Tuple{Vararg{VarName}},GVI<:Ref{<:AbstractVarInfo},Ctx<:DynamicPPL.AbstractContext
} <: DynamicPPL.AbstractContext
"""
##CHUNK 6
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
)
# Insert the `GibbsContext` just before the leaf.
# 1. Extract the `leafcontext` from `model` and wrap in `GibbsContext`.
gibbs_context_inner = GibbsContext(
target_variables, Ref(varinfo), DynamicPPL.leafcontext(model.context)
)
# 2. Set the leaf context to be the `GibbsContext` wrapping `leafcontext(model.context)`.
gibbs_context = DynamicPPL.setleafcontext(model.context, gibbs_context_inner)
return DynamicPPL.contextualize(model, gibbs_context), gibbs_context_inner
end
wrap_in_sampler(x::AbstractMCMC.AbstractSampler) = x
wrap_in_sampler(x::InferenceAlgorithm) = DynamicPPL.Sampler(x)
to_varname(x::VarName) = x
to_varname(x::Symbol) = VarName{x}()
to_varname_list(x::Union{VarName,Symbol}) = [to_varname(x)]
# Any other value is assumed to be an iterable of VarNames and Symbols.
##CHUNK 7
isgibbscomponent(::AdvancedHMC.AbstractHMCSampler) = true
isgibbscomponent(::AdvancedMH.MetropolisHastings) = true
isgibbscomponent(spl) = false
function can_be_wrapped(ctx::DynamicPPL.AbstractContext)
return DynamicPPL.NodeTrait(ctx) isa DynamicPPL.IsLeaf
end
can_be_wrapped(ctx::DynamicPPL.PrefixContext) = can_be_wrapped(ctx.context)
# Basically like a `DynamicPPL.FixedContext` but
# 1. Hijacks the tilde pipeline to fix variables.
# 2. Computes the log-probability of the fixed variables.
#
# Purpose: avoid triggering resampling of variables we're conditioning on.
# - Using standard `DynamicPPL.condition` results in conditioned variables being treated
# as observations in the truest sense, i.e. we hit `DynamicPPL.tilde_observe`.
# - But `observe` is overloaded by some samplers, e.g. `CSMC`, which can lead to
# undesirable behavior, e.g. `CSMC` triggering a resampling for every conditioned variable
# rather than only for the "true" observations.
# - `GibbsContext` allows us to perform conditioning while still hit the `assume` pipeline
##CHUNK 8
the VarNames being sampled
"""
target_varnames::VNs
"""
a `Ref` to the global `AbstractVarInfo` object that holds values for all variables, both
those fixed and those being sampled. We use a `Ref` because this field may need to be
updated if new variables are introduced.
"""
global_varinfo::GVI
"""
the child context that tilde calls will eventually be passed onto.
"""
context::Ctx
function GibbsContext(target_varnames, global_varinfo, context)
if !can_be_wrapped(context)
error("GibbsContext can only wrap a leaf or prefix context, not a $(context).")
end
target_varnames = tuple(target_varnames...) # Allow vectors.
return new{typeof(target_varnames),typeof(global_varinfo),typeof(context)}(
|
202
| 228
|
Turing.jl
| 350
|
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::GibbsContext, sampler, right, vn, vi
)
# See comment in the above, rng-less version of this method for an explanation.
child_context = DynamicPPL.childcontext(context)
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
DynamicPPL.tilde_assume(rng, child_context, sampler, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
value, lp, new_global_vi = DynamicPPL.tilde_assume(
rng,
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::GibbsContext, sampler, right, vn, vi
)
# See comment in the above, rng-less version of this method for an explanation.
child_context = DynamicPPL.childcontext(context)
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
DynamicPPL.tilde_assume(rng, child_context, sampler, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
value, lp, new_global_vi = DynamicPPL.tilde_assume(
rng,
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
[
202,
228
] |
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::GibbsContext, sampler, right, vn, vi
)
# See comment in the above, rng-less version of this method for an explanation.
child_context = DynamicPPL.childcontext(context)
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
DynamicPPL.tilde_assume(rng, child_context, sampler, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
value, lp, new_global_vi = DynamicPPL.tilde_assume(
rng,
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::GibbsContext, sampler, right, vn, vi
)
# See comment in the above, rng-less version of this method for an explanation.
child_context = DynamicPPL.childcontext(context)
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
DynamicPPL.tilde_assume(rng, child_context, sampler, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
value, lp, new_global_vi = DynamicPPL.tilde_assume(
rng,
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
|
DynamicPPL.tilde_assume
| 202
| 228
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/test/ad.jl
##CHUNK 1
end
# A bunch of tilde_assume/tilde_observe methods that just call the same method on the child
# context, and then call check_adtype on the result before returning the results from the
# child context.
function DynamicPPL.tilde_assume(context::ADTypeCheckContext, right, vn, vi)
value, logp, vi = DynamicPPL.tilde_assume(
DynamicPPL.childcontext(context), right, vn, vi
)
check_adtype(context, vi)
return value, logp, vi
end
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::ADTypeCheckContext, sampler, right, vn, vi
)
value, logp, vi = DynamicPPL.tilde_assume(
rng, DynamicPPL.childcontext(context), sampler, right, vn, vi
)
##CHUNK 2
check_adtype(context, vi)
return value, logp, vi
end
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::ADTypeCheckContext, sampler, right, vn, vi
)
value, logp, vi = DynamicPPL.tilde_assume(
rng, DynamicPPL.childcontext(context), sampler, right, vn, vi
)
check_adtype(context, vi)
return value, logp, vi
end
function DynamicPPL.tilde_observe(context::ADTypeCheckContext, right, left, vi)
logp, vi = DynamicPPL.tilde_observe(DynamicPPL.childcontext(context), right, left, vi)
check_adtype(context, vi)
return logp, vi
end
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
return Transition(model, vi), vi
end
####
#### Compiler interface, i.e. tilde operators.
####
function DynamicPPL.assume(
rng::Random.AbstractRNG, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi
)
# Just defer to `SampleFromPrior`.
retval = DynamicPPL.assume(rng, SampleFromPrior(), dist, vn, vi)
return retval
end
function DynamicPPL.observe(spl::Sampler{<:MH}, d::Distribution, value, vi)
return DynamicPPL.observe(SampleFromPrior(), d, value, vi)
end
#FILE: Turing.jl/test/optimisation/Optimisation.jl
##CHUNK 1
logprior_weight::T1
loglikelihood_weight::T2
end
DynamicPPL.NodeTrait(::OverrideContext) = DynamicPPL.IsParent()
DynamicPPL.childcontext(parent::OverrideContext) = parent.context
DynamicPPL.setchildcontext(parent::OverrideContext, child) =
OverrideContext(child, parent.logprior_weight, parent.loglikelihood_weight)
# Only implement what we need for the models above.
function DynamicPPL.tilde_assume(context::OverrideContext, right, vn, vi)
value, logp, vi = DynamicPPL.tilde_assume(context.context, right, vn, vi)
return value, context.logprior_weight, vi
end
function DynamicPPL.tilde_observe(context::OverrideContext, right, left, vi)
logp, vi = DynamicPPL.tilde_observe(context.context, right, left, vi)
return context.loglikelihood_weight, vi
end
@model function model1(x)
μ ~ Uniform(0, 2)
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
# If the varname has not been conditioned on, nor is it a target variable, its
# presumably a new variable that should be sampled from its prior. We need to add
# this new variable to the global `varinfo` of the context, but not to the local one
# being used by the current sampler.
value, lp, new_global_vi = DynamicPPL.tilde_assume(
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
##CHUNK 2
# This is already implemented in
# https://github.com/TuringLang/DynamicPPL.jl/pull/885/ but not yet
# released. Exciting!
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
# Fall back to the default behavior.
DynamicPPL.tilde_assume(child_context, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
# Short-circuit the tilde assume if `vn` is present in `context`.
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
# If the varname has not been conditioned on, nor is it a target variable, its
# presumably a new variable that should be sampled from its prior. We need to add
# this new variable to the global `varinfo` of the context, but not to the local one
# being used by the current sampler.
value, lp, new_global_vi = DynamicPPL.tilde_assume(
##CHUNK 3
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
end
"""
make_conditional(model, target_variables, varinfo)
Return a new, conditioned model for a component of a Gibbs sampler.
# Arguments
- `model::DynamicPPL.Model`: The model to condition.
##CHUNK 4
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
end
return vi
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
##CHUNK 5
# rather than the `observe` pipeline for the conditioned variables.
"""
GibbsContext(target_varnames, global_varinfo, context)
A context used in the implementation of the Turing.jl Gibbs sampler.
There will be one `GibbsContext` for each iteration of a component sampler.
`target_varnames` is a a tuple of `VarName`s that the current component sampler
is sampling. For those `VarName`s, `GibbsContext` will just pass `tilde_assume`
calls to its child context. For other variables, their values will be fixed to
the values they have in `global_varinfo`.
# Fields
$(FIELDS)
"""
struct GibbsContext{
VNs<:Tuple{Vararg{VarName}},GVI<:Ref{<:AbstractVarInfo},Ctx<:DynamicPPL.AbstractContext
} <: DynamicPPL.AbstractContext
"""
##CHUNK 6
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
end
return vi
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
|
248
| 259
|
Turing.jl
| 351
|
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
)
# Insert the `GibbsContext` just before the leaf.
# 1. Extract the `leafcontext` from `model` and wrap in `GibbsContext`.
gibbs_context_inner = GibbsContext(
target_variables, Ref(varinfo), DynamicPPL.leafcontext(model.context)
)
# 2. Set the leaf context to be the `GibbsContext` wrapping `leafcontext(model.context)`.
gibbs_context = DynamicPPL.setleafcontext(model.context, gibbs_context_inner)
return DynamicPPL.contextualize(model, gibbs_context), gibbs_context_inner
end
|
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
)
# Insert the `GibbsContext` just before the leaf.
# 1. Extract the `leafcontext` from `model` and wrap in `GibbsContext`.
gibbs_context_inner = GibbsContext(
target_variables, Ref(varinfo), DynamicPPL.leafcontext(model.context)
)
# 2. Set the leaf context to be the `GibbsContext` wrapping `leafcontext(model.context)`.
gibbs_context = DynamicPPL.setleafcontext(model.context, gibbs_context_inner)
return DynamicPPL.contextualize(model, gibbs_context), gibbs_context_inner
end
|
[
248,
259
] |
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
)
# Insert the `GibbsContext` just before the leaf.
# 1. Extract the `leafcontext` from `model` and wrap in `GibbsContext`.
gibbs_context_inner = GibbsContext(
target_variables, Ref(varinfo), DynamicPPL.leafcontext(model.context)
)
# 2. Set the leaf context to be the `GibbsContext` wrapping `leafcontext(model.context)`.
gibbs_context = DynamicPPL.setleafcontext(model.context, gibbs_context_inner)
return DynamicPPL.contextualize(model, gibbs_context), gibbs_context_inner
end
|
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
)
# Insert the `GibbsContext` just before the leaf.
# 1. Extract the `leafcontext` from `model` and wrap in `GibbsContext`.
gibbs_context_inner = GibbsContext(
target_variables, Ref(varinfo), DynamicPPL.leafcontext(model.context)
)
# 2. Set the leaf context to be the `GibbsContext` wrapping `leafcontext(model.context)`.
gibbs_context = DynamicPPL.setleafcontext(model.context, gibbs_context_inner)
return DynamicPPL.contextualize(model, gibbs_context), gibbs_context_inner
end
|
make_conditional
| 248
| 259
|
src/mcmc/gibbs.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
the child context that tilde calls will eventually be passed onto.
"""
context::Ctx
function GibbsContext(target_varnames, global_varinfo, context)
if !can_be_wrapped(context)
error("GibbsContext can only wrap a leaf or prefix context, not a $(context).")
end
target_varnames = tuple(target_varnames...) # Allow vectors.
return new{typeof(target_varnames),typeof(global_varinfo),typeof(context)}(
target_varnames, global_varinfo, context
)
end
end
function GibbsContext(target_varnames, global_varinfo)
return GibbsContext(target_varnames, global_varinfo, DynamicPPL.DefaultContext())
end
DynamicPPL.NodeTrait(::GibbsContext) = DynamicPPL.IsParent()
##CHUNK 2
target_varnames, global_varinfo, context
)
end
end
function GibbsContext(target_varnames, global_varinfo)
return GibbsContext(target_varnames, global_varinfo, DynamicPPL.DefaultContext())
end
DynamicPPL.NodeTrait(::GibbsContext) = DynamicPPL.IsParent()
DynamicPPL.childcontext(context::GibbsContext) = context.context
function DynamicPPL.setchildcontext(context::GibbsContext, childcontext)
return GibbsContext(
context.target_varnames, Ref(context.global_varinfo[]), childcontext
)
end
get_global_varinfo(context::GibbsContext) = context.global_varinfo[]
function set_global_varinfo!(context::GibbsContext, new_global_varinfo)
##CHUNK 3
DynamicPPL.childcontext(context::GibbsContext) = context.context
function DynamicPPL.setchildcontext(context::GibbsContext, childcontext)
return GibbsContext(
context.target_varnames, Ref(context.global_varinfo[]), childcontext
)
end
get_global_varinfo(context::GibbsContext) = context.global_varinfo[]
function set_global_varinfo!(context::GibbsContext, new_global_varinfo)
context.global_varinfo[] = new_global_varinfo
return nothing
end
# has and get
function has_conditioned_gibbs(context::GibbsContext, vn::VarName)
return DynamicPPL.haskey(get_global_varinfo(context), vn)
end
function has_conditioned_gibbs(context::GibbsContext, vns::AbstractArray{<:VarName})
num_conditioned = count(Iterators.map(Base.Fix1(has_conditioned_gibbs, context), vns))
##CHUNK 4
# If we run this with `Gibbs(@varname(a.x) => MH())`, then when we are
# executing the submodel, the `context` will contain the `@varname(a.x)`
# variable; `child_context` will contain `PrefixContext(@varname(a))`; and
# `vn` will just be `@varname(x)`. If we just simply run
# `is_target_varname(context, vn)`, it will return false, and everything
# will be messed up.
# TODO(penelopeysm): This 'problem' could be solved if we made GibbsContext a
# leaf context and wrapped the PrefixContext _above_ the GibbsContext, so
# that the prefixing would be handled by tilde_assume(::PrefixContext, ...)
# _before_ we hit this method.
# In the current state of GibbsContext, doing this would require
# special-casing the way PrefixContext is used to wrap the leaf context.
# This is very inconvenient because PrefixContext's behaviour is defined in
# DynamicPPL, and we would basically have to create a new method in Turing
# and override it for GibbsContext. Indeed, a better way to do this would
# be to make GibbsContext a leaf context. In this case, we would be able to
# rely on the existing behaviour of DynamicPPL.make_evaluate_args_and_kwargs
# to correctly wrap the PrefixContext around the GibbsContext. This is very
# tricky to correctly do now, but once we remove the other leaf contexts
# (i.e. PriorContext and LikelihoodContext), we should be able to do this.
##CHUNK 5
# As above but with an RNG.
function DynamicPPL.tilde_assume(
rng::Random.AbstractRNG, context::GibbsContext, sampler, right, vn, vi
)
# See comment in the above, rng-less version of this method for an explanation.
child_context = DynamicPPL.childcontext(context)
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
DynamicPPL.tilde_assume(rng, child_context, sampler, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
value, lp, new_global_vi = DynamicPPL.tilde_assume(
rng,
child_context,
##CHUNK 6
# In the current state of GibbsContext, doing this would require
# special-casing the way PrefixContext is used to wrap the leaf context.
# This is very inconvenient because PrefixContext's behaviour is defined in
# DynamicPPL, and we would basically have to create a new method in Turing
# and override it for GibbsContext. Indeed, a better way to do this would
# be to make GibbsContext a leaf context. In this case, we would be able to
# rely on the existing behaviour of DynamicPPL.make_evaluate_args_and_kwargs
# to correctly wrap the PrefixContext around the GibbsContext. This is very
# tricky to correctly do now, but once we remove the other leaf contexts
# (i.e. PriorContext and LikelihoodContext), we should be able to do this.
# This is already implemented in
# https://github.com/TuringLang/DynamicPPL.jl/pull/885/ but not yet
# released. Exciting!
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
# Fall back to the default behavior.
DynamicPPL.tilde_assume(child_context, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
# Short-circuit the tilde assume if `vn` is present in `context`.
##CHUNK 7
# rather than the `observe` pipeline for the conditioned variables.
"""
GibbsContext(target_varnames, global_varinfo, context)
A context used in the implementation of the Turing.jl Gibbs sampler.
There will be one `GibbsContext` for each iteration of a component sampler.
`target_varnames` is a a tuple of `VarName`s that the current component sampler
is sampling. For those `VarName`s, `GibbsContext` will just pass `tilde_assume`
calls to its child context. For other variables, their values will be fixed to
the values they have in `global_varinfo`.
# Fields
$(FIELDS)
"""
struct GibbsContext{
VNs<:Tuple{Vararg{VarName}},GVI<:Ref{<:AbstractVarInfo},Ctx<:DynamicPPL.AbstractContext
} <: DynamicPPL.AbstractContext
"""
##CHUNK 8
# Tilde pipeline
function DynamicPPL.tilde_assume(context::GibbsContext, right, vn, vi)
child_context = DynamicPPL.childcontext(context)
# Note that `child_context` may contain `PrefixContext`s -- in which case
# we need to make sure that vn is appropriately prefixed before we handle
# the `GibbsContext` behaviour below. For example, consider the following:
# @model inner() = x ~ Normal()
# @model outer() = a ~ to_submodel(inner())
# If we run this with `Gibbs(@varname(a.x) => MH())`, then when we are
# executing the submodel, the `context` will contain the `@varname(a.x)`
# variable; `child_context` will contain `PrefixContext(@varname(a))`; and
# `vn` will just be `@varname(x)`. If we just simply run
# `is_target_varname(context, vn)`, it will return false, and everything
# will be messed up.
# TODO(penelopeysm): This 'problem' could be solved if we made GibbsContext a
# leaf context and wrapped the PrefixContext _above_ the GibbsContext, so
# that the prefixing would be handled by tilde_assume(::PrefixContext, ...)
# _before_ we hit this method.
##CHUNK 9
"""
make_conditional(model, target_variables, varinfo)
Return a new, conditioned model for a component of a Gibbs sampler.
# Arguments
- `model::DynamicPPL.Model`: The model to condition.
- `target_variables::AbstractVector{<:VarName}`: The target variables of the component
sampler. These will _not_ be conditioned.
- `varinfo::DynamicPPL.AbstractVarInfo`: Values for all variables in the model. All the
values in `varinfo` but not in `target_variables` will be conditioned to the values they
have in `varinfo`.
# Returns
- A new model with the variables _not_ in `target_variables` conditioned.
- The `GibbsContext` object that will be used to condition the variables. This is necessary
because evaluation can mutate its `global_varinfo` field, which we need to access later.
end
wrap_in_sampler(x::AbstractMCMC.AbstractSampler) = x
##CHUNK 10
# This is already implemented in
# https://github.com/TuringLang/DynamicPPL.jl/pull/885/ but not yet
# released. Exciting!
vn, child_context = DynamicPPL.prefix_and_strip_contexts(child_context, vn)
return if is_target_varname(context, vn)
# Fall back to the default behavior.
DynamicPPL.tilde_assume(child_context, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
# Short-circuit the tilde assume if `vn` is present in `context`.
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
# If the varname has not been conditioned on, nor is it a target variable, its
# presumably a new variable that should be sampled from its prior. We need to add
# this new variable to the global `varinfo` of the context, but not to the local one
# being used by the current sampler.
value, lp, new_global_vi = DynamicPPL.tilde_assume(
|
302
| 317
|
Turing.jl
| 352
|
function Gibbs(varnames, samplers)
if length(varnames) != length(samplers)
throw(ArgumentError("Number of varnames and samplers must match."))
end
for spl in samplers
if !isgibbscomponent(spl)
msg = "All samplers must be valid Gibbs components, $(spl) is not."
throw(ArgumentError(msg))
end
end
samplers = tuple(map(wrap_in_sampler, samplers)...)
varnames = tuple(map(to_varname_list, varnames)...)
return new{length(samplers),typeof(varnames),typeof(samplers)}(varnames, samplers)
end
|
function Gibbs(varnames, samplers)
if length(varnames) != length(samplers)
throw(ArgumentError("Number of varnames and samplers must match."))
end
for spl in samplers
if !isgibbscomponent(spl)
msg = "All samplers must be valid Gibbs components, $(spl) is not."
throw(ArgumentError(msg))
end
end
samplers = tuple(map(wrap_in_sampler, samplers)...)
varnames = tuple(map(to_varname_list, varnames)...)
return new{length(samplers),typeof(varnames),typeof(samplers)}(varnames, samplers)
end
|
[
302,
317
] |
function Gibbs(varnames, samplers)
if length(varnames) != length(samplers)
throw(ArgumentError("Number of varnames and samplers must match."))
end
for spl in samplers
if !isgibbscomponent(spl)
msg = "All samplers must be valid Gibbs components, $(spl) is not."
throw(ArgumentError(msg))
end
end
samplers = tuple(map(wrap_in_sampler, samplers)...)
varnames = tuple(map(to_varname_list, varnames)...)
return new{length(samplers),typeof(varnames),typeof(samplers)}(varnames, samplers)
end
|
function Gibbs(varnames, samplers)
if length(varnames) != length(samplers)
throw(ArgumentError("Number of varnames and samplers must match."))
end
for spl in samplers
if !isgibbscomponent(spl)
msg = "All samplers must be valid Gibbs components, $(spl) is not."
throw(ArgumentError(msg))
end
end
samplers = tuple(map(wrap_in_sampler, samplers)...)
varnames = tuple(map(to_varname_list, varnames)...)
return new{length(samplers),typeof(varnames),typeof(samplers)}(varnames, samplers)
end
|
Gibbs
| 302
| 317
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
end
end
end
@testset "Invalid Gibbs constructor" begin
# More samplers than varnames or vice versa
@test_throws ArgumentError Gibbs((@varname(s), @varname(m)), (NUTS(), NUTS(), NUTS()))
@test_throws ArgumentError Gibbs(
(@varname(s), @varname(m), @varname(x)), (NUTS(), NUTS())
)
# Invalid samplers
@test_throws ArgumentError Gibbs(@varname(s) => IS())
@test_throws ArgumentError Gibbs(@varname(s) => Emcee(10, 2.0))
@test_throws ArgumentError Gibbs(
@varname(s) => SGHMC(; learning_rate=0.01, momentum_decay=0.1)
)
@test_throws ArgumentError Gibbs(
@varname(s) => SGLD(; stepsize=PolynomialStepsize(0.25))
)
# Values that we don't know how to convert to VarNames.
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
wrap_in_sampler(x::AbstractMCMC.AbstractSampler) = x
wrap_in_sampler(x::InferenceAlgorithm) = DynamicPPL.Sampler(x)
to_varname(x::VarName) = x
to_varname(x::Symbol) = VarName{x}()
to_varname_list(x::Union{VarName,Symbol}) = [to_varname(x)]
# Any other value is assumed to be an iterable of VarNames and Symbols.
to_varname_list(t) = collect(map(to_varname, t))
"""
Gibbs
A type representing a Gibbs sampler.
# Constructors
`Gibbs` needs to be given a set of pairs of variable names and samplers. Instead of a single
variable name per sampler, one can also give an iterable of variables, all of which are
sampled by the same component sampler.
##CHUNK 2
"""
Gibbs
A type representing a Gibbs sampler.
# Constructors
`Gibbs` needs to be given a set of pairs of variable names and samplers. Instead of a single
variable name per sampler, one can also give an iterable of variables, all of which are
sampled by the same component sampler.
Each variable name can be given as either a `Symbol` or a `VarName`.
Some examples of valid constructors are:
```julia
Gibbs(:x => NUTS(), :y => MH())
Gibbs(@varname(x) => NUTS(), @varname(y) => MH())
Gibbs((@varname(x), :y) => NUTS(), :z => MH())
```
##CHUNK 3
Each variable name can be given as either a `Symbol` or a `VarName`.
Some examples of valid constructors are:
```julia
Gibbs(:x => NUTS(), :y => MH())
Gibbs(@varname(x) => NUTS(), @varname(y) => MH())
Gibbs((@varname(x), :y) => NUTS(), :z => MH())
```
# Fields
$(TYPEDFIELDS)
"""
struct Gibbs{N,V<:NTuple{N,AbstractVector{<:VarName}},A<:NTuple{N,Any}} <:
InferenceAlgorithm
# TODO(mhauru) Revisit whether A should have a fixed element type once
# InferenceAlgorithm/Sampler types have been cleaned up.
"varnames representing variables for each sampler"
varnames::V
"samplers for each entry in `varnames`"
##CHUNK 4
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
##CHUNK 5
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
##CHUNK 6
varname_vecs,
samplers,
vi,
states=();
initial_params=nothing,
kwargs...,
)
# End recursion
if isempty(varname_vecs) && isempty(samplers)
return vi, states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
##CHUNK 7
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
Take the first step of MCMC for the first component sampler, and call the same function
##CHUNK 8
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
##CHUNK 9
return get_global_varinfo(context)[vn]
end
function get_conditioned_gibbs(context::GibbsContext, vns::AbstractArray{<:VarName})
return map(Base.Fix1(get_conditioned_gibbs, context), vns)
end
function is_target_varname(ctx::GibbsContext, vn::VarName)
return any(Base.Fix2(AbstractPPL.subsumes, vn), ctx.target_varnames)
end
function is_target_varname(context::GibbsContext, vns::AbstractArray{<:VarName})
num_target = count(Iterators.map(Base.Fix1(is_target_varname, context), vns))
if (num_target != 0) && (num_target != length(vns))
error(
"Some but not all of the variables in `vns` are target variables. " *
"Having mixed targeting like this is not supported in GibbsContext.",
)
end
return num_target > 0
end
|
338
| 352
|
Turing.jl
| 353
|
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
end
return vi
end
|
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
end
return vi
end
|
[
338,
352
] |
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
end
return vi
end
|
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
end
return vi
end
|
initial_varinfo
| 338
| 352
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
"failed to find valid initial parameters in $(max_attempts) tries. This may indicate an error with the model or AD backend; please open an issue at https://github.com/TuringLang/Turing.jl/issues",
)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
nadapts=0,
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
vi = DynamicPPL.link(vi_original, model)
# Extract parameters.
theta = vi[:]
# Create a Hamiltonian.
##CHUNK 2
# Resample and try again.
# NOTE: varinfo has to be linked to make sure this samples in unconstrained space
varinfo = last(
DynamicPPL.evaluate!!(model, rng, varinfo, DynamicPPL.SampleFromUniform())
)
end
# if we failed to find valid initial parameters, error
return error(
"failed to find valid initial parameters in $(max_attempts) tries. This may indicate an error with the model or AD backend; please open an issue at https://github.com/TuringLang/Turing.jl/issues",
)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
##CHUNK 2
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
function make_updated_varinfo(
f::DynamicPPL.LogDensityFunction, external_transition, external_state
)
# Set the parameters.
# NOTE: This is Turing.Inference.getparams, not AbstractMCMC.getparams (!!!!!)
# The latter uses the state rather than the transition.
# TODO(penelopeysm): Make this use AbstractMCMC.getparams instead
new_parameters = getparams(f.model, external_transition)
new_varinfo = DynamicPPL.unflatten(f.varinfo, new_parameters)
# Set (or recalculate, if needed) the log density.
new_logp = getlogp_external(external_transition, external_state)
return if ismissing(new_logp)
last(DynamicPPL.evaluate!!(f.model, new_varinfo, f.context))
else
DynamicPPL.setlogp!!(new_varinfo, new_logp)
end
end
# TODO: Do we also support `resume`, etc?
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
kwargs...,
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
# TODO(penelopeysm): As with in src/optimisation/Optimisation.jl, unclear
# whether initialisation is really necessary at all
vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
vi = DynamicPPL.link(vi, f.ldf.model)
f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
init_vals = DynamicPPL.getparams(f.ldf)
# Optimize!
M = Optim.optimize(Optim.only_fg!(f), init_vals, optimizer, options, args...; kwargs...)
# Warn the user if the optimization did not converge.
if !Optim.converged(M)
@warn """
Optimization did not converge! You may need to correct your model or adjust the
Optim parameters.
"""
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
# expected_logpdf = logpdf(Beta(2, 2), a) + logpdf(Normal(a), b)
# @test all(chn[:lp] .== expected_logpdf)
# @test all(chn[:logprior] .== expected_logpdf)
# @test all(chn[:loglikelihood] .== 0.0)
end
function initialize_nuts(model::DynamicPPL.Model)
# Create a linked varinfo
vi = DynamicPPL.VarInfo(model)
linked_vi = DynamicPPL.link!!(vi, model)
# Create a LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, linked_vi; adtype=Turing.DEFAULT_ADTYPE)
# Choose parameter dimensionality and initial parameter value
D = LogDensityProblems.dimension(f)
initial_θ = rand(D) .- 0.5
# Define a Hamiltonian system
metric = AdvancedHMC.DiagEuclideanMetric(D)
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
vi = DynamicPPL.initialize_parameters!!(vi, init, model)
# Update log joint probability.
last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromPrior()))
end
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
|
354
| 377
|
Turing.jl
| 354
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
[
354,
377
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
AbstractMCMC.step
| 354
| 377
|
src/mcmc/gibbs.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
return vi
end
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
##CHUNK 2
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
##CHUNK 3
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
##CHUNK 4
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
##CHUNK 5
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
##CHUNK 6
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
##CHUNK 7
function Gibbs(algs::Pair...)
return Gibbs(map(first, algs), map(last, algs))
end
struct GibbsState{V<:DynamicPPL.AbstractVarInfo,S}
vi::V
states::S
end
varinfo(state::GibbsState) = state.vi
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
##CHUNK 8
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
##CHUNK 9
conditioned_model,
sampler;
# FIXME: This will cause issues if the sampler expects initial params in unconstrained space.
# This is not the case for any samplers in Turing.jl, but will be for external samplers, etc.
initial_params=initial_params_local,
kwargs...,
)
new_vi_local = varinfo(new_state)
# Merge in any new variables that were introduced during the step, but that
# were not in the domain of the current sampler.
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
rng,
model,
step_function,
varname_vecs_tail,
##CHUNK 10
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
Take the first step of MCMC for the first component sampler, and call the same function
recursively on the remaining samplers, until no samplers remain. Return the global VarInfo
and a tuple of initial states for all component samplers.
The `step_function` argument should always be either AbstractMCMC.step or
AbstractMCMC.step_warmup.
"""
function gibbs_initialstep_recursive(
rng,
|
379
| 402
|
Turing.jl
| 355
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
[
379,
402
] |
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
|
AbstractMCMC.step_warmup
| 379
| 402
|
src/mcmc/gibbs.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
return vi
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
##CHUNK 2
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
##CHUNK 3
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
##CHUNK 4
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
end
##CHUNK 5
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
##CHUNK 6
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
##CHUNK 7
function Gibbs(algs::Pair...)
return Gibbs(map(first, algs), map(last, algs))
end
struct GibbsState{V<:DynamicPPL.AbstractVarInfo,S}
vi::V
states::S
end
varinfo(state::GibbsState) = state.vi
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
##CHUNK 8
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
end
"""
Take the first step of MCMC for the first component sampler, and call the same function
recursively on the remaining samplers, until no samplers remain. Return the global VarInfo
and a tuple of initial states for all component samplers.
The `step_function` argument should always be either AbstractMCMC.step or
AbstractMCMC.step_warmup.
"""
function gibbs_initialstep_recursive(
rng,
##CHUNK 9
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
##CHUNK 10
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
|
412
| 470
|
Turing.jl
| 356
|
function gibbs_initialstep_recursive(
rng,
model,
step_function::Function,
varname_vecs,
samplers,
vi,
states=();
initial_params=nothing,
kwargs...,
)
# End recursion
if isempty(varname_vecs) && isempty(samplers)
return vi, states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
end
# Construct the conditioned model.
conditioned_model, context = make_conditional(model, varnames, vi)
# Take initial step with the current sampler.
_, new_state = step_function(
rng,
conditioned_model,
sampler;
# FIXME: This will cause issues if the sampler expects initial params in unconstrained space.
# This is not the case for any samplers in Turing.jl, but will be for external samplers, etc.
initial_params=initial_params_local,
kwargs...,
)
new_vi_local = varinfo(new_state)
# Merge in any new variables that were introduced during the step, but that
# were not in the domain of the current sampler.
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
|
function gibbs_initialstep_recursive(
rng,
model,
step_function::Function,
varname_vecs,
samplers,
vi,
states=();
initial_params=nothing,
kwargs...,
)
# End recursion
if isempty(varname_vecs) && isempty(samplers)
return vi, states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
end
# Construct the conditioned model.
conditioned_model, context = make_conditional(model, varnames, vi)
# Take initial step with the current sampler.
_, new_state = step_function(
rng,
conditioned_model,
sampler;
# FIXME: This will cause issues if the sampler expects initial params in unconstrained space.
# This is not the case for any samplers in Turing.jl, but will be for external samplers, etc.
initial_params=initial_params_local,
kwargs...,
)
new_vi_local = varinfo(new_state)
# Merge in any new variables that were introduced during the step, but that
# were not in the domain of the current sampler.
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
|
[
412,
470
] |
function gibbs_initialstep_recursive(
rng,
model,
step_function::Function,
varname_vecs,
samplers,
vi,
states=();
initial_params=nothing,
kwargs...,
)
# End recursion
if isempty(varname_vecs) && isempty(samplers)
return vi, states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
end
# Construct the conditioned model.
conditioned_model, context = make_conditional(model, varnames, vi)
# Take initial step with the current sampler.
_, new_state = step_function(
rng,
conditioned_model,
sampler;
# FIXME: This will cause issues if the sampler expects initial params in unconstrained space.
# This is not the case for any samplers in Turing.jl, but will be for external samplers, etc.
initial_params=initial_params_local,
kwargs...,
)
new_vi_local = varinfo(new_state)
# Merge in any new variables that were introduced during the step, but that
# were not in the domain of the current sampler.
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
|
function gibbs_initialstep_recursive(
rng,
model,
step_function::Function,
varname_vecs,
samplers,
vi,
states=();
initial_params=nothing,
kwargs...,
)
# End recursion
if isempty(varname_vecs) && isempty(samplers)
return vi, states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
end
# Construct the conditioned model.
conditioned_model, context = make_conditional(model, varnames, vi)
# Take initial step with the current sampler.
_, new_state = step_function(
rng,
conditioned_model,
sampler;
# FIXME: This will cause issues if the sampler expects initial params in unconstrained space.
# This is not the case for any samplers in Turing.jl, but will be for external samplers, etc.
initial_params=initial_params_local,
kwargs...,
)
new_vi_local = varinfo(new_state)
# Merge in any new variables that were introduced during the step, but that
# were not in the domain of the current sampler.
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
|
gibbs_initialstep_recursive
| 412
| 470
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
inner::Alg
end
unwrap_sampler(sampler::DynamicPPL.Sampler{<:AlgWrapper}) =
DynamicPPL.Sampler(sampler.alg.inner)
# Methods we need to define to be able to use AlgWrapper instead of an actual algorithm.
# They all just propagate the call to the inner algorithm.
Inference.isgibbscomponent(wrap::AlgWrapper) = Inference.isgibbscomponent(wrap.inner)
function Inference.setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::DynamicPPL.Sampler{<:AlgWrapper},
state,
params::DynamicPPL.AbstractVarInfo,
)
return Inference.setparams_varinfo!!(model, unwrap_sampler(sampler), state, params)
end
# targets_and_algs will be a list of tuples, where the first element is the target_vns
# of a component sampler, and the second element is the component sampler itself.
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
new_states=();
kwargs...,
)
# End recursion.
if isempty(varname_vecs) && isempty(samplers) && isempty(states)
return global_vi, new_states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
state, states_tail... = states
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = DynamicPPL.subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
##CHUNK 2
state, states_tail... = states
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = DynamicPPL.subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
##CHUNK 3
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
##CHUNK 4
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
##CHUNK 5
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
"""
make_conditional(model, target_variables, varinfo)
Return a new, conditioned model for a component of a Gibbs sampler.
# Arguments
- `model::DynamicPPL.Model`: The model to condition.
- `target_variables::AbstractVector{<:VarName}`: The target variables of the component
sampler. These will _not_ be conditioned.
- `varinfo::DynamicPPL.AbstractVarInfo`: Values for all variables in the model. All the
##CHUNK 6
"""
make_conditional(model, target_variables, varinfo)
Return a new, conditioned model for a component of a Gibbs sampler.
# Arguments
- `model::DynamicPPL.Model`: The model to condition.
- `target_variables::AbstractVector{<:VarName}`: The target variables of the component
sampler. These will _not_ be conditioned.
- `varinfo::DynamicPPL.AbstractVarInfo`: Values for all variables in the model. All the
values in `varinfo` but not in `target_variables` will be conditioned to the values they
have in `varinfo`.
# Returns
- A new model with the variables _not_ in `target_variables` conditioned.
- The `GibbsContext` object that will be used to condition the variables. This is necessary
because evaluation can mutate its `global_varinfo` field, which we need to access later.
"""
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
##CHUNK 7
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
|
472
| 490
|
Turing.jl
| 357
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
[
472,
490
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
length
| 472
| 490
|
src/mcmc/gibbs.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
##CHUNK 2
end
return vi
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
##CHUNK 3
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
##CHUNK 4
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
##CHUNK 5
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
##CHUNK 6
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
##CHUNK 7
function Gibbs(algs::Pair...)
return Gibbs(map(first, algs), map(last, algs))
end
struct GibbsState{V<:DynamicPPL.AbstractVarInfo,S}
vi::V
states::S
end
varinfo(state::GibbsState) = state.vi
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
##CHUNK 8
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
Take the first step of MCMC for the first component sampler, and call the same function
recursively on the remaining samplers, until no samplers remain. Return the global VarInfo
and a tuple of initial states for all component samplers.
The `step_function` argument should always be either AbstractMCMC.step or
AbstractMCMC.step_warmup.
"""
function gibbs_initialstep_recursive(
rng,
model,
step_function::Function,
varname_vecs,
samplers,
vi,
states=();
##CHUNK 9
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
Take the first step of MCMC for the first component sampler, and call the same function
recursively on the remaining samplers, until no samplers remain. Return the global VarInfo
and a tuple of initial states for all component samplers.
The `step_function` argument should always be either AbstractMCMC.step or
##CHUNK 10
throw(ArgumentError(msg))
end
end
samplers = tuple(map(wrap_in_sampler, samplers)...)
varnames = tuple(map(to_varname_list, varnames)...)
return new{length(samplers),typeof(varnames),typeof(samplers)}(varnames, samplers)
end
end
function Gibbs(algs::Pair...)
return Gibbs(map(first, algs), map(last, algs))
end
struct GibbsState{V<:DynamicPPL.AbstractVarInfo,S}
vi::V
states::S
end
varinfo(state::GibbsState) = state.vi
|
492
| 510
|
Turing.jl
| 358
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
[
492,
510
] |
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
|
length
| 492
| 510
|
src/mcmc/gibbs.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
##CHUNK 2
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
##CHUNK 3
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
##CHUNK 4
end
return vi
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
##CHUNK 5
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
##CHUNK 6
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
##CHUNK 7
function Gibbs(algs::Pair...)
return Gibbs(map(first, algs), map(last, algs))
end
struct GibbsState{V<:DynamicPPL.AbstractVarInfo,S}
vi::V
states::S
end
varinfo(state::GibbsState) = state.vi
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
##CHUNK 8
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
Take the first step of MCMC for the first component sampler, and call the same function
recursively on the remaining samplers, until no samplers remain. Return the global VarInfo
and a tuple of initial states for all component samplers.
The `step_function` argument should always be either AbstractMCMC.step or
##CHUNK 9
# end
# end
return varinfo_local
end
"""
Run a Gibbs step for the first varname/sampler/state tuple, and recursively call the same
function on the tail, until there are no more samplers left.
The `step_function` argument should always be either AbstractMCMC.step or
AbstractMCMC.step_warmup.
"""
function gibbs_step_recursive(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
step_function::Function,
varname_vecs,
samplers,
states,
global_vi,
##CHUNK 10
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
Take the first step of MCMC for the first component sampler, and call the same function
recursively on the remaining samplers, until no samplers remain. Return the global VarInfo
and a tuple of initial states for all component samplers.
The `step_function` argument should always be either AbstractMCMC.step or
AbstractMCMC.step_warmup.
"""
function gibbs_initialstep_recursive(
rng,
model,
step_function::Function,
varname_vecs,
samplers,
vi,
states=();
|
526
| 537
|
Turing.jl
| 359
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob.
# NOTE: Using `leafcontext(model.context)` here is a no-op, as it will be concatenated
# with `model.context` before hitting `model.f`.
return last(DynamicPPL.evaluate!!(model, params, DynamicPPL.leafcontext(model.context)))
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob.
# NOTE: Using `leafcontext(model.context)` here is a no-op, as it will be concatenated
# with `model.context` before hitting `model.f`.
return last(DynamicPPL.evaluate!!(model, params, DynamicPPL.leafcontext(model.context)))
end
|
[
526,
537
] |
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob.
# NOTE: Using `leafcontext(model.context)` here is a no-op, as it will be concatenated
# with `model.context` before hitting `model.f`.
return last(DynamicPPL.evaluate!!(model, params, DynamicPPL.leafcontext(model.context)))
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob.
# NOTE: Using `leafcontext(model.context)` here is a no-op, as it will be concatenated
# with `model.context` before hitting `model.f`.
return last(DynamicPPL.evaluate!!(model, params, DynamicPPL.leafcontext(model.context)))
end
|
setparams_varinfo!!
| 526
| 537
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
function make_updated_varinfo(
f::DynamicPPL.LogDensityFunction, external_transition, external_state
)
# Set the parameters.
# NOTE: This is Turing.Inference.getparams, not AbstractMCMC.getparams (!!!!!)
# The latter uses the state rather than the transition.
# TODO(penelopeysm): Make this use AbstractMCMC.getparams instead
new_parameters = getparams(f.model, external_transition)
new_varinfo = DynamicPPL.unflatten(f.varinfo, new_parameters)
# Set (or recalculate, if needed) the log density.
new_logp = getlogp_external(external_transition, external_state)
return if ismissing(new_logp)
last(DynamicPPL.evaluate!!(f.model, new_varinfo, f.context))
else
DynamicPPL.setlogp!!(new_varinfo, new_logp)
end
end
# TODO: Do we also support `resume`, etc?
##CHUNK 2
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
##CHUNK 3
# Set (or recalculate, if needed) the log density.
new_logp = getlogp_external(external_transition, external_state)
return if ismissing(new_logp)
last(DynamicPPL.evaluate!!(f.model, new_varinfo, f.context))
else
DynamicPPL.setlogp!!(new_varinfo, new_logp)
end
end
# TODO: Do we also support `resume`, etc?
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
##CHUNK 4
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
vi = DynamicPPL.initialize_parameters!!(vi, init, model)
# Update log joint probability.
last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromPrior()))
end
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
##CHUNK 2
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
end
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
##CHUNK 3
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, params, logdensity)
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
state::HMCState,
params::AbstractVarInfo,
##CHUNK 4
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
end
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
|
539
| 550
|
Turing.jl
| 360
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
|
[
539,
550
] |
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
|
setparams_varinfo!!
| 539
| 550
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
# and only this interface allows us to use the sampler in Turing.
struct MyTransition{V<:AbstractVector}
params::V
end
# Samplers need to implement `Turing.Inference.getparams`.
Turing.Inference.getparams(::DynamicPPL.Model, t::MyTransition) = t.params
# State doesn't matter (but we need to carry the params through to the next
# iteration).
struct MyState{V<:AbstractVector}
params::V
end
# externalsamplers must accept LogDensityModel inside their step function.
# By default Turing gives the externalsampler a LDF constructed with
# adtype=ForwardDiff, so we should expect that inside the sampler we can
# call both `logdensity` and `logdensity_and_gradient`.
#
# The behaviour of this sampler is to simply calculate logp and its
# gradient, and then return the same values.
#
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
# Update the parameters if provided.
if initial_params !== nothing
length(initial_params) == n ||
throw(ArgumentError("initial parameters have to be specified for each walker"))
vis = map(vis, initial_params) do vi, init
vi = DynamicPPL.initialize_parameters!!(vi, init, model)
# Update log joint probability.
last(DynamicPPL.evaluate!!(model, rng, vi, SampleFromPrior()))
end
end
# Compute initial transition and states.
transition = map(Base.Fix1(Transition, model), vis)
# TODO: Make compatible with immutable `AbstractVarInfo`.
state = EmceeState(
vis[1],
map(vis) do vi
vi = DynamicPPL.link!!(vi, model)
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob.
# NOTE: Using `leafcontext(model.context)` here is a no-op, as it will be concatenated
# with `model.context` before hitting `model.f`.
return last(DynamicPPL.evaluate!!(model, params, DynamicPPL.leafcontext(model.context)))
end
end
##CHUNK 2
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob.
# NOTE: Using `leafcontext(model.context)` here is a no-op, as it will be concatenated
# with `model.context` before hitting `model.f`.
return last(DynamicPPL.evaluate!!(model, params, DynamicPPL.leafcontext(model.context)))
end
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
##CHUNK 3
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, params, logdensity)
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
state::HMCState,
params::AbstractVarInfo,
##CHUNK 4
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
|
552
| 565
|
Turing.jl
| 361
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, logdensity)
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, logdensity)
end
|
[
552,
565
] |
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, logdensity)
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, logdensity)
end
|
setparams_varinfo!!
| 552
| 565
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
##CHUNK 2
varinfo = DynamicPPL.link(varinfo, model)
end
end
# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
##CHUNK 3
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
##CHUNK 4
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
##CHUNK 5
# Set (or recalculate, if needed) the log density.
new_logp = getlogp_external(external_transition, external_state)
return if ismissing(new_logp)
last(DynamicPPL.evaluate!!(f.model, new_varinfo, f.context))
else
DynamicPPL.setlogp!!(new_varinfo, new_logp)
end
end
# TODO: Do we also support `resume`, etc?
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
##CHUNK 6
# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
varinfo = DynamicPPL.link(varinfo, model)
end
end
# Construct LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, varinfo; adtype=alg.adtype)
# Then just call `AbstractMCMC.step` with the right arguments.
if initial_state === nothing
transition_inner, state_inner = AbstractMCMC.step(
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::AbstractMCMC.LogDensityModel,
sampler::MySampler,
state::MyState;
kwargs...,
)
# Step >= 1
params = state.params
ldf = model.logdensity
lp = LogDensityProblems.logdensity(ldf, params)
@test lp isa Real
lp, grad = LogDensityProblems.logdensity_and_gradient(ldf, params)
@test lp isa Real
@test grad isa AbstractVector{<:Real}
return MyTransition(params), MyState(params)
end
@model function test_external_sampler()
a ~ Beta(2, 2)
return b ~ Normal(a)
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob.
# NOTE: Using `leafcontext(model.context)` here is a no-op, as it will be concatenated
# with `model.context` before hitting `model.f`.
return last(DynamicPPL.evaluate!!(model, params, DynamicPPL.leafcontext(model.context)))
end
function setparams_varinfo!!(
##CHUNK 2
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
state::HMCState,
params::AbstractVarInfo,
)
θ_new = params[:]
##CHUNK 3
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
|
567
| 582
|
Turing.jl
| 362
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
state::HMCState,
params::AbstractVarInfo,
)
θ_new = params[:]
hamiltonian = get_hamiltonian(model, sampler, params, state, length(θ_new))
# Update the parameter values in `state.z`.
# TODO: Avoid mutation
z = state.z
resize!(z.θ, length(θ_new))
z.θ .= θ_new
return HMCState(params, state.i, state.kernel, hamiltonian, z, state.adaptor)
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
state::HMCState,
params::AbstractVarInfo,
)
θ_new = params[:]
hamiltonian = get_hamiltonian(model, sampler, params, state, length(θ_new))
# Update the parameter values in `state.z`.
# TODO: Avoid mutation
z = state.z
resize!(z.θ, length(θ_new))
z.θ .= θ_new
return HMCState(params, state.i, state.kernel, hamiltonian, z, state.adaptor)
end
|
[
567,
582
] |
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
state::HMCState,
params::AbstractVarInfo,
)
θ_new = params[:]
hamiltonian = get_hamiltonian(model, sampler, params, state, length(θ_new))
# Update the parameter values in `state.z`.
# TODO: Avoid mutation
z = state.z
resize!(z.θ, length(θ_new))
z.θ .= θ_new
return HMCState(params, state.i, state.kernel, hamiltonian, z, state.adaptor)
end
|
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
state::HMCState,
params::AbstractVarInfo,
)
θ_new = params[:]
hamiltonian = get_hamiltonian(model, sampler, params, state, length(θ_new))
# Update the parameter values in `state.z`.
# TODO: Avoid mutation
z = state.z
resize!(z.θ, length(θ_new))
z.θ .= θ_new
return HMCState(params, state.i, state.kernel, hamiltonian, z, state.adaptor)
end
|
setparams_varinfo!!
| 567
| 582
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
transition = Transition(model, vi, t)
state = HMCState(vi, 1, kernel, hamiltonian, t.z, adaptor)
return transition, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Hamiltonian},
state::HMCState;
nadapts=0,
kwargs...,
)
# Get step size
@debug "current ϵ" getstepsize(spl, state)
# Compute transition.
hamiltonian = state.hamiltonian
z = state.z
##CHUNK 2
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
# Update `vi` based on acceptance
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
else
vi = DynamicPPL.unflatten(vi, theta)
vi = setlogp!!(vi, log_density_old)
end
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
return sample, newstate
end
end
##CHUNK 2
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 3
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 4
# Perform initial step.
results = DynamicHMC.mcmc_keep_warmup(
rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
)
steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
varinfo(state::TuringState) = state.varinfo
varinfo(state::AbstractVarInfo) = state
getparams(::DynamicPPL.Model, transition::AdvancedHMC.Transition) = transition.z.θ
function getparams(model::DynamicPPL.Model, state::AdvancedHMC.HMCState)
return getparams(model, state.transition)
end
getstats(transition::AdvancedHMC.Transition) = transition.stat
getparams(::DynamicPPL.Model, transition::AdvancedMH.Transition) = transition.params
function make_updated_varinfo(
f::DynamicPPL.LogDensityFunction, external_transition, external_state
)
# Set the parameters.
# NOTE: This is Turing.Inference.getparams, not AbstractMCMC.getparams (!!!!!)
# The latter uses the state rather than the transition.
# TODO(penelopeysm): Make this use AbstractMCMC.getparams instead
new_parameters = getparams(f.model, external_transition)
new_varinfo = DynamicPPL.unflatten(f.varinfo, new_parameters)
##CHUNK 2
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
|
597
| 632
|
Turing.jl
| 363
|
function match_linking!!(varinfo_local, prev_state_local, model)
prev_varinfo_local = varinfo(prev_state_local)
was_linked = DynamicPPL.istrans(prev_varinfo_local)
is_linked = DynamicPPL.istrans(varinfo_local)
if was_linked && !is_linked
varinfo_local = DynamicPPL.link!!(varinfo_local, model)
elseif !was_linked && is_linked
varinfo_local = DynamicPPL.invlink!!(varinfo_local, model)
end
# TODO(mhauru) The above might run into trouble if some variables are linked and others
# are not. `istrans(varinfo)` returns an `all` over the individual variables. This could
# especially be a problem with dynamic models, where new variables may get introduced,
# but also in cases where component samplers have partial overlap in their target
# variables. The below is how I would like to implement this, but DynamicPPL at this
# time does not support linking individual variables selected by `VarName`. It soon
# should though, so come back to this.
# Issue ref: https://github.com/TuringLang/Turing.jl/issues/2401
# prev_links_dict = Dict(vn => DynamicPPL.istrans(prev_varinfo_local, vn) for vn in keys(prev_varinfo_local))
# any_linked = any(values(prev_links_dict))
# for vn in keys(varinfo_local)
# was_linked = if haskey(prev_varinfo_local, vn)
# prev_links_dict[vn]
# else
# # If the old state didn't have this variable, we assume it was linked if _any_
# # of the variables of the old state were linked.
# any_linked
# end
# is_linked = DynamicPPL.istrans(varinfo_local, vn)
# if was_linked && !is_linked
# varinfo_local = DynamicPPL.invlink!!(varinfo_local, vn)
# elseif !was_linked && is_linked
# varinfo_local = DynamicPPL.link!!(varinfo_local, vn)
# end
# end
return varinfo_local
end
|
function match_linking!!(varinfo_local, prev_state_local, model)
prev_varinfo_local = varinfo(prev_state_local)
was_linked = DynamicPPL.istrans(prev_varinfo_local)
is_linked = DynamicPPL.istrans(varinfo_local)
if was_linked && !is_linked
varinfo_local = DynamicPPL.link!!(varinfo_local, model)
elseif !was_linked && is_linked
varinfo_local = DynamicPPL.invlink!!(varinfo_local, model)
end
# TODO(mhauru) The above might run into trouble if some variables are linked and others
# are not. `istrans(varinfo)` returns an `all` over the individual variables. This could
# especially be a problem with dynamic models, where new variables may get introduced,
# but also in cases where component samplers have partial overlap in their target
# variables. The below is how I would like to implement this, but DynamicPPL at this
# time does not support linking individual variables selected by `VarName`. It soon
# should though, so come back to this.
# Issue ref: https://github.com/TuringLang/Turing.jl/issues/2401
# prev_links_dict = Dict(vn => DynamicPPL.istrans(prev_varinfo_local, vn) for vn in keys(prev_varinfo_local))
# any_linked = any(values(prev_links_dict))
# for vn in keys(varinfo_local)
# was_linked = if haskey(prev_varinfo_local, vn)
# prev_links_dict[vn]
# else
# # If the old state didn't have this variable, we assume it was linked if _any_
# # of the variables of the old state were linked.
# any_linked
# end
# is_linked = DynamicPPL.istrans(varinfo_local, vn)
# if was_linked && !is_linked
# varinfo_local = DynamicPPL.invlink!!(varinfo_local, vn)
# elseif !was_linked && is_linked
# varinfo_local = DynamicPPL.link!!(varinfo_local, vn)
# end
# end
return varinfo_local
end
|
[
597,
632
] |
function match_linking!!(varinfo_local, prev_state_local, model)
prev_varinfo_local = varinfo(prev_state_local)
was_linked = DynamicPPL.istrans(prev_varinfo_local)
is_linked = DynamicPPL.istrans(varinfo_local)
if was_linked && !is_linked
varinfo_local = DynamicPPL.link!!(varinfo_local, model)
elseif !was_linked && is_linked
varinfo_local = DynamicPPL.invlink!!(varinfo_local, model)
end
# TODO(mhauru) The above might run into trouble if some variables are linked and others
# are not. `istrans(varinfo)` returns an `all` over the individual variables. This could
# especially be a problem with dynamic models, where new variables may get introduced,
# but also in cases where component samplers have partial overlap in their target
# variables. The below is how I would like to implement this, but DynamicPPL at this
# time does not support linking individual variables selected by `VarName`. It soon
# should though, so come back to this.
# Issue ref: https://github.com/TuringLang/Turing.jl/issues/2401
# prev_links_dict = Dict(vn => DynamicPPL.istrans(prev_varinfo_local, vn) for vn in keys(prev_varinfo_local))
# any_linked = any(values(prev_links_dict))
# for vn in keys(varinfo_local)
# was_linked = if haskey(prev_varinfo_local, vn)
# prev_links_dict[vn]
# else
# # If the old state didn't have this variable, we assume it was linked if _any_
# # of the variables of the old state were linked.
# any_linked
# end
# is_linked = DynamicPPL.istrans(varinfo_local, vn)
# if was_linked && !is_linked
# varinfo_local = DynamicPPL.invlink!!(varinfo_local, vn)
# elseif !was_linked && is_linked
# varinfo_local = DynamicPPL.link!!(varinfo_local, vn)
# end
# end
return varinfo_local
end
|
function match_linking!!(varinfo_local, prev_state_local, model)
prev_varinfo_local = varinfo(prev_state_local)
was_linked = DynamicPPL.istrans(prev_varinfo_local)
is_linked = DynamicPPL.istrans(varinfo_local)
if was_linked && !is_linked
varinfo_local = DynamicPPL.link!!(varinfo_local, model)
elseif !was_linked && is_linked
varinfo_local = DynamicPPL.invlink!!(varinfo_local, model)
end
# TODO(mhauru) The above might run into trouble if some variables are linked and others
# are not. `istrans(varinfo)` returns an `all` over the individual variables. This could
# especially be a problem with dynamic models, where new variables may get introduced,
# but also in cases where component samplers have partial overlap in their target
# variables. The below is how I would like to implement this, but DynamicPPL at this
# time does not support linking individual variables selected by `VarName`. It soon
# should though, so come back to this.
# Issue ref: https://github.com/TuringLang/Turing.jl/issues/2401
# prev_links_dict = Dict(vn => DynamicPPL.istrans(prev_varinfo_local, vn) for vn in keys(prev_varinfo_local))
# any_linked = any(values(prev_links_dict))
# for vn in keys(varinfo_local)
# was_linked = if haskey(prev_varinfo_local, vn)
# prev_links_dict[vn]
# else
# # If the old state didn't have this variable, we assume it was linked if _any_
# # of the variables of the old state were linked.
# any_linked
# end
# is_linked = DynamicPPL.istrans(varinfo_local, vn)
# if was_linked && !is_linked
# varinfo_local = DynamicPPL.invlink!!(varinfo_local, vn)
# elseif !was_linked && is_linked
# varinfo_local = DynamicPPL.link!!(varinfo_local, vn)
# end
# end
return varinfo_local
end
|
match_linking!!
| 597
| 632
|
src/mcmc/gibbs.jl
|
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
function make_updated_varinfo(
f::DynamicPPL.LogDensityFunction, external_transition, external_state
)
# Set the parameters.
# NOTE: This is Turing.Inference.getparams, not AbstractMCMC.getparams (!!!!!)
# The latter uses the state rather than the transition.
# TODO(penelopeysm): Make this use AbstractMCMC.getparams instead
new_parameters = getparams(f.model, external_transition)
new_varinfo = DynamicPPL.unflatten(f.varinfo, new_parameters)
# Set (or recalculate, if needed) the log density.
new_logp = getlogp_external(external_transition, external_state)
return if ismissing(new_logp)
last(DynamicPPL.evaluate!!(f.model, new_varinfo, f.context))
else
DynamicPPL.setlogp!!(new_varinfo, new_logp)
end
end
# TODO: Do we also support `resume`, etc?
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
module Inference
using DynamicPPL:
DynamicPPL,
@model,
Metadata,
VarInfo,
LogDensityFunction,
SimpleVarInfo,
AbstractVarInfo,
# TODO(mhauru) all_varnames_grouped_by_symbol isn't exported by DPPL, because it is only
# implemented for NTVarInfo. It is used by mh.jl. Either refactor mh.jl to not use it
# or implement it for other VarInfo types and export it from DPPL.
all_varnames_grouped_by_symbol,
syms,
islinked,
setindex!!,
push!!,
setlogp!!,
getlogp,
##CHUNK 2
Return a key-value map of parameters from the varinfo.
"""
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
# `values_as_in_model`, which re-runs the model and extracts the parameters
# as they are seen in the model, i.e. in the constrained space. Moreover,
# this means that the code below will work both of linked and invlinked `vi`.
# Ref: https://github.com/TuringLang/Turing.jl/issues/2195
# NOTE: We need to `deepcopy` here to avoid modifying the original `vi`.
return DynamicPPL.values_as_in_model(model, true, deepcopy(vi))
end
function getparams(
model::DynamicPPL.Model, untyped_vi::DynamicPPL.VarInfo{<:DynamicPPL.Metadata}
)
# values_as_in_model is unconscionably slow for untyped VarInfo. It's
# much faster to convert it to a typed varinfo before calling getparams.
# https://github.com/TuringLang/Turing.jl/issues/2604
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
return true
end
# FIXME: This won't be hit unless `vals` are all the exactly same concrete type of `AdvancedMH.RandomWalkProposal`!
function should_link(
varinfo, sampler, proposal::NamedTuple{names,vals}
) where {names,vals<:NTuple{<:Any,<:AdvancedMH.RandomWalkProposal}}
return true
end
function maybe_link!!(varinfo, sampler, proposal, model)
return if should_link(varinfo, sampler, proposal)
DynamicPPL.link!!(varinfo, model)
else
varinfo
end
end
# Make a proposal if we don't have a covariance proposal matrix (the default).
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
z.θ .= θ_new
return HMCState(params, state.i, state.kernel, hamiltonian, z, state.adaptor)
end
function setparams_varinfo!!(
model::DynamicPPL.Model, sampler::Sampler{<:PG}, state::PGState, params::AbstractVarInfo
)
return PGState(params, state.rng)
end
"""
match_linking!!(varinfo_local, prev_state_local, model)
Make sure the linked/invlinked status of varinfo_local matches that of the previous
state for this sampler. This is relevant when multilple samplers are sampling the same
variables, and one might need it to be linked while the other doesn't.
end
"""
Run a Gibbs step for the first varname/sampler/state tuple, and recursively call the same
##CHUNK 2
"""
match_linking!!(varinfo_local, prev_state_local, model)
Make sure the linked/invlinked status of varinfo_local matches that of the previous
state for this sampler. This is relevant when multilple samplers are sampling the same
variables, and one might need it to be linked while the other doesn't.
end
"""
Run a Gibbs step for the first varname/sampler/state tuple, and recursively call the same
function on the tail, until there are no more samplers left.
The `step_function` argument should always be either AbstractMCMC.step or
AbstractMCMC.step_warmup.
"""
function gibbs_step_recursive(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
step_function::Function,
varname_vecs,
##CHUNK 3
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
state, states_tail... = states
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = DynamicPPL.subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
##CHUNK 4
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
new_vi_local = varinfo(new_state)
# Merge the latest values for all the variables in the current sampler.
new_global_vi = merge(get_global_varinfo(context), new_vi_local)
new_global_vi = setlogp!!(new_global_vi, getlogp(new_vi_local))
new_states = (new_states..., new_state)
##CHUNK 5
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
|
641
| 697
|
Turing.jl
| 364
|
function gibbs_step_recursive(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
step_function::Function,
varname_vecs,
samplers,
states,
global_vi,
new_states=();
kwargs...,
)
# End recursion.
if isempty(varname_vecs) && isempty(samplers) && isempty(states)
return global_vi, new_states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
state, states_tail... = states
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
new_vi_local = varinfo(new_state)
# Merge the latest values for all the variables in the current sampler.
new_global_vi = merge(get_global_varinfo(context), new_vi_local)
new_global_vi = setlogp!!(new_global_vi, getlogp(new_vi_local))
new_states = (new_states..., new_state)
return gibbs_step_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
states_tail,
new_global_vi,
new_states;
kwargs...,
)
end
|
function gibbs_step_recursive(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
step_function::Function,
varname_vecs,
samplers,
states,
global_vi,
new_states=();
kwargs...,
)
# End recursion.
if isempty(varname_vecs) && isempty(samplers) && isempty(states)
return global_vi, new_states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
state, states_tail... = states
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
new_vi_local = varinfo(new_state)
# Merge the latest values for all the variables in the current sampler.
new_global_vi = merge(get_global_varinfo(context), new_vi_local)
new_global_vi = setlogp!!(new_global_vi, getlogp(new_vi_local))
new_states = (new_states..., new_state)
return gibbs_step_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
states_tail,
new_global_vi,
new_states;
kwargs...,
)
end
|
[
641,
697
] |
function gibbs_step_recursive(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
step_function::Function,
varname_vecs,
samplers,
states,
global_vi,
new_states=();
kwargs...,
)
# End recursion.
if isempty(varname_vecs) && isempty(samplers) && isempty(states)
return global_vi, new_states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
state, states_tail... = states
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
new_vi_local = varinfo(new_state)
# Merge the latest values for all the variables in the current sampler.
new_global_vi = merge(get_global_varinfo(context), new_vi_local)
new_global_vi = setlogp!!(new_global_vi, getlogp(new_vi_local))
new_states = (new_states..., new_state)
return gibbs_step_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
states_tail,
new_global_vi,
new_states;
kwargs...,
)
end
|
function gibbs_step_recursive(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
step_function::Function,
varname_vecs,
samplers,
states,
global_vi,
new_states=();
kwargs...,
)
# End recursion.
if isempty(varname_vecs) && isempty(samplers) && isempty(states)
return global_vi, new_states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
state, states_tail... = states
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
new_vi_local = varinfo(new_state)
# Merge the latest values for all the variables in the current sampler.
new_global_vi = merge(get_global_varinfo(context), new_vi_local)
new_global_vi = setlogp!!(new_global_vi, getlogp(new_vi_local))
new_states = (new_states..., new_state)
return gibbs_step_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
states_tail,
new_global_vi,
new_states;
kwargs...,
)
end
|
gibbs_step_recursive
| 641
| 697
|
src/mcmc/gibbs.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
# Take initial step with the current sampler.
_, new_state = step_function(
rng,
conditioned_model,
sampler;
# FIXME: This will cause issues if the sampler expects initial params in unconstrained space.
# This is not the case for any samplers in Turing.jl, but will be for external samplers, etc.
initial_params=initial_params_local,
kwargs...,
)
new_vi_local = varinfo(new_state)
# Merge in any new variables that were introduced during the step, but that
# were not in the domain of the current sampler.
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
##CHUNK 2
)
new_vi_local = varinfo(new_state)
# Merge in any new variables that were introduced during the step, but that
# were not in the domain of the current sampler.
vi = merge(vi, get_global_varinfo(context))
# Merge the new values for all the variables sampled by the current sampler.
vi = merge(vi, new_vi_local)
states = (states..., new_state)
return gibbs_initialstep_recursive(
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
##CHUNK 3
initial_params=nothing,
kwargs...,
)
# End recursion
if isempty(varname_vecs) && isempty(samplers)
return vi, states
end
varnames, varname_vecs_tail... = varname_vecs
sampler, samplers_tail... = samplers
# Get the initial values for this component sampler.
initial_params_local = if initial_params === nothing
nothing
else
DynamicPPL.subset(vi, varnames)[:]
end
# Construct the conditioned model.
conditioned_model, context = make_conditional(model, varnames, vi)
##CHUNK 4
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
##CHUNK 5
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
##CHUNK 6
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
##CHUNK 7
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
##CHUNK 8
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
"""
make_conditional(model, target_variables, varinfo)
Return a new, conditioned model for a component of a Gibbs sampler.
# Arguments
- `model::DynamicPPL.Model`: The model to condition.
- `target_variables::AbstractVector{<:VarName}`: The target variables of the component
sampler. These will _not_ be conditioned.
- `varinfo::DynamicPPL.AbstractVarInfo`: Values for all variables in the model. All the
##CHUNK 9
"""
make_conditional(model, target_variables, varinfo)
Return a new, conditioned model for a component of a Gibbs sampler.
# Arguments
- `model::DynamicPPL.Model`: The model to condition.
- `target_variables::AbstractVector{<:VarName}`: The target variables of the component
sampler. These will _not_ be conditioned.
- `varinfo::DynamicPPL.AbstractVarInfo`: Values for all variables in the model. All the
values in `varinfo` but not in `target_variables` will be conditioned to the values they
have in `varinfo`.
# Returns
- A new model with the variables _not_ in `target_variables` conditioned.
- The `GibbsContext` object that will be used to condition the variables. This is necessary
because evaluation can mutate its `global_varinfo` field, which we need to access later.
"""
function make_conditional(
model::DynamicPPL.Model, target_variables::AbstractVector{<:VarName}, varinfo
##CHUNK 10
# # If the old state didn't have this variable, we assume it was linked if _any_
# # of the variables of the old state were linked.
# any_linked
# end
# is_linked = DynamicPPL.istrans(varinfo_local, vn)
# if was_linked && !is_linked
# varinfo_local = DynamicPPL.invlink!!(varinfo_local, vn)
# elseif !was_linked && is_linked
# varinfo_local = DynamicPPL.link!!(varinfo_local, vn)
# end
# end
return varinfo_local
end
"""
Run a Gibbs step for the first varname/sampler/state tuple, and recursively call the same
function on the tail, until there are no more samplers left.
The `step_function` argument should always be either AbstractMCMC.step or
AbstractMCMC.step_warmup.
|
145
| 173
|
Turing.jl
| 365
|
function find_initial_params(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
varinfo::DynamicPPL.AbstractVarInfo,
hamiltonian::AHMC.Hamiltonian;
max_attempts::Int=1000,
)
varinfo = deepcopy(varinfo) # Don't mutate
for attempts in 1:max_attempts
theta = varinfo[:]
z = AHMC.phasepoint(rng, theta, hamiltonian)
isfinite(z) && return varinfo, z
attempts == 10 &&
@warn "failed to find valid initial parameters in $(attempts) tries; consider providing explicit initial parameters using the `initial_params` keyword"
# Resample and try again.
# NOTE: varinfo has to be linked to make sure this samples in unconstrained space
varinfo = last(
DynamicPPL.evaluate!!(model, rng, varinfo, DynamicPPL.SampleFromUniform())
)
end
# if we failed to find valid initial parameters, error
return error(
"failed to find valid initial parameters in $(max_attempts) tries. This may indicate an error with the model or AD backend; please open an issue at https://github.com/TuringLang/Turing.jl/issues",
)
end
|
function find_initial_params(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
varinfo::DynamicPPL.AbstractVarInfo,
hamiltonian::AHMC.Hamiltonian;
max_attempts::Int=1000,
)
varinfo = deepcopy(varinfo) # Don't mutate
for attempts in 1:max_attempts
theta = varinfo[:]
z = AHMC.phasepoint(rng, theta, hamiltonian)
isfinite(z) && return varinfo, z
attempts == 10 &&
@warn "failed to find valid initial parameters in $(attempts) tries; consider providing explicit initial parameters using the `initial_params` keyword"
# Resample and try again.
# NOTE: varinfo has to be linked to make sure this samples in unconstrained space
varinfo = last(
DynamicPPL.evaluate!!(model, rng, varinfo, DynamicPPL.SampleFromUniform())
)
end
# if we failed to find valid initial parameters, error
return error(
"failed to find valid initial parameters in $(max_attempts) tries. This may indicate an error with the model or AD backend; please open an issue at https://github.com/TuringLang/Turing.jl/issues",
)
end
|
[
145,
173
] |
function find_initial_params(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
varinfo::DynamicPPL.AbstractVarInfo,
hamiltonian::AHMC.Hamiltonian;
max_attempts::Int=1000,
)
varinfo = deepcopy(varinfo) # Don't mutate
for attempts in 1:max_attempts
theta = varinfo[:]
z = AHMC.phasepoint(rng, theta, hamiltonian)
isfinite(z) && return varinfo, z
attempts == 10 &&
@warn "failed to find valid initial parameters in $(attempts) tries; consider providing explicit initial parameters using the `initial_params` keyword"
# Resample and try again.
# NOTE: varinfo has to be linked to make sure this samples in unconstrained space
varinfo = last(
DynamicPPL.evaluate!!(model, rng, varinfo, DynamicPPL.SampleFromUniform())
)
end
# if we failed to find valid initial parameters, error
return error(
"failed to find valid initial parameters in $(max_attempts) tries. This may indicate an error with the model or AD backend; please open an issue at https://github.com/TuringLang/Turing.jl/issues",
)
end
|
function find_initial_params(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
varinfo::DynamicPPL.AbstractVarInfo,
hamiltonian::AHMC.Hamiltonian;
max_attempts::Int=1000,
)
varinfo = deepcopy(varinfo) # Don't mutate
for attempts in 1:max_attempts
theta = varinfo[:]
z = AHMC.phasepoint(rng, theta, hamiltonian)
isfinite(z) && return varinfo, z
attempts == 10 &&
@warn "failed to find valid initial parameters in $(attempts) tries; consider providing explicit initial parameters using the `initial_params` keyword"
# Resample and try again.
# NOTE: varinfo has to be linked to make sure this samples in unconstrained space
varinfo = last(
DynamicPPL.evaluate!!(model, rng, varinfo, DynamicPPL.SampleFromUniform())
)
end
# if we failed to find valid initial parameters, error
return error(
"failed to find valid initial parameters in $(max_attempts) tries. This may indicate an error with the model or AD backend; please open an issue at https://github.com/TuringLang/Turing.jl/issues",
)
end
|
find_initial_params
| 145
| 173
|
src/mcmc/hmc.jl
|
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
##CHUNK 2
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
##CHUNK 3
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = DynamicPPL.subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
#FILE: Turing.jl/test/mcmc/hmc.jl
##CHUNK 1
# The discrepancies in the chains are in the tails, so we can't just compare the mean, etc.
# KS will compare the empirical CDFs, which seems like a reasonable thing to do here.
@test pvalue(ApproximateTwoSampleKSTest(vec(results), vec(results_prior))) > 0.001
end
@testset "getstepsize: Turing.jl#2400" begin
algs = [HMC(0.1, 10), HMCDA(0.8, 0.75), NUTS(0.5), NUTS(0, 0.5)]
@testset "$(alg)" for alg in algs
# Construct a HMC state by taking a single step
spl = Sampler(alg)
hmc_state = DynamicPPL.initialstep(
Random.default_rng(), gdemo_default, spl, DynamicPPL.VarInfo(gdemo_default)
)[2]
# Check that we can obtain the current step size
@test Turing.Inference.getstepsize(spl, hmc_state) isa Float64
end
end
end
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
function callback(rng, model, sampler, sample, state, i; kwargs...)
sample isa Inference.Transition || error("incorrect sample")
return nothing
end
alg = Gibbs(:s => MH(), :m => HMC(0.2, 4))
sample(model, alg, 100; callback=callback)
end
@testset "dynamic model with analytical posterior" begin
# A dynamic model where b ~ Bernoulli determines the dimensionality
# When b=0: single parameter θ₁
# When b=1: two parameters θ₁, θ₂ where we observe their sum
@model function dynamic_bernoulli_normal(y_obs=2.0)
b ~ Bernoulli(0.3)
if b == 0
θ = Vector{Float64}(undef, 1)
θ[1] ~ Normal(0.0, 1.0)
y_obs ~ Normal(θ[1], 0.5)
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
#FILE: Turing.jl/src/variational/VariationalInference.jl
##CHUNK 1
Approximating the target `model` via variational inference by optimizing `objective` with the initialization `q`.
This is a thin wrapper around `AdvancedVI.optimize`.
# Arguments
- `model`: The target `DynamicPPL.Model`.
- `q`: The initial variational approximation.
- `n_iterations`: Number of optimization steps.
# Keyword Arguments
- `objective`: Variational objective to be optimized.
- `show_progress`: Whether to show the progress bar.
- `optimizer`: Optimization algorithm.
- `averager`: Parameter averaging strategy.
- `operator`: Operator applied after each optimization step.
- `adtype`: Automatic differentiation backend.
See the docs of `AdvancedVI.optimize` for additional keyword arguments.
# Returns
- `q`: Variational distribution formed by the last iterate of the optimization run.
#FILE: Turing.jl/test/mcmc/mh.jl
##CHUNK 1
# NOTE: Broken because MH doesn't really follow the `logdensity` interface, but calls
# it with `NamedTuple` instead of `AbstractVector`.
# s7 = externalsampler(MH(gdemo_default, proposal_type=AdvancedMH.StaticProposal))
# c7 = sample(gdemo_default, s7, N)
end
@testset "mh inference" begin
# Set the initial parameters, because if we get unlucky with the initial state,
# these chains are too short to converge to reasonable numbers.
discard_initial = 1_000
initial_params = [1.0, 1.0]
@testset "gdemo_default" begin
alg = MH()
chain = sample(
StableRNG(seed), gdemo_default, alg, 10_000; discard_initial, initial_params
)
check_gdemo(chain; atol=0.1)
end
#FILE: Turing.jl/test/mcmc/Inference.jl
##CHUNK 1
N = 10
alg = HMC(0.2, 4)
@model function vdemo3()
x = Vector{Real}(undef, N)
for i in 1:N
x[i] ~ Normal(0, sqrt(4))
end
end
# TODO(mhauru) What is the point of the below @elapsed stuff? It prints out some
# timings. Do we actually ever look at them?
t_loop = @elapsed res = sample(StableRNG(seed), vdemo3(), alg, 1000)
# Test for vectorize UnivariateDistribution
@model function vdemo4()
x = Vector{Real}(undef, N)
@. x ~ Normal(0, 2)
end
#CURRENT FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
nadapts=0,
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
vi = DynamicPPL.link(vi_original, model)
# Extract parameters.
theta = vi[:]
|
175
| 255
|
Turing.jl
| 366
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
nadapts=0,
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
vi = DynamicPPL.link(vi_original, model)
# Extract parameters.
theta = vi[:]
# Create a Hamiltonian.
metricT = getmetricT(spl.alg)
metric = metricT(length(theta))
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
# If no initial parameters are provided, resample until the log probability
# and its gradient are finite. Otherwise, just use the existing parameters.
vi, z = if initial_params === nothing
find_initial_params(rng, model, vi, hamiltonian)
else
vi, AHMC.phasepoint(rng, theta, hamiltonian)
end
theta = vi[:]
# Cache current log density.
log_density_old = getlogp(vi)
# Find good eps if not provided one
if iszero(spl.alg.ϵ)
ϵ = AHMC.find_good_stepsize(rng, hamiltonian, theta)
@info "Found initial step size" ϵ
else
ϵ = spl.alg.ϵ
end
# Generate a kernel.
kernel = make_ahmc_kernel(spl.alg, ϵ)
# Create initial transition and state.
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
# Update `vi` based on acceptance
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
else
vi = DynamicPPL.unflatten(vi, theta)
vi = setlogp!!(vi, log_density_old)
end
transition = Transition(model, vi, t)
state = HMCState(vi, 1, kernel, hamiltonian, t.z, adaptor)
return transition, state
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
nadapts=0,
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
vi = DynamicPPL.link(vi_original, model)
# Extract parameters.
theta = vi[:]
# Create a Hamiltonian.
metricT = getmetricT(spl.alg)
metric = metricT(length(theta))
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
# If no initial parameters are provided, resample until the log probability
# and its gradient are finite. Otherwise, just use the existing parameters.
vi, z = if initial_params === nothing
find_initial_params(rng, model, vi, hamiltonian)
else
vi, AHMC.phasepoint(rng, theta, hamiltonian)
end
theta = vi[:]
# Cache current log density.
log_density_old = getlogp(vi)
# Find good eps if not provided one
if iszero(spl.alg.ϵ)
ϵ = AHMC.find_good_stepsize(rng, hamiltonian, theta)
@info "Found initial step size" ϵ
else
ϵ = spl.alg.ϵ
end
# Generate a kernel.
kernel = make_ahmc_kernel(spl.alg, ϵ)
# Create initial transition and state.
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
# Update `vi` based on acceptance
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
else
vi = DynamicPPL.unflatten(vi, theta)
vi = setlogp!!(vi, log_density_old)
end
transition = Transition(model, vi, t)
state = HMCState(vi, 1, kernel, hamiltonian, t.z, adaptor)
return transition, state
end
|
[
175,
255
] |
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
nadapts=0,
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
vi = DynamicPPL.link(vi_original, model)
# Extract parameters.
theta = vi[:]
# Create a Hamiltonian.
metricT = getmetricT(spl.alg)
metric = metricT(length(theta))
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
# If no initial parameters are provided, resample until the log probability
# and its gradient are finite. Otherwise, just use the existing parameters.
vi, z = if initial_params === nothing
find_initial_params(rng, model, vi, hamiltonian)
else
vi, AHMC.phasepoint(rng, theta, hamiltonian)
end
theta = vi[:]
# Cache current log density.
log_density_old = getlogp(vi)
# Find good eps if not provided one
if iszero(spl.alg.ϵ)
ϵ = AHMC.find_good_stepsize(rng, hamiltonian, theta)
@info "Found initial step size" ϵ
else
ϵ = spl.alg.ϵ
end
# Generate a kernel.
kernel = make_ahmc_kernel(spl.alg, ϵ)
# Create initial transition and state.
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
# Update `vi` based on acceptance
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
else
vi = DynamicPPL.unflatten(vi, theta)
vi = setlogp!!(vi, log_density_old)
end
transition = Transition(model, vi, t)
state = HMCState(vi, 1, kernel, hamiltonian, t.z, adaptor)
return transition, state
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:Hamiltonian},
vi_original::AbstractVarInfo;
initial_params=nothing,
nadapts=0,
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
vi = DynamicPPL.link(vi_original, model)
# Extract parameters.
theta = vi[:]
# Create a Hamiltonian.
metricT = getmetricT(spl.alg)
metric = metricT(length(theta))
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
# If no initial parameters are provided, resample until the log probability
# and its gradient are finite. Otherwise, just use the existing parameters.
vi, z = if initial_params === nothing
find_initial_params(rng, model, vi, hamiltonian)
else
vi, AHMC.phasepoint(rng, theta, hamiltonian)
end
theta = vi[:]
# Cache current log density.
log_density_old = getlogp(vi)
# Find good eps if not provided one
if iszero(spl.alg.ϵ)
ϵ = AHMC.find_good_stepsize(rng, hamiltonian, theta)
@info "Found initial step size" ϵ
else
ϵ = spl.alg.ϵ
end
# Generate a kernel.
kernel = make_ahmc_kernel(spl.alg, ϵ)
# Create initial transition and state.
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
# Update `vi` based on acceptance
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
else
vi = DynamicPPL.unflatten(vi, theta)
vi = setlogp!!(vi, log_density_old)
end
transition = Transition(model, vi, t)
state = HMCState(vi, 1, kernel, hamiltonian, t.z, adaptor)
return transition, state
end
|
DynamicPPL.initialstep
| 175
| 255
|
src/mcmc/hmc.jl
|
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
# expected_logpdf = logpdf(Beta(2, 2), a) + logpdf(Normal(a), b)
# @test all(chn[:lp] .== expected_logpdf)
# @test all(chn[:logprior] .== expected_logpdf)
# @test all(chn[:loglikelihood] .== 0.0)
end
function initialize_nuts(model::DynamicPPL.Model)
# Create a linked varinfo
vi = DynamicPPL.VarInfo(model)
linked_vi = DynamicPPL.link!!(vi, model)
# Create a LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, linked_vi; adtype=Turing.DEFAULT_ADTYPE)
# Choose parameter dimensionality and initial parameter value
D = LogDensityProblems.dimension(f)
initial_θ = rand(D) .- 0.5
# Define a Hamiltonian system
metric = AdvancedHMC.DiagEuclideanMetric(D)
##CHUNK 2
# Create a LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, linked_vi; adtype=Turing.DEFAULT_ADTYPE)
# Choose parameter dimensionality and initial parameter value
D = LogDensityProblems.dimension(f)
initial_θ = rand(D) .- 0.5
# Define a Hamiltonian system
metric = AdvancedHMC.DiagEuclideanMetric(D)
hamiltonian = AdvancedHMC.Hamiltonian(metric, f)
# Define a leapfrog solver, with initial step size chosen heuristically
initial_ϵ = AdvancedHMC.find_good_stepsize(hamiltonian, initial_θ)
integrator = AdvancedHMC.Leapfrog(initial_ϵ)
# Define an HMC sampler, with the following components
# - multinomial sampling scheme,
# - generalised No-U-Turn criteria, and
# - windowed adaption for step-size and diagonal mass matrix
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
##CHUNK 2
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
# Perform initial step.
results = DynamicHMC.mcmc_keep_warmup(
rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
)
steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
#CURRENT FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
function get_hamiltonian(model, spl, vi, state, n)
metric = gen_metric(n, spl, state)
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
return AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
end
"""
HMCDA(
##CHUNK 2
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
end
# Compute next transition and state.
transition = Transition(model, vi, t)
newstate = HMCState(vi, i, kernel, hamiltonian, t.z, state.adaptor)
return transition, newstate
end
function get_hamiltonian(model, spl, vi, state, n)
metric = gen_metric(n, spl, state)
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
|
257
| 301
|
Turing.jl
| 367
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Hamiltonian},
state::HMCState;
nadapts=0,
kwargs...,
)
# Get step size
@debug "current ϵ" getstepsize(spl, state)
# Compute transition.
hamiltonian = state.hamiltonian
z = state.z
t = AHMC.transition(rng, hamiltonian, state.kernel, z)
# Adaptation
i = state.i + 1
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian,
state.kernel,
state.adaptor,
i,
nadapts,
t.z.θ,
t.stat.acceptance_rate,
)
else
kernel = state.kernel
end
# Update variables
vi = state.vi
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
end
# Compute next transition and state.
transition = Transition(model, vi, t)
newstate = HMCState(vi, i, kernel, hamiltonian, t.z, state.adaptor)
return transition, newstate
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Hamiltonian},
state::HMCState;
nadapts=0,
kwargs...,
)
# Get step size
@debug "current ϵ" getstepsize(spl, state)
# Compute transition.
hamiltonian = state.hamiltonian
z = state.z
t = AHMC.transition(rng, hamiltonian, state.kernel, z)
# Adaptation
i = state.i + 1
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian,
state.kernel,
state.adaptor,
i,
nadapts,
t.z.θ,
t.stat.acceptance_rate,
)
else
kernel = state.kernel
end
# Update variables
vi = state.vi
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
end
# Compute next transition and state.
transition = Transition(model, vi, t)
newstate = HMCState(vi, i, kernel, hamiltonian, t.z, state.adaptor)
return transition, newstate
end
|
[
257,
301
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Hamiltonian},
state::HMCState;
nadapts=0,
kwargs...,
)
# Get step size
@debug "current ϵ" getstepsize(spl, state)
# Compute transition.
hamiltonian = state.hamiltonian
z = state.z
t = AHMC.transition(rng, hamiltonian, state.kernel, z)
# Adaptation
i = state.i + 1
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian,
state.kernel,
state.adaptor,
i,
nadapts,
t.z.θ,
t.stat.acceptance_rate,
)
else
kernel = state.kernel
end
# Update variables
vi = state.vi
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
end
# Compute next transition and state.
transition = Transition(model, vi, t)
newstate = HMCState(vi, i, kernel, hamiltonian, t.z, state.adaptor)
return transition, newstate
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Hamiltonian},
state::HMCState;
nadapts=0,
kwargs...,
)
# Get step size
@debug "current ϵ" getstepsize(spl, state)
# Compute transition.
hamiltonian = state.hamiltonian
z = state.z
t = AHMC.transition(rng, hamiltonian, state.kernel, z)
# Adaptation
i = state.i + 1
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian,
state.kernel,
state.adaptor,
i,
nadapts,
t.z.θ,
t.stat.acceptance_rate,
)
else
kernel = state.kernel
end
# Update variables
vi = state.vi
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
end
# Compute next transition and state.
transition = Transition(model, vi, t)
newstate = HMCState(vi, i, kernel, hamiltonian, t.z, state.adaptor)
return transition, newstate
end
|
AbstractMCMC.step
| 257
| 301
|
src/mcmc/hmc.jl
|
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 2
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 3
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
return sample, newstate
end
end
##CHUNK 4
# Perform initial step.
results = DynamicHMC.mcmc_keep_warmup(
rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
)
steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
state::HMCState,
params::AbstractVarInfo,
)
θ_new = params[:]
hamiltonian = get_hamiltonian(model, sampler, params, state, length(θ_new))
# Update the parameter values in `state.z`.
# TODO: Avoid mutation
z = state.z
resize!(z.θ, length(θ_new))
z.θ .= θ_new
return HMCState(params, state.i, state.kernel, hamiltonian, z, state.adaptor)
end
function setparams_varinfo!!(
model::DynamicPPL.Model, sampler::Sampler{<:PG}, state::PGState, params::AbstractVarInfo
)
return PGState(params, state.rng)
end
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
##CHUNK 2
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
# Update latent variables and velocity according to
# equation (15) of Chen et al. (2014)
v = state.velocity
θ .+= v
η = spl.alg.learning_rate
α = spl.alg.momentum_decay
newv = (1 - α) .* v .+ η .* grad .+ sqrt(2 * η * α) .* randn(rng, eltype(v), length(v))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = Transition(model, vi)
newstate = SGHMCState(ℓ, vi, newv)
return sample, newstate
end
##CHUNK 3
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
return sample, newstate
end
#CURRENT FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
# Update `vi` based on acceptance
if t.stat.is_accept
vi = DynamicPPL.unflatten(vi, t.z.θ)
vi = setlogp!!(vi, t.stat.log_density)
else
vi = DynamicPPL.unflatten(vi, theta)
vi = setlogp!!(vi, log_density_old)
end
##CHUNK 2
ϵ = AHMC.find_good_stepsize(rng, hamiltonian, theta)
@info "Found initial step size" ϵ
else
ϵ = spl.alg.ϵ
end
# Generate a kernel.
kernel = make_ahmc_kernel(spl.alg, ϵ)
# Create initial transition and state.
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
|
303
| 318
|
Turing.jl
| 368
|
function get_hamiltonian(model, spl, vi, state, n)
metric = gen_metric(n, spl, state)
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
return AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
end
|
function get_hamiltonian(model, spl, vi, state, n)
metric = gen_metric(n, spl, state)
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
return AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
end
|
[
303,
318
] |
function get_hamiltonian(model, spl, vi, state, n)
metric = gen_metric(n, spl, state)
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
return AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
end
|
function get_hamiltonian(model, spl, vi, state, n)
metric = gen_metric(n, spl, state)
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
return AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
end
|
get_hamiltonian
| 303
| 318
|
src/mcmc/hmc.jl
|
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
# expected_logpdf = logpdf(Beta(2, 2), a) + logpdf(Normal(a), b)
# @test all(chn[:lp] .== expected_logpdf)
# @test all(chn[:logprior] .== expected_logpdf)
# @test all(chn[:loglikelihood] .== 0.0)
end
function initialize_nuts(model::DynamicPPL.Model)
# Create a linked varinfo
vi = DynamicPPL.VarInfo(model)
linked_vi = DynamicPPL.link!!(vi, model)
# Create a LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, linked_vi; adtype=Turing.DEFAULT_ADTYPE)
# Choose parameter dimensionality and initial parameter value
D = LogDensityProblems.dimension(f)
initial_θ = rand(D) .- 0.5
# Define a Hamiltonian system
metric = AdvancedHMC.DiagEuclideanMetric(D)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
mh_sampler = AMH.MetropolisHastings(spl.alg.proposals)
prev_trans = AMH.Transition(vals, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
##CHUNK 2
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
# Make a proposal if we DO have a covariance proposal matrix.
function propose!!(
rng::AbstractRNG,
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
##CHUNK 2
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
state::SGHMCState;
kwargs...,
)
# Compute gradient of log density.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
# Perform initial step.
results = DynamicHMC.mcmc_keep_warmup(
rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
)
steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
)
new_inner_state = setparams_varinfo!!(
AbstractMCMC.LogDensityModel(logdensity), sampler, state.state, params
)
return TuringState(new_inner_state, params, logdensity)
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:Hamiltonian},
#CURRENT FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
metricT = getmetricT(spl.alg)
metric = metricT(length(theta))
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context));
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
# If no initial parameters are provided, resample until the log probability
# and its gradient are finite. Otherwise, just use the existing parameters.
vi, z = if initial_params === nothing
find_initial_params(rng, model, vi, hamiltonian)
##CHUNK 2
nadapts=0,
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
vi = DynamicPPL.link(vi_original, model)
# Extract parameters.
theta = vi[:]
# Create a Hamiltonian.
metricT = getmetricT(spl.alg)
metric = metricT(length(theta))
ldf = DynamicPPL.LogDensityFunction(
model,
vi,
# TODO(penelopeysm): Can we just use leafcontext(model.context)? Do we
# need to pass in the sampler? (In fact LogDensityFunction defaults to
# using leafcontext(model.context) so could we just remove the argument
# entirely?)
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context));
##CHUNK 3
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
# If no initial parameters are provided, resample until the log probability
# and its gradient are finite. Otherwise, just use the existing parameters.
vi, z = if initial_params === nothing
find_initial_params(rng, model, vi, hamiltonian)
else
vi, AHMC.phasepoint(rng, theta, hamiltonian)
end
theta = vi[:]
# Cache current log density.
log_density_old = getlogp(vi)
# Find good eps if not provided one
if iszero(spl.alg.ϵ)
|
427
| 437
|
Turing.jl
| 369
|
function NUTS(
n_adapts::Int,
δ::Float64,
max_depth::Int,
Δ_max::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT}
return NUTS{typeof(adtype),metricT}(n_adapts, δ, max_depth, Δ_max, ϵ, adtype)
end
|
function NUTS(
n_adapts::Int,
δ::Float64,
max_depth::Int,
Δ_max::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT}
return NUTS{typeof(adtype),metricT}(n_adapts, δ, max_depth, Δ_max, ϵ, adtype)
end
|
[
427,
437
] |
function NUTS(
n_adapts::Int,
δ::Float64,
max_depth::Int,
Δ_max::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT}
return NUTS{typeof(adtype),metricT}(n_adapts, δ, max_depth, Δ_max, ϵ, adtype)
end
|
function NUTS(
n_adapts::Int,
δ::Float64,
max_depth::Int,
Δ_max::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT}
return NUTS{typeof(adtype),metricT}(n_adapts, δ, max_depth, Δ_max, ϵ, adtype)
end
|
NUTS
| 427
| 437
|
src/mcmc/hmc.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
return HMCDA(n_adapts, δ, λ; kwargs...)
end
function HMCDA(
n_adapts::Int,
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMCDA(n_adapts, δ, λ, init_ϵ, metricT; adtype=adtype)
end
"""
NUTS(n_adapts::Int, δ::Float64; max_depth::Int=10, Δ_max::Float64=1000.0, init_ϵ::Float64=0.0; adtype::ADTypes.AbstractADType=AutoForwardDiff()
No-U-Turn Sampler (NUTS) sampler.
Usage:
##CHUNK 2
struct HMCDA{AD,metricT<:AHMC.AbstractMetric} <: AdaptiveHamiltonian
n_adapts::Int # number of samples with adaption for ϵ
δ::Float64 # target accept rate
λ::Float64 # target leapfrog length
ϵ::Float64 # (initial) step size
adtype::AD
end
function HMCDA(
n_adapts::Int,
δ::Float64,
λ::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT<:AHMC.AbstractMetric}
return HMCDA{typeof(adtype),metricT}(n_adapts, δ, λ, ϵ, adtype)
end
function HMCDA(
##CHUNK 3
function NUTS(
n_adapts::Int,
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
function NUTS(
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
##CHUNK 4
return NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
function NUTS(
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(-1, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
function NUTS(; kwargs...)
return NUTS(-1, 0.65; kwargs...)
end
for alg in (:HMC, :HMCDA, :NUTS)
@eval getmetricT(::$alg{<:Any,metricT}) where {metricT} = metricT
##CHUNK 5
max_depth::Int # maximum tree depth
Δ_max::Float64
ϵ::Float64 # (initial) step size
adtype::AD
end
function NUTS(n_adapts::Int, δ::Float64, ::Tuple{}; kwargs...)
return NUTS(n_adapts, δ; kwargs...)
end
function NUTS(
n_adapts::Int,
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
##CHUNK 6
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMCDA(-1, δ, λ, init_ϵ, metricT; adtype=adtype)
end
function HMCDA(n_adapts::Int, δ::Float64, λ::Float64, ::Tuple{}; kwargs...)
return HMCDA(n_adapts, δ, λ; kwargs...)
end
function HMCDA(
n_adapts::Int,
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
##CHUNK 7
δ::Float64,
λ::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT<:AHMC.AbstractMetric}
return HMCDA{typeof(adtype),metricT}(n_adapts, δ, λ, ϵ, adtype)
end
function HMCDA(
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMCDA(-1, δ, λ, init_ϵ, metricT; adtype=adtype)
end
function HMCDA(n_adapts::Int, δ::Float64, λ::Float64, ::Tuple{}; kwargs...)
##CHUNK 8
ϵ::Float64 # leapfrog step size
n_leapfrog::Int # leapfrog step number
adtype::AD
end
function HMC(
ϵ::Float64,
n_leapfrog::Int,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT<:AHMC.AbstractMetric}
return HMC{typeof(adtype),metricT}(ϵ, n_leapfrog, adtype)
end
function HMC(
ϵ::Float64,
n_leapfrog::Int;
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMC(ϵ, n_leapfrog, metricT; adtype=adtype)
##CHUNK 9
- `max_depth::Int` : Maximum doubling tree depth.
- `Δ_max::Float64` : Maximum divergence during doubling tree.
- `init_ϵ::Float64` : Initial step size; 0 means automatically searching using a heuristic procedure.
- `adtype::ADTypes.AbstractADType` : The automatic differentiation (AD) backend.
If not specified, `ForwardDiff` is used, with its `chunksize` automatically determined.
"""
struct NUTS{AD,metricT<:AHMC.AbstractMetric} <: AdaptiveHamiltonian
n_adapts::Int # number of samples with adaption for ϵ
δ::Float64 # target accept rate
max_depth::Int # maximum tree depth
Δ_max::Float64
ϵ::Float64 # (initial) step size
adtype::AD
end
function NUTS(n_adapts::Int, δ::Float64, ::Tuple{}; kwargs...)
return NUTS(n_adapts, δ; kwargs...)
end
##CHUNK 10
)
return HMCDA(n_adapts, δ, λ, init_ϵ, metricT; adtype=adtype)
end
"""
NUTS(n_adapts::Int, δ::Float64; max_depth::Int=10, Δ_max::Float64=1000.0, init_ϵ::Float64=0.0; adtype::ADTypes.AbstractADType=AutoForwardDiff()
No-U-Turn Sampler (NUTS) sampler.
Usage:
```julia
NUTS() # Use default NUTS configuration.
NUTS(1000, 0.65) # Use 1000 adaption steps, and target accept ratio 0.65.
```
Arguments:
- `n_adapts::Int` : The number of samples to use with adaptation.
- `δ::Float64` : Target acceptance rate for dual averaging.
|
443
| 453
|
Turing.jl
| 370
|
function NUTS(
n_adapts::Int,
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
|
function NUTS(
n_adapts::Int,
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
|
[
443,
453
] |
function NUTS(
n_adapts::Int,
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
|
function NUTS(
n_adapts::Int,
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(n_adapts, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
|
NUTS
| 443
| 453
|
src/mcmc/hmc.jl
|
#CURRENT FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
return HMCDA(n_adapts, δ, λ; kwargs...)
end
function HMCDA(
n_adapts::Int,
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMCDA(n_adapts, δ, λ, init_ϵ, metricT; adtype=adtype)
end
"""
NUTS(n_adapts::Int, δ::Float64; max_depth::Int=10, Δ_max::Float64=1000.0, init_ϵ::Float64=0.0; adtype::ADTypes.AbstractADType=AutoForwardDiff()
No-U-Turn Sampler (NUTS) sampler.
Usage:
##CHUNK 2
Δ_max::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT}
return NUTS{typeof(adtype),metricT}(n_adapts, δ, max_depth, Δ_max, ϵ, adtype)
end
function NUTS(n_adapts::Int, δ::Float64, ::Tuple{}; kwargs...)
return NUTS(n_adapts, δ; kwargs...)
end
function NUTS(
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
##CHUNK 3
end
function NUTS(
δ::Float64;
max_depth::Int=10,
Δ_max::Float64=1000.0,
init_ϵ::Float64=0.0,
metricT=AHMC.DiagEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(-1, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
function NUTS(; kwargs...)
return NUTS(-1, 0.65; kwargs...)
end
for alg in (:HMC, :HMCDA, :NUTS)
@eval getmetricT(::$alg{<:Any,metricT}) where {metricT} = metricT
##CHUNK 4
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMCDA(-1, δ, λ, init_ϵ, metricT; adtype=adtype)
end
function HMCDA(n_adapts::Int, δ::Float64, λ::Float64, ::Tuple{}; kwargs...)
return HMCDA(n_adapts, δ, λ; kwargs...)
end
function HMCDA(
n_adapts::Int,
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
##CHUNK 5
max_depth::Int # maximum tree depth
Δ_max::Float64
ϵ::Float64 # (initial) step size
adtype::AD
end
function NUTS(
n_adapts::Int,
δ::Float64,
max_depth::Int,
Δ_max::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT}
return NUTS{typeof(adtype),metricT}(n_adapts, δ, max_depth, Δ_max, ϵ, adtype)
end
function NUTS(n_adapts::Int, δ::Float64, ::Tuple{}; kwargs...)
return NUTS(n_adapts, δ; kwargs...)
##CHUNK 6
)
return HMCDA(n_adapts, δ, λ, init_ϵ, metricT; adtype=adtype)
end
"""
NUTS(n_adapts::Int, δ::Float64; max_depth::Int=10, Δ_max::Float64=1000.0, init_ϵ::Float64=0.0; adtype::ADTypes.AbstractADType=AutoForwardDiff()
No-U-Turn Sampler (NUTS) sampler.
Usage:
```julia
NUTS() # Use default NUTS configuration.
NUTS(1000, 0.65) # Use 1000 adaption steps, and target accept ratio 0.65.
```
Arguments:
- `n_adapts::Int` : The number of samples to use with adaptation.
- `δ::Float64` : Target acceptance rate for dual averaging.
##CHUNK 7
δ::Float64,
λ::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT<:AHMC.AbstractMetric}
return HMCDA{typeof(adtype),metricT}(n_adapts, δ, λ, ϵ, adtype)
end
function HMCDA(
δ::Float64,
λ::Float64;
init_ϵ::Float64=0.0,
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMCDA(-1, δ, λ, init_ϵ, metricT; adtype=adtype)
end
function HMCDA(n_adapts::Int, δ::Float64, λ::Float64, ::Tuple{}; kwargs...)
##CHUNK 8
struct HMCDA{AD,metricT<:AHMC.AbstractMetric} <: AdaptiveHamiltonian
n_adapts::Int # number of samples with adaption for ϵ
δ::Float64 # target accept rate
λ::Float64 # target leapfrog length
ϵ::Float64 # (initial) step size
adtype::AD
end
function HMCDA(
n_adapts::Int,
δ::Float64,
λ::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT<:AHMC.AbstractMetric}
return HMCDA{typeof(adtype),metricT}(n_adapts, δ, λ, ϵ, adtype)
end
function HMCDA(
##CHUNK 9
HMCDA(
n_adapts::Int, δ::Float64, λ::Float64; ϵ::Float64 = 0.0;
adtype::ADTypes.AbstractADType = AutoForwardDiff(),
)
Hamiltonian Monte Carlo sampler with Dual Averaging algorithm.
# Usage
```julia
HMCDA(200, 0.65, 0.3)
```
# Arguments
- `n_adapts`: Numbers of samples to use for adaptation.
- `δ`: Target acceptance rate. 65% is often recommended.
- `λ`: Target leapfrog length.
- `ϵ`: Initial step size; 0 means automatically search by Turing.
- `adtype`: The automatic differentiation (AD) backend.
##CHUNK 10
- `max_depth::Int` : Maximum doubling tree depth.
- `Δ_max::Float64` : Maximum divergence during doubling tree.
- `init_ϵ::Float64` : Initial step size; 0 means automatically searching using a heuristic procedure.
- `adtype::ADTypes.AbstractADType` : The automatic differentiation (AD) backend.
If not specified, `ForwardDiff` is used, with its `chunksize` automatically determined.
"""
struct NUTS{AD,metricT<:AHMC.AbstractMetric} <: AdaptiveHamiltonian
n_adapts::Int # number of samples with adaption for ϵ
δ::Float64 # target accept rate
max_depth::Int # maximum tree depth
Δ_max::Float64
ϵ::Float64 # (initial) step size
adtype::AD
end
function NUTS(
n_adapts::Int,
δ::Float64,
max_depth::Int,
|
527
| 543
|
Turing.jl
| 371
|
function AHMCAdaptor(alg::AdaptiveHamiltonian, metric::AHMC.AbstractMetric; ϵ=alg.ϵ)
pc = AHMC.MassMatrixAdaptor(metric)
da = AHMC.StepSizeAdaptor(alg.δ, ϵ)
if iszero(alg.n_adapts)
adaptor = AHMC.Adaptation.NoAdaptation()
else
if metric == AHMC.UnitEuclideanMetric
adaptor = AHMC.NaiveHMCAdaptor(pc, da) # there is actually no adaptation for mass matrix
else
adaptor = AHMC.StanHMCAdaptor(pc, da)
AHMC.initialize!(adaptor, alg.n_adapts)
end
end
return adaptor
end
|
function AHMCAdaptor(alg::AdaptiveHamiltonian, metric::AHMC.AbstractMetric; ϵ=alg.ϵ)
pc = AHMC.MassMatrixAdaptor(metric)
da = AHMC.StepSizeAdaptor(alg.δ, ϵ)
if iszero(alg.n_adapts)
adaptor = AHMC.Adaptation.NoAdaptation()
else
if metric == AHMC.UnitEuclideanMetric
adaptor = AHMC.NaiveHMCAdaptor(pc, da) # there is actually no adaptation for mass matrix
else
adaptor = AHMC.StanHMCAdaptor(pc, da)
AHMC.initialize!(adaptor, alg.n_adapts)
end
end
return adaptor
end
|
[
527,
543
] |
function AHMCAdaptor(alg::AdaptiveHamiltonian, metric::AHMC.AbstractMetric; ϵ=alg.ϵ)
pc = AHMC.MassMatrixAdaptor(metric)
da = AHMC.StepSizeAdaptor(alg.δ, ϵ)
if iszero(alg.n_adapts)
adaptor = AHMC.Adaptation.NoAdaptation()
else
if metric == AHMC.UnitEuclideanMetric
adaptor = AHMC.NaiveHMCAdaptor(pc, da) # there is actually no adaptation for mass matrix
else
adaptor = AHMC.StanHMCAdaptor(pc, da)
AHMC.initialize!(adaptor, alg.n_adapts)
end
end
return adaptor
end
|
function AHMCAdaptor(alg::AdaptiveHamiltonian, metric::AHMC.AbstractMetric; ϵ=alg.ϵ)
pc = AHMC.MassMatrixAdaptor(metric)
da = AHMC.StepSizeAdaptor(alg.δ, ϵ)
if iszero(alg.n_adapts)
adaptor = AHMC.Adaptation.NoAdaptation()
else
if metric == AHMC.UnitEuclideanMetric
adaptor = AHMC.NaiveHMCAdaptor(pc, da) # there is actually no adaptation for mass matrix
else
adaptor = AHMC.StanHMCAdaptor(pc, da)
AHMC.initialize!(adaptor, alg.n_adapts)
end
end
return adaptor
end
|
AHMCAdaptor
| 527
| 543
|
src/mcmc/hmc.jl
|
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
hamiltonian = AdvancedHMC.Hamiltonian(metric, f)
# Define a leapfrog solver, with initial step size chosen heuristically
initial_ϵ = AdvancedHMC.find_good_stepsize(hamiltonian, initial_θ)
integrator = AdvancedHMC.Leapfrog(initial_ϵ)
# Define an HMC sampler, with the following components
# - multinomial sampling scheme,
# - generalised No-U-Turn criteria, and
# - windowed adaption for step-size and diagonal mass matrix
proposal = AdvancedHMC.HMCKernel(
AdvancedHMC.Trajectory{AdvancedHMC.MultinomialTS}(
integrator, AdvancedHMC.GeneralisedNoUTurn()
),
)
adaptor = AdvancedHMC.StanHMCAdaptor(
AdvancedHMC.MassMatrixAdaptor(metric), AdvancedHMC.StepSizeAdaptor(0.65, integrator)
)
return AdvancedHMC.HMCSampler(proposal, metric, adaptor)
#CURRENT FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
ϵ = AHMC.find_good_stepsize(rng, hamiltonian, theta)
@info "Found initial step size" ϵ
else
ϵ = spl.alg.ϵ
end
# Generate a kernel.
kernel = make_ahmc_kernel(spl.alg, ϵ)
# Create initial transition and state.
# Already perform one step since otherwise we don't get any statistics.
t = AHMC.transition(rng, hamiltonian, kernel, z)
# Adaptation
adaptor = AHMCAdaptor(spl.alg, hamiltonian.metric; ϵ=ϵ)
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian, kernel, adaptor, 1, nadapts, t.z.θ, t.stat.acceptance_rate
)
end
##CHUNK 2
sampler::Sampler{<:AdaptiveHamiltonian},
state::HMCState{TV,TKernel,THam,PhType,AHMC.Adaptation.NoAdaptation},
) where {TV,TKernel,THam,PhType}
return state.kernel.τ.integrator.ϵ
end
gen_metric(dim::Int, spl::Sampler{<:Hamiltonian}, state) = AHMC.UnitEuclideanMetric(dim)
function gen_metric(dim::Int, spl::Sampler{<:AdaptiveHamiltonian}, state)
return AHMC.renew(state.hamiltonian.metric, AHMC.getM⁻¹(state.adaptor.pc))
end
function make_ahmc_kernel(alg::HMC, ϵ)
return AHMC.HMCKernel(
AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedNSteps(alg.n_leapfrog))
)
end
function make_ahmc_kernel(alg::HMCDA, ϵ)
return AHMC.HMCKernel(
AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedIntegrationTime(alg.λ))
)
##CHUNK 3
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return NUTS(-1, δ, max_depth, Δ_max, init_ϵ, metricT; adtype=adtype)
end
function NUTS(; kwargs...)
return NUTS(-1, 0.65; kwargs...)
end
for alg in (:HMC, :HMCDA, :NUTS)
@eval getmetricT(::$alg{<:Any,metricT}) where {metricT} = metricT
end
#####
##### HMC core functions
#####
getstepsize(sampler::Sampler{<:Hamiltonian}, state) = sampler.alg.ϵ
getstepsize(sampler::Sampler{<:AdaptiveHamiltonian}, state) = AHMC.getϵ(state.adaptor)
function getstepsize(
##CHUNK 4
struct HMCDA{AD,metricT<:AHMC.AbstractMetric} <: AdaptiveHamiltonian
n_adapts::Int # number of samples with adaption for ϵ
δ::Float64 # target accept rate
λ::Float64 # target leapfrog length
ϵ::Float64 # (initial) step size
adtype::AD
end
function HMCDA(
n_adapts::Int,
δ::Float64,
λ::Float64,
ϵ::Float64,
::Type{metricT};
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
) where {metricT<:AHMC.AbstractMetric}
return HMCDA{typeof(adtype),metricT}(n_adapts, δ, λ, ϵ, adtype)
end
function HMCDA(
##CHUNK 5
state::HMCState;
nadapts=0,
kwargs...,
)
# Get step size
@debug "current ϵ" getstepsize(spl, state)
# Compute transition.
hamiltonian = state.hamiltonian
z = state.z
t = AHMC.transition(rng, hamiltonian, state.kernel, z)
# Adaptation
i = state.i + 1
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian,
state.kernel,
state.adaptor,
i,
##CHUNK 6
function make_ahmc_kernel(alg::HMC, ϵ)
return AHMC.HMCKernel(
AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedNSteps(alg.n_leapfrog))
)
end
function make_ahmc_kernel(alg::HMCDA, ϵ)
return AHMC.HMCKernel(
AHMC.Trajectory{AHMC.EndPointTS}(AHMC.Leapfrog(ϵ), AHMC.FixedIntegrationTime(alg.λ))
)
end
function make_ahmc_kernel(alg::NUTS, ϵ)
return AHMC.HMCKernel(
AHMC.Trajectory{AHMC.MultinomialTS}(
AHMC.Leapfrog(ϵ), AHMC.GeneralisedNoUTurn(alg.max_depth, alg.Δ_max)
),
)
end
####
##CHUNK 7
) where {metricT<:AHMC.AbstractMetric}
return HMC{typeof(adtype),metricT}(ϵ, n_leapfrog, adtype)
end
function HMC(
ϵ::Float64,
n_leapfrog::Int;
metricT=AHMC.UnitEuclideanMetric,
adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
)
return HMC(ϵ, n_leapfrog, metricT; adtype=adtype)
end
DynamicPPL.initialsampler(::Sampler{<:Hamiltonian}) = SampleFromUniform()
# Handle setting `nadapts` and `discard_initial`
function AbstractMCMC.sample(
rng::AbstractRNG,
model::DynamicPPL.Model,
sampler::Sampler{<:AdaptiveHamiltonian},
N::Integer;
##CHUNK 8
@eval getmetricT(::$alg{<:Any,metricT}) where {metricT} = metricT
end
#####
##### HMC core functions
#####
getstepsize(sampler::Sampler{<:Hamiltonian}, state) = sampler.alg.ϵ
getstepsize(sampler::Sampler{<:AdaptiveHamiltonian}, state) = AHMC.getϵ(state.adaptor)
function getstepsize(
sampler::Sampler{<:AdaptiveHamiltonian},
state::HMCState{TV,TKernel,THam,PhType,AHMC.Adaptation.NoAdaptation},
) where {TV,TKernel,THam,PhType}
return state.kernel.τ.integrator.ϵ
end
gen_metric(dim::Int, spl::Sampler{<:Hamiltonian}, state) = AHMC.UnitEuclideanMetric(dim)
function gen_metric(dim::Int, spl::Sampler{<:AdaptiveHamiltonian}, state)
return AHMC.renew(state.hamiltonian.metric, AHMC.getM⁻¹(state.adaptor.pc))
end
##CHUNK 9
t = AHMC.transition(rng, hamiltonian, state.kernel, z)
# Adaptation
i = state.i + 1
if spl.alg isa AdaptiveHamiltonian
hamiltonian, kernel, _ = AHMC.adapt!(
hamiltonian,
state.kernel,
state.adaptor,
i,
nadapts,
t.z.θ,
t.stat.acceptance_rate,
)
else
kernel = state.kernel
end
# Update variables
vi = state.vi
|
167
| 183
|
Turing.jl
| 372
|
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
# `values_as_in_model`, which re-runs the model and extracts the parameters
# as they are seen in the model, i.e. in the constrained space. Moreover,
# this means that the code below will work both of linked and invlinked `vi`.
# Ref: https://github.com/TuringLang/Turing.jl/issues/2195
# NOTE: We need to `deepcopy` here to avoid modifying the original `vi`.
vals = DynamicPPL.values_as_in_model(model, true, deepcopy(vi))
# Obtain an iterator over the flattened parameter names and values.
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
# Materialize the iterators and concatenate.
return mapreduce(collect, vcat, iters)
end
|
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
# `values_as_in_model`, which re-runs the model and extracts the parameters
# as they are seen in the model, i.e. in the constrained space. Moreover,
# this means that the code below will work both of linked and invlinked `vi`.
# Ref: https://github.com/TuringLang/Turing.jl/issues/2195
# NOTE: We need to `deepcopy` here to avoid modifying the original `vi`.
vals = DynamicPPL.values_as_in_model(model, true, deepcopy(vi))
# Obtain an iterator over the flattened parameter names and values.
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
# Materialize the iterators and concatenate.
return mapreduce(collect, vcat, iters)
end
|
[
167,
183
] |
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
# `values_as_in_model`, which re-runs the model and extracts the parameters
# as they are seen in the model, i.e. in the constrained space. Moreover,
# this means that the code below will work both of linked and invlinked `vi`.
# Ref: https://github.com/TuringLang/Turing.jl/issues/2195
# NOTE: We need to `deepcopy` here to avoid modifying the original `vi`.
vals = DynamicPPL.values_as_in_model(model, true, deepcopy(vi))
# Obtain an iterator over the flattened parameter names and values.
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
# Materialize the iterators and concatenate.
return mapreduce(collect, vcat, iters)
end
|
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
# `values_as_in_model`, which re-runs the model and extracts the parameters
# as they are seen in the model, i.e. in the constrained space. Moreover,
# this means that the code below will work both of linked and invlinked `vi`.
# Ref: https://github.com/TuringLang/Turing.jl/issues/2195
# NOTE: We need to `deepcopy` here to avoid modifying the original `vi`.
vals = DynamicPPL.values_as_in_model(model, true, deepcopy(vi))
# Obtain an iterator over the flattened parameter names and values.
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
# Materialize the iterators and concatenate.
return mapreduce(collect, vcat, iters)
end
|
getparams
| 167
| 183
|
src/mcmc/Inference.jl
|
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
##CHUNK 2
Create a `ModeResult` for a given `log_density` objective and a `solution` given by `solve`.
`Optimization.solve` returns its own result type. This function converts that into the
richer format of `ModeResult`. It also takes care of transforming them back to the original
parameter space in case the optimization was done in a transformed space.
"""
function ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
varinfo_new = DynamicPPL.unflatten(log_density.ldf.varinfo, solution.u)
# `getparams` performs invlinking if needed
vals = Turing.Inference.getparams(log_density.ldf.model, varinfo_new)
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
vns_vals_iter = mapreduce(collect, vcat, iters)
syms = map(Symbol ∘ first, vns_vals_iter)
vals = map(last, vns_vals_iter)
return ModeResult(
NamedArrays.NamedArray(vals, syms), solution, -solution.objective, log_density
)
end
"""
##CHUNK 3
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
##CHUNK 4
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
log_density = OptimLogDensity(model, vi, ctx)
prob = Optimization.OptimizationProblem(log_density, adtype, constraints)
solution = Optimization.solve(prob, solver; kwargs...)
# TODO(mhauru) We return a ModeResult for compatibility with the older Optim.jl
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
end
"""
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
"""
Initialise a VarInfo for the Gibbs sampler.
This is straight up copypasta from DynamicPPL's src/sampler.jl. It is repeated here to
support calling both step and step_warmup as the initial step. DynamicPPL initialstep is
incompatible with step_warmup.
"""
function initial_varinfo(rng, model, spl, initial_params)
vi = DynamicPPL.default_varinfo(rng, model, spl)
# Update the parameters if provided.
if initial_params !== nothing
vi = DynamicPPL.initialize_parameters!!(vi, initial_params, model)
# Update joint log probability.
# This is a quick fix for https://github.com/TuringLang/Turing.jl/issues/1588
# and https://github.com/TuringLang/Turing.jl/issues/1563
# to avoid that existing variables are resampled
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.DefaultContext()))
##CHUNK 2
# Construct the conditional model and the varinfo that this sampler should use.
conditioned_model, context = make_conditional(model, varnames, global_vi)
vi = DynamicPPL.subset(global_vi, varnames)
vi = match_linking!!(vi, state, model)
# TODO(mhauru) The below may be overkill. If the varnames for this sampler are not
# sampled by other samplers, we don't need to `setparams`, but could rather simply
# recompute the log probability. More over, in some cases the recomputation could also
# be avoided, if e.g. the previous sampler has done all the necessary work already.
# However, we've judged that doing any caching or other tricks to avoid this now would
# be premature optimization. In most use cases of Gibbs a single model call here is not
# going to be a significant expense anyway.
# Set the state of the current sampler, accounting for any changes made by other
# samplers.
state = setparams_varinfo!!(conditioned_model, sampler, state, vi)
# Take a step with the local sampler.
new_state = last(step_function(rng, conditioned_model, sampler, state; kwargs...))
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
kwargs...,
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
# TODO(penelopeysm): As with in src/optimisation/Optimisation.jl, unclear
# whether initialisation is really necessary at all
vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
vi = DynamicPPL.link(vi, f.ldf.model)
f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
init_vals = DynamicPPL.getparams(f.ldf)
# Optimize!
M = Optim.optimize(Optim.only_fg!(f), init_vals, optimizer, options, args...; kwargs...)
# Warn the user if the optimization did not converge.
if !Optim.converged(M)
@warn """
Optimization did not converge! You may need to correct your model or adjust the
Optim parameters.
"""
#CURRENT FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
# Chain making utilities #
##########################
# TODO(penelopeysm): Separate Turing.Inference.getparams (should only be
# defined for AbstractVarInfo and Turing.Inference.Transition; returns varname
# => value maps) from AbstractMCMC.getparams (defined for any sampler transition,
"""
    getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)

Return a key-value map of parameters from the varinfo, with the values
expressed in the constrained space, i.e. as they are seen inside the model.
"""
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
    # NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
    # Unfortunately, using `invlink` can cause issues in scenarios where the constraints
    # of the parameters change depending on the realizations. Hence we have to use
    # `values_as_in_model`, which re-runs the model and extracts the parameters
    # as they are seen in the model, i.e. in the constrained space. Moreover,
    # this means that the code below will work for both linked and invlinked `vi`.
    # Ref: https://github.com/TuringLang/Turing.jl/issues/2195
    # NOTE: We need to `deepcopy` here to avoid modifying the original `vi`.
    return DynamicPPL.values_as_in_model(model, true, deepcopy(vi))
end
function getparams(
model::DynamicPPL.Model, untyped_vi::DynamicPPL.VarInfo{<:DynamicPPL.Metadata}
)
##CHUNK 2
return merge((lp=t.lp,), stat)
end
end
DynamicPPL.getlogp(t::Transition) = t.lp
# Metadata of VarInfo object
metadata(vi::AbstractVarInfo) = (lp=getlogp(vi),)
##########################
# Chain making utilities #
##########################
# TODO(penelopeysm): Separate Turing.Inference.getparams (should only be
# defined for AbstractVarInfo and Turing.Inference.Transition; returns varname
# => value maps) from AbstractMCMC.getparams (defined for any sampler transition,
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
##CHUNK 3
# `values_as_in_model`, which re-runs the model and extracts the parameters
# as they are seen in the model, i.e. in the constrained space. Moreover,
# this means that the code below will work both of linked and invlinked `vi`.
# Ref: https://github.com/TuringLang/Turing.jl/issues/2195
# NOTE: We need to `deepcopy` here to avoid modifying the original `vi`.
return DynamicPPL.values_as_in_model(model, true, deepcopy(vi))
end
"""
    getparams(model::DynamicPPL.Model, untyped_vi::DynamicPPL.VarInfo{<:DynamicPPL.Metadata})

Specialization of `getparams` for untyped `VarInfo`: convert to a typed
varinfo first and delegate. Purely a performance optimization; the result is
the same as for the generic method.
"""
function getparams(
    model::DynamicPPL.Model, untyped_vi::DynamicPPL.VarInfo{<:DynamicPPL.Metadata}
)
    # values_as_in_model is unconscionably slow for untyped VarInfo. It's
    # much faster to convert it to a typed varinfo before calling getparams.
    # https://github.com/TuringLang/Turing.jl/issues/2604
    return getparams(model, DynamicPPL.typed_varinfo(untyped_vi))
end
# Special case for a completely empty VarInfo (no variables at all): there is
# nothing to extract, so return an empty map directly rather than going
# through `values_as_in_model`.
function getparams(::DynamicPPL.Model, ::DynamicPPL.VarInfo{NamedTuple{(),Tuple{}}})
    return Dict{VarName,Any}()
end
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
|
188
| 207
|
Turing.jl
| 373
|
"""
    _params_to_array(model::DynamicPPL.Model, ts::Vector)

Extract parameter names and values from the transitions `ts`.

Returns a tuple `(names, vals)` where `names` is a vector of `VarName`s in
first-seen order across all transitions and `vals` is a matrix with one row
per transition and one column per name; entries for names absent from a
given transition are `missing`.
"""
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
    # OrderedSet keeps the resulting column order deterministic (first-seen).
    names_set = OrderedSet{VarName}()
    # Extract the parameter names and values from each transition.
    dicts = map(ts) do t
        nms_and_vs = getparams(model, t)
        nms = map(first, nms_and_vs)
        vs = map(last, nms_and_vs)
        for nm in nms
            push!(names_set, nm)
        end
        # Convert the names and values to a single dictionary.
        return OrderedDict(zip(nms, vs))
    end
    names = collect(names_set)
    # NOTE: iterate `key in names` directly — the previous `enumerate(names)`
    # bound an index `j` that was never used, and this form matches the other
    # implementation of this function in the file.
    vals = [get(dicts[i], key, missing) for i in eachindex(dicts), key in names]
    return names, vals
end
|
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
nms_and_vs = getparams(model, t)
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
names = collect(names_set)
vals = [
get(dicts[i], key, missing) for i in eachindex(dicts), (j, key) in enumerate(names)
]
return names, vals
end
|
[
188,
207
] |
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
nms_and_vs = getparams(model, t)
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
names = collect(names_set)
vals = [
get(dicts[i], key, missing) for i in eachindex(dicts), (j, key) in enumerate(names)
]
return names, vals
end
|
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
nms_and_vs = getparams(model, t)
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
names = collect(names_set)
vals = [
get(dicts[i], key, missing) for i in eachindex(dicts), (j, key) in enumerate(names)
]
return names, vals
end
|
_params_to_array
| 188
| 207
|
src/mcmc/Inference.jl
|
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
`m`. The return value is a `NamedTuple` with `var_symbols` as the key(s). The second
argument should be either a `Symbol` or a vector of `Symbol`s.
"""
function Base.get(m::ModeResult, var_symbols::AbstractVector{Symbol})
log_density = m.f.ldf
# Get all the variable names in the model. This is the same as the list of keys in
# m.values, but they are more convenient to filter when they are VarNames rather than
# Symbols.
vals_dict = Turing.Inference.getparams(log_density.model, log_density.varinfo)
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals_dict), values(vals_dict))
vns_and_vals = mapreduce(collect, vcat, iters)
varnames = collect(map(first, vns_and_vals))
# For each symbol s in var_symbols, pick all the values from m.values for which the
# variable name has that symbol.
et = eltype(m.values)
value_vectors = Vector{et}[]
for s in var_symbols
push!(
value_vectors,
[m.values[Symbol(vn)] for vn in varnames if DynamicPPL.getsym(vn) == s],
#CURRENT FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
names = collect(names_set)
vals = [get(dicts[i], key, missing) for i in eachindex(dicts), key in names]
return names, vals
end
function get_transition_extras(ts::AbstractVector{<:VarInfo})
    # A single extra column: the log density stored in each VarInfo.
    logps = map(getlogp, ts)
    return [:lp], reshape(logps, :, 1)
end
##CHUNK 2
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
##CHUNK 3
Return a key-value map of parameters from the varinfo.
"""
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
##CHUNK 4
end
function names_values(xs::AbstractVector{<:NamedTuple})
# Obtain all parameter names.
names_set = Set{Symbol}()
for x in xs
for k in keys(x)
push!(names_set, k)
end
end
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
return names_unique, values
end
getlogevidence(transitions, sampler, state) = missing
##CHUNK 5
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
map(Base.Fix1(getindex, params), vns)
end
return merge(NamedTuple(zip(keys(sym_to_vns), vals)), metadata(t))
end
end
"""
group_varnames_by_symbol(vns)
Group the varnames by their symbol.
##CHUNK 6
Return a vector of parameter values from the given sampler transition `t` (i.e.,
the first return value of AbstractMCMC.step). By default, returns the `t.θ` field.
!!! note
This method only needs to be implemented for external samplers. It will be
removed in future releases and replaced with `AbstractMCMC.getparams`.
"""
getparams(model, t) = t.θ
"""
Turing.Inference.getparams(model::DynamicPPL.Model, t::AbstractVarInfo)
Return a key-value map of parameters from the varinfo.
"""
function getparams(model::DynamicPPL.Model, vi::DynamicPPL.VarInfo)
# NOTE: In the past, `invlink(vi, model)` + `values_as(vi, OrderedDict)` was used.
# Unfortunately, using `invlink` can cause issues in scenarios where the constraints
# of the parameters change depending on the realizations. Hence we have to use
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
##CHUNK 7
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
##CHUNK 8
names = collect(names_set)
vals = [get(dicts[i], key, missing) for i in eachindex(dicts), key in names]
return names, vals
end
function get_transition_extras(ts::AbstractVector{<:VarInfo})
valmat = reshape([getlogp(t) for t in ts], :, 1)
return [:lp], valmat
end
function get_transition_extras(ts::AbstractVector)
# Extract all metadata.
extra_data = map(metadata, ts)
return names_values(extra_data)
end
function names_values(extra_data::AbstractVector{<:NamedTuple{names}}) where {names}
values = [getfield(data, name) for data in extra_data, name in names]
return collect(names), values
|
225
| 239
|
Turing.jl
| 374
|
"""
    names_values(xs::AbstractVector{<:NamedTuple})

Collect the union of all keys appearing in `xs` together with a matrix of the
corresponding values.

Returns `(names, values)` where `names` is a `Vector{Symbol}` in first-seen
order and `values[i, j]` is `xs[i][names[j]]`, or `missing` when the j-th
name is absent from `xs[i]`.
"""
function names_values(xs::AbstractVector{<:NamedTuple})
    # Obtain all parameter names in deterministic, first-seen order.
    # (A bare `Set` has unspecified iteration order, which made the column
    # order of the returned matrix non-deterministic.)
    seen = Set{Symbol}()
    names_unique = Symbol[]
    for x in xs
        for k in keys(x)
            if !(k in seen)
                push!(seen, k)
                push!(names_unique, k)
            end
        end
    end
    # Extract all values as matrix.
    values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
    return names_unique, values
end
|
function names_values(xs::AbstractVector{<:NamedTuple})
# Obtain all parameter names.
names_set = Set{Symbol}()
for x in xs
for k in keys(x)
push!(names_set, k)
end
end
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
return names_unique, values
end
|
[
225,
239
] |
function names_values(xs::AbstractVector{<:NamedTuple})
# Obtain all parameter names.
names_set = Set{Symbol}()
for x in xs
for k in keys(x)
push!(names_set, k)
end
end
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
return names_unique, values
end
|
function names_values(xs::AbstractVector{<:NamedTuple})
# Obtain all parameter names.
names_set = Set{Symbol}()
for x in xs
for k in keys(x)
push!(names_set, k)
end
end
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
return names_unique, values
end
|
names_values
| 225
| 239
|
src/mcmc/Inference.jl
|
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
set_namedtuple!(vi::VarInfo, nt::NamedTuple)
Places the values of a `NamedTuple` into the relevant places of a `VarInfo`.
"""
function set_namedtuple!(vi::DynamicPPL.VarInfoOrThreadSafeVarInfo, nt::NamedTuple)
for (n, vals) in pairs(nt)
vns = vi.metadata[n].vns
if vals isa AbstractVector
vals = unvectorize(vals)
end
if length(vns) == 1
# Only one variable, assign the values to it
DynamicPPL.setindex!(vi, vals, vns[1])
else
# Spread the values across the variables
length(vns) == length(vals) || error("Unequal number of variables and values")
for (vn, val) in zip(vns, vals)
DynamicPPL.setindex!(vi, val, vn)
end
end
##CHUNK 2
"""
dist_val_tuple(spl::Sampler{<:MH}, vi::VarInfo)
Return two `NamedTuples`.
The first `NamedTuple` has symbols as keys and distributions as values.
The second `NamedTuple` has model symbols as keys and their stored values as values.
"""
function dist_val_tuple(spl::Sampler{<:MH}, vi::DynamicPPL.VarInfoOrThreadSafeVarInfo)
    # Group the varnames by symbol once, then derive both NamedTuples from it.
    grouped_vns = all_varnames_grouped_by_symbol(vi)
    values_nt = _val_tuple(vi, grouped_vns)
    dists_nt = _dist_tuple(spl.alg.proposals, vi, grouped_vns)
    return dists_nt, values_nt
end
@generated function _val_tuple(vi::VarInfo, vns::NamedTuple{names}) where {names}
isempty(names) && return :(NamedTuple())
expr = Expr(:tuple)
expr.args = Any[
:(
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
##CHUNK 2
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
# `hcat` first to ensure we get the right `eltype`.
x = hcat(first(vals_vec), first(extra_values_vec))
# Pre-allocate to minimize memory usage.
parray = Array{eltype(x),3}(undef, length(vals_vec), size(x, 2), size(x, 1))
for (i, (vals, extras)) in enumerate(zip(vals_vec, extra_values_vec))
parray[i, :, :] = transpose(hcat(vals, extras))
end
# Get the average or final log evidence, if it exists.
le = getlogevidence(samples, state, spl)
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
vns_and_vals = mapreduce(collect, vcat, iters)
varnames = collect(map(first, vns_and_vals))
# For each symbol s in var_symbols, pick all the values from m.values for which the
# variable name has that symbol.
et = eltype(m.values)
value_vectors = Vector{et}[]
for s in var_symbols
push!(
value_vectors,
[m.values[Symbol(vn)] for vn in varnames if DynamicPPL.getsym(vn) == s],
)
end
return (; zip(var_symbols, value_vectors)...)
end
Base.get(m::ModeResult, var_symbol::Symbol) = get(m, [var_symbol])
"""
ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
##CHUNK 2
`m`. The return value is a `NamedTuple` with `var_symbols` as the key(s). The second
argument should be either a `Symbol` or a vector of `Symbol`s.
"""
function Base.get(m::ModeResult, var_symbols::AbstractVector{Symbol})
log_density = m.f.ldf
# Get all the variable names in the model. This is the same as the list of keys in
# m.values, but they are more convenient to filter when they are VarNames rather than
# Symbols.
vals_dict = Turing.Inference.getparams(log_density.model, log_density.varinfo)
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals_dict), values(vals_dict))
vns_and_vals = mapreduce(collect, vcat, iters)
varnames = collect(map(first, vns_and_vals))
# For each symbol s in var_symbols, pick all the values from m.values for which the
# variable name has that symbol.
et = eltype(m.values)
value_vectors = Vector{et}[]
for s in var_symbols
push!(
value_vectors,
[m.values[Symbol(vn)] for vn in varnames if DynamicPPL.getsym(vn) == s],
#CURRENT FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
end
function names_values(extra_data::AbstractVector{<:NamedTuple{names}}) where {names}
values = [getfield(data, name) for data in extra_data, name in names]
return collect(names), values
end
function names_values(xs::AbstractVector{<:NamedTuple})
# Obtain all parameter names.
names_set = Set{Symbol}()
for x in xs
for k in keys(x)
push!(names_set, k)
end
end
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
##CHUNK 2
for x in xs
for k in keys(x)
push!(names_set, k)
end
end
names_unique = collect(names_set)
# Extract all values as matrix.
values = [haskey(x, name) ? x[name] : missing for x in xs, name in names_unique]
return names_unique, values
end
getlogevidence(transitions, sampler, state) = missing
# Default MCMCChains.Chains constructor.
# This is type piracy (at least for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
##CHUNK 3
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
end
function get_transition_extras(ts::AbstractVector)
# Extract all metadata.
extra_data = map(metadata, ts)
return names_values(extra_data)
end
function names_values(extra_data::AbstractVector{<:NamedTuple{names}}) where {names}
values = [getfield(data, name) for data in extra_data, name in names]
return collect(names), values
end
function names_values(xs::AbstractVector{<:NamedTuple})
# Obtain all parameter names.
names_set = Set{Symbol}()
##CHUNK 4
- `OrderedDict{Symbol, Vector{VarName}}`: A dictionary mapping symbol to a vector of varnames.
"""
function group_varnames_by_symbol(vns)
    # Maps symbol => varnames with that symbol, preserving first-seen symbol
    # order and the original order of varnames within each group.
    d = OrderedDict{Symbol,Vector{VarName}}()
    for vn in vns
        sym = DynamicPPL.getsym(vn)
        # `get!` inserts an empty `Vector{VarName}` the first time `sym` is
        # seen, replacing the haskey-then-insert pattern and its double lookup.
        push!(get!(Vector{VarName}, d, sym), vn)
    end
    return d
end
function save(c::MCMCChains.Chains, spl::Sampler, model, vi, samples)
    # Stash the sampling context into the chain's info; on key clashes the
    # chain's existing info entries take precedence (they are merged last).
    extra = (sampler=spl, model=model, vi=deepcopy(vi), samples=samples)
    return setinfo(c, merge(extra, c.info))
end
#######################################
|
245
| 305
|
Turing.jl
| 375
|
"""
    AbstractMCMC.bundle_samples(ts, model, spl, state, ::Type{MCMCChains.Chains}; kwargs...)

Assemble the raw transitions `ts` into an `MCMCChains.Chains` object.

Parameter values are extracted via `_params_to_array` and concatenated with
extra per-transition statistics (e.g. `lp`) from `get_transition_extras`.

# Keywords
- `save_state=false`: store `model`, `spl` and `state` in the chain's info.
- `stats=missing`: optional timing info with `start`/`stop` fields.
- `sort_chain=false`: return `sort(chain)` instead of `chain`.
- `include_varname_to_symbol=true`: record the `VarName => Symbol` map in info.
- `discard_initial=0`, `thinning=1`: passed through as `start`/`thin`.
"""
function AbstractMCMC.bundle_samples(
    ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
    model::AbstractModel,
    spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
    state,
    chain_type::Type{MCMCChains.Chains};
    save_state=false,
    stats=missing,
    sort_chain=false,
    include_varname_to_symbol=true,
    discard_initial=0,
    thinning=1,
    kwargs...,
)
    # Convert transitions to array format.
    # Also retrieve the variable names.
    varnames, vals = _params_to_array(model, ts)
    varnames_symbol = map(Symbol, varnames)
    # Get the values of the extra parameters in each transition.
    extra_params, extra_values = get_transition_extras(ts)
    # Extract names & construct param array.
    nms = [varnames_symbol; extra_params]
    parray = hcat(vals, extra_values)
    # Get the average or final log evidence, if it exists.
    le = getlogevidence(ts, spl, state)
    # Set up the info tuple.
    info = NamedTuple()
    if include_varname_to_symbol
        info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
    end
    if save_state
        info = merge(info, (model=model, sampler=spl, samplerstate=state))
    end
    # Merge in the timing info, if available
    if !ismissing(stats)
        info = merge(info, (start_time=stats.start, stop_time=stats.stop))
    end
    # Concretize the array before giving it to MCMCChains.
    parray = MCMCChains.concretize(parray)
    # Chain construction.
    chain = MCMCChains.Chains(
        parray,
        nms,
        (internals=extra_params,);
        evidence=le,
        info=info,
        start=discard_initial + 1,
        thin=thinning,
    )
    return sort_chain ? sort(chain) : chain
end
|
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
# Set up the info tuple.
info = NamedTuple()
if include_varname_to_symbol
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Merge in the timing info, if available
if !ismissing(stats)
info = merge(info, (start_time=stats.start, stop_time=stats.stop))
end
# Conretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
|
[
245,
305
] |
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
# Set up the info tuple.
info = NamedTuple()
if include_varname_to_symbol
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Merge in the timing info, if available
if !ismissing(stats)
info = merge(info, (start_time=stats.start, stop_time=stats.stop))
end
# Conretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
|
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
# Set up the info tuple.
info = NamedTuple()
if include_varname_to_symbol
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Merge in the timing info, if available
if !ismissing(stats)
info = merge(info, (start_time=stats.start, stop_time=stats.stop))
end
# Conretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
|
AbstractMCMC.bundle_samples
| 245
| 305
|
src/mcmc/Inference.jl
|
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
# `hcat` first to ensure we get the right `eltype`.
x = hcat(first(vals_vec), first(extra_values_vec))
# Pre-allocate to minimize memory usage.
parray = Array{eltype(x),3}(undef, length(vals_vec), size(x, 2), size(x, 1))
for (i, (vals, extras)) in enumerate(zip(vals_vec, extra_values_vec))
parray[i, :, :] = transpose(hcat(vals, extras))
end
# Get the average or final log evidence, if it exists.
le = getlogevidence(samples, state, spl)
# Set up the info tuple.
info = (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),)
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Concretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
##CHUNK 2
# Set up the info tuple.
info = (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),)
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Concretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
##CHUNK 3
function AbstractMCMC.bundle_samples(
samples::Vector{<:Vector},
model::AbstractModel,
spl::Sampler{<:Emcee},
state::EmceeState,
chain_type::Type{MCMCChains.Chains};
save_state=false,
sort_chain=false,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
#CURRENT FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
function get_transition_extras(ts::AbstractVector)
# Extract all metadata.
extra_data = map(metadata, ts)
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
# Merge in the timing info, if available
if !ismissing(stats)
info = merge(info, (start_time=stats.start, stop_time=stats.stop))
end
# Conretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
##CHUNK 2
# Merge in the timing info, if available
if !ismissing(stats)
info = merge(info, (start_time=stats.start, stop_time=stats.stop))
end
# Conretize the array before giving it to MCMCChains.
parray = MCMCChains.concretize(parray)
# Chain construction.
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
##CHUNK 3
chain = MCMCChains.Chains(
parray,
nms,
(internals=extra_params,);
evidence=le,
info=info,
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
# This is type piracy (for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
##CHUNK 4
return sort_chain ? sort(chain) : chain
end
# This is type piracy (for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
map(Base.Fix1(getindex, params), vns)
##CHUNK 5
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
##CHUNK 6
names = collect(names_set)
vals = [get(dicts[i], key, missing) for i in eachindex(dicts), key in names]
return names, vals
end
function get_transition_extras(ts::AbstractVector{<:VarInfo})
valmat = reshape([getlogp(t) for t in ts], :, 1)
return [:lp], valmat
end
function get_transition_extras(ts::AbstractVector)
# Extract all metadata.
extra_data = map(metadata, ts)
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
if save_state
info = merge(info, (model=model, sampler=spl, samplerstate=state))
end
|
308
| 327
|
Turing.jl
| 376
|
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
map(Base.Fix1(getindex, params), vns)
end
return merge(NamedTuple(zip(keys(sym_to_vns), vals)), metadata(t))
end
end
|
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
map(Base.Fix1(getindex, params), vns)
end
return merge(NamedTuple(zip(keys(sym_to_vns), vals)), metadata(t))
end
end
|
[
308,
327
] |
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
map(Base.Fix1(getindex, params), vns)
end
return merge(NamedTuple(zip(keys(sym_to_vns), vals)), metadata(t))
end
end
|
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
map(Base.Fix1(getindex, params), vns)
end
return merge(NamedTuple(zip(keys(sym_to_vns), vals)), metadata(t))
end
end
|
AbstractMCMC.bundle_samples
| 308
| 327
|
src/mcmc/Inference.jl
|
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
function AbstractMCMC.bundle_samples(
samples::Vector{<:Vector},
model::AbstractModel,
spl::Sampler{<:Emcee},
state::EmceeState,
chain_type::Type{MCMCChains.Chains};
save_state=false,
sort_chain=false,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
##CHUNK 2
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
params_vec = map(Base.Fix1(_params_to_array, model), samples)
# Extract names and values separately.
varnames = params_vec[1][1]
varnames_symbol = map(Symbol, varnames)
vals_vec = [p[2] for p in params_vec]
# Get the values of the extra parameters in each transition.
extra_vec = map(get_transition_extras, samples)
# Get the extra parameter names & values.
extra_params = extra_vec[1][1]
extra_values_vec = [e[2] for e in extra_vec]
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
#CURRENT FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
return sort_chain ? sort(chain) : chain
end
# This is type piracy (for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
##CHUNK 2
chain_type::Type{Vector{NamedTuple}};
kwargs...,
)
return map(ts) do t
# Construct a dictionary of pairs `vn => value`.
params = OrderedDict(getparams(model, t))
# Group the variable names by their symbol.
sym_to_vns = group_varnames_by_symbol(keys(params))
# Convert the values to a vector.
vals = map(values(sym_to_vns)) do vns
map(Base.Fix1(getindex, params), vns)
end
return merge(NamedTuple(zip(keys(sym_to_vns), vals)), metadata(t))
end
end
"""
group_varnames_by_symbol(vns)
Group the varnames by their symbol.
##CHUNK 3
# Default MCMCChains.Chains constructor.
# This is type piracy (at least for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
##CHUNK 4
# Set up the info tuple.
info = NamedTuple()
if include_varname_to_symbol
info = merge(info, (varname_to_symbol=OrderedDict(zip(varnames, varnames_symbol)),))
end
start=discard_initial + 1,
thin=thinning,
)
return sort_chain ? sort(chain) : chain
end
# This is type piracy (for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
##CHUNK 5
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
# Get the values of the extra parameters in each transition.
extra_params, extra_values = get_transition_extras(ts)
# Extract names & construct param array.
nms = [varnames_symbol; extra_params]
parray = hcat(vals, extra_values)
# Get the average or final log evidence, if it exists.
le = getlogevidence(ts, spl, state)
##CHUNK 6
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
##CHUNK 7
return getparams(model, DynamicPPL.typed_varinfo(untyped_vi))
end
function getparams(::DynamicPPL.Model, ::DynamicPPL.VarInfo{NamedTuple{(),Tuple{}}})
return Dict{VarName,Any}()
end
function _params_to_array(model::DynamicPPL.Model, ts::Vector)
names_set = OrderedSet{VarName}()
# Extract the parameter names and values from each transition.
dicts = map(ts) do t
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
##CHUNK 8
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
|
340
| 350
|
Turing.jl
| 377
|
function group_varnames_by_symbol(vns)
d = OrderedDict{Symbol,Vector{VarName}}()
for vn in vns
sym = DynamicPPL.getsym(vn)
if !haskey(d, sym)
d[sym] = VarName[]
end
push!(d[sym], vn)
end
return d
end
|
function group_varnames_by_symbol(vns)
d = OrderedDict{Symbol,Vector{VarName}}()
for vn in vns
sym = DynamicPPL.getsym(vn)
if !haskey(d, sym)
d[sym] = VarName[]
end
push!(d[sym], vn)
end
return d
end
|
[
340,
350
] |
function group_varnames_by_symbol(vns)
d = OrderedDict{Symbol,Vector{VarName}}()
for vn in vns
sym = DynamicPPL.getsym(vn)
if !haskey(d, sym)
d[sym] = VarName[]
end
push!(d[sym], vn)
end
return d
end
|
function group_varnames_by_symbol(vns)
d = OrderedDict{Symbol,Vector{VarName}}()
for vn in vns
sym = DynamicPPL.getsym(vn)
if !haskey(d, sym)
d[sym] = VarName[]
end
push!(d[sym], vn)
end
return d
end
|
group_varnames_by_symbol
| 340
| 350
|
src/mcmc/Inference.jl
|
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
`m`. The return value is a `NamedTuple` with `var_symbols` as the key(s). The second
argument should be either a `Symbol` or a vector of `Symbol`s.
"""
function Base.get(m::ModeResult, var_symbols::AbstractVector{Symbol})
log_density = m.f.ldf
# Get all the variable names in the model. This is the same as the list of keys in
# m.values, but they are more convenient to filter when they are VarNames rather than
# Symbols.
vals_dict = Turing.Inference.getparams(log_density.model, log_density.varinfo)
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals_dict), values(vals_dict))
vns_and_vals = mapreduce(collect, vcat, iters)
varnames = collect(map(first, vns_and_vals))
# For each symbol s in var_symbols, pick all the values from m.values for which the
# variable name has that symbol.
et = eltype(m.values)
value_vectors = Vector{et}[]
for s in var_symbols
push!(
value_vectors,
[m.values[Symbol(vn)] for vn in varnames if DynamicPPL.getsym(vn) == s],
##CHUNK 2
vns_and_vals = mapreduce(collect, vcat, iters)
varnames = collect(map(first, vns_and_vals))
# For each symbol s in var_symbols, pick all the values from m.values for which the
# variable name has that symbol.
et = eltype(m.values)
value_vectors = Vector{et}[]
for s in var_symbols
push!(
value_vectors,
[m.values[Symbol(vn)] for vn in varnames if DynamicPPL.getsym(vn) == s],
)
end
return (; zip(var_symbols, value_vectors)...)
end
Base.get(m::ModeResult, var_symbol::Symbol) = get(m, [var_symbol])
"""
ModeResult(log_density::OptimLogDensity, solution::SciMLBase.OptimizationSolution)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
set_namedtuple!(vi::VarInfo, nt::NamedTuple)
Places the values of a `NamedTuple` into the relevant places of a `VarInfo`.
"""
function set_namedtuple!(vi::DynamicPPL.VarInfoOrThreadSafeVarInfo, nt::NamedTuple)
for (n, vals) in pairs(nt)
vns = vi.metadata[n].vns
if vals isa AbstractVector
vals = unvectorize(vals)
end
if length(vns) == 1
# Only one variable, assign the values to it
DynamicPPL.setindex!(vi, vals, vns[1])
else
# Spread the values across the variables
length(vns) == length(vals) || error("Unequal number of variables and values")
for (vn, val) in zip(vns, vals)
DynamicPPL.setindex!(vi, val, vn)
end
end
##CHUNK 2
dt = _dist_tuple(spl.alg.proposals, vi, vns)
vt = _val_tuple(vi, vns)
return dt, vt
end
@generated function _val_tuple(vi::VarInfo, vns::NamedTuple{names}) where {names}
isempty(names) && return :(NamedTuple())
expr = Expr(:tuple)
expr.args = Any[
:(
$name = reconstruct(
unvectorize(DynamicPPL.getdist.(Ref(vi), vns.$name)),
DynamicPPL.getindex_internal(vi, vns.$name),
)
) for name in names
]
return expr
end
_val_tuple(::VarInfo, ::Tuple{}) = ()
##CHUNK 3
offset = 0
return map(dist) do d
n = length(d)
newoffset = offset + n
v = val[(offset + 1):newoffset]
offset = newoffset
return v
end
end
"""
dist_val_tuple(spl::Sampler{<:MH}, vi::VarInfo)
Return two `NamedTuples`.
The first `NamedTuple` has symbols as keys and distributions as values.
The second `NamedTuple` has model symbols as keys and their stored values as values.
"""
function dist_val_tuple(spl::Sampler{<:MH}, vi::DynamicPPL.VarInfoOrThreadSafeVarInfo)
vns = all_varnames_grouped_by_symbol(vi)
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
model = test_model(1.2, 2, 10, 2.5)
all_varnames = DynamicPPL.VarName[
@varname(variance), @varname(z), @varname(y), @varname(q.a), @varname(r[1])
]
# All combinations of elements in all_varnames.
target_vn_combinations = Iterators.flatten(
Iterators.map(
n -> Combinatorics.combinations(all_varnames, n), 1:length(all_varnames)
),
)
@testset "$(target_vns)" for target_vns in target_vn_combinations
global_varinfo = DynamicPPL.VarInfo(model)
target_vns = collect(target_vns)
local_varinfo = DynamicPPL.subset(global_varinfo, target_vns)
ctx = Turing.Inference.GibbsContext(
target_vns, Ref(global_varinfo), DynamicPPL.DefaultContext()
)
# Check that the correct varnames are conditioned, and that getting their
#CURRENT FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
end
return merge(NamedTuple(zip(keys(sym_to_vns), vals)), metadata(t))
end
end
"""
group_varnames_by_symbol(vns)
Group the varnames by their symbol.
# Arguments
- `vns`: Iterable of `VarName`.
# Returns
- `OrderedDict{Symbol, Vector{VarName}}`: A dictionary mapping symbol to a vector of varnames.
"""
function group_varnames_by_symbol(vns)
d = OrderedDict{Symbol,Vector{VarName}}()
for vn in vns
sym = DynamicPPL.getsym(vn)
##CHUNK 2
# Arguments
- `vns`: Iterable of `VarName`.
# Returns
- `OrderedDict{Symbol, Vector{VarName}}`: A dictionary mapping symbol to a vector of varnames.
"""
function group_varnames_by_symbol(vns)
d = OrderedDict{Symbol,Vector{VarName}}()
for vn in vns
sym = DynamicPPL.getsym(vn)
if !haskey(d, sym)
d[sym] = VarName[]
end
push!(d[sym], vn)
end
return d
end
function save(c::MCMCChains.Chains, spl::Sampler, model, vi, samples)
nt = NamedTuple{(:sampler, :model, :vi, :samples)}((spl, model, deepcopy(vi), samples))
##CHUNK 3
# In general getparams returns a dict of VarName => values. We need to also
# split it up into constituent elements using
# `DynamicPPL.varname_and_value_leaves` because otherwise MCMCChains.jl
# won't understand it.
vals = getparams(model, t)
nms_and_vs = if isempty(vals)
Tuple{VarName,Any}[]
else
iters = map(DynamicPPL.varname_and_value_leaves, keys(vals), values(vals))
mapreduce(collect, vcat, iters)
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
##CHUNK 4
end
nms = map(first, nms_and_vs)
vs = map(last, nms_and_vs)
for nm in nms
push!(names_set, nm)
end
# Convert the names and values to a single dictionary.
return OrderedDict(zip(nms, vs))
end
names = collect(names_set)
vals = [get(dicts[i], key, missing) for i in eachindex(dicts), key in names]
return names, vals
end
function get_transition_extras(ts::AbstractVector{<:VarInfo})
valmat = reshape([getlogp(t) for t in ts], :, 1)
return [:lp], valmat
end
|
446
| 465
|
Turing.jl
| 378
|
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
return transitions
end
|
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
return transitions
end
|
[
446,
465
] |
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
return transitions
end
|
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
return transitions
end
|
transitions_from_chain
| 446
| 465
|
src/mcmc/Inference.jl
|
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
##CHUNK 2
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
#FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
#CURRENT FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
##CHUNK 2
transitions_from_chain(
[rng::AbstractRNG,]
model::Model,
chain::MCMCChains.Chains;
sampler = DynamicPPL.SampleFromPrior()
)
Execute `model` conditioned on each sample in `chain`, and return resulting transitions.
The returned transitions are represented in a `Vector{<:Turing.Inference.Transition}`.
# Details
In a bit more detail, the process is as follows:
1. For every `sample` in `chain`
1. For every `variable` in `sample`
1. Set `variable` in `model` to its value in `sample`
2. Execute `model` with variables fixed as above, sampling variables NOT present
in `chain` using `SampleFromPrior`
3. Return sampled variables and log-joint
##CHUNK 3
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
return transitions
end
end # module
##CHUNK 4
) where {T,N,TV<:Array{T,N}}
return Array{T,N}
end
##############
# Utilities #
##############
"""
transitions_from_chain(
[rng::AbstractRNG,]
model::Model,
chain::MCMCChains.Chains;
sampler = DynamicPPL.SampleFromPrior()
)
Execute `model` conditioned on each sample in `chain`, and return resulting transitions.
The returned transitions are represented in a `Vector{<:Turing.Inference.Transition}`.
##CHUNK 5
# Details
In a bit more detail, the process is as follows:
1. For every `sample` in `chain`
1. For every `variable` in `sample`
1. Set `variable` in `model` to its value in `sample`
2. Execute `model` with variables fixed as above, sampling variables NOT present
in `chain` using `SampleFromPrior`
3. Return sampled variables and log-joint
# Example
```julia-repl
julia> using Turing
```
"""
function transitions_from_chain(
model::DynamicPPL.Model, chain::MCMCChains.Chains; kwargs...
)
##CHUNK 6
# Default MCMCChains.Chains constructor.
# This is type piracy (at least for SampleFromPrior).
function AbstractMCMC.bundle_samples(
ts::Vector{<:Union{AbstractTransition,AbstractVarInfo}},
model::AbstractModel,
spl::Union{Sampler{<:InferenceAlgorithm},SampleFromPrior,RepeatSampler},
state,
chain_type::Type{MCMCChains.Chains};
save_state=false,
stats=missing,
sort_chain=false,
include_varname_to_symbol=true,
discard_initial=0,
thinning=1,
kwargs...,
)
# Convert transitions to array format.
# Also retrieve the variable names.
varnames, vals = _params_to_array(model, ts)
varnames_symbol = map(Symbol, varnames)
##CHUNK 7
# Example
```julia-repl
julia> using Turing
```
"""
function transitions_from_chain(
model::DynamicPPL.Model, chain::MCMCChains.Chains; kwargs...
)
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
|
110
| 139
|
Turing.jl
| 379
|
function MH(proposals...)
prop_syms = Symbol[]
props = AMH.Proposal[]
for s in proposals
if s isa Pair || s isa Tuple
# Check to see whether it's a pair that specifies a kernel
# or a specific proposal distribution.
push!(prop_syms, s[1])
push!(props, proposal(s[2]))
elseif length(proposals) == 1
# If we hit this block, check to see if it's
# a run-of-the-mill proposal or covariance
# matrix.
prop = proposal(s)
# Return early, we got a covariance matrix.
return new{typeof(prop)}(prop)
else
# Try to convert it to a proposal anyways,
# throw an error if not acceptable.
prop = proposal(s)
push!(props, prop)
end
end
proposals = NamedTuple{tuple(prop_syms...)}(tuple(props...))
return new{typeof(proposals)}(proposals)
end
|
function MH(proposals...)
prop_syms = Symbol[]
props = AMH.Proposal[]
for s in proposals
if s isa Pair || s isa Tuple
# Check to see whether it's a pair that specifies a kernel
# or a specific proposal distribution.
push!(prop_syms, s[1])
push!(props, proposal(s[2]))
elseif length(proposals) == 1
# If we hit this block, check to see if it's
# a run-of-the-mill proposal or covariance
# matrix.
prop = proposal(s)
# Return early, we got a covariance matrix.
return new{typeof(prop)}(prop)
else
# Try to convert it to a proposal anyways,
# throw an error if not acceptable.
prop = proposal(s)
push!(props, prop)
end
end
proposals = NamedTuple{tuple(prop_syms...)}(tuple(props...))
return new{typeof(proposals)}(proposals)
end
|
[
110,
139
] |
function MH(proposals...)
prop_syms = Symbol[]
props = AMH.Proposal[]
for s in proposals
if s isa Pair || s isa Tuple
# Check to see whether it's a pair that specifies a kernel
# or a specific proposal distribution.
push!(prop_syms, s[1])
push!(props, proposal(s[2]))
elseif length(proposals) == 1
# If we hit this block, check to see if it's
# a run-of-the-mill proposal or covariance
# matrix.
prop = proposal(s)
# Return early, we got a covariance matrix.
return new{typeof(prop)}(prop)
else
# Try to convert it to a proposal anyways,
# throw an error if not acceptable.
prop = proposal(s)
push!(props, prop)
end
end
proposals = NamedTuple{tuple(prop_syms...)}(tuple(props...))
return new{typeof(proposals)}(proposals)
end
|
function MH(proposals...)
prop_syms = Symbol[]
props = AMH.Proposal[]
for s in proposals
if s isa Pair || s isa Tuple
# Check to see whether it's a pair that specifies a kernel
# or a specific proposal distribution.
push!(prop_syms, s[1])
push!(props, proposal(s[2]))
elseif length(proposals) == 1
# If we hit this block, check to see if it's
# a run-of-the-mill proposal or covariance
# matrix.
prop = proposal(s)
# Return early, we got a covariance matrix.
return new{typeof(prop)}(prop)
else
# Try to convert it to a proposal anyways,
# throw an error if not acceptable.
prop = proposal(s)
push!(props, prop)
end
end
proposals = NamedTuple{tuple(prop_syms...)}(tuple(props...))
return new{typeof(proposals)}(proposals)
end
|
MH
| 110
| 139
|
src/mcmc/mh.jl
|
#FILE: Turing.jl/test/mcmc/mh.jl
##CHUNK 1
@test chain isa MCMCChains.Chains
end
@testset "proposal matrix" begin
mat = [1.0 -0.05; -0.05 1.0]
prop1 = mat # Matrix only constructor
prop2 = AdvancedMH.RandomWalkProposal(MvNormal(mat)) # Explicit proposal constructor
spl1 = MH(prop1)
spl2 = MH(prop2)
# Test that the two constructors are equivalent.
@test spl1.proposals.proposal.μ == spl2.proposals.proposal.μ
@test spl1.proposals.proposal.Σ.mat == spl2.proposals.proposal.Σ.mat
# Test inference.
chain1 = sample(StableRNG(seed), gdemo_default, spl1, 2_000)
chain2 = sample(StableRNG(seed), gdemo_default, spl2, 2_000)
##CHUNK 2
@test dt[:m] isa
AdvancedMH.StaticProposal{false,Vector{ContinuousUnivariateDistribution}}
@test dt[:m].proposal[1] isa Normal && dt[:m].proposal[2] isa InverseGamma
@test dt[:s] isa AdvancedMH.StaticProposal{false,<:InverseGamma}
@test vt[:z] isa Vector{Float64} && length(vt[:z]) == 2
@test vt[:m] isa Vector{Float64} && length(vt[:m]) == 2
@test vt[:s] isa Float64
chain = sample(model, MH(), 10)
@test chain isa MCMCChains.Chains
end
@testset "proposal matrix" begin
mat = [1.0 -0.05; -0.05 1.0]
prop1 = mat # Matrix only constructor
prop2 = AdvancedMH.RandomWalkProposal(MvNormal(mat)) # Explicit proposal constructor
#FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
log_density = OptimLogDensity(model, vi, ctx)
prob = Optimization.OptimizationProblem(log_density, adtype, constraints)
solution = Optimization.solve(prob, solver; kwargs...)
# TODO(mhauru) We return a ModeResult for compatibility with the older Optim.jl
# interface. Might we want to break that and develop a better return type?
return ModeResult(log_density, solution)
end
"""
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
#CURRENT FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
return true
end
# FIXME: This won't be hit unless `vals` are all the exactly same concrete type of `AdvancedMH.RandomWalkProposal`!
function should_link(
varinfo, sampler, proposal::NamedTuple{names,vals}
) where {names,vals<:NTuple{<:Any,<:AdvancedMH.RandomWalkProposal}}
return true
end
function maybe_link!!(varinfo, sampler, proposal, model)
return if should_link(varinfo, sampler, proposal)
DynamicPPL.link!!(varinfo, model)
else
varinfo
end
end
# Make a proposal if we don't have a covariance proposal matrix (the default).
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
##CHUNK 2
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
return Transition(model, vi), vi
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:MH}, vi::AbstractVarInfo; kwargs...
)
# Cases:
# 1. A covariance proposal matrix
# 2. A bunch of NamedTuples that specify the proposal space
vi = propose!!(rng, vi, model, spl, spl.alg.proposals)
##CHUNK 3
_dist_tuple(::@NamedTuple{}, ::VarInfo, ::Tuple{}) = ()
# Utility functions to link
should_link(varinfo, sampler, proposal) = false
function should_link(varinfo, sampler, proposal::NamedTuple{(),Tuple{}})
# If it's an empty `NamedTuple`, we're using the priors as proposals
# in which case we shouldn't link.
return false
end
function should_link(varinfo, sampler, proposal::AdvancedMH.RandomWalkProposal)
return true
end
# FIXME: This won't be hit unless `vals` are all the exactly same concrete type of `AdvancedMH.RandomWalkProposal`!
function should_link(
varinfo, sampler, proposal::NamedTuple{names,vals}
) where {names,vals<:NTuple{<:Any,<:AdvancedMH.RandomWalkProposal}}
return true
end
function maybe_link!!(varinfo, sampler, proposal, model)
##CHUNK 4
# Otherwise, use the default proposal.
:(
$name = AMH.StaticProposal(
unvectorize(DynamicPPL.getdist.(Ref(vi), vns.$name))
)
)
end for name in names
]
return expr
end
_dist_tuple(::@NamedTuple{}, ::VarInfo, ::Tuple{}) = ()
# Utility functions to link
should_link(varinfo, sampler, proposal) = false
function should_link(varinfo, sampler, proposal::NamedTuple{(),Tuple{}})
# If it's an empty `NamedTuple`, we're using the priors as proposals
# in which case we shouldn't link.
return false
end
function should_link(varinfo, sampler, proposal::AdvancedMH.RandomWalkProposal)
##CHUNK 5
return if should_link(varinfo, sampler, proposal)
DynamicPPL.link!!(varinfo, model)
else
varinfo
end
end
# Make a proposal if we don't have a covariance proposal matrix (the default).
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
)
# Retrieve distribution and value NamedTuples.
dt, vt = dist_val_tuple(spl, vi)
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(dt)
prev_trans = AMH.Transition(vt, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
##CHUNK 6
return Transition(model, vi), vi
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:MH}, vi::AbstractVarInfo; kwargs...
)
# Cases:
# 1. A covariance proposal matrix
# 2. A bunch of NamedTuples that specify the proposal space
vi = propose!!(rng, vi, model, spl, spl.alg.proposals)
return Transition(model, vi), vi
end
####
#### Compiler interface, i.e. tilde operators.
####
function DynamicPPL.assume(
rng::Random.AbstractRNG, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi
)
|
299
| 326
|
Turing.jl
| 380
|
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
)
# Retrieve distribution and value NamedTuples.
dt, vt = dist_val_tuple(spl, vi)
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(dt)
prev_trans = AMH.Transition(vt, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
|
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
)
# Retrieve distribution and value NamedTuples.
dt, vt = dist_val_tuple(spl, vi)
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(dt)
prev_trans = AMH.Transition(vt, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
|
[
299,
326
] |
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
)
# Retrieve distribution and value NamedTuples.
dt, vt = dist_val_tuple(spl, vi)
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(dt)
prev_trans = AMH.Transition(vt, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
|
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
)
# Retrieve distribution and value NamedTuples.
dt, vt = dist_val_tuple(spl, vi)
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(dt)
prev_trans = AMH.Transition(vt, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
|
propose!!
| 299
| 326
|
src/mcmc/mh.jl
|
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
##CHUNK 2
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
transition = map(states) do _state
vi = DynamicPPL.unflatten(vi, _state.params)
t = Transition(getparams(model, vi), _state.lp)
return t
end
newstate = EmceeState(vi, states)
return transition, newstate
end
##CHUNK 2
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs...
)
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
#FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
#CURRENT FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
function propose!!(
rng::AbstractRNG,
vi::AbstractVarInfo,
model::Model,
spl::Sampler{<:MH},
proposal::AdvancedMH.RandomWalkProposal,
)
# If this is the case, we can just draw directly from the proposal
# matrix.
vals = vi[:]
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(spl.alg.proposals)
prev_trans = AMH.Transition(vals, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
##CHUNK 2
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(spl.alg.proposals)
prev_trans = AMH.Transition(vals, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
##CHUNK 3
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
##CHUNK 4
# 2. A bunch of NamedTuples that specify the proposal space
vi = propose!!(rng, vi, model, spl, spl.alg.proposals)
return Transition(model, vi), vi
end
####
#### Compiler interface, i.e. tilde operators.
####
function DynamicPPL.assume(
rng::Random.AbstractRNG, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi
)
# Just defer to `SampleFromPrior`.
retval = DynamicPPL.assume(rng, SampleFromPrior(), dist, vn, vi)
return retval
end
function DynamicPPL.observe(spl::Sampler{<:MH}, d::Distribution, value, vi)
return DynamicPPL.observe(SampleFromPrior(), d, value, vi)
end
|
329
| 358
|
Turing.jl
| 381
|
function propose!!(
rng::AbstractRNG,
vi::AbstractVarInfo,
model::Model,
spl::Sampler{<:MH},
proposal::AdvancedMH.RandomWalkProposal,
)
# If this is the case, we can just draw directly from the proposal
# matrix.
vals = vi[:]
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(spl.alg.proposals)
prev_trans = AMH.Transition(vals, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
|
function propose!!(
rng::AbstractRNG,
vi::AbstractVarInfo,
model::Model,
spl::Sampler{<:MH},
proposal::AdvancedMH.RandomWalkProposal,
)
# If this is the case, we can just draw directly from the proposal
# matrix.
vals = vi[:]
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(spl.alg.proposals)
prev_trans = AMH.Transition(vals, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
|
[
329,
358
] |
function propose!!(
rng::AbstractRNG,
vi::AbstractVarInfo,
model::Model,
spl::Sampler{<:MH},
proposal::AdvancedMH.RandomWalkProposal,
)
# If this is the case, we can just draw directly from the proposal
# matrix.
vals = vi[:]
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(spl.alg.proposals)
prev_trans = AMH.Transition(vals, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
|
function propose!!(
rng::AbstractRNG,
vi::AbstractVarInfo,
model::Model,
spl::Sampler{<:MH},
proposal::AdvancedMH.RandomWalkProposal,
)
# If this is the case, we can just draw directly from the proposal
# matrix.
vals = vi[:]
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(spl.alg.proposals)
prev_trans = AMH.Transition(vals, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
|
propose!!
| 329
| 358
|
src/mcmc/mh.jl
|
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs...
)
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
##CHUNK 2
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
transition = map(states) do _state
vi = DynamicPPL.unflatten(vi, _state.params)
t = Transition(getparams(model, vi), _state.lp)
return t
end
newstate = EmceeState(vi, states)
return transition, newstate
end
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
##CHUNK 2
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
##CHUNK 3
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
function AbstractMCMC.step(
#FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
#CURRENT FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
return if should_link(varinfo, sampler, proposal)
DynamicPPL.link!!(varinfo, model)
else
varinfo
end
end
# Make a proposal if we don't have a covariance proposal matrix (the default).
function propose!!(
rng::AbstractRNG, vi::AbstractVarInfo, model::Model, spl::Sampler{<:MH}, proposal
)
# Retrieve distribution and value NamedTuples.
dt, vt = dist_val_tuple(spl, vi)
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(dt)
prev_trans = AMH.Transition(vt, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
##CHUNK 2
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
# Make a proposal if we DO have a covariance proposal matrix.
function DynamicPPL.initialstep(
##CHUNK 3
)
# Retrieve distribution and value NamedTuples.
dt, vt = dist_val_tuple(spl, vi)
# Create a sampler and the previous transition.
mh_sampler = AMH.MetropolisHastings(dt)
prev_trans = AMH.Transition(vt, getlogp(vi), false)
# Make a new transition.
densitymodel = AMH.DensityModel(
Base.Fix1(
LogDensityProblems.logdensity,
DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
##CHUNK 4
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
# Make a proposal if we DO have a covariance proposal matrix.
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
|
360
| 372
|
Turing.jl
| 382
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
return Transition(model, vi), vi
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
return Transition(model, vi), vi
end
|
[
360,
372
] |
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
return Transition(model, vi), vi
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
return Transition(model, vi), vi
end
|
DynamicPPL.initialstep
| 360
| 372
|
src/mcmc/mh.jl
|
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
##CHUNK 2
struct SGLDState{L,V<:AbstractVarInfo}
logdensity::L
vi::V
step::Int
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
##CHUNK 2
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
)
return Transition(model, vi), GibbsState(vi, states)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
##CHUNK 3
end
return vi
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step,
##CHUNK 4
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs};
initial_params=nothing,
kwargs...,
)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
vi = initial_varinfo(rng, model, spl, initial_params)
vi, states = gibbs_initialstep_recursive(
rng,
model,
AbstractMCMC.step_warmup,
varnames,
samplers,
vi;
initial_params=initial_params,
kwargs...,
#FILE: Turing.jl/src/mcmc/is.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::AbstractRNG, model::Model, spl::Sampler{<:IS}, vi::AbstractVarInfo; kwargs...
)
return Transition(model, vi), nothing
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:IS}, ::Nothing; kwargs...
)
vi = VarInfo(rng, model, spl)
return Transition(model, vi), nothing
end
# Calculate evidence.
function getlogevidence(samples::Vector{<:Transition}, ::Sampler{<:IS}, state)
return logsumexp(map(x -> x.lp, samples)) - log(length(samples))
end
function DynamicPPL.assume(rng, ::Sampler{<:IS}, dist::Distribution, vn::VarName, vi)
if haskey(vi, vn)
#CURRENT FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:MH}, vi::AbstractVarInfo; kwargs...
)
# Cases:
# 1. A covariance proposal matrix
# 2. A bunch of NamedTuples that specify the proposal space
vi = propose!!(rng, vi, model, spl, spl.alg.proposals)
return Transition(model, vi), vi
end
####
#### Compiler interface, i.e. tilde operators.
####
function DynamicPPL.assume(
rng::Random.AbstractRNG, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi
)
# Just defer to `SampleFromPrior`.
retval = DynamicPPL.assume(rng, SampleFromPrior(), dist, vn, vi)
return retval
##CHUNK 2
# TODO: Make this compatible with immutable `VarInfo`.
# Update the values in the VarInfo.
set_namedtuple!(vi, trans.params)
return setlogp!!(vi, trans.lp)
end
# Make a proposal if we DO have a covariance proposal matrix.
function propose!!(
rng::AbstractRNG,
vi::AbstractVarInfo,
model::Model,
spl::Sampler{<:MH},
proposal::AdvancedMH.RandomWalkProposal,
)
# If this is the case, we can just draw directly from the proposal
# matrix.
vals = vi[:]
# Create a sampler and the previous transition.
##CHUNK 3
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:MH}, vi::AbstractVarInfo; kwargs...
)
# Cases:
# 1. A covariance proposal matrix
# 2. A bunch of NamedTuples that specify the proposal space
vi = propose!!(rng, vi, model, spl, spl.alg.proposals)
return Transition(model, vi), vi
end
|
15
| 31
|
Turing.jl
| 383
|
function TracedModel(
model::Model,
sampler::AbstractSampler,
varinfo::AbstractVarInfo,
rng::Random.AbstractRNG,
)
context = SamplingContext(rng, sampler, DefaultContext())
args, kwargs = DynamicPPL.make_evaluate_args_and_kwargs(model, varinfo, context)
if kwargs !== nothing && !isempty(kwargs)
error(
"Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.",
)
end
return TracedModel{AbstractSampler,AbstractVarInfo,Model,Tuple}(
model, sampler, varinfo, (model.f, args...)
)
end
|
function TracedModel(
model::Model,
sampler::AbstractSampler,
varinfo::AbstractVarInfo,
rng::Random.AbstractRNG,
)
context = SamplingContext(rng, sampler, DefaultContext())
args, kwargs = DynamicPPL.make_evaluate_args_and_kwargs(model, varinfo, context)
if kwargs !== nothing && !isempty(kwargs)
error(
"Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.",
)
end
return TracedModel{AbstractSampler,AbstractVarInfo,Model,Tuple}(
model, sampler, varinfo, (model.f, args...)
)
end
|
[
15,
31
] |
function TracedModel(
model::Model,
sampler::AbstractSampler,
varinfo::AbstractVarInfo,
rng::Random.AbstractRNG,
)
context = SamplingContext(rng, sampler, DefaultContext())
args, kwargs = DynamicPPL.make_evaluate_args_and_kwargs(model, varinfo, context)
if kwargs !== nothing && !isempty(kwargs)
error(
"Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.",
)
end
return TracedModel{AbstractSampler,AbstractVarInfo,Model,Tuple}(
model, sampler, varinfo, (model.f, args...)
)
end
|
function TracedModel(
model::Model,
sampler::AbstractSampler,
varinfo::AbstractVarInfo,
rng::Random.AbstractRNG,
)
context = SamplingContext(rng, sampler, DefaultContext())
args, kwargs = DynamicPPL.make_evaluate_args_and_kwargs(model, varinfo, context)
if kwargs !== nothing && !isempty(kwargs)
error(
"Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.",
)
end
return TracedModel{AbstractSampler,AbstractVarInfo,Model,Tuple}(
model, sampler, varinfo, (model.f, args...)
)
end
|
TracedModel
| 15
| 31
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
##CHUNK 2
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
##CHUNK 3
end
"""
setparams_varinfo!!(model, sampler::Sampler, state, params::AbstractVarInfo)
A lot like AbstractMCMC.setparams!!, but instead of taking a vector of parameters, takes an
`AbstractVarInfo` object. Also takes the `sampler` as an argument. By default, falls back to
`AbstractMCMC.setparams!!(model, state, params[:])`.
`model` is typically a `DynamicPPL.Model`, but can also be e.g. an
`AbstractMCMC.LogDensityModel`.
"""
function setparams_varinfo!!(model, ::Sampler, state, params::AbstractVarInfo)
return AbstractMCMC.setparams!!(model, state, params[:])
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:MH},
state::AbstractVarInfo,
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
::DynamicPPL.Sampler,
::VarInfoState,
params::DynamicPPL.AbstractVarInfo,
)
return VarInfoState(params)
end
function AbstractMCMC.step(
::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter};
kwargs...,
)
spl.alg.non_warmup_init_count += 1
return Turing.Inference.Transition(nothing, 0.0),
VarInfoState(DynamicPPL.VarInfo(model))
end
function AbstractMCMC.step_warmup(
::Random.AbstractRNG,
##CHUNK 2
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter};
kwargs...,
)
spl.alg.warmup_init_count += 1
return Turing.Inference.Transition(nothing, 0.0),
VarInfoState(DynamicPPL.VarInfo(model))
end
function AbstractMCMC.step(
::Random.AbstractRNG,
::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter},
s::VarInfoState;
kwargs...,
)
spl.alg.non_warmup_count += 1
return Turing.Inference.Transition(nothing, 0.0), s
end
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
##CHUNK 2
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
##CHUNK 3
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
end
# Convenient constructor
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
return newtrace
end
# We need to tell Libtask which calls may have `produce` calls within them. In practice most
# of these won't be needed, because of inlining and the fact that `might_produce` is only
# called on `:invoke` expressions rather than `:call`s, but since those are implementation
|
62
| 72
|
Turing.jl
| 384
|
function AdvancedPS.update_rng!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}
)
# Extract the `args`.
args = trace.model.ctask.args
# From `args`, extract the `SamplingContext`, which contains the RNG.
sampling_context = args[3]
rng = sampling_context.rng
trace.rng = rng
return trace
end
|
function AdvancedPS.update_rng!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}
)
# Extract the `args`.
args = trace.model.ctask.args
# From `args`, extract the `SamplingContext`, which contains the RNG.
sampling_context = args[3]
rng = sampling_context.rng
trace.rng = rng
return trace
end
|
[
62,
72
] |
function AdvancedPS.update_rng!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}
)
# Extract the `args`.
args = trace.model.ctask.args
# From `args`, extract the `SamplingContext`, which contains the RNG.
sampling_context = args[3]
rng = sampling_context.rng
trace.rng = rng
return trace
end
|
function AdvancedPS.update_rng!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}
)
# Extract the `args`.
args = trace.model.ctask.args
# From `args`, extract the `SamplingContext`, which contains the RNG.
sampling_context = args[3]
rng = sampling_context.rng
trace.rng = rng
return trace
end
|
AdvancedPS.update_rng!
| 62
| 72
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/test/essential/container.jl
##CHUNK 1
vi = DynamicPPL.VarInfo()
sampler = Sampler(PG(10))
model = test()
trace = AdvancedPS.Trace(model, sampler, vi, AdvancedPS.TracedRNG())
# Make sure the backreference from taped_globals to the trace is in place.
@test trace.model.ctask.taped_globals.other === trace
res = AdvancedPS.advance!(trace, false)
@test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 1
@test res ≈ -log(2)
# Catch broken copy, espetially for RNG / VarInfo
newtrace = AdvancedPS.fork(trace)
res2 = AdvancedPS.advance!(trace)
@test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 2
@test DynamicPPL.get_num_produce(newtrace.model.f.varinfo) == 1
end
@testset "fork" begin
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
context = SamplingContext(rng, sampler, DefaultContext())
args, kwargs = DynamicPPL.make_evaluate_args_and_kwargs(model, varinfo, context)
if kwargs !== nothing && !isempty(kwargs)
error(
"Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.",
)
end
evaluator = (model.f, args...)
return TracedModel(model, sampler, varinfo, evaluator)
end
function AdvancedPS.advance!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}, isref::Bool=false
)
# Make sure we load/reset the rng in the new replaying mechanism
DynamicPPL.increment_num_produce!(trace.model.f.varinfo)
isref ? AdvancedPS.load_state!(trace.rng) : AdvancedPS.save_state!(trace.rng)
score = consume(trace.model.ctask)
if score === nothing
return nothing
##CHUNK 2
function AdvancedPS.advance!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}, isref::Bool=false
)
# Make sure we load/reset the rng in the new replaying mechanism
DynamicPPL.increment_num_produce!(trace.model.f.varinfo)
isref ? AdvancedPS.load_state!(trace.rng) : AdvancedPS.save_state!(trace.rng)
score = consume(trace.model.ctask)
if score === nothing
return nothing
else
return score + DynamicPPL.getlogp(trace.model.f.varinfo)
end
end
function AdvancedPS.delete_retained!(trace::TracedModel)
DynamicPPL.set_retained_vns_del!(trace.varinfo)
return trace
end
##CHUNK 3
function DynamicPPL.acclogp_observe!!(
context::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, varinfo::AbstractVarInfo, logp
)
Libtask.produce(logp)
return trace_local_varinfo_maybe(varinfo)
end
# Convenient constructor
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
##CHUNK 4
end
end
end
function trace_local_rng_maybe(rng::Random.AbstractRNG)
try
return Libtask.get_taped_globals(Any).rng
catch e
# NOTE: this heuristic allows Libtask evaluating a model outside a `Trace`.
if e == KeyError(:task_variable)
return rng
else
rethrow(e)
end
end
end
function DynamicPPL.assume(
rng, ::Sampler{<:Union{PG,SMC}}, dist::Distribution, vn::VarName, _vi::AbstractVarInfo
)
##CHUNK 5
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
return newtrace
end
# We need to tell Libtask which calls may have `produce` calls within them. In practice most
# of these won't be needed, because of inlining and the fact that `might_produce` is only
# called on `:invoke` expressions rather than `:call`s, but since those are implementation
# details of the compiler, we set a bunch of methods as might_produce = true. We start with
# `acclogp_observe!!` which is what calls `produce` and go up the call stack.
Libtask.might_produce(::Type{<:Tuple{typeof(DynamicPPL.acclogp_observe!!),Vararg}}) = true
Libtask.might_produce(::Type{<:Tuple{typeof(DynamicPPL.tilde_observe!!),Vararg}}) = true
##CHUNK 6
function AdvancedPS.reset_model(trace::TracedModel)
DynamicPPL.reset_num_produce!(trace.varinfo)
return trace
end
function AdvancedPS.reset_logprob!(trace::TracedModel)
DynamicPPL.resetlogp!!(trace.model.varinfo)
return trace
end
function Libtask.TapedTask(taped_globals, model::TracedModel; kwargs...)
"""
$(TYPEDEF)
Sequential Monte Carlo sampler.
# Fields
$(TYPEDFIELDS)
"""
##CHUNK 7
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
function AbstractMCMC.step(
##CHUNK 8
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
##CHUNK 9
###
### Particle Filtering and Particle MCMC Samplers.
###
### AdvancedPS models and interface
struct TracedModel{S<:AbstractSampler,V<:AbstractVarInfo,M<:Model,E<:Tuple} <:
AdvancedPS.AbstractGenericModel
model::M
sampler::S
varinfo::V
evaluator::E
end
function TracedModel(
model::Model,
sampler::AbstractSampler,
varinfo::AbstractVarInfo,
rng::Random.AbstractRNG,
)
|
187
| 220
|
Turing.jl
| 385
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
empty!!(vi)
# Create a new set of particles.
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
empty!!(vi)
# Create a new set of particles.
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
|
[
187,
220
] |
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
empty!!(vi)
# Create a new set of particles.
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
empty!!(vi)
# Create a new set of particles.
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
|
DynamicPPL.initialstep
| 187
| 220
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
##CHUNK 2
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
##CHUNK 3
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
##CHUNK 4
kwargs...,
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
# Compute the transition and the next state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
nextstate = SMCState(state.particles, index + 1, state.average_logevidence)
##CHUNK 5
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
##CHUNK 6
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
##CHUNK 7
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
##CHUNK 8
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
# Compute the transition and the next state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
nextstate = SMCState(state.particles, index + 1, state.average_logevidence)
return transition, nextstate
end
####
#### Particle Gibbs sampler.
####
"""
$(TYPEDEF)
|
222
| 238
|
Turing.jl
| 386
|
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
# Compute the transition and the next state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
nextstate = SMCState(state.particles, index + 1, state.average_logevidence)
return transition, nextstate
end
|
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
# Compute the transition and the next state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
nextstate = SMCState(state.particles, index + 1, state.average_logevidence)
return transition, nextstate
end
|
[
222,
238
] |
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
# Compute the transition and the next state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
nextstate = SMCState(state.particles, index + 1, state.average_logevidence)
return transition, nextstate
end
|
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
# Compute the transition and the next state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
nextstate = SMCState(state.particles, index + 1, state.average_logevidence)
return transition, nextstate
end
|
AbstractMCMC.step
| 222
| 238
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
transition = Transition(model, vi, t)
state = HMCState(vi, 1, kernel, hamiltonian, t.z, adaptor)
return transition, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Hamiltonian},
state::HMCState;
nadapts=0,
kwargs...,
)
# Get step size
@debug "current ϵ" getstepsize(spl, state)
# Compute transition.
hamiltonian = state.hamiltonian
z = state.z
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
##CHUNK 2
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
##CHUNK 3
rng, AbstractMCMC.LogDensityModel(f), sampler; initial_params, kwargs...
)
else
transition_inner, state_inner = AbstractMCMC.step(
rng,
AbstractMCMC.LogDensityModel(f),
sampler,
initial_state;
initial_params,
kwargs...,
)
end
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
# Update the `state`
return (
Transition(f.model, new_varinfo, transition_inner),
TuringState(state_inner, new_varinfo, f),
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
AMH.Transition(vi[:], getlogp(vi), false)
end,
)
return transition, state
end
function AbstractMCMC.step(
rng::AbstractRNG, model::Model, spl::Sampler{<:Emcee}, state::EmceeState; kwargs...
)
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
##CHUNK 2
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
$(TYPEDFIELDS)
"""
struct PG{R} <: ParticleInference
"""Number of particles."""
nparticles::Int
"""Resampling algorithm."""
resampler::R
end
##CHUNK 3
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
##CHUNK 4
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
##CHUNK 5
else
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
N;
chain_type,
initial_state,
progress=progress,
nparticles=N,
kwargs...,
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
|
322
| 355
|
Turing.jl
| 387
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
|
[
322,
355
] |
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
|
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
reset_num_produce!(vi)
set_retained_vns_del!(vi)
resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
|
DynamicPPL.initialstep
| 322
| 355
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
##CHUNK 2
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
##CHUNK 3
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
DynamicPPL.empty!!(vi)
# Create a new set of particles.
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
##CHUNK 4
DynamicPPL.resetlogp!!(vi)
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
##CHUNK 5
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
##CHUNK 6
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
function DynamicPPL.use_threadsafe_eval(
::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, ::AbstractVarInfo
)
return false
end
function trace_local_varinfo_maybe(varinfo)
try
trace = Libtask.get_taped_globals(Any).other
##CHUNK 7
kwargs...,
)
end
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:SMC},
vi::AbstractVarInfo;
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
DynamicPPL.empty!!(vi)
# Create a new set of particles.
##CHUNK 8
lp::F
"The log evidence of the sample."
logevidence::F
end
struct PGState
vi::AbstractVarInfo
rng::Random.AbstractRNG
end
varinfo(state::PGState) = state.vi
function PGTransition(model::DynamicPPL.Model, vi::AbstractVarInfo, logevidence)
theta = getparams(model, vi)
# This is pretty useless since we reset the log probability continuously in the
# particle sweep.
lp = getlogp(vi)
return PGTransition(theta, lp, logevidence)
##CHUNK 9
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
|
357
| 395
|
Turing.jl
| 388
|
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
reset_num_produce!(vi)
resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
|
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
reset_num_produce!(vi)
resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
|
[
357,
395
] |
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
reset_num_produce!(vi)
resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
|
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
reset_num_produce!(vi)
resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl, reference)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
newreference = particles.vals[indx]
# Compute the transition.
_vi = newreference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
|
AbstractMCMC.step
| 357
| 395
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/src/mcmc/Inference.jl
##CHUNK 1
return transitions_from_chain(Random.default_rng(), model, chain; kwargs...)
end
function transitions_from_chain(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
chain::MCMCChains.Chains;
sampler=DynamicPPL.SampleFromPrior(),
)
vi = Turing.VarInfo(model)
iters = Iterators.product(1:size(chain, 1), 1:size(chain, 3))
transitions = map(iters) do (sample_idx, chain_idx)
# Set variables present in `chain` and mark those NOT present in chain to be resampled.
DynamicPPL.setval_and_resample!(vi, chain, sample_idx, chain_idx)
model(rng, vi, sampler)
# Convert `VarInfo` into `NamedTuple` and save.
Transition(model, vi)
end
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
##CHUNK 2
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
##CHUNK 3
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
##CHUNK 4
DynamicPPL.resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform a particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Pick a particle to be retained.
Ws = AdvancedPS.getweights(particles)
indx = AdvancedPS.randcat(rng, Ws)
reference = particles.vals[indx]
# Compute the first transition.
_vi = reference.model.f.varinfo
##CHUNK 5
nparticles::Int,
kwargs...,
)
# Reset the VarInfo.
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
DynamicPPL.empty!!(vi)
# Create a new set of particles.
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:nparticles],
AdvancedPS.TracedRNG(),
rng,
)
# Perform particle sweep.
logevidence = AdvancedPS.sweep!(rng, particles, spl.alg.resampler, spl)
# Extract the first particle and its weight.
##CHUNK 6
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
catch e
# NOTE: this heuristic allows Libtask evaluating a model outside a `Trace`.
if e == KeyError(:task_variable)
return varinfo
##CHUNK 7
particle = particles.vals[1]
weight = AdvancedPS.getweight(particles, 1)
# Compute the first transition and the first state.
transition = SMCTransition(model, particle.model.f.varinfo, weight)
state = SMCState(particles, 2, logevidence)
return transition, state
end
function AbstractMCMC.step(
::AbstractRNG, model::AbstractModel, spl::Sampler{<:SMC}, state::SMCState; kwargs...
)
# Extract the index of the current particle.
index = state.particleindex
# Extract the current particle and its weight.
particles = state.particles
particle = particles.vals[index]
weight = AdvancedPS.getweight(particles, index)
|
431
| 455
|
Turing.jl
| 389
|
function DynamicPPL.assume(
rng,
spl::Sampler{<:Union{PG,SMC}},
dist::Distribution,
vn::VarName,
_vi::AbstractVarInfo,
)
vi = trace_local_varinfo_maybe(_vi)
trng = trace_local_rng_maybe(rng)
if ~haskey(vi, vn)
r = rand(trng, dist)
push!!(vi, vn, r, dist)
elseif is_flagged(vi, vn, "del")
unset_flag!(vi, vn, "del") # Reference particle parent
r = rand(trng, dist)
vi[vn] = DynamicPPL.tovec(r)
setorder!(vi, vn, get_num_produce(vi))
else
r = vi[vn]
end
# TODO: Should we make this `zero(promote_type(eltype(dist), eltype(r)))` or something?
lp = 0
return r, lp, vi
end
|
function DynamicPPL.assume(
rng,
spl::Sampler{<:Union{PG,SMC}},
dist::Distribution,
vn::VarName,
_vi::AbstractVarInfo,
)
vi = trace_local_varinfo_maybe(_vi)
trng = trace_local_rng_maybe(rng)
if ~haskey(vi, vn)
r = rand(trng, dist)
push!!(vi, vn, r, dist)
elseif is_flagged(vi, vn, "del")
unset_flag!(vi, vn, "del") # Reference particle parent
r = rand(trng, dist)
vi[vn] = DynamicPPL.tovec(r)
setorder!(vi, vn, get_num_produce(vi))
else
r = vi[vn]
end
# TODO: Should we make this `zero(promote_type(eltype(dist), eltype(r)))` or something?
lp = 0
return r, lp, vi
end
|
[
431,
455
] |
function DynamicPPL.assume(
rng,
spl::Sampler{<:Union{PG,SMC}},
dist::Distribution,
vn::VarName,
_vi::AbstractVarInfo,
)
vi = trace_local_varinfo_maybe(_vi)
trng = trace_local_rng_maybe(rng)
if ~haskey(vi, vn)
r = rand(trng, dist)
push!!(vi, vn, r, dist)
elseif is_flagged(vi, vn, "del")
unset_flag!(vi, vn, "del") # Reference particle parent
r = rand(trng, dist)
vi[vn] = DynamicPPL.tovec(r)
setorder!(vi, vn, get_num_produce(vi))
else
r = vi[vn]
end
# TODO: Should we make this `zero(promote_type(eltype(dist), eltype(r)))` or something?
lp = 0
return r, lp, vi
end
|
function DynamicPPL.assume(
rng,
spl::Sampler{<:Union{PG,SMC}},
dist::Distribution,
vn::VarName,
_vi::AbstractVarInfo,
)
vi = trace_local_varinfo_maybe(_vi)
trng = trace_local_rng_maybe(rng)
if ~haskey(vi, vn)
r = rand(trng, dist)
push!!(vi, vn, r, dist)
elseif is_flagged(vi, vn, "del")
unset_flag!(vi, vn, "del") # Reference particle parent
r = rand(trng, dist)
vi[vn] = DynamicPPL.tovec(r)
setorder!(vi, vn, get_num_produce(vi))
else
r = vi[vn]
end
# TODO: Should we make this `zero(promote_type(eltype(dist), eltype(r)))` or something?
lp = 0
return r, lp, vi
end
|
DynamicPPL.assume
| 431
| 455
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/src/mcmc/is.jl
##CHUNK 1
return Transition(model, vi), nothing
end
# Calculate evidence.
function getlogevidence(samples::Vector{<:Transition}, ::Sampler{<:IS}, state)
return logsumexp(map(x -> x.lp, samples)) - log(length(samples))
end
function DynamicPPL.assume(rng, ::Sampler{<:IS}, dist::Distribution, vn::VarName, vi)
if haskey(vi, vn)
r = vi[vn]
else
r = rand(rng, dist)
vi = push!!(vi, vn, r, dist)
end
return r, 0, vi
end
function DynamicPPL.observe(::Sampler{<:IS}, dist::Distribution, value, vi)
return logpdf(dist, value), vi
##CHUNK 2
r = vi[vn]
else
r = rand(rng, dist)
vi = push!!(vi, vn, r, dist)
end
return r, 0, vi
end
function DynamicPPL.observe(::Sampler{<:IS}, dist::Distribution, value, vi)
return logpdf(dist, value), vi
end
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
DynamicPPL.tilde_assume(rng, child_context, sampler, right, vn, vi)
elseif has_conditioned_gibbs(context, vn)
value, lp, _ = DynamicPPL.tilde_assume(
child_context, right, vn, get_global_varinfo(context)
)
value, lp, vi
else
value, lp, new_global_vi = DynamicPPL.tilde_assume(
rng,
child_context,
DynamicPPL.SampleFromPrior(),
right,
vn,
get_global_varinfo(context),
)
set_global_varinfo!(context, new_global_vi)
value, lp, vi
end
end
#FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
# Ensure that the prior is a Gaussian distribution (checked in the constructor)
EllipticalSliceSampling.isgaussian(::Type{<:ESSPrior}) = true
# Only define out-of-place sampling
function Base.rand(rng::Random.AbstractRNG, p::ESSPrior)
sampler = p.sampler
varinfo = p.varinfo
# TODO: Surely there's a better way of doing this now that we have `SamplingContext`?
vns = keys(varinfo)
for vn in vns
set_flag!(varinfo, vn, "del")
end
p.model(rng, varinfo, sampler)
return varinfo[:]
end
# Mean of prior distribution
Distributions.mean(p::ESSPrior) = p.μ
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
return Transition(model, vi), vi
end
####
#### Compiler interface, i.e. tilde operators.
####
function DynamicPPL.assume(
rng::Random.AbstractRNG, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi
)
# Just defer to `SampleFromPrior`.
retval = DynamicPPL.assume(rng, SampleFromPrior(), dist, vn, vi)
return retval
end
function DynamicPPL.observe(spl::Sampler{<:MH}, d::Distribution, value, vi)
return DynamicPPL.observe(SampleFromPrior(), d, value, vi)
end
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
if e == KeyError(:task_variable)
return rng
else
rethrow(e)
end
end
end
function DynamicPPL.assume(
rng, ::Sampler{<:Union{PG,SMC}}, dist::Distribution, vn::VarName, _vi::AbstractVarInfo
)
vi = trace_local_varinfo_maybe(_vi)
trng = trace_local_rng_maybe(rng)
if ~haskey(vi, vn)
r = rand(trng, dist)
push!!(vi, vn, r, dist)
elseif DynamicPPL.is_flagged(vi, vn, "del")
DynamicPPL.unset_flag!(vi, vn, "del") # Reference particle parent
r = rand(trng, dist)
##CHUNK 2
)
vi = trace_local_varinfo_maybe(_vi)
trng = trace_local_rng_maybe(rng)
if ~haskey(vi, vn)
r = rand(trng, dist)
push!!(vi, vn, r, dist)
elseif DynamicPPL.is_flagged(vi, vn, "del")
DynamicPPL.unset_flag!(vi, vn, "del") # Reference particle parent
r = rand(trng, dist)
Libtask.produce(logp)
return trace_local_varinfo_maybe(varinfo)
end
# Convenient constructor
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
##CHUNK 3
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
##CHUNK 4
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
##CHUNK 5
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
num_particles = spl.alg.nparticles
x = map(1:num_particles) do i
if i != num_particles
return AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG())
else
return reference
end
end
particles = AdvancedPS.ParticleContainer(x, AdvancedPS.TracedRNG(), rng)
|
477
| 490
|
Turing.jl
| 390
|
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
AdvancedPS.addreference!(newtrace.model.ctask.task, newtrace)
return newtrace
end
|
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
AdvancedPS.addreference!(newtrace.model.ctask.task, newtrace)
return newtrace
end
|
[
477,
490
] |
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
AdvancedPS.addreference!(newtrace.model.ctask.task, newtrace)
return newtrace
end
|
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
AdvancedPS.addreference!(newtrace.model.ctask.task, newtrace)
return newtrace
end
|
AdvancedPS.Trace
| 477
| 490
|
src/mcmc/particle_mcmc.jl
|
#FILE: Turing.jl/test/essential/container.jl
##CHUNK 1
vi = DynamicPPL.VarInfo()
sampler = Sampler(PG(10))
model = test()
trace = AdvancedPS.Trace(model, sampler, vi, AdvancedPS.TracedRNG())
# Make sure the backreference from taped_globals to the trace is in place.
@test trace.model.ctask.taped_globals.other === trace
res = AdvancedPS.advance!(trace, false)
@test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 1
@test res ≈ -log(2)
# Catch broken copy, espetially for RNG / VarInfo
newtrace = AdvancedPS.fork(trace)
res2 = AdvancedPS.advance!(trace)
@test DynamicPPL.get_num_produce(trace.model.f.varinfo) == 2
@test DynamicPPL.get_num_produce(newtrace.model.f.varinfo) == 1
end
@testset "fork" begin
#CURRENT FILE: Turing.jl/src/mcmc/particle_mcmc.jl
##CHUNK 1
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
newtrace = AdvancedPS.Trace(tmodel, rng)
return newtrace
end
# We need to tell Libtask which calls may have `produce` calls within them. In practice most
# of these won't be needed, because of inlining and the fact that `might_produce` is only
return true
end
Libtask.might_produce(::Type{<:Tuple{<:DynamicPPL.Model,Vararg}}) = true
##CHUNK 2
end
function DynamicPPL.acclogp_observe!!(
context::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, varinfo::AbstractVarInfo, logp
)
Libtask.produce(logp)
return trace_local_varinfo_maybe(varinfo)
end
# Convenient constructor
function AdvancedPS.Trace(
model::Model,
sampler::Sampler{<:Union{SMC,PG}},
varinfo::AbstractVarInfo,
rng::AdvancedPS.TracedRNG,
)
newvarinfo = deepcopy(varinfo)
DynamicPPL.reset_num_produce!(newvarinfo)
tmodel = TracedModel(model, sampler, newvarinfo, rng)
##CHUNK 3
context = SamplingContext(rng, sampler, DefaultContext())
args, kwargs = DynamicPPL.make_evaluate_args_and_kwargs(model, varinfo, context)
if kwargs !== nothing && !isempty(kwargs)
error(
"Sampling with `$(sampler.alg)` does not support models with keyword arguments. See issue #2007 for more details.",
)
end
evaluator = (model.f, args...)
return TracedModel(model, sampler, varinfo, evaluator)
end
function AdvancedPS.advance!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}, isref::Bool=false
)
# Make sure we load/reset the rng in the new replaying mechanism
DynamicPPL.increment_num_produce!(trace.model.f.varinfo)
isref ? AdvancedPS.load_state!(trace.rng) : AdvancedPS.save_state!(trace.rng)
score = consume(trace.model.ctask)
if score === nothing
return nothing
##CHUNK 4
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:PG},
vi::AbstractVarInfo;
kwargs...,
)
# Reset the VarInfo before new sweep
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.set_retained_vns_del!(vi)
DynamicPPL.resetlogp!!(vi)
# Create a new set of particles
num_particles = spl.alg.nparticles
particles = AdvancedPS.ParticleContainer(
[AdvancedPS.Trace(model, spl, vi, AdvancedPS.TracedRNG()) for _ in 1:num_particles],
AdvancedPS.TracedRNG(),
rng,
)
##CHUNK 5
function AdvancedPS.advance!(
trace::AdvancedPS.Trace{<:AdvancedPS.LibtaskModel{<:TracedModel}}, isref::Bool=false
)
# Make sure we load/reset the rng in the new replaying mechanism
DynamicPPL.increment_num_produce!(trace.model.f.varinfo)
isref ? AdvancedPS.load_state!(trace.rng) : AdvancedPS.save_state!(trace.rng)
score = consume(trace.model.ctask)
if score === nothing
return nothing
else
return score + DynamicPPL.getlogp(trace.model.f.varinfo)
end
end
function AdvancedPS.delete_retained!(trace::TracedModel)
DynamicPPL.set_retained_vns_del!(trace.varinfo)
return trace
end
##CHUNK 6
function AdvancedPS.reset_model(trace::TracedModel)
DynamicPPL.reset_num_produce!(trace.varinfo)
return trace
end
function AdvancedPS.reset_logprob!(trace::TracedModel)
DynamicPPL.resetlogp!!(trace.model.varinfo)
return trace
end
function Libtask.TapedTask(taped_globals, model::TracedModel; kwargs...)
return Libtask.TapedTask(
taped_globals, model.evaluator[1], model.evaluator[2:end]...; kwargs...
)
end
abstract type ParticleInference <: InferenceAlgorithm end
####
#### Generic Sequential Monte Carlo sampler.
##CHUNK 7
###
### Particle Filtering and Particle MCMC Samplers.
###
### AdvancedPS models and interface
struct TracedModel{S<:AbstractSampler,V<:AbstractVarInfo,M<:Model,E<:Tuple} <:
AdvancedPS.AbstractGenericModel
model::M
sampler::S
varinfo::V
evaluator::E
end
function TracedModel(
model::Model,
sampler::AbstractSampler,
varinfo::AbstractVarInfo,
rng::Random.AbstractRNG,
)
##CHUNK 8
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, newreference.rng)
end
function DynamicPPL.use_threadsafe_eval(
::SamplingContext{<:Sampler{<:Union{PG,SMC}}}, ::AbstractVarInfo
)
return false
end
function trace_local_varinfo_maybe(varinfo)
try
trace = Libtask.get_taped_globals(Any).other
return (trace === nothing ? varinfo : trace.model.f.varinfo)::AbstractVarInfo
catch e
# NOTE: this heuristic allows Libtask evaluating a model outside a `Trace`.
if e == KeyError(:task_variable)
return varinfo
else
##CHUNK 9
transition = PGTransition(model, _vi, logevidence)
return transition, PGState(_vi, reference.rng)
end
function AbstractMCMC.step(
rng::AbstractRNG, model::AbstractModel, spl::Sampler{<:PG}, state::PGState; kwargs...
)
# Reset the VarInfo before new sweep.
vi = state.vi
DynamicPPL.reset_num_produce!(vi)
DynamicPPL.resetlogp!!(vi)
# Create reference particle for which the samples will be retained.
reference = AdvancedPS.forkr(AdvancedPS.Trace(model, spl, vi, state.rng))
# For all other particles, do not retain the variables but resample them.
DynamicPPL.set_retained_vns_del!(vi)
# Create a new set of particles.
|
8
| 23
|
Turing.jl
| 391
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler::DynamicPPL.Sampler{<:Prior},
state=nothing;
kwargs...,
)
vi = last(
DynamicPPL.evaluate!!(
model,
VarInfo(),
SamplingContext(rng, DynamicPPL.SampleFromPrior(), DynamicPPL.PriorContext()),
),
)
return vi, nothing
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler::DynamicPPL.Sampler{<:Prior},
state=nothing;
kwargs...,
)
vi = last(
DynamicPPL.evaluate!!(
model,
VarInfo(),
SamplingContext(rng, DynamicPPL.SampleFromPrior(), DynamicPPL.PriorContext()),
),
)
return vi, nothing
end
|
[
8,
23
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler::DynamicPPL.Sampler{<:Prior},
state=nothing;
kwargs...,
)
vi = last(
DynamicPPL.evaluate!!(
model,
VarInfo(),
SamplingContext(rng, DynamicPPL.SampleFromPrior(), DynamicPPL.PriorContext()),
),
)
return vi, nothing
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler::DynamicPPL.Sampler{<:Prior},
state=nothing;
kwargs...,
)
vi = last(
DynamicPPL.evaluate!!(
model,
VarInfo(),
SamplingContext(rng, DynamicPPL.SampleFromPrior(), DynamicPPL.PriorContext()),
),
)
return vi, nothing
end
|
AbstractMCMC.step
| 8
| 23
|
src/mcmc/prior.jl
|
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
::DynamicPPL.Sampler,
::VarInfoState,
params::DynamicPPL.AbstractVarInfo,
)
return VarInfoState(params)
end
function AbstractMCMC.step(
::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter};
kwargs...,
)
spl.alg.non_warmup_init_count += 1
return Turing.Inference.Transition(nothing, 0.0),
VarInfoState(DynamicPPL.VarInfo(model))
end
function AbstractMCMC.step_warmup(
::Random.AbstractRNG,
##CHUNK 2
spl::DynamicPPL.Sampler{<:WarmupCounter};
kwargs...,
)
spl.alg.non_warmup_init_count += 1
return Turing.Inference.Transition(nothing, 0.0),
VarInfoState(DynamicPPL.VarInfo(model))
end
function AbstractMCMC.step_warmup(
::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter};
kwargs...,
)
spl.alg.warmup_init_count += 1
return Turing.Inference.Transition(nothing, 0.0),
VarInfoState(DynamicPPL.VarInfo(model))
end
function AbstractMCMC.step(
##CHUNK 3
::Random.AbstractRNG,
::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter},
s::VarInfoState;
kwargs...,
)
spl.alg.non_warmup_count += 1
return Turing.Inference.Transition(nothing, 0.0), s
end
function AbstractMCMC.step_warmup(
::Random.AbstractRNG,
::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter},
s::VarInfoState;
kwargs...,
)
spl.alg.warmup_count += 1
return Turing.Inference.Transition(nothing, 0.0), s
end
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
##CHUNK 2
"Cache of sample, log density, and gradient of log density evaluation."
cache::C
metric::M
stepsize::S
end
function DynamicPPL.initialsampler(::DynamicPPL.Sampler{<:DynamicNUTS})
return DynamicPPL.SampleFromUniform()
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
#FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
##CHUNK 2
struct SGLDState{L,V<:AbstractVarInfo}
logdensity::L
vi::V
step::Int
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
#FILE: Turing.jl/src/mcmc/external_sampler.jl
##CHUNK 1
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler},
state::TuringState;
kwargs...,
)
sampler = sampler_wrapper.alg.sampler
f = state.ldf
# Then just call `AdvancedMCMC.step` with the right arguments.
transition_inner, state_inner = AbstractMCMC.step(
rng, AbstractMCMC.LogDensityModel(f), sampler, state.state; kwargs...
)
# Get the parameters and log density, and set them in the varinfo.
new_varinfo = make_updated_varinfo(f, transition_inner, state_inner)
##CHUNK 2
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler_wrapper::Sampler{<:ExternalSampler};
initial_state=nothing,
initial_params=nothing,
kwargs...,
)
alg = sampler_wrapper.alg
sampler = alg.sampler
# Initialise varinfo with initial params and link the varinfo if needed.
varinfo = DynamicPPL.VarInfo(model)
if requires_unconstrained_space(alg)
if initial_params !== nothing
# If we have initial parameters, we need to set the varinfo before linking.
varinfo = DynamicPPL.link(DynamicPPL.unflatten(varinfo, initial_params), model)
# Extract initial parameters in unconstrained space.
initial_params = varinfo[:]
else
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
rng,
model,
step_function,
varname_vecs_tail,
samplers_tail,
vi,
states;
initial_params=initial_params,
kwargs...,
)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
#CURRENT FILE: Turing.jl/src/mcmc/prior.jl
|
49
| 61
|
Turing.jl
| 392
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
end
|
[
49,
61
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
end
|
AbstractMCMC.step
| 49
| 61
|
src/mcmc/repeat_sampler.jl
|
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#FILE: Turing.jl/src/mcmc/abstractmcmc.jl
##CHUNK 1
model::AbstractModel,
sampler::Union{Sampler{<:InferenceAlgorithm},RepeatSampler},
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
chain_type=MCMCChains.Chains,
progress=PROGRESS[],
kwargs...,
)
return AbstractMCMC.mcmcsample(
rng,
model,
sampler,
ensemble,
N,
n_chains;
chain_type=chain_type,
progress=progress,
kwargs...,
)
##CHUNK 2
Random.default_rng(), model, alg, ensemble, N, n_chains; kwargs...
)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
model::AbstractModel,
alg::InferenceAlgorithm,
ensemble::AbstractMCMC.AbstractMCMCEnsemble,
N::Integer,
n_chains::Integer;
check_model::Bool=true,
kwargs...,
)
check_model && _check_model(model, alg)
return AbstractMCMC.sample(rng, model, Sampler(alg), ensemble, N, n_chains; kwargs...)
end
function AbstractMCMC.sample(
rng::AbstractRNG,
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
#CURRENT FILE: Turing.jl/src/mcmc/repeat_sampler.jl
##CHUNK 1
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
end
return transition, state
end
##CHUNK 2
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step_warmup(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step_warmup(
##CHUNK 3
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step_warmup(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
##CHUNK 4
repeated_sampler = RepeatSampler(sampler, 10)
AbstractMCMC.step(rng, model, repeated_sampler) # take 10 steps of `sampler`
```
"""
struct RepeatSampler{S<:AbstractMCMC.AbstractSampler} <: AbstractMCMC.AbstractSampler
"The sampler to repeat"
sampler::S
"The number of times to repeat the sampler"
num_repeat::Int
function RepeatSampler(sampler::S, num_repeat::Int) where {S}
@assert num_repeat > 0
return new{S}(sampler, num_repeat)
end
end
function RepeatSampler(alg::InferenceAlgorithm, num_repeat::Int)
return RepeatSampler(Sampler(alg), num_repeat)
end
##CHUNK 5
getADType(spl::RepeatSampler) = getADType(spl.sampler)
DynamicPPL.default_chain_type(sampler::RepeatSampler) = default_chain_type(sampler.sampler)
# TODO(mhauru) Remove the below once DynamicPPL has removed all its Selector stuff.
DynamicPPL.inspace(vn::VarName, spl::RepeatSampler) = inspace(vn, spl.sampler)
function setparams_varinfo!!(model::DynamicPPL.Model, sampler::RepeatSampler, state, params)
return setparams_varinfo!!(model, sampler.sampler, state, params)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step_warmup(
##CHUNK 6
"""
RepeatSampler <: AbstractMCMC.AbstractSampler
A `RepeatSampler` is a container for a sampler and a number of times to repeat it.
# Fields
$(FIELDS)
# Examples
```julia
repeated_sampler = RepeatSampler(sampler, 10)
AbstractMCMC.step(rng, model, repeated_sampler) # take 10 steps of `sampler`
```
"""
struct RepeatSampler{S<:AbstractMCMC.AbstractSampler} <: AbstractMCMC.AbstractSampler
"The sampler to repeat"
sampler::S
"The number of times to repeat the sampler"
num_repeat::Int
|
72
| 88
|
Turing.jl
| 393
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
end
return transition, state
end
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
end
return transition, state
end
|
[
72,
88
] |
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
end
return transition, state
end
|
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step_warmup(
rng, model, sampler.sampler, state; kwargs...
)
end
return transition, state
end
|
AbstractMCMC.step_warmup
| 72
| 88
|
src/mcmc/repeat_sampler.jl
|
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
states::S
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:Emcee};
resume_from=nothing,
initial_params=nothing,
kwargs...,
)
if resume_from !== nothing
state = loadstate(resume_from)
return AbstractMCMC.step(rng, model, spl, state; kwargs...)
end
# Sample from the prior
n = spl.alg.ensemble.n_walkers
vis = [VarInfo(rng, model, SampleFromPrior()) for _ in 1:n]
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step_warmup, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
##CHUNK 2
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:Gibbs},
state::GibbsState;
kwargs...,
)
vi = varinfo(state)
alg = spl.alg
varnames = alg.varnames
samplers = alg.samplers
states = state.states
@assert length(samplers) == length(state.states)
vi, states = gibbs_step_recursive(
rng, model, AbstractMCMC.step, varnames, samplers, states, vi; kwargs...
)
return Transition(model, vi), GibbsState(vi, states)
#FILE: Turing.jl/test/mcmc/gibbs.jl
##CHUNK 1
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter};
kwargs...,
)
spl.alg.warmup_init_count += 1
return Turing.Inference.Transition(nothing, 0.0),
VarInfoState(DynamicPPL.VarInfo(model))
end
function AbstractMCMC.step(
::Random.AbstractRNG,
::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter},
s::VarInfoState;
kwargs...,
)
spl.alg.non_warmup_count += 1
return Turing.Inference.Transition(nothing, 0.0), s
end
##CHUNK 2
::Random.AbstractRNG,
::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter},
s::VarInfoState;
kwargs...,
)
spl.alg.non_warmup_count += 1
return Turing.Inference.Transition(nothing, 0.0), s
end
function AbstractMCMC.step_warmup(
::Random.AbstractRNG,
::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:WarmupCounter},
s::VarInfoState;
kwargs...,
)
spl.alg.warmup_count += 1
return Turing.Inference.Transition(nothing, 0.0), s
end
#CURRENT FILE: Turing.jl/src/mcmc/repeat_sampler.jl
##CHUNK 1
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step_warmup(rng, model, sampler.sampler; kwargs...)
end
##CHUNK 2
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler,
state;
kwargs...,
)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
for _ in 2:(sampler.num_repeat)
transition, state = AbstractMCMC.step(rng, model, sampler.sampler, state; kwargs...)
end
return transition, state
##CHUNK 3
end
function AbstractMCMC.step_warmup(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step_warmup(rng, model, sampler.sampler; kwargs...)
end
##CHUNK 4
repeated_sampler = RepeatSampler(sampler, 10)
AbstractMCMC.step(rng, model, repeated_sampler) # take 10 steps of `sampler`
```
"""
struct RepeatSampler{S<:AbstractMCMC.AbstractSampler} <: AbstractMCMC.AbstractSampler
"The sampler to repeat"
sampler::S
"The number of times to repeat the sampler"
num_repeat::Int
function RepeatSampler(sampler::S, num_repeat::Int) where {S}
@assert num_repeat > 0
return new{S}(sampler, num_repeat)
end
end
function RepeatSampler(alg::InferenceAlgorithm, num_repeat::Int)
return RepeatSampler(Sampler(alg), num_repeat)
end
##CHUNK 5
getADType(spl::RepeatSampler) = getADType(spl.sampler)
DynamicPPL.default_chain_type(sampler::RepeatSampler) = default_chain_type(sampler.sampler)
# TODO(mhauru) Remove the below once DynamicPPL has removed all its Selector stuff.
DynamicPPL.inspace(vn::VarName, spl::RepeatSampler) = inspace(vn, spl.sampler)
function setparams_varinfo!!(model::DynamicPPL.Model, sampler::RepeatSampler, state, params)
return setparams_varinfo!!(model, sampler.sampler, state, params)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::AbstractMCMC.AbstractModel,
sampler::RepeatSampler;
kwargs...,
)
return AbstractMCMC.step(rng, model, sampler.sampler; kwargs...)
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
|
54
| 78
|
Turing.jl
| 394
|
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
|
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
|
[
54,
78
] |
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
|
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
|
DynamicPPL.initialstep
| 54
| 78
|
src/mcmc/sghmc.jl
|
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 2
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
##CHUNK 3
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 4
"Cache of sample, log density, and gradient of log density evaluation."
cache::C
metric::M
stepsize::S
end
function DynamicPPL.initialsampler(::DynamicPPL.Sampler{<:DynamicNUTS})
return DynamicPPL.SampleFromUniform()
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
#FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
#FILE: Turing.jl/src/mcmc/is.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::AbstractRNG, model::Model, spl::Sampler{<:IS}, vi::AbstractVarInfo; kwargs...
)
return Transition(model, vi), nothing
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:IS}, ::Nothing; kwargs...
)
vi = VarInfo(rng, model, spl)
return Transition(model, vi), nothing
end
# Calculate evidence.
function getlogevidence(samples::Vector{<:Transition}, ::Sampler{<:IS}, state)
return logsumexp(map(x -> x.lp, samples)) - log(length(samples))
end
function DynamicPPL.assume(rng, ::Sampler{<:IS}, dist::Distribution, vn::VarName, vi)
if haskey(vi, vn)
#CURRENT FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
##CHUNK 2
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
|
80
| 110
|
Turing.jl
| 395
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
state::SGHMCState;
kwargs...,
)
# Compute gradient of log density.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
# Update latent variables and velocity according to
# equation (15) of Chen et al. (2014)
v = state.velocity
θ .+= v
η = spl.alg.learning_rate
α = spl.alg.momentum_decay
newv = (1 - α) .* v .+ η .* grad .+ sqrt(2 * η * α) .* randn(rng, eltype(v), length(v))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = Transition(model, vi)
newstate = SGHMCState(ℓ, vi, newv)
return sample, newstate
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
state::SGHMCState;
kwargs...,
)
# Compute gradient of log density.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
# Update latent variables and velocity according to
# equation (15) of Chen et al. (2014)
v = state.velocity
θ .+= v
η = spl.alg.learning_rate
α = spl.alg.momentum_decay
newv = (1 - α) .* v .+ η .* grad .+ sqrt(2 * η * α) .* randn(rng, eltype(v), length(v))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = Transition(model, vi)
newstate = SGHMCState(ℓ, vi, newv)
return sample, newstate
end
|
[
80,
110
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
state::SGHMCState;
kwargs...,
)
# Compute gradient of log density.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
# Update latent variables and velocity according to
# equation (15) of Chen et al. (2014)
v = state.velocity
θ .+= v
η = spl.alg.learning_rate
α = spl.alg.momentum_decay
newv = (1 - α) .* v .+ η .* grad .+ sqrt(2 * η * α) .* randn(rng, eltype(v), length(v))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = Transition(model, vi)
newstate = SGHMCState(ℓ, vi, newv)
return sample, newstate
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
state::SGHMCState;
kwargs...,
)
# Compute gradient of log density.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
# Update latent variables and velocity according to
# equation (15) of Chen et al. (2014)
v = state.velocity
θ .+= v
η = spl.alg.learning_rate
α = spl.alg.momentum_decay
newv = (1 - α) .* v .+ η .* grad .+ sqrt(2 * η * α) .* randn(rng, eltype(v), length(v))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = Transition(model, vi)
newstate = SGHMCState(ℓ, vi, newv)
return sample, newstate
end
|
AbstractMCMC.step
| 80
| 110
|
src/mcmc/sghmc.jl
|
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 2
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 3
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
return sample, newstate
end
end
##CHUNK 4
# Perform initial step.
results = DynamicHMC.mcmc_keep_warmup(
rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
)
steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
##CHUNK 5
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
# Perform initial step.
results = DynamicHMC.mcmc_keep_warmup(
rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
)
steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
#FILE: Turing.jl/src/mcmc/emcee.jl
##CHUNK 1
# Generate a log joint function.
vi = state.vi
densitymodel = AMH.DensityModel(
Base.Fix1(LogDensityProblems.logdensity, DynamicPPL.LogDensityFunction(model, vi))
)
# Compute the next states.
states = last(AbstractMCMC.step(rng, densitymodel, spl.alg.ensemble, state.states))
# Compute the next transition and state.
transition = map(states) do _state
vi = DynamicPPL.unflatten(vi, _state.params)
t = Transition(getparams(model, vi), _state.lp)
return t
end
newstate = EmceeState(vi, states)
return transition, newstate
end
#CURRENT FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
##CHUNK 2
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
##CHUNK 3
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
return sample, newstate
end
##CHUNK 4
logdensity::L
vi::V
step::Int
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
|
217
| 241
|
Turing.jl
| 396
|
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
|
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
|
[
217,
241
] |
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
|
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Create first sample and state.
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
|
DynamicPPL.initialstep
| 217
| 241
|
src/mcmc/sghmc.jl
|
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
##CHUNK 2
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 3
"Cache of sample, log density, and gradient of log density evaluation."
cache::C
metric::M
stepsize::S
end
function DynamicPPL.initialsampler(::DynamicPPL.Sampler{<:DynamicNUTS})
return DynamicPPL.SampleFromUniform()
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
##CHUNK 4
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#FILE: Turing.jl/src/mcmc/gibbs.jl
##CHUNK 1
model::DynamicPPL.Model,
sampler::Sampler{<:ESS},
state::AbstractVarInfo,
params::AbstractVarInfo,
)
# The state is already a VarInfo, so we can just return `params`, but first we need to
# update its logprob. To do this, we have to call evaluate!! with the sampler, rather
# than just a context, because ESS is peculiar in how it uses LikelihoodContext for
# some variables and DefaultContext for others.
return last(DynamicPPL.evaluate!!(model, params, SamplingContext(sampler)))
end
function setparams_varinfo!!(
model::DynamicPPL.Model,
sampler::Sampler{<:ExternalSampler},
state::TuringState,
params::AbstractVarInfo,
)
logdensity = DynamicPPL.LogDensityFunction(
model, state.ldf.varinfo, state.ldf.context; adtype=sampler.alg.adtype
#FILE: Turing.jl/src/mcmc/ess.jl
##CHUNK 1
ESSPrior(model, spl, vi),
DynamicPPL.LogDensityFunction(
model, vi, DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext())
),
),
EllipticalSliceSampling.ESS(),
oldstate,
)
# update sample and log-likelihood
vi = DynamicPPL.unflatten(vi, sample)
vi = setlogp!!(vi, state.loglikelihood)
return Transition(model, vi), vi
end
# Prior distribution of considered random variable
struct ESSPrior{M<:Model,S<:Sampler{<:ESS},V<:AbstractVarInfo,T}
model::M
sampler::S
#CURRENT FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
velocity::T
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
vi::AbstractVarInfo;
kwargs...,
)
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
##CHUNK 2
# Transform the samples to unconstrained space and compute the joint log probability.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Compute initial sample and state.
sample = Transition(model, vi)
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGHMCState(ℓ, vi, zero(vi[:]))
return sample, state
end
function AbstractMCMC.step(
##CHUNK 3
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
return sample, newstate
end
|
243
| 264
|
Turing.jl
| 397
|
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
return sample, newstate
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
return sample, newstate
end
|
[
243,
264
] |
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
return sample, newstate
end
|
function AbstractMCMC.step(
rng::Random.AbstractRNG, model::Model, spl::Sampler{<:SGLD}, state::SGLDState; kwargs...
)
# Perform gradient step.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
step = state.step
stepsize = spl.alg.stepsize(step)
θ .+= (stepsize / 2) .* grad .+ sqrt(stepsize) .* randn(rng, eltype(θ), length(θ))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = SGLDTransition(model, vi, stepsize)
newstate = SGLDState(ℓ, vi, state.step + 1)
return sample, newstate
end
|
AbstractMCMC.step
| 243
| 264
|
src/mcmc/sghmc.jl
|
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
##CHUNK 2
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
state::DynamicNUTSState;
kwargs...,
)
# Compute next sample.
vi = state.vi
ℓ = state.logdensity
steps = DynamicHMC.mcmc_steps(rng, spl.alg.sampler, state.metric, ℓ, state.stepsize)
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
##CHUNK 3
Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create next sample and state.
sample = Turing.Inference.Transition(model, vi)
newstate = DynamicNUTSState(ℓ, vi, Q, state.metric, state.stepsize)
return sample, newstate
end
end
##CHUNK 4
# Perform initial step.
results = DynamicHMC.mcmc_keep_warmup(
rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
)
steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
# Update the variables.
vi = DynamicPPL.unflatten(vi, Q.q)
vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
# Create first sample and state.
sample = Turing.Inference.Transition(model, vi)
state = DynamicNUTSState(ℓ, vi, Q, steps.H.κ, steps.ϵ)
return sample, state
end
function AbstractMCMC.step(
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
DynamicPPL.SamplingContext(rng, spl, DynamicPPL.leafcontext(model.context)),
),
),
)
trans, _ = AbstractMCMC.step(rng, densitymodel, mh_sampler, prev_trans)
return setlogp!!(DynamicPPL.unflatten(vi, trans.params), trans.lp)
end
function DynamicPPL.initialstep(
rng::AbstractRNG,
model::AbstractModel,
spl::Sampler{<:MH},
vi::AbstractVarInfo;
kwargs...,
)
# If we're doing random walk with a covariance matrix,
# just link everything before sampling.
vi = maybe_link!!(vi, spl, spl.alg.proposals, model)
#CURRENT FILE: Turing.jl/src/mcmc/sghmc.jl
##CHUNK 1
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
# Update latent variables and velocity according to
# equation (15) of Chen et al. (2014)
v = state.velocity
θ .+= v
η = spl.alg.learning_rate
α = spl.alg.momentum_decay
newv = (1 - α) .* v .+ η .* grad .+ sqrt(2 * η * α) .* randn(rng, eltype(v), length(v))
# Save new variables and recompute log density.
vi = DynamicPPL.unflatten(vi, θ)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
# Compute next sample and state.
sample = Transition(model, vi)
newstate = SGHMCState(ℓ, vi, newv)
return sample, newstate
end
##CHUNK 2
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGHMC},
state::SGHMCState;
kwargs...,
)
# Compute gradient of log density.
ℓ = state.logdensity
vi = state.vi
θ = vi[:]
grad = last(LogDensityProblems.logdensity_and_gradient(ℓ, θ))
# Update latent variables and velocity according to
# equation (15) of Chen et al. (2014)
v = state.velocity
θ .+= v
η = spl.alg.learning_rate
α = spl.alg.momentum_decay
newv = (1 - α) .* v .+ η .* grad .+ sqrt(2 * η * α) .* randn(rng, eltype(v), length(v))
##CHUNK 3
sample = SGLDTransition(model, vi, zero(spl.alg.stepsize(0)))
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
state = SGLDState(ℓ, vi, 1)
return sample, state
end
##CHUNK 4
function SGLDTransition(model::DynamicPPL.Model, vi::AbstractVarInfo, stepsize)
theta = getparams(model, vi)
lp = getlogp(vi)
return SGLDTransition(theta, lp, stepsize)
end
metadata(t::SGLDTransition) = (lp=t.lp, SGLD_stepsize=t.stepsize)
DynamicPPL.getlogp(t::SGLDTransition) = t.lp
struct SGLDState{L,V<:AbstractVarInfo}
logdensity::L
vi::V
step::Int
end
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::Model,
spl::Sampler{<:SGLD},
##CHUNK 5
struct SGLDTransition{T,F<:Real} <: AbstractTransition
"The parameters for any given sample."
θ::T
"The joint log probability of the sample."
lp::F
"The stepsize that was used to obtain the sample."
stepsize::F
end
function SGLDTransition(model::DynamicPPL.Model, vi::AbstractVarInfo, stepsize)
theta = getparams(model, vi)
lp = getlogp(vi)
return SGLDTransition(theta, lp, stepsize)
end
metadata(t::SGLDTransition) = (lp=t.lp, SGLD_stepsize=t.stepsize)
DynamicPPL.getlogp(t::SGLDTransition) = t.lp
|
79
| 89
|
Turing.jl
| 398
|
function DynamicPPL.tilde_assume(ctx::OptimizationContext, dist, vn, vi)
r = vi[vn, dist]
lp = if ctx.context isa Union{DynamicPPL.DefaultContext,DynamicPPL.PriorContext}
# MAP
Distributions.logpdf(dist, r)
else
# MLE
0
end
return r, lp, vi
end
|
function DynamicPPL.tilde_assume(ctx::OptimizationContext, dist, vn, vi)
r = vi[vn, dist]
lp = if ctx.context isa Union{DynamicPPL.DefaultContext,DynamicPPL.PriorContext}
# MAP
Distributions.logpdf(dist, r)
else
# MLE
0
end
return r, lp, vi
end
|
[
79,
89
] |
function DynamicPPL.tilde_assume(ctx::OptimizationContext, dist, vn, vi)
r = vi[vn, dist]
lp = if ctx.context isa Union{DynamicPPL.DefaultContext,DynamicPPL.PriorContext}
# MAP
Distributions.logpdf(dist, r)
else
# MLE
0
end
return r, lp, vi
end
|
function DynamicPPL.tilde_assume(ctx::OptimizationContext, dist, vn, vi)
r = vi[vn, dist]
lp = if ctx.context isa Union{DynamicPPL.DefaultContext,DynamicPPL.PriorContext}
# MAP
Distributions.logpdf(dist, r)
else
# MLE
0
end
return r, lp, vi
end
|
DynamicPPL.tilde_assume
| 79
| 89
|
src/optimisation/Optimisation.jl
|
#FILE: Turing.jl/src/mcmc/prior.jl
##CHUNK 1
"""
Prior()
Algorithm for sampling from the prior.
"""
struct Prior <: InferenceAlgorithm end
function AbstractMCMC.step(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
sampler::DynamicPPL.Sampler{<:Prior},
state=nothing;
kwargs...,
)
vi = last(
DynamicPPL.evaluate!!(
model,
VarInfo(),
SamplingContext(rng, DynamicPPL.SampleFromPrior(), DynamicPPL.PriorContext()),
),
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
x ~ Normal(m, 1)
end
model = f(1.5)
map_est = optimize(model, MAP())
# Use a different optimizer
map_est = optimize(model, MAP(), NelderMead())
```
"""
function Optim.optimize(
model::DynamicPPL.Model,
::Optimisation.MAP,
options::Optim.Options=Optim.Options();
kwargs...,
)
ctx = Optimisation.OptimizationContext(DynamicPPL.DefaultContext())
f = Optimisation.OptimLogDensity(model, ctx)
init_vals = DynamicPPL.getparams(f.ldf)
optimizer = Optim.LBFGS()
#FILE: Turing.jl/test/optimisation/Optimisation.jl
##CHUNK 1
return DynamicPPL.contextualize(
model, OverrideContext(model.context, 100, 1)
)
end
m1 = override(model1(x))
m2 = override(model2() | (x=x,))
ctx = Turing.Optimisation.OptimizationContext(DynamicPPL.DefaultContext())
@test Turing.Optimisation.OptimLogDensity(m1, ctx)(w) ==
Turing.Optimisation.OptimLogDensity(m2, ctx)(w)
end
@testset "Default, Likelihood, Prior Contexts" begin
m1 = model1(x)
defctx = Turing.Optimisation.OptimizationContext(DynamicPPL.DefaultContext())
llhctx = Turing.Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
prictx = Turing.Optimisation.OptimizationContext(DynamicPPL.PriorContext())
a = [0.3]
@test Turing.Optimisation.OptimLogDensity(m1, defctx)(a) ==
Turing.Optimisation.OptimLogDensity(m1, llhctx)(a) +
#FILE: Turing.jl/src/mcmc/mh.jl
##CHUNK 1
return Transition(model, vi), vi
end
####
#### Compiler interface, i.e. tilde operators.
####
function DynamicPPL.assume(
rng::Random.AbstractRNG, spl::Sampler{<:MH}, dist::Distribution, vn::VarName, vi
)
# Just defer to `SampleFromPrior`.
retval = DynamicPPL.assume(rng, SampleFromPrior(), dist, vn, vi)
return retval
end
function DynamicPPL.observe(spl::Sampler{<:MH}, d::Distribution, value, vi)
return DynamicPPL.observe(SampleFromPrior(), d, value, vi)
end
#FILE: Turing.jl/src/Turing.jl
##CHUNK 1
# Querying model probabilities - DynamicPPL
returned,
pointwise_loglikelihoods,
logprior,
loglikelihood,
logjoint,
condition,
decondition,
conditioned,
fix,
unfix,
OrderedDict, # OrderedCollections
# Point estimates - Turing.Optimisation
# The MAP and MLE exports are only needed for the Optim.jl interface.
maximum_a_posteriori,
maximum_likelihood,
MAP,
MLE
end
#FILE: Turing.jl/ext/TuringDynamicHMCExt.jl
##CHUNK 1
function DynamicPPL.initialstep(
rng::Random.AbstractRNG,
model::DynamicPPL.Model,
spl::DynamicPPL.Sampler{<:DynamicNUTS},
vi::DynamicPPL.AbstractVarInfo;
kwargs...,
)
# Ensure that initial sample is in unconstrained space.
if !DynamicPPL.islinked(vi)
vi = DynamicPPL.link!!(vi, model)
vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
end
# Define log-density function.
ℓ = DynamicPPL.LogDensityFunction(
model,
vi,
DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
adtype=spl.alg.adtype,
)
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
Concrete type for maximum a posteriori estimation. Only used for the Optim.jl interface.
"""
struct MAP <: ModeEstimator end
"""
OptimizationContext{C<:AbstractContext} <: AbstractContext
The `OptimizationContext` transforms variables to their constrained space, but
does not use the density with respect to the transformation. This context is
intended to allow an optimizer to sample in R^n freely.
"""
struct OptimizationContext{C<:DynamicPPL.AbstractContext} <: DynamicPPL.AbstractContext
context::C
function OptimizationContext{C}(context::C) where {C<:DynamicPPL.AbstractContext}
if !(
context isa Union{
DynamicPPL.DefaultContext,
DynamicPPL.LikelihoodContext,
##CHUNK 2
return new{C}(context)
end
end
OptimizationContext(ctx::DynamicPPL.AbstractContext) = OptimizationContext{typeof(ctx)}(ctx)
DynamicPPL.NodeTrait(::OptimizationContext) = DynamicPPL.IsLeaf()
function DynamicPPL.tilde_observe(
ctx::OptimizationContext{<:DynamicPPL.PriorContext}, args...
)
return DynamicPPL.tilde_observe(ctx.context, args...)
end
"""
OptimLogDensity{
M<:DynamicPPL.Model,
V<:DynamicPPL.VarInfo,
C<:OptimizationContext,
##CHUNK 3
DynamicPPL.PriorContext,
}
)
msg = """
`OptimizationContext` supports only leaf contexts of type
`DynamicPPL.DefaultContext`, `DynamicPPL.LikelihoodContext`,
and `DynamicPPL.PriorContext` (given: `$(typeof(context)))`
"""
throw(ArgumentError(msg))
end
return new{C}(context)
end
end
OptimizationContext(ctx::DynamicPPL.AbstractContext) = OptimizationContext{typeof(ctx)}(ctx)
DynamicPPL.NodeTrait(::OptimizationContext) = DynamicPPL.IsLeaf()
function DynamicPPL.tilde_observe(
##CHUNK 4
lb=nothing,
ub=nothing,
kwargs...,
)
check_model && DynamicPPL.check_model(model; error_on_failure=true)
constraints = ModeEstimationConstraints(lb, ub, cons, lcons, ucons)
initial_params = generate_initial_params(model, initial_params, constraints)
if solver === nothing
solver = default_solver(constraints)
end
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
|
181
| 202
|
Turing.jl
| 399
|
function (f::OptimLogDensity)(F, G, z)
if G !== nothing
# Calculate log joint and its gradient.
logp, ∇logp = LogDensityProblems.logdensity_and_gradient(f.ldf, z)
# Save the negative gradient to the pre-allocated array.
copyto!(G, -∇logp)
# If F is something, the negative log joint is requested as well.
# We have already computed it as a by-product above and hence return it directly.
if F !== nothing
return -logp
end
end
# Only negative log joint requested but no gradient.
if F !== nothing
return -LogDensityProblems.logdensity(f.ldf, z)
end
return nothing
end
|
function (f::OptimLogDensity)(F, G, z)
if G !== nothing
# Calculate log joint and its gradient.
logp, ∇logp = LogDensityProblems.logdensity_and_gradient(f.ldf, z)
# Save the negative gradient to the pre-allocated array.
copyto!(G, -∇logp)
# If F is something, the negative log joint is requested as well.
# We have already computed it as a by-product above and hence return it directly.
if F !== nothing
return -logp
end
end
# Only negative log joint requested but no gradient.
if F !== nothing
return -LogDensityProblems.logdensity(f.ldf, z)
end
return nothing
end
|
[
181,
202
] |
function (f::OptimLogDensity)(F, G, z)
if G !== nothing
# Calculate log joint and its gradient.
logp, ∇logp = LogDensityProblems.logdensity_and_gradient(f.ldf, z)
# Save the negative gradient to the pre-allocated array.
copyto!(G, -∇logp)
# If F is something, the negative log joint is requested as well.
# We have already computed it as a by-product above and hence return it directly.
if F !== nothing
return -logp
end
end
# Only negative log joint requested but no gradient.
if F !== nothing
return -LogDensityProblems.logdensity(f.ldf, z)
end
return nothing
end
|
function (f::OptimLogDensity)(F, G, z)
if G !== nothing
# Calculate log joint and its gradient.
logp, ∇logp = LogDensityProblems.logdensity_and_gradient(f.ldf, z)
# Save the negative gradient to the pre-allocated array.
copyto!(G, -∇logp)
# If F is something, the negative log joint is requested as well.
# We have already computed it as a by-product above and hence return it directly.
if F !== nothing
return -logp
end
end
# Only negative log joint requested but no gradient.
if F !== nothing
return -LogDensityProblems.logdensity(f.ldf, z)
end
return nothing
end
|
unknown_function
| 181
| 202
|
src/optimisation/Optimisation.jl
|
#FILE: Turing.jl/test/mcmc/external_sampler.jl
##CHUNK 1
# expected_logpdf = logpdf(Beta(2, 2), a) + logpdf(Normal(a), b)
# @test all(chn[:lp] .== expected_logpdf)
# @test all(chn[:logprior] .== expected_logpdf)
# @test all(chn[:loglikelihood] .== 0.0)
end
function initialize_nuts(model::DynamicPPL.Model)
# Create a linked varinfo
vi = DynamicPPL.VarInfo(model)
linked_vi = DynamicPPL.link!!(vi, model)
# Create a LogDensityFunction
f = DynamicPPL.LogDensityFunction(model, linked_vi; adtype=Turing.DEFAULT_ADTYPE)
# Choose parameter dimensionality and initial parameter value
D = LogDensityProblems.dimension(f)
initial_θ = rand(D) .- 0.5
# Define a Hamiltonian system
metric = AdvancedHMC.DiagEuclideanMetric(D)
#FILE: Turing.jl/src/mcmc/hmc.jl
##CHUNK 1
adtype=spl.alg.adtype,
)
lp_func = Base.Fix1(LogDensityProblems.logdensity, ldf)
lp_grad_func = Base.Fix1(LogDensityProblems.logdensity_and_gradient, ldf)
hamiltonian = AHMC.Hamiltonian(metric, lp_func, lp_grad_func)
# If no initial parameters are provided, resample until the log probability
# and its gradient are finite. Otherwise, just use the existing parameters.
vi, z = if initial_params === nothing
find_initial_params(rng, model, vi, hamiltonian)
else
vi, AHMC.phasepoint(rng, theta, hamiltonian)
end
theta = vi[:]
# Cache current log density.
log_density_old = getlogp(vi)
# Find good eps if not provided one
if iszero(spl.alg.ϵ)
#FILE: Turing.jl/ext/TuringOptimExt.jl
##CHUNK 1
kwargs...,
)
# Convert the initial values, since it is assumed that users provide them
# in the constrained space.
# TODO(penelopeysm): As with in src/optimisation/Optimisation.jl, unclear
# whether initialisation is really necessary at all
vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
vi = DynamicPPL.link(vi, f.ldf.model)
f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
init_vals = DynamicPPL.getparams(f.ldf)
# Optimize!
M = Optim.optimize(Optim.only_fg!(f), init_vals, optimizer, options, args...; kwargs...)
# Warn the user if the optimization did not converge.
if !Optim.converged(M)
@warn """
Optimization did not converge! You may need to correct your model or adjust the
Optim parameters.
"""
#FILE: Turing.jl/test/optimisation/Optimisation.jl
##CHUNK 1
@testset "Negative variance" begin
# A model for which the likelihood has a saddle point at x=0, y=0.
# Creating an optimisation result for this model at the x=0, y=0 results in negative
# variance for one of the variables, because the variance is calculated as the
# diagonal of the inverse of the Hessian.
@model function saddle_model()
x ~ Normal(0, 1)
y ~ Normal(x, 1)
@addlogprob! x^2 - y^2
return nothing
end
m = saddle_model()
ctx = Turing.Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
optim_ld = Turing.Optimisation.OptimLogDensity(m, ctx)
vals = Turing.Optimisation.NamedArrays.NamedArray([0.0, 0.0])
m = Turing.Optimisation.ModeResult(vals, nothing, 0.0, optim_ld)
ct = coeftable(m)
@assert isnan(ct.cols[2][1])
@assert ct.colnms[end] == "Error notes"
#CURRENT FILE: Turing.jl/src/optimisation/Optimisation.jl
##CHUNK 1
DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx; adtype=adtype)
)
end
"""
(f::OptimLogDensity)(z)
(f::OptimLogDensity)(z, _)
Evaluate the negative log joint or log likelihood at the array `z`. Which one is evaluated
depends on the context of `f`.
Any second argument is ignored. The two-argument method only exists to match interface the
required by Optimization.jl.
"""
(f::OptimLogDensity)(z::AbstractVector) = -LogDensityProblems.logdensity(f.ldf, z)
(f::OptimLogDensity)(z, _) = f(z)
# NOTE: The format of this function is dictated by Optim. The first argument sets whether to
# compute the function value, the second whether to compute the gradient (and stores the
# gradient). The last one is the actual argument of the objective function.
##CHUNK 2
or
```julia
OptimLogDensity(model, ctx; adtype=adtype)
```
If not specified, `adtype` defaults to `AutoForwardDiff()`.
An OptimLogDensity does not, in itself, obey the LogDensityProblems interface.
Thus, if you want to calculate the log density of its contents at the point
`z`, you should manually call
```julia
LogDensityProblems.logdensity(f.ldf, z)
```
However, it is a callable object which returns the *negative* log density of
the underlying LogDensityFunction at the point `z`. This is done to satisfy
the Optim.jl interface.
##CHUNK 3
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
# bounds/constraints and whether we transform the objective function to an
# unconstrained space. These should be separate concerns, but for that we need to
# implement getting the bounds of the prior distributions.
optimise_in_unconstrained_space = !has_constraints(constraints)
if optimise_in_unconstrained_space
vi = DynamicPPL.link(vi, model)
end
log_density = OptimLogDensity(model, vi, ctx)
##CHUNK 4
return OptimLogDensity(DynamicPPL.LogDensityFunction(model, vi, ctx; adtype=adtype))
end
# No varinfo
function OptimLogDensity(
model::DynamicPPL.Model,
ctx::OptimizationContext;
adtype::ADTypes.AbstractADType=AutoForwardDiff(),
)
return OptimLogDensity(
DynamicPPL.LogDensityFunction(model, DynamicPPL.VarInfo(model), ctx; adtype=adtype)
)
end
"""
(f::OptimLogDensity)(z)
(f::OptimLogDensity)(z, _)
Evaluate the negative log joint or log likelihood at the array `z`. Which one is evaluated
depends on the context of `f`.
##CHUNK 5
`z`, you should manually call
```julia
LogDensityProblems.logdensity(f.ldf, z)
```
However, it is a callable object which returns the *negative* log density of
the underlying LogDensityFunction at the point `z`. This is done to satisfy
the Optim.jl interface.
```julia
optim_ld = OptimLogDensity(model, varinfo, ctx)
optim_ld(z) # returns -logp
```
"""
struct OptimLogDensity{
M<:DynamicPPL.Model,
V<:DynamicPPL.VarInfo,
C<:OptimizationContext,
AD<:ADTypes.AbstractADType,
##CHUNK 6
# Create an OptimLogDensity object that can be used to evaluate the objective function,
# i.e. the negative log density.
inner_context = if estimator isa MAP
DynamicPPL.DefaultContext()
else
DynamicPPL.LikelihoodContext()
end
ctx = OptimizationContext(inner_context)
# Set its VarInfo to the initial parameters.
# TODO(penelopeysm): Unclear if this is really needed? Any time that logp is calculated
# (using `LogDensityProblems.logdensity(ldf, x)`) the parameters in the
# varinfo are completely ignored. The parameters only matter if you are calling evaluate!!
# directly on the fields of the LogDensityFunction
vi = DynamicPPL.VarInfo(model)
vi = DynamicPPL.unflatten(vi, initial_params)
# Link the varinfo if needed.
# TODO(mhauru) We currently couple together the questions of whether the user specified
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.