content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
<reponame>lucifer1004/Pluto.jl
### A Pluto.jl notebook ###
# v0.16.4
using Markdown
using InteractiveUtils
# ╔═╡ f7dfc33e-6ff8-44d6-a88e-bea5834a9d27
import A1
using A2
# ╔═╡ 360ee541-cbc4-4df6-bdc5-ea23fe08abdd
import A1, B1, .C1, D1.E1
using A2, B2, .C2, D2.E2
# ╔═╡ 8a4eff2d-5dbc-4056-a81b-da0618503467
import A1: b1, c1
using A2: b2, c2
# ╔═╡ 393a7816-0fa0-4f74-8fe7-c8d8d8c7e868
import A1.B1: b1, c1
using A2.B2: b2, c2
# ╔═╡ c1b5da89-c9a3-439c-8bee-2a8690265796
import .A1
using .A2
# ╔═╡ daf52b3f-513c-45de-a232-bda50e45b326
import .A1: x1
using .A2: x2
# ╔═╡ 6a90ece9-e7ad-404e-a3b0-4d484821f461
import ..A1
using ..A2
# ╔═╡ 4206dd87-a9e9-4f2c-865f-bec68d199b55
import A1.B1
using A2.B2
# ╔═╡ eb7ba436-d23b-4e97-af92-81271fa76989
import A1.B1.C1
using A2.B2.C2
# ╔═╡ 4f1d3da9-370c-42c2-9d17-a3caffca638d
import A1.B1.C1.D1
using A2.B2.C2.D2
# ╔═╡ Cell order:
# ╠═f7dfc33e-6ff8-44d6-a88e-bea5834a9d27
# ╠═360ee541-cbc4-4df6-bdc5-ea23fe08abdd
# ╠═8a4eff2d-5dbc-4056-a81b-da0618503467
# ╠═393a7816-0fa0-4f74-8fe7-c8d8d8c7e868
# ╠═c1b5da89-c9a3-439c-8bee-2a8690265796
# ╠═daf52b3f-513c-45de-a232-bda50e45b326
# ╠═6a90ece9-e7ad-404e-a3b0-4d484821f461
# ╠═4206dd87-a9e9-4f2c-865f-bec68d199b55
# ╠═eb7ba436-d23b-4e97-af92-81271fa76989
# ╠═4f1d3da9-370c-42c2-9d17-a3caffca638d
| [
27,
7856,
261,
480,
29,
75,
1229,
7087,
3064,
19,
14,
3646,
9390,
13,
20362,
198,
21017,
317,
32217,
13,
20362,
20922,
44386,
198,
2,
410,
15,
13,
1433,
13,
19,
198,
198,
3500,
2940,
2902,
198,
3500,
21365,
18274,
4487,
198,
198,
... | 1.449944 | 889 |
<reponame>memetics19/My_julia_practice-<gh_stars>1-10
#=
Author:= <NAME>
License:= MIT
UTF-8 =#
#=
In Julia the for loop is simple, Unlike python, matlab it won't use vectorized code.
`for.....end`
=#
println("***********************1st Example*************************")
for num = 1:10 # here the colon is used represent the range like in python
println("number is $num")
end
println("***********************2nd Example*************************")
x = ["shreeda",1,2,0.3,4]
for i in x
println("what i'm passing now is $i")
end
#=
Note: in the condition it should be `variable = iterable`
=#
println("***********************3rd Example*************************")
for i in 1:10
println(i)
end
println("***********************4th Example*************************")
my_bio = ["shreeda","20","Astaqc Consulting"]
for bio in my_bio
println("let's see this array $bio")
end
println("*********************** 5th Example*************************")
m,n = 3,3
array = zeros(m,n)
println(array)
for i in 1:m
for j in 1:n
array[i,j]=i+j
end
end
println(array)
println("*********************** 6th Example*************************")
# Let's do list comprehension
B = zeros(3,3)
for i in 1:m,j in 1:n
B[i,j]=i+j
end
println(B)
println("*********************** 7th Example*************************")
# Sorry its array comprehension
c= [i+j for i in 1:n, j in 1:m]
print(c) | [
27,
7856,
261,
480,
29,
11883,
14596,
1129,
14,
3666,
62,
73,
43640,
62,
39541,
12,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
1303,
28,
198,
13838,
25,
28,
1279,
20608,
29,
198,
34156,
25,
28,
17168,
220,
198,
48504,
12,
23,
796... | 2.868952 | 496 |
<reponame>joehuchette/Justitia.jl<gh_stars>1-10
"""
Subtypes are different approaches for solving an optimization problem. We use
"approach" to mean a configured algorithm: that is, the algorithm (e.g.
simplex), along with fixed values for all the algorithm hyperparemeters.
"""
abstract type AbstractApproach end
"""
Subtypes contain all the information for an individual instance of an
optimization problem. They either contain all of the requisite data themselves,
or a mechanism to somehow retreive it from, e.g., a website.
"""
abstract type AbstractInstance end
"""
Subtypes encapsulate an single approach applied to a single instance. Callers
can ask to optimize a model and then query the results, but the workings of the
approach are otherwise treated as a black box.
"""
abstract type AbstractModel end
"""
Subtypes contain all the useful information needed for understanding how a
particular approach performed on a particular instance. Typical fields stored
may include solve time, or node count for a MIP-based approach.
"""
abstract type AbstractResult end
"""
Subtypes store the results for experiments across multiple approaches and
multiple instances. Backends could be as simple as a CSV file, or something
more complex like HDF5.
"""
abstract type AbstractResultTable end
"""
prep_instance!(instance::AbstractInstance)::Nothing
Prepare an instance for experimentation. This is useful if there is a
lightweight way to represent an instance (e.g. a string name in an instance
library), but collecting the data is relatively costly (e.g. download something
from the web and converting it to the right file format). Additionally, this
prep work can do more complex pre-processing: if you are evaluating methods for
dual bounds, this function could run a set of heuristics to produce a good
primal solution. This method is only called once per instance, meaning that the
resulting computations can be shared across all approaches run on the
particular instance.
!!! note
Implementing this method is optional. If you don't need to do any prep work
as all the requisite data is already available, simply defer to the
available fallback method.
"""
function prep_instance! end
prep_instance!(::AbstractInstance) = nothing
"""
build_model(
approach::AbstractApproach,
instance::AbstractInstance,
config::Dict{String,Any}=Dict{String,Any}()
)::AbstractModel
Given a particular `approach` and a particular `instance`, build a model that
encapsulates the two. The optional `config` argument contains parameters that
are shared across all approaches and instances in a experiment run: for
example, a time limit, a node limit, a constraint violation tolerance, etc.
"""
function build_model end
"""
optimize!(model::AbstractModel)
Solve the instance using the approach specified by `model`.
"""
function optimize! end
"""
tear_down(model::AbstractModel, instance::AbstractInstance, T::Type{<:AbstractResult}))
Record the results of in individual experiment of a particular approach on a
particular instance. The results will be stored in an object of type `T`.
!!! note
This function should only be called after `optimize!(model)` has been
called.
"""
function tear_down end
"""
record_result!(table::AbstractResultTable, result::AbstractResult)
Given the results from an individual experiment, record it in `table`.
"""
function record_result! end
| [
27,
7856,
261,
480,
29,
73,
2577,
71,
1229,
3202,
660,
14,
5703,
36723,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
37811,
198,
7004,
19199,
389,
1180,
10581,
329,
18120,
281,
23989,
1917,
13,
775,
779,
198,
1,
21064,
620... | 3.924658 | 876 |
<gh_stars>0
# UnitCommitment.jl: Optimization Package for Security-Constrained Unit Commitment
# Copyright (C) 2020, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
using UnitCommitment, Test, LinearAlgebra
@testset "Sensitivity" begin
@testset "Susceptance matrix" begin
instance = UnitCommitment.read_benchmark("test/case14")
actual = UnitCommitment.susceptance_matrix(instance.lines)
@test size(actual) == (20, 20)
expected = Diagonal([29.5, 7.83, 8.82, 9.9, 10.04,
10.2, 41.45, 8.35, 3.14, 6.93,
8.77, 6.82, 13.4, 9.91, 15.87,
20.65, 6.46, 9.09, 8.73, 5.02])
@test round.(actual, digits=2) == expected
end
@testset "Reduced incidence matrix" begin
instance = UnitCommitment.read_benchmark("test/case14")
actual = UnitCommitment.reduced_incidence_matrix(lines=instance.lines,
buses=instance.buses)
@test size(actual) == (20, 13)
@test actual[1, 1] == -1.0
@test actual[3, 1] == 1.0
@test actual[4, 1] == 1.0
@test actual[5, 1] == 1.0
@test actual[3, 2] == -1.0
@test actual[6, 2] == 1.0
@test actual[4, 3] == -1.0
@test actual[6, 3] == -1.0
@test actual[7, 3] == 1.0
@test actual[8, 3] == 1.0
@test actual[9, 3] == 1.0
@test actual[2, 4] == -1.0
@test actual[5, 4] == -1.0
@test actual[7, 4] == -1.0
@test actual[10, 4] == 1.0
@test actual[10, 5] == -1.0
@test actual[11, 5] == 1.0
@test actual[12, 5] == 1.0
@test actual[13, 5] == 1.0
@test actual[8, 6] == -1.0
@test actual[14, 6] == 1.0
@test actual[15, 6] == 1.0
@test actual[14, 7] == -1.0
@test actual[9, 8] == -1.0
@test actual[15, 8] == -1.0
@test actual[16, 8] == 1.0
@test actual[17, 8] == 1.0
@test actual[16, 9] == -1.0
@test actual[18, 9] == 1.0
@test actual[11, 10] == -1.0
@test actual[18, 10] == -1.0
@test actual[12, 11] == -1.0
@test actual[19, 11] == 1.0
@test actual[13, 12] == -1.0
@test actual[19, 12] == -1.0
@test actual[20, 12] == 1.0
@test actual[17, 13] == -1.0
@test actual[20, 13] == -1.0
end
@testset "Injection Shift Factors (ISF)" begin
instance = UnitCommitment.read_benchmark("test/case14")
actual = UnitCommitment.injection_shift_factors(lines=instance.lines,
buses=instance.buses)
@test size(actual) == (20, 13)
@test round.(actual, digits=2) == [
-0.84 -0.75 -0.67 -0.61 -0.63 -0.66 -0.66 -0.65 -0.65 -0.64 -0.63 -0.63 -0.64;
-0.16 -0.25 -0.33 -0.39 -0.37 -0.34 -0.34 -0.35 -0.35 -0.36 -0.37 -0.37 -0.36;
0.03 -0.53 -0.15 -0.1 -0.12 -0.14 -0.14 -0.14 -0.13 -0.13 -0.12 -0.12 -0.13;
0.06 -0.14 -0.32 -0.22 -0.25 -0.3 -0.3 -0.29 -0.28 -0.27 -0.25 -0.26 -0.27;
0.08 -0.07 -0.2 -0.29 -0.26 -0.22 -0.22 -0.22 -0.23 -0.25 -0.26 -0.26 -0.24;
0.03 0.47 -0.15 -0.1 -0.12 -0.14 -0.14 -0.14 -0.13 -0.13 -0.12 -0.12 -0.13;
0.08 0.31 0.5 -0.3 -0.03 0.36 0.36 0.28 0.23 0.1 -0.0 0.02 0.17;
0.0 0.01 0.02 -0.01 -0.22 -0.63 -0.63 -0.45 -0.41 -0.32 -0.24 -0.25 -0.36;
0.0 0.01 0.01 -0.01 -0.12 -0.17 -0.17 -0.26 -0.24 -0.18 -0.14 -0.14 -0.21;
-0.0 -0.02 -0.03 0.02 -0.66 -0.2 -0.2 -0.29 -0.36 -0.5 -0.63 -0.61 -0.43;
-0.0 -0.01 -0.02 0.01 0.21 -0.12 -0.12 -0.17 -0.28 -0.53 0.18 0.15 -0.03;
-0.0 -0.0 -0.0 0.0 0.03 -0.02 -0.02 -0.03 -0.02 0.01 -0.52 -0.17 -0.09;
-0.0 -0.01 -0.01 0.01 0.11 -0.06 -0.06 -0.09 -0.05 0.02 -0.28 -0.59 -0.31;
-0.0 -0.0 -0.0 -0.0 -0.0 -0.0 -1.0 -0.0 -0.0 -0.0 -0.0 -0.0 0.0 ;
0.0 0.01 0.02 -0.01 -0.22 0.37 0.37 -0.45 -0.41 -0.32 -0.24 -0.25 -0.36;
0.0 0.01 0.02 -0.01 -0.21 0.12 0.12 0.17 -0.72 -0.47 -0.18 -0.15 0.03;
0.0 0.01 0.01 -0.01 -0.14 0.08 0.08 0.12 0.07 -0.03 -0.2 -0.24 -0.6 ;
0.0 0.01 0.02 -0.01 -0.21 0.12 0.12 0.17 0.28 -0.47 -0.18 -0.15 0.03;
-0.0 -0.0 -0.0 0.0 0.03 -0.02 -0.02 -0.03 -0.02 0.01 0.48 -0.17 -0.09;
-0.0 -0.01 -0.01 0.01 0.14 -0.08 -0.08 -0.12 -0.07 0.03 0.2 0.24 -0.4 ]
end
@testset "Line Outage Distribution Factors (LODF)" begin
instance = UnitCommitment.read_benchmark("test/case14")
isf_before = UnitCommitment.injection_shift_factors(lines=instance.lines,
buses=instance.buses)
lodf = UnitCommitment.line_outage_factors(lines=instance.lines,
buses=instance.buses,
isf=isf_before)
for contingency in instance.contingencies
for lc in contingency.lines
prev_susceptance = lc.susceptance
lc.susceptance = 0.0
isf_after = UnitCommitment.injection_shift_factors(lines=instance.lines,
buses=instance.buses)
lc.susceptance = prev_susceptance
for lm in instance.lines
expected = isf_after[lm.offset, :]
actual = isf_before[lm.offset, :] +
lodf[lm.offset, lc.offset] * isf_before[lc.offset, :]
@test norm(expected - actual) < 1e-6
end
end
end
end
end | [
27,
456,
62,
30783,
29,
15,
198,
2,
11801,
6935,
270,
434,
13,
20362,
25,
30011,
1634,
15717,
329,
4765,
12,
3103,
2536,
1328,
11801,
35910,
434,
198,
2,
15069,
357,
34,
8,
12131,
11,
471,
25705,
49974,
710,
11,
11419,
13,
1439,
2... | 1.627155 | 3,712 |
# Winston:
using Winston
# optionally call figure prior to plotting to set the size
figure(width=600, height=400)
# plot some data
pl = plot(cumsum(rand(500) - 0.5), "r", cumsum(rand(500) - 0.5), "b")
# display the plot (not done automatically!)
display(pl)
println("Press enter to continue: ")
readline(STDIN)
# save the current figure
savefig("winston.svg") | [
2,
21400,
25,
198,
3500,
21400,
198,
2,
42976,
869,
3785,
3161,
284,
29353,
284,
900,
262,
2546,
198,
26875,
7,
10394,
28,
8054,
11,
6001,
28,
7029,
8,
198,
2,
7110,
617,
1366,
198,
489,
796,
7110,
7,
66,
5700,
388,
7,
25192,
7,... | 3.016807 | 119 |
<filename>src/julia/Audio/music.jl
type Music
ptr::Ptr{Void}
function Music(ptr::Ptr{Void})
m = new(ptr)
finalizer(m, destroy)
m
end
end
function Music(filename::AbstractString)
Music(ccall((:sfMusic_createFromFile, libcsfml_audio), Ptr{Void}, (Ptr{Cchar},), filename))
end
function destroy(music::Music)
ccall((:sfMusic_destroy, libcsfml_audio), Void, (Ptr{Void},), music.ptr)
end
function set_loop(music::Music, loop::Bool)
ccall((:sfMusic_setLoop, libcsfml_audio), Void, (Ptr{Void}, Int32,), music.ptr, loop)
end
function get_loop(music::Music)
return Bool(ccall((:sfMusic_getLoop, libcsfml_audio), Int32, (Ptr{Void},), music.ptr))
end
function play(music::Music)
ccall((:sfMusic_play, libcsfml_audio), Void, (Ptr{Void},), music.ptr)
end
function pause(music::Music)
ccall((:sfMusic_pause, libcsfml_audio), Void, (Ptr{Void},), music.ptr)
end
function stop(music::Music)
ccall((:sfMusic_pause, libcsfml_audio), Void, (Ptr{Void},), music.ptr)
end
function get_duration(music::Music)
return ccall((:sfMusic_getDuration, libcsfml_audio), Time, (Ptr{Void},), music.ptr)
end
function get_channelcount(music::Music)
return Real(ccall((:sfMusic_getChannelCount, libcsfml_audio), UInt32, (Ptr{Void},), music.ptr))
end
function get_samplerate(music::Music)
return Real(ccall((:sfMusic_getSampleRate, libcsfml_audio), UInt32, (Ptr{Void},), music.ptr))
end
function set_pitch(music::Music, pitch::Real)
ccall((:sfMusic_setPitch, libcsfml_audio), Void, (Ptr{Void}, Cfloat,), music.ptr, pitch)
end
function set_volume(music::Music, volume::Real)
ccall((:sfMusic_setVolume, libcsfml_audio), Void, (Ptr{Void}, Cfloat,), music.ptr, volume)
end
function get_pitch(music::Music)
return Real(ccall((:sfMusic_getPitch, libcsfml_audio), Cfloat, (Ptr{Void},), music.ptr))
end
function get_volume(music::Music)
return Real(ccall((:sfMusic_getVolume, libcsfml_audio), Cfloat, (Ptr{Void},), music.ptr))
end
function get_status(music::Music)
return ccall((:sfMusic_getStatus, libcsfml_audio), Int32, (Ptr{Void},), music.ptr)
end
| [
27,
34345,
29,
10677,
14,
73,
43640,
14,
21206,
14,
28965,
13,
20362,
198,
4906,
7849,
198,
220,
220,
220,
50116,
3712,
46745,
90,
53,
1868,
92,
628,
220,
220,
220,
2163,
7849,
7,
20692,
3712,
46745,
90,
53,
1868,
30072,
198,
220,
... | 2.544471 | 832 |
<reponame>Joel-Dahne/ArbExtras.jl<gh_stars>1-10
"""
bounded_by(f, a::Arf, b::Arf, C::Arf; degree, abs_value, log_bisection, depth_start, maxevals, depth, threaded, verbose)
Return `true` if the function `f` can be shown to be bounded by `C` on
the interval `[a, b]`, i.e. `f(x) <= C` for all `x ∈ [a, b]`,
otherwise return `false`.
This function is similar to first computing the maximum with
[`maximum_enclosure`](@ref) and then check if the computed maximum
satisfies the bound. However if the only thing needed is to check if
the bound holds this method has a number of benefits
- it aborts early if the bound is shown to not hold
- it doesn't try to compute an accurate enclosure of the maximum, it
only bisects as much as is needed for getting the bound.
- the implementation is simpler and easier to check for correctness.
The maximum of `f` is enclosed by the use of Taylor series through
[`maximum_series`](@ref). The degree of the expansion is kept constant
and the interval is bisected until we can either conclude that the
bound holds on the whole interval or there is subinterval where it
doesn't hold.
The function `f` should support evaluation on both `Arb` and
`ArbSeries` and should return an enclosure of the result in both
cases. The degree used for `ArbSeries` can be set with the `degree`
argument (defaults to 8). If `degree` is negative then it will fall
back to direct evaluation with `Arb` and not make use of
[`maximum_series`](@ref), this is usually much slower but does not
require the function to be implemented for `ArbSeries`.
If `abs_value = true` the instead consider the function `abs(f(x))` on
the interval `[a, b]`.
If `log_bisection = true` then the intervals are bisected in a
logarithmic scale, see [`bisect_interval`](@ref) for details.
The argument `depth_start` bisect the interval using
[`bisect_interval_recursive`](@ref) before starting to compute the
extrema. This can be useful if it is known beforehand that a certain
number of bisections will be necessary before the enclosures get good
enough. It defaults to `0` which corresponds to not bisecting the
interval at all before starting.
The arguments `maxevals` and `depth` can be used to limit the number
of function evaluations and the number of bisections of the interval
respectively. Notice that `depth` takes `depth_start` into account, so
the maximum number of iterations is `depth - depth_start`.
If `threaded = true` then evaluate `f` in parallel on the intervals
using [`Threads.@threads`](@ref).
If `verbose = true` then output information about the process.
"""
function bounded_by(
f,
a::Arf,
b::Arf,
C::Arf;
degree::Integer = 8,
abs_value = false,
log_bisection = false,
depth_start::Integer = 0,
maxevals::Integer = 1000,
depth::Integer = 20,
threaded = false,
verbose = false,
)
check_interval(a, b)
maybe_abs = abs_value ? abs : identity
if a == b
return maybe_abs(f(Arb(a))) <= C
end
# List of intervals
intervals = bisect_interval_recursive(a, b, depth_start, log_midpoint = log_bisection)
iterations = 0
evals = 0
while true
iterations += 1
evals += length(intervals)
# Evaluate f on the remaining intervals
values = similar(intervals, Arb)
if degree < 0
if threaded
Threads.@threads for i in eachindex(intervals)
values[i] = maybe_abs(f(Arb(intervals[i])))
end
else
for i in eachindex(intervals)
values[i] = maybe_abs(f(Arb(intervals[i])))
end
end
else
if threaded
Threads.@threads for i in eachindex(intervals)
values[i], _ = maximum_series(f, intervals[i]...; degree, abs_value)
end
else
for (i, (a, b)) in enumerate(intervals)
values[i], _ = maximum_series(f, a, b; degree, abs_value)
end
end
end
# Check for each interval if the bound is satisfied
next_intervals = sizehint!(empty(intervals), 2length(intervals))
for i in eachindex(intervals)
if values[i] > C
verbose && @info "bound doesn't hold on the interval x = $(intervals[i])"
verbose && @info "got the maximum of f(x) to be $(values[i])"
return false
elseif !(values[i] <= C)
push!(
next_intervals,
bisect_interval(intervals[i]..., log_midpoint = log_bisection)...,
)
end
end
intervals = next_intervals
verbose && @info "iteration: $(lpad(iterations, 2)), " *
"remaining intervals: $(lpad(length(intervals) ÷ 2, 3)), "
non_finite = count(!isfinite, values)
verbose && non_finite > 0 && @info "non-finite intervals: $non_finite"
isempty(intervals) && break
# Check if we have done the maximum number of function
# evaluations or reached the maximum depth
if evals >= maxevals || iterations >= depth - depth_start
if verbose
evals >= maxevals &&
@warn "reached maximum number of evaluations $evals >= $maxevals"
iterations >= depth - depth_start && @warn "reached maximum depth $depth"
end
return false
end
end
return true
end
| [
27,
7856,
261,
480,
29,
9908,
417,
12,
35,
993,
710,
14,
3163,
65,
11627,
8847,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
37811,
198,
220,
220,
220,
49948,
62,
1525,
7,
69,
11,
257,
3712,
3163,
69,
11,
275,
3712,
31... | 2.514779 | 2,199 |
<gh_stars>10-100
using Revise
using POMDPModelChecking
using POMDPs
using POMDPModels
using POMDPSimulators
using BeliefUpdaters
using QMDP
using SARSOP
using POMCPOW
pomdp = TigerPOMDP()
function POMDPModelChecking.labels(pomdp::TigerPOMDP, s::Bool, a::Int64)
if (a == 1 && s) || (a == 2 && !s)
return ["eaten"]
elseif (a == 2 && s) || (a == 1 && !s)
return ["!eaten"]
else
return ["!eaten"]
end
end
# POMDPs.reward(pomdp::TigerPOMDP, s::Bool, a::Int64) = ((a == 1 && s) || (a == 2 && !s)) ? -1. : 0.
# POMDPs.reward(pomdp::TigerPOMDP, s::Bool, a::Int64, sp::Int64) = (a == 1 && sp) || (a == 2 && !sp) ? -1. : 0.
γ = 1 - 1e-3
POMDPs.discount(::ProductPOMDP{Bool,Int64,Bool,Int64,String}) = γ
sarsop = SARSOPSolver(precision = 1e-3)
solver = ModelCheckingSolver(property = "!eaten", solver=sarsop)
ltl2tgba(solver.property, solver.automata_file)
autom_type = automata_type(solver.automata_file)
automata = nothing
if autom_type == "Buchi"
automata = hoa2buchi(solver.automata_file)
elseif autom_type == "Rabin"
automata = hoa2rabin(solver.automata_file)
end
pmdp = ProductPOMDP(pomdp, automata, Set{ProductState{Bool, Int64}}(), ProductState(false, -1))
accepting_states!(pmdp)
policy = solve(sarsop, pmdp)
updater = DiscreteUpdater(pmdp)
b0 = initialize_belief(up, initialstate_distribution(pmdp))
b0 = initialize_belief(up, initialstate_distribution(pmdp))
using Random
rng = MersenneTwister(1)
n_ep = 1000
avg_r = 0.
for ep=1:n_ep
global avg_r
s0 = initialstate(pomdp, rng)
hist = simulate(hr, pomdp, policy, up, b0, s0);
if hist.reward_hist[end] > 0.
avg_r += 1
end
end
avg_r /= n_ep
policy = solve(QMDPSolver(), pomdp)
pomcpow = POMCPOWSolver()
solver = ModelCheckingSolver(property = "!eaten U safe", solver=sarsop)
POMDPs.discount(::ProductPOMDP{Bool, Int64, Bool, Int64, String}) = γ
policy = solve(solver, pomdp)
pmdp = policy.mdp
function POMDPs.action(policy::ModelCheckingPolicy{P,M}, b::DiscreteBelief) where {P<:Policy, M<:ProductPOMDP}
return action(policy.policy, b)
end
policy = solve(sarsop, pomdp)
hr = HistoryRecorder(max_steps = 20)
up = DiscreteUpdater(policy.mdp)
b0 = initialize_belief(up, initialstate_distribution(pmdp))
using Random
rng = MersenneTwister(1)
n_ep = 1000
avg_r = 0.
for ep=1:n_ep
global avg_r
s0 = initialstate(pomdp, rng)
hist = simulate(hr, pomdp, policy, up, b0, s0);
if hist.reward_hist[end] > 0.
avg_r += 1
end
end
avg_r /= n_ep
trans_prob_consistency_check(pmdp)
for s in states(pmdp)
for a in actions(pmdp)
d = transition(pmdp, s, a)
println("Transition from state ", s, " action ", a, " : ", d.vals, " ", d.probs)
end
end
solver = ModelCheckingSolver(property = "!eaten U safe", solver=pomcpow)
policy = solve(solver, pomdp);
action(policy, b0)
using POMDPModelTools
using Random
using ParticleFilters
rng = MersenneTwister(1)
filter = SimpleParticleFilter(pomdp, LowVarianceResampler(1000))
b0 = initialize_belief(filter, initialstate_distribution(pomdp))
s0 = initialstate(pomdp, rng)
hr = HistoryRecorder(max_steps=100)
simulate(hr, pomdp, policy, filter, b0)
state_space = states(pmdp)
transition(pmdp, state_space[4], 2)
solve(sarsop, pmdp, pomdp_file_name="model.pomdpx")
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
5416,
786,
201,
198,
3500,
350,
2662,
6322,
17633,
9787,
278,
201,
198,
3500,
350,
2662,
6322,
82,
201,
198,
3500,
350,
2662,
6322,
5841,
1424,
201,
198,
3500,
350,
2662,
35,
3705,
... | 2.118077 | 1,643 |
<gh_stars>1-10
#-------------------------------------------------------------------
#* EMSO Model Library (EML) Copyright (C) 2004 - 2007 ALSOC.
#*
#* This LIBRARY is free software; you can distribute it and/or modify
#* it under the therms of the ALSOC FREE LICENSE as available at
#* http://www.enq.ufrgs.br/alsoc.
#*
#* EMSO Copyright (C) 2004 - 2007 ALSOC, original code
#* from http://www.rps.eng.br Copyright (C) 2002-2004.
#* All rights reserved.
#*
#* EMSO is distributed under the therms of the ALSOC LICENSE as
#* available at http://www.enq.ufrgs.br/alsoc.
#*
#*----------------------------------------------------------------------
#* File containg models of columns: distillation, stripping, absorbers
#* rectifier, ....
#*
#* The default nomenclature is:
#* Type_Column_reboilertype_condensertyper
#*
#* where:
#* Type = refluxed or reboiled or section
#* Column = Stripping, Absorption, Rectifier, Distillation
#* Reboiler type (if exists) = kettle or thermosyphon
#* Condenser type (if exists) = with subccoling or without subcooling
#*
#*-----------------------------------------------------------------------
#* Author: <NAME>
#* $Id: column.mso 511 2008-05-12 17:25:33Z paula $
#*---------------------------------------------------------------------
type Packed_Section_ColumnBasic
Packed_Section_ColumnBasic()=begin
PP=outers.PP
NComp=outers.NComp
[
],
[
],
new(
DanaPlugin(Dict{Symbol,Any}(
:Brief=>"External Physical Properties",
:Type=>"PP"
)),
DanaInteger(Dict{Symbol,Any}(
:Brief=>"Number of components",
:Protected=>true
)),
DanaInteger(Dict{Symbol,Any}(
:Brief=>"Number of Feed Stages",
:Default=>3,
:Protected=>true
)),
DanaInteger(Dict{Symbol,Any}(
:Brief=>"Number of Stages",
:Default=>3
)),
fill(DanaInteger(Dict{Symbol,Any}(
:Brief=>"Feed Stage Location",
:Default=>2
)),(NumberOfFeeds)),
length(Dict{Symbol,Any}(
:Brief=>"Height of packing"
)),
heat_rate(Dict{Symbol,Any}(
:Brief=>"Rate of heat supply"
)),
length(Dict{Symbol,Any}(
:Brief=>"Column diameter"
)),
DanaReal(Dict{Symbol,Any}(
:Brief=>"Void fraction of packing, (m^3 void space/m^3 packed bed)"
)),
positive(Dict{Symbol,Any}(
:Brief=>"Resistance coefficient on the liquid load",
:Default=>1
)),
DanaReal(Dict{Symbol,Any}(
:Brief=>"surface area per packing volume",
:Unit=>"m^2/m^3"
)),
length(Dict{Symbol,Any}(
:Brief=>"The Height Equivalent to a Theoretical Plate",
:Protected=>true
)),
[
:(LiquidConnector.F= LiquidInlet.F),
:(LiquidConnector.T = LiquidInlet.T),
:(LiquidConnector.P = LiquidInlet.P),
:(LiquidConnector.z = LiquidInlet.z),
:(LiquidConnector.v = LiquidInlet.v),
:(LiquidConnector.h = LiquidInlet.h),
:(VapourConnector.F= VapourInlet.F),
:(VapourConnector.T = VapourInlet.T),
:(VapourConnector.P = VapourInlet.P),
:(VapourConnector.z = VapourInlet.z),
:(VapourConnector.v = VapourInlet.v),
:(VapourConnector.h = VapourInlet.h),
:(LiquidOutlet.F= STAGES(NumberOfStages).OutletLiquid.F),
:(LiquidOutlet.T = STAGES(NumberOfStages).OutletLiquid.T),
:(LiquidOutlet.P = STAGES(NumberOfStages).OutletLiquid.P),
:(LiquidOutlet.z = STAGES(NumberOfStages).OutletLiquid.z),
:(VapourOutlet.F= STAGES(1).OutletVapour.F),
:(VapourOutlet.T = STAGES(1).OutletVapour.T),
:(VapourOutlet.P = STAGES(1).OutletVapour.P),
:(VapourOutlet.z = STAGES(1).OutletVapour.z),
:(TCI*"K" = STAGES(CONTROL.Tindicator_TrayNumber).OutletVapour.T),
:(PCI*"atm" = STAGES(CONTROL.Pindicator_TrayNumber).OutletVapour.P),
:(zCI * 1e-6 = STAGES(CONTROL.zindicator_TrayNumber).OutletLiquid.z(CONTROL.zindicator_Component) * STAGES(CONTROL.zindicator_TrayNumber).Mw(CONTROL.zindicator_Component) / sum(STAGES(CONTROL.zindicator_TrayNumber).Mw*STAGES(CONTROL.zindicator_TrayNumber).OutletLiquid.z)),
:(PressureDrop = STAGES(NumberOfStages).OutletLiquid.P - STAGES(1).OutletLiquid.P),
],
[
"","","","","","","","","","","","","","","","","","","","","Tray Temperature Indicator","Tray Pressure Indicator","Tray Composition Indicator","PressureDrop",
],
[:PP,:NComp,:NumberOfFeeds,:NumberOfStages,:FeedStageLocation,:PackingHeight,:HeatSupply,:ColumnDiameter,:VoidFraction,:ResistanceCoeff,:AreaPerPackingVol,:HETP,],
)
end
PP::DanaPlugin
NComp::DanaInteger
NumberOfFeeds::DanaInteger
NumberOfStages::DanaInteger
FeedStageLocation::Array{DanaInteger}
PackingHeight::length
HeatSupply::heat_rate
ColumnDiameter::length
VoidFraction::DanaReal
ResistanceCoeff::positive
AreaPerPackingVol::DanaReal
HETP::length
equations::Array{Expr,1}
equationNames::Array{String,1}
initials::Array{Expr,1}
initialNames::Array{String,1}
parameters::Array{Symbol,1}
attributes::Dict{Symbol,Any}
end
export Packed_Section_ColumnBasic
function set(in::Packed_Section_ColumnBasic)
STAGES.Number_Stages = NumberOfStages
STAGES.HeightOfPacking = PackingHeight
STAGES.HeatOnStage = HeatSupply
STAGES.ColumnInternalDiameter = ColumnDiameter
STAGES.PackingVoidFraction = VoidFraction
STAGES.LiquidResistanceCoeff = ResistanceCoeff
STAGES.AreaPerPackingVolume = AreaPerPackingVol
HETP = PackingHeight/NumberOfStages
end
function setEquationFlow(in::Packed_Section_ColumnBasic)
addEquation(1)
addEquation(2)
addEquation(3)
addEquation(4)
addEquation(5)
addEquation(6)
addEquation(7)
addEquation(8)
addEquation(9)
addEquation(10)
addEquation(11)
addEquation(12)
addEquation(13)
addEquation(14)
addEquation(15)
addEquation(16)
addEquation(17)
addEquation(18)
addEquation(19)
addEquation(20)
#TCI*'K' = TRAYS(min([NumberOfTrays, CONTROL.Tindicator_TrayNumber])).OutletVapour.T;
addEquation(21)
#PCI*'atm' = TRAYS(min([NumberOfTrays, CONTROL.Pindicator_TrayNumber])).OutletVapour.P;
addEquation(22)
#zCI * .000001 = STAGES(CONTROL.zindicator_TrayNumber).OutletVapour.z(CONTROL.zindicator_Component) * STAGES(CONTROL.zindicator_TrayNumber).Mw(CONTROL.zindicator_Component) ; #/sum(STAGES(CONTROL.zindicator_TrayNumber).Mw*STAGES(CONTROL.zindicator_TrayNumber).OutletLiquid.z);
addEquation(23)
addEquation(24)
end
function initial(in::Packed_Section_ColumnBasic)
end
function atributes(in::Packed_Section_ColumnBasic,_::Dict{Symbol,Any})
fields::Dict{Symbol,Any}=Dict{Symbol,Any}()
fields[:Pallete]=false
fields[:Icon]="icon/PackedSectionColumn"
fields[:Brief]="Model of a packed column section."
fields[:Info]="== Model of a packed column section containing ==
* NStages theoretical stages.
== Specify ==
* the feed stream of each tray (Inlet);
* the InletLiquid stream of the top tray;
* the InletVapour stream of the bottom tray;
* the total pressure drop (dP) of the section.
== Initial Conditions ==
* the stages temperature (OutletLiquid.T);
* the stages liquid holdup;
* (NoComps - 1) OutletLiquid (OR OutletVapour) compositions for each tray.
"
drive!(fields,_)
return fields
end
Packed_Section_ColumnBasic(_::Dict{Symbol,Any})=begin
newModel=Packed_Section_ColumnBasic()
newModel.attributes=atributes(newModel,_)
newModel
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
10097,
6329,
198,
2,
9,
41363,
46,
9104,
10074,
357,
3620,
43,
8,
15069,
357,
34,
8,
5472,
532,
4343,
27249,
4503,
13,
198,
2,
9,
198,
2,
9,
770,
45651,
49,
13153,
318,
1479,
3788,
... | 2.500525 | 2,855 |
<filename>test/packages/DocumentedCode/src/organized_folder/v_func.jl
"""
v_func()
Lorem ipsum dolor sit amet.
"""
function v_func()
9
end
"""
v_func(cur_obj::GType)
Lorem ipsum dolor sit amet.
"""
function v_func(cur_obj::GType)
8
end
"""
v_func(cur_obj::AType)
Lorem ipsum dolor sit amet.
"""
function v_func(cur_obj::AType)
7
end
| [
27,
34345,
29,
9288,
14,
43789,
14,
35,
17664,
10669,
14,
10677,
14,
30280,
62,
43551,
14,
85,
62,
20786,
13,
20362,
198,
37811,
198,
220,
220,
220,
410,
62,
20786,
3419,
198,
198,
43,
29625,
220,
2419,
388,
288,
45621,
1650,
716,
... | 2.21118 | 161 |
<reponame>giordano/DataScienceTutorials.jl<gh_stars>0
# Before running this, please make sure to activate and instantiate the environment
# corresponding to [this `Project.toml`](https://raw.githubusercontent.com/alan-turing-institute/DataScienceTutorials.jl/master/Project.toml) and [this `Manifest.toml`](https://raw.githubusercontent.com/alan-turing-institute/DataScienceTutorials.jl/master/Manifest.toml)
# so that you get an environment which matches the one used to generate the tutorials:
#
# ```julia
# cd("DataScienceTutorials") # cd to folder with the *.toml
# using Pkg; Pkg.activate("."); Pkg.instantiate()
# ```
# **Main author**: <NAME> (IQVIA).## ## Getting started
using MLJ
using PrettyPrinting
import DataFrames
import Statistics
using PyPlot
using StableRNGs
@load LGBMRegressor
# Let us try LightGBM out by doing a regression task on the Boston house prices dataset.# This is a commonly used dataset so there is a loader built into MLJ.
# Here, the objective is to show how LightGBM can do better than a Linear Regressor# with minimal effort.## We start out by taking a quick peek at the data itself and its statistical properties.
features, targets = @load_boston
features = DataFrames.DataFrame(features)
@show size(features)
@show targets[1:3]
first(features, 3) |> pretty
# We can also describe the dataframe
DataFrames.describe(features)
# Do the usual train/test partitioning. This is important so we can estimate generalisation.
train, test = partition(eachindex(targets), 0.70, shuffle=true,
rng=StableRNG(52))
# Let us investigation some of the commonly tweaked LightGBM parameters. We start with looking at a learning curve for number of boostings.
lgb = LGBMRegressor() #initialised a model with default params
lgbm = machine(lgb, features[train, :], targets[train, 1])
boostrange = range(lgb, :num_iterations, lower=2, upper=500)
curve = learning_curve!(lgbm, resampling=CV(nfolds=5),
range=boostrange, resolution=100,
measure=rms)
figure(figsize=(8,6))
plot(curve.parameter_values, curve.measurements)
xlabel("Number of rounds", fontsize=14)
ylabel("RMSE", fontsize=14)
# \fig{lgbm_hp1.svg}
# It looks like that we don't need to go much past 100 boosts
# Since LightGBM is a gradient based learning method, we also have a learning rate parameter which controls the size of gradient updates.# Let us look at a learning curve for this parameter too
lgb = LGBMRegressor() #initialised a model with default params
lgbm = machine(lgb, features[train, :], targets[train, 1])
learning_range = range(lgb, :learning_rate, lower=1e-3, upper=1, scale=:log)
curve = learning_curve!(lgbm, resampling=CV(nfolds=5),
range=learning_range, resolution=100,
measure=rms)
figure(figsize=(8,6))
plot(curve.parameter_values, curve.measurements)
xscale("log")
xlabel("Learning rate (log scale)", fontsize=14)
ylabel("RMSE", fontsize=14)
# \fig{lgbm_hp2.svg}
# It seems like near 0.5 is a reasonable place. Bearing in mind that for lower# values of learning rate we possibly require more boosting in order to converge, so the default# value of 100 might not be sufficient for convergence. We leave this as an exercise to the reader.# We can still try to tune this parameter, however.
# Finally let us check number of datapoints required to produce a leaf in an individual tree. This parameter# controls the complexity of individual learner trees, and too low a value might lead to overfitting.
lgb = LGBMRegressor() #initialised a model with default params
lgbm = machine(lgb, features[train, :], targets[train, 1])
# dataset is small enough and the lower and upper sets the tree to have certain number of leaves
leaf_range = range(lgb, :min_data_in_leaf, lower=1, upper=50)
curve = learning_curve!(lgbm, resampling=CV(nfolds=5),
range=leaf_range, resolution=50,
measure=rms)
figure(figsize=(8,6))
plot(curve.parameter_values, curve.measurements)
xlabel("Min data in leaf", fontsize=14)
ylabel("RMSE", fontsize=14)
# \fig{lgbm_hp3.svg}
# It does not seem like there is a huge risk for overfitting, and lower is better for this parameter.
# Using the learning curves above we can select some small-ish ranges to jointly search for the best# combinations of these parameters via cross validation.
r1 = range(lgb, :num_iterations, lower=50, upper=100)
r2 = range(lgb, :min_data_in_leaf, lower=2, upper=10)
r3 = range(lgb, :learning_rate, lower=1e-1, upper=1e0)
tm = TunedModel(model=lgb, tuning=Grid(resolution=5),
resampling=CV(rng=StableRNG(123)), ranges=[r1,r2,r3],
measure=rms)
mtm = machine(tm, features, targets)
fit!(mtm, rows=train);
# Lets see what the cross validation best model parameters turned out to be?
best_model = fitted_params(mtm).best_model
@show best_model.learning_rate
@show best_model.min_data_in_leaf
@show best_model.num_iterations
# Great, and now let's predict using the held out data.
predictions = predict(mtm, rows=test)
rms_score = round(rms(predictions, targets[test, 1]), sigdigits=4)
@show rms_score
# This file was generated using Literate.jl, https://github.com/fredrikekre/Literate.jl
| [
27,
7856,
261,
480,
29,
12397,
585,
5733,
14,
6601,
26959,
51,
44917,
82,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
2,
7413,
2491,
428,
11,
3387,
787,
1654,
284,
15155,
290,
9113,
9386,
262,
2858,
198,
2,
11188,
284,
685,
5661,
... | 2.950645 | 1,783 |
# benchmarks of reduction on views
using NumericExtensions
const a = rand(1000, 1000)
const a_sub = sub(a, 1:999, :)
const a_view = view(a, 1:999, :)
println("for sum:")
for dim = 1:2
# warmup
sum(a_sub, dim)
sum(a_view, dim)
# profile
et1 = @elapsed for i=1:100; sum(a_sub, dim); end
et2 = @elapsed for i=1:100; sum(a_view, dim); end
@printf(" dim = %d: on a_sub => %.4fs on a_view => %.4fs | gain = %.4fx\n",
dim, et1, et2, et1 / et2)
end
println("for sumabs:")
for dim = 1:2
# warmup
sum(a_sub, dim)
sum(a_view, dim)
# profile
et1 = @elapsed for i=1:100; sum(abs(a_sub), dim); end
et2 = @elapsed for i=1:100; sumabs(a_view, dim); end
@printf(" dim = %d: on a_sub => %.4fs on a_view => %.4fs | gain = %.4fx\n",
dim, et1, et2, et1 / et2)
end
| [
2,
31747,
286,
7741,
319,
5009,
198,
198,
3500,
399,
39223,
11627,
5736,
198,
198,
9979,
257,
796,
43720,
7,
12825,
11,
8576,
8,
198,
198,
9979,
257,
62,
7266,
796,
850,
7,
64,
11,
352,
25,
17032,
11,
14373,
198,
9979,
257,
62,
... | 2.041262 | 412 |
<reponame>raphaelpanta/julia-lang-exemplos
include("..\\src\\introducao.jl")
module IntroducaoTeste
include("error_handler_pt_br.jl")
end
using Base.Test
import Introducao
Test.with_handler(custom_handler) do
@test soma(1,2) == 2
end
| [
27,
7856,
261,
480,
29,
1470,
3010,
79,
4910,
14,
73,
43640,
12,
17204,
12,
1069,
18856,
418,
198,
17256,
7203,
492,
6852,
10677,
6852,
27427,
66,
5488,
13,
20362,
4943,
198,
198,
21412,
11036,
66,
5488,
14402,
68,
198,
220,
2291,
7... | 2.520833 | 96 |
module Node
mutable struct node
mass
radius
position
velocity
end
export node
end
| [
21412,
19081,
198,
198,
76,
18187,
2878,
10139,
198,
220,
220,
220,
2347,
198,
220,
220,
220,
16874,
198,
220,
220,
220,
2292,
198,
220,
220,
220,
15432,
198,
437,
198,
198,
39344,
10139,
198,
437,
198
] | 2.702703 | 37 |
<gh_stars>1-10
using RCall, MixedModels, Test
using StatsBase: zscore
const LMM = LinearMixedModel
const GLMM = GeneralizedLinearMixedModel
@testset "merMod" begin
# this is available in MixedModels.dataset(:sleepstudy) but with different
# capitalization than in R
sleepstudy = rcopy(R"sleepstudy")
jlmm = fit!(LMM(@formula(Reaction ~ 1 + round(Days) + (1|Subject)),sleepstudy), REML=false)
@testset "bare model" begin
@test_throws ArgumentError (@rput jlmm)
end
@testset "reversed tuple" begin
jm = (sleepstudy, jlmm);
@test_throws ArgumentError (@rput jm)
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
13987,
439,
11,
35250,
5841,
1424,
11,
6208,
198,
3500,
20595,
14881,
25,
1976,
26675,
198,
9979,
406,
12038,
796,
44800,
44,
2966,
17633,
198,
9979,
10188,
12038,
796,
3611,
1143,
14993... | 2.526316 | 247 |
<filename>basics.jl
println(Sys.WORD_SIZE)
println("typeof(1): $(typeof(1))")
println("zero: $(zero(Float64))")
println("zero: $(one(Float64))")
for T in [Int8,Int16,Int32,Int64,Int128,UInt8,UInt16,UInt32,UInt64,UInt128]
println("$(lpad(T,7)): [$(typemin(T)),$(typemax(T))]")
end
x = [1,2,3] .^ 3
println("vectorized operation using dot notation: $(x))")
# NaN is not equal to, not less than, and not greater than anything, including itself.
println(NaN == NaN)
println([1 NaN] == [1 NaN])
println([1 3] == [1 3])
println(isequal(NaN, NaN))
println(isfinite(8))
println(isinf(Inf))
println(isnan(7))
# can chain comparsions
v(x) = (println(x); x)
v(1) < v(2) <= v(3)
# rational numbers - note that number is reduced to smallest possible terms
x = 6//9
println("rational number: $(x), numerator: $(numerator(x)), denominator: $(denominator(x))")
# strings
println("""Can use triple "quotes" to escape embedded quotes""")
str = "Hello World!"
println("Single index: $(str[1]), $(typeof(str[1]))") # also note indexing is 1-based
println("Same thing but using range: $(str[1:1]), $(typeof(str[1:1]))")
println("Using 'end' keyword: $(str[end])")
# function declaration - "traditional"
function f(x,y)
x + y
end
println("1 + 2 = $(f(1, 2))")
# assignment form
g(x, y) = x + y
println("2 + 3 = $(g(2, 3))")
# anonymous function example - can also use more than 1 variable (or 0)
println(map(x -> x^2 + 2x - 1, [1,3,-1]))
# named tuples
x = (a=1, b=1+1)
println(x.a)
# argument destructuring
minmax2(x, y) = (y < x) ? (y, x) : (x, y)
range2((min, max)) = max - min # note the extra set of parentheses
println("range of (10, 2): $(range2(minmax2(10, 2)))")
# varargs
bar(a,b,x...) = (a,b,x)
println("2 args (no varargs): $(bar(1,2))")
println("6 args: $(bar(1,2,3,4,5,6))")
x = (11, 12)
println("With splatting: $(bar(1, 2, x...))")
# Optional arguments
function Example(y::Int64, x::Int64=0, z::Int64=0)
y + x + z
end
println(Example(2011, 2))
# Keyword arguments - note the semicolon
function circle(x, y; radius::Int=1)
return (x=x, y=y, radius=radius)
end
println(circle(2, 3))
println(circle(4, 5; :radius => 6))
function keywords(; kwargs...)
println(kwargs...)
end
keywords(x="Hello", y="World", z="!")
# do blocks - pass in function as the first argument
y = map([-2, -1, 0, 2]) do x
if x < 0 && iseven(x)
return 0
elseif x == 0
return 1
else
return x
end
end
println(y)
# vectorizing functions (dot syntax)
f(x,y) = 3x + 4y
A = [1.0, 2.0, 3.0]
B = [4.0, 5.0, 6.0]
println(f.(pi, A))
println(f.(A, B))
Y = [1.0, 2.0, 3.0, 4.0]
X = similar(Y); # pre-allocate output array - more efficient than allocating for results
@. X = sin(cos(Y)) # equivalent to X .= sin.(cos.(Y))
println(X)
# compound expressions - begin blocks, note that they don't have to be multiline
z = begin
x = 1
y = 2
x + y
end
println("compound expression: $(z)")
z = (x = 1; y = 2; x + y) # using chain syntax - can be multiline
println("using chain syntax: $(z)")
# conditional statements - note that no local scope
function test(x,y)
if x < y
relation = "less than"
elseif x == y
relation = "equal to"
else
relation = "greater than"
end
println("x is ", relation, " y.")
end
test(1, 2)
test(2, 1)
test(2, 2)
# ternary operator
x = 1; y = 0
println(x < y ? "less than" : "not less than")
# short circuit evaluation
# you can use non-booleans as the last expression in a conditional chain
function fact(n::Int)
n >= 0 || error("n must be non-negative")
n == 0 && return 1
n * fact(n-1)
end
println(fact(5))
println(fact(0))
#println(fact(-1))
# loops - while and for
i = 1
while i <= 5
println(i)
global i += 1
end
for i = 1:5 # 1:5 is a range object
println(i)
end
for i in [1,4,0]
println(i)
end
# same as using the keyword in
for s ∈ ["foo","bar","baz"]
println(s)
end
for i ∈ [1,2,3,4,5,6]
if iseven(i)
continue
elseif i == 5
break
end
println(i)
end
# forms cartesian product of the iterables
for i = 1:2, j = 3:4
println((i, j))
end
# exception handling
f(x) = x>=0 ? exp(-x) : throw(DomainError(x, "argument must be nonnegative"))
try
f(-1)
catch e
if isa(e, DomainError)
println("you can only provide nonnegative numbers")
else
rethrow(e)
end
finally
println("we did it team")
end
| [
27,
34345,
29,
12093,
873,
13,
20362,
198,
198,
35235,
7,
44387,
13,
54,
12532,
62,
33489,
8,
198,
198,
35235,
7203,
4906,
1659,
7,
16,
2599,
29568,
4906,
1659,
7,
16,
4008,
4943,
198,
35235,
7203,
22570,
25,
29568,
22570,
7,
43879,... | 2.413509 | 1,821 |
"""
readMapFromFITS{T <: Number}(f::FITSIO.FITSFILE, column, t::Type{T})
readMapFromFITS{T <: Number}(fileName::String, column, t::Type{T})
Read a Healpix map from the specified (1-base indexed) column in a
FITS file. The values will be read as numbers of type T. If the code
fails, FITSIO will raise an exception. (Refer to the FITSIO library
for more information.)
"""
function readMapFromFITS(f::FITSIO.FITSFile, column, t::Type{T}) where
{T <: Number}
value, comment = FITSIO.fits_read_keyword(f, "NSIDE")
nside = parse(Int, value)
value, comment = FITSIO.fits_read_keyword(f, "ORDERING")
ringOrdering = uppercase(strip(value[2:end-1])) == "RING"
repeat = (FITSIO.fits_get_coltype(f, column))[2]
nrows = FITSIO.fits_get_num_rows(f)
if repeat * nrows != nside2npix(nside)
error("Wrong number of pixels in column $column of FITS file (NSIDE=$nside)")
end
if ringOrdering
result = Map{T, Ring}(Array{T}(undef, nside2npix(nside)))
else
result = Map{T, Nest}(Array{T}(undef, nside2npix(nside)))
end
FITSIO.fits_read_col(f, column, 1, 1, result.pixels)
result
end
function readMapFromFITS(fileName::AbstractString,
column,
t::Type{T}) where {T <: Number}
f = FITSIO.fits_open_table(fileName)
result = readMapFromFITS(f, column, t)
FITSIO.fits_close_file(f)
result
end
################################################################################
"""
savePixelsToFITS(map::Map{T}, f::FITSIO.FITSFile, column) where {T <: Number}
Save the pixels of `map` into the column with index/name `column` in the FITS
file, which must have been already opened.
"""
function savePixelsToFITS(map::AbstractMap{T, O},
f::FITSIO.FITSFile,
column) where {T <: Number, O}
FITSIO.fits_update_key(f, "PIXTYPE", "HEALPIX",
"HEALPIX pixelisation")
FITSIO.fits_update_key(f, "NSIDE", map.resolution.nside,
"Value of NSIDE")
FITSIO.fits_update_key(f, "FIRSTPIX", 1,
"First pixel (1 based)")
FITSIO.fits_update_key(f, "LASTPIX", map.resolution.numOfPixels,
"Last pixel (1 based)")
FITSIO.fits_update_key(f, "INDXSCHM", "IMPLICIT",
"Indexing: IMPLICIT or EXPLICIT")
FITSIO.fits_write_col(f, column, 1, 1, map.pixels)
end
"""
saveToFITS{T <: Number, O <: Order}(map::Map{T, O},
f::FITSIO.FITSFile,
column)
saveToFITS{T <: Number, O <: Order}(map::Map{T, O},
fileName::String,
typechar="D",
unit="",
extname="MAP")
Save a Healpix map in the specified (1-based index) column in a FITS
file. If the code fails, FITSIO will raise an exception. (Refer to the
FITSIO library for more information.)
"""
function saveToFITS(map::OrderedMap{T, Ring},
f::FITSIO.FITSFile,
column) where {T <: Number}
FITSIO.fits_update_key(f, "ORDERING", "RING")
savePixelsToFITS(map, f, column)
end
function saveToFITS(map::OrderedMap{T, Nest},
f::FITSIO.FITSFile,
column) where {T <: Number}
FITSIO.fits_update_key(f, "ORDERING", "NEST")
savePixelsToFITS(map, f, column)
end
"""
saveToFITS(map::Map{T, O}, filename::AbstractString, typechar="D", unit="", extname="MAP") where {T <: Number, O <: Order}
Save a map into a FITS file. The name of the file is specified in
`filename`; if it begins with `!`, existing files will be overwritten
without warning. The parameter `typechar` specifies the data type to
be used in the FITS file: the default (`D`) will save 64-bit
floating-point values. See the CFITSIO documentation for other
values. The keyword `unit` specifies the measure unit used for the
pixels in the map. The keyword `extname` specifies the name of the HDU
where the map pixels will be written.
"""
function saveToFITS(map::OrderedMap{T, O},
fileName::AbstractString;
typechar="D",
unit="",
extname="MAP") where {T <: Number, O <: Order}
f = FITSIO.fits_create_file(fileName)
try
FITSIO.fits_create_binary_tbl(f, 0, [("PIXELS", "1$typechar", unit)], extname)
saveToFITS(map, f, 1)
finally
FITSIO.fits_close_file(f)
end
end
################################################################################
| [
37811,
198,
220,
220,
220,
1100,
13912,
4863,
37,
29722,
90,
51,
1279,
25,
7913,
92,
7,
69,
3712,
37,
29722,
9399,
13,
37,
29722,
25664,
11,
5721,
11,
256,
3712,
6030,
90,
51,
30072,
198,
220,
220,
220,
1100,
13912,
4863,
37,
2972... | 2.141118 | 2,218 |
"""
Bridges `CP.Reification{MOI.EqualTo}` to indicator constraints, both with equality
and inequalities (CP.DifferentFrom).
"""
struct ReificationEqualTo2IndicatorBridge{T <: Real} <: MOIBC.AbstractBridge
indic_true::MOI.ConstraintIndex{MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ONE, MOI.EqualTo{T}}}
indic_false::MOI.ConstraintIndex{MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ZERO, CP.DifferentFrom{T}}}
end
function MOIBC.bridge_constraint(
::Type{ReificationEqualTo2IndicatorBridge{T}},
model,
f::MOI.VectorOfVariables,
s::CP.Reification{MOI.EqualTo{T}},
) where {T}
return MOIBC.bridge_constraint(
ReificationEqualTo2IndicatorBridge{T},
model,
MOI.VectorAffineFunction{T}(f),
s,
)
end
function MOIBC.bridge_constraint(
::Type{ReificationEqualTo2IndicatorBridge{T}},
model,
f::MOI.VectorAffineFunction{T},
s::CP.Reification{MOI.EqualTo{T}},
) where {T <: Real}
indic_true = MOI.add_constraint(
model,
f,
MOI.Indicator{MOI.ACTIVATE_ON_ONE}(MOI.EqualTo(s.set.value))
)
indic_false = MOI.add_constraint(
model,
f,
MOI.Indicator{MOI.ACTIVATE_ON_ZERO}(CP.DifferentFrom(s.set.value))
# TODO: helper to build CP.\neq from MOI.EqTo, CP.Strictly from inequalities, like `!()`?
)
return ReificationEqualTo2IndicatorBridge{T}(indic_true, indic_false)
end
function MOI.supports_constraint(
::Type{ReificationEqualTo2IndicatorBridge{T}},
::Union{Type{MOI.VectorOfVariables}, Type{MOI.VectorAffineFunction{T}}},
::Type{CP.Reification{MOI.EqualTo{T}}},
) where {T <: Real}
return true
end
function MOIB.added_constrained_variable_types(::Type{ReificationEqualTo2IndicatorBridge{T}}) where {T <: Real}
return Tuple{Type}[]
end
function MOIB.added_constraint_types(::Type{ReificationEqualTo2IndicatorBridge{T}}) where {T <: Real}
return [
(MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ONE, MOI.EqualTo{T}}),
(MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ZERO, CP.DifferentFrom{T}}),
]
end
function MOI.get(
::ReificationEqualTo2IndicatorBridge{T},
::MOI.NumberOfConstraints{
MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ONE, MOI.EqualTo{T}},
},
) where {T <: Real}
return 1
end
function MOI.get(
::ReificationEqualTo2IndicatorBridge{T},
::MOI.NumberOfConstraints{
MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ZERO, CP.DifferentFrom{T}},
},
) where {T <: Real}
return 1
end
function MOI.get(
b::ReificationEqualTo2IndicatorBridge{T},
::MOI.ListOfConstraintIndices{
MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ONE, MOI.EqualTo{T}},
},
) where {T <: Real}
return [b.indic_true]
end
function MOI.get(
b::ReificationEqualTo2IndicatorBridge{T},
::MOI.ListOfConstraintIndices{
MOI.VectorAffineFunction{T}, MOI.Indicator{MOI.ACTIVATE_ON_ZERO, CP.DifferentFrom{T}},
},
) where {T <: Real}
return [b.indic_false]
end
| [
37811,
198,
33,
32124,
4600,
8697,
13,
3041,
2649,
90,
11770,
40,
13,
36,
13255,
2514,
92,
63,
284,
16916,
17778,
11,
1111,
351,
10537,
198,
392,
45460,
357,
8697,
13,
40341,
4863,
737,
198,
37811,
198,
7249,
797,
2649,
36,
13255,
2... | 2.239366 | 1,387 |
module SizeInterlacedTest
using SimplePNGs
using Test
include("common.jl")
using .TestCommon: load_json
pl(name) = SimplePNGs.load(joinpath("PngSuite", name*".png"))
@testset "Size test files" begin
@testset "1x1 paletted file, interlaced" begin
img1 = load_json("s01n3p01")
img2 = pl("s01i3p01")
@test img1 == img2
end
@testset "2x2 paletted file, interlaced" begin
img1 = load_json("s02n3p01")
img2 = pl("s02i3p01")
@test img1 == img2
end
@testset "3x3 paletted file, interlaced" begin
img1 = load_json("s03n3p01")
img2 = pl("s03i3p01")
@test img1 == img2
end
@testset "4x4 paletted file, interlaced" begin
img1 = load_json("s04n3p01")
img2 = pl("s04i3p01")
@test img1 == img2
end
@testset "5x5 paletted file, interlaced" begin
img1 = load_json("s05n3p02")
img2 = pl("s05i3p02")
@test img1 == img2
end
@testset "6x6 paletted file, interlaced" begin
img1 = load_json("s06n3p02")
img2 = pl("s06i3p02")
@test img1 == img2
end
@testset "7x7 paletted file, interlaced" begin
img1 = load_json("s07n3p02")
img2 = pl("s07i3p02")
@test img1 == img2
end
@testset "8x8 paletted file, interlaced" begin
img1 = load_json("s08n3p02")
img2 = pl("s08i3p02")
@test img1 == img2
end
@testset "9x9 paletted file, interlaced" begin
img1 = load_json("s09n3p02")
img2 = pl("s09i3p02")
@test img1 == img2
end
@testset "32x32 paletted file, interlaced" begin
img1 = load_json("s32n3p04")
img2 = pl("s32i3p04")
@test img1 == img2
end
@testset "33x33 paletted file, interlaced" begin
img1 = load_json("s33n3p04")
img2 = pl("s33i3p04")
@test img1 == img2
end
@testset "34x34 paletted file, interlaced" begin
img1 = load_json("s34n3p04")
img2 = pl("s34i3p04")
@test img1 == img2
end
@testset "35x35 paletted file, interlaced" begin
img1 = load_json("s35n3p04")
img2 = pl("s35i3p04")
@test img1 == img2
end
@testset "36x36 paletted file, interlaced" begin
img1 = load_json("s36n3p04")
img2 = pl("s36i3p04")
@test img1 == img2
end
@testset "37x37 paletted file, interlaced" begin
img1 = load_json("s37n3p04")
img2 = pl("s37i3p04")
@test img1 == img2
end
@testset "38x38 paletted file, interlaced" begin
img1 = load_json("s38n3p04")
img2 = pl("s38i3p04")
@test img1 == img2
end
@testset "39x39 paletted file, interlaced" begin
img1 = load_json("s39n3p04")
img2 = pl("s39i3p04")
@test img1 == img2
end
@testset "40x40 paletted file, interlaced" begin
img1 = load_json("s40n3p04")
img2 = pl("s40i3p04")
@test img1 == img2
end
end
end # module
| [
21412,
12849,
9492,
75,
2286,
14402,
198,
3500,
17427,
47,
10503,
82,
198,
3500,
6208,
198,
198,
17256,
7203,
11321,
13,
20362,
4943,
198,
3500,
764,
14402,
17227,
25,
3440,
62,
17752,
198,
198,
489,
7,
3672,
8,
796,
17427,
47,
10503,... | 1.900126 | 1,592 |
using Dates
@testset "util.jl" begin
p1 = DDR2import.util.Point(1.0, 2.0)
@test p1.lat == 1.0
@test p1.lon == 2.0
@test DDR2import.util.extract_lat("S123456") ≈ -12.58222222 atol = 0.0001
@test DDR2import.util.extract_lat("N123456.0") ≈ 12.58222222 atol = 0.0001
@test DDR2import.util.extract_lat("123456N") ≈ 12.58222222 atol = 0.0001
@test DDR2import.util.extract_lat("S1234") ≈ -12.56666666 atol = 0.0001
@test DDR2import.util.extract_lat("N12") ≈ 12.0 atol = 0.0001
@test DDR2import.util.extract_lat("1234S") ≈ -12.56666666 atol = 0.0001
@test DDR2import.util.extract_lat("12N") ≈ 12.0 atol = 0.0001
@test DDR2import.util.extract_lat("") === NaN
@test DDR2import.util.extract_lon("W1123456") ≈ -112.58222222 atol = 0.0001
@test DDR2import.util.extract_lon("E1123456.0") ≈ 112.58222222 atol = 0.0001
@test DDR2import.util.extract_lon("0123456E") ≈ 12.58222222 atol = 0.0001
@test DDR2import.util.extract_lon("W01234") ≈ -12.56666666 atol = 0.0001
@test DDR2import.util.extract_lon("E112") ≈ 112.0 atol = 0.0001
@test DDR2import.util.extract_lon("11234W") ≈ -112.56666666 atol = 0.0001
@test DDR2import.util.extract_lon("012E") ≈ 12.0 atol = 0.0001
@test DDR2import.util.extract_lon("E1") === NaN
# @test DDR2import.util.latlon("S123456", "012E").lat ≈ -12.58222222 atol = 0.0001
# @test DDR2import.util.latlon("S123456", "012E").lon ≈ 12.0 atol = 0.0001
# @test DDR2import.util.latlon("S123456", "E1") === NaN
#
# @test DDR2import.util.latlon("S123456E1123456.0").lat ≈ -12.58222222 atol = 0.0001
# @test DDR2import.util.latlon("S123456E1123456.0").lon ≈ 112.58222222 atol = 0.0001
# @test DDR2import.util.latlon("S123456W1123456.0").lon ≈ -112.58222222 atol = 0.0001
@test DDR2import.util.latlon("S123456", "012E").ϕ ≈ -12.58222222 atol = 0.0001
@test DDR2import.util.latlon("S123456", "012E").λ ≈ 12.0 atol = 0.0001
@test DDR2import.util.latlon("S123456", "E1") === NaN
@test DDR2import.util.latlon("123456S1123456.0E").ϕ ≈ -12.58222222 atol = 0.0001
@test DDR2import.util.latlon("123456S1123456.0E").λ ≈ 112.58222222 atol = 0.0001
@test DDR2import.util.latlon("123456S1123456.0W").λ ≈ -112.58222222 atol = 0.0001
yymmdd = DateFormat("YYmmdd")
testdate = DDR2import.util.format_date("200110", yymmdd, addyear=Year(2000))
@test Dates.year(testdate) == 2020
@test Dates.month(testdate) == 1
@test Dates.day(testdate) == 10
hhmmss = DateFormat("HHMMSS")
testtime = DDR2import.util.format_time("102030", hhmmss)
@test Dates.hour(testtime) == 10
@test Dates.minute(testtime) == 20
@test Dates.second(testtime) == 30
hhmm = DateFormat("HHMM")
testtime2 = DDR2import.util.format_time("2400", hhmm)
@test Dates.hour(testtime2) == 23
@test Dates.minute(testtime2) == 59
@test Dates.second(testtime2) == 59
yyyymmddhhmmss = DateFormat("YYYYmmddHHMMSS")
testdatetime = DDR2import.util.format_datetime("20200110102030", yyyymmddhhmmss)
@test Dates.year(testdatetime) == 2020
@test Dates.month(testdatetime) == 1
@test Dates.day(testdatetime) == 10
@test Dates.hour(testdatetime) == 10
@test Dates.minute(testdatetime) == 20
@test Dates.second(testdatetime) == 30
end
| [
3500,
44712,
198,
198,
31,
9288,
2617,
366,
22602,
13,
20362,
1,
2221,
198,
220,
220,
220,
279,
16,
796,
30085,
17,
11748,
13,
22602,
13,
12727,
7,
16,
13,
15,
11,
362,
13,
15,
8,
198,
220,
220,
220,
2488,
9288,
279,
16,
13,
1... | 2.238095 | 1,470 |
<reponame>tpr0p/Altro.jl
############################################################################################
# INFEASIBLE MODELS #
############################################################################################
struct Infeasible{N,M,D<:AbstractModel} <: AbstractModel
model::D
_u::SVector{M,Int} # inds to original controls
_ui::SVector{N,Int} # inds to infeasible controls
end
struct InfeasibleLie{N,M,D<:AbstractModel} <: RobotDynamics.LieGroupModel
model::D
_u::SVector{M,Int} # inds to original controls
_ui::SVector{N,Int} # inds to infeasible controls
end
""" $(TYPEDEF)
An infeasible model is an augmented dynamics model that makes the system artifically fully
actuated by augmenting the control vector with `n` additional controls. The dynamics are
handled explicitly in discrete time:
``x_{k+1} = f(x_k,u_k,dt) + w_k``
where ``w_k`` are the additional `n`-dimensional controls. In practice, these are constrained
to be zero by the end of the solve.
# Constructors
```julia
InfeasibleModel(model::AbstractModel)
```
"""
const InfeasibleModel{N,M,D} = Union{Infeasible{N,M,D},InfeasibleLie{N,M,D}} where {N,M,D}
function InfeasibleModel(model::AbstractModel)
n,m = size(model)
_u = SVector{m}(1:m)
_ui = SVector{n}((1:n) .+ m)
Infeasible(model, _u, _ui)
end
function InfeasibleModel(model::RobotDynamics.LieGroupModel)
n,m = size(model)
_u = SVector{m}(1:m)
_ui = SVector{n}((1:n) .+ m)
InfeasibleLie(model, _u, _ui)
end
RobotDynamics.LieState(model::InfeasibleLie) = RobotDynamics.LieState(model.model)
# Generic Infeasible Methods
RobotDynamics.state_dim(model::InfeasibleModel{n}) where n = n
RobotDynamics.control_dim(model::InfeasibleModel{n,m}) where {n,m} = n+m
RobotDynamics.dynamics(::InfeasibleModel, x, u) =
throw(ErrorException("Cannot evaluate continuous dynamics on an infeasible model"))
@generated function RobotDynamics.discrete_dynamics(::Type{Q}, model::InfeasibleModel{N,M},
z::AbstractKnotPoint{T,N}) where {T,N,M,Q<:Explicit}
_u = SVector{M}((1:M) .+ N)
_ui = SVector{N}((1:N) .+ (N+M))
quote
x = state(z)
dt = z.dt
u0 = z.z[$_u]
ui = z.z[$_ui]
RobotDynamics.discrete_dynamics($Q, model.model, x, u0, z.t, dt) + ui
end
end
@inline RobotDynamics.rotation_type(model::InfeasibleModel) where D = rotation_type(model.model)
@generated function RobotDynamics.discrete_jacobian!(::Type{Q}, ∇f, model::InfeasibleModel{N,M},
z::AbstractKnotPoint{T,N}, cache=nothing) where {T,N,M,Q<:Explicit}
∇ui = [(@SMatrix zeros(N,N+M)) Diagonal(@SVector ones(N)) @SVector zeros(N)]
_x = SVector{N}(1:N)
_u = SVector{M}((1:M) .+ N)
_z = SVector{N+M}(1:N+M)
_ui = SVector{N}((1:N) .+ (N+M))
zi = [:(z.z[$i]) for i = 1:N+M]
NM1 = N+M+1
NM = N+M
∇u0 = @SMatrix zeros(N,N)
quote
# Build KnotPoint for original model
s0 = SVector{$NM1}($(zi...), z.dt)
u0 = z.z[$_u]
ui = z.z[$_ui]
z_ = StaticKnotPoint(z.z[$_z], $_x, $_u, z.dt, z.t)
∇f_ = uview(∇f, 1:N, 1:$NM)
discrete_jacobian!($Q, ∇f_, model.model, z_)
# ∇f[$_x, N+NM] .= ∇f_[$_x, N+M] # ∇dt
∇f[$_x, $_ui] .= Diagonal(@SVector ones(N))
return
# ∇f[$_x,$_ui]
# [∇f[$_x, $_z] $∇u0 ∇dt] + $∇ui
end
end
function RD._discrete_jacobian!(::RD.ForwardAD, ::Type{Q}, ∇f, model::InfeasibleModel{N,M},
z::AbstractKnotPoint{T,N}, cache=nothing) where {T,N,M,Q<:Explicit}
RD.discrete_jacobian!(Q, ∇f, model, z, cache)
end
function RobotDynamics.state_diff(model::InfeasibleModel, x::SVector, x0::SVector)
RobotDynamics.state_diff(model.model, x, x0)
end
function RobotDynamics.state_diff_jacobian!(G, model::InfeasibleModel, Z::Traj)
RobotDynamics.state_diff_jacobian!(G, model.model, Z)
end
function RobotDynamics.∇²differential!(∇G, model::InfeasibleModel, x::SVector, dx::SVector)
return ∇²differential!(∇G, model.model, x, dx)
end
RobotDynamics.state_diff_size(model::InfeasibleModel) = RobotDynamics.state_diff_size(model.model)
Base.position(model::InfeasibleModel, x::SVector) = position(model.model, x)
RobotDynamics.orientation(model::InfeasibleModel, x::SVector) = orientation(model.model, x)
"Calculate a dynamically feasible initial trajectory for an infeasible problem, given a
desired trajectory"
function infeasible_trajectory(model::InfeasibleModel{n,m}, Z0::Traj) where {T,n,m}
x,u = zeros(model)
ui = @SVector zeros(n)
Z = [KnotPoint(state(z), [control(z); ui], z.dt, z.t) for z in Z0]
N = length(Z0)
for k = 1:N-1
RobotDynamics.propagate_dynamics(RobotDynamics.RK4, model, Z[k+1], Z[k])
x′ = state(Z[k+1])
u_slack = state(Z0[k+1]) - x′
u = [control(Z0[k]); u_slack]
RobotDynamics.set_control!(Z[k], u)
RobotDynamics.set_state!(Z[k+1], x′ + u_slack)
end
return Traj(Z)
end
############################################################################################
# INFEASIBLE CONSTRAINT #
############################################################################################
""" $(TYPEDEF) Constraints additional ``infeasible'' controls to be zero.
Constructors: ```julia
InfeasibleConstraint(model::InfeasibleModel)
InfeasibleConstraint(n,m)
```
"""
struct InfeasibleConstraint{n} <: TO.ControlConstraint
ui::SVector{n,Int}
m::Int
function InfeasibleConstraint(n::Int, m::Int)
ui = SVector{n}((1:n) .+ m)
new{n}(ui, m)
end
end
InfeasibleConstraint(model::InfeasibleModel{n,m}) where {n,m} = InfeasibleConstraint(n,m)
RobotDynamics.control_dim(con::InfeasibleConstraint{n}) where n = n + con.m
@inline TO.sense(::InfeasibleConstraint) = TO.Equality()
@inline Base.length(::InfeasibleConstraint{n}) where n = n
function TO.evaluate(con::InfeasibleConstraint, u::SVector)
ui = u[con.ui] # infeasible controls
end
function TO.jacobian!(∇c, con::InfeasibleConstraint{n}, u::SVector) where n
for (i,j) in enumerate(con.ui)
∇c[i,j] = 1
end
return true
end
| [
27,
7856,
261,
480,
29,
83,
1050,
15,
79,
14,
2348,
23528,
13,
20362,
198,
198,
29113,
29113,
14468,
7804,
4242,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.243124 | 2,727 |
module GatekeepHelperFlagSummitGem
using ..Ahorn, Maple
# Thanks to Communal Helper for most of this implementation
@mapdef Entity "GatekeepHelper/FlagSummitGem" FlagSummitGem(
x::Integer,
y::Integer,
index::Integer=0,
sprite::String="",
flag::String="",
particleColor::String="",
)
const placements = Ahorn.PlacementDict(
"Flag Summit Gem (Gatekeep Helper)" => Ahorn.EntityPlacement(
FlagSummitGem,
),
)
const sprites = ["collectables/summitgems/$i/gem00" for i in 0:7]
function getSprite(index)
if index > length(sprites)
return sprites[end]
end
return sprites[index]
end
# positive numbers only
function getClampedIndex(entity::FlagSummitGem)
index = Int(get(entity.data, "index", 0))
if index < 0
index = 0
end
entity.data["index"] = index
return index
end
function Ahorn.selection(entity::FlagSummitGem)
x, y = Ahorn.position(entity)
return Ahorn.getSpriteRectangle(getSprite(getClampedIndex(entity) + 1), x, y)
end
Ahorn.render(ctx::Ahorn.Cairo.CairoContext, entity::FlagSummitGem) =
Ahorn.drawSprite(ctx, getSprite(getClampedIndex(entity) + 1), 0, 0)
end
| [
21412,
12816,
14894,
47429,
34227,
13065,
2781,
38,
368,
198,
198,
3500,
11485,
10910,
1211,
11,
21249,
198,
198,
2,
6930,
284,
4440,
282,
5053,
525,
329,
749,
286,
428,
7822,
198,
198,
31,
8899,
4299,
20885,
366,
22628,
14894,
47429,
... | 2.581498 | 454 |
<gh_stars>0
coast(region=[110 140 20 35],
proj=(name=:Albers, center=[125 20], parallels=[25 45]),
frame=:ag,
resolution=:low,
area=250,
land=:green,
shore=:thinnest,
fmt=:png, savefig="1")
coast(region=[-88 -70 18 24], proj=(name=:eqdc, center=[-79 21], parallels=[19 23]),
frame=:ag, res=:intermediate, borders=(type=1,pen=("thick","red")), land=:green,
shore=:thinnest, fmt=:png, savefig="2")
coast(region=[-130 -70 24 52], proj=(name=:lambertConic, center=[-100 35], parallels=[33 45]),
frame=:ag, res=:low, borders=((type=1, pen=("thick","red")), (type=2, pen=("thinner",))),
area=500, land=:tan, water=:blue, shore=(:thinnest,:white), fmt=:png, savefig="3")
coast(region=(-180,-20,0,90), proj=:poly, xaxis=(annot=30,grid=10), yaxis=(annot=10,grid=10),
res=:crude, area=1000, land=:lightgray, shore=:thinnest, figsize=10, fmt=:png, savefig="4")
coast(region="0/-40/60/-10+r", proj=(name=:laea, center=[30,-30]), frame=:ag, res=:low,
area=500, land=(pattern=10,dpi=300), shore=:thinnest, figsize=10, fmt=:png, savefig="5")
gmt("destroy")
coast(region=:g, proj=(name=:laea, center=[280,30]), frame=:g, res=:crude, area=1000,
land=:navy, figsize=8, fmt=:png, savefig="6")
coast(region=(-30,30,60,72), proj=(name=:Stereographic, center=[0,90], paralles=60),
frame=:a10g, res=:low, area=250, land=:royalblue, water=:seashell,
figscale="1:30000000", fmt=:png, savefig="7")
coast(region="-25/59/70/72+r", proj=(name=:stereographic, center=(10,90)), frame=:a20g, res=:low,
area=250, land=:darkbrown, shore=:thinnest, water=:lightgray, figsize=11, fmt=:png, savefig="8")
coast(region="100/-42/160/-8r", proj=(name=:stereographic, center=(130,-30)), frame=:ag, res=:low,
area=500, land=:green, ocean=:lightblue, shore=:thinnest, figsize=10, fmt=:png, savefig="1")
coast(region=:g, proj="G4/52/230/90/60/180/60/60", xaxis=(annot=2,grid=2), yaxis=(annot=1,grid=1),
rivers=:all, res=:intermediate, land=:lightbrown, ocean=:lightblue, shore=:thinnest, figsize=10,
par=(:MAP_ANNOT_MIN_SPACING,0.65), fmt=:png, savefig="9")
coast(region=:g, proj=(name=:ortho, center=(-75,41)), frame=:g, res=:crude, area=5000,
land=:pink, ocean=:thistle, figsize=10, fmt=:png, savefig="10")
coast(region=:g, proj=(name=:azimuthalEquidistant, center=(-100,40)), frame=:g,
res=:crude, area=10000, land=:lightgray, shore=:thinnest, figsize=10, fmt=:png, savefig="11")
coast(region=:g, proj=(name=:Gnomonic, center=(-120,35), horizon=60),
frame=(annot=30, grid=15), res=:crude, area=10000, land=:tan, ocean=:cyan,
shore=:thinnest, figsize=10, fmt=:png, savefig="12")
coast(region=(0,360,-70,70), proj=:Mercator, xaxis=(annot=60,ticks=15), yaxis=(annot=30,ticks=15),
res=:crude, area=:5000, land=:red, scale=0.03, par=(:MAP_FRAME_TYPE,"fancy+"), fmt=:png, savefig="13")
coast(region="20/30/50/45r", proj=(name=:tmerc, center=35), frame=:ag, res=:low,
area=250, land=:lightbrown, ocean=:seashell, shore=:thinnest, scale=0.45, fmt=:png, savefig="14")
coast(region=(0,360,-80,80), proj=(name=:tmerc, center=[330 -45]),
frame=(annot=30, grid=:auto, axes=:WSne), res=:crude, area=2000, land=:black,
water=:lightblue, figsize=9, fmt=:png, savefig="15")
coast(region="270/20/305/25+r", proj=(name=:omercp, center=[280 25.5], parallels=[22 69]),
frame=:ag, res=:i, area=250, shore=:thinnest, land=:burlywood, water=:azure,
rose="jTR+w1+f2+l+o0.4", figsize=12, par=(FONT_TITLE=8, MAP_TITLE_OFFSET=0.12), fmt=:png, savefig="16")
coast(region="7:30/38:30/10:30/41:30r", proj=(name=:Cassini, center=[8.75 40]),
frame=:afg, map_scale="jBR+c40+w100+f+o0.4/0.5", land=:springgreen,
res=:high, water=:azure, shore=:thinnest, rivers=(:all,:thinner), figsize=6,
par=(:FONT_LABEL,12), fmt=:png, savefig="17")
coast(region=:g, proj=:equidistCylindrical, frame=(annot=60, ticks=30, grid=30),
res=:crude, area=5000, land=:tan4, water=:lightcyan, figsize=12, fmt=:png, savefig="18")
coast(region=(-145,215,-90,90), proj=(name=:cylindricalEqualArea, center=(35,30)),
frame=(annot=45, grid=45), res=:crude, area=10000, water=:dodgerblue,
shore=:thinnest, figsize=12, fmt=:png, savefig="19")
coast(region=(-90,270,-80,90), proj=:Miller, xaxis=(annot=45,grid=45),
yaxis=(annot=30,grid=30), res=:crude, area=10000, land=:khaki, water=:azure,
shore=:thinnest, scale="1:400000000", fmt=:png, savefig="20")
coast(region=(-180,180,-60,80), proj=(name=:cylindricalStereographic, center=(0,45)),
xaxis=(annot=60,ticks=30, grid=30), yaxis=(annot=30,grid=30), res=:crude,
area=5000, shore=:black, land=:seashell4, ocean=:antiquewhite1, figsize=12, fmt=:png, savefig="21")
coast(region=:g, proj=:Hammer, frame=:g, res=:crude, area=10000, land=:black,
ocean=:cornsilk, figsize=12, fmt=:png, savefig="22")
coast(region=:d, proj=:Mollweide, frame=:g, res=:crude, area=10000, land=:tomato1,
water=:skyblue, figsize=12, fmt=:png, savefig="23")
coast(region=:d, proj=:Winkel, frame=:g, res=:crude, area=10000, land=:burlywood4,
water=:wheat1, figsize=12, fmt=:png, savefig="24")
coast(region=:d, proj=:Robinson, frame=:g, res=:crude, area=10000, land=:goldenrod,
water=:snow2, figsize=12, fmt=:png, savefig="25")
coast(region=:d, proj=:EckertIV, frame=:g, res=:crude, area=10000, land=:ivory,
water=:bisque3, shore=:thinnest, figsize=12, fmt=:png, savefig="26")
coast(region=:d, proj=:EckertVI, frame=:g, res=:crude, area=10000, land=:ivory,
water=:bisque3, shore=:thinnest, figsize=12, fmt=:png, savefig="27")
coast(region=:d, proj=:Sinusoidal, xaxis=(grid=30,), yaxis=(grid=15,), res=:crude,
area=10000, land=:coral4, water=:azure3, figsize=12, fmt=:png, savefig="28")
coast(region=(200,340,-90,90), proj=:Sinusoidal, frame=:g, res=:crude, area=10000,
land=:darkred, water=:azure, scale=0.03333)
coast!(region=(-20,60,-90,90), frame=:g, res=:crude, area=10000, land=:darkgreen,
water=:azure, xoff=4.666)
coast!(region=(60,200,-90,90), frame=:g, res=:crude, area=10000, land=:darkblue,
water=:azure, xoff=2.6664, fmt=:png, savefig="29")
coast(region=:g, proj=:VanderGrinten, xaxis=(grid=30,), yaxis=(grid=15,),res=:crude,
land=:lightgray, water=:cornsilk, area=10000, shore=:thinnest, figsize=10, fmt=:png, savefig="30")
| [
27,
456,
62,
30783,
29,
15,
198,
1073,
459,
7,
36996,
41888,
11442,
12713,
1160,
3439,
4357,
220,
198,
220,
220,
220,
220,
220,
386,
73,
16193,
3672,
28,
25,
2348,
1213,
11,
3641,
41888,
11623,
1160,
4357,
30614,
41888,
1495,
4153,
... | 2.160787 | 2,948 |
<filename>src/Assessors.jl
# ----------------------------------------------------
# --- Accessor.jl
# ----------------------------------------------------
# Function to access to radio parameters
getError(obj::UHDBinding) = UHDBindings.getError(obj);
getError(obj::RadioSim) = RadioSims.getError(obj);
getError(obj::SDROverNetwork) = SDROverNetworks.getMD(obj)[3];
getError(obj::RTLSDRBinding) = RTLSDRBindings.getError(obj);
getTimestamp(obj::UHDBinding) = UHDBindings.getTimestamp(obj);
getTimestamp(obj::RadioSim) = RadioSims.getTimestamp(obj);
getTimestamp(obj::SDROverNetwork) = SDROverNetworks.getMD(obj)[1:2];
getTimestamp(obj::RTLSDRBinding) = RTLSDRBindings.getTimestamp(obj);
"""
Get the current sampling rate of the radio device
The second parameter (optionnal) speicfies the Rx or Tx board (default : Rx)
"""
getSamplingRate(obj::AbstractSDR;mode=:rx) = ((mode == :rx) ? obj.rx.samplingRate : obj.tx.samplingRate)
getSamplingRate(obj::PlutoSDR;mode=:rx) = ((mode == :rx) ? obj.rx.effectiveSamplingRate : obj.tx.effectiveSamplingRate)
"""
Get the current carrier frequency of the radio device
The second parameter (optionnal) speicfies the Rx or Tx board (default : Rx)
"""
getCarrierFreq(obj::AbstractSDR;mode=:rx) = (mode == :rx) ? obj.rx.carrierFreq : obj.tx.carrierFreq
getCarrierFreq(obj::PlutoSDR;mode=:rx) = (mode == :rx) ? obj.rx.effectiveCarrierFreq : obj.tx.effectiveCarrierFreq
"""
Get the current radio gain
The second parameter (optionnal) specifies the Rx or Tx board (default : Rx)
"""
getGain(obj::AbstractSDR;mode=:rx) = (mode == :rx) ? obj.rx.gain : obj.tx.gain
getGain(obj::PlutoSDR;mode=:rx) = AdalmPluto.getGain(obj)
"""
Check if a SDR has already been closed. The falg is true is the SDR ressources have been released and false otherwise.
# --- Syntax
flag = isClosed(radio)
# --- Input parameters
- radio : SDR device
# --- Output parameters
- flag : True is SDR is already closed, false otherwise
"""
isClosed(obj::AbstractSDR) = Bool(obj.tx.released) || Bool(obj.rx.released)
isClosed(obj::PlutoSDR) = Bool(obj.released)
"""
Returns the radio packet size. Each radio backend encapsulates the IQ samples into chunks of data. The `recv` command can be used with any size but it can be more efficient to match the desired size with the one provided by the radio
# --- Syntax
bufferSize = getBufferSize(radio)
# --- Input parameters
- radio : SDR device
# --- Output parameters
bufferSize : Size of radio internal buffer
"""
getBufferSize(obj::AbstractSDR) = obj.rx.packetSize # We get the fields
getBufferSize(obj::PlutoSDR) = obj.rx.buf.C_sample_size # For Pluto this is hidden in the buffer config
| [
27,
34345,
29,
10677,
14,
8021,
23295,
13,
20362,
198,
2,
20368,
19351,
198,
2,
11420,
8798,
273,
13,
20362,
198,
2,
20368,
19351,
220,
198,
2,
15553,
284,
1895,
284,
5243,
10007,
198,
198,
1136,
12331,
7,
26801,
3712,
52,
10227,
33... | 2.968062 | 908 |
<reponame>yakir12/polarimetryLab
push!(LOAD_PATH, pwd())
const assets = "assets"
using Images, ImageMagick, Colors, Photopolarimetry
import Tk.ChooseDirectory
#include("Photopolar.jl")
include("PhotopolarGUIfunctions.jl")
path = Input(ChooseDirectory())
flip = Input(false)
flop = Input(false)
torun = Input{Any}(leftbutton)
rotate = Input(0)
cropright = Input(100)
cropbottom = Input(100)
cropleft = Input(0)
croptop = Input(0)
scale = Input(20)
topI = Input(100)
topdolp = Input(100)
topdolcp = Input(100)
topdorcp = Input(100)
botprofile = Input(0)
topprofile = Input(100)
Sp = Input(5)
ellipse_window = Input(0.5)
minpol = Input(0.2)
maxpol = Input(1.0)
eccentricity = Input(0.01)
torun2 = Input{Any}(leftbutton)
fnames = lift(getfnames,path)
SZ = lift(x -> getsz(x[1]),fnames)
lift(x -> write_originalRGB(x[1]),fnames)
row1 = ["Flip", checkbox(false) >>> flip, "Flop", checkbox(false) >>> flop, button("Done!") >>> torun]
row2 = ["Rotate", slider(-90:90,value=0) >>> rotate, "Scale", slider(1:100,value=20) >>> scale]
row3 = ["Crop top", slider(0:99,value=0) >>> croptop, "Crop bottom", slider(1:100,value=100) >>> cropbottom]
row4 = ["Crop left", slider(0:99,value=0) >>> cropleft, "Crop right", slider(1:100,value=100) >>> cropright]
mymin(u,l) = u > l ? l : u - 1
botprofile2 = lift(mymin, topprofile, botprofile)
s = lift(_ -> path2stokes(value(path),value(flip),value(flop),value(rotate),value(cropright),value(cropbottom),value(cropleft),value(croptop),value(scale), angleoffset = deg2rad(60)), torun, typ=Any, init=empty)
polarimgs = lift(calculatepolar, s, typ=Any, init=empty)
I = lift((x,t) -> recolor_I(x[1],t),polarimgs,topI,typ=Any, init=empty)
dolp = lift((x,t) -> recolor_dolp(x[2],t),polarimgs,topdolp,typ=Any, init=empty)
docp = lift((x,b,t) -> recolor_docp(x[4],b,t),polarimgs,topdolcp,topdorcp,typ=Any, init=empty)
profileimg = lift(stokes2profile, s, Sp, typ=Any, init=empty)
ellipse_img_name = lift(latexellipse, s, ellipse_window, maxpol, minpol, eccentricity, typ=Any, init=empty)
orientationtab = vbox(map(pad(0.5em),[
hbox(map(pad(0.5em), row1)),
hbox(map(pad(0.5em), row2)),
hbox(map(pad(0.5em), row3)),
hbox(map(pad(0.5em), row4)),
consume(flip,flop,rotate,cropright,cropbottom,cropleft,croptop,scale) do flp, flP, rtt, crprght, crpbttm, crplft, crptp, scl
@async cleanassets()
name = updateRGB(flp,flP,rtt,crprght,crpbttm,crplft,crptp,scl)
image("$assets/$name.jpg")
end,
]))
intensitytab = vbox(map(pad(0.5em),[
hbox("Intensity", slider(0:100, value = 100) >>> topI),
hbox("DoLP", slider(0:100, value = 100) >>> topdolp),
hbox("DoLCP", slider(0:100, value = 100) >>> topdolcp),
hbox("DoRCP", slider(0:100, value = 100) >>> topdorcp),
consume(I, dolp, docp, polarimgs, typ=Any, init=empty) do i, dlp, dcp, p
buff = Image(ones(RGB{FixedPointNumbers.UFixed{UInt8,8}},10,size(i,2)))
cat(1,i, buff, dlp, buff, p[3], buff, dcp)
end
]))
profiletab = vbox(map(pad(0.5em),[
hbox(map(pad(0.5em),["upper", slider(2:100, value = 100) >>> topprofile])),
consume(topprofile) do x
hbox(map(pad(0.5em),["Lower", slider(0:(x - 1), value = 0) >>> botprofile]))
end,
hbox(map(pad(0.5em),[tex("S_p"), slider(2:10, value = 5) >>> Sp])),
iiimg = lift(recolor_profile, profileimg, botprofile2, topprofile, typ=Any, init=empty)
]))
lift(_ -> println(join([splitdir(value(path))[2],value(flip),value(flop),value(rotate),value(cropright),value(cropbottom),value(cropleft),value(croptop),value(scale),value(ellipse_window), value(minpol), value(maxpol), value(eccentricity)],",")), torun2, typ=Any, init=empty)
ellipsetab = vbox(map(pad(0.5em),[
hbox(map(pad(0.5em),["Cell size (cm)", slider(0.1:0.05:2, value = 0.5) >>> ellipse_window])),
hbox(map(pad(0.5em),["Min pol.", slider(0:.01:1, value = 0.2) >>> minpol])),
hbox(map(pad(0.5em),["Max pol.", slider(0:.01:1, value = 1.0) >>> maxpol])),
hbox(map(pad(0.5em),["Eccentricity", slider(0:.01:1, value = 0.01) >>> eccentricity])),
button("Done!") >>> torun2,
lift(image,ellipse_img_name, typ=Any, init=empty)
]))
#=ok = Input{Any}(leftbutton)
donetab = vbox(map(pad(0.5em),[
button("Print", raised = true) >>> ok,
consume(ok, typ=Any, init=empty) do o
colorimg = load("$assets/RGB.jpg")
aopimg = convert(Image, value(X)[3])
Iimg = convert(Image, value(I))
dolpimg = convert(Image, value(dolp))
docpimg = convert(Image, value(docp))
buff = round(Int,0.05*mean(widthheight(colorimg)))
buffimg = Iimg[1:buff,:]
buffimg[:] = RGB{U8}(1,1,1)
img = cat(1,colorimg, buffimg, Iimg, buffimg, dolpimg, buffimg, aopimg, buffimg, docpimg)
name = string(abs(rand(Int)))
save( "$assets/$name.jpg",img)
image("assets/$name.jpg")
end
]))=#
function main(window)
push!(window.assets, "layout2")
push!(window.assets, "widgets")
push!(window.assets, "tex")
tabbar = tabs([ hbox("Orientation"), hbox("Photopolarimetry"), hbox("Interneuron"), hbox("Ellipse")])
tabcontent = pages([ orientationtab, intensitytab, profiletab, ellipsetab])
t, p = wire(tabbar, tabcontent, :tab_channel, :selected)
vbox(t, p)
end
| [
27,
7856,
261,
480,
29,
88,
461,
343,
1065,
14,
79,
6192,
320,
11973,
17822,
198,
14689,
0,
7,
35613,
62,
34219,
11,
279,
16993,
28955,
198,
9979,
6798,
796,
366,
19668,
1,
198,
3500,
5382,
11,
7412,
13436,
624,
11,
29792,
11,
591... | 2.316055 | 2,180 |
d = [0 19 17 34 7 20 10 17 28 15 23 29 23 29 21 20 9 16 21 13 12;
19 0 10 41 26 3 27 25 15 17 17 14 18 48 17 6 21 14 17 13 31;
17 10 0 47 23 13 26 15 25 22 26 24 27 44 7 5 23 21 25 18 29;
34 41 47 0 36 39 25 51 36 24 27 38 25 44 54 45 25 28 26 28 27;
7 26 23 36 0 27 11 17 35 22 30 36 30 22 25 26 14 23 28 20 10;
20 3 13 39 27 0 26 27 12 15 14 11 15 49 20 9 20 11 14 11 30;
10 27 26 25 11 26 0 26 31 14 23 32 22 25 31 28 6 17 21 15 4;
17 25 15 51 17 27 26 0 39 31 38 38 38 34 13 20 26 31 36 28 27;
28 15 25 36 35 12 31 39 0 17 9 2 11 56 32 21 24 13 11 15 35;
15 17 22 24 22 15 14 31 17 0 9 18 8 39 29 21 8 4 7 4 18;
23 17 26 27 30 14 23 38 9 9 0 11 2 48 33 23 17 7 2 10 27;
29 14 24 38 36 11 32 38 2 18 11 0 13 57 31 20 25 14 13 17 36;
23 18 27 25 30 15 22 38 11 8 2 13 0 47 34 24 16 7 2 10 26;
29 48 44 44 22 49 25 34 56 39 48 57 47 0 46 48 31 42 46 40 21;
21 17 7 54 25 20 31 13 32 29 33 31 34 46 0 11 29 28 32 25 33;
20 6 5 45 26 9 28 20 21 21 23 20 24 48 11 0 23 19 22 17 32;
9 21 23 25 14 20 6 26 24 8 17 25 16 31 29 23 0 11 15 9 10;
16 14 21 28 23 11 17 31 13 4 7 14 7 42 28 19 11 0 5 3 21;
21 17 25 26 28 14 21 36 11 7 2 13 2 46 32 22 15 5 0 8 25;
13 13 18 28 20 11 15 28 15 4 10 17 10 40 25 17 9 3 8 0 19;
12 31 29 27 10 30 4 27 35 18 27 36 26 21 33 32 10 21 25 19 0]
time_windows = [0 408;
62 68;
181 205;
306 324;
214 217;
51 61;
102 129;
175 186;
250 263;
3 23;
21 49;
79 90;
78 96;
140 154;
354 386;
42 63;
2 13;
24 42;
20 33;
9 21;
275 300]
# s = [1 17 20 10 18 19 11 6 16 2 12 13 7 14 8 3 5 9 21 4 15]
# total_cost = 0
# current_time = 0
# for i in 1:20
# global total_cost += d[s[i], s[i+1]]
# println("current_time", current_time)
# println("d[s[i], s[i+1]]", d[s[i], s[i+1]])
# println("time_windows[s[i+1], 2]", time_windows[s[i+1], :])
# @assert current_time + d[s[i], s[i+1]] <= time_windows[s[i+1], 2]
# global current_time = max(time_windows[s[i+1], 1], current_time + d[s[i], s[i+1]])
# end
# println("total_cost", total_cost)
# total_cost += d[s[21], s[1]]
# println("total_cost", total_cost)
using SeaPearl
struct TsptwVariableSelection{TakeObjective} <: SeaPearl.AbstractVariableSelection{TakeObjective} end
TsptwVariableSelection(;take_objective=false) = TsptwVariableSelection{take_objective}()
function (::TsptwVariableSelection{false})(cpmodel::SeaPearl.CPModel; rng=nothing)
for i in 1:length(keys(cpmodel.variables))
if haskey(cpmodel.variables, "a_"*string(i)) && !SeaPearl.isbound(cpmodel.variables["a_"*string(i)])
return cpmodel.variables["a_"*string(i)]
end
end
println(cpmodel.variables)
end
function closer_city(x::SeaPearl.IntVar, dist::Matrix, model::SeaPearl.CPModel)
i = 1
while "a_"*string(i) != x.id
i += 1
end
current_city = SeaPearl.assignedValue(model.variables["v_"*string(i)])
j = 0
minDist = 0
closer = j
found_one = false
while j < size(dist, 1)
j += 1
if (!found_one || dist[current_city, j] < minDist) && j in x.domain
minDist = dist[current_city, j]
closer = j
found_one = true
end
end
return closer
end
function solve_tsptw(n_city=21)
trailer = SeaPearl.Trailer()
model = SeaPearl.CPModel(trailer)
grid_size = 100
max_tw_gap = 10
max_tw = 100
generator = SeaPearl.TsptwGenerator(n_city, grid_size, max_tw_gap, max_tw)
dist, time_windows = SeaPearl.fill_with_generator!(model, generator)
variableheuristic = TsptwVariableSelection{false}()
my_heuristic(x::SeaPearl.IntVar) = minimum(x.domain)
valueheuristic = SeaPearl.BasicHeuristic((x; cpmodel=nothing) -> closer_city(x, dist, model))
SeaPearl.search!(model, SeaPearl.DFSearch, variableheuristic, valueheuristic)
solution_found = Int[]
for i in 1:(n_city-1)
push!(solution_found, model.solutions[end]["a_"*string(i)])
end
println("Solution: ", solution_found)
println("Nodes visited: ", model.statistics.numberOfNodes)
end
function solve_tsptw_known_instance()
dist = [0 19 17 34 7 20 10 17 28 15 23 29 23 29 21 20 9 16 21 13 12;
19 0 10 41 26 3 27 25 15 17 17 14 18 48 17 6 21 14 17 13 31;
17 10 0 47 23 13 26 15 25 22 26 24 27 44 7 5 23 21 25 18 29;
34 41 47 0 36 39 25 51 36 24 27 38 25 44 54 45 25 28 26 28 27;
7 26 23 36 0 27 11 17 35 22 30 36 30 22 25 26 14 23 28 20 10;
20 3 13 39 27 0 26 27 12 15 14 11 15 49 20 9 20 11 14 11 30;
10 27 26 25 11 26 0 26 31 14 23 32 22 25 31 28 6 17 21 15 4;
17 25 15 51 17 27 26 0 39 31 38 38 38 34 13 20 26 31 36 28 27;
28 15 25 36 35 12 31 39 0 17 9 2 11 56 32 21 24 13 11 15 35;
15 17 22 24 22 15 14 31 17 0 9 18 8 39 29 21 8 4 7 4 18;
23 17 26 27 30 14 23 38 9 9 0 11 2 48 33 23 17 7 2 10 27;
29 14 24 38 36 11 32 38 2 18 11 0 13 57 31 20 25 14 13 17 36;
23 18 27 25 30 15 22 38 11 8 2 13 0 47 34 24 16 7 2 10 26;
29 48 44 44 22 49 25 34 56 39 48 57 47 0 46 48 31 42 46 40 21;
21 17 7 54 25 20 31 13 32 29 33 31 34 46 0 11 29 28 32 25 33;
20 6 5 45 26 9 28 20 21 21 23 20 24 48 11 0 23 19 22 17 32;
9 21 23 25 14 20 6 26 24 8 17 25 16 31 29 23 0 11 15 9 10;
16 14 21 28 23 11 17 31 13 4 7 14 7 42 28 19 11 0 5 3 21;
21 17 25 26 28 14 21 36 11 7 2 13 2 46 32 22 15 5 0 8 25;
13 13 18 28 20 11 15 28 15 4 10 17 10 40 25 17 9 3 8 0 19;
12 31 29 27 10 30 4 27 35 18 27 36 26 21 33 32 10 21 25 19 0]
time_windows = [0 408;
62 68;
181 205;
306 324;
214 217;
51 61;
102 129;
175 186;
250 263;
3 23;
21 49;
79 90;
78 96;
140 154;
354 386;
42 63;
2 13;
24 42;
20 33;
9 21;
275 300]
trailer = SeaPearl.Trailer()
model = SeaPearl.CPModel(trailer)
n_city = 21
grid_size = 100
max_tw_gap = 10
max_tw = 100
generator = SeaPearl.TsptwGenerator(n_city, grid_size, max_tw_gap, max_tw)
dist, time_windows = SeaPearl.fill_with_generator!(model, generator; dist=dist, time_windows=time_windows)
variableheuristic = TsptwVariableSelection{false}()
my_heuristic(x::SeaPearl.IntVar) = minimum(x.domain)
valueheuristic = SeaPearl.BasicHeuristic((x; cpmodel=nothing) -> closer_city(x, dist, model))
SeaPearl.search!(model, SeaPearl.DFSearch, variableheuristic, valueheuristic)
solution_found = Int[]
for i in 1:(n_city-1)
push!(solution_found, model.solutions[end]["a_"*string(i)])
end
println("Solution: ", solution_found)
println("Nodes visited: ", model.statistics.numberOfNodes)
end | [
67,
796,
685,
15,
678,
1596,
4974,
767,
1160,
838,
1596,
2579,
1315,
2242,
2808,
2242,
2808,
2310,
1160,
860,
1467,
2310,
1511,
1105,
26,
201,
198,
1129,
657,
838,
6073,
2608,
513,
2681,
1679,
1315,
1596,
1596,
1478,
1248,
4764,
1596,... | 2.158105 | 3,251 |
immutable NGramModel
ml::Dict{UTF8String, Float64}
n::Integer
end
getindex(m::NGramModel, gram::String) = get(m.ml, utf8(gram), 0.0)
function NGramModel(sentences::Vector{String}, n)
# Tokenize string
tokenize(s) = TextAnalysis.tokenize(TextAnalysis.EnglishLanguage, s)
# NGramize tokens
function ngramize(words)
# Use "*" symbol start and "STOP" for end of sentence
words = vcat("*", "*", words, "STOP")
n_words = length(words)
tokens = Dict{UTF8String, Int}()
for m in 1:n
for index in 1:(n_words - m + 1)
token = join(words[index:(index + m - 1)], " ")
tokens[token] = get(tokens, token, 0) + 1
end
end
tokens
end
# Create a NGramDocument from a gram dict
document(gram) = NGramDocument(gram, n)
togeneric(docs) = convert(Array{GenericDocument, 1}, docs)
# Create corpus
corpus= Corpus(map(x -> x |> tokenize |> ngramize |> document, sentences) |> togeneric)
# Create lexicon
update_lexicon!(corpus)
# Create model with maximum likelihood estimation
# => Linear approximation
ml = Dict{UTF8String, Float64}()
words = collect(UTF8String, keys(lexicon(corpus)))
n_words = length(words)
gramcount = lexicon(corpus)
function likelihood(gram)
println(join(gram, " "))
res = 0
for i = 1:(n-1)
println("$(i) => $(join(gram[i:end], " ")) / $(join(gram[i:(end - 1)], " "))")
res += (1 / n) * gramcount[join(gram[i:end], " ")] / gramcount[join(gram[i:(end - 1)], " ")]
end
println("$(n) => $(gram[end]) / $(n_words)")
res += (1 / n) * gramcount[gram[end]] / n_words
res
end
for w in words
trigram = tokenize(w)
if length(trigram) == n
ml[w] = likelihood(trigram)
end
end
NGramModel(ml, n)
end
| [
198,
198,
8608,
18187,
39058,
859,
17633,
198,
220,
220,
220,
25962,
3712,
35,
713,
90,
48504,
23,
10100,
11,
48436,
2414,
92,
198,
220,
220,
220,
299,
3712,
46541,
198,
437,
198,
198,
1136,
9630,
7,
76,
3712,
10503,
859,
17633,
11,... | 2.196591 | 880 |
abstract type CIService end
struct GitHubActions <: CIService
username::String
email::String
api_hostname::String
clone_hostname::String
function GitHubActions(;
username="github-actions[bot]",
email="41898282+<EMAIL>-<EMAIL>[<EMAIL>",
api_hostname="https://api.github.com",
clone_hostname="github.com",
)
return new(username, email, api_hostname, clone_hostname)
end
end
struct GitLabCI <: CIService
username::String
email::String
api_hostname::String
clone_hostname::String
function GitLabCI(;
username="gitlab-ci[bot]",
email="gitlab-ci[bot]<EMAIL>",
api_hostname="https://gitlab.com/api/v4",
clone_hostname="gitlab.com",
)
return new(username, email, api_hostname, clone_hostname)
end
end
ci_repository(::GitHubActions, env::AbstractDict=ENV) = env["GITHUB_REPOSITORY"]
ci_repository(::GitLabCI, env::AbstractDict=ENV) = env["CI_PROJECT_PATH"]
ci_token(::GitHubActions, env::AbstractDict=ENV) = env["GITHUB_TOKEN"]
ci_token(::GitLabCI, env::AbstractDict=ENV) = env["GITLAB_TOKEN"]
function get_api_and_repo(ci::GitHubActions; env=ENV)
token = GitHub.Token(ci_token(ci, env))
api = GitHub.GitHubAPI(; token=token, url=ci.api_hostname)
repo, _ = @mock GitForge.get_repo(api, ci_repository(ci, env))
return api, repo
end
function get_api_and_repo(ci::GitLabCI; env=ENV)
token = GitLab.PersonalAccessToken(ci_token(ci, env))
api = GitLab.GitLabAPI(; token=token, url=ci.api_hostname)
repo, _ = @mock GitForge.get_repo(api, ci_repository(ci, env))
return api, repo
end
function get_api_and_repo(ci::Any)
err = "Unknown CI Config: $(typeof(ci))"
@error(err)
return throw(ErrorException(err))
end
function auto_detect_ci_service(; env::AbstractDict=ENV)
if haskey(env, "GITHUB_REPOSITORY")
return GitHubActions()
elseif haskey(env, "GITLAB_CI")
return GitLabCI()
else
throw(UnableToDetectCIService("Could not detect CI service"))
end
end
| [
397,
8709,
2099,
36159,
712,
501,
886,
198,
198,
7249,
21722,
32,
2733,
1279,
25,
36159,
712,
501,
198,
220,
220,
220,
20579,
3712,
10100,
198,
220,
220,
220,
3053,
3712,
10100,
198,
220,
220,
220,
40391,
62,
4774,
3672,
3712,
10100,
... | 2.309497 | 895 |
<reponame>invenia/Patchwork.jl
# Use Julia's `jl_type_morespecific` function to emulate Julia's multiple dispatch across
# generic functions.
#
# Origin:
# https://github.com/JuliaLang/julia/blob/master/doc/src/devdocs/types.md#subtyping-and-method-sorting
type_morespecific(a, b) = ccall(:jl_type_morespecific, Bool, (Any, Any), a, b)
"""
anonymous_signature(m::Method) -> Type{<:Tuple}
Construct a Tuple of the methods signature with the function type removed
# Example
```jldoctest
julia> m = first(methods(first, (String,)));
julia> m.sig
Tuple{typeof(first),Any}
julia> anonymous_signature(m)
Tuple{Any}
```
"""
anonymous_signature(m::Method) = anonymous_signature(m.sig)
anonymous_signature(sig::DataType) = Tuple{sig.parameters[2:end]...}
anonymous_signature(sig::UnionAll) = UnionAll(sig.var, anonymous_signature(sig.body))
"""
anon_morespecific(a::Method, b::Method) -> Bool
Determine which method is more specific for multiple dispatch without considering the
function type. By not considering the function type we can determine which method is more
specific as if they are a part of the same generic function.
"""
function anon_morespecific(a::Method, b::Method)
# Drop the function type from the parameter
a_sig = anonymous_signature(a)
b_sig = anonymous_signature(b)
return type_morespecific(a_sig, b_sig)
end
"""
dispatch(funcs::AbstractVector, args...) -> Tuple{Method, Any}
Choose which method to execute based upon the provided arguments (values not types).
Emulates Julia's multiple dispatch system but allows for dispatching between methods of
multiple generic functions instead of just methods of a single generic function. Returns a
tuple of the selected method and the generic function of the method.
When the function to dispatch to is ambiguous last ambiguous function in the vector is used.
"""
function dispatch(funcs::AbstractVector, args...)
arg_types = map(Core.Typeof, args)
best_method = nothing
best_function = nothing
for f in reverse(funcs)
# Since arguments will be using concrete types `methods` should only return up to
# one method.
for m in methods(f, arg_types)
if best_method === nothing || anon_morespecific(m, best_method)
best_method = m
best_function = f
end
end
end
return best_method, best_function
end
| [
27,
7856,
261,
480,
29,
259,
574,
544,
14,
33952,
1818,
13,
20362,
198,
2,
5765,
22300,
338,
4600,
20362,
62,
4906,
62,
76,
2850,
431,
7790,
63,
2163,
284,
33836,
22300,
338,
3294,
27965,
1973,
198,
2,
14276,
5499,
13,
198,
2,
198... | 2.919903 | 824 |
"""
Facet (`Vector{Int64}`) -> `BellGame`
convert(
::Type{BellGame},
facet::Vector{Int64},
scenario::Union{BlackBox, LocalSignaling};
rep = "normalized"::String
)
"""
function convert( ::Type{BellGame},
facet::Vector{Int64},
scenario::Union{BlackBox,LocalSignaling};
rep = "normalized"::String
)
if !(rep in ("normalized", "generalized"))
throw(DomainError(rep, "Argument `rep` must be either 'normalized' or 'generalized'"))
end
game_dims = strategy_dims(scenario)
div_facet = facet .÷ gcd(facet...)
bound = div_facet[end]
game_matrix = (rep == "normalized") ? cat(
reshape(div_facet[1:end-1], (game_dims[1]-1, game_dims[2])),
zeros(Int64, (1,game_dims[2])),
dims=1
) : reshape(div_facet[1:end-1], game_dims)
BellGame(_reverse_game_normalization(game_matrix, bound)...)
end
"""
Facet (`Vector{Int64}`) -> `BellGame`
convert(
::Type{BellGame},
facet::Vector{Int64},
scenario::BipartiteNonSignaling;
rep = "non-signaling"::String
)
Transforms LocalPolytope facets into `BellGame` types.
"""
function convert(::Type{BellGame},
facet::Vector{Int64},
scenario::BipartiteNonSignaling;
rep = "non-signaling"::String
)
if !(rep in ["non-signaling","normalized","generalized"])
throw(DomainError(rep, "input `rep` must be in [\"non-signaling\",\"normalized\",\"generalized\"]"))
end
game_dims = strategy_dims(scenario)
game = (rep == "generalized") ? reshape(facet[1:end-1], game_dims) : zeros(Int64, game_dims)
bound = facet[end]
if rep == "non-signaling"
α_dim = (scenario.A-1)*scenario.X
β_dim = (scenario.B-1)*scenario.Y
α_game = reshape(facet[1:α_dim], (scenario.A-1, scenario.X))
β_game = reshape(facet[α_dim+1:α_dim+β_dim], (scenario.B-1, scenario.Y))
αβ_game = reshape(facet[α_dim+β_dim+1:end-1], ((scenario.A-1)*(scenario.B-1), scenario.X*scenario.Y))
αβ_col_sum = sum.(eachcol(αβ_game))
# using non-signaling constraints to remove g_a,x
for a in 1:scenario.A-1
game[(a-1)*scenario.B+1:(a-1)*scenario.B + scenario.B-1,:] = αβ_game[(a-1)*(scenario.B-1)+1:a*(scenario.B-1),:]
for x in 1:scenario.X
if α_game[a,x] != 0
x_vec = zeros(Int64, scenario.X)
x_vec[x] = 1
y_vec = ones(Int64,scenario.Y)
αβ_col_id = findfirst(i -> i != 0, kron(x_vec,y_vec).*αβ_col_sum)
game[(a-1)*scenario.B+1:a*scenario.B,αβ_col_id] += α_game[a,x]*ones(Int64,scenario.B)
end
end
end
# using non-signaling constraints to remove g_b,y
for b in 1:scenario.B-1
game_row_ids = b:scenario.B:scenario.A*scenario.B-1
for y in 1:scenario.Y
if β_game[b,y] != 0
y_vec = zeros(Int64, scenario.Y)
y_vec[y] = 1
x_vec = ones(Int64, scenario.X)
αβ_col_id = findfirst(i -> i != 0, kron(x_vec,y_vec).*αβ_col_sum)
game[game_row_ids,αβ_col_id] += β_game[b,y]*ones(Int64,scenario.A)
end
end
end
elseif rep == "normalized"
game[1:game_dims[1]-1,:] = reshape(facet[1:end-1], (game_dims[1]-1, game_dims[2]))
end
(game, bound) = _reverse_game_normalization(game,bound)
BellGame(game,bound)
end
"""
`XPORTA.IEQ` to `BellGame`'s
convert(
::Type{Vector{BellGame}},
ieq::IEQ,
scenario::Union{BlackBox, LocalSignaling};
rep = "normalized" :: String
)
"""
function convert(::Type{Vector{BellGame}},
ieq::IEQ,
scenario::Union{BlackBox,LocalSignaling};
rep = "normalized"::String
)
inequalities = convert.(Int64, ieq.inequalities)
map( row_id -> convert(BellGame, inequalities[row_id,:], scenario, rep=rep), 1:size(inequalities,1))
end
"""
`BellGame` -> Facet (`Vector{Int64}`)
convert(::Type{Vector{Int64}}, BG::BellGame; rep = "normalized" :: String)
"""
function convert(::Type{Vector{Int64}}, BG::BellGame; rep = "normalized"::String)
if !(rep in ("normalized", "generalized"))
throw(DomainError(rep, "Argument `rep` must be either 'normalized' or 'generalized'"))
end
bound = BG.β
game_matrix = BG.game[:,:]
game_dims = size(game_matrix)
if rep == "normalized"
(game_matrix, bound) = _apply_game_normalization!(game_matrix, bound)
game_matrix = game_matrix[1:(end-1),:]
end
vcat(game_matrix[:], bound)
end
"""
BellGame -> Vector{Int64}
convert(::Type{Vector{Int64}},
BG::BellGame,
scenario::BipartiteNonSignaling;
rep = "non-signaling" :: String
)
Transforms a `BellGame` for a `BipartiteNonSignaling` scenario into a facet vector.
"""
function convert(::Type{Vector{Int64}},
BG::BellGame,
scenario::BipartiteNonSignaling;
rep = "non-signaling" :: String
)
if !(rep in ["non-signaling","normalized","generalized"])
throw(DomainError(rep, "input `rep` must be in [\"non-signaling\",\"normalized\",\"generalized\"]"))
end
game_dims = size(BG)
v_dim = LocalPolytope.vertex_dims(scenario, rep)
facet = (rep == "generalized") ? vcat(BG[:], BG.β) : zeros(Int64, v_dim+1)
if rep == "normalized"
(game_matrix, bound) = _apply_game_normalization(BG[:,:], BG.β)
facet = vcat(game_matrix[1:game_dims[1]-1,:][:], bound)
elseif rep == "non-signaling"
(game_matrix, bound) = _apply_game_normalization!(BG[:,:], BG.β)
# construct G(a|x) and G(b|y)
# in each column, subtract off from each Alice/Bob column the values excluded from the non-signaling
α_game = zeros(Int64, (scenario.A-1, scenario.X))
β_game = zeros(Int64, (scenario.B-1, scenario.Y))
# removing greatest output for Alice using non-signaling constraint
for a in 1:scenario.A-1
target_row = a * scenario.B
subtract_vals = game_matrix[target_row,:]
a_dims = (a-1)*scenario.B +1: a * scenario.B
game_matrix[a_dims,:] -= ones(Int64, scenario.B) * subtract_vals'
α_game_rows = map(x -> begin
x_dims = (x-1)*scenario.Y+1:x*scenario.Y
sum(subtract_vals[x_dims])
end, 1:scenario.X)
α_game[a,:] = α_game_rows
end
# removing greatest outputs for Bob using non-signaling constraint
for b in 1:scenario.B-1
target_row = (scenario.A-1) * (scenario.B) + b
subtract_vals = game_matrix[target_row,:]
b_dims = b:scenario.B:scenario.A * scenario.B -1
game_matrix[b_dims,:] -= ones(Int64, scenario.A) * subtract_vals'
β_game_rows = map(y -> begin
y_dims = y:scenario.Y:scenario.X*scenario.Y
sum(subtract_vals[y_dims])
end, 1:scenario.Y)
β_game[b,:] = β_game_rows
end
# All remaining terms are in the no-sig subspace and are taken as is
αβ_game = zeros(Int64, ((scenario.A-1)*(scenario.B-1), scenario.X*scenario.Y))
for a in 1:scenario.A-1
αβ_game[(a-1)*(scenario.B-1) + 1:a*(scenario.B-1),:] = game_matrix[(a-1)*scenario.B+1:a*scenario.B-1,:]
end
facet = vcat(α_game[:], β_game[:], αβ_game[:], bound)
end
facet
end
"""
`BellGame`'s to `XPORTA.IEQ`
convert(::Type{IEQ}, bell_games::Vector{BellGame}; rep = "normalized" :: String)
"""
function convert(::Type{IEQ}, bell_games::Vector{BellGame}; rep = "normalized"::String)
ieq_vectors = map( bg -> convert(Vector{Int64}, bg, rep=rep), bell_games )
IEQ(inequalities = hcat(ieq_vectors...)'[:,:])
end
"""
Applies the normalization constraint to remove all negative values in the provided
`game_matrix`. Returns a tuple `(new_game_matrix, new_bound)`
"""
function _reverse_game_normalization(game_matrix::Matrix{Int64}, bound::Int64) :: Tuple{Matrix{Int64}, Int64}
new_bound = bound
new_game_matrix = game_matrix
for col_id in 1:size(game_matrix,2)
col = game_matrix[:,col_id]
col_min = min(col...)
if col_min != 0
new_game_matrix[:,col_id] .-= col_min
new_bound -= col_min
end
end
(new_game_matrix, new_bound)
end
function _apply_game_normalization!(game_matrix::Matrix{Int64}, bound::Int64) :: Tuple{Matrix{Int64}, Int64}
for col_id in 1:size(game_matrix,2)
col = game_matrix[:,col_id]
if col[end] !== 0
game_matrix[:,col_id] .-= col[end]
bound -= col[end]
end
end
(game_matrix, bound)
end
| [
37811,
198,
37,
23253,
357,
63,
38469,
90,
5317,
2414,
92,
63,
8,
4613,
4600,
36488,
8777,
63,
628,
220,
220,
220,
10385,
7,
198,
220,
220,
220,
220,
220,
220,
220,
7904,
6030,
90,
36488,
8777,
5512,
198,
220,
220,
220,
220,
220,
... | 2.071243 | 4,239 |
"""
KMarkovEnvironment{OV, M<:POMDP, S, R<:AbstractRNG} <: AbstractEnvironment{OV}
A k-markov wrapper for MDPs and POMDPs, given a MDP or POMDP create an AbstractEnvironment where s_t = (o_t, ..., o_t-k)
The K-Markov observation is represented by a vector of k observations.
"""
mutable struct KMarkovEnvironment{OV, M<:POMDP, S, R<:AbstractRNG} <: AbstractEnvironment{OV}
problem::M
k::Int64
state::S
obs::Vector{OV}
rng::R
end
function KMarkovEnvironment(problem::M,
ov::Type{<:AbstractArray} = obsvector_type(problem);
k::Int64=1,
rng::AbstractRNG=MersenneTwister(0)
) where {M<:POMDP}
# determine size of obs vector
s = rand(rng, initialstate(problem))
o = rand(rng, initialobs(problem, s))
obs = convert_o(ov, o, problem)
# init vector of obs
obsvec = fill(zeros(eltype(ov), size(obs)...), k)
return KMarkovEnvironment{ov, M, typeof(s), typeof(rng)}(
problem,
k,
rand(rng, initialstate(problem)),
obsvec,
rng)
end
"""
reset!(env::KMarkovEnvironment{OV})
Reset an POMDP environment by sampling an initial state,
generating an observation and returning it.
"""
function reset!(env::KMarkovEnvironment{OV}) where OV
s = rand(env.rng, initialstate(env.problem))
env.state = s
o = rand(env.rng, initialobs(env.problem, s))
obs = convert_o(OV, o, env.problem)
fill!(env.obs, obs)
return env.obs
end
"""
step!(env::POMDPEnvironment{OV}, a::A)
Take in an POMDP environment, and an action to execute, and
step the environment forward. Return the observation, reward,
terminal flag and info
"""
function step!(env::KMarkovEnvironment{OV}, a::A) where {OV, A}
s, o, r, info = @gen(:sp, :o, :r, :info)(env.problem, env.state, a, env.rng)
env.state = s
t = isterminal(env.problem, s)
obs = convert_o(OV, o, env.problem)
# shift the old observation to lower indices
for i=1:env.k-1
env.obs[i] = env.obs[i + 1]
end
env.obs[env.k] = obs
return env.obs, r, t, info
end
"""
actions(env::KMarkovEnvironment)
Return an action object that can be sampled with rand.
"""
function POMDPs.actions(env::KMarkovEnvironment)
return actions(env.problem)
end
"""
sample_action(env::Union{POMDPEnvironment, MDPEnvironment})
Sample an action from the action space of the environment.
"""
function sample_action(env::KMarkovEnvironment)
return rand(env.rng, actions(env))
end
"""
obs_dimensions(env::KMarkovEnvironment{OV})
returns the size of the observation vector.
The object return by `step!` and `reset!` is a vector of k observation vector of size `obs_dimensions(env)`
It generates an initial state, converts it to an array and returns its size.
"""
function obs_dimensions(env::KMarkovEnvironment{OV}) where OV
s = rand(env.rng, initialstate(env.problem))
o = rand(env.rng, initialobs(env.problem, s))
obs_dim = size(convert_o(OV, o, env.problem))
return (obs_dim..., env.k)
end
| [
37811,
198,
220,
220,
220,
509,
9704,
709,
31441,
90,
8874,
11,
337,
27,
25,
47,
2662,
6322,
11,
311,
11,
371,
27,
25,
23839,
49,
10503,
92,
1279,
25,
27741,
31441,
90,
8874,
92,
198,
32,
479,
12,
4102,
709,
29908,
329,
337,
632... | 2.470353 | 1,248 |
<filename>src/QuadOsc.jl
module QuadOsc
export quadosc
include("SeriesAccelerations.jl")
using .SeriesAccelerations
using QuadGK
@doc raw"""
quadosc(fn, a, Inf, fnzeros; ...)
Integrate the function `fn(x)` from `a` to `Inf`. The function `fnzeros(n)`
takes an integer `n` and is such that `fn(fnzeros(n)) == 0`. The algorithm
works by integrating between successive zeros, and accelerating the alternating
series.
The argument `pren` is the number of intervals to integrate before applying the
series acceleration.
`atol` and `rtol` specify the absolute and relative tolerances for determining
convergence.
`order` is passed on to `quadgk()` of the
[QuadGK](https://github.com/JuliaMath/QuadGK.jl) package.
`nconvergences` is the number of iterations before convergence is declared.
See `?QuadOsc.accel_cohen_villegas_zagier` for details on the series
acceleration.
"""
function quadosc(f::Function, a::Number, b::Number, zerosf::Function; pren=2,
atol=zero(Float64), rtol=sqrt(eps(Float64)), order=7,
#nconvergences=ceil(Int,-1.31*log10(rtol)),
#accelerator=accel_cohen_villegas_zagier)
nconvergences=5,
accelerator=accel_wynn_eps)
@assert b == Inf
T = Float64
i1 = findfirst(n -> zerosf(n) - a >= 0, 1:typemax(Int))
z1 = zerosf(i1)
Ipre, Epre = quadgk(f, a, z1; atol=atol, rtol=rtol, order=order)
z0 = z1
for i=1:pren
i1 += 1
z1 = zerosf(i1)
I, E = quadgk(f, z0, z1; atol=atol, rtol=rtol, order=order)
Ipre += I
Epre += E
z0 = z1
end
I = T(0)
oldI = T(0)
ak = T[]
ek = T[]
while nconvergences > 0
i1 += 1
z1 = zerosf(i1)
I, E = quadgk(f, z0, z1; atol=atol, rtol=rtol)
push!(ak, I)
push!(ek, E)
z0 = z1
I = accelerator(ak)
adiff = abs(I - oldI)
rdiff = adiff * 2 / abs(I + oldI)
if adiff <= atol || rdiff <= rtol
nconvergences -= 1
end
oldI = I
end
I = Ipre + I
E = Epre + abs(accelerator(ek))
return I, E
end
end # module
# vim: set sw=4 et sts=4 :
| [
27,
34345,
29,
10677,
14,
4507,
324,
46,
1416,
13,
20362,
198,
21412,
20648,
46,
1416,
198,
198,
39344,
15094,
17500,
198,
198,
17256,
7203,
27996,
12832,
7015,
602,
13,
20362,
4943,
198,
3500,
764,
27996,
12832,
7015,
602,
198,
3500,
... | 2.090646 | 1,037 |
<filename>src/hartree_fock.jl<gh_stars>1-10
module HartreeFock
export extract_tij_Uijlk, solve_scf
using LinearAlgebra
import PyCall: pyimport
"""
Extract tij and Uijlk from a FermionOperator object, representing a Hamiltonian.
The Hamiltonian must be number conserving, and is allowed to contain upto two-body operators.
"""
function extract_tij_Uijlk(ham)
ofermion = pyimport("openfermion")
FermionOperator = ofermion.ops.operators.FermionOperator
count_qubits = ofermion.utils.operator_utils.count_qubits
normal_ordered = ofermion.transforms.opconversions.normal_ordered
ham = normal_ordered(ham)
if ham.many_body_order() > 4
throw(DomainError("Allowed only upto two-body operators!"))
end
if !ham.is_two_body_number_conserving()
throw(DomainError("ham must be number conserving!"))
end
nflavors = count_qubits(ham)
T = ham.zero().constant isa Real ? Float64 : Complex{Float64}
tij = zeros(T, nflavors, nflavors)
Uijlk = zeros(T, nflavors, nflavors, nflavors, nflavors)
for (ops, constant) in ham.terms
flavors = collect((o[1]+1 for o in ops))
if length(ops) == 2
tij[flavors[1], flavors[2]] += constant
elseif length(ops) == 4
Uijlk[flavors[1], flavors[2], flavors[3], flavors[4]] += 2*constant
else
throw(DomainError("Invalid many_body_order!"))
end
end
tij, Uijlk
end
"""
Compute Tij
"""
function compute_Tij(tij, barUijlk, rhoij)
Tij = copy(tij)
N = size(tij)[1]
for i in 1:N, j in 1:N
for k in 1:N, l in 1:N
Tij[i,j] += barUijlk[i,k,j,l] * rhoij[k,l]
end
end
Tij
end
function compute_rhoij!(rhoij, evecs, nelec)
rhoij .= 0.0
N = size(rhoij)[1]
for e in 1:nelec
for i in 1:N, j in 1:N
rhoij[i, j] += conj(evecs[i,e]) * evecs[j,e]
end
end
end
"""
Solve the Hartree-Fock equation self-consistently:
hat{H} = sum_{ij=1}^N t_{ij} hat{c}^dagger_i hat{c}_j
+ frac{1}{2} sum_{ijkl=1}^N U_{ijlk}hat{c}^dagger_i hat{c}^dagger_j hat{c}_k hat{c}_l
"""
function solve_scf(tij::Array{T,2}, Uijlk::Array{T,4}, rhoij0::Array{T,2}, nelec::Integer, niter::Integer, mixing::Float64) where T
# Antisymmetric Coulomb tensor
barUijlk = Uijlk - permutedims(Uijlk, [1, 2, 4, 3])
rhoij = copy(rhoij0)
rhoij_out = copy(rhoij)
for iter in 1:niter
Tij = compute_Tij(tij, Uijlk, rhoij)
e = eigen(Hermitian(Tij))
compute_rhoij!(rhoij_out, e.vectors, nelec)
@. rhoij = (1-mixing) * rhoij + mixing * rhoij_out
end
rhoij
end
end | [
27,
34345,
29,
10677,
14,
18647,
631,
62,
69,
735,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
11345,
631,
37,
735,
198,
198,
39344,
7925,
62,
83,
2926,
62,
52,
2926,
75,
74,
11,
8494,
62,
1416,
69,
198,
198,
3... | 2.065149 | 1,274 |
<reponame>korzhimanov/Vasilek.jl<gh_stars>1-10
module VlasovBenchmarks
using BenchmarkTools
const SUITE = BenchmarkGroup()
include(joinpath(dirname(@__FILE__),"..","src","VlasovSolver","LaxWendroff.jl"))
import .LaxWendroff
SUITE["LaxWendroff"] = BenchmarkGroup()
SUITE["LaxWendroff c"] = BenchmarkGroup()
include(joinpath(dirname(@__FILE__),"..","src","VlasovSolver","Upwind.jl"))
import .Upwind
SUITE["Upwind"] = BenchmarkGroup()
SUITE["Upwind с"] = BenchmarkGroup()
include(joinpath(dirname(@__FILE__),"..","src","VlasovSolver","Godunov.jl"))
import .Godunov
SUITE["Godunov constant"] = BenchmarkGroup()
SUITE["Godunov linear"] = BenchmarkGroup()
SUITE["Godunov linear VanLeer"] = BenchmarkGroup()
include(joinpath(dirname(@__FILE__),"..","src","VlasovSolver","SemiLagrangian.jl"))
import .SemiLagrangian
SUITE["SemiLagrangian linear"] = BenchmarkGroup()
SUITE["SemiLagrangian quadratic"] = BenchmarkGroup()
SUITE["SemiLagrangian cubic"] = BenchmarkGroup()
include(joinpath(dirname(@__FILE__),"..","src","VlasovSolver","PFC.jl"))
import .PFC
SUITE["PFC"] = BenchmarkGroup()
Δx = Dict()
Δt = Dict()
v = Dict()
f₀ = Dict()
f = Dict()
advect! = Dict()
for N in [100, 1000, 10000]
Δx[N] = 0.01
Δt[N] = 0.8*Δx[N]
v[N] = 1
f₀[N] = [1.0 + 0.01*sin(2π*i*Δx[N]) for i = 0:N]
f[N] = similar(f₀[N])
advect![N] = Dict()
advect![N][:LaxWendroff] = LaxWendroff.generate_solver(f₀[N], f[N], v[N]*Δt[N]/Δx[N])
SUITE["LaxWendroff"]["advect $N"] = @benchmarkable advect![$N][:LaxWendroff]()
advect![N][:LaxWendroff_c] = LaxWendroff.generate_solver(f₀[N], f[N])
SUITE["LaxWendroff c"]["advect $N"] = @benchmarkable advect![$N][:LaxWendroff_c]($(v[N]*Δt[N]/Δx[N]))
advect![N][:Upwind] = Upwind.generate_solver(f₀[N], f[N], v[N]*Δt[N]/Δx[N])
SUITE["Upwind"]["advect $N"] = @benchmarkable advect![$N][:Upwind]()
advect![N][:Upwind_с] = Upwind.generate_solver(f₀[N], f[N])
SUITE["Upwind с"]["advect $N"] = @benchmarkable advect![$N][:Upwind_с]($(v[N]*Δt[N]/Δx[N]))
advect![N][:Godunov_constant] = Godunov.generate_solver(f₀[N], f[N], :Riemann_constant)
SUITE["Godunov constant"]["advect $N"] = @benchmarkable advect![$N][:Godunov_constant]($(v[N]*Δt[N]/Δx[N]))
advect![N][:Godunov_linear] = Godunov.generate_solver(f₀[N], f[N], :Riemann_linear)
SUITE["Godunov linear"]["advect $N"] = @benchmarkable advect![$N][:Godunov_linear]($(v[N]*Δt[N]/Δx[N]))
advect![N][:Godunov_linear_VanLeer] = Godunov.generate_solver(f₀[N], f[N], :Riemann_linear; flux_limiter = :VanLeer)
SUITE["Godunov linear VanLeer"]["advect $N"] = @benchmarkable advect![$N][:Godunov_linear_VanLeer]($(v[N]*Δt[N]/Δx[N]))
advect![N][:SemiLagrangian_linear] = SemiLagrangian.generate_solver(f₀[N], f[N]; interpolation_order = :Linear)
SUITE["SemiLagrangian linear"]["advect $N"] = @benchmarkable advect![$N][:SemiLagrangian_linear]($(v[N]*Δt[N]/Δx[N]))
advect![N][:SemiLagrangian_quadratic] = SemiLagrangian.generate_solver(f₀[N], f[N]; interpolation_order = :Quadratic)
SUITE["SemiLagrangian quadratic"]["advect $N"] = @benchmarkable advect![$N][:SemiLagrangian_quadratic]($(v[N]*Δt[N]/Δx[N]))
advect![N][:SemiLagrangian_cubic] = SemiLagrangian.generate_solver(f₀[N], f[N]; interpolation_order = :Cubic)
SUITE["SemiLagrangian cubic"]["advect $N"] = @benchmarkable advect![$N][:SemiLagrangian_cubic]($(v[N]*Δt[N]/Δx[N]))
advect![N][:PFC] = PFC.generate_solver(f₀[N], f[N])
SUITE["PFC"]["advect $N"] = @benchmarkable advect![$N][:PFC]($(v[N]*Δt[N]/Δx[N]))
end
end # module
| [
27,
7856,
261,
480,
29,
74,
273,
23548,
24086,
709,
14,
53,
292,
576,
74,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
569,
21921,
709,
44199,
14306,
198,
198,
3500,
25187,
4102,
33637,
198,
198,
9979,
13558,
12709,
... | 1.954821 | 1,815 |
export mutate
"Mutate: Change weights"
function mutate_weights(indiv::NEATInd, cfg::Dict)
# TODO: check original weight mutation
ind = NEATInd(indiv)
for c in ind.connections
if rand() < cfg["p_mut_weights"]
c.weight = c.weight + randn()*cfg["weight_factor"]
end
end
ind
end
"split a connection by adding a neuron"
function split_connection(c::Connection, neuron_nb::Int, cfg::Dict)
# Create neuron
n = Neuron(0.0, 0.0, false)
# Create 2 new connections
# TODO: neuron number does not correspond to order for recurrence checks
c1 = Connection(c.in_node, neuron_nb, 1.0, true, cfg["innovation_max"]+1)
c2 = Connection(neuron_nb, c.out_node, c.weight, true, cfg["innovation_max"]+2)
cfg["innovation_max"] += 2
c1, c2, n
end
"Mutation: Add a node in a connection"
function mutate_add_neuron(indiv::NEATInd, cfg::Dict)
ind = NEATInd(indiv)
ci = rand(1:length(ind.connections))
c = ind.connections[ci]
c1, c2, n = split_connection(c, length(ind.neurons), cfg)
deleteat!(ind.connections, ci)
append!(ind.connections, [c1, c2])
push!(ind.neurons, n)
ind
end
"Mutation: Add a connection between 2 random nodes"
function mutate_add_connection(indiv::NEATInd, cfg::Dict, allow_recurrent::Bool=false)
ind = NEATInd(indiv)
# Select 2 different neurons at random, can't create a connection that already exists
nn = length(ind.neurons)
valid = trues(nn, nn)
for c in ind.connections
valid[c.in_node, c.out_node] = false
end
conns = findall(valid)
if length(conns) > 0
shuffle!(conns)
cfg["innovation_max"] += 1
c = Connection(conns[1][1], conns[1][2], rand_weight(), true, cfg["innovation_max"])
push!(ind.connections, c)
end
ind
end
"Mutation: Toggle Enabled"
function mutate_enabled(indiv::NEATInd; n_times=1)
ind = NEATInd(indiv)
ids = randperm(length(ind.connections))
for i in 1:n_times
ind.connections[ids[i]].enabled = !ind.connections[ids[i]].enabled
end
ind
end
"mutate(ind::NEATInd, cfg::Dict): return a new mutated individual"
function mutate(indiv::NEATInd, cfg::Dict)
if rand() < cfg["p_mutate_add_neuron"]
return mutate_add_neuron(indiv, cfg)
elseif rand() < cfg["p_mutate_add_connection"]
return mutate_add_connection(indiv, cfg)
elseif rand() < cfg["p_mutate_weights"]
return mutate_weights(indiv, cfg)
elseif rand() < cfg["p_mutate_enabled"]
return mutate_enabled(indiv, cfg)
end
# return clone if no mutation occurs
NEATInd(indiv)
end
| [
39344,
4517,
378,
198,
198,
1,
41603,
378,
25,
9794,
19590,
1,
198,
8818,
4517,
378,
62,
43775,
7,
521,
452,
3712,
12161,
1404,
5497,
11,
30218,
70,
3712,
35,
713,
8,
198,
220,
220,
220,
1303,
16926,
46,
25,
2198,
2656,
3463,
1514... | 2.349107 | 1,120 |
<reponame>gdalle/MultiAgentPathFinding.jl<filename>test/learn_agents.jl<gh_stars>0
## Imports
using Base.Threads
using Flux
using Graphs
using InferOpt
using MultiAgentPathFinding
using PythonCall
using ProgressMeter
using UnicodePlots
## Test
rail_generators = pyimport("flatland.envs.rail_generators")
line_generators = pyimport("flatland.envs.line_generators")
rail_env = pyimport("flatland.envs.rail_env")
rail_generator = rail_generators.sparse_rail_generator(; max_num_cities=4)
line_generator = line_generators.sparse_line_generator()
pyenv = rail_env.RailEnv(;
width=30,
height=30,
number_of_agents=20,
rail_generator=rail_generator,
line_generator=line_generator,
random_seed=11,
)
pyenv.reset();
## Data generation
nb_instances = 10
instances = Vector{typeof(flatland_mapf(pyenv))}(undef, nb_instances);
@showprogress "Generating instances: " for k in 1:nb_instances
pyenv.reset()
instances[k] = flatland_mapf(pyenv)
end
solutions = Vector{Solution}(undef, nb_instances);
@threads for k in 1:nb_instances
@info "Instance $k solved by thread $(threadid())"
solutions[k] = large_neighborhood_search(instances[k]; N=10, steps=100, progress=false)
end
T = maximum(max_time(solution) for solution in solutions) * 2
X = [agents_embedding(mapf) for mapf in instances];
Y = [solution_to_vec(solution, mapf; T=T) for (solution, mapf) in zip(solutions, instances)];
function maximizer(θ; mapf)
permutation = sortperm(θ; rev=true)
solution = cooperative_astar(mapf, permutation)
ŷ = solution_to_vec(solution, mapf; T=T)
return ŷ
end
## Initialization
encoder = Chain(Dense(size(X[1], 1), 1), vec)
model = Perturbed(maximizer; ε=0.1, M=10)
squared_loss(ŷ, y) = sum(abs2, y - ŷ) / T;
λ = 30
opt = ADAGrad();
par = Flux.params(encoder)
losses = Float64[]
## Training
for epoch in 1:100
l = 0.0
@showprogress "Epoch $epoch/100 - " for k in 1:nb_instances
gs = gradient(par) do
l += (
squared_loss(model(encoder(X[k]); mapf=instances[k]), Y[k]) +
(λ / nb_instances) * sum(abs, encoder[1].weight)
)
end
Flux.update!(opt, par, gs)
end
push!(losses, l)
end;
println(lineplot(losses))
## Evaluation
solutions_naive = Vector{Solution}(undef, nb_instances);
solutions_pred = Vector{Solution}(undef, nb_instances);
for k in 1:nb_instances
solutions_naive[k] = cooperative_astar(instances[k], 1:nb_agents(instances[k]))
θ = encoder(X[k])
permutation = sortperm(θ; rev=true)
solutions_pred[k] = cooperative_astar(instances[k], permutation)
end
avg_flowtime =
sum(flowtime(solution, mapf) for (solution, mapf) in zip(solutions, instances)) /
nb_instances
avg_flowtime_pred =
sum(flowtime(solution, mapf) for (solution, mapf) in zip(solutions_pred, instances)) /
nb_instances
avg_flowtime_naive =
sum(flowtime(solution, mapf) for (solution, mapf) in zip(solutions_naive, instances)) /
nb_instances
| [
27,
7856,
261,
480,
29,
21287,
6765,
14,
29800,
36772,
15235,
36276,
13,
20362,
27,
34345,
29,
9288,
14,
35720,
62,
49638,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
2235,
1846,
3742,
198,
198,
3500,
7308,
13,
16818,
82,
198,
3500,... | 2.4 | 1,250 |
<reponame>hennyg888/Oceananigans.jl
using Oceananigans.Architectures
using Oceananigans.BoundaryConditions
using Oceananigans.TurbulenceClosures: calculate_diffusivities!
import Oceananigans.TimeSteppers: update_state!
"""
update_state!(model::IncompressibleModel)
Update peripheral aspects of the model (halo regions, diffusivities, hydrostatic pressure) to the current model state.
"""
function update_state!(model::IncompressibleModel)
# Fill halos for velocities and tracers
fill_halo_regions!(merge(model.velocities, model.tracers), model.architecture, model.clock, fields(model))
# Calculate diffusivities
calculate_diffusivities!(model.diffusivities, model.architecture, model.grid, model.closure,
model.buoyancy, model.velocities, model.tracers)
fill_halo_regions!(model.diffusivities, model.architecture, model.clock, fields(model))
# Calculate hydrostatic pressure
pressure_calculation = launch!(model.architecture, model.grid, :xy, update_hydrostatic_pressure!,
model.pressures.pHY′, model.grid, model.buoyancy, model.tracers,
dependencies=Event(device(model.architecture)))
# Fill halo regions for pressure
wait(device(model.architecture), pressure_calculation)
fill_halo_regions!(model.pressures.pHY′, model.architecture)
return nothing
end
| [
27,
7856,
261,
480,
29,
831,
3281,
70,
28011,
14,
46607,
272,
34090,
13,
20362,
198,
3500,
10692,
272,
34090,
13,
19895,
5712,
942,
198,
3500,
10692,
272,
34090,
13,
49646,
560,
25559,
1756,
198,
3500,
10692,
272,
34090,
13,
51,
5945,... | 2.696565 | 524 |
<reponame>akhand9999/IncrementalInference.jl<filename>src/CSMOccuranceUtils.jl
export CSMOccuranceType
export parseCSMVerboseLog, calcCSMOccurancesFolders, calcCSMOccuranceMax, printCSMOccuranceMax, reconstructCSMHistoryLogical
# [cliqId][fsmIterNumber][fsmFunctionName] => (nr. call occurances, list global call sequence position, list of status)
const CSMOccuranceType = Dict{Int, Dict{Int, Dict{Symbol, Tuple{Int, Vector{Int}, Vector{String}}}}}
function parseCSMVerboseLog(resultsDir::AbstractString;verboseName::AbstractString="csmVerbose.log")
#
fid = open(joinpath(resultsDir, verboseName), "r")
fsmLines = readlines(fid)
close(fid)
# parse lines into usable format
sfsmL = split.(fsmLines, r" -- ")
cids = split.(sfsmL .|> x->match(r"cliq\d+", x[1]).match, r"cliq") .|> x->parse(Int,x[end])
iters = split.(sfsmL .|> x->match(r"iter=\d+", x[1]).match, r"iter=") .|> x->parse(Int,x[end])
smfnc = sfsmL .|> x->split(x[2], ',')[1] .|> Symbol
statu = sfsmL .|> x->split(x[2], ',')[2] .|> x->lstrip(rstrip(x))
return cids, iters, smfnc, statu
end
## Make lookup from all runs
function calcCSMOccurancesFolders(folderList::Vector{<:AbstractString};
verboseName::AbstractString="csmVerbose.log" )
#
# lookup for histogram on each step per fsm
# [cliqId][fsmIterNumber][fsmFunctionName] => (nr. call occurances, list global call sequence position)
csmCounter = CSMOccuranceType()
# lookup for transition counts per fsm function
trxCounter = Dict{Symbol, Dict{Symbol, Int}}()
prevFnc = Dict{Int, Symbol}()
for rDir in folderList
## load the sequence from each file
cids, iters, smfnc, statu = parseCSMVerboseLog(rDir, verboseName=verboseName)
# populate histogram
for (idx,smfi) in enumerate(smfnc)
if !haskey(csmCounter, cids[idx])
csmCounter[cids[idx]] = Dict{Int, Dict{Symbol, Tuple{Int,Vector{Int}}}}()
end
if !haskey(csmCounter[cids[idx]], iters[idx])
# Tuple{Int,Int[]} == (nr. occurances of call, list global call sequence position)
csmCounter[cids[idx]][iters[idx]] = Dict{Symbol, Tuple{Int,Vector{Int}, Vector{String}}}()
end
easyRef = csmCounter[cids[idx]][iters[idx]]
if !haskey(easyRef,smfi)
easyRef[smfi] = (0,Int[],String[])
end
# add position in call sequence (global per solve)
globalSeqIdx = easyRef[smfi][2]
push!(globalSeqIdx, idx)
statSeq = easyRef[smfi][3]
push!(statSeq, statu[idx])
easyRef[smfi] = (easyRef[smfi][1]+1, globalSeqIdx, statSeq)
## also track the transitions
if haskey(prevFnc, cids[idx])
if !haskey(trxCounter, prevFnc[cids[idx]])
# add function lookup if not previously seen
trxCounter[prevFnc[cids[idx]]] = Dict{Symbol, Int}()
end
if !haskey(trxCounter[prevFnc[cids[idx]]], smfi)
# add previously unseen transition
trxCounter[prevFnc[cids[idx]]][smfi] = 0
end
# from previous to next function
trxCounter[prevFnc[cids[idx]]][smfi] += 1
end
# always update prevFnc register
prevFnc[cids[idx]] = smfi
end
end
return csmCounter, trxCounter
end
"""
$SIGNATURES
Use maximum occurance from `csmCounter::CSMOccuranceType` to summarize many CSM results.
Notes
- `percentage::Bool=false` shows median global sequence occurance ('m'), or
- `percentage::Bool=true` of occurance ('%')
"""
function calcCSMOccuranceMax( csmCounter::CSMOccuranceType;
percentage::Bool=false)
#
ncsm = length(keys(csmCounter))
maxOccuran = Dict()
# max steps
for i in 1:ncsm
# sequence of functions that occur most often
maxOccuran[i] = Vector{Tuple{Symbol, String, String}}()
end
# pick out the max for each CSM iter
for (csmID, csmD) in csmCounter, stp in 1:length(keys(csmD))
maxFnc = :null
maxCount = 0
totalCount = 0
for (fnc, cnt) in csmCounter[csmID][stp]
totalCount += cnt[1]
if maxCount < cnt[1]
maxCount = cnt[1]
maxFnc = fnc
end
end
# occurance count
perc = if percentage
"$(round(Int,(maxCount/totalCount)*100))"
else
# get medial position (proxy to most frequent)
"$(round(Int,Statistics.median(csmCounter[csmID][stp][maxFnc][2])))"
end
# get status
allst = csmCounter[csmID][stp][maxFnc][3]
qst = unique(allst)
mqst = qst .|> y->count(x->x==y, allst)
midx = findfirst(x->x==maximum(mqst),mqst)
maxStatus = qst[midx]
push!(maxOccuran[csmID], (maxFnc, perc, maxStatus) ) # position in vector == stp
end
maxOccuran
end
"""
$SIGNATURES
Print the most likely FSM function at each step per state machine, as swim lanes.
Example
```julia
csmCo = calcCSMOccurancesFolders(resultFolder[maskTrue])
maxOcc = calcCSMOccuranceMax(csmCo)
printCSMOccuranceMax(maxOcc)
```
"""
function printCSMOccuranceMax(maxOcc;
fid=stdout,
percentage::Bool=false )
#
ncsm = length(keys(maxOcc))
# print titles
titles = Tuple[]
for cid in 1:ncsm
tpl = ("","","$cid "," ")
push!(titles, tpl)
end
IIF.printHistoryLane(fid, "", titles)
print(fid,"----")
for i in 1:ncsm
print(fid,"+--------------------")
end
println(fid,"")
maxsteps=0
for i in 1:ncsm
maxsteps = maxsteps < length(maxOcc[i]) ? length(maxOcc[i]) : maxsteps
end
for stp in 1:maxsteps
TPL = Tuple[]
for cid in 1:ncsm
tpl = ("",""," "," ")
if stp <= length(maxOcc[cid])
fncName = maxOcc[cid][stp][1]
# either show percentage or sequence index
percOrSeq = "$(maxOcc[cid][stp][2])"
percOrSeq *= percentage ? "%" : "m"
# get status
tpl = ("",percOrSeq,fncName,maxOcc[cid][stp][3])
end
push!(TPL, tpl)
end
IIF.printHistoryLane(fid, stp, TPL)
end
end
"""
$SIGNATURES
Use `solveTree!`'s` `verbose` output to reconstruct the swim lanes Logical sequence of CSM function calls.
Notes
- This is a secondary function to primary `printCSMHistoryLogical`.
Related
printCSMHistoryLogical
"""
function reconstructCSMHistoryLogical(resultsDir::AbstractString;
fid::IO=stdout,
verboseName::AbstractString="csmVerbose.log" )
#
csmCounter, trxCounter = calcCSMOccurancesFolders([resultsDir], verboseName=verboseName)
# print with sequence position
maxOcc = calcCSMOccuranceMax(csmCounter, percentage=false)
printCSMOccuranceMax(maxOcc, fid=fid)
end
# | [
27,
7856,
261,
480,
29,
461,
4993,
24214,
14,
15562,
37098,
818,
4288,
13,
20362,
27,
34345,
29,
10677,
14,
7902,
11770,
535,
3874,
18274,
4487,
13,
20362,
198,
198,
39344,
9429,
11770,
535,
3874,
6030,
198,
39344,
21136,
7902,
44,
13... | 2.261017 | 2,950 |
using AutoTest
Package = "BlackBoxOptim"
using BlackBoxOptim
function run(packagename, srcdir = "src", testdir = "test";
testfileregexp = r"^test_.*\.jl$",
srcfileregexp = r"^.*\.jl$")
testfiles = AutoTest.findfiles(testdir, testfileregexp; recursive = true) # in AutoTest this is false
srcfiles = AutoTest.findfiles(srcdir, srcfileregexp; recursive = true)
ts = AutoTest.TestSuite(testfiles, srcfiles, "$packagename test suite")
AutoTest.runtestsuite(ts)
end
if length(ARGS) > 0 && ARGS[1] == "continuous"
AutoTest.autorun(Package, "src", "test/autotests")
else
run(Package, "src", "test/autotests")
end
| [
3500,
11160,
14402,
198,
198,
27813,
796,
366,
9915,
14253,
27871,
320,
1,
198,
198,
3500,
2619,
14253,
27871,
320,
198,
198,
8818,
1057,
7,
8002,
11286,
480,
11,
12351,
15908,
796,
366,
10677,
1600,
1332,
15908,
796,
366,
9288,
8172,
... | 2.590361 | 249 |
using RealInterface
using Base.Test
import SpecialFunctions, NaNMath
for f in RealInterface.UNARY_ARITHMETIC
@test isa(eval(Base, f), Function)
end
for f in RealInterface.BINARY_ARITHMETIC
@test isa(eval(Base, f), Function)
end
for f in RealInterface.UNARY_MATH
@test isa(eval(Base, f), Function)
end
for f in RealInterface.BINARY_MATH
@test isa(eval(Base, f), Function)
end
for f in RealInterface.MISC_FUNCTIONS
@test isa(eval(Base, f), Function)
end
for f in RealInterface.UNARY_PREDICATES
@test isa(eval(Base, f), Function)
end
for f in RealInterface.BINARY_PREDICATES
@test isa(eval(Base, f), Function)
end
for f in RealInterface.UNARY_SPECIAL_MATH
@test isa(eval(SpecialFunctions, f), Function)
end
for f in RealInterface.BINARY_SPECIAL_MATH
@test isa(eval(SpecialFunctions, f), Function)
end
for f in RealInterface.TERNARY_SPECIAL_MATH
@test isa(eval(SpecialFunctions, f), Function)
end
for f in RealInterface.UNARY_NAN_MATH
@test isa(eval(NaNMath, f), Function)
end
for f in RealInterface.BINARY_NAN_MATH
@test isa(eval(NaNMath, f), Function)
end
| [
3500,
6416,
39317,
198,
3500,
7308,
13,
14402,
198,
11748,
6093,
24629,
2733,
11,
11013,
32755,
776,
198,
198,
1640,
277,
287,
6416,
39317,
13,
4944,
13153,
62,
1503,
10554,
47123,
2149,
198,
220,
220,
220,
2488,
9288,
318,
64,
7,
182... | 2.526077 | 441 |
<filename>src/OceanTurbulenceParameterEstimation.jl
module OceanTurbulenceParameterEstimation
export OneDimensionalTimeSeries, InverseProblem, FreeParameters,
IdentityNormalization, ZScore, forward_map, observation_map,
eki, lognormal_with_mean_std, iterate!, EnsembleKalmanInversion, UnscentedKalmanInversion,
UnscentedKalmanInversionPostprocess, ConstrainedNormal
include("Observations.jl")
include("TurbulenceClosureParameters.jl")
include("InverseProblems.jl")
include("EnsembleKalmanInversions.jl")
using .Observations: OneDimensionalTimeSeries, ZScore
using .TurbulenceClosureParameters: FreeParameters
using .InverseProblems: InverseProblem, forward_map, observation_map
using .EnsembleKalmanInversions: iterate!, EnsembleKalmanInversion, UnscentedKalmanInversion,
UnscentedKalmanInversionPostprocess, ConstrainedNormal, lognormal_with_mean_std
end # module
| [
27,
34345,
29,
10677,
14,
46607,
51,
5945,
32401,
36301,
22362,
18991,
13,
20362,
198,
21412,
10692,
51,
5945,
32401,
36301,
22362,
18991,
198,
198,
39344,
1881,
35,
16198,
7575,
27996,
11,
554,
4399,
40781,
11,
3232,
48944,
11,
220,
19... | 3.17193 | 285 |
<gh_stars>1-10
# Raw memory management
export Mem, available_memory, total_memory
module Mem
using ..VectorEngine
using ..VectorEngine.VEDA: vedaMemAlloc, vedaMemPtr, vedaMemFree, vedaMemGetInfo,
vedaMemAllocHost, vedaMemFreeHost
using Printf
#
# buffers
#
# a chunk of memory allocated using the VEDA APIs. this memory can reside on the host, on
# the VE, or can represent specially-formatted memory (like texture arrays). depending on
# all that, the buffer may be `convert`ed to a Ptr, VEPtr, or VEArrayPtr.
abstract type AbstractBuffer end
Base.convert(T::Type{<:Union{Ptr,VEPtr,VEArrayPtr}}, buf::AbstractBuffer) =
throw(ArgumentError("Illegal conversion of a $(typeof(buf)) to a $T"))
# ccall integration
#
# taking the pointer of a buffer means returning the underlying pointer,
# and not the pointer of the buffer object itself.
Base.unsafe_convert(T::Type{<:Union{Ptr,VEPtr,VEArrayPtr}}, buf::AbstractBuffer) =
convert(T, buf)
## host side device buffer
"""
Mem.DeviceBuffer
Host residing structure representing a buffer of device memory.
"""
mutable struct DeviceBuffer <: AbstractBuffer
#vptr::VEDAdeviceptr
vptr::VEPtr{Int8}
ptr::VEPtr{Int8}
bytesize::Int
function DeviceBuffer(bsize::Int)
bsize == 0 && return new(VE_NULL, VE_NULL, 0)
#vp = Ref{VEDAdeviceptr}()
vp = Ref{VEPtr{Int8}}(0)
p = Ref{VEPtr{Int8}}(0)
vedaMemAlloc(pointer_from_objref(vp), bsize)
vedaMemPtr(pointer_from_objref(p), vp[])
#VectorEngine.vesync() # is this needed?
obj = new(vp[], p[], bsize)
finalizer(unsafe_free!, obj)
return obj
end
end
function unsafe_free!(buf::DeviceBuffer)
if pointer(buf) != VE_NULL
vedaMemFree(pointer(buf))
buf.vptr = VE_NULL
buf.ptr = VE_NULL
end
end
"""
Mem.alloc(DeviceBuffer, bytesize::Integer)
Allocate `bytesize` bytes of memory on the VE.
"""
alloc(::Type{DeviceBuffer}, bytesize::Integer) = DeviceBuffer(bytesize)
function free(buf::DeviceBuffer) end
Base.pointer(buf::DeviceBuffer) = buf.vptr
Base.sizeof(buf::DeviceBuffer) = buf.bytesize
Base.show(io::IO, buf::DeviceBuffer) =
@printf(io, "DeviceBuffer(%s at %p (%p))", Base.format_bytes(sizeof(buf)), Int(buf.vptr), Int(buf.ptr))
#Base.convert(::Type{VEDAdeviceptr}, buf::DeviceBuffer) = buf.vptr
Base.convert(::Type{VEPtr{T}}, buf::DeviceBuffer) where {T} = reinterpret(VEPtr{T}, buf.vptr)
############################################################
## host buffer
"""
Mem.HostBuffer
Mem.Host
A buffer of pinned memory on the CPU, unaccessible to the VE.
"""
mutable struct HostBuffer <: AbstractBuffer
ptr::Ptr{Cvoid}
bytesize::Int
end
Base.pointer(buf::HostBuffer) = buf.ptr
Base.sizeof(buf::HostBuffer) = buf.bytesize
Base.show(io::IO, buf::HostBuffer) =
@printf(io, "HostBuffer(%s at %p)", Base.format_bytes(sizeof(buf)), Int(pointer(buf)))
Base.convert(::Type{Ptr{T}}, buf::HostBuffer) where {T} =
convert(Ptr{T}, pointer(buf))
function Base.convert(::Type{VEPtr{T}}, buf::HostBuffer) where {T}
throw(ArgumentError("cannot take the VE address of a CPU buffer"))
end
"""
Mem.alloc(HostBuffer, bytesize::Integer)
Allocate `bytesize` bytes of page-locked memory on the host. This memory is accessible from
the CPU, and makes it possible to perform faster memory copies to the VE.
"""
function alloc(::Type{HostBuffer}, bytesize::Integer)
bytesize == 0 && return HostBuffer(C_NULL, 0)
ptr_ref = Ref{Ptr{Cvoid}}()
vedaMemAllocHost(ptr_ref, bytesize)
return HostBuffer(ptr_ref[], bytesize)
end
function free(buf::HostBuffer)
if pointer(buf) != VE_NULL
vedaMemFreeHost(buf.ptr)
end
end
## array buffer
mutable struct ArrayBuffer{T,N} <: AbstractBuffer
ptr::VEArrayPtr{T}
dims::Dims{N}
end
Base.pointer(buf::ArrayBuffer) = buf.ptr
Base.sizeof(buf::ArrayBuffer) = error("Opaque array buffers do not have a definite size")
Base.size(buf::ArrayBuffer) = buf.dims
Base.length(buf::ArrayBuffer) = prod(buf.dims)
Base.ndims(buf::ArrayBuffer{<:Any,N}) where {N} = N
Base.show(io::IO, buf::ArrayBuffer{T,1}) where {T} =
@printf(io, "%g-element ArrayBuffer{%s,%g}(%p)", length(buf), string(T), 1, Int(pointer(buf)))
Base.show(io::IO, buf::ArrayBuffer{T}) where {T} =
@printf(io, "%s ArrayBuffer{%s,%g}(%p)", Base.inds2string(size(buf)), string(T), ndims(buf), Int(pointer(buf)))
# array buffers are typed, so refuse arbitrary conversions
Base.convert(::Type{VEArrayPtr{T}}, buf::ArrayBuffer{T}) where {T} =
convert(VEArrayPtr{T}, pointer(buf))
# ... except for VEArrayPtr{Nothing}, which is used to call untyped API functions
Base.convert(::Type{VEArrayPtr{Nothing}}, buf::ArrayBuffer) =
convert(VEArrayPtr{Nothing}, pointer(buf))
function alloc(::Type{<:ArrayBuffer{T}}, dims::Dims{N}) where {T,N}
format = convert(CUarray_format, eltype(T))
if N == 2
width, height = dims
depth = 0
@assert 1 <= width "VEDA 2D array (texture) width must be >= 1"
# @assert witdh <= CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH
@assert 1 <= height "VEDA 2D array (texture) height must be >= 1"
# @assert height <= CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT
elseif N == 3
width, height, depth = dims
@assert 1 <= width "VEDA 3D array (texture) width must be >= 1"
# @assert witdh <= CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH
@assert 1 <= height "VEDA 3D array (texture) height must be >= 1"
# @assert height <= CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT
@assert 1 <= depth "VEDA 3D array (texture) depth must be >= 1"
# @assert depth <= CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH
elseif N == 1
width = dims[1]
height = depth = 0
@assert 1 <= width "VEDA 1D array (texture) width must be >= 1"
# @assert witdh <= CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH
else
"VEDA arrays (texture memory) can only have 1, 2 or 3 dimensions"
end
allocateArray_ref = Ref(CUDA.CUDA_ARRAY3D_DESCRIPTOR(
width, # Width::Csize_t
height, # Height::Csize_t
depth, # Depth::Csize_t
format, # Format::CUarray_format
UInt32(CUDA.nchans(T)), # NumChannels::UInt32
0))
handle_ref = Ref{CUarray}()
vedaArray3DCreate(handle_ref, allocateArray_ref)
ptr = reinterpret(VEArrayPtr{T}, handle_ref[])
return ArrayBuffer{T,N}(ptr, dims)
end
function free(buf::ArrayBuffer)
vedaArrayDestroy(buf.ptr)
end
## convenience aliases
const Device = DeviceBuffer
const Host = HostBuffer
const Array = ArrayBuffer
#
# pointers
#
## initialization
"""
Mem.set!(buf::VEPtr, value::Union{Int8,UInt8,Int16,UInt16,Int32,UInt32,
Int64,UInt64,Float32,Float64}, len::Integer;
async::Bool=false, stream::VEStream)
Initialize device memory by copying `val` for `len` times. Executed asynchronously if
`async` is true, in which case a valid `stream` is required.
"""
set!
for T in [Int8, UInt8, Int16, UInt16, Int32, UInt32, Int64, UInt64, Float32, Float64]
bits = 8*sizeof(T)
fn_sync = Symbol("vedaMemsetD$(bits)")
fn_async = Symbol("vedaMemsetD$(bits)Async")
U = Symbol("UInt$(bits)")
@eval function set!(ptr::VEPtr{$T}, value::$T, len::Integer;
async::Bool=false, stream::Union{Nothing,VEStream}=nothing)
val = typeof(value) == $U ? value : reinterpret($U, value)
if async
stream===nothing &&
throw(ArgumentError("Asynchronous memory operations require a stream."))
$(getproperty(VectorEngine.VEDA, fn_async))(ptr, val, len, stream)
else
stream===nothing ||
throw(ArgumentError("Synchronous memory operations cannot be issued on a stream."))
$(getproperty(VectorEngine.VEDA, fn_sync))(ptr, val, len)
end
end
end
## copy operations
for (f, fa, srcPtrTy, dstPtrTy) in (("vedaMemcpyDtoH", "vedaMemcpyDtoHAsync", VEPtr, Ptr),
("vedaMemcpyHtoD", "vedaMemcpyHtoDAsync", Ptr, VEPtr),
("vedaMemcpyDtoD", "vedaMemcpyDtoDAsync", VEPtr, VEPtr),
)
@eval function Base.unsafe_copyto!(dst::$dstPtrTy{T}, src::$srcPtrTy{T}, N::Integer;
stream::Union{Nothing,VEStream}=nothing,
async::Bool=false) where T
if async
stream===nothing &&
throw(ArgumentError("Asynchronous memory operations require a stream."))
$(getproperty(VectorEngine.VEDA, Symbol(fa)))(dst, src, N*sizeof(T), stream)
else
stream===nothing ||
throw(ArgumentError("Synchronous memory operations cannot be issued on a stream."))
$(getproperty(VectorEngine.VEDA, Symbol(f)))(dst, src, N*sizeof(T))
end
return dst
end
end
function Base.unsafe_copyto!(dst::VEArrayPtr{T}, src::Ptr{T}, N::Integer;
stream::Union{Nothing,VEStream}=nothing,
async::Bool=false) where T
if async
stream===nothing &&
throw(ArgumentError("Asynchronous memory operations require a stream."))
vedaMemcpyHtoDAsync(dst, src, N*sizeof(T), stream)
else
stream===nothing ||
throw(ArgumentError("Synchronous memory operations cannot be issued on a stream."))
vedaMemcpyHtoD(dst+doffs-1, src, N*sizeof(T))
end
end
function Base.unsafe_copyto!(dst::Ptr{T}, src::VEArrayPtr{T}, soffs::Integer, N::Integer;
stream::Union{Nothing,VEStream}=nothing,
async::Bool=false) where T
if async
stream===nothing &&
throw(ArgumentError("Asynchronous memory operations require a stream."))
vedaMemcpyAtoHAsync(dst, src, soffs, N*sizeof(T), stream)
else
stream===nothing ||
throw(ArgumentError("Synchronous memory operations cannot be issued on a stream."))
vedaMemcpyAtoH(dst, src, soffs, N*sizeof(T))
end
end
Base.unsafe_copyto!(dst::VEArrayPtr{T}, doffs::Integer, src::VEPtr{T}, N::Integer) where {T} =
vedaMemcpyDtoA(dst, doffs, src, N*sizeof(T))
Base.unsafe_copyto!(dst::VEPtr{T}, src::VEArrayPtr{T}, soffs::Integer, N::Integer) where {T} =
vedaMemcpyAtoD(dst, src, soffs, N*sizeof(T))
Base.unsafe_copyto!(dst::VEArrayPtr, src, N::Integer; kwargs...) =
Base.unsafe_copyto!(dst, 0, src, N; kwargs...)
Base.unsafe_copyto!(dst, src::VEArrayPtr, N::Integer; kwargs...) =
Base.unsafe_copyto!(dst, src, 0, N; kwargs...)
## memory info
function info()
free_ref = Ref{Csize_t}()
total_ref = Ref{Csize_t}()
vedaMemGetInfo(free_ref, total_ref)
return convert(Int, free_ref[]), convert(Int, total_ref[])
end
end # module Mem
"""
available_memory()
Returns the available_memory amount of memory (in bytes), available for allocation by the CUDA context.
"""
available_memory() = Mem.info()[1]
"""
total_memory()
Returns the total amount of memory (in bytes), available for allocation by the CUDA context.
"""
total_memory() = Mem.info()[2]
# memory operations
function unsafe_fill!(ptr::Union{Ptr{T},VEPtr{T}},
pattern::Union{Ptr{T},VEPtr{T}}, N::Integer) where T
bytes = N*sizeof(T)
bytes==0 && return
Mem.set!(ptr, pattern, N)
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
16089,
4088,
4542,
198,
198,
39344,
4942,
11,
1695,
62,
31673,
11,
2472,
62,
31673,
198,
198,
21412,
4942,
198,
198,
3500,
11485,
38469,
13798,
198,
3500,
11485,
38469,
13798,
13,
53,
196... | 2.356749 | 4,897 |
<filename>src/utilities.jl<gh_stars>0
import LowRankModels: copy_estimate, copy
export copy_estimate, copy
function copy_estimate(g::GFRM)
return GFRM(g.A,g.losses,g.r,g.k,
g.observed_features,g.observed_examples,
copy(g.U),copy(g.W))
end
| [
27,
34345,
29,
10677,
14,
315,
2410,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
11748,
7754,
27520,
5841,
1424,
25,
4866,
62,
395,
1920,
11,
4866,
198,
198,
39344,
4866,
62,
395,
1920,
11,
4866,
198,
198,
8818,
4866,
62,
395,
192... | 2.037594 | 133 |
n = 9 # rand(1:10)
@test matrixdepot("clement", Float64, n) == matrixdepot("clement", n)
A = matrixdepot("clement", n)
B = matrixdepot("clement", n, 1)
@test diag(A+A', 1) == n*ones(n-1)
@test issymmetric(Array(B))
θ = matrixdepot("clement", 1)
println("'clement' passed test...")
| [
77,
796,
860,
1303,
43720,
7,
16,
25,
940,
8,
198,
31,
9288,
17593,
10378,
313,
7203,
66,
1732,
1600,
48436,
2414,
11,
299,
8,
6624,
17593,
10378,
313,
7203,
66,
1732,
1600,
299,
8,
198,
198,
32,
796,
17593,
10378,
313,
7203,
66,
... | 2.358333 | 120 |
@testset "Global Quantities" begin
# Test Residual
T = Float64
N = 10
dt = 0.1
p = 3
i = 2
model = UnicycleGame(p=p)
probsize = ProblemSize(N,model)
x0 = rand(SVector{model.n,T})
opts = Options()
Q = [Diagonal(rand(SVector{model.ni[i],T})) for i=1:p]
R = [Diagonal(rand(SVector{model.mi[i],T})) for i=1:p]
xf = [i*ones(SVector{model.ni[i],T}) for i=1:p]
uf = [2i*ones(SVector{model.mi[i],T}) for i=1:p]
game_obj = GameObjective(Q,R,xf,uf,N,model)
game_con = GameConstraintValues(probsize)
prob = GameProblem(N, dt, x0, model, opts, game_obj, game_con)
@test typeof(prob) <: GameProblem
residual!(prob)
ibr_residual!(prob, i)
# Test Residual Jacobian
residual_jacobian!(prob)
ibr_residual_jacobian!(prob, i)
# Test scn
@test scn(1234.0) == " 1.2e+3"
@test scn(-1234.0) == "-1.2e+3"
@test scn(-0.1234) == "-1.2e-1"
@test scn( 0.1234) == " 1.2e-1"
@test scn(0) == " 0.0e+0"
@test scn(-0) == " 0.0e+0"
@test scn(0, digits=3) == " 0.000e+0"
@test scn(1234, digits=3) == " 1.234e+3"
@test scn(1234, digits=0) == " 1e+3"
@test_throws AssertionError scn(1234, digits=-1) == " 1e+3"
end
| [
31,
9288,
2617,
366,
22289,
16972,
871,
1,
2221,
628,
220,
220,
220,
1303,
6208,
1874,
312,
723,
198,
220,
220,
220,
309,
796,
48436,
2414,
198,
220,
220,
220,
399,
796,
838,
198,
220,
220,
220,
288,
83,
796,
657,
13,
16,
198,
2... | 1.897356 | 643 |
using SubHunt
using Test
using Random
using POMDPs
using POMDPPolicies
using POMDPSimulators
using DiscreteValueIteration
using ParticleFilters
using POMDPModelTools
using QMDP
@testset "VI" begin
rng = MersenneTwister(6)
pomdp = SubHuntPOMDP()
# show(STDOUT, MIME("text/plain"), SubVis(pomdp))
solver = ValueIterationSolver(verbose=true)
vipolicy = solve(solver, UnderlyingMDP(pomdp))
#
s = rand(rng, initialstate(pomdp))
@show value(vipolicy, s)
@show value(vipolicy, SubState(s.own, s.target, s.goal, true))
end
@testset "QMDP and PF" begin
rng = MersenneTwister(6)
pomdp = SubHuntPOMDP()
policy = solve(QMDPSolver(verbose=true), pomdp)
# policy = RandomPolicy(pomdp, rng=rng)
filter = BootstrapFilter(pomdp, 10000, rng)
for (s, a, r, sp) in stepthrough(pomdp, policy, filter, "s,a,r,sp", max_steps=200, rng=rng)
v = SubVis(pomdp, s=s, a=a, r=r)
show(stdout, MIME("text/plain"), v)
end
end
@testset "DPOMDP and PF" begin
rng = MersenneTwister(6)
dpomdp = DSubHuntPOMDP(SubHuntPOMDP(), 1.0)
policy = RandomPolicy(dpomdp, rng=rng)
filter = BootstrapFilter(dpomdp, 10000, rng)
for (s, a, r, sp) in stepthrough(dpomdp, policy, filter, "s,a,r,sp", max_steps=200, rng=rng)
v = SubVis(dpomdp.cp, s=s, a=a, r=r)
show(stdout, MIME("text/plain"), v)
end
end
@testset "Visualization" begin
rng = MersenneTwister(6)
pomdp = SubHuntPOMDP()
policy = solve(QMDPSolver(verbose=true), pomdp)
# policy = RandomPolicy(pomdp, rng=rng)
filter = BootstrapFilter(pomdp, 10000, rng)
for step in stepthrough(pomdp, policy, filter, "s,a,r,sp", max_steps=200, rng=rng)
show(stdout, MIME("text/plain"), render(pomdp, step))
end
end
| [
3500,
3834,
47663,
198,
3500,
6208,
198,
3500,
14534,
198,
3500,
350,
2662,
6322,
82,
198,
3500,
350,
2662,
6322,
47,
4160,
444,
198,
3500,
350,
2662,
35,
3705,
320,
24325,
198,
3500,
8444,
8374,
11395,
29993,
341,
198,
3500,
2142,
15... | 2.178049 | 820 |
# InflationTotalCPI - Implementación para obtener la medida estándar de ritmo
# inflacionario a través de la variación interanual del IPC
struct InflationTotalCPI <: InflationFunction
end
# Extender el método para obtener el nombre de esta medida
measure_name(::InflationTotalCPI) = "Variación interanual IPC"
measure_tag(::InflationTotalCPI) = "Total"
# Las funciones sobre VarCPIBase deben resumir en variaciones intermensuales
# Método para objetos VarCPIBase cuyo índice base es un escalar
function (inflfn::InflationTotalCPI)(base::VarCPIBase{T, T}) where {T <: AbstractFloat}
base_ipc = capitalize(base.v, base.baseindex)
ipc = base_ipc * base.w / base.baseindex
varinterm!(ipc, ipc, 100)
ipc
end
# Esta medida sí se comporta diferente de acuerdo a los índices base, por lo que
# se define una versión que toma en cuenta los diferentes índices. Si la medida
# solamente genera resumen de las variaciones intermensuales, no es necesario.
# Método para objetos VarCPIBase cuyos índices base son un vector
function (inflfn::InflationTotalCPI)(base::VarCPIBase{T, B}) where {T <: AbstractFloat, B <: AbstractVector{T}}
base_ipc = capitalize(base.v, base.baseindex)
# Obtener índice base y normalizar a 100
baseindex = base.baseindex' * base.w
ipc = 100 * (base_ipc * base.w / baseindex)
varinterm!(ipc, ipc, 100)
ipc
end
| [
2,
554,
33521,
14957,
8697,
40,
532,
48282,
32009,
18840,
31215,
909,
83,
877,
8591,
1117,
3755,
1556,
6557,
358,
283,
390,
374,
270,
5908,
198,
2,
28472,
49443,
4982,
257,
1291,
85,
20954,
390,
8591,
5553,
32009,
18840,
987,
272,
723... | 2.598485 | 528 |
<filename>src/BinaryProvider.jl
__precompile__()
module BinaryProvider
using Compat
using Compat.Libdl
# Utilities for controlling verbosity
include("LoggingUtils.jl")
# Include our subprocess running functionality
include("OutputCollector.jl")
# External utilities such as downloading/decompressing tarballs
include("PlatformEngines.jl")
# Platform naming
include("PlatformNames.jl")
# Everything related to file/path management
include("Prefix.jl")
# Abstraction of "needing" a file, that would trigger an install
include("Products.jl")
function __init__()
global global_prefix
# Initialize our global_prefix
global_prefix = Prefix(joinpath(dirname(@__FILE__), "../", "global_prefix"))
activate(global_prefix)
# Find the right download/compression engines for this platform
probe_platform_engines!()
# If we're on a julia that's too old, then fixup the color mappings
if !haskey(Base.text_colors, :default)
Base.text_colors[:default] = Base.color_normal
end
end
end # module
| [
27,
34345,
29,
10677,
14,
33,
3219,
29495,
13,
20362,
198,
834,
3866,
5589,
576,
834,
3419,
198,
21412,
45755,
29495,
198,
198,
3500,
3082,
265,
198,
3500,
3082,
265,
13,
25835,
25404,
198,
198,
2,
41086,
329,
12755,
15942,
16579,
198... | 3.232704 | 318 |
using HDF5
HDF5File <: HDF5Object
HDF5Object
| [
198,
3500,
5572,
37,
20,
198,
198,
39,
8068,
20,
8979,
1279,
25,
5572,
37,
20,
10267,
198,
198,
39,
8068,
20,
10267,
198
] | 2 | 24 |
<gh_stars>0
#TYPES
"""
AbstractModel
abstract type for models
"""
abstract type AbstractModel end
"""
AbstractDependenceStructure
Types inheriting from abstract type `AbstractDependenceStructure`
"""
abstract type AbstractDependenceStructure end
"""
FullIndependence <: AbstractDependenceStructure
Type acting as a Flag for Full independent Zig-Zag sampler
"""
struct FullIndependence <: AbstractDependenceStructure end
"""
Type acting as a Flag for partial independent Zig-Zag sampler
"""
struct PartialIndependence <: AbstractDependenceStructure end
"""
SamplingScheme
Abstact type for Sampling scheme
"""
abstract type SamplingScheme end
"""
SubSampling <: SamplingScheme
If you cannot sample from the inhomogeneous Poisson rate, sample it by
subsampling from Poisson rate with higher intensity (bound) and accept reject the event.
"""
struct SubSampling <: SamplingScheme end
"""
Regular <: SamplingScheme
For linear sdes, where the actual imhogeneous Poisson rate can be sampled directly.
"""
struct Regular <: SamplingScheme end
"""
System
CHANGE IN TUPLE WITH FREE NUMBER OF PARAMETERS
contains all the information needed for the ZigZag sampler
ξ::Vector{Float64} := vector for the position of the coefficients
θ::Vector{Float64} := vector containing the velocities (need to be changes in float64)
ϕ::Vector{Fs} := Faber Schauder functions information (see Fs)
τ::Vector{Float64} := vector containing waiting time (could be sorted)
L::Int64 := Number of Levels
T::Float64 := time length of the bridge
b1::Vector{Float64} := free vector needed for linear growth sde
b2::Vector{Float64} := free vector needed for linear growth sde
tt::Vector{Float64} := free vector needed for linear growth sde
"""
struct System
ξ::Vector{Float64} #
θ::Vector{Float64}
ϕ::Vector{Fs}
τ::Vector{Float64}
L::Int64
T::Float64
function System(L::Int64, T::Float64, ξ = fill(0.0, 2<<L - 1), θ = fill(1.0, 2<<L - 1), τ = fill(0.0, 2<<L - 1))
new(ξ, θ, generate(L, T), τ, L, T)
end
end
"""
Skeleton
container for output zigzag sampler
"""
struct Skeleton
ξ::Vector{Float64}
t::Float64
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
9936,
47,
1546,
198,
37811,
198,
220,
220,
220,
27741,
17633,
198,
198,
397,
8709,
2099,
329,
4981,
198,
37811,
198,
397,
8709,
2099,
27741,
17633,
886,
628,
628,
198,
37811,
198,
220,
220,
220,
... | 3 | 735 |
function linsolve_cg( LF::LF3dGrid, b::Array{Float64,1};
x0 = nothing,
NiterMax = 1000, TOL=5.e-10,
convmsg=true, showprogress=false )
#
Npoints = size(b)[1]
if x0 == nothing
x = zeros(Float64, Npoints)
else
x = copy(x0)
end
#
r = zeros( Float64, Npoints )
p = zeros( Float64, Npoints )
#
L_x = apply_Laplacian( LF, x )
for ip = 1 : Npoints
r[ip] = b[ip] - L_x[ip]
p[ip] = r[ip]
end
rsold = dot( r, r )
for iter = 1 : NiterMax
#
L_x = apply_Laplacian( LF, p )
#
α = rsold/dot( p, L_x )
#
x[:] = x[:] + α * p[:] # FIXME use x[:] to force x to be copied, not referenced
r[:] = r[:] - α * L_x[:]
#
rsnew = dot( r, r )
# deltars = rsold - rsnew
if showprogress
@printf("%8d %20.10f\n", iter, sqrt(rsnew))
end
#
if sqrt(rsnew) < TOL
if convmsg
@printf("#Convergence achieved in linsolve_cg: %8d iterations.\n", iter)
end
break
end
#
p = r + (rsnew/rsold) * p
#
rsold = rsnew
end
#
return x
#
end
| [
8818,
300,
1040,
6442,
62,
66,
70,
7,
47629,
3712,
43,
37,
18,
67,
41339,
11,
275,
3712,
19182,
90,
43879,
2414,
11,
16,
19629,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.871237 | 598 |
using LowRankModels
# test losses in losses.jl
srand(1);
losses = [
QuadLoss(),
QuadLoss(10),
L1Loss(),
L1Loss(5.2),
HuberLoss(),
HuberLoss(4),
HuberLoss(3.1, crossover=3.2),
PeriodicLoss(2*pi),
PeriodicLoss(2*pi, 4),
PoissonLoss(20),
PoissonLoss(22,4.1),
OrdinalHingeLoss(1,10),
OrdinalHingeLoss(2,7,5),
LogisticLoss(),
LogisticLoss(0.2),
WeightedHingeLoss(),
WeightedHingeLoss(11),
WeightedHingeLoss(1.5, case_weight_ratio=4.3),
MultinomialLoss(4),
MultinomialLoss(6, .5),
# OrdisticLoss(5),
MultinomialOrdinalLoss(3)
] #tests what should be successful constructions
# TODO: do some bad constructions and test that they fail with catches
bad_losses = [
:(QuadLoss(10,RealDomain)),
:(HuberLoss(3.1, 3.2)),
:(PeriodicLoss(scale=2*pi)),
:(PeriodicLoss(2*pi, scale=4)),
:(PeriodicLoss())
]
for expression in bad_losses
try
eval(expression);
println("test FAILED for $expression")
catch
println("test PASSED for $expression (failed to construct)")
end
end
m,n,k = 1000, length(losses), 5;
d = embedding_dim(losses)
X_real, Y_real = 2*randn(m,k), 2*randn(k,d);
XY_real = X_real*Y_real;
# tests default imputations and implicit domains
# we can visually inspect the differences between A and A_real to make sure imputation is right
A = impute(losses, XY_real)
regscale = 1
yregs = Array(Regularizer, length(losses))
for i=1:length(losses)
if typeof(losses[i]) == MultinomialOrdinalLoss ||
typeof(losses[i]) == OrdisticLoss
yregs[i] = OrdinalReg(QuadReg(regscale))
else
yregs[i] = QuadReg(regscale)
end
end
# tests all the M-estimators with scale=false, offset=false
glrm = GLRM(A, losses, QuadReg(regscale), yregs, 5, scale=false, offset=false);
# interestingly adding an offset to a model with multidimensional ordinal data causes a segfault
# but let's test the offset for everything but ordinals
# oops we still get a segfault...
# tamecols = [typeof(losses[i]) !== MultinomialOrdinalLoss &&
# typeof(losses[i]) !== OrdisticLoss
# for i=1:length(losses)]
# glrm = GLRM(A[:, tamecols],
# losses[tamecols],
# QuadReg(regscale),
# yregs[tamecols],
# 5, scale=false, offset=true)
# tests eval and grad
@time X,Y,ch = fit!(glrm);
# tests initialization
init_svd!(glrm)
| [
3500,
7754,
27520,
5841,
1424,
198,
198,
2,
1332,
9089,
287,
9089,
13,
20362,
198,
82,
25192,
7,
16,
1776,
198,
198,
22462,
274,
796,
685,
198,
4507,
324,
43,
793,
22784,
198,
4507,
324,
43,
793,
7,
940,
828,
198,
43,
16,
43,
79... | 2.428101 | 911 |
using SerialPorts
function _end_of_command(ser)
for i in 0:2
write(stdout, 0xff)
write(ser, 0xff)
end
end
function _execute_command(ser, cmd)
println(cmd)
write(ser, cmd)
_end_of_command(ser)
end
function main()
ser = SerialPort("COM4", 9600)
_execute_command(ser, "page 0")
sleep(2)
_execute_command(ser, "page 1")
sleep(2)
_execute_command(ser, "t0.txt=\"Hello\"")
close(ser)
end
main()
| [
3500,
23283,
47,
2096,
198,
198,
8818,
4808,
437,
62,
1659,
62,
21812,
7,
2655,
8,
198,
220,
220,
220,
329,
1312,
287,
657,
25,
17,
198,
220,
220,
220,
220,
220,
220,
220,
3551,
7,
19282,
448,
11,
657,
47596,
8,
198,
220,
220,
... | 2.180952 | 210 |
<gh_stars>0
using Distributed
using Distributions
nb_draws = 100000
function inside_circle(x::Float64, y::Float64)
output = 0
if x^2 + y^2 <= 1
output = 1
end
return output
end
function pi_serial(nbPoints::Int64 = 128 * 1000; d=Uniform(-1.0,1.0))
#draw NbPoints from within the square centered in 0
#with side length equal to 2
xDraws = rand(d, nbPoints)
yDraws = rand(d, nbPoints)
sumInCircle = 0
for (xValue, yValue) in zip(xDraws, yDraws)
sumInCircle+=inside_circle(xValue, yValue)
end
return 4*sumInCircle/nbPoints
end
pi_approx = pi_serial(nb_draws);
println("Approximation for Pi $(pi_approx) with $(nb_draws) random draws")
println("True value is $(pi)")
| [
27,
456,
62,
30783,
29,
15,
198,
3500,
4307,
6169,
198,
3500,
46567,
507,
198,
198,
46803,
62,
19334,
82,
796,
1802,
830,
198,
198,
8818,
2641,
62,
45597,
7,
87,
3712,
43879,
2414,
11,
331,
3712,
43879,
2414,
8,
198,
220,
220,
220... | 2.512281 | 285 |
#=
From
"CAV 2020 Tutorial: Probabilistic Programming: A Guide for Verificationists"
https://www.youtube.com/watch?v=yz5uUf_03Ik&t=2657s
Around @23:30
Summary Statistics
parameters mean std naive_se mcse ess rhat ess_per_sec
Symbol Float64 Float64 Float64 Float64 Float64 Float64 Float64
pr 0.3290 0.2330 0.0023 0.0036 4184.9815 0.9999 900.7709
p 0.0000 0.0000 0.0000 0.0000 NaN NaN NaN
x 14.0153 0.4955 0.0050 0.0062 5962.5178 0.9999 1283.3659
cf ~/webppl/gaussian_mixture_model2.wppl
=#
using Turing, StatsPlots, DataFrames
include("jl_utils.jl")
@model function gaussian_mixture_model2(xval=14.5)
pr ~ Uniform(0,1)
p ~ flip(pr)
# x ~ p ? Normal(10.0,1.0) : Normal(14.0,0.5)
# x ~ p ? Normal(10.0,1.0) : Normal(14.0,1)
# Note: Both these approaches give incorrect solutions
# x == 14.5 || begin Turing.@addlogprob! -Inf; return end
# true ~ Dirac(x == 14.5)
# true ~ Dirac(x == 8.5)
# true ~ Dirac(x == 12.5)
# This is the way, "observe externally"
xval ~ p ? Normal(10.0,1.0) : Normal(14.0,0.5)
# Post
x_post ~ p ? Normal(10.0,1.0) : Normal(14.0,0.5)
end
model = gaussian_mixture_model2()
# chns = sample(model, Prior(), 10_000)
# chns = sample(model, MH(), 100_000)
chns = sample(model, PG(5), 10_000)
# chns = sample(model, SMC(), 10_000)
# chns = sample(model, IS(), 10_000)
# chns = sample(model, NUTS(), 10_000)
# chns = sample(model, HMC(0.1,5), 10_000)
# chns = sample(model, Gibbs(HMC(0.1,5,:pr,:x),PG(5,:p)), 10_000)
# chns = sample(model, Gibbs(HMC(0.1,5,:pr,:x),MH(:p)), 10_000)
# chns = sample(model, Gibbs(NUTS(1000,0.65,:pr,:x),PG(15,:p)), 10_000)
display(chns)
# display(plot(chns))
| [
2,
28,
628,
220,
3574,
198,
220,
366,
8141,
53,
12131,
36361,
25,
30873,
14991,
2569,
30297,
25,
317,
10005,
329,
4643,
2649,
1023,
1,
198,
220,
3740,
1378,
2503,
13,
11604,
13,
785,
14,
8340,
30,
85,
28,
45579,
20,
84,
52,
69,
... | 1.992545 | 939 |
function main(dataPath)
lines = parse(readall(pipeline(`cat $[dataPath]ratings.csv`, `wc -l`)))
@time ratings = readdlm(dataPath * "ratings.csv", ',', header=true, dims=(lines, 4))
ratingsHeader = ratings[2]
ratings = ratings[1]
order = shuffle(collect(1:size(ratings)[1]))
ratings = ratings[order, :]
trainEnd = Int(floor(size(ratings)[1] * 0.8))
train = ratings[1 : trainEnd, :]
crossEnd = Int(floor(size(ratings)[1] * 0.9))
cross = ratings[trainEnd+1 : crossEnd, :]
test = ratings[crossEnd+1 : end, :]
train = [ratingsHeader; train]
cross = [ratingsHeader; cross]
test = [ratingsHeader; test]
@printf "Finished moving, begin saving\n"
writecsv(dataPath * "train.csv", train)
writecsv(dataPath * "cross.csv", cross)
writecsv(dataPath * "test.csv", test)
@printf "Done :)\n"
end
main("ml-latest-small/fixed/")
| [
8818,
1388,
7,
7890,
15235,
8,
198,
197,
6615,
796,
21136,
7,
961,
439,
7,
79,
541,
4470,
7,
63,
9246,
720,
58,
7890,
15235,
60,
10366,
654,
13,
40664,
47671,
4600,
86,
66,
532,
75,
63,
22305,
198,
197,
31,
2435,
10109,
796,
110... | 2.599379 | 322 |
<reponame>biomass-dev/BioMASS.jl
using PyCall
function __init__()
py"""
import os
import shutil
import re
def main(model_path):
''' Convert fitparam/n/*.dat -> out/n/*.npy
Parameters
----------
model_path : str
Path to your model written in Julia.
Usage
-----
$ python dat2npy.py
$ mv dat2npy/out/ path_to_biomass/
'''
try:
import numpy as np
except ImportError:
print("numpy: Not installed")
n_file = []
fitparam_files = os.listdir(
os.path.join(
model_path,
"fitparam",
)
)
for file in fitparam_files:
if re.match(r"\d", file):
n_file.append(int(file))
for nth_paramset in n_file:
os.makedirs(
os.path.join(
model_path,
"dat2npy",
"out",
f"{nth_paramset:d}",
),
exist_ok=True,
)
nth_fitparam_files = os.listdir(
os.path.join(
model_path,
"fitparam",
f"{nth_paramset:d}",
)
)
for dat_file in nth_fitparam_files:
if os.path.splitext(dat_file)[-1] == ".dat":
if "fit" in dat_file:
'''
- fit_param%d.dat -> fit_param%d.npy
- best_fitness.dat -> best_fitness.npy
'''
try:
data = np.loadtxt(
os.path.join(
model_path,
"fitparam",
f"{nth_paramset:d}",
f"{dat_file}",
),
dtype="float",
)
except ValueError:
pass
else:
'''
- count_num.dat -> count_num.npy
- generation.dat -> generation.npy
'''
data = np.loadtxt(
os.path.join(
model_path,
"fitparam",
f"{nth_paramset:d}",
f"{dat_file}",
),
dtype="int",
)
np.save(
os.path.join(
model_path,
"dat2npy",
"out",
f"{nth_paramset:d}",
dat_file.replace(".dat", ".npy"),
),
data,
)
if os.path.isfile(
os.path.join(
model_path,
"logs",
f"{nth_paramset:d}.log",
)
):
shutil.copyfile(
os.path.join(
model_path,
"logs",
f"{nth_paramset:d}.log",
),
os.path.join(
model_path,
"dat2npy",
"out",
f"{nth_paramset:d}",
"optimization.log",
),
)
"""
end
param2biomass(model_path::String) = py"main"(model_path) | [
27,
7856,
261,
480,
29,
8482,
296,
562,
12,
7959,
14,
42787,
44,
10705,
13,
20362,
198,
3500,
9485,
14134,
198,
198,
8818,
11593,
15003,
834,
3419,
198,
220,
220,
220,
12972,
37811,
198,
220,
220,
220,
1330,
28686,
198,
220,
220,
22... | 1.385652 | 2,746 |
using hyporheicBiogeochemistry, DifferentialEquations
α = 1.6; τ₋ = 0.01; τ₊ = 1000.0; k = 0.0; V_frac = 1.0
q = qCalc_powerLaw(α, τ₋, τ₊, V_frac)
p = (q, α, τ₋, τ₊, k)
h(p,t) = [50.0]
u0 = [100.0]
tspan = (1000.0, 2000.0)
f = build_sam_model_dde(E_powerLaw, foc, tspan[1])
j = build_sam_model_dde2(E_powerLaw, foc, tspan[1])
problem = DDEProblem(f, u0, h, tspan, p)
problem2 = DDEProblem(j, u0, h, tspan, p)
@time sol = solve(problem, MethodOfSteps(RK4()))
@time sol2 = solve(problem2, MethodOfSteps(RK4()))
using Plots
plot(sol)
ilt = build_sam_model_lt(E_powerLaw, foc, 1000, 100, 0, p)
out = [ilt(t) for t = 800:1200]
# ILT method is wierd - may need to tune number of terms and/or use incomplete gamma function in place of quadGK integral | [
3500,
2537,
1819,
258,
291,
42787,
469,
37074,
11,
20615,
498,
23588,
602,
198,
198,
17394,
796,
352,
13,
21,
26,
46651,
158,
224,
233,
796,
657,
13,
486,
26,
46651,
158,
224,
232,
796,
8576,
13,
15,
26,
479,
796,
657,
13,
15,
2... | 2.151862 | 349 |
<reponame>grahamas/AxisIndices.jl<gh_stars>10-100
using Documenter
using AxisIndices
using LinearAlgebra
using Metadata
using Statistics
makedocs(;
modules=[AxisIndices],
format=Documenter.HTML(),
pages=[
"index.md",
"references.md",
],
repo="https://github.com/Tokazama/AxisIndices.jl/blob/{commit}{path}#L{line}",
sitename="AxisIndices.jl",
authors="<NAME>",
)
deploydocs(
repo = "github.com/Tokazama/AxisIndices.jl.git",
)
| [
27,
7856,
261,
480,
29,
70,
13220,
292,
14,
31554,
271,
5497,
1063,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
198,
3500,
16854,
263,
198,
3500,
38349,
5497,
1063,
198,
3500,
44800,
2348,
29230,
198,
3500,
3395,
14706,
1... | 2.287081 | 209 |
<reponame>UnofficialJuliaMirror/IntArrays.jl-45d23951-e9a5-545c-8049-e4c4887f5525<filename>src/matrix.jl
const IntMatrix{w,T} = IntArray{w,T,2}
function IntMatrix{w,T}(m::Integer, n::Integer, mmap::Bool=false) where {w,T}
return IntArray{w,T}((m, n), mmap)
end
function IntMatrix{w,T}(mmap::Bool=false) where {w,T}
return IntArray{w,T}((0, 0), mmap)
end
function IntMatrix{w}(matrix::AbstractMatrix{T}) where {w,T}
return IntArray{w,T,2}(matrix)
end
function Base.convert(::Type{IntMatrix{w}}, matrix::AbstractMatrix{T}) where {w,T}
return convert(IntArray{w,T,2}, matrix)
end
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
14,
5317,
3163,
20477,
13,
20362,
12,
2231,
67,
1954,
50119,
12,
68,
24,
64,
20,
12,
45326,
66,
12,
1795,
2920,
12,
68,
19,
66,
2780,
5774,
69,
2816,
1495,
27,
34345,
... | 2.244361 | 266 |
<filename>test/tracers_test.jl
using eFEM, Test
| [
27,
34345,
29,
9288,
14,
2213,
49908,
62,
9288,
13,
20362,
198,
3500,
304,
37,
3620,
11,
6208,
220,
198
] | 2.45 | 20 |
@ghdef mutable struct Installation
id::Union{Int, Nothing}
end
namefield(i::Installation) = i.id
@api_default function create_access_token(api::GitHubAPI, i::Installation, auth::JWTAuth; headers = Dict(), options...)
headers["Accept"] = "application/vnd.github.machine-man-preview+json"
payload = gh_post_json(api, "/installations/$(i.id)/access_tokens", auth = auth,
headers=headers, options...)
OAuth2(payload["token"])
end
@api_default function installations(api::GitHubAPI, auth::JWTAuth; headers = Dict(), options...)
headers["Accept"] = "application/vnd.github.machine-man-preview+json"
results, page_data = gh_get_paged_json(api, "/app/installations", auth = auth,
headers=headers, options...)
map(Installation, results), page_data
end
@api_default function repos(api::GitHubAPI, inst::Installation; headers = Dict(), options...)
headers["Accept"] = "application/vnd.github.machine-man-preview+json"
results, page_data = github_paged_get(api, "/installation/repositories";
headers=headers, options...)
mapreduce(x->map(Repo, JSON.parse(HTTP.payload(x, String))["repositories"]), vcat, results; init=Repo[]), page_data
end
| [
31,
456,
4299,
4517,
540,
2878,
32588,
198,
220,
220,
220,
4686,
3712,
38176,
90,
5317,
11,
10528,
92,
198,
437,
198,
198,
3672,
3245,
7,
72,
3712,
30838,
8,
796,
1312,
13,
312,
628,
198,
31,
15042,
62,
12286,
2163,
2251,
62,
1552... | 2.818824 | 425 |
"""
#### function ```get_line_params(xi::Float64, df::DataFrame)```
<br>
Description of ```get_line_params```
-----------------------------------------
Returns the parameters a,b of linear approximation y = ax +b. For the segment where the random number ξ falls into.
``
"""
function get_line_params(xi::Float64, df::DataFrame)
row = df[(df.minE .< xi) .& (df.maxE .> xi),:]
return (row.a[1], row.b[1])
end
"""
#### function ```get_line_params(x1::Float64, y1::Float64, x2::Float64, y2::Float64)```
<br>
Description of ```get_line_params```
-----------------------------------------
Returns the parameters a,b of linear approximation y = ax +b.
``
"""
function get_line_params(x1::Float64, y1::Float64, x2::Float64, y2::Float64)
a = (y1 - y2) / (x1 - x2)
b = y1 - a*x1
return a,b #returns the parameters of line given by y = b + ax
end
get_line_point(x::Float64,a::Float64,b::Float64) = b + a*x
"""
#### function ```get_integral_linear(minE::Real, maxE::Real, a::Real, b::Real)```
<br>
Description of ```get_integral_linear```
-----------------------------------------
Return the definitive integral of ``
\\int_{minE}^{maxE} (a_iE + b_i) dE= \\frac{a_i}{2}(maxE^2 - minE^2) + b(maxE - minE)
``
"""
function get_integral_linear(minE::Real, maxE::Real, a::Real, b::Real)
return 0.5*a * (maxE^2 - minE^2) + b * ( maxE - minE )
end
function solvequadratic(a, b, c)
d = sqrt(b^2 - 4*a*c)
return (-b - d) / (2*a), (-b + d) / (2*a)
end
function plot_lines(step::Real, df::DataFrame, c::ColorPalette)
p::Plots.Plot = plot()
cp::Float64 = 1.0
for e1 in 1:step:length(unique(df.E1))
for row in eachrow(df[df.E1 .== unique(df.E1)[e1],:])
xs = row.minE : 1e-3 : row.maxE
line2(x) = get_line_point(x, row.a, row.b)
plot!(xs, line2.(xs), lw = 2, alpha = 0.3, c = c[ceil(Int,cp)], legend = :false,
xlabel = "E2 [MeV]", ylabel ="dGdE", ylims = (minimum(df.minG), 1.1*maximum(df.maxG)),
title = "projection of the linear approximations, \n every $step")
end
if cp < 250.0
cp += step/10.0
end
end
return p
end | [
37811,
198,
4242,
2163,
7559,
63,
1136,
62,
1370,
62,
37266,
7,
29992,
3712,
43879,
2414,
11,
47764,
3712,
6601,
19778,
8,
15506,
63,
220,
198,
27,
1671,
29,
628,
220,
220,
220,
12489,
286,
7559,
63,
1136,
62,
1370,
62,
37266,
15506... | 2.222668 | 997 |
<reponame>findmyway/MLStyle.jl
module MLStyle
export @case, @data, @match, Pattern, Case, Failed, failed, PatternDef, pattern_match, app_pattern_match, (..), enum_next
include("utils.jl")
include("Err.jl")
using MLStyle.Err
include("Match.jl")
using MLStyle.Match
include("ADT.jl")
using MLStyle.ADT
include("MatchExt.jl")
using MLStyle.MatchExt
include("Data/Data.jl")
using MLStyle.Data
end # module
| [
27,
7856,
261,
480,
29,
19796,
1820,
1014,
14,
5805,
21466,
13,
20362,
198,
21412,
13981,
774,
293,
198,
198,
39344,
2488,
7442,
11,
2488,
7890,
11,
2488,
15699,
11,
23939,
11,
8913,
11,
22738,
11,
4054,
11,
23939,
7469,
11,
3912,
6... | 2.662338 | 154 |
using HDF5
using JSON
savepath = "SARA/NatCom2020/outer/data/"
xrd_file = "Bi2O3_19F44_01_outer_xrd_gradients_input_noise_iSARA.h5"
# savefile = "Bi2O3_19F44_01_outer_xrd_gradients_no_input_noise.h5"
xrd_f = h5open(savepath * xrd_file, "r")
xrd_temperatures = read(xrd_f, "temperatures")
xrd_dwelltimes = read(xrd_f, "dwelltimes")
xrd_coefficients = read(xrd_f, "coefficients")
close(xrd_f)
xrd_f = h5open(savepath * "Bi2O3_19F44_01_outer_xrd_data.h5", "r")
xrd_positions = read(xrd_f, "positions")
close(xrd_f)
include("../inner/inner_load_data.jl")
path = "/Users/sebastianament/Documents/SEA/XRD Analysis/SARA/Bi2O3_19F44_01/"
# path = ".."
file = "Bi2O3_19F44_01_inner_loop_data.json"
f = JSON.parsefile(path * file)
optical_positions, optical_coefficients, rescaling_parameters, optical_temperatures, optical_dwelltimes = load_data(f)
using Plots
plotly()
for i in 1:8
p = 10
match = (T, τ) -> xrd_temperatures[end, i] ≈ T && xrd_dwelltimes[end, i] ≈ τ
j = 1
while true
if match(optical_temperatures[j]-p, optical_dwelltimes[j])
break
end
j += 1
end
# j = findfirst(match, [optical_temperatures, optical_dwelltimes])
plot(yscale = :log10)
# xrd = xrd_coefficients[1, :, i]
# opt = optical_coefficients[j][1]
# xrd = abs.(diff(xrd))
# opt = abs.(diff(opt))
xrd = sum(abs2, diff(xrd_coefficients[:, :, i], dims = 2), dims = 1)
opt = [optical_coefficients[j][k1][k2] for k1 in 1:16, k2 in 1:151]
opt = sum(abs2, diff(opt, dims = 2), dims = 1)
xrd = vec(xrd)
opt = vec(opt)
xrd ./= maximum(xrd)
opt ./= maximum(opt)
xrd_pos = xrd_positions[:, i]
opt_pos = optical_positions[j]
xrd_pos = xrd_pos[1:end-1]
opt_pos = opt_pos[1:end-1]
plot!(xrd_pos, xrd, label = "xrd")
plot!(opt_pos, opt, label = "opt")
gui()
end
| [
3500,
5572,
37,
20,
198,
3500,
19449,
198,
21928,
6978,
796,
366,
50,
24401,
14,
47849,
5377,
42334,
14,
39605,
14,
7890,
30487,
198,
87,
4372,
62,
7753,
796,
366,
23286,
17,
46,
18,
62,
1129,
37,
2598,
62,
486,
62,
39605,
62,
87,... | 2.093785 | 885 |
using Test
using LinearAlgebraicRepresentation
Lar = LinearAlgebraicRepresentation
using ViewerGL
GL = ViewerGL
@testset "GLShader.jl" begin
# function GLShader(vertex, fragment)
@testset "GLShader" begin
@test
@test
@test
@test
end
# function releaseGpuResources(shader::GLShader)
@testset "releaseGpuResources" begin
@test
@test
@test
@test
end
# function createShader(type,source)
@testset "createShader" begin
@test
@test
@test
@test
end
# function enableProgram(shader)
@testset "enableProgram" begin
@test
@test
@test
@test
end
# function disableProgram(shader)
@testset "disableProgram" begin
@test
@test
@test
@test
end
end
| [
3500,
6208,
198,
3500,
44800,
2348,
29230,
291,
40171,
341,
198,
43,
283,
796,
44800,
2348,
29230,
291,
40171,
341,
198,
3500,
3582,
263,
8763,
198,
8763,
796,
3582,
263,
8763,
198,
198,
31,
9288,
2617,
366,
8763,
2484,
5067,
13,
2036... | 2.27762 | 353 |
using SatelliteToolbox
"""
T = disturbance(sun_vecs, air_density, current_qua)
擾乱によるトルクの計算
# Arguments
- `sun_vecs`: 太陽方向ベクトル@SEOF
- `air_density`:大気密度
- `sat_velocity`:衛星速度ベクトル@seof
- `current_qua`: 現在の姿勢クォータニオン
# Returns
- `T`: 擾乱によるトルクの合計
"""
function disturbance(sun_vecs, air_density, sat_velocity, current_qua)
sun_vecs = [0.05350930426409988, -0.8245359539307662, -0.5632736590971152]
air_density = 1.8883960963389464e-12
sat_velocity = [norm([2825.0313914335334, -4280.551397611556, -5681.579003761858]), 0., 0.]
current_qua = SatelliteToolbox.Quaternion(cos(π/4), sin(π/4), 0, 0)
#Ts = sun_pressure(sun_vecs, current_qua)
#Ta = air_pressure(air_density, sat_velocity, current_qua)
#println(Ts)
#println(Ta)
T = 0#Ts #+ Ta
return T
end
"""
T = sun_pressure(sun_vecs, current_qua, sat_size, cm)
太陽輻射圧の計算
# Arguments
- `sun_vecs`: 太陽方向ベクトル@SEOF
- `current_qua`: 現在の姿勢クォータニオン
- `sat_size` : 衛星の形状(x, y, z)方向長さ[x,y,z][m]
- `cm` : 衛星体心から見た重心の位置ベクトル [x,y,z][m]
# Returns
- `T`: 太陽輻射圧によるトルク
"""
function sun_pressure(sun_vecs, current_qua, sat_size, cm)
Fe = 1353. #太陽定数 [W/m2]
c = 299792458 # 光速 [m/s]
ρs = 0.6 #鏡面反射割合
ρd = 0.3 #散乱反射割合
ρa = 0.1 #吸収割合
T = [0., 0., 0.]
#太陽方向ベクトルのBody座標系変換
sunvecs_scsfqua = current_qua \ sun_vecs * current_qua
#println(sunvecs_scsfqua)
sunvecs_scsf = [sunvecs_scsfqua.q1, sunvecs_scsfqua.q2, sunvecs_scsfqua.q3]
if norm(sunvecs_scsf) == 0
return T
else
sunvecs_scsf_unit = sunvecs_scsf / norm(sunvecs_scsf)
#x面に働く力
if sunvecs_scsf[1] >= 0
n = [1., 0., 0.]
r = [sat_size[1]/2, 0., 0.] - cm
else
n = [-1., 0., 0.]
r = [-sat_size[1]/2, 0., 0.] - cm
end
A = sat_size[2]*sat_size[3]
P = Fe/c
F = - P*A*dot(n,sunvecs_scsf_unit)* ((ρa+ρd)*sunvecs_scsf_unit + (2*dot(sunvecs_scsf_unit, n)ρs+2/3*ρd)*n)
T = T + cross(r,F)
# y面に働く力
if sunvecs_scsf[2] >= 0
n = [0., 1., 0.]
r = [ 0., sat_size[2]/2, 0.] - cm
else
n = [0., -1., 0.]
r = [0., -sat_size[2]/2, 0.] - cm
end
A = sat_size[1]*sat_size[3]
F = - P*A*dot(n,sunvecs_scsf_unit)* ((ρa+ρd)*sunvecs_scsf_unit + (2*dot(sunvecs_scsf_unit, n)ρs+2/3*ρd)*n)
T = T + cross(r,F)
# z面に働く力
if sunvecs_scsf[3] >= 0
n = [0., 0., 1.]
r = [0., 0., sat_size[3]/2] - cm
else
n = [0., 0., -1.]
r = [0., 0., -sat_size[3]/2] - cm
end
A = sat_size[1]*sat_size[2]
F = - P*A*dot(n,sunvecs_scsf_unit)* ((ρa+ρd)*sunvecs_scsf_unit + (2*dot(sunvecs_scsf_unit, n)ρs+2/3*ρd)*n)
T = T + cross(r,F)
return T
end
end
"""
T = air_pressure(density, current_qua)
空力トルクの計算
# Arguments
- `density`: 大気密度
- `vel_seof`:衛星の速度ベクトル@SEOF
- `current_qua`: 現在の姿勢クォータニオン
- `sat_size` : 衛星の形状(x, y, z)方向長さ[x,y,z][m]
- `cm` : 衛星体心から見た重心の位置ベクトル [x,y,z][m]
- `Cd` : 抗力係数(宇宙空間では通常2~3)
# Returns
- `T`: 空力トルク
"""
function air_pressure(density, vel_seof, current_qua, sat_size, cm, Cd)
# 速度ベクトルのBody座標系への変換
vel_scsfqua = current_qua \ vel_seof * current_qua
vel_scsf = [vel_scsfqua.q1, vel_scsfqua.q2, vel_scsfqua.q3]
T = [0., 0., 0.]
#x面に働く力
if vel_scsf[1] >= 0
n = [1., 0., 0.]
r = [sat_size[1]/2, 0., 0.] - cm
else
n = [-1., 0., 0.]
r = [-sat_size[1]/2, 0., 0.] - cm
end
A = sat_size[2]*sat_size[3]
F = -1/2 *Cd * density * dot(n, vel_scsf) * vel_scsf * A
dT = cross(r, F)
T = T + dT
# y面に働く力
if vel_scsf[2] >= 0
n = [0., 1., 0.]
r = [ 0., sat_size[2]/2, 0.] - cm
else
n = [0., -1., 0.]
r = [0., -sat_size[2]/2, 0.] - cm
end
A = sat_size[1]*sat_size[3]
F = -1/2 *Cd * density * dot(n, vel_scsf) * vel_scsf * A
dT = cross(r, F)
T = T + dT
# z面に働く力
if vel_scsf[3] >= 0
n = [0., 0., 1.]
r = [0., 0., sat_size[3]/2] - cm
else
n = [0., 0., 1.]
r = [0., 0., -sat_size[1]/2] - cm
end
A = sat_size[1]*sat_size[2]
F = -1/2 *Cd * density * dot(n, vel_scsf) * vel_scsf * A
dT = cross(r, F)
T = T + dT
return T
end
## 以下,修正前の擾乱計算関数
#=
"""
T = sun_pressure(sun_vecs, current_qua)
太陽輻射圧の計算
# Arguments
- `sun_vecs`: 太陽方向ベクトル@SEOF
- `current_qua`: 現在の姿勢クォータニオン
# Returns
- `T`: 太陽輻射圧によるトルク
"""
function sun_pressure(sun_vecs, current_qua)
sat_size = [0.1 , 0.1 , 0.1] #衛星各辺長さ [x,y,z][m]
cm = [0.005, 0.005, 0.005] #衛星重心のずれ [x,y,z][m]
I = 1353. #太陽定数 [W/m2]
c = 299792458 # 光速 [m/s]
ρs = 0.6 #鏡面反射割合
ρd = 0.3 #散乱反射割合
ρa = 0.1 #吸収割合
T = [0., 0., 0.]
if sun_vecs == [0., 0., 0.]
return T
else
#太陽方向ベクトルのBody座標系変換
sunvecs_scsfqua = current_qua \ sun_vecs * current_qua
#println(sunvecs_scsfqua)
sunvecs_scsf = [sunvecs_scsfqua.q1, sunvecs_scsfqua.q2, sunvecs_scsfqua.q3]
#x面に働く力
if sunvecs_scsf[1] >= 0
n = [1., 0., 0.]
cf = [sat_size[1]/2, 0., 0.]
else
n = [-1., 0., 0.]
cf = [-sat_size[1]/2, 0., 0.]
end
S = -sunvecs_scsf / norm(sunvecs_scsf)
A = sat_size[2]*sat_size[3]
P = I/c
F =P*A*dot(n,S)*((ρa+ρd)*S+(2*ρs+2/3*ρd)*n)
r = cf - cm
T = T + cross(r,F)
# y面に働く力
if sunvecs_scsf[2] >= 0
n = [0., 1., 0.]
cf = [ 0., sat_size[2]/2, 0.]
else
n = [0., -1., 0.]
cf = [0., -sat_size[2]/2, 0.]
end
S = -sunvecs_scsf / norm(sunvecs_scsf)
A = sat_size[1]*sat_size[3]
P = I/c
F = P*A*dot(n,S)*((ρa+ρd)*S+(2*ρs+2/3*ρd)*n)
r = cf - cm
T = T + cross(r,F)
# z面に働く力
if sunvecs_scsf[3] >= 0
n = [0., 0., 1.]
cf = [0., 0., sat_size[3]/2]
else
n = [0., 0., 1.]
cf = [0., 0., -sat_size[1]/2]
end
S = -sunvecs_scsf / norm(sunvecs_scsf)
A = sat_size[1]*sat_size[2]
P = I/c
F = P*A*dot(n,S)*((ρa+ρd)*S+(2*ρs+2/3*ρd)*n)
r = cf - cm
T = T + cross(r,F)
return T
end
end
"""
T = air_pressure(density, current_qua)
空力トルクの計算
# Arguments
- `density`: 大気密度
- `vel_seof`:衛星の速度ベクトル@SEOF
- `current_qua`: 現在の姿勢クォータニオン
# Returns
- `T`: 空力トルク
"""
function air_pressure(density, vel_seof, current_qua)
sat_size = [0.1 , 0.1 , 0.1] #衛星各辺長さ [x,y,z][m]
cm = [0.005, 0.005, 0.005] #衛星重心のずれ [x,y,z][m]
Cd = 1.12 #抗力係数(各面を正方形と近似)
# 速度ベクトルのBody座標系への変換
vel_scsfqua = current_qua \ vel_seof * current_qua
vel_scsf = [vel_scsfqua.q1, vel_scsfqua.q2, vel_scsfqua.q3]
T = [0., 0., 0.]
#x面に働く力
if vel_scsf[1] >= 0
n = [1., 0., 0.]
cf = [sat_size[1]/2, 0., 0.]
else
n = [-1., 0., 0.]
cf = [-sat_size[1]/2, 0., 0.]
end
A = sat_size[2]*sat_size[3]
ξ = norm(cm)
v_perp = dot(vel_scsf, -n) / norm(vel_scsf) * norm(vel_scsf)
F = 1/2 * density * v_perp^2 * A * Cd * (-n)
dT = cross(cf-cm, F)
T = T + dT
# y面に働く力
if vel_scsf[2] >= 0
n = [0., 1., 0.]
cf = [ 0., sat_size[2]/2, 0.]
else
n = [0., -1., 0.]
cf = [0., -sat_size[2]/2, 0.]
end
A = sat_size[1]*sat_size[3]
v_perp = dot(vel_scsf, -n) / norm(vel_scsf) * norm(vel_scsf)
F = 1/2 * density * v_perp^2 * A * Cd * (-n)
dT = cross(cf-cm, F)
T = T + dT
# z面に働く力
if vel_scsf[3] >= 0
n = [0., 0., 1.]
cf = [0., 0., sat_size[3]/2]
else
n = [0., 0., 1.]
cf = [0., 0., -sat_size[1]/2]
end
A = sat_size[1]*sat_size[2]
v_perp = dot(vel_scsf, -n) / norm(vel_scsf) * norm(vel_scsf)
F = 1/2 * density * v_perp^2 * A * Cd * (-n)
dT = cross(cf-cm, F)
T = T + dT
return T
end
=# | [
3500,
33530,
25391,
3524,
198,
198,
37811,
198,
51,
796,
30497,
7,
19155,
62,
303,
6359,
11,
1633,
62,
43337,
11,
1459,
62,
39566,
8,
198,
198,
162,
241,
122,
20046,
109,
28618,
1792,
230,
25748,
13298,
9202,
14099,
5641,
164,
101,
... | 1.489472 | 5,414 |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,jl:hydrogen
# text_representation:
# extension: .jl
# format_name: hydrogen
# format_version: '1.3'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Julia 1.6.4
# language: julia
# name: julia-1.6
# ---
# %%
using Distributions
using Plots
using Optim
traceon = Optim.Options(
store_trace = true,
extended_trace = true
)
# model
Distributions.TDist(μ, ρ, ν) = LocationScale(μ, ρ, TDist(ν))
# test data
X = [-0.01, 0.01, 1.0]
# Warning: Very Slow!
lower = [-0.1, -15.0, -2.5]
upper = [ 0.4, 10.0, 8.0]
@show lower upper
r = @time optimize(x -> -loglikelihood(TDist(x[1], 10^x[2], 10^x[3]), X), lower, upper, [0.33, 0.0, 0.27], Fminbox(LBFGS()), traceon)
# %%
function plot_trace(r)
x = r.initial_x
c = hcat(x, (t.metadata["x"] for t in r.trace)...)
l = -minimum(r)
m = r.minimizer
t = "initial = $x\nfinal = $(round.(m; digits=3))\nfinal loglikelihood = $(round(l; digits=3))"
plot(; legend=:topleft)
scatter3d!([x[1]], [x[2]], [x[3]]; label="initial", c=:blue, ms=3)
plot!(c[1,:], c[2,:], c[3,:]; label="path", c=:darkcyan)
scatter3d!([m[1]], [m[2]], [m[3]]; label="final", c=:yellow, ms=3)
plot!(; xlabel="μ", ylabel="log₁₀ρ", zlabel="log₁₀ν")
title!(t; titlefontsize=10)
end
plot_trace(r)
plot!(; fmt=:png)
# %%
function f(μ, log10ν, X)
log10rhos = range(-15, 10; length=200)
-maximum(loglikelihood(TDist(μ, 10^log10ρ, 10^log10ν), X) for log10ρ in log10rhos)
end
function plot_trace2d(r)
x = r.initial_x
c = hcat(x, (t.metadata["x"] for t in r.trace)...)
l = -minimum(r)
m = r.minimizer
t = "initial = $x\nfinal = $(round.(m; digits=3))\nfinal loglikelihood = $(round(l; digits=3))"
mus = range(-0.1, 0.4; length=200)
log10nus = range(-2.5, 8.0; length=200)
z = f.(mus', log10nus, Ref(X))
plot(; legend=:topleft, colorbar=false)
plot!(; xlim=extrema(mus), ylim=extrema(log10nus))
heatmap!(mus, log10nus, z; clim=(-1, 5), c=reverse(cgrad(:CMRmap)))
scatter!([x[1]], [x[3]]; label="initial", ms=4, c=:blue)
plot!(c[1,:], c[3,:]; label="path", c=:cyan, lw=1.5)
scatter!([m[1]], [m[3]]; label="final", ms=4, c=:yellow)
plot!(; xlabel="μ", ylabel="log₁₀ν")
title!(t; titlefontsize=10)
end
plot_trace2d(r)
plot!(; fmt=:png)
# %%
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11420,
198,
2,
474,
929,
88,
353,
25,
198,
2,
220,
220,
474,
929,
88,
5239,
25,
198,
2,
220,
220,
220,
220,
17519,
25,
20966,
2047,
65,
11,
20362,
25,
15511,
... | 2.000837 | 1,195 |
<reponame>JuliaBinaryWrappers/czmq_jll.jl
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule czmq_jll
using Base
using Base: UUID
import JLLWrappers
JLLWrappers.@generate_main_file_header("czmq")
JLLWrappers.@generate_main_file("czmq", UUID("bada3277-1da5-58a9-94c6-da212cd39369"))
end # module czmq_jll
| [
27,
7856,
261,
480,
29,
16980,
544,
33,
3219,
36918,
11799,
14,
26691,
76,
80,
62,
73,
297,
13,
20362,
198,
2,
5765,
6247,
21412,
284,
34494,
572,
257,
1178,
14204,
422,
262,
11389,
1143,
4600,
13,
7285,
63,
2393,
198,
49382,
21412,... | 2.382979 | 141 |
<reponame>dourouc05/ConstraintProgrammingExtensions.jl
"""
Bridges `CP.GlobalCardinality` to `CP.Count`.
"""
struct GlobalCardinalityFixedOpen2CountBridge{T} <: MOIBC.AbstractBridge
cons_count::Vector{MOI.ConstraintIndex{MOI.VectorAffineFunction{T}, CP.Count{MOI.EqualTo{T}}}}
end
function MOIBC.bridge_constraint(
::Type{GlobalCardinalityFixedOpen2CountBridge{T}},
model,
f::MOI.VectorOfVariables,
s::CP.GlobalCardinality{CP.FIXED_COUNTED_VALUES, CP.OPEN_COUNTED_VALUES, T},
) where {T}
return MOIBC.bridge_constraint(
GlobalCardinalityFixedOpen2CountBridge{T},
model,
MOI.VectorAffineFunction{T}(f),
s,
)
end
function MOIBC.bridge_constraint(
::Type{GlobalCardinalityFixedOpen2CountBridge{T}},
model,
f::MOI.VectorAffineFunction{T},
s::CP.GlobalCardinality{CP.FIXED_COUNTED_VALUES, CP.OPEN_COUNTED_VALUES, T},
) where {T}
f_scalars = MOIU.scalarize(f)
f_array = f_scalars[1:s.dimension]
cons_count = MOI.ConstraintIndex{MOI.VectorAffineFunction{T}, CP.Count{MOI.EqualTo{T}}}[
MOI.add_constraint(
model,
MOIU.vectorize(
MOI.ScalarAffineFunction{T}[
f_scalars[s.dimension + i],
f_array...,
]
),
CP.Count(s.dimension, MOI.EqualTo(s.values[i]))
)
for i in 1:length(s.values)
]
return GlobalCardinalityFixedOpen2CountBridge(cons_count)
end
function MOI.supports_constraint(
::Type{GlobalCardinalityFixedOpen2CountBridge{T}},
::Union{Type{MOI.VectorOfVariables}, Type{MOI.VectorAffineFunction{T}}},
::Type{CP.GlobalCardinality{CP.FIXED_COUNTED_VALUES, CP.OPEN_COUNTED_VALUES, T}},
) where {T}
return true
end
function MOIB.added_constrained_variable_types(::Type{GlobalCardinalityFixedOpen2CountBridge{T}}) where {T}
return Tuple{Type}[]
end
function MOIB.added_constraint_types(::Type{GlobalCardinalityFixedOpen2CountBridge{T}}) where {T}
return [
(MOI.VectorAffineFunction{T}, CP.Count{MOI.EqualTo{T}}),
]
end
function MOI.get(
b::GlobalCardinalityFixedOpen2CountBridge{T},
::MOI.NumberOfConstraints{
MOI.VectorAffineFunction{T}, CP.Count{MOI.EqualTo{T}},
},
) where {T}
return length(b.cons_count)
end
function MOI.get(
b::GlobalCardinalityFixedOpen2CountBridge{T},
::MOI.ListOfConstraintIndices{
MOI.VectorAffineFunction{T}, CP.Count{MOI.EqualTo{T}},
},
) where {T}
return copy(b.cons_count)
end
| [
27,
7856,
261,
480,
29,
67,
454,
280,
66,
2713,
14,
3103,
2536,
2913,
15167,
2229,
11627,
5736,
13,
20362,
198,
37811,
198,
33,
32124,
4600,
8697,
13,
22289,
16962,
1292,
414,
63,
284,
4600,
8697,
13,
12332,
44646,
198,
37811,
198,
... | 2.167095 | 1,167 |
module LevenshteinToolkit
# ----------------------------------------
# EXPORTED INTERFACE
# ----------------------------------------
export distance_matrix
export distance_row
export dfa
export nfa
export check
export draw
include("matrix.jl")
include("row.jl")
include("automata.jl")
end
| [
21412,
1004,
574,
1477,
22006,
25391,
15813,
198,
198,
2,
20368,
982,
198,
2,
7788,
15490,
1961,
23255,
49836,
198,
2,
20368,
982,
198,
198,
39344,
5253,
62,
6759,
8609,
198,
39344,
5253,
62,
808,
198,
198,
39344,
288,
13331,
198,
393... | 3.6875 | 80 |
<filename>src/filters.jl<gh_stars>0
"""
```math
\\tilde X = \\hat X_{t+1|t} = A\\hat X_{t|t}
```
"""
function KalmanFilter(M::LGSSM, y)
RR = M.R * M.R'
SS = M.S * M.S'
ydim, qdim = size(M.B)
xdim, pdim = size(M.R)
n = size(y, 1)
ϵ = zeros(ydim, 1, n)
Γ = zeros(ydim, ydim, n)
Γ⁻¹ = zeros(ydim, ydim, n)
K = zeros(xdim, ydim, n) # Kalman filter gain
H = zeros(xdim, ydim, n) # Kalman prediction gain
Λ = zeros(xdim, xdim, n)
Xp = zeros(xdim, 1, n)
Σp = zeros(xdim, xdim, n)
Xf = zeros(xdim, 1, n)
Σf = zeros(xdim, xdim, n)
ℓ = 0.0
for t in 1:n
# Prediction
if t == 1
Xp[:,:,t] = 0.0
Σp[:,:,t] = M.Σᵥ
else
Xp[:,:,t] = M.A * Xf[:,:,t-1]
Σp[:,:,t] = M.A * Σf[:,:,t-1] * M.A' + RR
end
# Correction
ϵ[:,:,t] = y[t] - M.B * Xp[:,:,t]
Γ[:,:,t] = M.B*Σp[:,:,t]*M.B' + SS
Γ⁻¹[:,:,t] = inv(Γ[:,:,t])
K[:,:,t] = Σp[:,:,t]*M.B'*Γ⁻¹[:,:,t]
H[:,:,t] = M.A*K[:,:,t]
Λ[:,:,t] = M.A - H[:,:,t]*M.B
Xf[:,:,t] = Xp[:,:,t] + K[:,:,t]*ϵ[:,:,t]
Σf[:,:,t] = Σp[:,:,t] - K[:,:,t]*M.B*Σp[:,:,t]
ℓ += logdet(Γ[:,:,t]) + ϵ[:,:,t]'*Γ⁻¹[:,:,t]*ϵ[:,:,t]
end
return Dict(:Xp => Xp,
:Xf => Xf,
:Σp => Σp,
:Σf => Σf,
:ϵ => ϵ,
:Γ => Γ,
:Γ⁻¹ => Γ⁻¹,
:K => K,
:H => H,
:Λ => Λ,
:ℓ => -ℓ/2)
end
"""
Disturbanve Smoother
Algorithm 5.2.15 Cappe 2005
"""
function DisturbanceSmoother(M::LGSSM, y)
n = size(y, 1)
pdim = size(M.B, 2)
qdim = size(y, 2)
RR = M.R*M.R'
p = zeros(pdim, 1, n-1)
C = zeros(pdim, pdim, n-1)
U = zeros(pdim, pdim, n-1)
Ξ = zeros(pdim, pdim, n-1)
Xs = zeros(pdim, 1, n)
Σs = zeros(pdim, pdim, n)
KF = KalmanFilter(M, y)
# Smoothed disturbances
for t in n-1:-1:1
if t == n-1
p[:,:,t] = M.B'*KF[:Γ⁻¹][:,:,n]*KF[:ϵ][:,:,n]
C[:,:,t] = M.B'*KF[:Γ⁻¹][:,:,n]*M.B
else
p[:,:,t] = M.B'*KF[:Γ⁻¹][:,:,t+1]*KF[:ϵ][:,:,t+1] + KF[:Λ][:,:,t+1]'*p[:,:,t+1]
C[:,:,t] = M.B'*KF[:Γ⁻¹][:,:,t+1]*M.B + KF[:Λ][:,:,t+1]'*C[:,:,t+1]*KF[:Λ][:,:,t+1]
end
U[:,:,t] = M.R'*p[:,:,t]
Ξ[:,:,t] = I - M.R'*C[:,:,t]*M.R
end
# Smoothed states
Xs[:,:,1] = M.Σᵥ*(M.B'*KF[:Γ⁻¹][:,:,1]*KF[:ϵ][:,:,1] + KF[:Λ][:,:,1]*p[:,:,1])
Σs[:,:,1] = M.Σᵥ - M.Σᵥ*(M.B'*KF[:Γ⁻¹][:,:,1]*M.B + KF[:Λ][:,:,1]'*C[:,:,1]*KF[:Λ][:,:,1])*M.Σᵥ
for t in 1:n-1
Xs[:,:,t+1] = M.A*Xs[:,:,t] + M.R*U[:,:,t]
Σs[:,:,t+1] = M.A*Σs[:,:,t]*M.A' + M.R*Ξ[:,:,t]*M.R' -
M.A*KF[:Σp][:,:,t]*KF[:Λ][:,:,t]'*C[:,:,t]*RR -
RR*C[:,:,t]*KF[:Λ][:,:,t]*KF[:Σp][:,:,t]*M.A'
end
return Dict(:Xp => KF[:Xp],
:Xf => KF[:Xf],
:Xs => Xs,
:Σp => KF[:Σp],
:Σf => KF[:Σf],
:Σs => Σs,
:ϵ => KF[:ϵ],
:Γ => KF[:Γ],
:Γ⁻¹ => KF[:Γ⁻¹],
:K => KF[:K],
:H => KF[:H],
:Λ => KF[:Λ],
:p => p,
:C => C,
:U => U,
:Ξ => Ξ,
:ℓ => KF[:ℓ])
end
"""
Backward Information Recursion
Proposition 5.2.21 Cappe (2005)
"""
function BackwardInformationRecursion(M::LGSSM, y)
n = size(y, 1)
pdim = size(M.B, 2)
qdim = size(y, 2)
RR = M.R*M.R'
SS = M.S*M.S'
SS⁻¹ = inv(SS)
κ = zeros(pdim, 1, n)
κ̃ = zeros(pdim, 1, n)
Π = zeros(pdim, pdim, n)
Π̃ = zeros(pdim, pdim, n)
Xs = zeros(pdim, 1, n)
Σs = zeros(pdim, pdim, n)
KF = KalmanFilter(M, y)
for t in n-1:-1:1
κ̃[:,:,t+1] = M.B'*SS⁻¹*y[t+1] + κ[:,:,t+1]
Π̃[:,:,t+1] = M.B'*SS⁻¹*M.B + Π[:,:,t+1]
V = inv(I + Π̃[:,:,t+1]*RR)
κ[:,:,t] = M.A'V*κ̃[:,:,t+1]
Π[:,:,t] = M.A'V*Π̃[:,:,t+1]*M.A
end
Dict(:Xp => KF[:Xp],
:Xf => KF[:Xf],
:Σp => KF[:Σp],
:Σf => KF[:Σf],
:κ => κ,
:Π => Π)
end
"""
Forward-backward smoother
Algorithm 5.2.22 Cappe (2005)
"""
function ForwardBackwardSmoother(M::LGSSM, y)
n = size(y, 1)
pdim = size(M.B, 2)
qdim = size(y, 2)
RR = M.R*M.R'
SS = M.S*M.S'
SS⁻¹ = inv(SS)
κ = zeros(pdim, 1, n)
κ̃ = zeros(pdim, 1, n)
Π = zeros(pdim, pdim, n)
Π̃ = zeros(pdim, pdim, n)
Xs = zeros(pdim, 1, n)
Σs = zeros(pdim, pdim, n)
KF = KalmanFilter(M, y)
W = inv(I + Π[:,:,n]*KF[:Σf][:,:,n])
Xs[:,:,n] = KF[:Xf][:,:,n] + KF[:Σf][:,:,n]*W*(κ[:,:,n] - Π[:,:,n]*KF[:Xf][:,:,n])
Σs[:,:,n] = KF[:Σf][:,:,n] - KF[:Σf][:,:,n]*W*Π[:,:,n]*KF[:Σf][:,:,n]
for t in n-1:-1:1
κ̃[:,:,t+1] = M.B'*SS⁻¹*y[t+1] + κ[:,:,t+1]
Π̃[:,:,t+1] = M.B'*SS⁻¹*M.B + Π[:,:,t+1]
V = inv(I + Π̃[:,:,t+1]*RR)
κ[:,:,t] = M.A'V*κ̃[:,:,t+1]
Π[:,:,t] = M.A'V*Π̃[:,:,t+1]*M.A
W = inv(I + Π[:,:,t]*KF[:Σf][:,:,t])
Xs[:,:,t] = KF[:Xf][:,:,t] + KF[:Σf][:,:,t]*W*(κ[:,:,t] - Π[:,:,t]*KF[:Xf][:,:,t])
Σs[:,:,t] = KF[:Σf][:,:,t] - KF[:Σf][:,:,t]*W*Π[:,:,t]*KF[:Σf][:,:,t]
end
Dict(:Xp => KF[:Xp],
:Xf => KF[:Xf],
:Xs => Xs,
:Σp => KF[:Σp],
:Σf => KF[:Σf],
:Σs => Σs,
:κ => κ,
:Π => Π)
end
"""
Backward Information Recursion
Algorithm 6.1.2 Cappe (2005)
"""
function BackwardMarkovianStateSampling(M::LGSSM, y, N::Int)
n = size(y, 1)
KF = KalmanFilter(M, y)
pdim = size(M.B, 2)
qdim = size(y, 2)
X = zeros(pdim, n, N)
X[:,1,:] = rand(MvNormal(reshape(KF[:Xf][:,:,1],1), KF[:Σf][:,:,1]), N)
for t in 2:n
X[:,t,:] = rand(MvNormal(reshape(KF[:Xf][:,:,t],1), KF[:Σf][:,:,t]), N)
end
return X
end
"""
Sampling with Dual Smoothing
Algorithm 6.1.3 Cappe (2005)
"""
function SamplingWithDualSmoothing(M::LGSSM, y)
n = size(y, 1)
pdim = size(M.B, 2)
qdim = size(y, 2)
XY′ = Generate(M, n)
KF = DisturbanceSmoother(M, y)
KF′ = DisturbanceSmoother(M, XY′[:Y])
return KF[:Xf] + XY′[:X] - KF′[:Xf]
end
| [
27,
34345,
29,
10677,
14,
10379,
1010,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
37811,
198,
198,
15506,
63,
11018,
198,
6852,
83,
44725,
1395,
796,
26867,
5183,
1395,
23330,
83,
10,
16,
91,
83,
92,
796,
317,
6852,
5183,
1395,
2... | 1.404023 | 4,574 |
module MCMC
Base.warn("MCMC.jl has moved to Lora.jl. Development in Lora.jl will continue. MCMC.jl is a placeholder for the future merge of various independent MCMC implementations in Julia, including Lora.jl.")
end
| [
21412,
13122,
9655,
198,
198,
14881,
13,
40539,
7203,
9655,
9655,
13,
20362,
468,
3888,
284,
406,
5799,
13,
20362,
13,
7712,
287,
406,
5799,
13,
20362,
481,
2555,
13,
13122,
9655,
13,
20362,
318,
257,
46076,
329,
262,
2003,
20121,
286... | 3.633333 | 60 |
using BoardGames
struct RandomStrategy <: Strategy end
function BoardGames.getmove(board, s::RandomStrategy)
rand(getmoves(board))
end
| [
3500,
5926,
24474,
198,
198,
7249,
14534,
13290,
4338,
1279,
25,
20561,
886,
198,
198,
8818,
5926,
24474,
13,
1136,
21084,
7,
3526,
11,
264,
3712,
29531,
13290,
4338,
8,
198,
220,
220,
220,
43720,
7,
1136,
76,
5241,
7,
3526,
4008,
1... | 3.133333 | 45 |
# This file is a part of JuliaFEM.
# License is MIT: see https://github.com/JuliaFEM/JuliaFEM.jl/blob/master/LICENSE.md
# # Generating local matrices for problems
using JuliaFEM
# Plane stress Quad4 element with linear material model:
# In JuliaFEM the plane stress element can be defined using Quad4 element which
# has four nodes. First we need to define geometry. Geometry is defined with
# node number and coordinates into a dictionary.
X = Dict(1 => [0.0, 0.0],
2 => [2.0, 0.0],
3 => [2.0, 2.0],
4 => [0.0, 2.0])
# Element is created using Element(element_type, connectivity) -function. Here
# we create a Quad4 element which is connected to nodes 1-4.
element = Element(Quad4, [1, 2, 3, 4])
# Element properties are defined using update!(element, field, value) -function.
# To create stiffness matrix we need to define geometry, Young's modulus and
# Poisson's ratio.
update!(element, "geometry", X)
update!(element, "youngs modulus", 288.0)
update!(element, "poissons ratio", 1/3)
# Then we have to create a problem using
# Problem(problem_type, name, number_of_dofs_per_node) -function. Problem type
# in this case is Elasticity and number of dofs per node is two in 2D problem.
# In the problem.properties.formulation we must define whether we are using
# :plane_stress or :plane_strain formulation.
problem = Problem(Elasticity, "example elasticity problem", 2)
problem.properties.formulation = :plane_stress
# Elements need to added to the problem using add_elements!(problem, element)
# -function.
add_elements!(problem, [element])
# Normally next thing to do after defining problems would be running the analysis
# but now we are only interested in stiffness matrix. We have to assemble the
# matrix using assemble!(problem, time) -function.
assemble!(problem, 0.0)
# Now we have the stiffness matrix in problem.assembly.K you may type it to
# console and see that it is in sparse matrix form. We can write it as normal
# matrix with full() -function.
K = full(problem.assembly.K)
display(K)
# This is not necessary but we can round it with round() -function.
K = round(K,2)
display(K)
| [
2,
770,
2393,
318,
257,
636,
286,
22300,
37,
3620,
13,
198,
2,
13789,
318,
17168,
25,
766,
3740,
1378,
12567,
13,
785,
14,
16980,
544,
37,
3620,
14,
16980,
544,
37,
3620,
13,
20362,
14,
2436,
672,
14,
9866,
14,
43,
2149,
24290,
... | 3.235294 | 663 |
<gh_stars>1-10
export RecipeInflation, filter_state!, vortexassim
struct RecipeInflation <: InflationType
"Parameters"
p::Array{Float64,1}
end
# Filtering function to bound the strength of the vortices and sources
function filter_state!(x, config::VortexConfig)
@inbounds for j=1:config.Nv
# Ensure that vortices stay above the x axis
x[3*j-1] = clamp(x[3*j-1], 1e-2, Inf)
# Ensure that the circulation remains positive
x[3*j] = clamp(x[3*j], 0.0, Inf)
end
return x
end
# This function apply additive inflation to the state components only,
# not the measurements, X is an Array{Float64,2} or a view of it
function (ϵ::RecipeInflation)(X, Ny, Nx, config::VortexConfig)
ϵX, ϵΓ = ϵ.p
Nv = config.Nv
@assert Nx == 3*Nv
for col in eachcol(X)
if config.Nv > 0
for i in 1:Nv
col[Ny + 3*(i-1) + 1: Ny + 3*(i-1) + 2] .+= ϵX*randn(2)
col[Ny + 3*i] += ϵΓ*randn()
end
end
end
end
function (ϵ::RecipeInflation)(x::AbstractVector{Float64}, config::VortexConfig)
ϵX, ϵΓ = ϵ.p
Nv = config.Nv
Nx = size(x, 1)
@assert Nx == 3*Nv
for i=1:Nv
x[3*(i-1) + 1:3*(i-1) + 2] .+= ϵX*randn(2)
x[3*(i-1) + 3] += ϵΓ*randn()
end
end
# Create a function to perform the sequential assimilation for any sequential filter SeqFilter
function vortexassim(algo::SeqFilter, X, tspan::Tuple{S,S}, config::VortexConfig, data::SyntheticData; withfreestream::Bool = false, P::Parallel = serial) where {S<:Real}
# Define the additive Inflation
ϵX = config.ϵX
ϵΓ = config.ϵΓ
β = config.β
ϵY = config.ϵY
Ny = size(config.ss,1)
ϵx = RecipeInflation([ϵX; ϵΓ])
ϵmul = MultiplicativeInflation(β)
# Set different times
Δtobs = algo.Δtobs
Δtdyn = algo.Δtdyn
t0, tf = tspan
step = ceil(Int, Δtobs/Δtdyn)
n0 = ceil(Int64, t0/Δtobs) + 1
J = (tf-t0)/Δtobs
Acycle = n0:n0+J-1
# Array dimensions
Nypx, Ne = size(X)
Nx = Nypx - Ny
ystar = zeros(Ny)
cachevels = allocate_velocity(state_to_lagrange(X[Ny+1:Ny+Nx,1], config))
h(x, t) = measure_state(x, t, config; withfreestream = withfreestream)
press_itp = CubicSplineInterpolation((LinRange(real(config.ss[1]), real(config.ss[end]), length(config.ss)),
t0:data.Δt:tf), data.yt, extrapolation_bc = Line())
yt(t) = press_itp(real.(config.ss), t)
Xf = Array{Float64,2}[]
push!(Xf, copy(state(X, Ny, Nx)))
Xa = Array{Float64,2}[]
push!(Xa, copy(state(X, Ny, Nx)))
# Run particle filter
@showprogress for i=1:length(Acycle)
# Forecast step
@inbounds for j=1:step
tj = t0+(i-1)*Δtobs+(j-1)*Δtdyn
X, _ = vortex(X, tj, Ny, Nx, cachevels, config, withfreestream = withfreestream)
end
push!(Xf, deepcopy(state(X, Ny, Nx)))
# Get real measurement
ystar .= yt(t0+i*Δtobs)
# Perform state inflation
ϵmul(X, Ny+1, Ny+Nx)
ϵx(X, Ny, Nx, config)
# Filter state
if algo.isfiltered == true
@inbounds for i=1:Ne
x = view(X, Ny+1:Ny+Nx, i)
x .= filter_state!(x, config)
end
end
observe(h, X, t0+i*Δtobs, Ny, Nx; P = P)
ϵ = algo.ϵy.σ*randn(Ny, Ne) .+ algo.ϵy.m
Xpert = (1/sqrt(Ne-1))*(X[Ny+1:Ny+Nx,:] .- mean(X[Ny+1:Ny+Nx,:]; dims = 2)[:,1])
HXpert = (1/sqrt(Ne-1))*(X[1:Ny,:] .- mean(X[1:Ny,:]; dims = 2)[:,1])
ϵpert = (1/sqrt(Ne-1))*(ϵ .- mean(ϵ; dims = 2)[:,1])
# Kenkf = Xpert*HXpert'*inv(HXpert*HXpert'+ϵpert*ϵpert')
b = (HXpert*HXpert' + ϵpert*ϵpert')\(ystar .- (X[1:Ny,:] + ϵ))
view(X,Ny+1:Ny+Nx,:) .+= (Xpert*HXpert')*b
# @show cumsum(svd((Xpert*HXpert')*inv((HXpert*HXpert' + ϵpert*ϵpert'))).S)./sum(svd((Xpert*HXpert')*inv((HXpert*HXpert' + ϵpert*ϵpert'))).S)
# X = algo(X, ystar, t0+i*Δtobs)
# Filter state
if algo.isfiltered == true
@inbounds for i=1:Ne
x = view(X, Ny+1:Ny+Nx, i)
x .= filter_state!(x, config)
end
end
push!(Xa, deepcopy(state(X, Ny, Nx)))
end
return Xf, Xa
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
39344,
26694,
818,
33521,
11,
8106,
62,
5219,
28265,
42726,
562,
320,
198,
198,
7249,
26694,
818,
33521,
1279,
25,
554,
33521,
6030,
198,
220,
220,
220,
366,
48944,
1,
198,
220,
220,
220,
... | 1.973014 | 1,964 |
<gh_stars>100-1000
using Revise
using ADCME
using PyCall
using LinearAlgebra
using PyPlot
using SparseArrays
using Random
# Random.seed!(233)
# TODO: specify your input parameters
A = sprand(10,5,0.3)
f = rand(10)
sol = A\f
u = constant(A)\f
sess = Session()
init(sess)
@show run(sess, u)-sol
# error()
# TODO: change your test parameter to `m`
# gradient check -- v
function scalar_function(m)
return sum((constant(A)\m)^2)
B = SparseTensor(ii, jj, m, size(A)...)
return sum((B\Array([f f]'))^2)
end
ii, jj, vv = find(constant(A))
# TODO: change `m_` and `v_` to appropriate values
# m_ = constant(rand(length(vv)))
# v_ = rand(length(vv))
m_ = constant(rand(5,10))
v_ = rand(5,10)
y_ = scalar_function(m_)
dy_ = gradients(y_, m_)
ms_ = Array{Any}(undef, 5)
ys_ = Array{Any}(undef, 5)
s_ = Array{Any}(undef, 5)
w_ = Array{Any}(undef, 5)
gs_ = @. 1 / 10^(1:5)
for i = 1:5
g_ = gs_[i]
ms_[i] = m_ + g_*v_
ys_[i] = scalar_function(ms_[i])
s_[i] = ys_[i] - y_
w_[i] = s_[i] - g_*sum(v_.*dy_)
end
sess = Session()
init(sess)
sval_ = run(sess, s_)
wval_ = run(sess, w_)
close("all")
loglog(gs_, abs.(sval_), "*-", label="finite difference")
loglog(gs_, abs.(wval_), "+-", label="automatic differentiation")
loglog(gs_, gs_.^2 * 0.5*abs(wval_[1])/gs_[1]^2, "--",label="\$\\mathcal{O}(\\gamma^2)\$")
loglog(gs_, gs_ * 0.5*abs(sval_[1])/gs_[1], "--",label="\$\\mathcal{O}(\\gamma)\$")
plt.gca().invert_xaxis()
legend()
xlabel("\$\\gamma\$")
ylabel("Error")
| [
27,
456,
62,
30783,
29,
3064,
12,
12825,
198,
3500,
5416,
786,
198,
198,
3500,
49169,
11682,
198,
3500,
9485,
14134,
198,
3500,
44800,
2348,
29230,
198,
3500,
9485,
43328,
198,
3500,
1338,
17208,
3163,
20477,
198,
3500,
14534,
198,
2,
... | 2.053645 | 727 |
<gh_stars>1-10
# SPDX-License-Identifier: X11
# 2020-11-14
using Random
function geninput(bound::Integer, fn::AbstractString)
X = collect(-bound:bound)
Y = collect(-bound:bound)
n = 2bound + 1
println("Shuffling...")
shuffle!(X)
shuffle!(Y)
println("Printing to $fn...")
open(fn, "w") do f
for i ∈ 1:n
println(f, join([string(i, base=16),
string(X[i]),
string(Y[i])],
" "))
end
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
1395,
1157,
198,
2,
12131,
12,
1157,
12,
1415,
198,
198,
3500,
14534,
198,
198,
8818,
2429,
15414,
7,
7784,
3712,
46541,
11,
24714,
3712,
2383... | 2.004082 | 245 |
module PackageName
# Write your package code here.
print("Hello Julia, Git and Kraken!")
end
| [
21412,
15717,
5376,
198,
198,
2,
19430,
534,
5301,
2438,
994,
13,
198,
198,
4798,
7203,
15496,
22300,
11,
15151,
290,
43392,
2474,
8,
198,
198,
437,
198
] | 3.428571 | 28 |
using CairoMakie
using ElectronDisplay
using FFTW
using LinearAlgebra
using Scapin.Elasticity
using Scapin.Bri17
const T = Float64
const d = 2
C = Hooke{d,T}(1.0, 0.3)
α = (0.25, 0.25) # Fraction of the domained that is polarized
N_coarse = (4, 4)
r_max = 9
N_fine = (2^r_max) .* N_coarse
results = Dict()
for r ∈ 0:r_max
N = (2^r) .* N_coarse
𝒩 = CartesianIndices(N)
h = 1.0 ./ N
Γ̂ = DiscreteGreenOperatorBri17{d,T}(C, N, h)
τ = zeros(T, 3, N...)
τ[3, fill(1:2^r, d)...] .= 1
τ̂ = fft(τ, 2:(d+1))
ε̂ = Array{eltype(τ̂)}(undef, size(τ̂)...)
for n ∈ 𝒩
apply!(view(ε̂, :, n), Γ̂, n, τ̂[:, n])
end
ε = real.(ifft(ε̂, 2:(d+1)))
ε_fine = zeros(T, 3, N_fine...)
s = 2^(r_max - r)
for n ∈ map(Tuple, 𝒩)
n₁ = CartesianIndex(s .* (n .- 1) .+ 1)
n₂ = CartesianIndex(s .* n)
ε_fine[:, n₁:n₂] .= ε[:, n...]
end
# fig, ax, hm = heatmap(ε_fine[3, :, :])
# save("eps_xy-$(N[1])x$(N[2]).png", fig)
results[r] = ε_fine
end
x = Array{T}(undef, r_max)
y = Array{T}(undef, r_max)
for r ∈ 0:(r_max-1)
ε₁ = results[r]
ε₂ = results[r_max]
x[r+1] = 2^r * N_coarse[1]
y[r+1] = norm(ε₂-ε₁)
end
C = x[end] * y[end]
fig = scatter(x, y, axis = (xscale=log10, yscale = log10))
lines!(x, C ./ x)
#save("convergence.png", fig)
electrondisplay(fig)
| [
3500,
23732,
44,
461,
494,
198,
3500,
5903,
1313,
23114,
198,
3500,
376,
9792,
54,
198,
3500,
44800,
2348,
29230,
198,
3500,
1446,
499,
259,
13,
9527,
3477,
414,
198,
3500,
1446,
499,
259,
13,
33,
380,
1558,
198,
198,
9979,
309,
796... | 1.736165 | 777 |
<filename>test/test_tools.jl
module TestTools
using Test
using Mimi
import Mimi:
getproperty, reset_compdefs
reset_compdefs()
#utils: prettify
@test Mimi.prettify("camelCaseBasic") == Mimi.prettify(:camelCaseBasic) == "Camel Case Basic"
@test Mimi.prettify("camelWithAOneLetterWord") == Mimi.prettify(:camelWithAOneLetterWord) == "Camel With A One Letter Word"
@test Mimi.prettify("snake_case_basic") == Mimi.prettify(:snake_case_basic) == "Snake Case Basic"
@test Mimi.prettify("_snake__case__weird_") == Mimi.prettify(:_snake__case__weird_) == "Snake Case Weird"
#utils: interpolate
stepsize = 2 # N.B. ERROR: cannot assign variable Base.step from module Main
final = 10 # N.B. ERROR: cannot assign variable Base.last from module Main
ts = 10
@test Mimi.interpolate(collect(0:stepsize:final), ts) == collect(0:stepsize/ts:final)
@defcomp Foo begin
input = Parameter()
intermed = Variable(index=[time])
function run_timestep(p, v, d, t)
v.intermed[t] = p.input
end
end
@defcomp Bar begin
intermed = Parameter(index=[time])
output = Variable(index=[time])
function run_timestep(p, v, d, t)
v.output[t] = p.intermed[t]
end
end
m = Model()
set_dimension!(m, :time, 2)
foo = add_comp!(m, Foo)
bar = add_comp!(m, Bar)
foo[:input] = 3.14
bar[:intermed] = foo[:intermed]
run(m)
end #module | [
27,
34345,
29,
9288,
14,
9288,
62,
31391,
13,
20362,
198,
21412,
6208,
33637,
198,
198,
3500,
6208,
198,
3500,
337,
25236,
198,
198,
11748,
337,
25236,
25,
198,
220,
220,
220,
651,
26745,
11,
13259,
62,
5589,
4299,
82,
198,
198,
425... | 2.433393 | 563 |
SubWorker{T,A,PT} = RemoteChannel{Channel{Vector{SubProblem{T,A,PT}}}}
ScenarioProblemChannel{S} = RemoteChannel{Channel{ScenarioProblems{S}}}
Work = RemoteChannel{Channel{Int}}
Progress{T <: AbstractFloat} = Tuple{Int,Int,SubproblemSolution{T}}
ProgressQueue{T <: AbstractFloat} = RemoteChannel{Channel{Progress{T}}}
function initialize_subproblems!(ph::AbstractProgressiveHedging,
subworkers::Vector{SubWorker{T,A,PT}},
scenarioproblems::DistributedScenarioProblems,
penaltyterm::AbstractPenaltyterm) where {T <: AbstractFloat,
A <: AbstractVector,
PT <: AbstractPenaltyterm}
# Create subproblems on worker processes
@sync begin
for w in workers()
subworkers[w-1] = RemoteChannel(() -> Channel{Vector{SubProblem{T,A,PT}}}(1), w)
prev = map(2:(w-1)) do p
scenarioproblems.scenario_distribution[p-1]
end
start_id = isempty(prev) ? 0 : sum(prev)
@async remotecall_fetch(initialize_subworker!,
w,
subworkers[w-1],
scenarioproblems[w-1],
penaltyterm,
start_id)
end
end
return nothing
end
function update_dual_gap!(ph::AbstractProgressiveHedging,
subworkers::Vector{<:SubWorker{T}}) where T <: AbstractFloat
# Update δ₂
partial_δs = Vector{Float64}(undef, nworkers())
@sync begin
for (i,w) in enumerate(workers())
@async partial_δs[i] = remotecall_fetch(
w,
subworkers[w-1],
ph.ξ) do sw, ξ
subproblems = fetch(sw)
return mapreduce(+, subproblems, init = zero(T)) do subproblem
π = subproblem.probability
x = subproblem.x
π * norm(x - ph.ξ, 2) ^ 2
end
end
end
end
ph.data.δ₂ = sum(partial_δs)
return nothing
end
function initialize_subworker!(subworker::SubWorker{T,A,PT},
scenarioproblems::ScenarioProblemChannel,
penaltyterm::AbstractPenaltyterm,
start_id::Integer) where {T <: AbstractFloat,
A <: AbstractArray,
PT <: AbstractPenaltyterm}
sp = fetch(scenarioproblems)
subproblems = Vector{SubProblem{T,A,PT}}(undef, num_subproblems(sp))
for i = 1:num_subproblems(sp)
subproblems[i] = SubProblem(
subproblem(sp, i),
start_id + i,
T(probability(sp, i)),
copy(penaltyterm))
end
put!(subworker, subproblems)
return nothing
end
function restore_subproblems!(subworkers::Vector{<:SubWorker})
@sync begin
for w in workers()
@async remotecall_fetch(w, subworkers[w-1]) do sw
for subproblem in fetch(sw)
restore_subproblem!(subproblem)
end
end
end
end
return nothing
end
function resolve_subproblems!(subworker::SubWorker{T,A,PT},
ξ::AbstractVector,
r::AbstractFloat) where {T <: AbstractFloat,
A <: AbstractArray,
PT <: AbstractPenaltyterm}
subproblems::Vector{SubProblem{T,A,PT}} = fetch(subworker)
Qs = Vector{SubproblemSolution{T}}(undef, length(subproblems))
# Reformulate and solve sub problems
for (i,subproblem) in enumerate(subproblems)
reformulate_subproblem!(subproblem, ξ, r)
Qs[i] = subproblem(ξ)
end
# Return current objective value
return sum(Qs)
end
function collect_primals(subworker::SubWorker{T,A,PT}, n::Integer) where {T <: AbstractFloat,
A <: AbstractArray,
PT <: AbstractPenaltyterm}
subproblems::Vector{SubProblem{T,A,PT}} = fetch(subworker)
return mapreduce(+, subproblems, init = zeros(T, n)) do subproblem
π = subproblem.probability
x = subproblem.x
π * x
end
end
function calculate_objective_value(subworkers::Vector{<:SubWorker{T}}) where T <: AbstractFloat
partial_objectives = Vector{Float64}(undef, nworkers())
@sync begin
for (i, w) in enumerate(workers())
@async partial_objectives[i] = remotecall_fetch(w, subworkers[w-1]) do sw
return mapreduce(+, fetch(sw), init = zero(T)) do subproblem
_objective_value(subproblem)
end
end
end
end
return sum(partial_objectives)
end
function work_on_subproblems!(subworker::SubWorker{T,A,PT},
work::Work,
finalize::Work,
progress::ProgressQueue{T},
x̄::RemoteRunningAverage{A},
δ::RemoteRunningAverage{T},
iterates::RemoteIterates{A},
r::IteratedValue{T}) where {T <: AbstractFloat,
A <: AbstractArray,
PT <: AbstractPenaltyterm}
subproblems::Vector{SubProblem{T,A,PT}} = fetch(subworker)
if isempty(subproblems)
# Workers has nothing do to, return.
return nothing
end
x̄ = fetch(x̄)
δ = fetch(δ)
quit = false
while true
t::Int = try
if isready(finalize)
quit = true
take!(finalize)
else
wait(work)
take!(work)
end
catch err
if err isa InvalidStateException
# Master closed the work/finalize channel. Worker finished
return nothing
end
end
t == -1 && continue
ξ::A = fetch(iterates, t)
if t > 1
update_subproblems!(subproblems, ξ, fetch(r,t-1))
end
for (i,subproblem) in enumerate(subproblems)
!quit && subtract!(δ, i)
!quit && subtract!(x̄, i)
x = subproblem.x
π = subproblem.probability
!quit && add!(δ, i, norm(x - ξ, 2) ^ 2, π)
reformulate_subproblem!(subproblem, ξ, fetch(r, t))
Q::SubproblemSolution{T} = subproblem(ξ)
!quit && add!(x̄, i, π)
!quit && put!(progress, (t, subproblem.id, Q))
end
if quit
# Worker finished
return nothing
end
end
end
| [
7004,
12468,
263,
90,
51,
11,
32,
11,
11571,
92,
796,
21520,
29239,
90,
29239,
90,
38469,
90,
7004,
40781,
90,
51,
11,
32,
11,
11571,
11709,
11709,
198,
3351,
39055,
40781,
29239,
90,
50,
92,
796,
21520,
29239,
90,
29239,
90,
3351,
... | 1.76566 | 4,071 |
# tests for the synchronous task runner
import Arbiter.Sync: run_tasks
import Arbiter.ArbiterTasks: ArbiterTask
import Arbiter.ArbiterGraphs: NodeSet, ImmutableNodeSet
facts("empty") do
# solve no tasks
results = run_tasks(())
@fact results.completed --> ImmutableNodeSet()
@fact results.failed --> ImmutableNodeSet()
end
facts("no dependencies") do
# run dependency-less tasks
executed_tasks = NodeSet()
"""
Make a task
"""
function make_task(name, dependencies=(); succeed=true)
ArbiterTask(name, () -> (push!(executed_tasks, name); succeed), dependencies)
end
results = run_tasks(
(
make_task(:foo),
make_task(:bar),
make_task(:baz),
make_task(:fail; succeed=false)
)
)
@fact executed_tasks --> NodeSet((:foo, :bar, :baz, :fail))
@fact results.completed --> ImmutableNodeSet((:foo, :bar, :baz))
@fact results.failed --> ImmutableNodeSet((:fail,))
end
| [
2,
5254,
329,
262,
18305,
516,
4876,
17490,
198,
11748,
33619,
263,
13,
28985,
25,
1057,
62,
83,
6791,
198,
11748,
33619,
263,
13,
3163,
2545,
263,
51,
6791,
25,
33619,
263,
25714,
198,
11748,
33619,
263,
13,
3163,
2545,
263,
37065,
... | 2.476427 | 403 |
# Julia wrapper for header: /usr/local/include/sodium.h
# Automatically generated using Clang.jl wrap_c, version 0.0.0
@c Int32 sodium_init () libsodium
@c Int32 crypto_auth_hmacsha512256 (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_auth_hmacsha512256_verify (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c size_t crypto_auth_bytes () libsodium
@c size_t crypto_auth_keybytes () libsodium
@c Ptr{Uint8} crypto_auth_primitive () libsodium
@c Int32 crypto_auth (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_auth_verify (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_auth_hmacsha256 (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_auth_hmacsha256_verify (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_box_curve25519xsalsa20poly1305 (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_curve25519xsalsa20poly1305_open (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_curve25519xsalsa20poly1305_keypair (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_curve25519xsalsa20poly1305_beforenm (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_curve25519xsalsa20poly1305_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_curve25519xsalsa20poly1305_open_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c size_t crypto_box_publickeybytes () libsodium
@c size_t crypto_box_secretkeybytes () libsodium
@c size_t crypto_box_beforenmbytes () libsodium
@c size_t crypto_box_noncebytes () libsodium
@c size_t crypto_box_zerobytes () libsodium
@c size_t crypto_box_boxzerobytes () libsodium
@c size_t crypto_box_macbytes () libsodium
@c Ptr{Uint8} crypto_box_primitive () libsodium
@c Int32 crypto_box_keypair (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_beforenm (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_open_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_box_open (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_core_hsalsa20 (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_core_salsa20 (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_core_salsa2012 (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_core_salsa208 (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_generichash_blake2b (Ptr{Uint8}, size_t, Ptr{Uint8}, Uint64, Ptr{Uint8}, size_t) libsodium
@c Int32 crypto_generichash_blake2b_init (Ptr{crypto_generichash_blake2b_state}, Ptr{Uint8}, size_t, size_t) libsodium
@c Int32 crypto_generichash_blake2b_update (Ptr{crypto_generichash_blake2b_state}, Ptr{Uint8}, Uint64) libsodium
@c Int32 crypto_generichash_blake2b_final (Ptr{crypto_generichash_blake2b_state}, Ptr{Uint8}, size_t) libsodium
@c size_t crypto_generichash_bytes_min () libsodium
@c size_t crypto_generichash_bytes_max () libsodium
@c size_t crypto_generichash_keybytes_min () libsodium
@c size_t crypto_generichash_keybytes_max () libsodium
@c size_t crypto_generichash_blockbytes () libsodium
@c Ptr{Uint8} crypto_generichash_primitive () libsodium
@c Int32 crypto_generichash (Ptr{Uint8}, size_t, Ptr{Uint8}, Uint64, Ptr{Uint8}, size_t) libsodium
@c Int32 crypto_generichash_init (Ptr{crypto_generichash_blake2b_state}, Ptr{Uint8}, size_t, size_t) libsodium
@c Int32 crypto_generichash_update (Ptr{crypto_generichash_blake2b_state}, Ptr{Uint8}, Uint64) libsodium
@c Int32 crypto_generichash_final (Ptr{crypto_generichash_blake2b_state}, Ptr{Uint8}, size_t) libsodium
@c Int32 crypto_hash_sha512 (Ptr{Uint8}, Ptr{Uint8}, Uint64) libsodium
@c Int32 crypto_hash (Ptr{Uint8}, Ptr{Uint8}, Uint64) libsodium
@c Int32 crypto_hash_sha256 (Ptr{Uint8}, Ptr{Uint8}, Uint64) libsodium
@c Int32 crypto_hashblocks_sha256 (Ptr{Uint8}, Ptr{Uint8}, Uint64) libsodium
@c Int32 crypto_hashblocks_sha512 (Ptr{Uint8}, Ptr{Uint8}, Uint64) libsodium
@c Ptr{Uint8} crypto_onetimeauth_poly1305_ref_implementation_name () libsodium
@c Int32 crypto_onetimeauth_poly1305_set_implementation (Ptr{crypto_onetimeauth_poly1305_implementation},) libsodium
@c Ptr{crypto_onetimeauth_poly1305_implementation} crypto_onetimeauth_pick_best_implementation () libsodium
@c Int32 crypto_onetimeauth_poly1305 (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_onetimeauth_poly1305_verify (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c size_t crypto_onetimeauth_bytes () libsodium
@c size_t crypto_onetimeauth_keybytes () libsodium
@c Ptr{Uint8} crypto_onetimeauth_primitive () libsodium
@c Int32 crypto_onetimeauth (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_onetimeauth_verify (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_scalarmult_curve25519 (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_scalarmult_curve25519_base (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c size_t crypto_scalarmult_bytes () libsodium
@c size_t crypto_scalarmult_scalarbytes () libsodium
@c Ptr{Uint8} crypto_scalarmult_primitive () libsodium
@c Int32 crypto_scalarmult_base (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_scalarmult (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_secretbox_xsalsa20poly1305 (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_secretbox_xsalsa20poly1305_open (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c size_t crypto_secretbox_keybytes () libsodium
@c size_t crypto_secretbox_noncebytes () libsodium
@c size_t crypto_secretbox_zerobytes () libsodium
@c size_t crypto_secretbox_boxzerobytes () libsodium
@c Ptr{Uint8} crypto_secretbox_primitive () libsodium
@c Int32 crypto_secretbox (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_secretbox_open (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_shorthash_siphash24 (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c size_t crypto_shorthash_bytes () libsodium
@c size_t crypto_shorthash_keybytes () libsodium
@c Ptr{Uint8} crypto_shorthash_primitive () libsodium
@c Int32 crypto_shorthash (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_ed25519 (Ptr{Uint8}, Ptr{Uint64}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_ed25519_open (Ptr{Uint8}, Ptr{Uint64}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_ed25519_keypair (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_ed25519_seed_keypair (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c size_t crypto_sign_bytes () libsodium
@c size_t crypto_sign_publickeybytes () libsodium
@c size_t crypto_sign_secretkeybytes () libsodium
@c Ptr{Uint8} crypto_sign_primitive () libsodium
@c Int32 crypto_sign_seed_keypair (Ptr{Uint8}, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_keypair (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_sign (Ptr{Uint8}, Ptr{Uint64}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_open (Ptr{Uint8}, Ptr{Uint64}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_edwards25519sha512batch (Ptr{Uint8}, Ptr{Uint64}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_edwards25519sha512batch_open (Ptr{Uint8}, Ptr{Uint64}, Ptr{Uint8}, Uint64, Ptr{Uint8}) libsodium
@c Int32 crypto_sign_edwards25519sha512batch_keypair (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_xsalsa20 (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_xsalsa20_xor (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_xsalsa20_beforenm (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_xsalsa20_afternm (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_xsalsa20_xor_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c size_t crypto_stream_keybytes () libsodium
@c size_t crypto_stream_noncebytes () libsodium
@c Ptr{Uint8} crypto_stream_primitive () libsodium
@c Int32 crypto_stream (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_xor (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_aes128ctr (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_aes128ctr_xor (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_aes128ctr_beforenm (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_aes128ctr_afternm (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_aes128ctr_xor_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa20 (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa20_xor (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa20_beforenm (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa20_afternm (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa20_xor_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa2012 (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa2012_xor (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa2012_beforenm (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa2012_afternm (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa2012_xor_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa208 (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa208_xor (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa208_beforenm (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa208_afternm (Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_stream_salsa208_xor_afternm (Ptr{Uint8}, Ptr{Uint8}, Uint64, Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_verify_16 (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 crypto_verify_32 (Ptr{Uint8}, Ptr{Uint8}) libsodium
@c Int32 randombytes_set_implementation (Ptr{randombytes_implementation},) libsodium
@c None randombytes (Ptr{Uint8}, Uint64) libsodium
@c Ptr{Uint8} randombytes_implementation_name () libsodium
@c uint32_t randombytes_random () libsodium
@c None randombytes_stir () libsodium
@c uint32_t randombytes_uniform (uint32_t,) libsodium
@c None randombytes_buf (Ptr{Void}, size_t) libsodium
@c Int32 randombytes_close () libsodium
@c Ptr{Uint8} randombytes_salsa20_implementation_name () libsodium
@c uint32_t randombytes_salsa20_random () libsodium
@c None randombytes_salsa20_random_stir () libsodium
@c uint32_t randombytes_salsa20_random_uniform (uint32_t,) libsodium
@c None randombytes_salsa20_random_buf (Ptr{Void}, size_t) libsodium
@c Int32 randombytes_salsa20_random_close () libsodium
@c Ptr{Uint8} randombytes_sysrandom_implementation_name () libsodium
@c uint32_t randombytes_sysrandom () libsodium
@c None randombytes_sysrandom_stir () libsodium
@c uint32_t randombytes_sysrandom_uniform (uint32_t,) libsodium
@c None randombytes_sysrandom_buf (Ptr{Void}, size_t) libsodium
@c Int32 randombytes_sysrandom_close () libsodium
@c Ptr{Uint8} _sodium_alignedcalloc (Ptr{Ptr{Uint8}}, size_t) libsodium
@c None sodium_memzero (Ptr{Void}, size_t) libsodium
@c Int32 sodium_memcmp (Ptr{Void}, Ptr{Void}, size_t) libsodium
@c Ptr{Uint8} sodium_version_string () libsodium
@c Int32 sodium_library_version_major () libsodium
@c Int32 sodium_library_version_minor () libsodium
| [
2,
22300,
29908,
329,
13639,
25,
1220,
14629,
14,
12001,
14,
17256,
14,
82,
12664,
13,
71,
198,
2,
17406,
4142,
7560,
1262,
1012,
648,
13,
20362,
14441,
62,
66,
11,
2196,
657,
13,
15,
13,
15,
198,
198,
31,
66,
2558,
2624,
21072,
... | 2.318304 | 5,212 |
#! format: off
using PowerSystems
using PowerSimulations
using PowerSystemCaseBuilder
using Cbc #solver
solver = optimizer_with_attributes(Cbc.Optimizer, "logLevel" => 1, "ratioGap" => 0.5)
sys = build_system(PSITestSystems, "test_RTS_GMLC_sys")
for line in get_components(Line, sys)
if (get_base_voltage(get_from(get_arc(line))) >= 230.0) &&
(get_base_voltage(get_to(get_arc(line))) >= 230.0)
#if get_area(get_from(get_arc(line))) != get_area(get_to(get_arc(line)))
@info "Changing $(get_name(line)) to MonitoredLine"
convert_component!(MonitoredLine, line, sys)
end
end
template = template_unit_commitment(transmission = PTDFPowerModel)
set_device_model!(template, MonitoredLine, StaticBranch)
set_device_model!(template, Line, StaticBranchUnbounded)
uc_prob = OperationsProblem(template, sys, horizon = 24, optimizer = solver)
build!(uc_prob, output_dir = mktempdir())
solve!(uc_prob)
# This file was generated using Literate.jl, https://github.com/fredrikekre/Literate.jl
| [
2,
0,
5794,
25,
572,
198,
198,
3500,
4333,
11964,
82,
198,
3500,
4333,
8890,
5768,
198,
3500,
4333,
11964,
20448,
32875,
198,
198,
3500,
327,
15630,
1303,
82,
14375,
198,
82,
14375,
796,
6436,
7509,
62,
4480,
62,
1078,
7657,
7,
34,
... | 2.588384 | 396 |
module HypergraphsEpidemics
using CSV
using DataFrames
using Query
using Tables
using Dates
using DataStructures
using Distributions
using SimpleHypergraphs
using Statistics
using Combinatorics
using Pipe: @pipe
using Random
using JSON
using JSON3
using Serialization
export Person
export Household
export Company
export Contact_simulation_options
export generate_dataset
export start_simulation
include("common/Constants.jl")
include("common/EventWriter.jl")
include("common/utils.jl")
include("common/structs.jl")
include("RoutineAutomaton.jl")
include("DatasetGeneration.jl")
include("Simulation.jl")
include("../deps/HGEpidemics/src/HGEpidemics.jl")
using .HGEpidemics
end | [
21412,
15079,
34960,
82,
13807,
28913,
873,
198,
198,
3500,
44189,
198,
3500,
6060,
35439,
198,
3500,
43301,
198,
3500,
33220,
198,
3500,
44712,
198,
3500,
6060,
44909,
942,
198,
3500,
46567,
507,
198,
3500,
17427,
38197,
34960,
82,
198,
... | 3.331707 | 205 |
using Test
# test the seg error functions
using EMIRT
using EMIRT.Evaluate
@testset "test evaluate" begin
# get test data
aff = EMIRT.IOs.imread(joinpath(dirname(@__FILE__),"../assets/aff.h5"))
lbl = EMIRT.IOs.imread(joinpath(dirname(@__FILE__),"../assets/lbl.h5"))
lbl = Array{UInt32,3}(lbl)
# compare python code and julia
seg = EMIRT.AffinityMaps.aff2seg(aff)
judec = evaluate(seg, lbl)
@show judec
# dict of evaluation curve
@time ecd = evaluate(lbl,lbl)
@show ecd
@test abs(ecd[:rf]-1) < 0.01
seg = Array{UInt32,3}(reshape(range(1,length=length(lbl)), size(lbl)))
@time ecd = evaluate(seg,lbl)
@show ecd
@test abs(ecd[:rf]-0) < 0.01
end # end of test set
| [
3500,
6208,
198,
2,
1332,
262,
384,
70,
4049,
5499,
198,
3500,
17228,
48771,
198,
3500,
17228,
48771,
13,
36,
2100,
4985,
198,
198,
31,
9288,
2617,
366,
9288,
13446,
1,
2221,
220,
198,
198,
2,
651,
1332,
1366,
198,
2001,
796,
17228,... | 2.360424 | 283 |
"""
gen_ref_dirs(dimension, n_paritions)
Generates Das and Dennis's structured reference points. `dimension` could be
the number of objective functions in multi-objective functions.
"""
function gen_ref_dirs(dimension, n_paritions)
return gen_weights(dimension, n_paritions)
end
function gen_weights(a, b)
nobj = a;
H = b;
a = zeros(nobj);
d = H;
w = [];
produce_weight!(a, 1, d, H, nobj, w)
return Array.(w)
end
function produce_weight!(a, i, d, H, nobj, w)
for k=0:d
if i<nobj
a[i] = k;
d2 = d - k;
produce_weight!(a, i+1, d2, H, nobj, w);
else
a[i] = d;
push!(w, a/H)
break;
end
end
end
"""
ideal(points)
Computes the ideal point from a provided array of `Vector`s or a population or row vectors
in a `Matrix`.
"""
function ideal(points::Array{Vector{T}}) where T <: Real
isempty(points) && isempty(points[1]) && return zeros(0)
ideal = points[1]
for point in points
ideal = min.(ideal, point)
end
return ideal
end
"""
nadir(points)
Computes the nadir point from a provided array of `Vector`s or a population or row vectors
in a `Matrix`.
"""
function nadir(points::Array{Vector{T}}) where T <: Real
isempty(points) && isempty(points[1]) && return zeros(0)
nadir = points[1]
for point in points
nadir = max.(nadir, point)
end
return nadir
end
function ideal(population::Array{xFgh_indiv})
mask = sum_violations.(population) .== 0
ideal(fval.(population[mask]))
end
ideal(A::Matrix) = ideal([A[i,:] for i in 1:size(A,1)])
function nadir(population::Array{xFgh_indiv})
mask = sum_violations.(population) .== 0
nadir(fval.(population[mask]))
end
nadir(A::Matrix) = nadir([A[i,:] for i in 1:size(A,1)])
| [
37811,
198,
220,
220,
220,
2429,
62,
5420,
62,
15908,
82,
7,
46156,
11,
299,
62,
1845,
1756,
8,
198,
8645,
689,
29533,
290,
16902,
338,
20793,
4941,
2173,
13,
4600,
46156,
63,
714,
307,
198,
1169,
1271,
286,
9432,
5499,
287,
5021,
... | 2.244311 | 835 |
<reponame>fingolfin/CompileBot.jl<filename>test/TestPackage3.jl/test/runtests.jl<gh_stars>10-100
using Test, TestPackage3
@static if VERSION > v"1.3"
@test hello3("Julia") == "Hello, Julia"
elseif VERSION > v"1.2"
@test domath3(2.0) ≈ 7.0
else
multiply3(2.0) == 8.0
end
| [
27,
7856,
261,
480,
29,
28825,
4024,
259,
14,
7293,
576,
20630,
13,
20362,
27,
34345,
29,
9288,
14,
14402,
27813,
18,
13,
20362,
14,
9288,
14,
81,
2797,
3558,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
6208,
11,
... | 2.241935 | 124 |
<reponame>UzielLinares/TaylorModels.jl<filename>src/validatedODEs.jl
# Some methods for validated integration of ODEs
const _DEF_MINABSTOL = 1.0e-50
"""
remainder_taylorstep!(f!, t, x, dx, xI, dxI, δI, δt, params)
Returns a remainder for the integration step for the dependent variables (`x`)
checking that the solution satisfies the criteria for existence and uniqueness.
"""
function remainder_taylorstep!(f!::Function, t::Taylor1{T},
x::Vector{Taylor1{TaylorN{T}}}, dx::Vector{Taylor1{TaylorN{T}}},
xI::Vector{Taylor1{Interval{T}}}, dxI::Vector{Taylor1{Interval{T}}},
δI::IntervalBox{N,T}, δt::Interval{T}, params) where {N,T}
orderT = get_order(dx[1])
aux = δt^(orderT+1)
Δx = IntervalBox([xI[i][orderT+1] for i in eachindex(xI)]) * aux
Δdx = IntervalBox([dxI[i][orderT+1] for i in eachindex(xI)]) * aux
Δ0 = IntervalBox([dx[i][orderT](δI) for i in eachindex(x)]) * aux / (orderT+1)
Δ = Δ0 + Δdx * δt
Δxold = Δx
# Checking existence and uniqueness
iscontractive(Δ, Δx) && return (true, Δx, t[0])
# If the check didn't work, compute new remainders. A new Δx is proposed,
# and the corresponding Δdx is computed
xxI = Array{Taylor1{TaylorN{Interval{T}}}}(undef, N)
dxxI = Array{Taylor1{TaylorN{Interval{T}}}}(undef, N)
vv = Array{Interval{T}}(undef, N)
for its = 1:50
# Remainder of Picard iteration
Δ = picard_remainder!(f!, t, x, dx, xxI, dxxI, δI, δt, Δx, Δ0, params)
# Checking existence and uniqueness
iscontractive(Δ, Δx) && return (true, Δx, t[0])
# iscontractive(Δ, Δx) && return _contract_iteration!(f!, t, x, dx, xxI, dxxI, δI, δt, Δx, Δdx, Δ0, params)
# Expand Δx in the directions needed
Δxold = Δx
if Δ ⊆ Δx
@inbounds for ind in 1:N
# Widen the directions where ⊂ does not hold
vv[ind] = Δx[ind]
if Δ[ind] == Δx[ind]
vv[ind] = widen.(Δ[ind])
end
end
Δx = IntervalBox(vv)
continue
end
Δx = Δ
end
return (false, Δx)
end
"""
iscontractive(Δ, Δx)
Checks if `Δ .⊂ Δx` is satisfied. If ``Δ ⊆ Δx` is satisfied, it returns
`true` if all cases where `==` holds corresponds to the zero `Interval`.
"""
function iscontractive(Δ::Interval{T}, Δx::Interval{T}) where{T}
(Δ ⊂ Δx || Δ == Δx == zero_interval(T)) && return true
return false
end
function iscontractive(Δ::IntervalBox{N,T}, Δx::IntervalBox{N,T}) where{N,T}
@inbounds for ind in 1:N
iscontractive(Δ[ind], Δx[ind]) || return false
end
return true
end
"""
picard_remainder!(f!, t, x, dx, xxI, dxxI, δI, δt, Δx, Δ0, params)
Return the remainder of Picard operator
"""
function picard_remainder!(f!::Function, t::Taylor1{T},
x::Vector{Taylor1{TaylorN{T}}}, dx::Vector{Taylor1{TaylorN{T}}},
xxI::Vector{Taylor1{TaylorN{Interval{T}}}},
dxxI::Vector{Taylor1{TaylorN{Interval{T}}}},
δI::IntervalBox{N,T}, δt::Interval{T},
Δx::IntervalBox{N,T}, Δ0::IntervalBox{N,T}, params) where {N,T}
# Extend `x` and `dx` to have interval coefficients
zI = zero_interval(T)
@. begin
xxI = x + Δx
dxxI = dx + zI
end
# Compute `dxxI` from the equations of motion
f!(dxxI, xxI, params, t)
# Picard iteration, considering only the bound of `f` and the last coeff of f
Δdx = IntervalBox( evaluate.( (dxxI - dx)(δt), (δI,) ) )
Δ = Δ0 + Δdx * δt
return Δ
end
# # Picard iterations to contract further Δx, once Δ ⊂ Δx holds
# # **Currently not used**
# function _contract_iteration!(f!::Function, t::Taylor1{T},
# x::Vector{Taylor1{TaylorN{T}}}, dx::Vector{Taylor1{TaylorN{T}}},
# xxI::Vector{Taylor1{TaylorN{Interval{T}}}}, dxxI::Vector{Taylor1{TaylorN{Interval{T}}}},
# δI::IntervalBox{N,T}, δt::Interval{T},
# Δx::IntervalBox{N,T}, Δdx::IntervalBox{N,T}, Δ0::IntervalBox{N,T}, params) where {N,T}
#
# # Some abbreviations
# Δ = Δ0 + Δdx * δt
# Δxold = Δx
#
# # Picard contractions
# for its = 1:10
# # Remainder of Picard iteration
# Δ = picard_remainder!(f!, t, x, dx, xxI, dxxI, δI, δt, Δx, Δ0, params)
#
# # If contraction doesn't hold, return old bound
# iscontractive(Δ, Δx) || return Δxold
#
# # Contract estimate
# Δxold = Δx
# Δx = Δ
# end
#
# return Δxold
# end
"""
absorb_remainder(a::TaylorModelN)
Returns a TaylorModelN, equivalent to `a`, such that the remainder
is mostly absorbed in the constant and linear coefficients. The linear shift assumes
that `a` is normalized to the `IntervalBox(-1..1, Val(N))`.
Ref: <NAME>, <NAME>, and <NAME>,
"Taylor Model Flowpipe Construction for Non-linear Hybrid
Systems", in Real Time Systems Symposium (RTSS), pp. 183-192 (2012),
IEEE Press.
"""
function absorb_remainder(a::TaylorModelN{N,T,T}) where {N,T}
Δ = remainder(a)
orderQ = get_order(a)
δ = symmetric_box(N, T)
aux = diam(Δ)/(2N)
rem = zero_interval(T)
# Linear shift
lin_shift = mid(Δ) + sum((aux*TaylorN(i, order=orderQ) for i in 1:N))
bpol = a.pol + lin_shift
# Compute the new remainder
aI = a(δ)
bI = bpol(δ)
if bI ⊆ aI
rem = Interval(aI.lo-bI.lo, aI.hi-bI.hi)
elseif aI ⊆ bI
rem = Interval(bI.lo-aI.lo, bI.hi-aI.hi)
else
r_lo = aI.lo-bI.lo
r_hi = aI.hi-bI.hi
if r_lo > 0
rem = Interval(-r_lo, r_hi)
else
rem = Interval( r_lo, -r_hi)
end
end
return TaylorModelN(bpol, rem, a.x0, a.dom)
end
# Postverify and define Taylor models to be returned.
# Substitutes the rescaled variables `X` into each polynomial of `xTMN`
# (dropping the remainder) and checks that the original range over `B` is
# enclosed by the new one. Mutates `xTMN` in place; returns `postverify`.
# NOTE(review): failure is reported through `@assert`, which may be disabled
# at higher optimization levels.
function scalepostverify_sw!(xTMN::Vector{TaylorModelN{N,T,T}},
        X::Vector{TaylorN{T}}) where {N,T}
    postverify = true
    x0 = xTMN[1].x0
    B = domain(xTMN[1])
    zI = zero_interval(T)#zero(B[1])
    @inbounds for i in eachindex(xTMN)
        pol = polynomial(xTMN[i])
        ppol = fp_rpa(TaylorModelN(pol(X), zI, x0, B ))
        postverify = postverify && (xTMN[i](B) ⊆ ppol(B))
        xTMN[i] = copy(ppol)
    end
    @assert postverify """
        Failed to post-verify shrink-wrapping:
        X = $(linear_polynomial(X))
        xTMN = $(xTMN)
        """
    return postverify
end
"""
shrink_wrapping!(xTMN::TaylorModelN)
Returns a modified inplace `xTMN`, which has absorbed the remainder
by the modified shrink-wrapping method of Florian Bünger.
The domain of `xTMN` is the normalized interval box `[-1,1]^N`.
Ref: <NAME>, Shrink wrapping for Taylor models revisited,
Numer Algor 78:1001–1017 (2018), https://doi.org/10.1007/s11075-017-0410-1
"""
function shrink_wrapping!(xTMN::Vector{TaylorModelN{N,T,T}}) where {N,T}
# Original domain of TaylorModelN should be the symmetric normalized box
B = symmetric_box(N, T)
@assert all(domain.(xTMN) .== (B,))
zI = zero_interval(T)
x0 = zero_box(N, T)
@assert all(expansion_point.(xTMN) .== (x0,))
# Vector of independent TaylorN variables
order = get_order(xTMN[1])
X = [TaylorN(T, i, order=order) for i in 1:N]
# Remainder of original TaylorModelN and componentwise mag
rem = remainder.(xTMN)
r = mag.(rem)
qB = r .* B
one_r = ones(eltype(r), N)
# Shift to remove constant term
xTN0 = constant_term.(xTMN)
xTNcent = polynomial.(xTMN) .- xTN0
xTNcent_lin = linear_polynomial(xTNcent)
# Step 4 of Bünger algorithm: Jacobian (at zero) and its inverse
jac = TaylorSeries.jacobian(xTNcent_lin)
# If the conditional number is too large (inverse of jac is ill defined),
# don't change xTMN
cond(jac) > 1.0e4 && return one_r
# Inverse of the Jacobian
invjac = inv(jac)
# Componentwise bound
r̃ = mag.(invjac * qB) # qB <-- r .* B
qB´ = r̃ .* B
@assert invjac * qB ⊆ qB´
# Step 6 of Bünger algorithm: compute g
g = invjac*xTNcent .- X
# g = invjac*(xTNcent .- xTNcent_lin)
# ... and its jacobian matrix (full dependence!)
jacmatrix_g = TaylorSeries.jacobian(g, X)
# Alternative to Step 7: Check the validity of Eq 16 (or 17) for Lemma 2
# of Bünger's paper, for s=0, and s very small. If it satisfies it,
# postverify and return. Otherwise, use Bünger's step 7.
q = 1.0 .+ r̃
s = zero(q)
@. q = 1.0 + r̃ + s
jaq_q1 = jacmatrix_g * (q .- 1.0)
eq16 = all(mag.(evaluate.(jaq_q1, (q .* B,))) .≤ s)
if eq16
postverify = scalepostverify_sw!(xTMN, q .* X)
postverify && return q
end
s .= eps.(q)
@. q = 1.0 + r̃ + s
jaq_q1 = jacmatrix_g * (q .- 1.0)
eq16 = all(mag.(evaluate.(jaq_q1, (q .* B,))) .≤ s)
if eq16
postverify = scalepostverify_sw!(xTMN, q .* X)
postverify && return q
end
# Step 7 of Bünger algorithm: estimate of `q`
# Some constants/parameters
q_tol = 1.0e-12
q = 1.0 .+ r̃
ff = 65/64
q_max = ff .* q
s = zero(q)
q_old = similar(q)
q_1 = similar(q)
jaq_q1 .= jacmatrix_g * r̃
iter_max = 100
improve = true
iter = 0
while improve && iter < iter_max
qB .= q .* B
q_1 .= q .- 1.0
q_old .= q
mul!(jaq_q1, jacmatrix_g, q_1)
eq16 = all(evaluate.(jaq_q1, (qB,)) .≤ s)
eq16 && break
@inbounds for i in eachindex(xTMN)
s[i] = mag( jaq_q1[i](qB) )
q[i] = 1.0 + r̃[i] + s[i]
# If q is too large, return xTMN unchanged
q[i] > q_max[i] && return -one_r
end
improve = any( ((q .- q_old)./q) .> q_tol )
iter += 1
end
# (improve || q == one_r) && return one_r
# Compute final q and rescale X
@. q = 1.0 + r̃ + ff * s
@. X = q * X
# Postverify
postverify = scalepostverify_sw!(xTMN, X)
return q
end
"""
validated-step!
"""
function validated_step!(f!, t::Taylor1{T}, x::Vector{Taylor1{TaylorN{T}}},
dx::Vector{Taylor1{TaylorN{T}}}, xaux::Vector{Taylor1{TaylorN{T}}},
tI::Taylor1{T}, xI::Vector{Taylor1{Interval{T}}},
dxI::Vector{Taylor1{Interval{T}}}, xauxI::Vector{Taylor1{Interval{T}}},
t0::T, tmax::T, sign_tstep::Int,
xTMN::Vector{TaylorModelN{N,T,T}}, xv::Vector{IntervalBox{N,T}},
rem::Vector{Interval{T}}, zbox::IntervalBox{N,T}, symIbox::IntervalBox{N,T},
nsteps::Int, orderT::Int, abstol::T, params, parse_eqs::Bool,
adaptive::Bool, minabstol::T, absorb::Bool,
check_property::Function=(t, x)->true) where {N,T}
# One step integration (non-validated)
# TaylorIntegration.__jetcoeffs!(Val(parse_eqs), f!, t, x, dx, xaux, params)
# δt = TaylorIntegration.stepsize(x, abstol)
δt = TaylorIntegration.taylorstep!(f!, t, x, dx, xaux, abstol, params, parse_eqs)
f!(dx, x, params, t) # Update `dx[:][orderT]`
# One step integration for the initial box
# TaylorIntegration.__jetcoeffs!(Val(parse_eqs), f!, tI, xI, dxI, xauxI, params)
# δtI = TaylorIntegration.stepsize(xI, abstol)
δtI = TaylorIntegration.taylorstep!(f!, tI, xI, dxI, xauxI, abstol, params, parse_eqs)
f!(dxI, xI, params, tI) # Update `dxI[:][orderT+1]`
# Step size
δt = min(δt, sign_tstep*(tmax-t0))
δt = sign_tstep * δt
# Test if `check_property` is satisfied; if not, half the integration time.
# If after 25 checks `check_property` is not satisfied, throw an error.
nsteps += 1
issatisfied = false
rem_old = copy(rem)
local _success, _t0
reduced_abstol = abstol
bool_red = true
while bool_red
# Validate the solution: remainder consistent with Schauder thm
δtI = sign_tstep * Interval(0, sign_tstep*δt)
(_success, Δ) = remainder_taylorstep!(f!, t, x, dx, xI, dxI, symIbox, δtI, params)
# Shrink stepsize δt if adaptive is true and _success is false
if !_success
if adaptive
bool_red = reduced_abstol > minabstol
if bool_red
reduced_abstol = reduced_abstol/10
δt = δt * 0.1^(1/orderT)
continue
else
@warn("Minimum absolute tolerance reached: ", t[0], δt, reduced_abstol, Δ)
end
else
@warn("It cannot prove existence and unicity of the solution: ", t[0], δt, Δ)
end
end
# Remainder
rem .= rem_old .+ Δ
# Create TaylorModelN to store remainders and evaluation
@inbounds begin
for i in eachindex(x)
xTMN[i] = fp_rpa( TaylorModelN(x[i](δtI), rem[i], zbox, symIbox) )
# If remainder is still too big, do it again
j = 0
while absorb && (j < 10) && (mag(rem[i]) > 1.0e-10)
t[0] == 0 && println("absorb_remainder ")
j += 1
xTMN[i] = absorb_remainder(xTMN[i])
rem[i] = remainder(xTMN[i])
end
end
xv[nsteps] = evaluate(xTMN, symIbox) # IntervalBox
issatisfied = check_property(t0+δt, xv[nsteps])
if !issatisfied
# δt = δt/2
bool_red = reduced_abstol > minabstol
@info("issatisfied: ", bool_red, δt)
if bool_red
reduced_abstol = reduced_abstol / 10
δt = δt * 0.1^(1/orderT)
continue
else
@warn("Minimum absolute tolerance reached (issatisfied): ", t[0], δt, reduced_abstol, Δ)
end
end
end # @inbounds
break
end
if !issatisfied && !adaptive
@warn("""
Something went wrong:
""", issatisfied, adaptive, bool_red, reduced_abstol,
t0, δt, Δ, nsteps, xv[nsteps], check_property(t0+δt, xv[nsteps])
)
end
return (_success, δt, reduced_abstol)
end
"""
initialize!(X0::IntervalBox{N, T}, orderQ, orderT, x, dx, xTMN, xI, dxI, rem, xTM1v) where {N, T}
Initialize the auxiliary integration variables and normalize the given interval
box to the domain `[-1, 1]^n`.
"""
function initialize!(X0::IntervalBox{N,T}, orderQ, orderT, x, dx, xTMN, xI, dxI, rem, xTM1v) where {N,T}
@assert N == get_numvars()
# center of the box and vector of widths
q0 = mid.(X0)
δq0 = X0 .- q0
# normalized domain
zI = zero_interval(T)
zB = zero_box(N, T)
S = symmetric_box(N, T)
qaux = normalize_taylor.(q0 .+ TaylorN.(1:N, order=orderQ), (δq0,), true)
@. begin
x = Taylor1(qaux, orderT)
dx = x
xTMN = TaylorModelN(qaux, zI, (zB,), (S,))
xI = Taylor1(X0, orderT+1)
dxI = xI
rem = zI
xTM1v[:, 1] = TaylorModel1(deepcopy(x), zI, zI, zI)
end
return nothing
end
# Same as the `IntervalBox` method above, but without the interval-Taylor
# variables `xI`, `dxI` (used by `validated_integ2`). Fills the preallocated
# outputs in place through the `@.` block.
function initialize!(X0::IntervalBox{N,T}, orderQ, orderT, x, dx, xTMN, rem, xTM1v) where {N,T}
    @assert N == get_numvars()
    # center of the box and componentwise widths
    q0 = mid.(X0)
    δq0 = X0 .- q0
    zI = zero_interval(T)
    zB = zero_box(N, T)
    S = symmetric_box(N, T)
    # normalize the initial box to the symmetric domain [-1, 1]^N
    qaux = normalize_taylor.(q0 .+ TaylorN.(1:N, order=orderQ), (δq0,), true)
    # `@.` turns each assignment into an in-place broadcast; `dx = x` aliases
    # the entries of `x` (presumably overwritten downstream — TODO confirm)
    @. begin
        x = Taylor1(qaux, orderT)
        dx = x
        xTMN = TaylorModelN(qaux, zI, (zB,), (S,))
        rem = zI
        xTM1v[:, 1] = TaylorModel1(deepcopy(x), zI, zI, zI)
    end
    return nothing
end
"""
initialize!(X0::Vector{TaylorModel1{TaylorN{T}, T}}, orderQ, orderT, x, dx, xTMN, xI, dxI, rem, xTM1v) where {T}
Initialize the auxiliary integration variables assuming that the given vector
of taylor models `X0` is normalized to the domain `[-1, 1]^n` in space.
"""
function initialize!(X0::Vector{TaylorModel1{TaylorN{T},T}}, orderQ, orderT, x, dx, xTMN, xI, dxI, rem, xTM1v) where {T}
# nomalized domain
N = get_numvars()
zI = zero_interval(T)
zB = zero_box(N, T)
S = symmetric_box(N, T)
qaux = constant_term.(polynomial.(X0))
x0t = expansion_point(X0[1])
domt = domain(X0[1])
@. begin
x = Taylor1(qaux, orderT)
dx = x
xTMN = TaylorModelN(qaux, zI, (zB,), (S,))
# we assume that qaux is normalized to S=[-1, 1]^N
xI = Taylor1(evaluate(qaux, (S,)), orderT+1)
dxI = xI
# remainder
rem = remainder(X0)
# output vector
xTM1v[:, 1] = TaylorModel1(deepcopy(x), rem, x0t, domt)
end
return nothing
end
# Same as the `Vector{TaylorModel1}` method above, but without the
# interval-Taylor variables `xI`, `dxI` (used by `validated_integ2`).
function initialize!(X0::Vector{TaylorModel1{TaylorN{T},T}}, orderQ, orderT, x, dx, xTMN, rem, xTM1v) where {T}
    # normalized domain
    N = get_numvars()
    zI = zero_interval(T)
    zB = zero_box(N, T)
    S = symmetric_box(N, T)
    # constant (TaylorN) coefficients of the input Taylor models
    qaux = constant_term.(polynomial.(X0))
    x0t = expansion_point(X0[1])
    domt = domain(X0[1])
    # `@.` turns each assignment into an in-place broadcast; `dx = x` aliases
    # the entries of `x`
    @. begin
        x = Taylor1(qaux, orderT)
        dx = x
        xTMN = TaylorModelN(qaux, zI, (zB,), (S,))
        # remainder
        rem = remainder(X0)
        # output vector
        xTM1v[:, 1] = TaylorModel1(deepcopy(x), rem, x0t, domt)
    end
    return nothing
end
"""
    validated_integ(f!, X0, t0, tmax, orderQ, orderT, abstol[, params]; kwargs...)

Validated integration of `ẋ = f!(dx, x, params, t)` with initial condition
`X0`, combining jet-transport Taylor polynomials with an interval enclosure
and the remainder validation of `validated_step!`. Returns views of the time
vector, the flowpipe (`IntervalBox`es) and the `TaylorModel1` solutions, over
the `nsteps` actually computed.
NOTE(review): `setformat(:full)` mutates the global interval display format.
"""
function validated_integ(f!, X0, t0::T, tmax::T, orderQ::Int, orderT::Int, abstol::T, params=nothing;
        maxsteps::Int=2000, parse_eqs::Bool=true,
        adaptive::Bool=true, minabstol::T=T(_DEF_MINABSTOL),
        absorb::Bool=false,
        check_property::Function=(t, x)->true) where {T<:Real}
    # Set proper parameters for jet transport
    N = get_numvars()
    dof = N
    # Some variables
    zt = zero(t0)
    zI = zero_interval(T)
    zB = zero_box(N, T)
    S = symmetric_box(N, T)
    t = t0 + Taylor1(orderT)
    tI = t0 + Taylor1(orderT+1)
    # Allocation of vectors
    # Output
    tv = Array{T}(undef, maxsteps+1)
    xv = Array{IntervalBox{N,T}}(undef, maxsteps+1)
    xTM1v = Array{TaylorModel1{TaylorN{T},T}}(undef, dof, maxsteps+1)
    rem = Array{Interval{T}}(undef, dof)
    # Internals: jet transport integration
    x = Array{Taylor1{TaylorN{T}}}(undef, dof)
    dx = Array{Taylor1{TaylorN{T}}}(undef, dof)
    xaux = Array{Taylor1{TaylorN{T}}}(undef, dof)
    xTMN = Array{TaylorModelN{N,T,T}}(undef, dof)
    # Internals: Taylor1{Interval{T}} integration
    xI = Array{Taylor1{Interval{T}}}(undef, dof)
    dxI = Array{Taylor1{Interval{T}}}(undef, dof)
    xauxI = Array{Taylor1{Interval{T}}}(undef, dof)
    # Set initial conditions
    initialize!(X0, orderQ, orderT, x, dx, xTMN, xI, dxI, rem, xTM1v)
    sign_tstep = copysign(1, tmax-t0)
    # Output vectors
    @inbounds tv[1] = t0
    @inbounds xv[1] = evaluate(xTMN, S)
    # Determine if specialized jetcoeffs! method exists (built by @taylorize)
    parse_eqs = TaylorIntegration._determine_parsing!(parse_eqs, f!, t, x, dx, params)
    local _success # if true, the validation step succeeded
    setformat(:full)
    red_abstol = abstol
    # Integration
    nsteps = 1
    while sign_tstep*t0 < sign_tstep*tmax
        # Validated step of the integration
        (_success, δt, red_abstol) = validated_step!(f!, t, x, dx, xaux, tI, xI, dxI, xauxI,
            t0, tmax, sign_tstep, xTMN, xv, rem, zB, S,
            nsteps, orderT, red_abstol, params,
            parse_eqs, adaptive, minabstol,
            absorb, check_property)
        δtI = sign_tstep * Interval(zt, sign_tstep*δt)
        # New initial conditions and time, and output vectors
        nsteps += 1
        @inbounds tv[nsteps] = t0
        t0 += δt
        @inbounds t[0] = t0
        @inbounds tI[0] = t0
        # In-place (broadcast) update of the solution vectors for the next step
        @. begin
            xTM1v[:, nsteps] = TaylorModel1(x, rem, zI, δtI) # deepcopy?
            x = Taylor1(evaluate(x, δt), orderT)
            # dx = Taylor1(zero(constant_term(x)), orderT)
            xI = Taylor1(evaluate(xTMN, (S,)), orderT+1)
            # dxI = xI
        end
        # Try to increase `red_abstol` if `adaptive` is true
        if adaptive
            red_abstol = min(abstol, 10*red_abstol)
        end
        # If the integration step is unsuccessfull, break with a warning; note that the
        # last integration step (which was not successfull) is returned
        if !_success
            @warn("""
                Exiting due to unsuccessfull step
                """, _success, t0)
            break
        end
        if nsteps > maxsteps
            @warn("""
            Maximum number of integration steps reached; exiting.
            """)
            break
        end
    end
    return view(tv,1:nsteps), view(xv,1:nsteps), view(xTM1v, :, 1:nsteps)
end
"""
picard(dx, x0, box)
Computes the picard (integral) operator for the initial condition `x0`.
`dx` must be the rhs of the differential equation.
"""
function _picard(dx, x0, box)
∫f = integrate(dx, 0., box)
pol = ∫f.pol + x0.pol
Δk = ∫f.rem
return pol, Δk
end
# Remainder of the Picard (integral) operator, ignoring the polynomial part
function _picard_rem(dx, box)
    return remainder(integrate(dx, box))
end
"""
picard_iteration(f!, dx, xTM1K, params, t, x0, box, ::Val{true})
Computes the picard (integral) operator for the set of equations `f!` and the initial condition`x0`.
The Val parameter enables the selection of the desired method. This one returns the remainder of
the resulting Taylor Model with the remainder of the initial condition included.
"""
function picard_iteration(f!, dx, xTM1K, params, t, x0, box, ::Val{true})
f!(dx, xTM1K, params, t)
return @. _picard_rem(dx, (box,)) + remainder(x0)
end
"""
picard_iteration(f!, dx, xTM1K, params, t, x0, box)
Computes the picard (integral) operator for the set of equations `f!` and the initial condition`x0`.
This method returns the remainder of the resulting Taylor Model without the remainder of the initial condition.
"""
function picard_iteration(f!, dx, xTM1K, params, t, x0, box)
f!(dx, xTM1K, params, t)
return _picard_rem.(dx, (box,))
end
"""
_validate_step(xTM1K, f!, dx, x0, params, t, box, dof; ε=1e-10, maxsteps=20, extrasteps=50)
Validate the Taylor Model solution for the current integration time step.
This function implements the epsilon inflation algorithm proposed by Bünger
with some custom adaptations.
Ref: <NAME>, A Taylor model toolbox for solving ODEs implemented in MATLAB/INTLAB,
J. Comput. Appl. Math. 368, 112511, https://doi.org/10.1016/j.cam.2019.112511
"""
function _validate_step!(xTM1K, f!, dx, x0, params, x, t, box, dof, rem, abstol,
δt, sign_tstep, E, E′, polv, low_ratiov, hi_ratiov,
adaptive::Bool, minabstol;
ε=1e-10, δ=1e-6, validatesteps=20, extrasteps=50)
#
T = eltype(box[1])
zI = zero_interval(T)
domT = sign_tstep * Interval{T}(0, sign_tstep*δt)
orderT = get_order(t)
@. begin
polv = deepcopy.(x)
xTM1K = TaylorModel1(polv, zI, zI, domT)
# xTM1K = TaylorModel1(polv, rem, zI, domT)
E = remainder(xTM1K)
# E = remainder(x0)
end
εi = (1 - ε) .. (1 + ε)
δi = -δ .. δ
_success = false
reduced_abstol = abstol
# Try to prove existence and uniqueness up to numchecks, including reducing the
# time step
bool_red = true
# for nchecks = 1:numchecks
while bool_red
# Try to prove existence and uniqueness up to validatesteps
nsteps = 0
E′ .= picard_iteration(f!, dx, xTM1K, params, t, x0, box, Val(true)) # 0-th iteration
@. xTM1K = TaylorModel1(polv, E′, zI, domT)
while nsteps < validatesteps
E′ .= picard_iteration(f!, dx, xTM1K, params, t, x0, box)
all(iscontractive.(E′, E)) && break
# Only inflates the required component
@inbounds for i in eachindex(dx)
if !iscontractive(E′[i], E[i])
E[i] = E′[i] * εi + δi
end
xTM1K[i] = TaylorModel1(polv[i], E[i], zI, domT)
end
nsteps += 1
end
_success = all(iscontractive.(E′, E))
_success && break
# Shrink stepsize `δt` if `adaptive` is `true` and `_success` is false,
# up to some minimum
if !_success
if adaptive
bool_red = reduced_abstol > minabstol
if bool_red
reduced_abstol = reduced_abstol/10
δt = δt * 0.1^(1/orderT)
domT = sign_tstep * Interval{T}(0, sign_tstep*δt)
@. begin
xTM1K = TaylorModel1(polv, zI, zI, domT)
# xTM1K = TaylorModel1(polv, rem, zI, domT)
E = remainder(xTM1K)
# E = remainder(x0)
end
else
@warn("Minimum absolute tolerance reached: ", t[0], E′, E,
_success, all(iscontractive.(E′, E)), reduced_abstol)
end
else
@warn("""It cannot prove existence and unicity of the solution:
t0 = $(t[0])
δt = $(δt)
Δx = $(E)
""")
break
end
end
end
if !all(iscontractive.(E′, E))
@warn("Maximum number of validate steps reached.", t[0], E′, E,
_success, all(iscontractive.(E′, E)))
return (_success, δt, reduced_abstol)
end
# @. begin
# low_ratiov = inf(E′) / inf(E)
# hi_ratiov = sup(E′) / sup(E)
# end
# # Contract further the remainders if the last contraction improves more than 5%
# for ind = 1:extrasteps
# minimum(low_ratiov) > 0.90 && minimum(hi_ratiov) > 0.90 && break
# E .= remainder.(xTM1K)
# E′ .= picard_iteration(f!, dx, xTM1K, params, t, x0, box)
# @. begin
# xTM1K = TaylorModel1(polv, E′, zI, dom)
# low_ratiov = inf(E′) / inf(E)
# hi_ratiov = sup(E′) / sup(E)
# end
# # @show(ind, E, E′, all(iscontractive.(E′, E)), low_ratiov)
# end
return (_success, δt, reduced_abstol)
end
"""
    validated_integ2(f!, X0, t0, tf, orderQ, orderT, abstol[, params]; kwargs...)

Validated integration of `ẋ = f!(dx, x, params, t)` based on the ε-inflation
scheme of `_validate_step!` (instead of the interval-Taylor enclosure used by
`validated_integ`). Returns views of the time vector, the flowpipe
(`IntervalBox`es) and the `TaylorModel1` solutions, over the `nsteps` computed.
NOTE(review): `setformat(:full)` mutates the global interval display format.
"""
function validated_integ2(f!, X0, t0::T, tf::T, orderQ::Int, orderT::Int,
        abstol::T, params=nothing;
        parse_eqs=true, maxsteps::Int=2000,
        absorb::Bool=false,
        adaptive::Bool=true, minabstol=T(_DEF_MINABSTOL),
        validatesteps::Int=30, ε::T=1e-10, δ::T=1e-6,
        absorb_steps::Int=3) where {T <: Real}
    N = get_numvars()
    dof = N
    zt = zero(t0)
    zI = zero_interval(T)
    zB = zero_box(N, T)
    S = symmetric_box(N, T)
    t = t0 + Taylor1(orderT)
    # Output and work arrays
    tv = Array{T}(undef, maxsteps+1)
    xv = Array{IntervalBox{N,T}}(undef, maxsteps+1)
    xTM1v = Array{TaylorModel1{TaylorN{T},T}}(undef, dof, maxsteps+1)
    x = Array{Taylor1{TaylorN{T}}}(undef, dof)
    dx = Array{Taylor1{TaylorN{T}}}(undef, dof)
    xaux = Array{Taylor1{TaylorN{T}}}(undef, dof)
    xTMN = Array{TaylorModelN{N,T,T}}(undef, dof)
    dxTM1 = Array{TaylorModel1{TaylorN{T},T}}(undef, dof)
    xTM1 = Array{TaylorModel1{TaylorN{T},T}}(undef, dof)
    low_ratiov = Array{T}(undef, dof)
    hi_ratiov = Array{T}(undef, dof)
    rem = Array{Interval{T}}(undef, dof)
    E = Array{Interval{T}}(undef, dof)
    E′ = Array{Interval{T}}(undef, dof)
    # Set initial conditions
    initialize!(X0, orderQ, orderT, x, dx, xTMN, rem, xTM1v)
    @inbounds for i in eachindex(x)
        xTM1[i] = TaylorModel1(deepcopy(x[i]), zI, zI, zI)
    end
    polv = polynomial.(xTM1)
    fill!(E, zI)
    fill!(E′, zI)
    sign_tstep = copysign(1, tf - t0)
    nsteps = 1
    @inbounds tv[1] = t0
    @inbounds xv[1] = evaluate(xTMN, S)
    # Determine if specialized jetcoeffs! method exists (built by @taylorize)
    parse_eqs = TaylorIntegration._determine_parsing!(parse_eqs, f!, t, x, dx, params)
    red_abstol = abstol
    setformat(:full)
    while t0 * sign_tstep < tf * sign_tstep
        # Non-validated Taylor step, clipped so it does not overshoot `tf`
        δt = TaylorIntegration.taylorstep!(f!, t, x, dx, xaux, abstol, params, parse_eqs)
        f!(dx, x, params, t)
        δt = min(δt, sign_tstep*(tf-t0))
        δt = sign_tstep * δt
        # Reuse previous TaylorModel1 to save some allocations
        (_success, δt, red_abstol) = _validate_step!(xTM1, f!, dxTM1, xTMN, params, x, t,
            S, dof, rem, red_abstol, δt, sign_tstep, E, E′,
            polv, low_ratiov, hi_ratiov,
            adaptive, minabstol,
            ε=ε, δ=δ,
            validatesteps=validatesteps)
        domt = sign_tstep * Interval(zt, sign_tstep*δt)
        # δtI = (δt .. δt) ∩ domt # assure it is inside the domain in t
        nsteps += 1
        @inbounds tv[nsteps] = t0
        t0 += δt
        @inbounds t[0] = t0
        # Flowpipe
        @. begin
            rem = remainder(xTM1)
            xTMN = fp_rpa(TaylorModelN(copy(evaluate(xTM1, domt)), rem, (zB,), (S,)))
        end
        xv[nsteps] = evaluate(xTMN, S)
        # New initial condition
        @inbounds for i in eachindex(x)
            aux_pol = evaluate(xTM1[i], δt) #δtI
            # rem[i] = remainder(xTM1[i])
            xTMN[i] = fp_rpa(TaylorModelN(deepcopy(aux_pol), rem[i], zB, S))
            # xTMN[i] = fp_rpa(TaylorModelN(deepcopy(aux_pol), 0 .. 0, zB, S))
            # Absorb remainder
            j = 0
            while absorb && (j < absorb_steps) && (mag(rem[i]) > 1.0e-10)
                t[0] == 0 && println("absorb_remainder ")
                j += 1
                xTMN[i] = absorb_remainder(xTMN[i])
                rem[i] = remainder(xTMN[i])
            end
            x[i] = Taylor1(polynomial(xTMN[i]), orderT)
            xTM1v[i, nsteps] = xTM1[i]
        end
        # Try to increase `red_abstol` if `adaptive` is true
        if adaptive
            red_abstol = min(abstol, 10*red_abstol)
        end
        # If the integration step is unsuccessfull, break with a warning; note that the
        # last integration step (which was not successfull) is returned
        if !_success
            @warn("""
                Exiting due to unsuccessfull step
                """, _success, t0)
            break
        end
        if nsteps > maxsteps
            @warn("""
            Maximum number of integration steps reached; exiting.
            """)
            break
        end
    end
    return view(tv, 1:nsteps), view(xv, 1:nsteps), view(xTM1v, :, 1:nsteps)
end
| [
27,
7856,
261,
480,
29,
52,
89,
8207,
43,
1437,
411,
14,
29907,
5841,
1424,
13,
20362,
27,
34345,
29,
10677,
14,
12102,
515,
16820,
82,
13,
20362,
198,
2,
2773,
5050,
329,
31031,
11812,
286,
440,
7206,
82,
198,
198,
9979,
4808,
32... | 1.972592 | 15,470 |
<gh_stars>0
@testset "Protting" begin
@testset "DistPlot1D" begin
# TODO
end
@testset "DistPlot2D" begin
# TODO
end
@testset "CornerPlot" begin
# TODO
end
end
| [
27,
456,
62,
30783,
29,
15,
198,
31,
9288,
2617,
366,
19703,
889,
1,
2221,
198,
220,
2488,
9288,
2617,
366,
20344,
43328,
16,
35,
1,
2221,
198,
220,
220,
220,
1303,
16926,
46,
198,
220,
886,
628,
220,
2488,
9288,
2617,
366,
20344,... | 2.228916 | 83 |
<gh_stars>10-100
using RecurrenceAnalysis, DelimitedFiles, Statistics
# Measure the times (in ms) of evaluating an expression n times.
# Returns `(t, result)`: the vector of timings and the value of `ex`.
macro measuretime(ex, n)
    quote
        # Evaluate once to trigger compilation ("train") and keep the result
        result = $(esc(ex))
        # NOTE(review): `n` must be escaped too, so the macro also works when
        # the repetition count is a variable in the caller's scope
        t = zeros($(esc(n)))
        for i in 1:$(esc(n))
            t[i] = 1000*(@elapsed $(esc(ex)))
        end
        t, result
    end
end
# Function that will be measured: embed the series (dimension 3, delay 6),
# build the recurrence matrix with threshold 1.2 and compute the RQA measures
# with a Theiler window of 1.
function fun_rqa(x, metric)
    embedded = embed(x, 3, 6)
    return rqa(RecurrenceMatrix(embedded, 1.2, metric=metric), theiler=1)
end
# Analyse 12 series from 250 to 3000 points
# (With variable metric)
# Reads the series from "rossler.txt" (columns 2r-1, first 250r rows) and
# appends the median timing plus the RQA measures to
# "benchmark_rqa_julia_$metric.txt", one tab-separated row per series length.
function benchmark(metric)
    m = readdlm("rossler.txt")
    for r=1:12
        x=m[1:250r,2r-1]
        # 5 timed repetitions; keep the median time and the RQA result
        tt, res = @measuretime fun_rqa(x,metric) 5
        t = median(tt)
        # Write table of results
        open("benchmark_rqa_julia_$metric.txt","a") do f
            write(f,"$r\t$t\t")
            for k in ["RR","DET","L","Lmax","ENTR","LAM","TT"]
                write(f, "$(res[k])\t")
            end
            write(f,"\n")
        end
    end
end
# Run the full benchmark for both supported norms; each call appends its
# results to its own "benchmark_rqa_julia_<metric>.txt" file
benchmark("euclidean")
benchmark("max")
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
3311,
33928,
32750,
11,
4216,
320,
863,
25876,
11,
14370,
198,
198,
2,
24291,
262,
1661,
357,
259,
13845,
8,
286,
22232,
281,
5408,
299,
1661,
198,
20285,
305,
3953,
2435,
7,
1069,
... | 2.042553 | 564 |
"""
dt = DotTheta( (x,y) -> dot(x,y) / length(x) )
This parametric type allows to define a new dot product from the one saved in `dt::dot`. More precisely:
dt(u1, u2, p1::T, p2::T, theta::T) where {T <: Real}
computes, the weigthed dot product ``\\langle (u_1,p_1), (u_2,p_2)\\rangle_\\theta = \\theta \\Re \\langle u_1,u_2\\rangle +(1-\\theta)p_1p_2`` where ``u_i\\in\\mathbb R^N``. The ``\\Re`` factor is put to ensure a real valued result despite possible complex valued arguments.
normtheta(u, p::T, theta::T)
Compute, the norm associated to weighted dot product ``\\langle (u_1,p_1), (u_2,p_2)\\rangle_\\theta``.
!!! info "Info"
This is used in the pseudo-arclength constraint with the dot product ``\\frac{1}{N} \\langle u_1,u_2\\rangle,\\quad u_i\\in\\mathbb R^N``
"""
struct DotTheta{Tdot}
dot::Tdot # defaults to (x,y) -> dot(x,y) / length(x)
end
DotTheta() = DotTheta( (x, y) -> dot(x, y) / length(x) )
# Implementation of the dot product associated to DotTheta
# we restrict the type of the parameters because for complex problems, we still want the parameter to be real
(dt::DotTheta)(u1, u2, p1::T, p2::T, θ::T) where {T <: Real} = real(dt.dot(u1, u2) * θ + p1 * p2 * (one(T) - θ))
# Implementation of the norm associated to DotTheta
# we restrict the type of the parameters because for complex problems, we still want the parameter to be real
(dt::DotTheta)(u, p::T, θ::T) where T = sqrt(dt(u, u, p, p, θ))
# Weighted dot product / norm for BorderedArray arguments (state `u`, parameter `p`)
(dt::DotTheta)(a::BorderedArray{vec, T}, b::BorderedArray{vec, T}, θ::T) where {vec, T} = dt(a.u, b.u, a.p, b.p, θ)
(dt::DotTheta)(a::BorderedArray{vec, T}, θ::T) where {vec, T} = dt(a.u, a.p, θ)
####################################################################################################
# Pseudo-arclength constraint expressed with the weighted dot product:
# N(u, p) = ⟨(u, p), (du, dp)⟩_θ - ds, for increments (u, p) and tangent (du, dp)
function arcLengthEq(dt::DotTheta, u, p, du, dp, θ, ds)
    return dt(u, du, p, dp, θ) - ds
end
####################################################################################################
# Interface for tangent predictors
abstract type AbstractTangentPredictor end
abstract type AbstractSecantPredictor <: AbstractTangentPredictor end

# wrappers to use iterators and state
getPredictor!(state::AbstractContinuationState, iter::AbstractContinuationIterable, nrm = false) = getPredictor!(state.z_pred, state.z_old, state.tau, state.ds, iter.tangentAlgo, nrm)
# NOTE(review): the original wrapper referenced an undefined `θ`, misspelled
# the field as `tangenAlgo`, narrowed it with `::NaturalPred`, and dropped the
# passed `verbosity` in favour of `it.verbosity`. Fixed below — assumes `state`
# carries the weight `θ` (TODO confirm against AbstractContinuationState).
getTangent!(state::AbstractContinuationState, it::AbstractContinuationIterable, verbosity) = getTangent!(state.tau, state.z_new, state.z_old, it, state.ds, state.θ, it.tangentAlgo, verbosity)

# reset the predictor (no-op for stateless predictors)
emptypredictor!(::Union{Nothing, AbstractTangentPredictor}) = nothing

# clamp the parameter value to the admissible interval [pMin, pMax]
clampPred(p::Number, it::AbstractContinuationIterable) = clamp(p, it.contParams.pMin, it.contParams.pMax)
# Only `z_pred` is mutated: it is set to `z_old + step * tau`.
# With `nrm == true` the step is rescaled by `tau.p`, so the parameter
# component advances by exactly `ds`.
function getPredictor!(z_pred::M, z_old::M, tau::M, ds, algo::Talgo, nrm = false) where {T, vectype, M <: BorderedArray{vectype, T}, Talgo <: AbstractTangentPredictor}
    copyto!(z_pred, z_old)
    step = nrm ? ds / tau.p : ds
    axpy!(step, tau, z_pred)
end
# generic corrector based on Bordered formulation.
# If the predicted parameter escapes [pMin, pMax], it is clamped and the step
# is corrected with the Natural formulation (Newton at fixed parameter);
# otherwise the bordered Newton solver `newtonPALC` is used.
function corrector(it, z_old::M, τ::M, z_pred::M, ds, θ,
            algo::Talgo, linearalgo = MatrixFreeBLS();
            normC = norm, callback = cbDefault, kwargs...) where
            {T, vectype, M <: BorderedArray{vectype, T}, Talgo <: AbstractTangentPredictor}
    if z_pred.p <= it.contParams.pMin || z_pred.p >= it.contParams.pMax
        z_pred.p = clampPred(z_pred.p, it)
        return corrector(it, z_old, τ, z_pred, ds, θ, NaturalPred(), linearalgo;
                normC = normC, callback = callback, kwargs...)
    end
    return newtonPALC(it, z_old, τ, z_pred, ds, θ; linearbdalgo = linearalgo, normN = normC, callback = callback, kwargs...)
end
####################################################################################################
"""
Natural predictor / corrector: the predictor advances the parameter by `ds`
and keeps the state unchanged; the corrector is a plain Newton solve at fixed
parameter.
"""
struct NaturalPred <: AbstractTangentPredictor end

function getPredictor!(z_pred::M, z_old::M, τ::M, ds, algo::NaturalPred, nrm = false) where {T, vectype, M <: BorderedArray{vectype, T}}
    # copy the current point and shift the parameter component only
    copyto!(z_pred, z_old)
    z_pred.p = z_pred.p + ds
end
# corrector based on natural formulation: a Newton solve on `it.F` at the
# (clamped) predicted parameter, which stays fixed during the iterations.
function corrector(it, z_old::M, τ::M, z_pred::M, ds, θ,
            algo::NaturalPred, linearalgo = MatrixFreeBLS();
            normC = norm, callback = cbDefault, kwargs...) where
            {T, vectype, M <: BorderedArray{vectype, T}}
    res = newton(it.F, it.J, z_pred.u, setParam(it, clampPred(z_pred.p, it)), it.contParams.newtonOptions; normN = normC, callback = callback, kwargs...)
    # pack the converged state with the (fixed) parameter; forward the rest
    return BorderedArray(res[1], z_pred.p), res[2:end]...
end
# Tangent computation for the natural predictor: intentionally a no-op
# (apart from the optional trace message), since the natural predictor only
# shifts the parameter and does not use a tangent vector.
function getTangent!(τ::M, z_new::M, z_old::M, it::AbstractContinuationIterable, ds, θ, algo::NaturalPred, verbosity) where {T, vectype, M <: BorderedArray{vectype, T}}
    (verbosity > 0) && println("--> predictor = ", algo)
    # we do nothing here, the predictor will just copy z_old into z_pred
end
####################################################################################################
"""
Secant tangent predictor
"""
struct SecantPred <: AbstractSecantPredictor end

# Tangent computation using the secant predictor: the normalized finite
# difference of the last two points on the branch. `τ` holds the result.
function getTangent!(τ::M, z_new::M, z_old::M, it::AbstractContinuationIterable, ds, θ, algo::SecantPred, verbosity) where {T, vectype, M <: BorderedArray{vectype, T}}
    (verbosity > 0) && println("--> predictor = ", algo)
    # secant predictor: τ = z_new - z_old; τ *= sign(ds) / ‖τ‖_θ
    copyto!(τ, z_new)
    minus!(τ, z_old)
    # NOTE(review): the original computed `α` through the ternary
    # `algo isa SecantPred ? ... : sign(ds)/abs(τ.p)`, whose second branch is
    # unreachable here since `algo::SecantPred`; the dead branch was removed.
    α = sign(ds) / it.dottheta(τ, θ)
    rmul!(τ, α)
end
####################################################################################################
"""
Bordered Tangent predictor
"""
struct BorderedPred <: AbstractTangentPredictor end

# tangent computation using Bordered system
# tau is the tangent prediction; computed by solving a bordered linear system
# built from the Jacobian and a finite-difference parameter derivative
function getTangent!(τ::M, z_new::M, z_old::M, it::AbstractContinuationIterable, ds, θ, algo::BorderedPred, verbosity) where {T, vectype, M <: BorderedArray{vectype, T}}
    (verbosity > 0) && println("--> predictor = Bordered")
    # tangent predictor
    ϵ = it.contParams.finDiffEps
    # dFdl = (F(z_new.u, z_new.p + ϵ) - F(z_new.u, z_new.p)) / ϵ  (forward difference)
    dFdl = it.F(z_new.u, setParam(it, z_new.p + ϵ))
    minus!(dFdl, it.F(z_new.u, setParam(it, z_new.p)))
    rmul!(dFdl, 1/ϵ)
    # tau = getTangent(J(z_new.u, z_new.p), dFdl, tau_old, theta, contparams.newtonOptions.linsolve)
    # previous tangent, rescaled with the θ-weights for the bordered rhs
    tau_normed = copy(τ)#copyto!(similar(tau), tau) #copy(tau_old)
    rmul!(tau_normed, θ / length(τ.u), 1 - θ)
    # extract tangent as solution of bordered linear system, using zero(z_new.u)
    tauu, taup, flag, itl = it.linearAlgo( it.J(z_new.u, setParam(it, z_new.p)), dFdl,
            tau_normed, 0*z_new.u, T(1), θ)
    # the new tangent vector must preserve the direction along the curve
    α = T(1) / it.dottheta(tauu, τ.u, taup, τ.p, θ)
    # tau_new = α * tau (normalize and orient, writing into τ)
    copyto!(τ.u, tauu)
    τ.p = taup
    rmul!(τ, α)
end
####################################################################################################
"""
Multiple Tangent predictor
$(TYPEDFIELDS)
# Constructor(s)
MultiplePred(algo, x0, α, n)
MultiplePred(x0, α, n)
- `α` damping in Newton iterations
- `n` number of predictors
- `x0` example of vector solution to be stored
"""
@with_kw mutable struct MultiplePred{T <: Real, Tvec, Talgo} <: AbstractTangentPredictor
"Tangent algorithm used"
tangentalgo::Talgo
"Save the current tangent"
τ::Tvec
"Damping factor"
α::T
"Number of predictors"
nb::Int64
"Index of the largest converged predictor"
currentind::Int64 = 0
"Index for lookup in residual history"
pmimax::Int64 = 1
"Maximum index for lookup in residual history"
imax::Int64 = 4
"Factor to increase ds upon successful step"
dsfact::T = 1.5
end
# Convenience constructors: wrap a tangent algorithm (default `SecantPred`)
# and allocate the tangent storage from the sample solution `x0`.
function MultiplePred(algo::AbstractTangentPredictor, x0, α::T, nb) where T
    return MultiplePred(tangentalgo = algo, τ = BorderedArray(x0, T(0)), α = α, nb = nb)
end
MultiplePred(x0, α, nb) = MultiplePred(SecantPred(), x0, α, nb)

# Reset the bookkeeping fields before a new predictor sweep.
function emptypredictor!(mpd::MultiplePred)
    mpd.currentind = 1
    mpd.pmimax = 1
end
# callback for newton
"""
Newton callback used by `MultiplePred`: while probing an intermediate predictor
point (`currentind > 1`), keep iterating only if the residual shrank by at
least the damping factor `α` over the last `pmimax` iterations; otherwise give
up early and let `corrector` try a closer predictor point.
"""
function (mpred::MultiplePred)(x, f, J, res, iteration, itlinear, options; kwargs...)
    resHist = get(kwargs, :resHist, nothing)
    # Without a residual history we cannot judge progress, so accept the
    # iterate. (Previously `resHist[end]` threw when `resHist === nothing`.)
    if mpred.currentind > 1 && resHist !== nothing
        return iteration - mpred.pmimax > 0 ? resHist[end] <= mpred.α * resHist[end - mpred.pmimax] : true
    end
    return true
end
# Delegate the tangent computation to the wrapped algorithm, then keep a copy
# in `algo.τ` so that `corrector` can build the family of predictor points.
function getTangent!(τ::M, z_new::M, z_old::M, it::AbstractContinuationIterable, ds, θ, algo::MultiplePred{T, M, Talgo}, verbosity) where {T, vectype, M <: BorderedArray{vectype, T}, Talgo}
# compute tangent and store it
(verbosity > 0) && print("--> predictor = MultiplePred\n--")
getTangent!(τ, z_new, z_old, it, ds, θ, algo.tangentalgo, verbosity)
copyto!(algo.τ, τ)
end
# Intentional no-op: `MultiplePred` constructs its own predictor points inside
# `corrector`, so the generic prediction step must leave `z_pred` untouched.
getPredictor!(z_pred::M, z_old::M, τ::M, ds, algo::MultiplePred, nrm = false) where {T, vectype, M <: BorderedArray{vectype, T}} = nothing
# Corrector for `MultiplePred`: try the farthest predictor point first
# (ii = nb) and walk back towards ii = 1, returning the first Newton run that
# converges; the ii == 1 result is returned even if it did not converge.
function corrector(it, z_old::M, tau::M, z_pred::M, ds, θ,
algo::MultiplePred, linearalgo = MatrixFreeBLS(); normC = norm,
callback = cbDefault, kwargs...) where {T, vectype, M <: BorderedArray{vectype, T}}
# we combine the callbacks for the newton iterations
cb = (x, f, J, res, iteration, itlinear, options; k...) -> callback(x, f, J, res, iteration, itlinear, options; k...) & algo(x, f, J, res, iteration, itlinear, options; k...)
# note that z_pred already contains ds * τ, hence ii=0 corresponds to this case
for ii in algo.nb:-1:1
# printstyled(color=:magenta, "--> ii = $ii\n")
# record the current index so the callback knows which point is being probed
algo.currentind = ii
zpred = _copy(z_pred)
axpy!(ii * ds, algo.τ, zpred)
# we restore the original callback if it reaches the usual case ii == 0
zold, res, flag, itnewton, itlinear = corrector(it, z_old, tau, zpred, ds, θ,
algo.tangentalgo, linearalgo; normC = normC, callback = cb, kwargs...)
if flag || ii == 1 # for i==1, we return the result anyway
return zold, res, flag, itnewton, itlinear
end
end
# NOTE(review): unreachable when nb >= 1 — the ii == 1 iteration always
# returns above; kept for safety.
return zold, res, flag, itnewton, itlinear
end
# Step-size control for `MultiplePred`. Returns `(dsnew, θ, stop)` where `stop`
# signals that the continuation should be aborted. On failure it first enlarges
# the residual lookup window `pmimax`, and only shrinks `ds` (by the number of
# predictors) once that window is exhausted; on success with the farthest
# predictor converged, `ds` is increased by `dsfact`.
function stepSizeControl(ds, θ, contparams::ContinuationPar, converged::Bool, it_newton_number::Int, tau::M, mpd::MultiplePred, verbosity) where {T, vectype, M<:BorderedArray{vectype, T}}
if converged == false
dsnew = ds
if abs(ds) < (1 + mpd.nb) * contparams.dsmin
if mpd.pmimax < mpd.imax
@error "--> Increase pmimax"
mpd.pmimax += 1
else
(verbosity > 0) && printstyled("*"^80*"\nFailure to converge with given tolerances\n"*"*"^80, color=:red)
# we stop the continuation
return ds, θ, true
end
else
@error "--> Decrease ds"
dsnew = ds / (1 + mpd.nb)
(verbosity > 0) && printstyled("Halving continuation step, ds = $(dsnew)\n", color=:red)
end
else # the newton correction has converged
dsnew = ds
if mpd.currentind == mpd.nb && abs(ds) * mpd.dsfact <= contparams.dsmax
(verbosity > 0) && @show dsnew
# println("--> Increase ds")
dsnew = ds * mpd.dsfact
end
end
# control step to stay between bounds
dsnew = clampDs(dsnew, contparams)
return dsnew, θ, false
end
####################################################################################################
"""
Polynomial Tangent predictor
$(TYPEDFIELDS)
# Constructor(s)
PolynomialPred(algo, n, k, v0)
PolynomialPred(n, k, v0)
- `n` order of the polynomial
- `k` length of the last solutions vector used for the polynomial fit
- `v0` example of solution to be stored. It is only used to get the `eltype` of the tangent!!
"""
mutable struct PolynomialPred{T <: Real, Tvec, Talgo} <: AbstractTangentPredictor
"Order of the polynomial"
n::Int64
"Length of the last solutions vector used for the polynomial fit"
k::Int64
"Matrix for the interpolation"
A::Matrix{T}
"Algo for tangent when polynomial predictor is not possible"
tangentalgo::Talgo
"Vector of solutions"
solutions::CircularBuffer{Tvec}
"Vector of parameters"
parameters::CircularBuffer{T}
"Vector of arclengths"
arclengths::CircularBuffer{T}
"Coefficients for the polynomials for the solution"
coeffsSol::Vector{Tvec}
"Coefficients for the polynomials for the parameter"
coeffsPar::Vector{T}
"Update the predictor ?"
update::Bool
end
"""
    PolynomialPred(algo, n, k, v0)

Build a polynomial predictor of order `n` fitted on the last `k` solutions,
falling back to `algo` until enough points are stored. `v0` is a sample
solution used only to infer element types. Requires `n < k` so that the
least-squares fit is overdetermined.
"""
function PolynomialPred(algo, n, k, v0)
    # explicit validation (was an `@assert`, which may be compiled out)
    n < k || throw(ArgumentError("the polynomial order n = $n must be smaller than the number of stored solutions k = $k"))
    return PolynomialPred(n, k, zeros(eltype(v0), k, n + 1), algo,
        CircularBuffer{typeof(v0)}(k), CircularBuffer{eltype(v0)}(k),
        CircularBuffer{eltype(v0)}(k),
        Vector{typeof(v0)}(undef, n + 1),
        Vector{eltype(v0)}(undef, n + 1), true)
end
PolynomialPred(n, k, v0) = PolynomialPred(SecantPred(), n, k, v0)
# The polynomial fit needs a full window of `k` past solutions.
isready(ppd::PolynomialPred) = length(ppd.solutions) >= ppd.k

# Drop all recorded solutions, parameters and arclengths.
function emptypredictor!(ppd::PolynomialPred)
    empty!(ppd.solutions)
    empty!(ppd.parameters)
    empty!(ppd.arclengths)
    return nothing
end
# Mean and spread of the stored arclengths (both scaled by the polynomial
# order `n`), used to normalise the Vandermonde variable in the fit.
function getStats(polypred::PolynomialPred)
    mean_s = sum(polypred.arclengths) / polypred.n
    spread = sqrt(sum(s -> (s - mean_s)^2, polypred.arclengths) / polypred.n)
    return mean_s, spread
end
# Evaluate the fitted polynomials at arclength `s_last + ds`, returning the
# predicted solution `x` and parameter `p`.
function (polypred::PolynomialPred)(ds)
    sbar, σ = getStats(polypred)
    s = polypred.arclengths[end] + ds
    t = (s - sbar) / σ
    basis = [t^(d - 1) for d in 1:(polypred.n + 1)]
    p = sum(basis .* polypred.coeffsPar)
    x = sum(basis .* polypred.coeffsSol)
    return x, p
end
# Refit the solution and parameter polynomials on the stored history by least
# squares, writing the coefficients into `coeffsSol` / `coeffsPar`.
function updatePred!(polypred::PolynomialPred)
Sbar, σ = getStats(polypred)
# normalised arclength variable, centred and scaled for conditioning
Ss = (polypred.arclengths .- Sbar) ./ σ
# construction of the Vandermonde matrix, column jj+1 = Ss.^jj
polypred.A[:, 1] .= 1
for jj in 1:polypred.n
polypred.A[:, jj+1] .= polypred.A[:, jj] .* Ss
end
# least-squares fit via the normal equations: B = (AᵀA)⁻¹ Aᵀ
B = (polypred.A' * polypred.A) \ polypred.A'
mul!(polypred.coeffsSol, B, polypred.solutions)
mul!(polypred.coeffsPar, B, polypred.parameters)
end
"""
Record the freshly accepted point `z_new` in the predictor history and refresh
the polynomial fit. Until `k` points have been accumulated (or when `update`
is off), fall back to the wrapped tangent algorithm `polypred.tangentalgo`.
"""
function getTangent!(tau::M, z_new::M, z_old::M, it::AbstractContinuationIterable, ds, θ, polypred::PolynomialPred, verbosity) where {T, vectype, M <: BorderedArray{vectype, T}}
    # NOTE(review): removed the stray unused static parameter `Talgo` from the
    # `where` clause (copy-paste leftover from the MultiplePred method).
    (verbosity > 0) && println("--> predictor = PolynomialPred")
    if polypred.update
        # record the cumulative arclength of the new point
        if isempty(polypred.arclengths)
            push!(polypred.arclengths, ds)
        else
            push!(polypred.arclengths, polypred.arclengths[end] + ds)
        end
        push!(polypred.solutions, z_new.u)
        push!(polypred.parameters, z_new.p)
    end
    if !isready(polypred) || !polypred.update
        # not enough history yet (or updating disabled): delegate
        return getTangent!(tau, z_new, z_old, it, ds, θ, polypred.tangentalgo, verbosity)
    else
        # here `polypred.update` is necessarily true, so refit unconditionally
        # (the former `polypred.update ? … : true` ternary was dead code)
        return updatePred!(polypred)
    end
end
# Fill `z_pred` with the polynomial extrapolation when enough history is
# stored; otherwise delegate to the wrapped tangent algorithm.
function getPredictor!(z_pred::M, z_old::M, tau::M, ds, polypred::PolynomialPred, nrm = false) where {T, vectype, M <: BorderedArray{vectype, T}}
if ~isready(polypred)
return getPredictor!(z_pred, z_old, tau, ds, polypred.tangentalgo, nrm)
else
x, p = polypred(ds)
copyto!(z_pred.u, x)
z_pred.p = p
return true
end
end
####################################################################################################
# Rescale the arclength weight θ when the parameter component dominates the
# tangent (|τ.p * θ| > gMax), so that the constraint keeps balancing the state
# and parameter contributions. Returns the (possibly unchanged) new θ.
function arcLengthScaling(θ, contparams, tau::M, verbosity) where {M <: BorderedArray}
# the arclength scaling algorithm is based on Salinger, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
# Wilkes. “LOCA 1.0 Library of Continuation Algorithms: Theory and Implementation Manual,
# ” March 1, 2002. https://doi.org/10.2172/800778.
thetanew = θ
g = abs(tau.p * θ)
(verbosity > 0) && print("Theta changes from $(θ) to ")
if (g > contparams.gMax)
thetanew = contparams.gGoal / tau.p * sqrt( abs(1.0 - g^2) / abs(1.0 - tau.p^2) )
# clamp from below so θ never collapses to zero
if (thetanew < contparams.thetaMin)
thetanew = contparams.thetaMin;
end
end
(verbosity > 0) && print("$(thetanew)\n")
return thetanew
end
####################################################################################################
# Keep the magnitude of the continuation step inside [dsmin, dsmax] while
# preserving its sign (i.e. the direction along the branch).
clampDs(dsnew, contparams::ContinuationPar) =
    sign(dsnew) * clamp(abs(dsnew), contparams.dsmin, contparams.dsmax)
# Generic step-size control. Returns `(dsnew, thetanew, stop)` where `stop`
# signals that the continuation should be aborted. On Newton failure the step
# is halved (down to dsmin); on success it is grown so that the Newton
# iteration count approaches `maxIter`.
function stepSizeControl(ds, θ, contparams::ContinuationPar, converged::Bool, it_newton_number::Int, tau::M, algo::AbstractTangentPredictor, verbosity) where {T, vectype, M<:BorderedArray{vectype, T}}
if converged == false
if abs(ds) <= contparams.dsmin
(verbosity > 0) && printstyled("*"^80*"\nFailure to converge with given tolerances\n"*"*"^80, color=:red)
# we stop the continuation
return ds, θ, true
end
dsnew = sign(ds) * max(abs(ds) / 2, contparams.dsmin);
(verbosity > 0) && printstyled("Halving continuation step, ds=$(dsnew)\n", color=:red)
else
# control to have the same number of Newton iterations
Nmax = contparams.newtonOptions.maxIter
factor = (Nmax - it_newton_number) / Nmax
dsnew = ds * (1 + contparams.a * factor^2)
# (verbosity > 0) && @show 1 + contparams.a * factor^2
end
# control step to stay between bounds
dsnew = clampDs(dsnew, contparams)
thetanew = contparams.doArcLengthScaling ? arcLengthScaling(θ, contparams, tau, verbosity) : θ
# we do not stop the continuation
return dsnew, thetanew, false
end
####################################################################################################
"""
This is the classical Newton-Krylov solver used to solve `F(x, p) = 0` together
with the scalar condition `n(x, p) ≡ θ ⋅ <x - x0, τx> + (1-θ) ⋅ (p - p0) * τp - n0 = 0`. This makes a problem of dimension N + 1.
Here, we specify `p` as a subfield of `par` with the `paramLens::Lens`
# Arguments
- `(x, par) -> F(x, par)` where `par` is a set of parameters like `(a=1.0, b=1)`
- `(x, par) -> Jh(x, par)` the jacobian Jh = ∂xF
"""
function newtonPALC(F, Jh, par, paramlens::Lens,
z0::BorderedArray{vectype, T},
τ0::BorderedArray{vectype, T},
z_pred::BorderedArray{vectype, T},
ds::T, θ::T,
contparams::ContinuationPar{T, S, E},
dottheta::DotTheta;
linearbdalgo = BorderingBLS(DefaultLS()),
normN = norm,
callback = cbDefault, kwargs...) where {T, S, E, vectype}
# Extract parameters
@unpack tol, maxIter, verbose, α, αmin, linesearch = contparams.newtonOptions
@unpack finDiffEps, pMin, pMax = contparams
N = (x, p) -> arcLengthEq(dottheta, minus(x, z0.u), p - z0.p, τ0.u, τ0.p, θ, ds)
normAC = (resf, resn) -> max(normN(resf), abs(resn))
# Initialise iterations
x = _copy(z_pred.u) # copy(z_pred.u)
p = z_pred.p
x_pred = _copy(x) # copy(x)
# Initialise residuals
res_f = F(x, set(par, paramlens, p)); res_n = N(x, p)
dX = _copy(res_f) # copy(res_f)
dp = T(0)
up = T(0)
# dFdp = (F(x, p + finDiffEps) - res_f) / finDiffEps
dFdp = _copy(F(x, set(par, paramlens, p + finDiffEps)))
minus!(dFdp, res_f) # dFdp = dFdp - res_f
rmul!(dFdp, T(1) / finDiffEps)
res = normAC(res_f, res_n)
resHist = [res]
it = 0
itlineartot = 0
# Displaying results
verbose && displayIteration(it, res)
line_step = true
# invoke callback before algo really starts
compute = callback(x, res_f, nothing, res, 0, 0, contparams; p = p, resHist = resHist, fromNewton = false, kwargs...)
# Main loop
while (res > tol) & (it < maxIter) & line_step & compute
# dFdp = (F(x, p + epsi) - F(x, p)) / epsi)
copyto!(dFdp, F(x, set(par, paramlens, p + finDiffEps)))
minus!(dFdp, res_f); rmul!(dFdp, T(1) / finDiffEps)
J = Jh(x, set(par, paramlens, p))
u, up, flag, itlinear = linearbdalgo(J, dFdp, τ0, res_f, res_n, θ)
itlineartot += sum(itlinear)
if linesearch
line_step = false
while !line_step & (α > αmin)
# x_pred = x - α * u
copyto!(x_pred, x); axpy!(-α, u, x_pred)
p_pred = p - α * up
copyto!(res_f, F(x_pred, set(par, paramlens, p_pred)))
res_n = N(x_pred, p_pred)
res = normAC(res_f, res_n)
if res < resHist[end]
if (res < resHist[end] / 2) & (α < 1)
α *= 2
end
line_step = true
copyto!(x, x_pred)
p = clamp(p_pred, pMin, pMax)
# p = p_pred
else
α /= 2
end
end
α = contparams.newtonOptions.α # we put back the initial value
else
minus!(x, u) # x .= x .- u
p = clamp(p - up, pMin, pMax)
# p = p - up
copyto!(res_f, F(x, set(par, paramlens, p)))
res_n = N(x, p); res = normAC(res_f, res_n)
end
push!(resHist, res)
it += 1
verbose && displayIteration(it, res, itlinear)
# shall we break the loop?
compute = callback(x, res_f, J, res, it, itlinear, contparams; p = p, resHist = resHist, fromNewton = false, kwargs...)
end
verbose && displayIteration(it, res, 0, true) # display last line of the table
flag = (resHist[end] < tol) & callback(x, res_f, nothing, res, it, -1, contparams; p = p, resHist = resHist, fromNewton = false, kwargs...)
return BorderedArray(x, p), resHist, flag, it, itlineartot
end
# Convenience wrapper for use inside `continuation`: pull `F`, `J`, the
# parameters, the lens and the Newton settings from the continuation iterable
# `it` and forward them to the full `newtonPALC`.
newtonPALC(it::AbstractContinuationIterable, z0::M, τ0::M, z_pred::M, ds::T, θ::T; kwargs...) where {T, vectype, M <: BorderedArray{vectype, T}} = newtonPALC(it.F, it.J, it.par, it.lens, z0, τ0, z_pred, ds, θ, it.contParams, it.dottheta; kwargs...)
####################################################################################################
| [
37811,
198,
197,
28664,
796,
22875,
464,
8326,
7,
357,
87,
11,
88,
8,
4613,
16605,
7,
87,
11,
88,
8,
1220,
4129,
7,
87,
8,
1267,
198,
198,
1212,
5772,
19482,
2099,
3578,
284,
8160,
257,
649,
16605,
1720,
422,
262,
530,
7448,
287... | 2.494313 | 8,353 |
using Test, Onda, Dates, Random, UUIDs
@testset "pretty printing" begin
@test repr(TimeSpan(6149872364198, 123412345678910)) ==
"TimeSpan(01:42:29.872364198, 34:16:52.345678910)"
signal = Signal([:a, :b, Symbol("c-d")], Nanosecond(3), Nanosecond(Second(12345)),
:unit, 0.25, -0.5, Int16, 50.2, Symbol("lpcm.zst"), nothing)
@test sprint(show, signal, context=(:compact => true)) ==
"Signal([:a, :b, Symbol(\"c-d\")])"
@test sprint(show, signal) == """
Signal:
channel_names: [:a, :b, Symbol(\"c-d\")]
start_nanosecond: 3 nanoseconds (00:00:00.000000003)
stop_nanosecond: 12345000000000 nanoseconds (03:25:45.000000000)
sample_unit: :unit
sample_resolution_in_unit: 0.25
sample_offset_in_unit: -0.5
sample_type: Int16
sample_rate: 50.2 Hz
file_extension: Symbol(\"lpcm.zst\")
file_options: nothing"""
samples = Samples(signal, true,
rand(Random.MersenneTwister(0), signal.sample_type, 3, 5))
@test sprint(show, samples, context=(:compact => true)) == "Samples(3×5 Array{Int16,2})"
@test sprint(show, samples) == """
Samples (00:00:00.099601594):
signal.channel_names: [:a, :b, Symbol(\"c-d\")]
signal.start_nanosecond: 3 nanoseconds (00:00:00.000000003)
signal.stop_nanosecond: 12345000000000 nanoseconds (03:25:45.000000000)
signal.sample_unit: :unit
signal.sample_resolution_in_unit: 0.25
signal.sample_offset_in_unit: -0.5
signal.sample_type: Int16
signal.sample_rate: 50.2 Hz
signal.file_extension: Symbol(\"lpcm.zst\")
signal.file_options: nothing
encoded: true
data:
3×5 Array{Int16,2}:
20032 4760 27427 -20758 24287
14240 5037 5598 -5888 21784
16885 600 20880 -32493 -19305"""
annotations = Set(Annotation("val", TimeSpan(i, i + 1)) for i in 1:10)
recording = Recording(Dict(:test => signal), annotations)
@test sprint(show, recording) == """
Recording (03:25:45.000000000; 12345.0 seconds)
signals:
:test => Signal([:a, :b, Symbol(\"c-d\")])
annotations: (10 total)"""
mktempdir() do root
dataset = Dataset(joinpath(root, "test.onda"); create=true)
return @test sprint(show, dataset) == "Dataset($(dataset.path), 0 recordings)"
end
end
| [
3500,
6208,
11,
440,
45658,
11,
44712,
11,
14534,
11,
471,
27586,
82,
198,
198,
31,
9288,
2617,
366,
37784,
13570,
1,
2221,
198,
220,
220,
220,
2488,
9288,
41575,
7,
7575,
4561,
272,
7,
46841,
44183,
1954,
2414,
22337,
11,
1105,
268... | 1.628696 | 2,063 |
<gh_stars>0
#################### Discrete Gibbs Sampler ####################
#################### Types and Constructors ####################
const DGSUnivariateDistribution =
Union{Bernoulli, Binomial, Categorical, DiscreteUniform,
Hypergeometric, NoncentralHypergeometric}
const DSForm = Union{Function, Vector{Float64}}
mutable struct DSTune{F<:DSForm} <: SamplerTune
mass::Union{F, Missing}
support::Matrix{Real}
DSTune{F}() where {F<:DSForm} = new{F}()
DSTune{F}(x::Vector, support::AbstractMatrix) where {F<:DSForm} =
new{F}(missing, support)
DSTune{F}(x::Vector, support::AbstractMatrix, mass::Function) where {F<:DSForm} = new{F}(mass, support)
end
const DGSVariate = SamplerVariate{DSTune{Function}}
const DiscreteVariate = SamplerVariate{DSTune{Vector{Float64}}}
validate(v::DGSVariate) = validate(v, v.tune.support)
function validate(v::DiscreteVariate)
validate(v, v.tune.support)
validate(v, v.tune.support, v.tune.mass)
end
function validate(v::SamplerVariate{DSTune{F}}, support::Matrix) where {F<:DSForm}
n = length(v)
size(support, 1) == n ||
throw(ArgumentError("size(support, 1) differs from variate length $n"))
v
end
validate(v::DiscreteVariate, support::Matrix, mass::Union{Vector{Float64}, Missing}) =
isa(mass, Missing) ? v : validate(v, support, mass)
function validate(v::DiscreteVariate, support::Matrix, mass::Vector{Float64})
n = length(mass)
size(support, 2) == n ||
throw(ArgumentError("size(support, 2) differs from mass length $n"))
v
end
#################### Sampler Constructor ####################
"""
DGS(params::ElementOrVector{Symbol})
Construct a `Sampler` object for which DGS sampling is to be applied separately
to each of the supplied parameters. Parameters are assumed to have discrete
univariate distributions with finite supports.
Returns a `Sampler{DSTune{Function}}` type object.
* `params`: stochastic node(s) to be updated with the sampler.
"""
function DGS(params::ElementOrVector{Symbol})
params = asvec(params)
samplerfx = function(model::Model, block::Integer)
s = model.samplers[block]
local node, x
for key in params
node = model[key]
x = unlist(node)
sim = function(i::Integer, d::DGSUnivariateDistribution, mass::Function)
v = DGSVariate([x[i]], support(d)')
sample!(v, mass)
x[i] = v[1]
relist!(model, x, key)
end
mass = function(d::DGSUnivariateDistribution, v::AbstractVector,
i::Integer)
x[i] = value = v[1]
relist!(model, x, key)
exp(logpdf(d, value) + logpdf(model, node.targets))
end
DGS_sub!(node.distr, sim, mass)
end
nothing
end
Sampler(params, samplerfx, DSTune{Function}())
end
function DGS_sub!(d::UnivariateDistribution, sim::Function, mass::Function)
sim(1, d, v -> mass(d, v, 1))
end
function DGS_sub!(D::Array{UnivariateDistribution}, sim::Function,
mass::Function)
for i in 1:length(D)
d = D[i]
sim(i, d, v -> mass(d, v, i))
end
end
function DGS_sub!(d, sim::Function, mass::Function)
throw(ArgumentError("unsupported distribution structure $(typeof(d))"))
end
#################### Sampling Functions ####################
sample!(v::SamplerVariate{DSTune{F}}) where {F<:DSForm} = sample!(v, v.tune.mass)
"""
sample!(v::DGSVariate, mass::Function)
Draw one sample directly from a target probability mass function. Parameters
are assumed to have discrete and finite support.
Returns `v` updated with simulated values and associated tuning parameters.
"""
function sample!(v::DGSVariate, mass::Function)
tune = v.tune
n = size(tune.support, 2)
probs = Vector{Float64}(undef, n)
psum = 0.0
for i in 1:n
value = mass(tune.support[:, i])
probs[i] = value
psum += value
end
if psum > 0
probs /= psum
else
probs[:] .= 1 / n
end
v[:] = tune.support[:, rand(Categorical(probs))]
v
end
"""
sample!(v::DiscreteVariate, mass::Vector{Float64})
Draw one sample directly from a target probability mass function. Parameters
are assumed to have discrete and finite support.
Returns `v` updated with simulated values and associated tuning parameters.
"""
function sample!(v::DiscreteVariate, mass::Vector{Float64})
validate(v, v.tune.support, mass)
v[:] = v.tune.support[:, rand(Categorical(mass))]
v
end
| [
27,
456,
62,
30783,
29,
15,
198,
14468,
4242,
8444,
8374,
41071,
3409,
20053,
1303,
14468,
21017,
198,
198,
14468,
4242,
24897,
290,
28407,
669,
1303,
14468,
21017,
198,
198,
9979,
360,
14313,
3118,
42524,
20344,
3890,
796,
198,
220,
22... | 2.696024 | 1,635 |
cd(@__DIR__); include("setups/grid7x3.jl")
pyplot(dpi = 200)
## (a) eigenvectors by nondecreasing eigenvalue ordering
plot(layout = Plots.grid(3, 7))
for i in 1:N
heatmap!(reshape(𝚽[:, i], (Nx, Ny))', c = :viridis, cbar = false,
clims = (-0.4,0.4), frame = :none, ratio = 1, ylim = [0, Ny + 1],
title = latexstring("\\phi_{", i-1, "}"), titlefont = 12,
subplot = i)
end
plt = current()
savefig(plt, "../figs/grid7x3_evsp_title.png")
## (b) eigenvectors by natural frequency ordering
plot(layout = Plots.grid(3, 7))
for i in 1:N
k = grid2eig_ind[i]
heatmap!(reshape(𝚽[:,k], (Nx, Ny))', c = :viridis, cbar = false,
clims = (-0.4,0.4), frame = :none, ratio = 1, ylim = [0, Ny + 1],
title = latexstring("\\varphi_{", string(eig2dct[k,1]),
",", string(eig2dct[k,2]), "}"), titlefont = 12, subplot = i)
end
plt = current()
savefig(plt, "../figs/grid7x3_dct_title2.png")
| [
10210,
7,
31,
834,
34720,
834,
1776,
2291,
7203,
2617,
4739,
14,
25928,
22,
87,
18,
13,
20362,
4943,
198,
9078,
29487,
7,
67,
14415,
796,
939,
8,
198,
198,
2235,
357,
64,
8,
304,
9324,
303,
5217,
416,
30745,
721,
260,
2313,
304,
... | 1.987755 | 490 |
# not working.
using LinearAlgebra, FFTW
import BSON, Statistics, Random
import PyPlot
import NMRSpectraSimulator
include("../src/NMRCalibrate.jl")
import .NMRCalibrate
# for loading something with Interpolations.jl
import OffsetArrays
import Interpolations
import PlotlyJS
import Plots
Plots.plotly()
import Destruct
include("./helpers/final_helpers.jl")
include("./helpers/resonance_helpers.jl")
include("./helpers/plot_resonance.jl")
PyPlot.close("all")
fig_num = 1
Random.seed!(25)
PyPlot.matplotlib["rcParams"][:update](["font.size" => 22, "font.family" => "serif"])
# base_dir = "/home/roy/MEGAsync/outputs/NMR/calibrate/final/"
# project_set = filter(isdir, readdir(base_dir; join=true)) # all dirs in base_dir
project_set = Vector{String}(undef, 0)
tmp = "/home/roy/MEGAsync/outputs/NMR/calibrate/final/D-(+)-Glucose-NRC-600"
push!(project_set, tmp)
function loopplotresonance(project_set)
for i = 1:length(project_set)
projects_dir = project_set[i]
println("Now on $(projects_dir)")
plotresonancegroups(projects_dir)
println()
end
end
### batch.
loopplotresonance(project_set)
### end batch.
| [
2,
407,
1762,
13,
198,
198,
3500,
44800,
2348,
29230,
11,
376,
9792,
54,
198,
11748,
347,
11782,
11,
14370,
11,
14534,
198,
11748,
9485,
43328,
198,
198,
11748,
28692,
6998,
806,
430,
8890,
8927,
198,
198,
17256,
7203,
40720,
10677,
1... | 2.53159 | 459 |
/**
 * This class models a cloud storage API holding a single integer value.
 * NOTE(review): int{L} looks like a Jif-style confidentiality label on the
 * stored field — confirm against the project's security-typing setup.
 */
class Cloud {
int{L} cloud;
/**
 * Put a value into the cloud, overwriting any previous value.
 */
void put(int x) {
this.cloud = x;
}
/**
 * Return the only value the cloud stores.
 */
int get() {
return this.cloud;
}
}
| [
35343,
198,
1635,
770,
1398,
4981,
257,
6279,
6143,
7824,
13,
198,
9466,
198,
4871,
10130,
1391,
198,
220,
220,
220,
493,
90,
43,
92,
6279,
26,
628,
220,
220,
220,
42638,
198,
220,
220,
220,
220,
1635,
5930,
257,
1988,
656,
262,
6... | 2.300813 | 123 |
<reponame>UnofficialJuliaMirror/ProteinEnsembles.jl-186d2b2d-8ad5-54a6-bcea-66047609c611
# Tests for align.jl
@testset "Align" begin
coords_one = [
1.0 0.0 0.0;
0.0 1.0 0.0;
0.0 0.0 0.0;
]
coords_two = [
0.0 -1.0 0.0;
1.0 0.0 0.0;
1.0 1.0 1.0;
]
trans_one_test, trans_two_test, rotation_test = kabschalignment(coords_one, coords_two)
trans_one_real = [1/3, 1/3, 0]
trans_two_real = [-1/3, 1/3, 1]
rotation_real = [
0.0 -1.0 0.0;
1.0 0.0 0.0;
0.0 0.0 1.0;
]
@test isapprox(trans_one_test, trans_one_real)
@test isapprox(trans_two_test, trans_two_real)
@test isapprox(rotation_test, rotation_real)
coords_one = [
1.0 0.0 0.0;
0.0 1.0 0.0;
0.0 0.0 0.0;
]
coords_two = [
0.0 -1.0 0.0;
1.0 0.0 0.0;
1.0 1.0 1.0;
]
devs_test = displacements(coords_one, coords_two)
devs_real = [sqrt(3), sqrt(3), 1.0]
@test isapprox(devs_test, devs_real)
atom_one = Atom("CA", "ALA", 'A', 20, [0.0, 0.0, 0.0], "C")
atom_two = Atom("C", "ALA", 'A', 20, [0.0, 0.0, 0.0], "C")
atom_thr = Atom("CA", "ALA", 'A', 21, [0.0, 0.0, 0.0], "C")
atoms = [atom_one, atom_two, atom_thr]
coords_one = [
0.0 1.0 2.0;
0.0 1.0 0.0;
0.0 0.0 0.0;
]
coords_two = [
0.0 -1.0 0.0;
0.0 0.0 3.0;
3.0 3.0 3.0;
]
align!(coords_one, coords_two, atoms)
# NOTE(review): a first `coords_one_real` assignment here was immediately
# overwritten by the one below and never used; it was dead code and has been
# removed. The expected value after alignment is:
coords_one_real = [
    0.0 -1.0 0.0;
    0.5 1.5 2.5;
    3.0 3.0 3.0;
]
@test coords_one == coords_one_real
atom_one = Atom("CA", "ALA", 'A', 20, [0.0, 0.0, 0.0], "C")
atom_two = Atom("C", "ALA", 'A', 20, [0.0, 0.0, 0.0], "C")
atom_thr = Atom("CA", "ALA", 'A', 21, [0.0, 0.0, 0.0], "C")
atoms = [atom_one, atom_two, atom_thr]
coords_one = [
0.0 1.0 2.0;
0.0 1.0 0.0;
0.0 0.0 0.0;
]
coords_two = [
0.0 -1.0 0.0;
0.0 0.0 3.0;
3.0 3.0 3.0;
]
alignsimple!(coords_one, coords_two, atoms)
coords_one_real = [
0.0 -1.0 0.0;
0.5 1.5 2.5;
3.0 3.0 3.0;
]
@test coords_one == coords_one_real
atom_one = Atom("CA", "ALA", 'A', 20, [0.0, 0.0, 0.0], "C")
atom_two = Atom("C", "ALA", 'A', 20, [1.0, 1.0, 0.0], "C")
atom_thr = Atom("CA", "ALA", 'A', 21, [2.0, 0.0, 0.0], "C")
atoms = [atom_one, atom_two, atom_thr]
atoms_new = deepcopy(atoms)
coords_ref = [
0.0 -1.0 0.0;
0.0 0.0 3.0;
3.0 3.0 3.0;
]
alignatoms!(atoms, coords_ref)
coords_real = [
0.0 -1.0 0.0;
0.5 1.5 2.5;
3.0 3.0 3.0;
]
@test atomcoords(atoms) == coords_real
atoms_ref = atomcoords(atoms, coords_ref)
alignatoms!(atoms_new, atoms_ref)
@test atomcoords(atoms_new) == coords_real
atom_one = Atom("CA", "ALA", 'A', 20, [0.0, 0.0, 0.0], "C")
atom_two = Atom("CA", "ALA", 'A', 30, [0.0, 0.0, 0.0], "C")
struc_one = ModelledStructure(0.0, [0.0 5.0; 0.0 5.0; 0.0 5.0])
struc_two = ModelledStructure(0.0, [1.0 4.0; 1.0 4.0; 1.0 4.0])
ensemble = ModelledEnsemble([atom_one, atom_two], [struc_one, struc_two])
average_test = centroid(ensemble)
average_real = [0.5 4.5; 0.5 4.5; 0.5 4.5]
@test average_test == average_real
coords_one = [
0.0 3.0 13.0;
0.0 0.0 0.0;
0.0 0.0 0.0;
]
coords_two = [
0.0 5.0 10.0;
1.0 0.0 0.0;
0.0 0.0 0.0;
]
atom_one = Atom("CA", "ALA", 'A', 20, [0.0, 0.0, 0.0], "C")
atom_two = Atom("CA", "ALA", 'A', 30, [0.0, 0.0, 0.0], "C")
atom_thr = Atom("C", "ALA", 'A', 40, [0.0, 0.0, 0.0], "C")
atoms = [atom_one, atom_two, atom_thr]
@test isapprox(rmsd(coords_one, coords_two, atoms), sqrt(2.5))
coords_one = [
0.0 3.0;
0.0 0.0;
0.0 0.0;
]
coords_two = [
0.0 5.0;
1.0 0.0;
0.0 0.0;
]
@test isapprox(rmsd(coords_one, coords_two), sqrt(2.5))
end
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
14,
47,
35574,
4834,
4428,
829,
13,
20362,
12,
25096,
67,
17,
65,
17,
67,
12,
23,
324,
20,
12,
4051,
64,
21,
12,
65,
344,
64,
12,
39885,
2857,
31751,
66,
21,
1157,
... | 1.635536 | 2,527 |
module Jags
using Compat, Pkg, Documenter, DelimitedFiles, Unicode, MCMCChains, StatsPlots
#### Includes ####
include("jagsmodel.jl")
include("jagscode.jl")
if !isdefined(Main, :Stanmodel)
include("utilities.jl")
end
"""The directory which contains the executable `bin/stanc`. Inferred
from `Main.JAGS_HOME` or `ENV["JAGS_HOME"]` when available. Use
`set_jags_home!` to modify."""
JAGS_HOME=""
"""
Module initialiser: resolve the global `JAGS_HOME`, preferring `Main.JAGS_HOME`
when defined, then the `JAGS_HOME` environment variable; otherwise leave it
empty and tell the user to call `set_jags_home!`.
"""
function __init__()
    global JAGS_HOME = if isdefined(Main, :JAGS_HOME)
        # direct field access on a known binding; no need for `eval`
        getfield(Main, :JAGS_HOME)
    elseif haskey(ENV, "JAGS_HOME")
        ENV["JAGS_HOME"]
    else
        println("Environment variable JAGS_HOME not found. Use set_jags_home!.")
        ""
    end
end
"""Set the path for `Jags`.
Example: `set_jags_home!(homedir() * "/src/src/cmdstan-2.11.0/")`
"""
set_jags_home!(path) = global JAGS_HOME=path
#### Exports ####
export
# From this file
set_jags_home!,
# From Jags.jl
JAGS_HOME,
# From jagsmodel.jl
Jagsmodel,
# From jagscode.jl
jags
end # module
| [
21412,
449,
3775,
198,
198,
3500,
3082,
265,
11,
350,
10025,
11,
16854,
263,
11,
4216,
320,
863,
25876,
11,
34371,
11,
13122,
9655,
1925,
1299,
11,
20595,
3646,
1747,
198,
198,
4242,
29581,
1303,
21017,
198,
198,
17256,
7203,
73,
3775... | 2.348341 | 422 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.