content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
include("../src/RichVehicleRoutingProblem.jl")
const RVRP = RichVehicleRoutingProblem
using Test
include("unit_tests/unit_tests.jl")
unit_tests()
| [
17256,
7203,
40720,
10677,
14,
14868,
37870,
1548,
49,
13660,
40781,
13,
20362,
4943,
198,
9979,
371,
13024,
47,
796,
3998,
37870,
1548,
49,
13660,
40781,
198,
198,
3500,
6208,
198,
198,
17256,
7203,
20850,
62,
41989,
14,
20850,
62,
419... | 2.921569 | 51 |
include(joinpath("structures", "cluster.jl"))
include(joinpath("structures", "dataitem.jl"))
include(joinpath("structures", "eva.jl"))
include(joinpath("structures", "fittedeva.jl"))
| [
17256,
7,
22179,
6978,
7203,
7249,
942,
1600,
366,
565,
5819,
13,
20362,
48774,
198,
17256,
7,
22179,
6978,
7203,
7249,
942,
1600,
366,
7890,
9186,
13,
20362,
48774,
198,
17256,
7,
22179,
6978,
7203,
7249,
942,
1600,
366,
48855,
13,
2... | 3.101695 | 59 |
<reponame>chachaleo/Polyhedra.jl
import GeometryBasics
"""
struct Mesh{N, T, PT <: Polyhedron{T}} <: GeometryBasics.GeometryPrimitive{N, T}
polyhedron::PT
coordinates::Union{Nothing, Vector{GeometryBasics.Point{3, T}}}
faces::Union{Nothing, Vector{GeometryBasics.TriangleFace{Int}}}
normals::Union{Nothing, Vector{GeometryBasics.Point{3, T}}}
end
Mesh wrapper type that inherits from `GeometryPrimitive` to be used for plotting
a polyhedron. Note that `Mesh(p)` is type unstable but one can use `Mesh{3}(p)`
instead if it is known that `p` is defined in a 3-dimensional space.
"""
mutable struct Mesh{N, T, PT <: Polyhedron{T}} <: GeometryBasics.GeometryPrimitive{N, T}
polyhedron::PT
coordinates::Union{Nothing, Vector{GeometryBasics.Point{N, T}}}
faces::Union{Nothing, Vector{GeometryBasics.TriangleFace{Int}}}
normals::Union{Nothing, Vector{GeometryBasics.Point{N, T}}}
end
function Mesh{N}(polyhedron::Polyhedron{T}) where {N, T}
return Mesh{N, T, typeof(polyhedron)}(polyhedron, nothing, nothing, nothing)
end
function Mesh(polyhedron::Polyhedron, ::StaticArrays.Size{N}) where N
return Mesh{N[1]}(polyhedron)
end
function Mesh(polyhedron::Polyhedron, N::Int)
# This is type unstable but there is no way around that,
# use polyhedron built from StaticArrays vector to avoid that.
return Mesh{N}(polyhedron)
end
function Mesh(polyhedron::Polyhedron)
return Mesh(polyhedron, FullDim(polyhedron))
end
function fulldecompose!(mesh::Mesh)
if mesh.coordinates === nothing
mesh.coordinates, mesh.faces, mesh.normals = fulldecompose(mesh)
end
return
end
# Creates a scene for the vizualisation to be used to truncate the lines and rays
function scene(vr::VRep, ::Type{T}) where T
# First compute the smallest rectangle containing the P-representation (i.e. the points).
(xmin, xmax) = extrema(map((x)->x[1], points(vr)))
(ymin, ymax) = extrema(map((x)->x[2], points(vr)))
(zmin, zmax) = extrema(map((x)->x[3], points(vr)))
width = max(xmax-xmin, ymax-ymin, zmax-zmin)
if width == zero(T)
width = 2
end
scene = GeometryBasics.HyperRectangle{3, T}([(xmin + xmax) / 2 - width,
(ymin + ymax) / 2 - width,
(zmin + zmax) / 2 - width],
2 * width * ones(T, 3))
# Intersection of rays with the limits of the scene
(start, r) -> begin
ray = coord(r)
λ = nothing
min_scene = minimum(scene)
max_scene = maximum(scene)
for i in 1:3
r = ray[i]
if !iszero(r)
cur = max((min_scene[i] - start[i]) / r, (max_scene[i] - start[i]) / r)
if λ === nothing || cur < λ
λ = cur
end
end
end
start + λ * ray
end
end
function _isdup(zray, triangles)
for tri in triangles
normal = tri[2]
if isapproxzero(cross(zray, normal)) && dot(zray, normal) > 0 # If A[j,:] is almost 0, it is always true...
# parallel and equality or inequality and same sense
return true
end
end
false
end
_isdup(poly, hidx, triangles) = _isdup(get(poly, hidx).a, triangles)
function fulldecompose(poly_geom::Mesh{3}, ::Type{T}) where T
poly = poly_geom.polyhedron
exit_point = scene(poly, T)
triangles = Tuple{Tuple{Vector{T},Vector{T},Vector{T}}, Vector{T}}[]
function decomposeplane(hidx)
h = get(poly, hidx)
# xray should be the rightmost ray
xray = nothing
# xray should be the leftmost ray
yray = nothing
zray = h.a
isapproxzero(zray) && return
# Checking rays
counterclockwise(a, b) = dot(cross(a, b), zray)
face_vert = pointtype(poly)[]
for x in points(poly)
if _isapprox(dot(x, zray), h.β)
push!(face_vert, x)
end
end
hull, lines, rays = _planar_hull(3, face_vert, incidentlines(poly, hidx), incidentrays(poly, hidx), counterclockwise, r -> cross(zray, r))
if isempty(lines)
if length(hull) + length(rays) < 3
return
end
@assert length(rays) <= 2
if !isempty(rays)
if length(rays) + length(hull) >= 2
push!(hull, exit_point(last(hull), last(rays)))
end
push!(hull, exit_point(first(hull), first(rays)))
end
else
if length(hull) == 2
@assert length(lines) == 1 && isempty(rays)
a, b = hull
line = first(lines)
empty!(hull)
push!(hull, exit_point(a, line))
push!(hull, exit_point(a, -line))
push!(hull, exit_point(b, -line))
push!(hull, exit_point(b, line))
else
@assert length(hull) == 1 && isempty(rays)
center = first(hull)
empty!(hull)
a = first(lines)
b = nothing
if length(lines) == 2
@assert isempty(rays)
b = last(lines)
elseif !isempty(rays)
@assert length(lines) == 1
@assert length(rays) == 1
b = linearize(first(rays))
end
push!(hull, exit_point(center, a))
if b !== nothing
push!(hull, exit_point(center, b))
end
push!(hull, exit_point(center, -a))
if b !== nothing && length(lines) == 2 || length(rays) >= 2
@assert length(rays) == 2
push!(hull, exit_point(center, -b))
end
end
end
if length(hull) >= 3
a = pop!(hull)
b = pop!(hull)
while !isempty(hull)
c = pop!(hull)
push!(triangles, ((a, b, c), zray))
b = c
end
end
end
for hidx in eachindex(hyperplanes(poly))
decomposeplane(hidx)
end
# If there is already a triangle, his normal is an hyperplane and it is the only face
if isempty(triangles)
for hidx in eachindex(halfspaces(poly))
if !_isdup(poly, hidx, triangles)
decomposeplane(hidx)
end
end
end
ntri = length(triangles)
pts = Vector{GeometryBasics.Point{3, T}}(undef, 3ntri)
faces = Vector{GeometryBasics.TriangleFace{Int}}(undef, ntri)
ns = Vector{GeometryBasics.Point{3, T}}(undef, 3ntri)
for i in 1:ntri
tri = pop!(triangles)
normal = tri[2]
for j = 1:3
idx = 3*(i-1)+j
#ns[idx] = -normal
ns[idx] = normal
end
faces[i] = collect(3*(i-1) .+ (1:3))
k = 1
for k = 1:3
# reverse order of the 3 vertices so that if I compute the
# normals with the `normals` function, they are in the good
# sense.
# I know I don't use the `normals` function but I don't know
# what is the OpenGL convention so I don't know if it cares
# about the order of the vertices.
pts[3*i-k+1] = tri[1][k]
end
end
# If the type of ns is Rational, it also works.
# The normalized array in in float but then it it recast into Rational
map!(normalize, ns, ns)
(pts, faces, ns)
end
fulldecompose(poly::Mesh{N, T}) where {N, T} = fulldecompose(poly, typeof(one(T)/2))
GeometryBasics.coordinates(poly::Mesh) = (fulldecompose!(poly); poly.coordinates)
GeometryBasics.faces(poly::Mesh) = (fulldecompose!(poly); poly.faces)
GeometryBasics.texturecoordinates(poly::Mesh) = nothing
GeometryBasics.normals(poly::Mesh) = (fulldecompose!(poly); poly.normals)
| [
27,
7856,
261,
480,
29,
354,
620,
1000,
78,
14,
34220,
704,
430,
13,
20362,
198,
11748,
2269,
15748,
15522,
873,
198,
198,
37811,
198,
220,
220,
220,
2878,
47529,
90,
45,
11,
309,
11,
19310,
1279,
25,
12280,
704,
1313,
90,
51,
117... | 2.010028 | 3,989 |
@testset "Variables" begin
m = Model()
@variable(m, x ≥ 0, u"m/s")
@test x == UnitJuMP.UnitVariableRef(x.vref, u"m/s")
@test unit(x) == u"m/s"
@test owner_model(x) === m
@variable(m, y[1:4], u"km/hr")
@test y[1] == UnitJuMP.UnitVariableRef(y[1].vref, u"km/hr")
end
@testset "Constraints" begin
m = Model()
@variable(m, u[1:2] ≥ 0, u"m")
@variable(m, v[1:4], u"km/hr")
@variable(m, w, Bin)
@variable(m, y)
maxspeed = 4u"ft/s"
speed = 2.3u"m/s"
# Various combination of coefficient and variables with and without units
@constraint(m, 2v[1] ≤ maxspeed)
@test num_constraints(m, AffExpr, MOI.LessThan{Float64}) == 1
@constraint(m, 2.3v[1] ≤ maxspeed)
@test num_constraints(m, AffExpr, MOI.LessThan{Float64}) == 2
@constraint(m, c1a, speed * w ≤ maxspeed)
@test num_constraints(m, AffExpr, MOI.LessThan{Float64}) == 3
@test unit(c1a) == u"m/s"
@constraint(m, c1b, 40u"km/hr" * w ≤ 15u"m/s")
@test num_constraints(m, AffExpr, MOI.LessThan{Float64}) == 4
@test unit(c1b) == u"km/hr"
@constraint(m, c1c, w * speed ≤ maxspeed)
@test num_constraints(m, AffExpr, MOI.LessThan{Float64}) == 5
@test unit(c1c) == u"m/s"
@constraint(m, c1d, sum(v[i] for i in 1:4) ≤ maxspeed)
@test num_constraints(m, AffExpr, MOI.LessThan{Float64}) == 6
@test unit(c1d) == u"km/hr"
@constraint(m, c1, 2v[1] + 4v[2] ≤ maxspeed)
@test typeof(c1) <: UnitJuMP.UnitConstraintRef
@test unit(c1) == u"km/hr"
@constraint(m, c2, 2v[1] + 4v[2] ≤ maxspeed, u"m/s")
@test unit(c2) == u"m/s"
@test normalized_rhs(c2.cref) == convert(Float64, uconvert(u"m/s", maxspeed).val)
@variable(m, z, Bin)
maxlength = 1000u"yd"
period = 1.5u"hr"
@constraint(m, c3, u[2] + period * v[2] ≤ maxlength * z, u"cm")
@test unit(c3) == u"cm"
@constraint(m, c3b, u[2] + 1.5u"hr" * v[2] ≤ 1000u"yd" * z, u"cm")
@test unit(c3b) == u"cm"
end | [
31,
9288,
2617,
366,
23907,
2977,
1,
2221,
628,
220,
220,
220,
285,
796,
9104,
3419,
198,
220,
220,
220,
220,
198,
220,
220,
220,
2488,
45286,
7,
76,
11,
2124,
26870,
657,
11,
334,
1,
76,
14,
82,
4943,
198,
220,
220,
220,
2488,
... | 1.937804 | 1,029 |
<reponame>NHDaly/pongClone
mutable struct Timer
starttime_ns::typeof(Base.time_ns())
paused_elapsed_ns::typeof(Base.time_ns())
Timer() = new(0,0)
end
function start!(timer::Timer)
timer.starttime_ns = (Base.time_ns)()
return nothing
end
started(timer::Timer) = (timer.starttime_ns ≠ 0)
""" Return seconds since timer was started or 0 if not yet started. """
function elapsed(timer::Timer)
local elapsedtime_ns = (Base.time_ns)() - timer.starttime_ns
return started(timer) * float(elapsedtime_ns) / 1000000000
end
function pause!(timer::Timer)
timer.paused_elapsed_ns = (Base.time_ns)() - timer.starttime_ns
return nothing
end
function unpause!(timer::Timer)
timer.starttime_ns = (Base.time_ns)()
timer.starttime_ns -= timer.paused_elapsed_ns;
return nothing
end
t = Timer()
start!(t)
elapsed(t)
pause!(t)
unpause!(t)
elapsed(t)
| [
27,
7856,
261,
480,
29,
45,
10227,
3400,
14,
79,
506,
2601,
505,
198,
76,
18187,
2878,
5045,
263,
198,
220,
220,
220,
923,
2435,
62,
5907,
3712,
4906,
1659,
7,
14881,
13,
2435,
62,
5907,
28955,
198,
220,
220,
220,
24487,
62,
417,
... | 2.543353 | 346 |
<filename>src/utils.jl
using Downloads
# WARNING: THIS FILE IS WORK-IN-PROGRESS
#--------------------------------------------------------------------
# Metadata info: dimensions
#--------------------------------------------------------------------
function listdimensions(apiurl::String, dataflow::String)
io = IOBuffer()
resp = Downloads.download(apiurl * "data/" * dataflow * "?detail=serieskeysonly&lastNObservations=1&format=jsondata", io) |> take!
ds = JSON3.read(resp).structure.dimensions.series
dim = OrderedDict()
for dsᵢ in ds
dim[dsᵢ.name] = NamedTuple(Symbol(j.id) => j.name for j in dsᵢ.values)
end
dim
end
#--------------------------------------------------------------------
# Basic Data download
#--------------------------------------------------------------------
"""Generates a SDMX API compliant url
#kwargs
filter::Union{nothing,NamedTuple},
updatedAfter::DateTime,
firstNObservations::Int,
lastNObservations::Int,
dimensionAtObservation,
attributes = "dsd",
measures = "all",
includeHistory = false
"""
function generateurl(;context = "*", agencyID = "*", resourceID = "*", version = "*", key::String="*", kwargs...) # TODO: substitute query args by kwargs...
baseurl = join(["https://ws-entry-point/data/",context,agencyID,resourceID,version,key],"/")
query = "/?"*join([String(k)*"="*kwargs[k] for k in keys(kwargs)],"&")
end
""" Generates key part of url from a dictionary"""
function generatekey(dims)
key = ""
for v in dims
key = key*join[v,"+"]*"." # FIXME = sobraría un punto??
end
end
""" Fetch data and creates a SDMX.Datatable"""
function getseries(url)
io = IOBuffer()
return Downloads.download(url, io) |> take! |> SDMX.read(alldims = false)
end
| [
27,
34345,
29,
10677,
14,
26791,
13,
20362,
198,
3500,
50093,
198,
198,
2,
39410,
25,
12680,
45811,
3180,
30936,
12,
1268,
12,
4805,
49656,
7597,
198,
198,
2,
10097,
650,
198,
2,
3395,
14706,
7508,
25,
15225,
198,
2,
10097,
650,
198... | 3.030354 | 593 |
<filename>files/db/migrations/2021061519532446_create_table_roles_users.jl
module CreateTableRolesUsers
import SearchLight.Migrations: create_table, column, primary_key, add_index, drop_table
function up()
create_table(:rolesusers) do
[
primary_key()
column(:roles_id, :int)
column(:users_id, :int)
]
end
add_index(:rolesusers, :roles_id)
add_index(:rolesusers, :users_id)
end
function down()
drop_table(:rolesusers)
end
end
| [
27,
34345,
29,
16624,
14,
9945,
14,
76,
3692,
602,
14,
19004,
15801,
1314,
1129,
4310,
1731,
3510,
62,
17953,
62,
11487,
62,
305,
829,
62,
18417,
13,
20362,
198,
21412,
13610,
10962,
49,
4316,
14490,
198,
198,
11748,
11140,
15047,
13,... | 2.414508 | 193 |
<filename>lib/cufft/util.jl
const cufftNumber = Union{cufftDoubleReal,cufftReal,cufftDoubleComplex,cufftComplex}
const cufftReals = Union{cufftDoubleReal,cufftReal}
const cufftComplexes = Union{cufftDoubleComplex,cufftComplex}
const cufftDouble = Union{cufftDoubleReal,cufftDoubleComplex}
const cufftSingle = Union{cufftReal,cufftComplex}
const cufftTypeDouble = Union{Type{cufftDoubleReal},Type{cufftDoubleComplex}}
const cufftTypeSingle = Union{Type{cufftReal},Type{cufftComplex}}
cufftfloat(x) = _cufftfloat(float(x))
_cufftfloat(::Type{T}) where {T<:cufftReals} = T
_cufftfloat(::Type{Float16}) = Float32
_cufftfloat(::Type{Complex{T}}) where {T} = Complex{_cufftfloat(T)}
_cufftfloat(::Type{T}) where {T} = error("type $T not supported")
_cufftfloat(x::T) where {T} = _cufftfloat(T)(x)
complexfloat(x::DenseCuArray{Complex{<:cufftReals}}) = x
realfloat(x::DenseCuArray{<:cufftReals}) = x
complexfloat(x::DenseCuArray{T}) where {T<:Complex} = copy1(typeof(cufftfloat(zero(T))), x)
complexfloat(x::DenseCuArray{T}) where {T<:Real} = copy1(typeof(complex(cufftfloat(zero(T)))), x)
realfloat(x::DenseCuArray{T}) where {T<:Real} = copy1(typeof(cufftfloat(zero(T))), x)
function copy1(::Type{T}, x) where T
y = CuArray{T}(undef, map(length, axes(x)))
#copy!(y, x)
y .= broadcast(xi->convert(T,xi),x)
end
| [
27,
34345,
29,
8019,
14,
66,
1648,
83,
14,
22602,
13,
20362,
198,
9979,
47291,
83,
15057,
796,
4479,
90,
66,
1648,
83,
25628,
15633,
11,
66,
1648,
83,
15633,
11,
66,
1648,
83,
25628,
5377,
11141,
11,
66,
1648,
83,
5377,
11141,
92,... | 2.332155 | 566 |
<filename>src/composite/control.jl<gh_stars>0
using YaoArrayRegister
using YaoArrayRegister: matvec
export ControlBlock, control, cnot
struct ControlBlock{N, BT<:AbstractBlock, C, M} <: AbstractContainer{BT, N}
ctrl_locs::NTuple{C, Int}
ctrl_config::NTuple{C, Int}
content::BT
locs::NTuple{M, Int}
function ControlBlock{N, BT, C, M}(ctrl_locs, ctrl_config, block, locs) where {N, C, M, BT<:AbstractBlock}
@assert_locs_safe N (ctrl_locs..., locs...)
@assert nqubits(block) == M "number of locations doesn't match the size of block"
@assert block isa AbstractBlock "expect a block, got $(typeof(block))"
new{N, BT, C, M}(ctrl_locs, ctrl_config, block, locs)
end
end
"""
decode_sign(ctrls...)
Decode signs into control sequence on control or inversed control.
"""
decode_sign(ctrls::Int...,) = decode_sign(ctrls)
decode_sign(ctrls::NTuple{N, Int}) where N = tuple(ctrls .|> abs, ctrls .|> sign .|> (x->(1+x)÷2))
function ControlBlock{N}(ctrl_locs::NTuple{C}, ctrl_config::NTuple{C}, block::BT, locs::NTuple{K}) where {N, M, C, K, BT<:AbstractBlock{M}}
M == K || throw(DimensionMismatch("block position not maching its size!"))
return ControlBlock{N, BT, C, M}(ctrl_locs, ctrl_config, block, locs)
end
function ControlBlock{N}(ctrl_locs::NTuple{C}, ctrl_config::NTuple{C}, block, locs::NTuple{K}) where {N, M, C, K}
error("expect a block, got $(typeof(block))")
end
# control bit configs are 1 by default, it use sign to encode control bit code
ControlBlock{N}(ctrl_locs::NTuple{C}, block::AbstractBlock, locs::NTuple) where {N, C} =
ControlBlock{N}(decode_sign(ctrl_locs)..., block, locs)
ControlBlock{N}(ctrl_locs::NTuple{C}, block::Function, locs::NTuple) where {N, C} =
ControlBlock{N}(decode_sign(ctrl_locs)..., parse_block(length(locs), block), locs)
ControlBlock{N}(ctrl_locs::NTuple{C}, block, locs::NTuple) where {N, C} =
ControlBlock{N}(decode_sign(ctrl_locs)..., block, locs) # trigger error
# use pair to represent block under control in a compact way
ControlBlock{N}(ctrl_locs::NTuple{C}, target::Pair) where {N, C} =
ControlBlock{N}(ctrl_locs, target.second, (target.first...,))
"""
control(n, ctrl_locs, target)
Return a [`ControlBlock`](@ref) with number of active qubits `n` and control locs
`ctrl_locs`, and control target in `Pair`.
# Example
```jldoctest
julia> control(4, (1, 2), 3=>X)
nqubits: 4
control(1, 2)
└─ (3,) X gate
julia> control(4, 1, 3=>X)
nqubits: 4
control(1)
└─ (3,) X gate
```
"""
control(total::Int, ctrl_locs, target::Pair) = ControlBlock{total}(Tuple(ctrl_locs), target)
control(total::Int, control_location::Int, target::Pair) = control(total, (control_location, ), target)
"""
control(ctrl_locs, target) -> f(n)
Return a lambda that takes the number of total active qubits as input. See also
[`control`](@ref).
# Example
```jldoctest
julia> control((2, 3), 1=>X)
(n -> control(n, (2, 3), 1 => X gate))
julia> control(2, 1=>X)
(n -> control(n, 2, 1 => X gate))
```
"""
control(ctrl_locs, target::Pair) = @λ(n -> control(n, ctrl_locs, target))
control(control_location::Int, target::Pair) = @λ(n -> control(n, control_location, target))
"""
control(target) -> f(ctrl_locs)
Return a lambda that takes a `Tuple` of control qubits locs as input. See also
[`control`](@ref).
# Example
```jldoctest
julia> control(1=>X)
(ctrl_locs -> control(ctrl_locs, 1 => X gate))
julia> control((2, 3) => YaoBlocks.ConstGate.CNOT)
(ctrl_locs -> control(ctrl_locs, (2, 3) => CNOT gate))
```
"""
control(target::Pair) = @λ(ctrl_locs -> control(ctrl_locs, target))
"""
control(ctrl_locs::Int...) -> f(target)
Return a lambda that takes a `Pair` of control target as input.
See also [`control`](@ref).
# Example
```jldoctest
julia> control(1, 2)
(target -> control((1, 2), target))
```
"""
control(ctrl_locs::Int...) = @λ(target -> control(ctrl_locs, target))
"""
cnot(n, ctrl_locs, location)
Return a speical [`ControlBlock`](@ref), aka CNOT gate with number of active qubits
`n` and locs of control qubits `ctrl_locs`, and `location` of `X` gate.
# Example
```jldoctest
julia> cnot(3, (2, 3), 1)
nqubits: 3
control(2, 3)
└─ (1,) X gate
julia> cnot(2, 1)
(n -> cnot(n, 2, 1))
```
"""
cnot(total::Int, ctrl_locs, locs::Int) = control(total, ctrl_locs, locs=>X)
cnot(ctrl_locs, loc::Int) = @λ(n -> cnot(n, ctrl_locs, loc))
mat(::Type{T}, c::ControlBlock{N, BT, C}) where {T, N, BT, C} = cunmat(N, c.ctrl_locs, c.ctrl_config, mat(T, c.content), c.locs)
function apply!(r::ArrayReg{B, T}, c::ControlBlock) where {B, T}
_check_size(r, c)
instruct!(matvec(r.state), mat(T, c.content), c.locs, c.ctrl_locs, c.ctrl_config)
return r
end
# specialization
for G in [:X, :Y, :Z, :S, :T, :Sdag, :Tdag]
GT = Expr(:(.), :ConstGate, QuoteNode(Symbol(G, :Gate)))
@eval function apply!(r::ArrayReg, c::ControlBlock{N, <:$GT}) where N
_check_size(r, c)
instruct!(matvec(r.state), Val($(QuoteNode(G))), c.locs, c.ctrl_locs, c.ctrl_config)
return r
end
end
PreserveStyle(::ControlBlock) = PreserveAll()
occupied_locs(c::ControlBlock) = (c.ctrl_locs..., map(x->c.locs[x], occupied_locs(c.content))...)
chsubblocks(pb::ControlBlock{N}, blk::AbstractBlock) where {N} = ControlBlock{N}(pb.ctrl_locs, pb.ctrl_config, blk, pb.locs)
# NOTE: ControlBlock will forward parameters directly without loop
cache_key(ctrl::ControlBlock) = cache_key(ctrl.content)
function Base.:(==)(lhs::ControlBlock{N, BT, C, M}, rhs::ControlBlock{N, BT, C, M}) where {BT, N, C, M}
return (lhs.ctrl_locs == rhs.ctrl_locs) && (lhs.content == rhs.content) && (lhs.locs == rhs.locs)
end
Base.adjoint(blk::ControlBlock{N}) where N = ControlBlock{N}(blk.ctrl_locs, blk.ctrl_config, adjoint(blk.content), blk.locs)
# NOTE: we only copy one hierachy (shallow copy) for each block
function Base.copy(ctrl::ControlBlock{N, BT, C, M}) where {BT, N, C, M}
return ControlBlock{N, BT, C, M}(ctrl.ctrl_locs, ctrl.ctrl_config, ctrl.content, ctrl.locs)
end
function YaoBase.iscommute(x::ControlBlock{N}, y::ControlBlock{N}) where N
if x.locs == y.locs && x.ctrl_locs == y.ctrl_locs
return iscommute(x.content, y.content)
else
return iscommute_fallback(x, y)
end
end
| [
27,
34345,
29,
10677,
14,
785,
1930,
578,
14,
13716,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
37826,
19182,
38804,
198,
3500,
37826,
19182,
38804,
25,
2603,
35138,
198,
198,
39344,
6779,
12235,
11,
1630,
11,
269,
1662,
198,
... | 2.424748 | 2,578 |
<filename>julia/turing/coin_bias.jl
#=
This is a port of the R2 model CoinBias.cs
Output from the R2 model:
```
Mean: 0.421294
Variance: 0.0162177
Number of accepted samples = 692
```
This model:
parameters mean std naive_se mcse ess rhat ess_per_sec
Symbol Float64 Float64 Float64 Float64 Float64 Float64 Float64
bias 0.4166 0.1360 0.0014 0.0020 5021.6561 1.0000 2276.3627
=#
using Turing, StatsPlots, Distributions, StatsBase
using CSV
include("jl_utils.jl")
@model function coin_bias(x)
n = length(x)
# Beta(2,5) has mean about 0.2855
bias ~ Beta(2,5)
x ~ filldist(Bernoulli(bias),n)
end
x = parse.(Int,split(readline("coin_bias.txt"),","))
println("x:$x")
model = coin_bias(x)
# chns = sample(model, Prior(), 10_000)
# chns = sample(model, MH(), 1_000)
chns = sample(model, PG(5), 10_000)
# chns = sample(model, SMC(), 1_000)
# chns = sample(model, IS(), 10_000)
# chns = sample(model, HMC(0.1,6), 1_000)
# chns = sample(model, NUTS(), 1_000)
display(chns)
show_var_dist_pct(chns,:bias,20) | [
27,
34345,
29,
73,
43640,
14,
83,
870,
14,
3630,
62,
65,
4448,
13,
20362,
198,
2,
28,
628,
220,
220,
220,
770,
318,
257,
2493,
286,
262,
371,
17,
2746,
16312,
33,
4448,
13,
6359,
220,
628,
220,
220,
220,
25235,
422,
262,
371,
... | 2.105839 | 548 |
# Starting with the number 1 and moving to the right in a clockwise direction a
# 5 by 5 spiral is formed as follows:
#
# 21 22 23 24 25 26
# 20 7 8 9 10 27
# 19 6 1 2 11 28
# 18 5 4 3 12 29
# 17 16 15 14 13 30
#
# It can be verified that the sum of the numbers on the diagonals is 101.
#
# What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral
# formed in the same way?
using ProjectEulerSolutions
# Create and sum the sequence of corner values, then increment.
function p028solution_increment(n::Integer=5)::Integer
val = 1
startindex = 3
for i = 2:2:n
val += sum(collect(startindex .+ (0:i:i*3)))
startindex = startindex + 4*i + 2
end
return val
end
# Closed form solution by noticing that the number in the top right corner is
# n^2 and the other corners as: n^2-n+1, n^2-2n+2, and n^2-3n+3. Summing gives
# 4n^2-6n+6
function p028solution_broadcast(n::Integer=5)::Integer
nset = 3:2:n
vals = 4 .* nset .^ 2 .- 6 .* nset .+ 6
return sum(vals) + 1
end
# Even more closed form solution solving for sum of series, which is:
# 2n^3/3 + n^2/2 + 4n/3 - 5/2. Somehow it is not faster than either previous
# solution.
function p028solution_closedform(n::Integer=5)::Integer
return fld(4 * n^3 + 3 * n^2 + 8 * n - 9, 6)
end
p028 = Problems.Problem(Dict("Incremental" => p028solution_increment,
"Broadcast" => p028solution_broadcast,
"Closed form" => p028solution_closedform))
Problems.benchmark(p028, 1001) | [
2,
17962,
351,
262,
1271,
352,
290,
3867,
284,
262,
826,
287,
257,
8801,
3083,
4571,
257,
198,
2,
642,
416,
642,
23642,
318,
7042,
355,
5679,
25,
198,
2,
198,
2,
2310,
2534,
2242,
1987,
1679,
2608,
198,
2,
1160,
220,
767,
220,
8... | 2.4784 | 625 |
<reponame>caseykneale/ChemometricsData.jl
"""
check_MD5( file_path, checksum )
returns a MD5 hash from a file location.
Note: this converts Int8 representations to comma delimitted strings.
"""
get_MD5( file_path ) = join( string.( open(md5, file_path) ), "," )
"""
check_MD5( file_path, checksum )
Checks the result of an MD5 hash vs a stored checksum.
Note: this converts Int8 representations to comma delimitted strings.
"""
check_MD5( file_path, check_sum ) = get_MD5( file_path ) == check_sum
| [
27,
7856,
261,
480,
29,
7442,
48361,
710,
1000,
14,
41829,
908,
10466,
6601,
13,
20362,
198,
37811,
198,
220,
220,
220,
2198,
62,
12740,
20,
7,
2393,
62,
6978,
11,
8794,
388,
1267,
198,
7783,
82,
257,
10670,
20,
12234,
422,
257,
2... | 2.97076 | 171 |
<reponame>GustavoSasaki/Advent-Of-Code-2020-in-Julia
### A Pluto.jl notebook ###
# v0.12.16
using Markdown
using InteractiveUtils
# ╔═╡ 5e8c4976-3727-11eb-2801-0313fd547ac2
file = open(f->read(f, String), "day_1.txt")
# ╔═╡ bd9f511c-3727-11eb-38e3-ad2be4ac32fc
begin
#separing each line
input = eachmatch(r"(.+)\n",file)
#getting first match of regex and converting to int
input = map(x-> parse(Int64, x.captures[1]), input)
end
# ╔═╡ b3710000-372a-11eb-25f0-4fe8d0aa2162
sum = filter(x->x<2020, [i+j for i in input,j in input])
# ╔═╡ a064fb9e-3737-11eb-06e6-fb89f785aba8
needs = [(2020-x) for x in sum]
# ╔═╡ 081e453e-372d-11eb-09ab-471d8bb8881f
inter = intersect(needs,input)
# ╔═╡ cd91eaa0-3737-11eb-2b49-b9168dab6e3f
result =inter[1]*inter[2]*inter[3]
# ╔═╡ 33efb69e-3738-11eb-3c85-8df7bac1208e
1051 * 897 * 72
# ╔═╡ Cell order:
# ╠═5e8c4976-3727-11eb-2801-0313fd547ac2
# ╠═bd9f511c-3727-11eb-38e3-ad2be4ac32fc
# ╠═b3710000-372a-11eb-25f0-4fe8d0aa2162
# ╠═a064fb9e-3737-11eb-06e6-fb89f785aba8
# ╠═081e453e-372d-11eb-09ab-471d8bb8881f
# ╠═cd91eaa0-3737-11eb-2b49-b9168dab6e3f
# ╠═33efb69e-3738-11eb-3c85-8df7bac1208e
| [
27,
7856,
261,
480,
29,
38,
436,
615,
34049,
33846,
14,
2782,
1151,
12,
5189,
12,
10669,
12,
42334,
12,
259,
12,
16980,
544,
198,
21017,
317,
32217,
13,
20362,
20922,
44386,
198,
2,
410,
15,
13,
1065,
13,
1433,
198,
198,
3500,
294... | 1.751543 | 648 |
using TestPackage
using Test
@testset "TestPackage.jl" begin
@test 8 == testFunction(4)
end
| [
3500,
6208,
27813,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
14402,
27813,
13,
20362,
1,
2221,
198,
220,
220,
220,
2488,
9288,
807,
6624,
1332,
22203,
7,
19,
8,
198,
437,
198
] | 2.852941 | 34 |
<gh_stars>0
# using Base:splat
using JSON
using SQLite
splat_db = SQLite.DB("../splatalogue_v3.db")
SQLite.tables(splat_db)
freq_start = 688.2213591803346
freq_end = 689.3824263880999
strSQL = "SELECT * FROM lines WHERE frequency>=$freq_start AND frequency<=$freq_end;"
println(strSQL)
has_molecules = false
resp = IOBuffer()
write(resp, "{\"molecules\" : [")
for row in SQLite.DBInterface.execute(splat_db, strSQL)
global has_molecules = true
json = JSON.json(row)
write(resp, json)
write(resp, ",")
end
json = String(take!(resp))
if !has_molecules
json = "{\"molecules\" : []}"
else
# remove the last character (comma) from json
json = chop(json, tail=1) * "]}"
end
println(json) | [
27,
456,
62,
30783,
29,
15,
198,
2,
1262,
7308,
25,
22018,
265,
198,
3500,
19449,
198,
3500,
16363,
578,
198,
198,
22018,
265,
62,
9945,
796,
16363,
578,
13,
11012,
7203,
40720,
22018,
10254,
5119,
62,
85,
18,
13,
9945,
4943,
198,
... | 2.442177 | 294 |
<filename>src/offlineAnalysis/PV.jl
function vor( u :: Array{Float64,2}, v :: Array{Float64,2}, m :: MITgcmDatas)
dudy = dimDiff(u, 2, OnePointR()) /m.dyspacing
dvdx = dimDiff(v, 1, OnePointR()) /m.dxspacing
return (dvdx - dudy)
end
function vor( u :: Array{Float64,3}, v :: Array{Float64,3}, m :: MITgcmDatas)
vv = zeros(size(u))
for iz = 1: size(u,3)
vv[:,:,iz] = vor(u[:,:,iz],v[:,:,iz],m)
end
return vv
end
#= include("intercept.jl") =#
function MITgcmPV( u :: Array{Float64,3}, v :: Array{Float64,3},
T :: Array{Float64,3}, m :: MITgcmDatas;
dzspacing :: Float64 = 2.5, f :: Float64 = 7.29e-5, BbyT :: Float64 = 2.0e-4 * 9.8)
parIndent = OnePointR()
dudy = begin
fdy = dimDiff(u, 2, OnePointLR()) /(m.dyspacing)
dimAverage(fdy,1, parIndent)
#= dimMoves(fdy,2,OnePointR()) =#
end
dvdx = begin
fdx = dimDiff(v,1,OnePointLR())/(m.dxspacing)
dimAverage(fdx, 2,parIndent)
#= dimMoves(fdx,1,OnePointR()) =#
end
dudz = begin
fdz = -dimDiff(u,3,OnePointLR())/dzspacing
dimAverage(fdz,1, parIndent)
#= dimMoves(fdz,2,OnePointR()) =#
end
dvdz = begin
fdz = -dimDiff(v,3,OnePointLR())/dzspacing
dimAverage(fdz,2, parIndent)
#= dimMoves(fdz,1,OnePointR()) =#
end
avor = ( -dvdz, dudz, f+(dvdx - dudy))
dB = begin
TT = map( x-> BbyT * dimDiff(T,x, OnePointLR()), (1,2,3))
(TT[1]/m.dxspacing, TT[2]/m.dyspacing, -TT[3]/dzspacing)
end
return mapreduce(+, zip(avor, dB))do x
x[1].*x[2]
end
end
export vor
export MITgcmPV, MITgcmVor
include("intercept.jl")
function MFPV( u :: Array{Float64,3}, v :: Array{Float64,3},
T :: Array{Float64,3}, m :: MITgcmDatas;
dzspacing :: Float64 = 2.5, f :: Float64 = 7.29e-5, BbyT :: Float64 = 2.0e-4 * 9.8,
whether_smooth_T :: Bool = true
)
dudy = begin
fdy = dimDiff(u, 2, OnePointLR()) /(m.dyspacing)
shift = GridMoving(uGrid, GridPosition(mGrid.x, mGrid.y - 0.5),2.0,2.0)
intercept(fdy, shift)/(m.dyspacing)
end
dvdx = begin
fdx = dimDiff(v,1,OnePointLR())/(m.dxspacing)
shift = GridMoving(vGrid, GridPosition(mGrid.x - 0.5, mGrid.y),2.0,2.0)
intercept(fdx, shift)
end
dudz = begin
fdz = -dimDiff(u,3,OnePointLR())/dzspacing
shift = GridMoving(uGrid, mGrid,2.,2.)
intercept(fdz,shift)
end
dvdz = begin
fdz = -dimDiff(v,3,OnePointLR())/dzspacing
shift = GridMoving(vGrid, mGrid,2.,2.)
intercept(fdz,shift)/dzspacing
end
avor = ( -dvdz, dudz, f+(dvdx - dudy))
dB = begin
T1 = intercept(T,GridMoving(0.0, 0.0, 2.0,2.0); keep_when_notmoving=!whether_smooth_T)
TT = map( x-> BbyT * dimDiff(T1,x, OnePointLR()), (1,2,3))
(TT[1]/m.dxspacing, TT[2]/m.dyspacing, -TT[3]/dzspacing)
end
return mapreduce(+, zip(avor, dB))do x
x[1].*x[2]
end
end
export MFPV
| [
27,
34345,
29,
10677,
14,
2364,
1370,
32750,
14,
47,
53,
13,
20362,
198,
8818,
410,
273,
7,
334,
7904,
15690,
90,
43879,
2414,
11,
17,
5512,
410,
7904,
15690,
90,
43879,
2414,
11,
17,
5512,
285,
7904,
17168,
70,
11215,
27354,
292,
... | 1.973159 | 1,453 |
# Figure script: builds one TikzPicture holding a 3-by-4 GroupPlot that compares
# three encodings of the same analog input — digital quantisation, LIF spiking,
# and LNP spiking (columns) — across input, mechanism, transmitted message and
# reconstruction (rows), then wires the panels together with TikZ arrows.
using thesis, PGFPlotsX
setup_pgfplotsx()
# coarse grid `ts` for sampling/quantisation marks, fine grid `tts` for smooth curves
ts = LinRange(thesis.example_trange..., 21)
tts = LinRange(thesis.example_trange..., 201)
## Digital neuron
# quantise into 2^num_bits uniform levels; level midpoints serve as codebook values
num_bits = 4
thresholds_dig = LinRange(thesis.example_yrange..., 2^num_bits+1)
levels_dig = 0.5*(thresholds_dig[1:end-1]+thresholds_dig[2:end])
ys_dig = thesis.example_input.(ts)
ys_bits_dig = thesis.discretize.(ys_dig, Ref(thresholds_dig), Ref(digits.(eachindex(thresholds_dig).-1, base=2, pad=num_bits)))
ys_num_dig = thesis.discretize.(ys_dig, Ref(thresholds_dig), Ref(levels_dig))
ys_trans_dig = thesis.discretize.(ys_dig, Ref(levels_dig), Ref(string.(0:2^num_bits,base=2^num_bits)))
# zero-order-hold reconstruction from the quantised samples
y_rec_dig(t) = ys_num_dig[searchsortedlast(ts, t)]
# τ: one sampling interval; β and δ are presumably the spiking encoders' leak
# rate and firing threshold — TODO confirm against *_pair_get_solution.
τ = minimum(diff(ts))
β = 2.0
δ = 0.3
## LIF neuron
sol_lif,spikes_lif=lif_pair_get_solution(thesis.example_trange, thesis.example_input;α=β,β=β,δ=δ)
y_lif=thesis.example_input_integral.(spikes_lif)
y_rec_lif = t->sol_lif(t,idxs=2)
y_filtered = t->sol_lif(t,idxs=3)
# smaller threshold re-used for the LNP encoder below
δ = 0.15
## LNP neuron
sol_lnp,spikes_lnp=lnp_pair_get_solution(thesis.example_trange, thesis.example_input ;β=β,δ=δ)
y_lnp=thesis.example_input_integral.(spikes_lnp)
y_rec_lnp = t->sol_lnp(t,idxs=1)
y_filtered = t->sol_lnp(t,idxs=2)
## Build the monstrosity
# Panel order is row-major: digital, LIF, LNP per row; named TikZ coordinates
# (botXY/topXY/labelXY) are dropped inside each axis for the arrows at the end.
@pgf gp = TikzPicture(
    GroupPlot(
        {
            group_style={group_size="3 by 4"},
            major_tick_style={draw="none"},
        },
        # input for digital
        {title="digital encoding", height="4cm",width="6cm", grid = "minor", ylabel=raw"input $s(t)$", xticklabel="\\empty", minor_xtick=ts, minor_ytick=levels_dig, ymin=thesis.example_yrange[1], ymax=thesis.example_yrange[2], xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick},
            Table(tts, thesis.example_input.(tts))
        ),
        (Plot(
            {no_markers, color="Set2-B", very_thick},
            Coordinates([(t,0),(t,thesis.example_input(t))])
        ) for (i,t) ∈ enumerate(ts))...,
        PlotInc(
            {only_marks, color="Set2-B"},
            Table(ts, ys_dig)
        ),
        PlotInc(
            {only_marks, mark="+", color="black"},
            Table(ts, ys_num_dig)
        ),
        [raw"\coordinate (bot11) at (axis cs:5,0);"],
        # input for lif
        # (note: title string reads "lIF" — probable typo for "LIF", left unchanged)
        {title="lIF encoding", height="4cm",width="6cm", grid = "minor", yminorgrids="false",xticklabel="\\empty", minor_xtick=spikes_lif, ymin=thesis.example_yrange[1], ymax=thesis.example_yrange[2], xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick, fill="Set2-B", fill_opacity=0.5},
            Table([thesis.example_trange[1];tts;thesis.example_trange[2]], [thesis.example_yrange[1];thesis.example_input.(tts);thesis.example_yrange[1]])
        ),
        # (Plot(
        #     {no_markers, color="black", very_thick},
        #     Coordinates([(t,0),(t,thesis.example_input(t))])
        # ) for (i,t) ∈ enumerate(ts))...,
        [raw"\coordinate (bot12) at (axis cs:5,0);"],
        # input for lnp
        {title="LNP encoding", height="4cm",width="6cm", grid = "minor", yminorgrids="false",xticklabel="\\empty", minor_xtick=spikes_lnp, ymin=thesis.example_yrange[1], ymax=thesis.example_yrange[2], xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick, fill="Set2-B", fill_opacity=0.5},
            Table([thesis.example_trange[1];tts;thesis.example_trange[2]], [thesis.example_yrange[1];thesis.example_input.(tts);thesis.example_yrange[1]])
        ),
        # (Plot(
        #     {no_markers, color="black", very_thick},
        #     Coordinates([(t,0),(t,thesis.example_input(t))])
        # ) for (i,t) ∈ enumerate(ts))...,
        [raw"\coordinate (bot13) at (axis cs:5,0);"],
        # mechanism for digital
        {height="4cm",width="6cm", xticklabel="\\empty", "group/empty plot"},
        # mechanism for lif
        {xtick_pos="bottom", yticklabel_pos="left", ytick_pos="right", tick_align="outside", color="black", minor_tick_length="2mm", height="4cm",width="6cm", grid = "minor", ylabel=raw"{$\int_{0}^t s(t)dt$}", xticklabel="\\empty", minor_xtick=spikes_lif, minor_ytick=y_lif, ymin=0, xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick, color="Set2-B"},
            Table(tts, thesis.example_input_integral.(tts))
        ),
        Plot(
            {only_marks, mark="+", color="black"},
            Table(spikes_lif, y_lif)
        ),
        [raw"\coordinate (top22) at (axis cs:5,\pgfkeysvalueof{/pgfplots/ymax});", raw"\coordinate (bot22) at (axis cs:5,0);"],
        # mechanism for lnp
        {xtick_pos="bottom", yticklabel_pos="left", ytick_pos="right", tick_align="outside", minor_tick_length="2mm", height="4cm",width="6cm", grid = "minor", xticklabel="\\empty", minor_xtick=spikes_lnp, minor_ytick=y_lnp, ymin=0, xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick, color="Set2-B"},
            Table(tts, thesis.example_input_integral.(tts))
        ),
        Plot(
            {only_marks, mark="+", color="black"},
            Table(spikes_lnp, y_lnp)
        ),
        [raw"\coordinate (top23) at (axis cs:5,\pgfkeysvalueof{/pgfplots/ymax});", raw"\coordinate (bot23) at (axis cs:5,0);"],
        # message for digital
        {height="2cm",width="6cm", grid = "minor", ylabel="message", xticklabel="\\empty", minor_xtick=ts, ytick="\\empty", ymin=0, ymax=1, xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            Table([],[])
        ),
        ["\\node[] at (axis cs:$t,0.5) {\\strut $y};" for (t,y) ∈ zip(0.5*(ts[1:end-1].+ts[2:end]), ys_trans_dig)],
        ["\\coordinate (label31) at (\$(axis cs:10,0)+(0,-4pt)\$);", raw"\coordinate (top31) at (axis cs:5,1);\coordinate (bot31) at (axis cs:5,0);"],
        # message for lif
        {height="2cm",width="6cm", grid = "minor", ylabel="spikes", xticklabel="\\empty", xtick="\\empty", ytick="\\empty", ymin=0, ymax=1, xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        (
            Plot(
                {no_markers, color="black", very_thick},
                Coordinates([(t,0),(t,1)])
            ) for (i,t) ∈ enumerate(spikes_lif)
        )...,
        ["\\coordinate (label32) at (\$(axis cs:10,0)+(0,-4pt)\$);", raw"\coordinate (top32) at (axis cs:5,\pgfkeysvalueof{/pgfplots/ymax});", raw"\coordinate (bot32) at (axis cs:5,0);"],
        # message for lnp
        {height="2cm",width="6cm", grid = "minor", xticklabel="\\empty", xtick="\\empty", ytick="\\empty", ymin=0, ymax=1, xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        (
            Plot(
                {no_markers, color="black", very_thick},
                Coordinates([(t,0),(t,1)])
            ) for (i,t) ∈ enumerate(spikes_lnp)
        )...,
        ["\\coordinate (label33) at (\$(axis cs:10,0)+(0,-4pt)\$);", raw"\coordinate (top33) at (axis cs:5,\pgfkeysvalueof{/pgfplots/ymax});", raw"\coordinate (bot33) at (axis cs:5,0);"],
        # reconstruction for digital
        {height="4cm",width="6cm", grid = "minor", xlabel=raw"time $t$", ylabel="reconstruction", minor_xtick=ts, minor_ytick=levels_dig, ymin=thesis.example_yrange[1], ymax=thesis.example_yrange[2], xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick},
            Table(tts, thesis.example_input.(tts))
        ),
        Plot(
            {no_marks, very_thick, color="black"},
            Table(tts, y_rec_dig.(tts))
        ),
        PlotInc(
            {no_marks, color="Set2-B", very_thick},
            Table(tts, (thesis.example_input_integral.(tts).-thesis.example_input_integral.(tts.-τ))/τ)
        ),
        [raw"\coordinate (top41) at (axis cs:5,\pgfkeysvalueof{/pgfplots/ymax});", raw"\coordinate (bot42) at (axis cs:5,0);"],
        # reconstruction for lif
        {height="4cm",width="6cm", grid = "minor", yminorgrids="false",xlabel=raw"time $t$", minor_xtick=spikes_lif, ymin=thesis.example_yrange[1], ymax=thesis.example_yrange[2], xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick},
            Table(tts, thesis.example_input.(tts))
        ),
        Plot(
            {no_marks, very_thick, color="black"},
            Table(tts, y_rec_lif.(tts))
        ),
        PlotInc(
            {no_marks, color="Set2-B", very_thick},
            Table(tts, y_filtered.(tts))
        ),
        [raw"\coordinate (top42) at (axis cs:5,\pgfkeysvalueof{/pgfplots/ymax});", raw"\coordinate (bot42) at (axis cs:5,0);"],
        # reconstruction for lnp
        {height="4cm",width="6cm", grid = "minor", yminorgrids="false",xlabel=raw"time $t$", minor_xtick=spikes_lnp, ymin=thesis.example_yrange[1], ymax=thesis.example_yrange[2], xmin=thesis.example_trange[1], xmax=thesis.example_trange[2]},
        PlotInc(
            {no_marks, very_thick},
            Table(tts, thesis.example_input.(tts))
        ),
        Plot(
            {no_marks, very_thick, color="black"},
            Table(tts, y_rec_lnp.(tts))
        ),
        PlotInc(
            {no_marks, color="Set2-B", very_thick},
            Table(tts, y_filtered.(tts))
        ),
        [raw"\coordinate (top43) at (axis cs:5,\pgfkeysvalueof{/pgfplots/ymax});", raw"\coordinate (bot43) at (axis cs:5,0);"],
    ),
    # arrows connecting each column's panels top-to-bottom
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot11) -- node[draw, fill=white, rounded corners, align=center] {analog-to-digital\\conversion} (top31);",
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot31) -- (top41);",
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot12) -- (top22);",
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot22) -- (top32);",
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot32) -- (top42);",
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot13) -- (top23);",
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot23) -- (top33);",
    raw"\draw[->, shorten >= 4pt, shorten <= 4pt] (bot33) -- (top43);",
    # per-column message-cost annotations (bit count vs spike counts)
    "\\node[anchor=north east] at (label31) {($(sum(sum.(ys_bits_dig))) bits)};",
    "\\node[anchor=north east] at (label32) {($(length(spikes_lif)) spikes)};",
    "\\node[anchor=north east] at (label33) {($(length(spikes_lnp)) spikes)};",
)
#pgfsave("fig/encoding_schemes.pdf",gp)
| [
3500,
21554,
11,
350,
21713,
3646,
1747,
55,
198,
40406,
62,
6024,
69,
489,
1747,
87,
3419,
628,
198,
912,
796,
5164,
17257,
7,
83,
8497,
13,
20688,
62,
2213,
858,
986,
11,
2310,
8,
198,
83,
912,
796,
5164,
17257,
7,
83,
8497,
1... | 2.154785 | 4,587 |
<reponame>victorialena/DecomposedMDPSolver.jl<gh_stars>0
# Smoke tests for `A2TNetwork`: check the output shape with a dense attention
# network and with a constant-weight attention layer, then reproduce a
# previously-failing configuration ("local approx policy eval") end-to-end
# through one Flux training call.
using DecomposedMDPSolver
using Flux
using Test
# Na: size of the network output; Np: number of pre-computed solution functions
# handed to the attention mechanism — TODO confirm naming against A2TNetwork.
Na = 5
Np = 10
solutions = [(x) -> rand(Na) for i=1:Np ]
## Weights network
base = Chain(Dense(4, 32, relu), Dense(32, Na))
# attention head emits Np+1 weights (one per solution plus the base network)
attn = Chain(Dense(4, 32, relu), Dense(32, Np+1), softmax)
a2t_model = A2TNetwork(base, attn, solutions)
@test size(a2t_model(rand(4))) == (Na, 1)
## Constant Weights
base = Chain(Dense(4, 32, relu), Dense(32, Na))
attn = Chain(ConstantLayer(Np+1), softmax)
a2t_model = A2TNetwork(base, attn, solutions)
@test size(a2t_model(rand(4))) == (Na, 1)
## Version that was failing -- local approx policy eval
sols = rand(100)
# Stateful dummy "solution": ignores its argument, refreshes sols[1] with a
# new random draw and returns it.
function val(s)
    sols[1] = rand()
    return sols[1]
end
base = Chain(Dense(2, 32, relu), Dense(32, 1, σ))
attn = Chain(Dense(2, 32, relu), Dense(32, 2, exp))
solutions = [val]
model = A2TNetwork(base, attn, solutions)
S, G = rand(2, 100), rand(1,100)
data = Flux.Data.DataLoader(S, G, batchsize=32, shuffle = true)
opt = ADAM()
# Regression check: this training call used to fail for the setup above.
Flux.train!((x, y) -> Flux.mse(model(x), y), Flux.params(model), data, opt)
| [
27,
7856,
261,
480,
29,
32433,
5132,
8107,
14,
10707,
3361,
1335,
12740,
3705,
14375,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
4280,
3361,
1335,
12740,
3705,
14375,
198,
3500,
1610,
2821,
198,
3500,
6208,
198,
198,
26705,
796... | 2.317391 | 460 |
<filename>P/Perl/build_tarballs.jl
# Note that this script can accept some limited command-line arguments, run
# `julia build_tarballs.jl --help` to see a usage message.
using BinaryBuilder, Pkg

name = "Perl"
version = v"5.30.3"

# Collection of sources required to build perl
# with a few extra modules for polymake
# (JSON, XML-Writer, TermReadKey, Term-ReadLine-Gnu, XML-SAX + Base +
# NamespaceSupport, SVG — each moved into perl's in-tree cpan/ by the recipe)
sources = [
    ArchiveSource("https://www.cpan.org/src/5.0/perl-$version.tar.gz", "32e04c8bb7b1aecb2742a7f7ac0eabac100f38247352a73ad7fa104e39e7406f"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/I/IS/ISHIGAKI/JSON-4.02.tar.gz", "444a88755a89ffa2a5424ab4ed1d11dca61808ebef57e81243424619a9e8627c"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/J/JO/JOSEPHW/XML-Writer-0.625.tar.gz", "e080522c6ce050397af482665f3965a93c5d16f5e81d93f6e2fe98084ed15fbe"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/J/JS/JSTOWE/TermReadKey-2.38.tar.gz", "5a645878dc570ac33661581fbb090ff24ebce17d43ea53fd22e105a856a47290"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/H/HA/HAYASHI/Term-ReadLine-Gnu-1.36.tar.gz", "9a08f7a4013c9b865541c10dbba1210779eb9128b961250b746d26702bab6925"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/G/GR/GRANTM/XML-SAX-1.02.tar.gz", "4506c387043aa6a77b455f00f57409f3720aa7e553495ab2535263b4ed1ea12a"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/P/PE/PERIGRIN/XML-NamespaceSupport-1.12.tar.gz", "47e995859f8dd0413aa3f22d350c4a62da652e854267aa0586ae544ae2bae5ef"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/G/GR/GRANTM/XML-SAX-Base-1.09.tar.gz", "66cb355ba4ef47c10ca738bd35999723644386ac853abbeb5132841f5e8a2ad0"),
    ArchiveSource("https://cpan.metacpan.org/authors/id/M/MA/MANWAR/SVG-2.84.tar.gz", "ec3d6ddde7a46fa507eaa616b94d217296fdc0d8fbf88741367a9821206f28af"),
    DirectorySource("./bundled")
]

# Bash recipe for building
# Cross builds bootstrap a native miniperl first and then configure with a
# pre-generated config.sh; native builds run ./Configure directly.
script = raw"""
perldir=`ls -1d perl-*`
cd $WORKSPACE/srcdir/
for dir in *;
do
  [[ "$dir" == "perl-"* ]] && continue;
  [[ "$dir" == "patches" ]] && continue;
  # build extra perl modules in-tree
  # the names of the extra modules also need to appear in the
  # config.sh for all cross-compilation architectures
  sed -i '1s/^/$ENV{PERL_CORE}=0;/' $dir/Makefile.PL
  mv $dir $perldir/cpan/${dir%-*};
done
cd $perldir/
# allow combining relocation with shared library
# add patch to find binary location from shared library
atomic_patch -p1 ../patches/allow-relocate.patch
# replace some library checks that wont work in the cross-compile environment
# with the required values
atomic_patch -p1 ../patches/cross-nolibchecks.patch
if [[ $target != x86_64-linux* ]] && [[ $target != i686-linux* ]]; then
  # cross build with supplied config.sh
  # build native miniperl
  src=`pwd`
  mkdir host
  pushd host
  ../Configure -des -Dusedevel -Duserelocatableinc -Dmksymlinks -Dosname=linux -Dcc=$CC_FOR_BUILD -Dld=$LD_FOR_BUILD -Dar=$AR_FOR_BUILD -Dnm=$NM_FOR_BUILD -Dlibs=-lm
  make -j${nproc} miniperl
  make -j${nproc} generate_uudmap
  cp -p miniperl $prefix/bin/miniperl-for-build
  popd
  # copy and use prepared configure information
  cp ../patches/config-$target.sh config.sh
  ./Configure -K -S
else
  # native
  # config overrides
  if [[ $target = *-gnu ]]; then
    # disable xlocale.h usage (which was removed in recent glibc)
    cp ../patches/config.arch.gnu config.arch
  fi
  ./Configure -des -Dcc="$CC" -Dprefix=$prefix -Duserelocatableinc -Dprocselfexe -Duseshrplib -Dsysroot=/opt/$target/$target/sys-root -Dccflags="-I${prefix}/include" -Dldflags="-L${libdir} -Wl,-rpath,${libdir}" -Dlddlflags="-shared -L${libdir} -Wl,-rpath,${libdir}"
fi
make -j${nproc} depend
make -j${nproc}
make install
# put a libperl directly in lib
cd $libdir
ln -s perl5/*/*/CORE/libperl.${dlext} libperl.${dlext}
# resolve case-ambiguity:
cd $libdir/perl5/5.*.*
mv Pod/* pod
rmdir Pod
# remove sysroot and target flags from stored compiler flags:
sed -i -e "s#--sysroot[ =]\S\+##g" \
       -e "s#-target[ =]\S\+##g" \
    ${prefix}/*/perl5/*/*/Config_heavy.pl
"""

# These are the platforms we will build for by default, unless further
# platforms are passed in on the command line
# (only x86 glibc/musl Linux and x86_64 macOS are listed)
platforms = [
    Platform("x86_64", "macos")
    Platform("x86_64", "linux"; libc="glibc")
    Platform("i686", "linux"; libc="glibc")
    Platform("x86_64", "linux"; libc="musl")
    Platform("i686", "linux"; libc="musl")
]

# The products that we will ensure are always built
products = [
    ExecutableProduct("perl", :perl)
    LibraryProduct("libperl", :libperl)
]

# Dependencies that must be installed before this package can be built
# Readline_jll is presumably required by the bundled Term-ReadLine-Gnu
# module — TODO confirm.
dependencies = [
    Dependency("Readline_jll")
]

# Build the tarballs, and possibly a `build.jl` as well.
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies)
| [
27,
34345,
29,
47,
14,
5990,
75,
14,
11249,
62,
18870,
21591,
13,
20362,
198,
2,
5740,
326,
428,
4226,
460,
2453,
617,
3614,
3141,
12,
1370,
7159,
11,
1057,
198,
2,
4600,
73,
43640,
1382,
62,
18870,
21591,
13,
20362,
1377,
16794,
... | 2.411351 | 1,991 |
<reponame>jarrison/Gage.jl
module Gage

using Libdl

include("gagestructs.jl")
using Main.GageStructs: BoardInfo

##############################################
####           GageAPI Methods            ####
##############################################
# Julia translations of the Gage Driver API.
# Low-level operations. Require GaGe Drivers Installed and Visible to Julia.
# Each wrapper resolves its entry point out of the `CsSsm` driver library at
# call time via Libdl; negative driver return codes indicate errors.

"""
    geterror(errorcode::Int32)

Look up the driver's human-readable message for a status code via
`CsGetErrorString`. Returns the message `String`, or the raw (negative)
status of `CsGetErrorString` itself if the lookup also fails.
"""
function geterror(errorcode::Int32)
    errstring = Vector{UInt8}(undef, 255)
    lib = dlopen("CsSsm")
    sym = dlsym(lib, :CsGetErrorString)
    err = ccall(sym, Int32, (Int32, Ref{UInt8}, Int32), errorcode, errstring, 255)
    errstring[end] = 0  # force NUL termination before reading as a C string
    err < 0 && return err
    return unsafe_string(pointer(errstring))
end

"""
    initialize()

Initialize the CompuScope driver (`CsInitialize`). Returns the driver's
status code; raises an error carrying the decoded driver message on failure.
"""
function initialize()
    lib = dlopen("CsSsm")
    sym = dlsym(lib, :CsInitialize)
    err = ccall(sym, Int32, ())
    # Previously `throw(SystemError)` threw the exception *type*; raise a
    # real exception with the decoded driver message instead.
    err < 0 && error("CsInitialize failed: $(geterror(err))")
    return err
end

"""
    getsystem(boardtype=0, nchannels=0, bitresolution=0, systemindex=0)

Acquire a handle to a CompuScope system matching the requested constraints
(`CsGetSystem`); zero-valued arguments mean "any". Returns the `UInt32`
system handle on success, or the decoded error message on failure.
"""
function getsystem(boardtype=UInt32(0), nchannels=UInt32(0),
                   bitresolution=UInt32(0), systemindex=Int16(0))
    lib = dlopen("CsSsm")
    sym = dlsym(lib, :CsGetSystem)
    handle = Ref{UInt32}(0)
    err = ccall(sym, Int32,
                (Ref{Cuint}, UInt32, UInt32, UInt32, Int16),
                handle, boardtype, nchannels, bitresolution, systemindex)
    # CsGetSystem signals success with a positive status (original kept `< 1`).
    err < 1 && return geterror(err)
    # Unwrap the Ref: downstream callers such as `getsysteminfo(handle::UInt32)`
    # expect the raw UInt32 handle, not the Ref container.
    return handle[]
end

"""
    getsysteminfo(handle::UInt32)

Populate a `BoardInfo` via `CsGetSystemInfo`. Returns the driver status,
or the decoded error message on failure.
"""
function getsysteminfo(handle::UInt32)
    result = Ref{BoardInfo}()
    lib = dlopen("CsSsm")
    sym = dlsym(lib, :CsGetSystemInfo)
    err = ccall(sym, Int32, (UInt32, Ref{BoardInfo}), handle, result)
    err < 0 && return geterror(err)
    return err
end

"""
    getstatus(hSys::UInt32)

Query the acquisition status of the system (`CsGetStatus`); raises an error
carrying the decoded driver message on failure.
"""
function getstatus(hSys::UInt32)
    lib = dlopen("CsSsm")
    sym = dlsym(lib, :CsGetStatus)
    err = ccall(sym, Int32, (UInt32,), hSys)
    err < 0 && error("CsGetStatus failed: $(geterror(err))")
    return err
end

"""
    freesystem(hSys::UInt32)

Release a system handle obtained from [`getsystem`](@ref) via `CsFreeSystem`.
Returns `true` on success, or the decoded error message on failure.
"""
function freesystem(hSys::UInt32)
    lib = dlopen("CsSsm")
    sym = dlsym(lib, :CsFreeSystem)
    err = ccall(sym, Int32, (UInt32,), hSys)
    err < 0 && return geterror(err)
    return true
end

"""
    gagedo(hSys::UInt32, operation::Integer)

Issue an action command to the system via `CsDo`. Returns `true` on success,
or the decoded error message on failure.
"""
function gagedo(hSys::UInt32, operation::Integer)
    lib = dlopen("CsSsm")
    sym = dlsym(lib, :CsDo)
    err = ccall(sym, Int32, (UInt32, Int16), hSys, operation)
    err < 0 && return geterror(err)
    return true
end

# Data transfer (`CsTransfer`) is not implemented yet.
function transfer(hSys::UInt)
end

end
| [
27,
7856,
261,
480,
29,
73,
22472,
14,
38,
496,
13,
20362,
198,
21412,
402,
496,
198,
3500,
7980,
25404,
198,
31,
12860,
2291,
7203,
70,
363,
395,
1356,
82,
13,
20362,
4943,
198,
3500,
8774,
13,
38,
496,
44909,
82,
25,
5926,
12360... | 2.26158 | 1,101 |
<filename>utils/drive_metrics_solenoid.jl
# Driver script: load the solenoid sensitivity routine and evaluate it once.
include("../src/get_sens_solenoid_K.jl")
# s: parameter vector passed to `sens`; the second argument is presumably an
# iteration/sample count — TODO confirm against get_sens_solenoid_K.jl.
s = [1.0, 4.0, 0.0]
sens(s, 200000)
| [
27,
34345,
29,
26791,
14,
19472,
62,
4164,
10466,
62,
82,
8622,
1868,
13,
20362,
198,
17256,
7203,
40720,
10677,
14,
1136,
62,
82,
641,
62,
82,
8622,
1868,
62,
42,
13,
20362,
4943,
198,
82,
796,
685,
16,
13,
15,
11,
604,
13,
15,... | 1.983333 | 60 |
"""
    _fit(glr::GLR{RobustLoss{ρ},<:L2R}, solver::IWLSCG, X, y, scratch)

Fit a robust-loss generalised linear regression with an L2 penalty by
Iteratively (re)Weighted Least Squares, solving each re-weighted system
(X'WX + λI)θ = X'Wy approximately with conjugate gradient. Iterates until the
relative change in `θ` falls below `solver.tol` or `solver.max_iter` outer
steps have run, then returns the coefficient vector `θ` (with an extra
trailing intercept entry appended to `b` when `glr.fit_intercept`).
"""
function _fit(glr::GLR{RobustLoss{ρ},<:L2R}, solver::IWLSCG, X, y, scratch
              ) where {ρ}
    n,p,_ = npc(scratch)
    _Mv! = Mv!(glr, X, y, scratch; threshold=solver.threshold)
    κ = solver.damping # between 0 and 1, 1 = fully take the new iteration
    # cache
    θ = zeros(p)
    θ_ = zeros(p)
    b = zeros(p) # will contain X'Wy
    ω = zeros(n) # will contain the diagonal of W
    # params for the loop
    # never run CG for more steps than the dimension of the system
    max_cg_steps = min(solver.max_inner, p)
    k, tol = 0, Inf
    while k < solver.max_iter && tol > solver.tol
        # update the weights and retrieve the application function
        # Mθv! corresponds to the current application of (X'WX + λI) on v
        Mθv! = _Mv!(ω, θ)
        Mm = LinearMap(Mθv!, p;
                       ismutating=true, isposdef=true, issymmetric=true)
        Wy = ω .* y
        b = X'Wy
        if glr.fit_intercept
            b = vcat(b, sum(Wy))
        end
        # update
        # damped step: convex mix of the previous iterate and the CG solution
        θ .= (1-κ) .* θ .+ κ .* cg(Mm, b; maxiter=max_cg_steps)
        # check tolerance (relative change of the coefficient vector)
        tol = norm(θ .- θ_) / (norm(θ) + eps())
        # update cache
        copyto!(θ_, θ)
        k += 1
    end
    tol ≤ solver.tol ||
        @warn "IWLS did not converge in $(solver.max_iter) iterations."
    return θ
end
| [
8818,
4808,
11147,
7,
4743,
81,
3712,
8763,
49,
90,
14350,
436,
43,
793,
90,
33643,
5512,
27,
25,
43,
17,
49,
5512,
1540,
332,
3712,
40,
54,
43,
6173,
38,
11,
1395,
11,
331,
11,
12692,
198,
220,
220,
220,
220,
220,
220,
220,
2... | 1.862573 | 684 |
<reponame>AlexAtanasov14/GalerkinSparseGrids.jl<gh_stars>10-100
# -----------------------------------------------------------
#
# Constructing the Hierarchical Discontinuous Galerkin Basis
#
# -----------------------------------------------------------
# Efficiency criticality: LOW
# Computations only performed once
# -----------------------------------------------------
# Defining the Inner Product
# -----------------------------------------------------
# The coordinate convention here is to have vectors of length n even
# The first half has component i is the respective coefficient of x^i
# THe second half has component n/2 + i is the coefficient of sgn(x)*x^i
# Exact inner product of basis monomials over [-1, 1] for vectors of length n.
# Index semantics (half = n/2): index i < half encodes x^i; index i >= half
# encodes sgn(x)*x^(i-half). Closed-form integrals only — no numerical
# quadrature is involved.
function product_matrix(i::Int, j::Int, n::Int)
	half = Int(round(n/2))
	if i < half
		# plain-monomial pairing, or mirror into the case handled below
		j < half && return (1 + (-1)^(i + j))/(1 + i + j)
		return product_matrix(j, i, n)
	end
	a = i - half
	# <sgn(x)x^a, x^j>
	j < half && return (1 - (-1)^(a + j))/(1 + a + j)
	# <sgn(x)x^a, sgn(x)x^b>: the sgn factors cancel
	b = j - half
	return (1 + (-1)^(a + b))/(1 + a + b)
end
# L2 inner product of two coefficient vectors in the mixed monomial basis,
# expanded through `product_matrix`. Zero coefficients are skipped so the
# double loop only pays for non-zero terms.
function inner_product(v1::AbstractArray{T}, v2::AbstractArray{T}) where T <: Real
	len = length(v1)
	acc = zero(eltype(v1))
	for a in 1:len, b in 1:len
		(v1[a] == 0 || v2[b] == 0) && continue
		# shift to 0-based degrees expected by product_matrix
		acc += product_matrix(a-1, b-1, len)*v1[a]*v2[b]
	end
	return acc
end
# Inner product of a coefficient vector with the single monomial x^(j-1),
# i.e. one column of the Gram matrix.
function inner_product(v1::AbstractArray{T}, j::Int) where T <: Real #we consider x^(j-1)
	len = length(v1)
	acc = zero(eltype(v1))
	for idx in eachindex(v1)
		# 0-based degree shift, as in the two-vector method
		acc += product_matrix(idx-1, j-1, len)*v1[idx]
	end
	return acc
end
# ------------------------------------------------------
# Defining Gram-Schmidt and forming Legendre Polynomials
# ------------------------------------------------------
# The gram schmidt process on a set of vectors
# I think using an array of arrays is easiest here
# Classical Gram-Schmidt on the first k = n/2 coefficient vectors: each input
# vector is projected against the already-orthonormalised ones (projections are
# computed from the *original* vector, as in classical GS) and then normalised.
# The input array is left untouched; an orthonormalised copy is returned.
function gram_schmidt(Q_initial::Array{Array{T, 1}, 1}) where T <: Real
	n = length(Q_initial[1])
	k = Int(round(n/2))
	# n = 2k, k polys of degree up to k-1 and then k polys according to f
	Q = deepcopy(Q_initial)
	for col = 1:k
		for prev = 1:col-1
			# coefficient of Q_initial[col] along the finished direction `prev`
			coeff = inner_product(Q_initial[col], Q[prev])/inner_product(Q[prev], Q[prev])
			Q[col] -= coeff * Q[prev]
		end
		# normalise after all projections have been removed
		Q[col] /= sqrt(inner_product(Q[col], Q[col]))
	end
	return Q
end
#We can now form the Legendre polynomials
# Build the (normalised) Legendre polynomials of degree 0..k as coefficient
# vectors of length 2(k+1): seed with the monomial basis and orthonormalise.
function legendre(k::Int)
	monomials = [[i == j ? 1.0 : 0.0 for i = 1:2*(k+1)] for j = 1:(k+1)]
	return gram_schmidt(monomials)
end
# I want to be able to do this completely analytically, using fractions
# so that I can avoid any numerical error in this process
# -----------------------------------------------------------
# Making a basis of functions have the first k moments vanish
# -----------------------------------------------------------
# Remove the first k moments from each of the k input vectors: project every
# vector against an orthogonal basis (Legendre) for span{1, x, ..., x^(k-1)}
# and subtract. Returns a modified copy; the input is untouched.
function orthogonalize_1(Q_initial::Array{Array{T, 1}, 1}) where T <: Real
	n = length(Q_initial[1])
	k = Int(round(n/2))
	# n = 2k: the basis also contains the f(j,x) functions beyond x^j
	Q = deepcopy(Q_initial)
	# an orthogonal basis for 1..x^(k-1) is needed for the projections
	legendre_basis = legendre(k-1)
	for i = 1:k, j in 1:k
		coeff = inner_product(Q_initial[i], legendre_basis[j])/
			inner_product(legendre_basis[j], legendre_basis[j])
		# subtract the component of vector i along Legendre direction j
		Q[i] -= coeff*legendre_basis[j]
	end
	return Q
end
# -----------------------------------------------------
# Make some functions have higher vanishing moments
# -----------------------------------------------------
# This function will make k-1 of the basis vectors orth to x^k,
# k-2 to x^k+1 all the way to 1 vector orth to x^2k-2
# Raise the vanishing-moment count of the later vectors: after this pass,
# k-1 vectors are orthogonal to x^k, k-2 to x^(k+1), ..., 1 to x^(2k-2).
# Vector i acts as the pivot for moment x^(k+i-1); by parity of degrees it is
# guaranteed non-orthogonal to that monomial, so no pivot search is needed.
function orthogonalize_2(Q_initial::Array{Array{T, 1}, 1}) where T <: Real
	n = length(Q_initial[1])
	k = Int(round(n/2))
	Q = deepcopy(Q_initial)
	for i = 1:k-1
		pivot = copy(Q_initial[i])
		for j = i+1:k
			# scale so that subtracting the pivot cancels <Q[j], x^(k+i-1)>
			c = inner_product(Q[j], k+i)/inner_product(pivot, k+i)
			Q[j] -= c * pivot
		end
	end
	return Q
end
# Standard gram-schmidt process, starting at the end
# (this is important, because the last function is orthogonal to a lot of higher
# polynomials, and we don't want to do anything other than normalize it,
# with similar reasoning for the penultimate, etc. functions)
# Gram-Schmidt running from the last vector to the first. The last function is
# orthogonal to the most monomials, so it must only be normalised; the
# penultimate is projected only against it, and so on — processing in reverse
# preserves the vanishing-moment structure built by orthogonalize_2.
function gram_schmidt_rev(Q_initial::Array{Array{T, 1}, 1}) where T <: Real
	n = length(Q_initial[1])
	k = Int(round(n/2))
	Q = [[0.0 for i in 1:n] for j in 1:k]
	for i = k:-1:1
		vec_i = copy(Q_initial[i])  # keep the original for the projections
		Q[i] = vec_i
		for j = i+1:k
			# projection of the *original* vector onto finished direction j
			coeff = inner_product(vec_i, Q[j])/inner_product(Q[j], Q[j])
			Q[i] -= coeff * Q[j]
		end
		Q[i] /= sqrt(inner_product(Q[i], Q[i]))  # normalise each time
	end
	return Q
end
# -----------------------------------------------------
# TODO: Lastly, perform a rotation so that there
# is only one 'dicontinuous' basis element,
# namely the last one
# -----------------------------------------------------
# Planned final step (see the comment banner above): rotate the basis so that
# only the last element carries the discontinuity. Currently a stub that
# returns `nothing`; callers must not rely on its output yet.
function rotate_discontinuity(Q_initial::Array{Array{T, 1}, 1}) where T <: Real
	# To be implemented
end
# -----------------------------------------------------
# All together, for the final result:
# -----------------------------------------------------
# Assemble the order-k discontinuous Galerkin basis: seed with the sgn-monomial
# half of the length-2k basis, wipe out the first k moments, push extra
# vanishing moments onto the later vectors, then orthonormalise in reverse.
function dg_basis(k::Int)
	seed = [[j == (i-k) ? 1.0 : 0.0 for i in 1:2*k] for j in 1:k]
	return gram_schmidt_rev(orthogonalize_2(orthogonalize_1(seed)))
end
| [
27,
7856,
261,
480,
29,
15309,
2953,
15991,
709,
1415,
14,
26552,
263,
5116,
50,
29572,
8642,
2340,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
2,
20368,
22369,
6329,
198,
2,
198,
2,
28407,
278,
262,
36496,
998,
605,
84... | 2.570904 | 2,722 |
# Solid–electrolyte potential transfer function for the electrode selected by
# `Def` ("Pos" → Cell.Pos, otherwise Cell.Neg). Writes the frequency response
# into `ϕ_tf`, the high-frequency contribution into `D`, and resets `res0` to
# zeros before returning. `z` is presumably the normalised through-thickness
# coordinate in [0, 1] — TODO confirm against the callers.
@inline function Phi_se(Cell,s,z,Def,ϕ_tf,D,res0)
   """
   Solid-Electrolyte Potential Transfer Function
   Phi_se(Cell,s,z,Def)
   """
   if Def == "Pos"
      Electrode = Cell.Pos #Electrode Length
   else
      Electrode = Cell.Neg #Electrode Length
   end

   κ_eff = Cell.Const.κ*Electrode.ϵ_e^Electrode.κ_brug #Effective Electrolyte Conductivity
   σ_eff = Electrode.σ*Electrode.ϵ_s^Electrode.σ_brug #Effective Electrode Conductivity

   #Defining SOC
   θ = Cell.Const.SOC * (Electrode.θ_100-Electrode.θ_0) + Electrode.θ_0

   #Prepare for j0
   ce0 = Cell.Const.ce0
   cs_max = Electrode.cs_max
   cs0 = cs_max * θ
   α = Electrode.α

   #Current Flux Density
   # Doyle 1994 cells normalise the rate constant differently
   if Cell.Const.CellTyp == "Doyle_94"
      κ = Electrode.k_norm/Electrode.cs_max/ce0^(1-α)
      j0 = κ*(ce0*(cs_max-cs0))^(1-α)*cs0^α
   else
      j0 = Electrode.k_norm*(Cell.Const.ce0*cs0*(Electrode.cs_max-cs0))^(1-Electrode.α)
   end

   #Resistance
   Rtot = R*Cell.Const.T/(j0*F^2) + Electrode.RFilm
   #Rtot = R*Cell.Const.T/(j0*Cell.Const.CC_A*F) + Electrode.RFilm

   ∂Uocp_elc = Cell.Const.∂Uocp(Def,θ)/cs_max #Open Circuit Potential Partial

   res0 .= @. -3*∂Uocp_elc/(Electrode.as*F*Electrode.L*Cell.Const.CC_A*Electrode.Rs) # residual for pole removal

   ν = @. Electrode.L*sqrt((Electrode.as/σ_eff+Electrode.as/κ_eff)/(Rtot+∂Uocp_elc*(Electrode.Rs/(F*Electrode.Ds))*(tanh(Electrode.β)/(tanh(Electrode.β)-Electrode.β)))) #Condensing Variable - eq. 4.13
   ν_∞ = @. Electrode.L*sqrt(Electrode.as*((1/κ_eff)+(1/σ_eff))/(Rtot))

   ϕ_tf .= @. Electrode.L/(Cell.Const.CC_A*ν*sinh(ν))*((1/κ_eff)*cosh(ν*z)+(1/σ_eff)*cosh(ν*(z-1)))-res0/s #Transfer Function - eq. 4.14
   # exact s→0 limit, used to replace the indeterminate entries at s == 0 below
   zero_tf = @. (6*(5*Electrode.Ds*F*Rtot-∂Uocp_elc*Electrode.Rs)*σ_eff)/(30*Cell.Const.CC_A*Electrode.as*Electrode.Ds*F*σ_eff*Electrode.L) + (5*Electrode.as*Electrode.Ds*F*Electrode.L^2*(σ_eff*(-1+3*z^2)+κ_eff*(2-6*z+3*z^2)))/(30*Cell.Const.CC_A*Electrode.as*Electrode.Ds*F*σ_eff*κ_eff*Electrode.L)
   D .= @. Electrode.L/(Cell.Const.CC_A*ν_∞*sinh(ν_∞))*((1/κ_eff)*cosh(ν_∞*z)+(1/σ_eff)*cosh(ν_∞*(z-1))) # Contribution to D as G->∞

   ϕ_tf[:,findall(s.==0)] .= zero_tf[:,findall(s.==0)]
   res0 .= zeros(length(z))
   # positive-electrode response carries the opposite sign
   if Def == "Pos" #Double check this implementation
      ϕ_tf .= -ϕ_tf
      D .= -D
   end
end
| [
31,
45145,
2163,
47256,
62,
325,
7,
28780,
11,
82,
11,
89,
11,
7469,
11,
139,
243,
62,
27110,
11,
35,
11,
411,
15,
8,
198,
37811,
220,
198,
220,
220,
15831,
12,
19453,
305,
306,
660,
32480,
20558,
15553,
628,
220,
220,
47256,
62... | 1.958258 | 1,102 |
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: iotsitewise
using AWS.Compat
using AWS.UUIDs
"""
associate_assets(asset_id, child_asset_id, hierarchy_id)
associate_assets(asset_id, child_asset_id, hierarchy_id, params::Dict{String,<:Any})
Associates a child asset with the given parent asset through a hierarchy defined in the
parent asset's model. For more information, see Associating assets in the IoT SiteWise User
Guide.
# Arguments
- `asset_id`: The ID of the parent asset.
- `child_asset_id`: The ID of the child asset to be associated.
- `hierarchy_id`: The ID of a hierarchy in the parent asset's model. Hierarchies allow
different groupings of assets to be formed that all come from the same asset model. For
more information, see Asset hierarchies in the IoT SiteWise User Guide.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function associate_assets(assetId, childAssetId, hierarchyId; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "childAssetId" => childAssetId,
        "hierarchyId" => hierarchyId,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/assets/$(assetId)/associate", required; aws_config=aws_config)
end
function associate_assets(assetId, childAssetId, hierarchyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "childAssetId" => childAssetId,
        "hierarchyId" => hierarchyId,
        "clientToken" => string(uuid4()),
    )
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/assets/$(assetId)/associate", merged; aws_config=aws_config)
end
"""
batch_associate_project_assets(asset_ids, project_id)
batch_associate_project_assets(asset_ids, project_id, params::Dict{String,<:Any})
Associates a group (batch) of assets with an IoT SiteWise Monitor project.
# Arguments
- `asset_ids`: The IDs of the assets to be associated to the project.
- `project_id`: The ID of the project to which to associate the assets.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function batch_associate_project_assets(assetIds, projectId; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("assetIds" => assetIds, "clientToken" => string(uuid4()))
    return iotsitewise("POST", "/projects/$(projectId)/assets/associate", required; aws_config=aws_config)
end
function batch_associate_project_assets(assetIds, projectId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("assetIds" => assetIds, "clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/projects/$(projectId)/assets/associate", merged; aws_config=aws_config)
end
"""
batch_disassociate_project_assets(asset_ids, project_id)
batch_disassociate_project_assets(asset_ids, project_id, params::Dict{String,<:Any})
Disassociates a group (batch) of assets from an IoT SiteWise Monitor project.
# Arguments
- `asset_ids`: The IDs of the assets to be disassociated from the project.
- `project_id`: The ID of the project from which to disassociate the assets.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function batch_disassociate_project_assets(assetIds, projectId; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("assetIds" => assetIds, "clientToken" => string(uuid4()))
    return iotsitewise("POST", "/projects/$(projectId)/assets/disassociate", required; aws_config=aws_config)
end
function batch_disassociate_project_assets(assetIds, projectId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("assetIds" => assetIds, "clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/projects/$(projectId)/assets/disassociate", merged; aws_config=aws_config)
end
"""
batch_put_asset_property_value(entries)
batch_put_asset_property_value(entries, params::Dict{String,<:Any})
Sends a list of asset property values to IoT SiteWise. Each value is a
timestamp-quality-value (TQV) data point. For more information, see Ingesting data using
the API in the IoT SiteWise User Guide. To identify an asset property, you must specify one
of the following: The assetId and propertyId of an asset property. A propertyAlias,
which is a data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To
define an asset property's alias, see UpdateAssetProperty. With respect to Unix epoch
time, IoT SiteWise accepts only TQVs that have a timestamp of no more than 7 days in the
past and no more than 10 minutes in the future. IoT SiteWise rejects timestamps outside of
the inclusive range of [-7 days, +10 minutes] and returns a TimestampOutOfRangeException
error. For each asset property, IoT SiteWise overwrites TQVs with duplicate timestamps
unless the newer TQV has a different quality. For example, if you store a TQV {T1, GOOD,
V1}, then storing {T1, GOOD, V2} replaces the existing TQV. IoT SiteWise authorizes access
to each BatchPutAssetPropertyValue entry individually. For more information, see
BatchPutAssetPropertyValue authorization in the IoT SiteWise User Guide.
# Arguments
- `entries`: The list of asset property value entries for the batch put request. You can
specify up to 10 entries per request.
"""
function batch_put_asset_property_value(entries; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("entries" => entries)
    return iotsitewise("POST", "/properties", required; aws_config=aws_config)
end
function batch_put_asset_property_value(entries, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("entries" => entries)
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/properties", merged; aws_config=aws_config)
end
"""
create_access_policy(access_policy_identity, access_policy_permission, access_policy_resource)
create_access_policy(access_policy_identity, access_policy_permission, access_policy_resource, params::Dict{String,<:Any})
Creates an access policy that grants the specified identity (Amazon Web Services SSO user,
Amazon Web Services SSO group, or IAM user) access to the specified IoT SiteWise Monitor
portal or project resource.
# Arguments
- `access_policy_identity`: The identity for this access policy. Choose an Amazon Web
Services SSO user, an Amazon Web Services SSO group, or an IAM user.
- `access_policy_permission`: The permission level for this access policy. Note that a
project ADMINISTRATOR is also known as a project owner.
- `access_policy_resource`: The IoT SiteWise Monitor resource for this access policy.
Choose either a portal or a project.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
- `"tags"`: A list of key-value pairs that contain metadata for the access policy. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
function create_access_policy(accessPolicyIdentity, accessPolicyPermission, accessPolicyResource; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "accessPolicyIdentity" => accessPolicyIdentity,
        "accessPolicyPermission" => accessPolicyPermission,
        "accessPolicyResource" => accessPolicyResource,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/access-policies", required; aws_config=aws_config)
end
function create_access_policy(accessPolicyIdentity, accessPolicyPermission, accessPolicyResource, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "accessPolicyIdentity" => accessPolicyIdentity,
        "accessPolicyPermission" => accessPolicyPermission,
        "accessPolicyResource" => accessPolicyResource,
        "clientToken" => string(uuid4()),
    )
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/access-policies", merged; aws_config=aws_config)
end
"""
create_asset(asset_model_id, asset_name)
create_asset(asset_model_id, asset_name, params::Dict{String,<:Any})
Creates an asset from an existing asset model. For more information, see Creating assets in
the IoT SiteWise User Guide.
# Arguments
- `asset_model_id`: The ID of the asset model from which to create the asset.
- `asset_name`: A unique, friendly name for the asset.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
- `"tags"`: A list of key-value pairs that contain metadata for the asset. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
function create_asset(assetModelId, assetName; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "assetModelId" => assetModelId,
        "assetName" => assetName,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/assets", required; aws_config=aws_config)
end
function create_asset(assetModelId, assetName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "assetModelId" => assetModelId,
        "assetName" => assetName,
        "clientToken" => string(uuid4()),
    )
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/assets", merged; aws_config=aws_config)
end
"""
create_asset_model(asset_model_name)
create_asset_model(asset_model_name, params::Dict{String,<:Any})
Creates an asset model from specified property and hierarchy definitions. You create assets
from asset models. With asset models, you can easily create assets of the same type that
have standardized definitions. Each asset created from a model inherits the asset model's
property and hierarchy definitions. For more information, see Defining asset models in the
IoT SiteWise User Guide.
# Arguments
- `asset_model_name`: A unique, friendly name for the asset model.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"assetModelCompositeModels"`: The composite asset models that are part of this asset
model. Composite asset models are asset models that contain specific properties. Each
composite model has a type that defines the properties that the composite model supports.
Use composite asset models to define alarms on this asset model.
- `"assetModelDescription"`: A description for the asset model.
- `"assetModelHierarchies"`: The hierarchy definitions of the asset model. Each hierarchy
specifies an asset model whose assets can be children of any other assets created from this
asset model. For more information, see Asset hierarchies in the IoT SiteWise User Guide.
You can specify up to 10 hierarchies per asset model. For more information, see Quotas in
the IoT SiteWise User Guide.
- `"assetModelProperties"`: The property definitions of the asset model. For more
information, see Asset properties in the IoT SiteWise User Guide. You can specify up to 200
properties per asset model. For more information, see Quotas in the IoT SiteWise User Guide.
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
- `"tags"`: A list of key-value pairs that contain metadata for the asset model. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
function create_asset_model(assetModelName; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("assetModelName" => assetModelName, "clientToken" => string(uuid4()))
    return iotsitewise("POST", "/asset-models", required; aws_config=aws_config)
end
function create_asset_model(assetModelName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("assetModelName" => assetModelName, "clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/asset-models", merged; aws_config=aws_config)
end
"""
create_dashboard(dashboard_definition, dashboard_name, project_id)
create_dashboard(dashboard_definition, dashboard_name, project_id, params::Dict{String,<:Any})
Creates a dashboard in an IoT SiteWise Monitor project.
# Arguments
- `dashboard_definition`: The dashboard definition specified in a JSON literal. For
detailed information, see Creating dashboards (CLI) in the IoT SiteWise User Guide.
- `dashboard_name`: A friendly name for the dashboard.
- `project_id`: The ID of the project in which to create the dashboard.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
- `"dashboardDescription"`: A description for the dashboard.
- `"tags"`: A list of key-value pairs that contain metadata for the dashboard. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
function create_dashboard(dashboardDefinition, dashboardName, projectId; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "dashboardDefinition" => dashboardDefinition,
        "dashboardName" => dashboardName,
        "projectId" => projectId,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/dashboards", required; aws_config=aws_config)
end
function create_dashboard(dashboardDefinition, dashboardName, projectId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "dashboardDefinition" => dashboardDefinition,
        "dashboardName" => dashboardName,
        "projectId" => projectId,
        "clientToken" => string(uuid4()),
    )
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/dashboards", merged; aws_config=aws_config)
end
"""
create_gateway(gateway_name, gateway_platform)
create_gateway(gateway_name, gateway_platform, params::Dict{String,<:Any})
Creates a gateway, which is a virtual or edge device that delivers industrial data streams
from local servers to IoT SiteWise. For more information, see Ingesting data using a
gateway in the IoT SiteWise User Guide.
# Arguments
- `gateway_name`: A unique, friendly name for the gateway.
- `gateway_platform`: The gateway's platform. You can only specify one platform in a
gateway.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"tags"`: A list of key-value pairs that contain metadata for the gateway. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
function create_gateway(gatewayName, gatewayPlatform; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("gatewayName" => gatewayName, "gatewayPlatform" => gatewayPlatform)
    return iotsitewise("POST", "/20200301/gateways", required; aws_config=aws_config)
end
function create_gateway(gatewayName, gatewayPlatform, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}("gatewayName" => gatewayName, "gatewayPlatform" => gatewayPlatform)
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/20200301/gateways", merged; aws_config=aws_config)
end
"""
create_portal(portal_contact_email, portal_name, role_arn)
create_portal(portal_contact_email, portal_name, role_arn, params::Dict{String,<:Any})
Creates a portal, which can contain projects and dashboards. IoT SiteWise Monitor uses
Amazon Web Services SSO or IAM to authenticate portal users and manage user permissions.
Before you can sign in to a new portal, you must add at least one identity to that portal.
For more information, see Adding or removing portal administrators in the IoT SiteWise User
Guide.
# Arguments
- `portal_contact_email`: The Amazon Web Services administrator's contact email address.
- `portal_name`: A friendly name for the portal.
- `role_arn`: The ARN of a service role that allows the portal's users to access your IoT
SiteWise resources on your behalf. For more information, see Using service roles for IoT
SiteWise Monitor in the IoT SiteWise User Guide.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"alarms"`: Contains the configuration information of an alarm created in an IoT SiteWise
Monitor portal. You can use the alarm to monitor an asset property and get notified when
the asset property value is outside a specified range. For more information, see Monitoring
with alarms in the IoT SiteWise Application Guide.
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
- `"notificationSenderEmail"`: The email address that sends alarm notifications. If you
use the IoT Events managed Lambda function to manage your emails, you must verify the
sender email address in Amazon SES.
- `"portalAuthMode"`: The service to use to authenticate users to the portal. Choose from
the following options: SSO – The portal uses Amazon Web Services Single Sign On to
authenticate users and manage user permissions. Before you can create a portal that uses
Amazon Web Services SSO, you must enable Amazon Web Services SSO. For more information, see
Enabling Amazon Web Services SSO in the IoT SiteWise User Guide. This option is only
available in Amazon Web Services Regions other than the China Regions. IAM – The
portal uses Identity and Access Management to authenticate users and manage user
permissions. This option is only available in the China Regions. You can't change this
value after you create a portal. Default: SSO
- `"portalDescription"`: A description for the portal.
- `"portalLogoImageFile"`: A logo image to display in the portal. Upload a square,
high-resolution image. The image is displayed on a dark background.
- `"tags"`: A list of key-value pairs that contain metadata for the portal. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
function create_portal(portalContactEmail, portalName, roleArn; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "portalContactEmail" => portalContactEmail,
        "portalName" => portalName,
        "roleArn" => roleArn,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/portals", required; aws_config=aws_config)
end
function create_portal(portalContactEmail, portalName, roleArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "portalContactEmail" => portalContactEmail,
        "portalName" => portalName,
        "roleArn" => roleArn,
        "clientToken" => string(uuid4()),
    )
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/portals", merged; aws_config=aws_config)
end
"""
create_project(portal_id, project_name)
create_project(portal_id, project_name, params::Dict{String,<:Any})
Creates a project in the specified portal.
# Arguments
- `portal_id`: The ID of the portal in which to create the project.
- `project_name`: A friendly name for the project.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
- `"projectDescription"`: A description for the project.
- `"tags"`: A list of key-value pairs that contain metadata for the project. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
function create_project(portalId, projectName; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "portalId" => portalId,
        "projectName" => projectName,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/projects", required; aws_config=aws_config)
end
function create_project(portalId, projectName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    required = Dict{String, Any}(
        "portalId" => portalId,
        "projectName" => projectName,
        "clientToken" => string(uuid4()),
    )
    merged = Dict{String, Any}(mergewith(_merge, required, params))
    return iotsitewise("POST", "/projects", merged; aws_config=aws_config)
end
"""
delete_access_policy(access_policy_id)
delete_access_policy(access_policy_id, params::Dict{String,<:Any})
Deletes an access policy that grants the specified identity access to the specified IoT
SiteWise Monitor resource. You can use this operation to revoke access to an IoT SiteWise
Monitor resource.
# Arguments
- `access_policy_id`: The ID of the access policy to be deleted.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function delete_access_policy(accessPolicyId; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    return iotsitewise("DELETE", "/access-policies/$(accessPolicyId)", defaults; aws_config=aws_config)
end
function delete_access_policy(accessPolicyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, defaults, params))
    return iotsitewise("DELETE", "/access-policies/$(accessPolicyId)", merged; aws_config=aws_config)
end
"""
delete_asset(asset_id)
delete_asset(asset_id, params::Dict{String,<:Any})
Deletes an asset. This action can't be undone. For more information, see Deleting assets
and models in the IoT SiteWise User Guide. You can't delete an asset that's associated to
another asset. For more information, see DisassociateAssets.
# Arguments
- `asset_id`: The ID of the asset to delete.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function delete_asset(assetId; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    return iotsitewise("DELETE", "/assets/$(assetId)", defaults; aws_config=aws_config)
end
function delete_asset(assetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, defaults, params))
    return iotsitewise("DELETE", "/assets/$(assetId)", merged; aws_config=aws_config)
end
"""
delete_asset_model(asset_model_id)
delete_asset_model(asset_model_id, params::Dict{String,<:Any})
Deletes an asset model. This action can't be undone. You must delete all assets created
from an asset model before you can delete the model. Also, you can't delete an asset model
if a parent asset model exists that contains a property formula expression that depends on
the asset model that you want to delete. For more information, see Deleting assets and
models in the IoT SiteWise User Guide.
# Arguments
- `asset_model_id`: The ID of the asset model to delete.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function delete_asset_model(assetModelId; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    return iotsitewise("DELETE", "/asset-models/$(assetModelId)", defaults; aws_config=aws_config)
end
function delete_asset_model(assetModelId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, defaults, params))
    return iotsitewise("DELETE", "/asset-models/$(assetModelId)", merged; aws_config=aws_config)
end
"""
delete_dashboard(dashboard_id)
delete_dashboard(dashboard_id, params::Dict{String,<:Any})
Deletes a dashboard from IoT SiteWise Monitor.
# Arguments
- `dashboard_id`: The ID of the dashboard to delete.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function delete_dashboard(dashboardId; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    return iotsitewise("DELETE", "/dashboards/$(dashboardId)", defaults; aws_config=aws_config)
end
function delete_dashboard(dashboardId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, defaults, params))
    return iotsitewise("DELETE", "/dashboards/$(dashboardId)", merged; aws_config=aws_config)
end
"""
delete_gateway(gateway_id)
delete_gateway(gateway_id, params::Dict{String,<:Any})
Deletes a gateway from IoT SiteWise. When you delete a gateway, some of the gateway's files
remain in your gateway's file system.
# Arguments
- `gateway_id`: The ID of the gateway to delete.
"""
function delete_gateway(gatewayId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("DELETE", "/20200301/gateways/$(gatewayId)"; aws_config=aws_config)
end
function delete_gateway(gatewayId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("DELETE", "/20200301/gateways/$(gatewayId)", params; aws_config=aws_config)
end
"""
delete_portal(portal_id)
delete_portal(portal_id, params::Dict{String,<:Any})
Deletes a portal from IoT SiteWise Monitor.
# Arguments
- `portal_id`: The ID of the portal to delete.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function delete_portal(portalId; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    return iotsitewise("DELETE", "/portals/$(portalId)", defaults; aws_config=aws_config)
end
function delete_portal(portalId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, defaults, params))
    return iotsitewise("DELETE", "/portals/$(portalId)", merged; aws_config=aws_config)
end
"""
delete_project(project_id)
delete_project(project_id, params::Dict{String,<:Any})
Deletes a project from IoT SiteWise Monitor.
# Arguments
- `project_id`: The ID of the project.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
function delete_project(projectId; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    return iotsitewise("DELETE", "/projects/$(projectId)", defaults; aws_config=aws_config)
end
function delete_project(projectId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    defaults = Dict{String, Any}("clientToken" => string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, defaults, params))
    return iotsitewise("DELETE", "/projects/$(projectId)", merged; aws_config=aws_config)
end
"""
describe_access_policy(access_policy_id)
describe_access_policy(access_policy_id, params::Dict{String,<:Any})
Describes an access policy, which specifies an identity's access to an IoT SiteWise Monitor
portal or project.
# Arguments
- `access_policy_id`: The ID of the access policy.
"""
function describe_access_policy(accessPolicyId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/access-policies/$(accessPolicyId)"; aws_config=aws_config)
end
function describe_access_policy(accessPolicyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/access-policies/$(accessPolicyId)", params; aws_config=aws_config)
end
"""
describe_asset(asset_id)
describe_asset(asset_id, params::Dict{String,<:Any})
Retrieves information about an asset.
# Arguments
- `asset_id`: The ID of the asset.
"""
function describe_asset(assetId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets/$(assetId)"; aws_config=aws_config)
end
function describe_asset(assetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets/$(assetId)", params; aws_config=aws_config)
end
"""
describe_asset_model(asset_model_id)
describe_asset_model(asset_model_id, params::Dict{String,<:Any})
Retrieves information about an asset model.
# Arguments
- `asset_model_id`: The ID of the asset model.
"""
function describe_asset_model(assetModelId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/asset-models/$(assetModelId)"; aws_config=aws_config)
end
function describe_asset_model(assetModelId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/asset-models/$(assetModelId)", params; aws_config=aws_config)
end
"""
describe_asset_property(asset_id, property_id)
describe_asset_property(asset_id, property_id, params::Dict{String,<:Any})
Retrieves information about an asset property. When you call this operation for an
attribute property, this response includes the default attribute value that you define in
the asset model. If you update the default value in the model, this operation's response
includes the new default value. This operation doesn't return the value of the asset
property. To get the value of an asset property, use GetAssetPropertyValue.
# Arguments
- `asset_id`: The ID of the asset.
- `property_id`: The ID of the asset property.
"""
function describe_asset_property(assetId, propertyId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets/$(assetId)/properties/$(propertyId)"; aws_config=aws_config)
end
function describe_asset_property(assetId, propertyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets/$(assetId)/properties/$(propertyId)", params; aws_config=aws_config)
end
"""
describe_dashboard(dashboard_id)
describe_dashboard(dashboard_id, params::Dict{String,<:Any})
Retrieves information about a dashboard.
# Arguments
- `dashboard_id`: The ID of the dashboard.
"""
function describe_dashboard(dashboardId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/dashboards/$(dashboardId)"; aws_config=aws_config)
end
function describe_dashboard(dashboardId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/dashboards/$(dashboardId)", params; aws_config=aws_config)
end
"""
describe_default_encryption_configuration()
describe_default_encryption_configuration(params::Dict{String,<:Any})
Retrieves information about the default encryption configuration for the Amazon Web
Services account in the default or specified Region. For more information, see Key
management in the IoT SiteWise User Guide.
"""
function describe_default_encryption_configuration(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/configuration/account/encryption"; aws_config=aws_config)
end
function describe_default_encryption_configuration(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/configuration/account/encryption", params; aws_config=aws_config)
end
"""
describe_gateway(gateway_id)
describe_gateway(gateway_id, params::Dict{String,<:Any})
Retrieves information about a gateway.
# Arguments
- `gateway_id`: The ID of the gateway device.
"""
# GET /20200301/gateways/{gatewayId} — gateway device details.
function describe_gateway(gatewayId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/20200301/gateways/$(gatewayId)"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function describe_gateway(gatewayId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/20200301/gateways/$(gatewayId)", params; aws_config=aws_config)
end
"""
describe_gateway_capability_configuration(capability_namespace, gateway_id)
describe_gateway_capability_configuration(capability_namespace, gateway_id, params::Dict{String,<:Any})
Retrieves information about a gateway capability configuration. Each gateway capability
defines data sources for a gateway. A capability configuration can contain multiple data
source configurations. If you define OPC-UA sources for a gateway in the IoT SiteWise
console, all of your OPC-UA sources are stored in one capability configuration. To list all
capability configurations for a gateway, use DescribeGateway.
# Arguments
- `capability_namespace`: The namespace of the capability configuration. For example, if
you configure OPC-UA sources from the IoT SiteWise console, your OPC-UA capability
configuration has the namespace iotsitewise:opcuacollector:version, where version is a
number such as 1.
- `gateway_id`: The ID of the gateway that defines the capability configuration.
"""
# GET /20200301/gateways/{gatewayId}/capability/{capabilityNamespace} — one capability config.
function describe_gateway_capability_configuration(capabilityNamespace, gatewayId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/20200301/gateways/$(gatewayId)/capability/$(capabilityNamespace)"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function describe_gateway_capability_configuration(capabilityNamespace, gatewayId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/20200301/gateways/$(gatewayId)/capability/$(capabilityNamespace)", params; aws_config=aws_config)
end
"""
describe_logging_options()
describe_logging_options(params::Dict{String,<:Any})
Retrieves the current IoT SiteWise logging options.
"""
# GET /logging — current service logging options.
function describe_logging_options(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/logging"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function describe_logging_options(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/logging", params; aws_config=aws_config)
end
"""
describe_portal(portal_id)
describe_portal(portal_id, params::Dict{String,<:Any})
Retrieves information about a portal.
# Arguments
- `portal_id`: The ID of the portal.
"""
# GET /portals/{portalId} — portal details.
function describe_portal(portalId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/portals/$(portalId)"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function describe_portal(portalId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/portals/$(portalId)", params; aws_config=aws_config)
end
"""
describe_project(project_id)
describe_project(project_id, params::Dict{String,<:Any})
Retrieves information about a project.
# Arguments
- `project_id`: The ID of the project.
"""
# GET /projects/{projectId} — project details.
function describe_project(projectId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/projects/$(projectId)"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function describe_project(projectId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/projects/$(projectId)", params; aws_config=aws_config)
end
"""
describe_storage_configuration()
describe_storage_configuration(params::Dict{String,<:Any})
Retrieves information about the storage configuration for IoT SiteWise.
"""
# GET /configuration/account/storage — account-level storage settings.
function describe_storage_configuration(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/configuration/account/storage"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function describe_storage_configuration(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/configuration/account/storage", params; aws_config=aws_config)
end
"""
disassociate_assets(asset_id, child_asset_id, hierarchy_id)
disassociate_assets(asset_id, child_asset_id, hierarchy_id, params::Dict{String,<:Any})
Disassociates a child asset from the given parent asset through a hierarchy defined in the
parent asset's model.
# Arguments
- `asset_id`: The ID of the parent asset from which to disassociate the child asset.
- `child_asset_id`: The ID of the child asset to disassociate.
- `hierarchy_id`: The ID of a hierarchy in the parent asset's model. Hierarchies allow
different groupings of assets to be formed that all come from the same asset model. You can
use the hierarchy ID to identify the correct asset to disassociate. For more information,
see Asset hierarchies in the IoT SiteWise User Guide.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
# POST /assets/{assetId}/disassociate — detach a child asset; clientToken auto-generated for idempotency.
function disassociate_assets(assetId, childAssetId, hierarchyId; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}(
        "childAssetId" => childAssetId,
        "hierarchyId" => hierarchyId,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/assets/$(assetId)/disassociate", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the defaults.
function disassociate_assets(assetId, childAssetId, hierarchyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}(
        "childAssetId" => childAssetId,
        "hierarchyId" => hierarchyId,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("POST", "/assets/$(assetId)/disassociate", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
get_asset_property_aggregates(aggregate_types, end_date, resolution, start_date)
get_asset_property_aggregates(aggregate_types, end_date, resolution, start_date, params::Dict{String,<:Any})
Gets aggregated values for an asset property. For more information, see Querying aggregates
in the IoT SiteWise User Guide. To identify an asset property, you must specify one of the
following: The assetId and propertyId of an asset property. A propertyAlias, which is a
data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an
asset property's alias, see UpdateAssetProperty.
# Arguments
- `aggregate_types`: The data aggregating function.
- `end_date`: The inclusive end of the range from which to query historical data, expressed
in seconds in Unix epoch time.
- `resolution`: The time interval over which to aggregate data.
- `start_date`: The exclusive start of the range from which to query historical data,
expressed in seconds in Unix epoch time.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"assetId"`: The ID of the asset.
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 100
- `"nextToken"`: The token to be used for the next set of paginated results.
- `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data
stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information,
see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide.
- `"propertyId"`: The ID of the asset property.
- `"qualities"`: The quality by which to filter asset data.
- `"timeOrdering"`: The chronological sorting order of the requested information. Default:
ASCENDING
"""
# GET /properties/aggregates — aggregated historical values over a time range.
function get_asset_property_aggregates(aggregateTypes, endDate, resolution, startDate; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}(
        "aggregateTypes" => aggregateTypes,
        "endDate" => endDate,
        "resolution" => resolution,
        "startDate" => startDate,
    )
    return iotsitewise("GET", "/properties/aggregates", query; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required query fields.
function get_asset_property_aggregates(aggregateTypes, endDate, resolution, startDate, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}(
        "aggregateTypes" => aggregateTypes,
        "endDate" => endDate,
        "resolution" => resolution,
        "startDate" => startDate,
    )
    return iotsitewise("GET", "/properties/aggregates", Dict{String, Any}(mergewith(_merge, query, params)); aws_config=aws_config)
end
"""
get_asset_property_value()
get_asset_property_value(params::Dict{String,<:Any})
Gets an asset property's current value. For more information, see Querying current values
in the IoT SiteWise User Guide. To identify an asset property, you must specify one of the
following: The assetId and propertyId of an asset property. A propertyAlias, which is a
data stream alias (for example, /company/windfarm/3/turbine/7/temperature). To define an
asset property's alias, see UpdateAssetProperty.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"assetId"`: The ID of the asset.
- `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data
stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information,
see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide.
- `"propertyId"`: The ID of the asset property.
"""
# GET /properties/latest — current value of an asset property.
function get_asset_property_value(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/properties/latest"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function get_asset_property_value(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/properties/latest", params; aws_config=aws_config)
end
"""
get_asset_property_value_history()
get_asset_property_value_history(params::Dict{String,<:Any})
Gets the history of an asset property's values. For more information, see Querying
historical values in the IoT SiteWise User Guide. To identify an asset property, you must
specify one of the following: The assetId and propertyId of an asset property. A
propertyAlias, which is a data stream alias (for example,
/company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see
UpdateAssetProperty.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"assetId"`: The ID of the asset.
- `"endDate"`: The inclusive end of the range from which to query historical data,
expressed in seconds in Unix epoch time.
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 100
- `"nextToken"`: The token to be used for the next set of paginated results.
- `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data
stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information,
see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide.
- `"propertyId"`: The ID of the asset property.
- `"qualities"`: The quality by which to filter asset data.
- `"startDate"`: The exclusive start of the range from which to query historical data,
expressed in seconds in Unix epoch time.
- `"timeOrdering"`: The chronological sorting order of the requested information. Default:
ASCENDING
"""
# GET /properties/history — historical values of an asset property.
function get_asset_property_value_history(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/properties/history"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function get_asset_property_value_history(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/properties/history", params; aws_config=aws_config)
end
"""
get_interpolated_asset_property_values(end_time_in_seconds, interval_in_seconds, quality, start_time_in_seconds, type)
get_interpolated_asset_property_values(end_time_in_seconds, interval_in_seconds, quality, start_time_in_seconds, type, params::Dict{String,<:Any})
Get interpolated values for an asset property for a specified time interval, during a
period of time. For example, you can use the this operation to return the interpolated
temperature values for a wind turbine every 24 hours over a duration of 7 days. To identify
an asset property, you must specify one of the following: The assetId and propertyId of
an asset property. A propertyAlias, which is a data stream alias (for example,
/company/windfarm/3/turbine/7/temperature). To define an asset property's alias, see
UpdateAssetProperty.
# Arguments
- `end_time_in_seconds`: The inclusive end of the range from which to interpolate data,
expressed in seconds in Unix epoch time.
- `interval_in_seconds`: The time interval in seconds over which to interpolate data. Each
interval starts when the previous one ends.
- `quality`: The quality of the asset property value. You can use this parameter as a
filter to choose only the asset property values that have a specific quality.
- `start_time_in_seconds`: The exclusive start of the range from which to interpolate data,
expressed in seconds in Unix epoch time.
- `type`: The interpolation type. Valid values: LINEAR_INTERPOLATION
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"assetId"`: The ID of the asset.
- `"endTimeOffsetInNanos"`: The nanosecond offset converted from endTimeInSeconds.
- `"maxResults"`: The maximum number of results to return for each paginated request. If
not specified, the default value is 10.
- `"nextToken"`: The token to be used for the next set of paginated results.
- `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data
stream path (for example, /company/windfarm/3/turbine/7/temperature). For more information,
see Mapping industrial data streams to asset properties in the IoT SiteWise User Guide.
- `"propertyId"`: The ID of the asset property.
- `"startTimeOffsetInNanos"`: The nanosecond offset converted from startTimeInSeconds.
"""
# GET /properties/interpolated — interpolated values at a fixed interval over a time range.
function get_interpolated_asset_property_values(endTimeInSeconds, intervalInSeconds, quality, startTimeInSeconds, type; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}(
        "endTimeInSeconds" => endTimeInSeconds,
        "intervalInSeconds" => intervalInSeconds,
        "quality" => quality,
        "startTimeInSeconds" => startTimeInSeconds,
        "type" => type,
    )
    return iotsitewise("GET", "/properties/interpolated", query; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required query fields.
function get_interpolated_asset_property_values(endTimeInSeconds, intervalInSeconds, quality, startTimeInSeconds, type, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}(
        "endTimeInSeconds" => endTimeInSeconds,
        "intervalInSeconds" => intervalInSeconds,
        "quality" => quality,
        "startTimeInSeconds" => startTimeInSeconds,
        "type" => type,
    )
    return iotsitewise("GET", "/properties/interpolated", Dict{String, Any}(mergewith(_merge, query, params)); aws_config=aws_config)
end
"""
list_access_policies()
list_access_policies(params::Dict{String,<:Any})
Retrieves a paginated list of access policies for an identity (an Amazon Web Services SSO
user, an Amazon Web Services SSO group, or an IAM user) or an IoT SiteWise Monitor resource
(a portal or project).
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"iamArn"`: The ARN of the IAM user. For more information, see IAM ARNs in the IAM User
Guide. This parameter is required if you specify IAM for identityType.
- `"identityId"`: The ID of the identity. This parameter is required if you specify USER or
GROUP for identityType.
- `"identityType"`: The type of identity (Amazon Web Services SSO user, Amazon Web Services
SSO group, or IAM user). This parameter is required if you specify identityId.
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
- `"resourceId"`: The ID of the resource. This parameter is required if you specify
resourceType.
- `"resourceType"`: The type of resource (portal or project). This parameter is required if
you specify resourceId.
"""
# GET /access-policies — paginated access-policy listing.
function list_access_policies(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/access-policies"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function list_access_policies(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/access-policies", params; aws_config=aws_config)
end
"""
list_asset_models()
list_asset_models(params::Dict{String,<:Any})
Retrieves a paginated list of summaries of all asset models.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /asset-models — paginated asset-model summaries.
function list_asset_models(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/asset-models"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function list_asset_models(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/asset-models", params; aws_config=aws_config)
end
"""
list_asset_relationships(asset_id, traversal_type)
list_asset_relationships(asset_id, traversal_type, params::Dict{String,<:Any})
Retrieves a paginated list of asset relationships for an asset. You can use this operation
to identify an asset's root asset and all associated assets between that asset and its root.
# Arguments
- `asset_id`: The ID of the asset.
- `traversal_type`: The type of traversal to use to identify asset relationships. Choose
the following option: PATH_TO_ROOT – Identify the asset's parent assets up to the root
asset. The asset that you specify in assetId is the first result in the list of
assetRelationshipSummaries, and the root asset is the last result.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return for each paginated request.
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /assets/{assetId}/assetRelationships — asset relationship traversal.
function list_asset_relationships(assetId, traversalType; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("traversalType" => traversalType)
    return iotsitewise("GET", "/assets/$(assetId)/assetRelationships", query; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required query field.
function list_asset_relationships(assetId, traversalType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("traversalType" => traversalType)
    return iotsitewise("GET", "/assets/$(assetId)/assetRelationships", Dict{String, Any}(mergewith(_merge, query, params)); aws_config=aws_config)
end
"""
list_assets()
list_assets(params::Dict{String,<:Any})
Retrieves a paginated list of asset summaries. You can use this operation to do the
following: List assets based on a specific asset model. List top-level assets. You
can't use this operation to list all assets. To retrieve summaries for all of your assets,
use ListAssetModels to get all of your asset model IDs. Then, use ListAssets to get all
assets for each asset model.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"assetModelId"`: The ID of the asset model by which to filter the list of assets. This
parameter is required if you choose ALL for filter.
- `"filter"`: The filter for the requested list of assets. Choose one of the following
options: ALL – The list includes all assets for a given asset model ID. The
assetModelId parameter is required if you filter by ALL. TOP_LEVEL – The list includes
only top-level assets in the asset hierarchy tree. Default: ALL
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /assets — paginated asset summaries.
function list_assets(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function list_assets(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets", params; aws_config=aws_config)
end
"""
list_associated_assets(asset_id)
list_associated_assets(asset_id, params::Dict{String,<:Any})
Retrieves a paginated list of associated assets. You can use this operation to do the
following: List child assets associated to a parent asset by a hierarchy that you
specify. List an asset's parent asset.
# Arguments
- `asset_id`: The ID of the asset to query.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"hierarchyId"`: The ID of the hierarchy by which child assets are associated to the
asset. To find a hierarchy ID, use the DescribeAsset or DescribeAssetModel operations. This
parameter is required if you choose CHILD for traversalDirection. For more information, see
Asset hierarchies in the IoT SiteWise User Guide.
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
- `"traversalDirection"`: The direction to list associated assets. Choose one of the
following options: CHILD – The list includes all child assets associated to the asset.
The hierarchyId parameter is required if you choose CHILD. PARENT – The list includes
the asset's parent asset. Default: CHILD
"""
# GET /assets/{assetId}/hierarchies — child or parent assets of the given asset.
function list_associated_assets(assetId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets/$(assetId)/hierarchies"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function list_associated_assets(assetId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/assets/$(assetId)/hierarchies", params; aws_config=aws_config)
end
"""
list_dashboards(project_id)
list_dashboards(project_id, params::Dict{String,<:Any})
Retrieves a paginated list of dashboards for an IoT SiteWise Monitor project.
# Arguments
- `project_id`: The ID of the project.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /dashboards — dashboards belonging to a Monitor project.
function list_dashboards(projectId; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("projectId" => projectId)
    return iotsitewise("GET", "/dashboards", query; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required query field.
function list_dashboards(projectId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("projectId" => projectId)
    return iotsitewise("GET", "/dashboards", Dict{String, Any}(mergewith(_merge, query, params)); aws_config=aws_config)
end
"""
list_gateways()
list_gateways(params::Dict{String,<:Any})
Retrieves a paginated list of gateways.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /20200301/gateways — paginated gateway listing.
function list_gateways(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/20200301/gateways"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function list_gateways(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/20200301/gateways", params; aws_config=aws_config)
end
"""
list_portals()
list_portals(params::Dict{String,<:Any})
Retrieves a paginated list of IoT SiteWise Monitor portals.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /portals — paginated Monitor portal listing.
function list_portals(; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/portals"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function list_portals(params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/portals", params; aws_config=aws_config)
end
"""
list_project_assets(project_id)
list_project_assets(project_id, params::Dict{String,<:Any})
Retrieves a paginated list of assets associated with an IoT SiteWise Monitor project.
# Arguments
- `project_id`: The ID of the project.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /projects/{projectId}/assets — assets attached to a Monitor project.
function list_project_assets(projectId; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/projects/$(projectId)/assets"; aws_config=aws_config)
end
# Variant accepting extra request parameters.
function list_project_assets(projectId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    return iotsitewise("GET", "/projects/$(projectId)/assets", params; aws_config=aws_config)
end
"""
list_projects(portal_id)
list_projects(portal_id, params::Dict{String,<:Any})
Retrieves a paginated list of projects for an IoT SiteWise Monitor portal.
# Arguments
- `portal_id`: The ID of the portal.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return for each paginated request.
Default: 50
- `"nextToken"`: The token to be used for the next set of paginated results.
"""
# GET /projects — projects belonging to a Monitor portal.
function list_projects(portalId; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("portalId" => portalId)
    return iotsitewise("GET", "/projects", query; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required query field.
function list_projects(portalId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("portalId" => portalId)
    return iotsitewise("GET", "/projects", Dict{String, Any}(mergewith(_merge, query, params)); aws_config=aws_config)
end
"""
list_tags_for_resource(resource_arn)
list_tags_for_resource(resource_arn, params::Dict{String,<:Any})
Retrieves the list of tags for an IoT SiteWise resource.
# Arguments
- `resource_arn`: The ARN of the resource.
"""
# GET /tags — tags attached to a resource ARN.
function list_tags_for_resource(resourceArn; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("resourceArn" => resourceArn)
    return iotsitewise("GET", "/tags", query; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required query field.
function list_tags_for_resource(resourceArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    query = Dict{String, Any}("resourceArn" => resourceArn)
    return iotsitewise("GET", "/tags", Dict{String, Any}(mergewith(_merge, query, params)); aws_config=aws_config)
end
"""
put_default_encryption_configuration(encryption_type)
put_default_encryption_configuration(encryption_type, params::Dict{String,<:Any})
Sets the default encryption configuration for the Amazon Web Services account. For more
information, see Key management in the IoT SiteWise User Guide.
# Arguments
- `encryption_type`: The type of encryption used for the encryption configuration.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"kmsKeyId"`: The Key ID of the customer managed customer master key (CMK) used for KMS
encryption. This is required if you use KMS_BASED_ENCRYPTION.
"""
# POST /configuration/account/encryption — set the account's default encryption type.
function put_default_encryption_configuration(encryptionType; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("encryptionType" => encryptionType)
    return iotsitewise("POST", "/configuration/account/encryption", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required payload field.
function put_default_encryption_configuration(encryptionType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("encryptionType" => encryptionType)
    return iotsitewise("POST", "/configuration/account/encryption", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
put_logging_options(logging_options)
put_logging_options(logging_options, params::Dict{String,<:Any})
Sets logging options for IoT SiteWise.
# Arguments
- `logging_options`: The logging options to set.
"""
# PUT /logging — overwrite the service logging options.
function put_logging_options(loggingOptions; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("loggingOptions" => loggingOptions)
    return iotsitewise("PUT", "/logging", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required payload field.
function put_logging_options(loggingOptions, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("loggingOptions" => loggingOptions)
    return iotsitewise("PUT", "/logging", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
put_storage_configuration(storage_type)
put_storage_configuration(storage_type, params::Dict{String,<:Any})
Configures storage settings for IoT SiteWise.
# Arguments
- `storage_type`: The type of storage that you specified for your data. The storage type
can be one of the following values: SITEWISE_DEFAULT_STORAGE – IoT SiteWise replicates
your data into a service managed database. MULTI_LAYER_STORAGE – IoT SiteWise
replicates your data into a service managed database and saves a copy of your raw data and
metadata in an Amazon S3 object that you specified.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"multiLayerStorage"`: Identifies a storage destination. If you specified
MULTI_LAYER_STORAGE for the storage type, you must specify a MultiLayerStorage object.
"""
# POST /configuration/account/storage — configure the account's storage tier.
function put_storage_configuration(storageType; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("storageType" => storageType)
    return iotsitewise("POST", "/configuration/account/storage", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required payload field.
function put_storage_configuration(storageType, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("storageType" => storageType)
    return iotsitewise("POST", "/configuration/account/storage", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
tag_resource(resource_arn, tags)
tag_resource(resource_arn, tags, params::Dict{String,<:Any})
Adds tags to an IoT SiteWise resource. If a tag already exists for the resource, this
operation updates the tag's value.
# Arguments
- `resource_arn`: The ARN of the resource to tag.
- `tags`: A list of key-value pairs that contain metadata for the resource. For more
information, see Tagging your IoT SiteWise resources in the IoT SiteWise User Guide.
"""
# POST /tags — attach (or overwrite) tags on a resource.
function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("resourceArn" => resourceArn, "tags" => tags)
    return iotsitewise("POST", "/tags", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required payload fields.
function tag_resource(resourceArn, tags, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("resourceArn" => resourceArn, "tags" => tags)
    return iotsitewise("POST", "/tags", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
untag_resource(resource_arn, tag_keys)
untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any})
Removes a tag from an IoT SiteWise resource.
# Arguments
- `resource_arn`: The ARN of the resource to untag.
- `tag_keys`: A list of keys for tags to remove from the resource.
"""
# DELETE /tags — remove the named tag keys from a resource.
function untag_resource(resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("resourceArn" => resourceArn, "tagKeys" => tagKeys)
    return iotsitewise("DELETE", "/tags", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the required payload fields.
function untag_resource(resourceArn, tagKeys, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("resourceArn" => resourceArn, "tagKeys" => tagKeys)
    return iotsitewise("DELETE", "/tags", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
update_access_policy(access_policy_id, access_policy_identity, access_policy_permission, access_policy_resource)
update_access_policy(access_policy_id, access_policy_identity, access_policy_permission, access_policy_resource, params::Dict{String,<:Any})
Updates an existing access policy that specifies an identity's access to an IoT SiteWise
Monitor portal or project resource.
# Arguments
- `access_policy_id`: The ID of the access policy.
- `access_policy_identity`: The identity for this access policy. Choose an Amazon Web
Services SSO user, an Amazon Web Services SSO group, or an IAM user.
- `access_policy_permission`: The permission level for this access policy. Note that a
project ADMINISTRATOR is also known as a project owner.
- `access_policy_resource`: The IoT SiteWise Monitor resource for this access policy.
Choose either a portal or a project.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
# PUT /access-policies/{accessPolicyId} — replace an access policy; clientToken auto-generated for idempotency.
function update_access_policy(accessPolicyId, accessPolicyIdentity, accessPolicyPermission, accessPolicyResource; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}(
        "accessPolicyIdentity" => accessPolicyIdentity,
        "accessPolicyPermission" => accessPolicyPermission,
        "accessPolicyResource" => accessPolicyResource,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("PUT", "/access-policies/$(accessPolicyId)", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the defaults.
function update_access_policy(accessPolicyId, accessPolicyIdentity, accessPolicyPermission, accessPolicyResource, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}(
        "accessPolicyIdentity" => accessPolicyIdentity,
        "accessPolicyPermission" => accessPolicyPermission,
        "accessPolicyResource" => accessPolicyResource,
        "clientToken" => string(uuid4()),
    )
    return iotsitewise("PUT", "/access-policies/$(accessPolicyId)", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
update_asset(asset_id, asset_name)
update_asset(asset_id, asset_name, params::Dict{String,<:Any})
Updates an asset's name. For more information, see Updating assets and models in the IoT
SiteWise User Guide.
# Arguments
- `asset_id`: The ID of the asset to update.
- `asset_name`: A unique, friendly name for the asset.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
idempotency of the request. Don't reuse this client token if a new idempotent request is
required.
"""
# PUT /assets/{assetId} — rename an asset; clientToken auto-generated for idempotency.
function update_asset(assetId, assetName; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("assetName" => assetName, "clientToken" => string(uuid4()))
    return iotsitewise("PUT", "/assets/$(assetId)", payload; aws_config=aws_config)
end
# Variant merging caller-supplied params over the defaults.
function update_asset(assetId, assetName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    payload = Dict{String, Any}("assetName" => assetName, "clientToken" => string(uuid4()))
    return iotsitewise("PUT", "/assets/$(assetId)", Dict{String, Any}(mergewith(_merge, payload, params)); aws_config=aws_config)
end
"""
    update_asset_model(asset_model_id, asset_model_name)
    update_asset_model(asset_model_id, asset_model_name, params::Dict{String,<:Any})

Updates an asset model and all of the assets that were created from the model. Each asset
created from the model inherits the updated asset model's property and hierarchy
definitions. This operation overwrites the existing model with the provided model, so to
avoid deleting your asset model's properties or hierarchies you must include their IDs and
definitions in the updated payload (see DescribeAssetModel). If you remove a property, IoT
SiteWise deletes all previous data for that property; if you remove a hierarchy
definition, IoT SiteWise disassociates every asset associated with that hierarchy. You
can't change the type or data type of an existing property.

# Arguments
- `asset_model_id`: The ID of the asset model to update.
- `asset_model_name`: A unique, friendly name for the asset model.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"assetModelCompositeModels"`: The composite asset models that are part of this asset
  model; each composite model has a type defining the properties it supports. Use these to
  define alarms on this asset model.
- `"assetModelDescription"`: A description for the asset model.
- `"assetModelHierarchies"`: The updated hierarchy definitions of the asset model (up to
  10 hierarchies per asset model).
- `"assetModelProperties"`: The updated property definitions of the asset model (up to 200
  properties per asset model).
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
  idempotency of the request.
"""
function update_asset_model(assetModelId, assetModelName; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("assetModelName"=>assetModelName, "clientToken"=>string(uuid4()))
    return iotsitewise("PUT", "/asset-models/$(assetModelId)", body; aws_config=aws_config)
end

function update_asset_model(assetModelId, assetModelName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("assetModelName"=>assetModelName, "clientToken"=>string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, body, params))
    return iotsitewise("PUT", "/asset-models/$(assetModelId)", merged; aws_config=aws_config)
end
"""
    update_asset_property(asset_id, property_id)
    update_asset_property(asset_id, property_id, params::Dict{String,<:Any})

Updates an asset property's alias and notification state. This operation overwrites the
property's existing alias and notification state, so to keep them you must include the
existing values in the request (see DescribeAssetProperty).

# Arguments
- `asset_id`: The ID of the asset to be updated.
- `property_id`: The ID of the asset property to be updated.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
  idempotency of the request.
- `"propertyAlias"`: The alias that identifies the property, such as an OPC-UA server data
  stream path (for example, /company/windfarm/3/turbine/7/temperature). If you omit this
  parameter, the alias is removed from the property.
- `"propertyNotificationState"`: The MQTT notification state (enabled or disabled) for
  this asset property. If you omit this parameter, the notification state is set to
  DISABLED.
"""
function update_asset_property(assetId, propertyId; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("clientToken"=>string(uuid4()))
    return iotsitewise("PUT", "/assets/$(assetId)/properties/$(propertyId)", body; aws_config=aws_config)
end

function update_asset_property(assetId, propertyId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("clientToken"=>string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, body, params))
    return iotsitewise("PUT", "/assets/$(assetId)/properties/$(propertyId)", merged; aws_config=aws_config)
end
"""
    update_dashboard(dashboard_definition, dashboard_id, dashboard_name)
    update_dashboard(dashboard_definition, dashboard_id, dashboard_name, params::Dict{String,<:Any})

Updates an IoT SiteWise Monitor dashboard.

# Arguments
- `dashboard_definition`: The new dashboard definition, as specified in a JSON literal.
- `dashboard_id`: The ID of the dashboard to update.
- `dashboard_name`: A new friendly name for the dashboard.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
  idempotency of the request.
- `"dashboardDescription"`: A new description for the dashboard.
"""
function update_dashboard(dashboardDefinition, dashboardId, dashboardName; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("dashboardDefinition"=>dashboardDefinition, "dashboardName"=>dashboardName, "clientToken"=>string(uuid4()))
    return iotsitewise("PUT", "/dashboards/$(dashboardId)", body; aws_config=aws_config)
end

function update_dashboard(dashboardDefinition, dashboardId, dashboardName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("dashboardDefinition"=>dashboardDefinition, "dashboardName"=>dashboardName, "clientToken"=>string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, body, params))
    return iotsitewise("PUT", "/dashboards/$(dashboardId)", merged; aws_config=aws_config)
end
"""
    update_gateway(gateway_id, gateway_name)
    update_gateway(gateway_id, gateway_name, params::Dict{String,<:Any})

Updates a gateway's name.

# Arguments
- `gateway_id`: The ID of the gateway to update.
- `gateway_name`: A unique, friendly name for the gateway.
"""
function update_gateway(gatewayId, gatewayName; aws_config::AbstractAWSConfig=global_aws_config())
    # Note: unlike most update operations in this service, no idempotency token is sent.
    body = Dict{String, Any}("gatewayName"=>gatewayName)
    return iotsitewise("PUT", "/20200301/gateways/$(gatewayId)", body; aws_config=aws_config)
end

function update_gateway(gatewayId, gatewayName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("gatewayName"=>gatewayName)
    merged = Dict{String, Any}(mergewith(_merge, body, params))
    return iotsitewise("PUT", "/20200301/gateways/$(gatewayId)", merged; aws_config=aws_config)
end
"""
    update_gateway_capability_configuration(capability_configuration, capability_namespace, gateway_id)
    update_gateway_capability_configuration(capability_configuration, capability_namespace, gateway_id, params::Dict{String,<:Any})

Updates a gateway capability configuration or defines a new capability configuration. Each
gateway capability defines data sources for a gateway; a capability configuration can
contain multiple data source configurations. If you define OPC-UA sources for a gateway in
the IoT SiteWise console, all of your OPC-UA sources are stored in one capability
configuration. To list all capability configurations for a gateway, use DescribeGateway.

# Arguments
- `capability_configuration`: The JSON document that defines the configuration for the
  gateway capability.
- `capability_namespace`: The namespace of the gateway capability configuration to be
  updated (for example, iotsitewise:opcuacollector:version, where version is a number such
  as 1).
- `gateway_id`: The ID of the gateway to be updated.
"""
function update_gateway_capability_configuration(capabilityConfiguration, capabilityNamespace, gatewayId; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("capabilityConfiguration"=>capabilityConfiguration, "capabilityNamespace"=>capabilityNamespace)
    return iotsitewise("POST", "/20200301/gateways/$(gatewayId)/capability", body; aws_config=aws_config)
end

function update_gateway_capability_configuration(capabilityConfiguration, capabilityNamespace, gatewayId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("capabilityConfiguration"=>capabilityConfiguration, "capabilityNamespace"=>capabilityNamespace)
    merged = Dict{String, Any}(mergewith(_merge, body, params))
    return iotsitewise("POST", "/20200301/gateways/$(gatewayId)/capability", merged; aws_config=aws_config)
end
"""
    update_portal(portal_contact_email, portal_id, portal_name, role_arn)
    update_portal(portal_contact_email, portal_id, portal_name, role_arn, params::Dict{String,<:Any})

Updates an IoT SiteWise Monitor portal.

# Arguments
- `portal_contact_email`: The Amazon Web Services administrator's contact email address.
- `portal_id`: The ID of the portal to update.
- `portal_name`: A new friendly name for the portal.
- `role_arn`: The ARN of a service role that allows the portal's users to access your IoT
  SiteWise resources on your behalf.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"alarms"`: Configuration information of an alarm created in an IoT SiteWise Monitor
  portal, used to monitor an asset property and notify when its value leaves a specified
  range.
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
  idempotency of the request.
- `"notificationSenderEmail"`: The email address that sends alarm notifications.
- `"portalDescription"`: A new description for the portal.
- `"portalLogoImage"`:
"""
function update_portal(portalContactEmail, portalId, portalName, roleArn; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("portalContactEmail"=>portalContactEmail, "portalName"=>portalName, "roleArn"=>roleArn, "clientToken"=>string(uuid4()))
    return iotsitewise("PUT", "/portals/$(portalId)", body; aws_config=aws_config)
end

function update_portal(portalContactEmail, portalId, portalName, roleArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("portalContactEmail"=>portalContactEmail, "portalName"=>portalName, "roleArn"=>roleArn, "clientToken"=>string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, body, params))
    return iotsitewise("PUT", "/portals/$(portalId)", merged; aws_config=aws_config)
end
"""
    update_project(project_id, project_name)
    update_project(project_id, project_name, params::Dict{String,<:Any})

Updates an IoT SiteWise Monitor project.

# Arguments
- `project_id`: The ID of the project to update.
- `project_name`: A new friendly name for the project.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
  idempotency of the request.
- `"projectDescription"`: A new description for the project.
"""
function update_project(projectId, projectName; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("projectName"=>projectName, "clientToken"=>string(uuid4()))
    return iotsitewise("PUT", "/projects/$(projectId)", body; aws_config=aws_config)
end

function update_project(projectId, projectName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config())
    body = Dict{String, Any}("projectName"=>projectName, "clientToken"=>string(uuid4()))
    merged = Dict{String, Any}(mergewith(_merge, body, params))
    return iotsitewise("PUT", "/projects/$(projectId)", merged; aws_config=aws_config)
end
| [
2,
770,
2393,
318,
8295,
12,
27568,
416,
30865,
9171,
14706,
13,
20362,
198,
3500,
30865,
198,
3500,
30865,
13,
12298,
5432,
712,
1063,
25,
1312,
1747,
270,
413,
786,
198,
3500,
30865,
13,
40073,
198,
3500,
30865,
13,
52,
27586,
82,
... | 3.26843 | 24,606 |
<reponame>UnofficialJuliaMirrorSnapshots/JulieTest.jl-885aceaa-7568-5b56-b2d4-116a98ea4ee1
# Glyphs used to mark passing/failing tests in the console report.
OK = '✓'
FAIL= '✖'
# ANSI escape sequences for terminal colors; RESET restores the default style.
RESET = "\033[0m"
# Two-space indentation unit used to nest suite/test output by depth.
PAD = " " ^ 2
FAINT_COLOR = "\033[90m"
PASS_COLOR = "\033[32m"
PASS_LIGHT_COLOR = "\033[92m"
FAILED_COLOR = "\033[31m"
FAILED_LIGHT_COLOR = "\033[91m"
# Duration thresholds in milliseconds, used by showTime to pick a display unit.
HOUR = 3600_000
MINUTES = 60_000
SECOND = 1000
DESCRIPTION_ERROR_MESSAGE = "PANIC! Got an error while called describe"
# Convert to integer milliseconds (10e2 == 1000.0, so n is presumably in
# seconds — TODO confirm). `int` is legacy pre-0.4 Julia syntax.
toMilis(n) = int(n * 10e2)
# Debug helper: print one sample line for each ANSI color code 1-109.
function testColor()
  map(1:109) do code
    println("\033[$(code)m Test_$code \033[0m")
  end
end
# Print an indented suite heading; top-level suites get a leading blank line.
function report(desc::Description)
  if desc.depth == 1
    println()
  end
  println(PAD ^ desc.depth, desc.name)
end
# Print the end-of-run summary: pass count with elapsed time, failure count if
# any, then the detailed failure listing. `elapsed` is in milliseconds (fed to
# showTime). The `Union(...)` call syntax is legacy pre-0.4 Julia.
function summaryReport(passes::Array{Test,1},errors::Array{Union(Error,DescriptionError),1},elapsed)
  println()
  println(PASS_LIGHT_COLOR, PAD, length(passes), " passing ",FAINT_COLOR, "(", showTime(elapsed), ")")
  # Only mention failures when there are any.
  length(errors) == 0 || println(FAILED_LIGHT_COLOR, PAD, length(errors), " failing")
  println(RESET)
  length(errors) == 0 || fullFailedReport(errors)
end
# Format a millisecond duration using the largest unit that fits, keeping the
# first three characters of the converted value (e.g. "1.5s", "12.m").
function showTime(n)
  if n > HOUR
    return string(n/HOUR)[1:3] * "h"
  elseif n > MINUTES
    return string(n/MINUTES)[1:3] * "m"
  elseif n > SECOND
    return string(n/SECOND)[1:3] * "s"
  end
  return string(n) * "ms"
end
# Render an elapsed time (ms) as a color-coded suffix: red above 500ms, yellow
# above 100ms, faint gray otherwise. Returns "" when the time is zero.
function colorTime(elapsed::Int)
  elapsed == 0 && return ""
  color = 90
  if elapsed > 500
    color = 91
  elseif elapsed > 100
    color = 93
  end
  return " \e[$(color)m($(showTime(elapsed)))$RESET"
end
# Print a green check followed by the test name in faint gray and a
# color-coded duration suffix, indented to the test's suite depth.
function passReport(test::Test, elapsed::Int)
  indent = PAD ^ test.desc.depth
  println(indent, PAD, PASS_LIGHT_COLOR, OK, "\033[90m ", test.name, colorTime(elapsed), RESET)
end
# Report a failure that happened while evaluating a `describe` block itself.
function failedReport(err::DescriptionError)
  if err.desc.depth == 1
    println()
  end
  indent = PAD ^ err.desc.depth
  println(indent, FAILED_COLOR, FAIL, " ", DESCRIPTION_ERROR_MESSAGE, " - ", err.desc.name, RESET)
end
# Report a single failing test with a red cross, indented to its suite depth.
function failedReport(err::Error)
  indent = PAD ^ err.test.desc.depth
  println(indent, PAD, FAILED_COLOR, FAIL, " ", err.test.name, RESET)
end
# Print the numbered, detailed listing of every failure: a "suite - test"
# header (or the describe panic message) followed by a dump of the underlying
# exception. The `Union(...)` call syntax is legacy pre-0.4 Julia.
function fullFailedReport(errors::Array{Union(Error,DescriptionError),1})
  for i in 1:length(errors)
    err = errors[i]
    print(PAD, i, ") ")
    if isa(err, DescriptionError)
      # Failure occurred in the describe block itself; there is no test name.
      print(DESCRIPTION_ERROR_MESSAGE, " - ", err.desc.name)
    else
      print(err.test.desc.name, " - ", err.test.name)
    end
    println(":", FAILED_COLOR)
    # Show the captured exception's structure for debugging.
    dump(err.err)
    println(RESET)
  end
end
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
43826,
20910,
14,
16980,
494,
14402,
13,
20362,
12,
44230,
558,
7252,
12,
2425,
3104,
12,
20,
65,
3980,
12,
65,
17,
67,
19,
12,
18298,
64,
4089,
18213,
19,
1453,
16,
19... | 2.372021 | 965 |
"""
  `generateVTK(filename, points; lines, cells, point_data, path, num, time)`

Generates a legacy-format (ASCII, UNSTRUCTURED_GRID) vtk file with the given
data. Written by <NAME>. Returns the generated file name (with extension)
followed by a semicolon.

  **Arguments**
  * `points::Array{Array{Float64,1},1}`     : Points to output.

  **Optional Arguments**
  * `lines::Array{Array{Int64,1},1}`  : line definitions. lines[i] contains the
                            zero-based indices of points in the i-th line.
  * `cells::Array{Array{Int64,1},1}`  : VTK polygons definition. cells[i]
                            contains the zero-based indices of points in the
                            i-th polygon.
  * `point_data`, `cell_data`         : Collections of data fields in the
                            following format:
                            data[i] = Dict(
                              "field_name" => field_name::String
                              "field_type" => "scalar" or "vector"
                              "field_data" => values
                            )
                            where values[i] is the data at the i-th point
                            (or cell).
  * `num`, `time`           : Optional snapshot number (appended to the file
                            name) and simulation time (written as SIM_TIME).
  * `path`, `comments`      : Output directory and title line for the header.
  * `_griddims`             : Grid dimensionality (1/2/3) when outputting a
                            Grid; selects the VTK cell type.
  * `keep_points`           : Output points as vertex cells even when lines or
                            cells are present.
  * `override_cell_type`    : Use this VTK cell-type code for all cells instead
                            of the inferred one.

See `examples.jl` for an example on how to use this function.
"""
function generateVTK(filename::String, points;
                     lines::Array{Array{Int64,1},1}=Array{Int64,1}[],
                     cells::Array{Array{Int64,1},1}=Array{Int64,1}[],
                     point_data=nothing, cell_data=nothing,
                     num=nothing, time=nothing,
                     path="", comments="", _griddims::Int64=-1,
                     keep_points::Bool=false,
                     override_cell_type::Int64=-1)
  # Optional snapshot number goes between the base name and the extension.
  aux = num!=nothing ? ".$num" : ""
  ext = aux*".vtk"
  # Ensure the output path ends with a separator.
  if path !=""
    _path = string(path, (path[end]!="/" ? "/" : ""))
  else
    _path = path
  end
  f = open(string(_path, filename, ext), "w")
  # HEADER
  header = "# vtk DataFile Version 4.0" # File version and identifier
  header = string(header, "\n", " ", comments) # Title
  header = string(header, "\n", "ASCII") # File format
  header = string(header, "\n", "DATASET UNSTRUCTURED_GRID")
  write(f, header)
  # TIME — optional simulation time stored as a FIELD entry.
  if time!=nothing
    line0 = "\nFIELD FieldData 1"
    line1 = "\nSIM_TIME 1 1 double"
    line2 = "\n$(time)"
    write(f, line0*line1*line2)
  end
  np = size(points)[1]
  nl = size(lines)[1]
  nc = size(cells)[1]
  # Points are emitted as vertex cells only when requested or when there is
  # nothing else to output.
  _keep_points = keep_points || (nl==0 && nc==0)
  # POINTS
  write(f, string("\n", "POINTS ", np, " float"))
  for i in 1:np
    print(f, "\n", points[i][1], " ", points[i][2], " ", points[i][3])
  end
  # We do this to avoid outputting points as cells if outputting a Grid
  # or if we simply want to ignore points (np is restored after CELL_TYPES).
  if _griddims!=-1 || !_keep_points
    auxnp = np
    np = 0
  end
  # CELLS — second count is total ints: (size + indices) per cell.
  auxl = size(lines)[1]
  for line in lines
    auxl += size(line)[1]
  end
  auxc = size(cells)[1]
  for cell in cells
    auxc += size(cell)[1]
  end
  write(f, "\n\nCELLS $(np+nl+nc) $(2*np+auxl+auxc)")
  # Emit vertex cells first, then lines, then polygons/grid cells.
  for i in 1:np+nl+nc
    if i<=np
      pts = [i-1]
    elseif i<=np+nl
      pts = lines[i-np]
    else
      pts = cells[i-(nl+np)]
    end
    print(f, "\n", size(pts,1))
    for pt in pts
      print(f, " ", pt)
    end
  end
  # CELL_TYPES — legacy VTK type codes: 1=vertex, 4=poly-line, 3=line,
  # 9=quad, 12=hexahedron, 7=polygon.
  write(f, "\n\nCELL_TYPES $(np+nl+nc)")
  for i in 1:np+nl+nc
    if i<=np
      tpe = 1
    elseif i<=np+nl
      tpe = 4
    else
      if override_cell_type==-1
        if _griddims!=-1
          if _griddims==1
            tpe = 3
          elseif _griddims==2
            tpe = 9
          elseif _griddims==3
            tpe = 12
          else
            error("Generation of VTK cells of $_griddims dimensions not implemented")
          end
        else
          tpe = 7
        end
      else
        tpe = override_cell_type
      end
    end
    print(f, "\n", tpe)
  end
  # Restore the real point count for the POINT_DATA section.
  if _griddims!=-1 || !_keep_points
    np = auxnp
  end
  # POINT DATA
  if point_data!=nothing
    write(f, "\n\nPOINT_DATA $np")
  end
  _p_data = point_data!=nothing ? point_data : []
  for field in _p_data
    field_name = field["field_name"]
    field_type = field["field_type"]
    data = field["field_data"]
    if size(data)[1]!=np
      warn("Corrupted field $(field_name)! Field size != number of points.")
    end
    if field_type=="scalar"
      write(f, "\n\nSCALARS $field_name float\nLOOKUP_TABLE default")
      for entry in data
        print(f, "\n", entry)
      end
    elseif field_type=="vector"
      write(f, "\n\nVECTORS $field_name float")
      for entry in data
        print(f, "\n", entry[1], " ", entry[2], " ", entry[3])
      end
    else
      error("Unknown field type $(field_type).")
    end
  end
  # CELL DATA — same layout as POINT_DATA but sized by the cell count.
  if cell_data!=nothing
    write(f, "\n\nCELL_DATA $nc")
  end
  _c_data = cell_data!=nothing ? cell_data : []
  for field in _c_data
    field_name = field["field_name"]
    field_type = field["field_type"]
    data = field["field_data"]
    if size(data)[1]!=nc
      warn("Corrupted field $(field_name)! Field size != number of cells.")
    end
    if field_type=="scalar"
      write(f, "\n\nSCALARS $field_name float\nLOOKUP_TABLE default")
      for entry in data
        print(f, "\n", entry)
      end
    elseif field_type=="vector"
      write(f, "\n\nVECTORS $field_name float")
      for entry in data
        print(f, "\n", entry[1], " ", entry[2], " ", entry[3])
      end
    else
      error("Unknown field type $(field_type).")
    end
  end
  close(f)
  # Trailing semicolon lets callers concatenate returned names into a list.
  return filename*ext*";"
end
37811,
198,
220,
4600,
8612,
378,
36392,
42,
7,
34345,
11,
2173,
26,
3951,
11,
4778,
11,
966,
62,
7890,
11,
3108,
11,
997,
11,
640,
8,
63,
198,
8645,
689,
257,
410,
30488,
2393,
351,
262,
1813,
1366,
13,
22503,
416,
1279,
20608,
... | 2.056792 | 2,606 |
@info("Please allow up to 20 minutes for all tests to execute.")
import SeisIO
import SeisIO: get_svn
# Work from the package's test directory and fetch the sample-data checkout.
cd(dirname(pathof(SeisIO))*"/../test")
get_svn("https://github.com/jpjones76/SeisIO-TestData/trunk/SampleFiles", "SampleFiles")
# local_restricted.jl / test_helpers.jl define path, out, keep_log,
# keep_samples, has_restricted used below.
include("local_restricted.jl")
include("test_helpers.jl")
# Announce test begin
test_start = Dates.now()
ltestname = 48
printstyled(stdout,
            string(test_start, ": tests begin, path = ", path, ", has_restricted = ", has_restricted, ", keep_log = ", keep_log, ", keep_samples = ", keep_samples, "\n"),
            color=:light_green,
            bold=true)
# Run all tests
# huehuehue grep "include(joinpath" runtests.jl | awk -F "(" '{print $3}' | awk -F "," {'print $1'}
for d in ["CoreUtils", "Types", "RandSeis", "Utils", "NativeIO", "DataFormats", "Processing", "Quake", "Web"]
  # Center the group name inside a `ltestname`-wide banner of '=' characters.
  ld = length(d)
  ll = div(ltestname - ld - 2, 2)
  lr = ll + (isodd(ld) ? 1 : 0)
  printstyled(string("="^ll, " ", d, " ", "="^lr, "\n"), color=:cyan, bold=true)
  for i in readdir(path*"/"*d)
    f = joinpath(d,i)
    if endswith(i, ".jl")
      printstyled(lpad(" "*f, ltestname)*"\n", color=:cyan)
      # Record the current test file in the log before running it.
      write(out, string("\n\ntest ", f, "\n\n"))
      flush(out)
      include(f)
    end
  end
end
# Cleanup
include("cleanup.jl")
if keep_samples == false
  include("rm_samples.jl")
end
if !keep_log
  try
    rm("runtests.log")
  catch err
    @warn(string("can't remove runtests.log; threw err", err))
  end
end
# Announce tests end
test_end = Dates.now()
# .value is the elapsed time in milliseconds; convert to seconds.
δt = 0.001*(test_end-test_start).value
mm = round(Int, div(δt, 60))
ss = rem(δt, 60)
printstyled(string(test_end, ": tests end, elapsed time (mm:ss.μμμ) = ",
                   @sprintf("%02i", mm), ":",
                   @sprintf("%06.3f", ss), "\n"), color=:light_green, bold=true)
printstyled("To run some data acquisition examples, execute this command: include(\"", path, "/examples.jl\").\n", color=:cyan, bold=true)
| [
31,
10951,
7203,
5492,
1249,
510,
284,
1160,
2431,
329,
477,
5254,
284,
12260,
19570,
201,
198,
11748,
1001,
271,
9399,
201,
198,
11748,
1001,
271,
9399,
25,
651,
62,
21370,
77,
201,
198,
10210,
7,
15908,
3672,
7,
6978,
1659,
7,
465... | 2.253456 | 868 |
<gh_stars>1-10
# Plotting script: compares grid likelihood profiles of the TRAPPIST-1
# Student's-t photodynamical fit against HMC posterior samples, and saves
# eccentricity-vector, eccentricity, and noise-parameter figures as PDFs.
using JLD2
using PyPlot
using Statistics
using Printf
nfile = 8
include("../../../src/loglinspace.jl")
include("../../../src/histogram_code.jl")
# Load in likelihood profile:
#@load "T1_likelihood_profile_student_all_3.0sig.jld2"
@load "../../../data/T1_likelihood_profile_student_all_3.0sig_v02.jld2"
# Load in the Markov chain dataset:
@load "../../../data/T1_hmc_total_02212020.jld2"
#@load "/Users/ericagol/Observing/Spitzer/DDT2019/Campaign04/v15/Hyak/T1_hmc_total_02212020.jld2"
## Now, make plots of each parameter versus the others:
#planet =["b","c","d","e","f","g","h"]
#var = ["m","P","t0","ecos","esin","log(nu)","V1 e^{1/(2nu)}"]
#
#for i=1:36
#  if i < 36
#    ip1 = ceil(Int64,i/5); ip2 = mod(i-1,5)+1
#    x = vec(elements_grid_all[:,ip1+1,ip2,:])
#    xname = string(planet[ip1]," ",var[ip2])
#  else
#    x = log.(vec(ndof_grid_all))
#    xname = var[6]
#  end
#  for j=i+1:37
##  for j=36:37
#    if j < 36
#      jp1 = ceil(Int64,j/5); jp2 = mod(j-1,5)+1
#      y = vec(elements_grid_all[:,jp1+1,jp2,:])
#      yname = string(planet[jp1]," ",var[jp2])
#    elseif j < 37
#      y = log.(vec(ndof_grid_all))
#      yname = var[6]
#    else
#      y = exp.(vec(lnV1_grid_all) .+ 1 ./(2vec(ndof_grid_all)))
#      yname = var[7]
#    end
#    if(abs(cor(x,y)) > 0.5)
#      clf()
#      cmap = exp.(-0.5 .*(vec(chi_grid_all) .- minimum(chi_grid_all)))
#      scatter(x[cmap .> 0.6],y[cmap .> 0.6],c=cmap[cmap .> 0.6])
#      println(i," ",xname," ",j," ",yname," ",cor(x,y))
#      read(stdin,Char)
#    end
#  end
#end
planet =["b","c","d","e","f","g","h"]
cp = ["C0","C1","C2","C3","C4","C5","C6","C7","C8","C9"]
x2=collect(linearspace(-0.015,0.015,1000))
# Panel grid of e*cos(omega) / e*sin(omega) likelihood profiles (thin lines),
# Gaussian approximations from the covariance (faint), and HMC histograms
# (thick) for each of the seven planets; index 16 is the profile's center.
fig,axes = subplots(4,2,figsize=(10,8),sharex="col",sharey="row")
for i=1:7
  ax = axes[i]
  x= elements_grid_all[(i-1)*5+4,i+1,4,:]; ecos0 = elements_grid_all[(i-1)*5+4,i+1,4,16]
  ecc = sqrt.(x.^2 .+ elements_grid_all[(i-1)*5+5,i+1,5,:].^2)
  prob = exp.(-0.5*(chi_grid_all[(i-1)*5+4,:] .-chi_grid_all[(i-1)*5+4,16]))
  ax.plot(x,prob./maximum(prob),color=cp[i],linewidth=1)
  prob2 = exp.(-0.5 .*(x2 .-ecos0).^2 ./cov_save[(i-1)*5+4,(i-1)*5+4])
  ax.plot(x2,prob2,color=cp[i],alpha=0.3,linewidth=1)
  # Plot histogram:
  ecos_bin,ecos_hist,ecos_bin_square,ecos_hist_square = histogram(state_total[(i-1)*5+4,:],50)
#  ax.plot(ecos_bin_square,ecos_hist_square ./maximum(ecos_hist_square),color=cp[i],linewidth=3,label=L"$e\cos{\omega}$)
  ax.plot(ecos_bin_square,ecos_hist_square ./maximum(ecos_hist_square),color=cp[i],linewidth=3,label=L"$e\cos \omega$")
  x = elements_grid_all[(i-1)*5+5,i+1,5,:]; esin0 = elements_grid_all[i*5,i+1,5,16]
  ecc = sqrt.(x.^2 + elements_grid_all[5i-4,i+1,4,:].^2)
  prob = exp.(-0.5*(chi_grid_all[(i-1)*5+5,:] .-chi_grid_all[(i-1)*5+5,16]))
  ax.plot(x,prob./maximum(prob),linestyle=":",color=cp[i],linewidth=1)
  prob2 = exp.(-0.5 .*(x2 .-esin0).^2 ./cov_save[i*5,i*5])
  ax.plot(x2,prob2,color=cp[i],linestyle=":",alpha=0.3,linewidth=1)
  # Plot histogram:
  esin_bin,esin_hist,esin_bin_square,esin_hist_square = histogram(state_total[(i-1)*5+5,:],50)
  ax.plot(esin_bin_square,esin_hist_square ./maximum(esin_hist_square),color=cp[i],linestyle=":",linewidth=3,label=L"$e\sin{\omega}$")
#  ax.plot(esin_bin_square,esin_hist_square ./maximum(esin_hist_square),color=cp[i],linestyle=":",linewidth=3)
  ax.plot([0,0],[0,1.05],linestyle="--",color=cp[i],linewidth=2)
  ax.legend();
  ax.axis([-0.0175,0.0175,0,1.05]); ax.annotate(string("(",planet[i],")"),xy=[-0.014;0.8])
  println(planet[i]," ",@sprintf("%6.4f",ecos0),"+-",@sprintf("%6.4f",sqrt(cov_save[5i-1,5i-1])),
          " ",@sprintf("%6.4f",esin0),"+-",@sprintf("%6.4f",sqrt(cov_save[5i,5i])))
  ax.grid(linestyle=":")
end
ax = axes[8]
#ax.axis("off")
# Plot the prior:
include("compute_ecc_prior.jl")
ax.grid(linestyle=":")
tight_layout()
subplots_adjust(hspace = 0,wspace=0)
savefig("../T1_eccentricity_vectors_likelihood_profile_hmc.pdf",bbox_inches="tight")
#read(stdin,Char)
# Make a plot of eccentricity histogram:
clf()
for i=1:7
  ecc_bin,ecc_hist,ecc_bin_square,ecc_hist_square = histogram(sqrt.(state_total[(i-1)*5+4,:].^2+state_total[(i-1)*5+5,:].^2),50)
  plot(ecc_bin_square,ecc_hist_square./maximum(ecc_hist_square),color=cp[i],linewidth=3,label=planet[i])
end
xlabel("Eccentricity",fontsize=15)
ylabel("Probability",fontsize=15)
legend(fontsize=15)
axis([0,0.015,0,1.05])
xticks(fontsize=15)
yticks(fontsize=15)
#read(stdin,Char)
savefig("../eccentricity_posterior.pdf",bbox_inches="tight")
#read(stdin,Char)
# Period and transit-time profiles (relative to the profile centers) per planet.
fig,axes = subplots(4,2)
for i=1:7
  ax = axes[i]
  # Plot period:
  P0 = elements_grid_all[(i-1)*5+2,i+1,2,16]; x= elements_grid_all[(i-1)*5+2,i+1,2,:] .- P0
  prob = exp.(-0.5*(chi_grid_all[(i-1)*5+4,:] .-chi_grid_all[(i-1)*5+4,16]))
  sigP = sqrt(cov_save[(i-1)*5+2,(i-1)*5+2])
  x2=collect(linearspace(-4sigP,4sigP,1000))
  ax.plot(x,prob,label="Period",color=cp[i])
  prob2 = exp.(-0.5 .*x2.^2 ./cov_save[(i-1)*5+2,(i-1)*5+2])
  ax.plot(x2,prob2,color=cp[i],alpha=0.3)
  t0 = elements_grid_all[(i-1)*5+3,i+1,3,16]
  prob = exp.(-0.5*(chi_grid_all[(i-1)*5+3,:] .-chi_grid_all[(i-1)*5+3,16]))
  x = elements_grid_all[(i-1)*5+3,i+1,3,:] .- t0
  ax.plot(x,prob,label=L"$t_0$",linestyle="--",color=cp[i])
  sigt0 = sqrt(cov_save[(i-1)*5+3,(i-1)*5+3])
  x2=collect(linearspace(-4sigt0,4sigt0,1000))
  prob2 = exp.(-0.5 .*x2.^2 ./cov_save[(i-1)*5+3,(i-1)*5+3])
  ax.plot(x2,prob2,color=cp[i],linestyle="--",alpha=0.3)
  ax.plot([0,0],[0,1],linestyle=":",color=cp[i])
  ax.legend();
  ax.axis([-0.015,0.015,0,1]); ax.annotate(string("(",planet[i],")"),xy=[-0.014;0.8])
  println(planet[i]," ",@sprintf("%6.4f",P0),"+-",@sprintf("%6.4f",sigP),
          " ",@sprintf("%6.4f",t0),"+-",@sprintf("%6.4f",sigt0))
end
# Finally plot log(ndof) & V1e^{1/(2nu)}:
fig,axes = subplots(1,2,sharey="row")
ax = axes[1]
#ndof0 = ndof_grid_all[nparam-1,16]; x= ndof_grid_all[nparam-1,:]
#prob = exp.(-0.5*(chi_grid_all[nparam-1,:] .-chi_grid_all[nparam-1,16]))
#signdof = sqrt(cov_save[nparam-1,nparam-1])
#x2=collect(linearspace(-4signdof,4signdof,1000))
#ax.plot(x,prob,label=L"$\nu$",color=cp[n])
#prob2 = exp.(-0.5 .*x2.^2 ./cov_save[nparam-1,nparam-1])
#x2 .+= ndof0
#ax.plot(x2,prob2,color=cp[n],alpha=0.3)
#lgndof0 = log(ndof_grid_all[nparam-1,16]); x= log.(ndof_grid_all[nparam-1,:])
lgndof0 = lgndof_grid_all[nparam-1,16]; x= lgndof_grid_all[nparam-1,:]
prob = exp.(-0.5*(chi_grid_all[nparam-1,:] .-chi_grid_all[nparam-1,16]))
signdof = sqrt(cov_save[nparam-1,nparam-1])/exp(lgndof0)
x2=collect(linearspace(-4signdof,4signdof,1000))
ax.plot(x,prob,label=L"$\nu$",color=cp[n],linewidth=3)
prob2 = exp.(-0.5 .*x2.^2 ./(cov_save[nparam-1,nparam-1]/exp(2lgndof0)))
x2 .+= lgndof0
ax.plot(x2,prob2,color=cp[n],alpha=0.3,linewidth=3)
# Plot histogram of log(ndof) parameter:
lndof_bin,lndof_hist,lndof_bin_square,lndof_hist_square = histogram(state_total[36,:],50)
ax.plot(lndof_bin_square,lndof_hist_square./maximum(lndof_hist_square))
ax.set_xlabel(L"$\log{\nu}$",fontsize=15)
ax.set_ylabel("Probability",fontsize=15)
ax = axes[2]
V1exp2nuinv0 = V1exp2nuinv_grid_all[nparam,16]; x= V1exp2nuinv_grid_all[nparam,:]
prob = exp.(-0.5*(chi_grid_all[nparam,:] .-chi_grid_all[nparam,16]))
#sigV1exp2nuinv = sqrt(cov_save[nparam,nparam])
sigV1exp2nuinv = 0.09
x2=collect(linearspace(-4sigV1exp2nuinv,4sigV1exp2nuinv,1000))
ax.plot(x,prob,label=L"$\ln(V_1)$",color=cp[n+1],linewidth=3)
#prob2 = exp.(-0.5 .*x2.^2 ./cov_save[nparam,nparam])
prob2 = exp.(-0.5 .*x2.^2 ./sigV1exp2nuinv^2)
x2 .+= V1exp2nuinv0
ax.plot(x2,prob2,color=cp[n+1],alpha=0.3,linewidth=3)
# Plot histogram of this parameter:
V1expinv2nu_bin,V1expinv2nu_hist,V1expinv2nu_bin_square,V1expinv2nu_hist_square = histogram(state_total[37,:],50)
ax.plot(V1expinv2nu_bin_square,V1expinv2nu_hist_square./maximum(V1expinv2nu_hist_square))
ax.set_xlabel(L"$V_1 e^{1/(2\nu)}$",fontsize=15)
subplots_adjust(wspace=0)
savefig("../T1_students_params_transformed.pdf",bbox_inches="tight")
| [
27,
456,
62,
30783,
29,
16,
12,
940,
628,
198,
3500,
449,
11163,
17,
198,
3500,
9485,
43328,
198,
3500,
14370,
198,
3500,
12578,
69,
198,
77,
7753,
796,
807,
198,
17256,
7203,
40720,
40720,
40720,
10677,
14,
6404,
21602,
10223,
13,
... | 1.908894 | 4,160 |
<reponame>JuliaOpt/MathOptInterface.jl
# Copyright (c) 2017: <NAME> and contributors
# Copyright (c) 2017: Google Inc.
#
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.
"""
    abstract type AbstractBridge <: MOI.Bridges.AbstractBridge end

Subtype of [`MathOptInterface.Bridges.AbstractBridge`](@ref) for objective
bridges.

Concrete subtypes reformulate unsupported objective functions; see
[`bridge_objective`](@ref).
"""
abstract type AbstractBridge <: MOI.Bridges.AbstractBridge end
"""
    supports_objective_function(
        BT::Type{<:MOI.Bridges.Objective.AbstractBridge},
        F::Type{<:MOI.AbstractScalarFunction},
    )::Bool

Return a `Bool` indicating whether the bridges of type `BT` support bridging
objective functions of type `F`.

## Implementation notes

 * This method depends only on the type of the inputs, not the runtime values.
 * There is a default fallback returning `false`, so you need only implement
   this method for objective functions that the bridge implements.
"""
supports_objective_function(
    ::Type{<:AbstractBridge},
    ::Type{<:MOI.AbstractScalarFunction},
) = false
"""
    concrete_bridge_type(
        BT::Type{<:MOI.Bridges.Objective.AbstractBridge},
        F::Type{<:MOI.AbstractScalarFunction},
    )::Type

Return the concrete type of the bridge supporting objective functions of type
`F`.

This function can only be called if `MOI.supports_objective_function(BT, F)`
is `true`.
"""
concrete_bridge_type(::Type{BT}, ::Type{<:MOI.AbstractScalarFunction}) where {BT} = BT

function concrete_bridge_type(
    b::MOI.Bridges.AbstractBridgeOptimizer,
    F::Type{<:MOI.AbstractScalarFunction},
)
    # Look up the bridge type registered in the optimizer for `F`, then
    # resolve its concrete type.
    BT = MOI.Bridges.bridge_type(b, F)
    return concrete_bridge_type(BT, F)
end
"""
    bridge_objective(
        BT::Type{<:MOI.Bridges.Objective.AbstractBridge},
        model::MOI.ModelLike,
        func::MOI.AbstractScalarFunction,
    )::BT

Bridge the objective function `func` using bridge `BT` to `model` and returns
a bridge object of type `BT`.

## Implementation notes

 * The bridge type `BT` must be a concrete type, that is, all the type
   parameters of the bridge must be set.
"""
function bridge_objective(
    ::Type{<:AbstractBridge},
    ::MOI.ModelLike,
    func::MOI.AbstractScalarFunction,
)
    # Fallback: a bridge that does not implement this method cannot handle
    # an objective of this function type.
    attr = MOI.ObjectiveFunction{typeof(func)}()
    throw(MOI.UnsupportedAttribute(attr))
end
# Fallback: objective bridges do not support changing the optimization sense
# in place; the user must clear the objective first.
function MOI.set(
    ::MOI.ModelLike,
    ::MOI.ObjectiveSense,
    bridge::AbstractBridge,
    ::MOI.OptimizationSense,
)
    msg =
        "Objective bridge of type `$(typeof(bridge))` does not support " *
        "modifying the objective sense. As a workaround, set the sense " *
        "to `MOI.FEASIBILITY_SENSE` to clear the objective function and " *
        "bridges."
    throw(ArgumentError(msg))
end
# Fallback: bridges that cannot reconstruct the original objective function
# raise an error when it is queried.
function MOI.get(
    ::MOI.ModelLike,
    ::MOI.ObjectiveFunction,
    bridge::AbstractBridge,
)
    msg =
        "ObjectiveFunction bridge of type `$(typeof(bridge))` does not" *
        " support getting the objective function."
    throw(ArgumentError(msg))
end
function MOI.delete(::MOI.ModelLike, bridge::AbstractBridge)
    # Fallback: deletion must be provided by each concrete objective bridge.
    msg = "`MOI.delete` not implemented for `ObjectiveFunction` bridges of " *
          "type `$(typeof(bridge))`"
    throw(ArgumentError(msg))
end
| [
27,
7856,
261,
480,
29,
16980,
544,
27871,
14,
37372,
27871,
39317,
13,
20362,
198,
2,
15069,
357,
66,
8,
2177,
25,
1279,
20608,
29,
290,
20420,
198,
2,
15069,
357,
66,
8,
2177,
25,
3012,
3457,
13,
198,
2,
198,
2,
5765,
286,
428... | 2.668504 | 1,270 |
# Example from Nocedal & Wright, p. 281
# Used to test all the different algorithms
@testset "2by2" begin
    # Residual F(x) of the 2x2 nonlinear system; the root is (0, 1).
    function f_2by2!(F, x)
        F[1] = (x[1]+3)*(x[2]^3-7)+18
        F[2] = sin(x[2]*exp(x[1])-1)
    end
    # Analytic Jacobian J(x) matching f_2by2!.
    function g_2by2!(J, x)
        J[1, 1] = x[2]^3-7
        J[1, 2] = 3*x[2]^2*(x[1]+3)
        u = exp(x[1])*cos(x[2]*exp(x[1])-1)
        J[2, 1] = x[2]*u
        J[2, 2] = u
    end
    df = OnceDifferentiable(f_2by2!, g_2by2!, [ -0.5; 1.4], [ -0.5; 1.4])

    # Test trust region
    r = nlsolve(df, [ -0.5; 1.4], method = :trust_region, autoscale = true)
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-7
    r = nlsolve(df, [ -0.5; 1.4], method = :trust_region, autoscale = false)
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-7
    # Same problem in Float32: the iterates must stay in Float32.
    df32 = OnceDifferentiable(f_2by2!, g_2by2!, [ -0.5f0; 1.4f0], [ -0.5f0; 1.4f0])
    r = nlsolve(df32, [ -0.5f0; 1.4f0], method = :trust_region, autoscale = true)
    @test eltype(r.zero) == Float32
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-7
    r = nlsolve(df32, [ -0.5f0; 1.4f0], method = :trust_region, autoscale = false)
    @test eltype(r.zero) == Float32
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-7
    # Test Newton
    r = nlsolve(df, [ -0.5; 1.4], method = :newton, linesearch = LineSearches.BackTracking(), ftol = 1e-6)
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-6
    # Looser ftol in Float32, where 1e-6 may be unreachable.
    r = nlsolve(df32, [ -0.5f0; 1.4f0], method = :newton, linesearch = LineSearches.BackTracking(), ftol = 1e-3)
    @test eltype(r.zero) == Float32
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-6
    r = nlsolve(df, [ -0.5; 1.4], method = :newton, linesearch = LineSearches.HagerZhang(), ftol = 1e-6)
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-6
    r = nlsolve(df, [ -0.5; 1.4], method = :newton, linesearch = LineSearches.StrongWolfe(), ftol = 1e-6)
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-6
    # test local convergence of Anderson: close to a fixed-point and with
    # a small beta, f should be almost affine, in which case Anderson is
    # equivalent to GMRES and should converge
    r = nlsolve(df, [ 0.01; .99], method = :anderson, m = 10, beta=.01)
    @test converged(r)
    @test norm(r.zero - [ 0; 1]) < 1e-8
end
| [
2,
17934,
422,
399,
420,
276,
282,
1222,
12206,
11,
279,
13,
39882,
198,
2,
16718,
284,
1332,
477,
262,
1180,
16113,
198,
31,
9288,
2617,
366,
17,
1525,
17,
1,
2221,
628,
198,
8818,
277,
62,
17,
1525,
17,
0,
7,
37,
11,
2124,
8... | 2.101493 | 1,005 |
<reponame>UnofficialJuliaMirrorSnapshots/MusicManipulations.jl-274955c0-c284-5bf7-b122-5ecd51c559de<filename>test/quantizer_tests.jl<gh_stars>10-100
using Test
# Fixture: piano track of the test MIDI file, classified/quantized against
# triplet and sixteenth-note grids (grid values are fractions of a quarter note).
let
    cd(@__DIR__)
    midi = readMIDIFile("serenade_full.mid")
    piano = midi.tracks[4]
    notes = getnotes(piano, midi.tpq)
    tpq = 960
    triplets = [0, 1//3, 2//3, 1]
    sixteenths = [0, 1//4, 2//4, 3//4, 1]

    @testset "Classify triplets" begin
        @test isgrid(triplets)
        class = classify(notes, triplets)
        # Indices of notes expected to land on the middle (2nd) grid bin.
        inbetw = [246
        450
        618
        619
        620
        627
        628
        629
        637
        638
        639
        640]
        @test length(class) == length(notes)
        @test findall(class .== 2) == inbetw
        # Every note must fall in exactly one of the 4 bins.
        @test sum( sum( class .== n ) for n in 1:4) == length(notes)
    end
    @testset "Classify 16ths" begin
        @test isgrid(sixteenths)
        class = classify(notes, sixteenths)
        @test length(class) == length(notes)
        @test sum( sum( class .== n ) for n in 1:5) == length(notes)
    end
    @testset "Quantize" begin
        tripletstpq = triplets.*960
        qnotes = quantize(notes, triplets)
        @test qnotes.tpq == notes.tpq
        @test length(notes) == length(qnotes)
        # Quantization moves positions only; velocity and pitch are untouched.
        for f in (velocities, pitches)
            @test f(notes) == f(qnotes)
        end
        @test durations(notes) != durations(qnotes)
        # Third argument `false` disables duration quantization.
        qqnotes = quantize(notes, triplets, false)
        @test durations(notes) == durations(qqnotes)
        pos = positions(notes)
        qpos = positions(qnotes)
        @test positions(notes) !== positions(qnotes)
        # 320 ticks = tpq/3, the triplet grid spacing.
        @test mod.(qpos, 320) == zeros(length(notes))
    end
    @testset "quantize duration" begin
        for (i, grid) in enumerate([triplets, sixteenths])
            qnotes = quantize(notes, grid)
            dnotes = quantize_duration!(deepcopy(qnotes), grid)
            for note in dnotes
                # Durations snap to a nonzero multiple of the grid spacing.
                @test note.duration != 0
                @test mod(note.duration, tpq÷(2+i)) == 0
            end
        end
    end
end
@testset "Noninteger grid*tpq product" begin
    cd(@__DIR__)
    # Grid values whose product with tpq is not an integer; durations must
    # still quantize to something nonzero.
    grid = [0,0.383,0.73,1]
    midi = readMIDIFile("serenade_full.mid")
    notes = getnotes(midi, 4)
    tpq = 960
    qnotes = quantize(notes, grid)
    for note in qnotes
        @test note.duration != 0
    end
end
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
43826,
20910,
14,
22648,
5124,
541,
5768,
13,
20362,
12,
1983,
2920,
2816,
66,
15,
12,
66,
30336,
12,
20,
19881,
22,
12,
65,
18376,
12,
20,
21142,
4349,
66,
38605,
2934,
... | 2.250797 | 941 |
<reponame>uoa-ems-research/JEMSS.jl
##########################################################################
# Copyright 2017 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
using StatsBase
# Histogram `+` must be commutative, adopt the wider edge set, and treat
# NullHist() as the additive identity, for every supported edge-range type.
@testset "histogram addition" begin
    # null histogram
    h1 = fit(Histogram, [0], 0:1)
    h2 = NullHist()
    h3 = h1+h2
    @test h3 == h1
    @test h3 == h2+h1 # commutative
    # histograms with UnitRange edges
    h1 = fit(Histogram, [0], 0:2)
    h2 = fit(Histogram, [0,2], 0:3)
    h3 = h1+h2
    @test h3 == h2+h1 # commutative
    @test h3.edges == h2.edges
    @test h3.weights == [2,0,1] # [1,0] + [1,0,1]
    # histograms with StepRange edges
    h1 = fit(Histogram, [0], 0:2:4)
    h2 = fit(Histogram, [0,5], 0:2:6)
    h3 = h1+h2
    @test h3 == h2+h1 # commutative
    @test h3.edges == h2.edges
    @test h3.weights == [2,0,1] # [1,0] + [1,0,1]
    # histograms with StepRangeLen edges
    h1 = fit(Histogram, [0], 0.0:2.0)
    h2 = fit(Histogram, [0,2], 0.0:3.0)
    h3 = h1+h2
    @test h3 == h2+h1 # commutative
    @test h3.edges == h2.edges
    @test h3.weights == [2,0,1] # [1,0] + [1,0,1]
end
| [
27,
7856,
261,
480,
29,
84,
12162,
12,
5232,
12,
34033,
14,
41,
3620,
5432,
13,
20362,
198,
29113,
29113,
7804,
2235,
198,
2,
15069,
2177,
1279,
20608,
28401,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
... | 2.584665 | 626 |
<gh_stars>0
# Register the CIFAR-100 DataDep so the archive can be located/downloaded and
# unpacked on first use. Called from the package `__init__`.
function __init__cifar100()
    DEPNAME = "CIFAR100"

    register(DataDep(
        DEPNAME,
        """
        Dataset: The CIFAR-100 dataset
        Authors: <NAME>, <NAME>, <NAME>
        Website: https://www.cs.toronto.edu/~kriz/cifar.html
        Reference: https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf
        [Krizhevsky, 2009]
            <NAME>.
            "Learning Multiple Layers of Features from Tiny Images",
            Tech Report, 2009.
        The CIFAR-100 dataset is a labeled subsets of the 80
        million tiny images dataset. It consists of 60000
        32x32 colour images in 100 classes. Specifically, it
        has 100 classes containing 600 images each. There are
        500 training images and 100 testing images per class.
        The 100 classes in the CIFAR-100 are grouped into 20
        superclasses. Each image comes with a "fine" label
        (the class to which it belongs) and a "coarse" label
        (the superclass to which it belongs).
        The compressed archive file that contains the
        complete dataset is available for download at the
        offical website linked above; specifically the binary
        version for C programs. Note that using the data
        responsibly and respecting copyright remains your
        responsibility. The authors of CIFAR-10 aren't really
        explicit about any terms of use, so please read the
        website to make sure you want to download the
        dataset.
        """,
        "https://www.cs.toronto.edu/~kriz/cifar-100-binary.tar.gz",
        "58a81ae192c23a4be8b1804d68e518ed807d710a4eb253b1f2a199162a40d8ec",
        # Unpack the .tar.gz next to the download, then delete the archive.
        post_fetch_method = file -> (run(BinDeps.unpack_cmd(file, dirname(file), ".gz", ".tar")); rm(file))
    ))
end
"""
    CIFAR100(; Tx=Float32, split=:train, dir=nothing)
    CIFAR100([Tx, split])

The CIFAR-100 dataset is a labeled subset of the 80
million tiny images dataset. It consists of 60000
32x32 colour images in 100 classes (600 images per
class: 500 training images and 100 test images).

The 100 classes are grouped into 20 superclasses, so each
image carries two labels: a "coarse" label (the superclass)
and a "fine" label (the class). They are exposed through the
`targets` field as the named tuple `(coarse = ..., fine = ...)`.

# Arguments

$ARGUMENTS_SUPERVISED_ARRAY
- `split`: selects the data partition. Can take the values `:train:` or `:test`.

# Fields

$FIELDS_SUPERVISED_ARRAY
- `split`.

# Methods

$METHODS_SUPERVISED_ARRAY
- [`convert2image`](@ref) converts features to `RGB` images.

# Examples

```julia-repl
julia> dataset = CIFAR100()
CIFAR100:
  metadata    =>    Dict{String, Any} with 3 entries
  split       =>    :train
  features    =>    32×32×3×50000 Array{Float32, 4}
  targets     =>    (coarse = "50000-element Vector{Int64}", fine = "50000-element Vector{Int64}")

julia> dataset[1:5].targets
(coarse = [11, 15, 4, 14, 1], fine = [19, 29, 0, 11, 1])

julia> X, y = dataset[];

julia> dataset.metadata
Dict{String, Any} with 3 entries:
  "n_observations"     => 50000
  "class_names_coarse" => ["aquatic_mammals", "fish", "flowers", "food_containers", "fruit_and_vegetables", "household_electrical_devices", "household_furniture", "insects", "large_carnivores", "large_man-made_…
  "class_names_fine"   => ["apple", "aquarium_fish", "baby", "bear", "beaver", "bed", "bee", "beetle", "bicycle", "bottle"  …  "train", "trout", "tulip", "turtle", "wardrobe", "whale", "willow_tree", "wolf", "w…
```
"""
struct CIFAR100 <: SupervisedDataset
    metadata::Dict{String, Any}
    split::Symbol
    features::Array{<:Any, 4}
    targets::NamedTuple{(:coarse, :fine), Tuple{Vector{Int}, Vector{Int}}}
end
# Keyword-argument convenience constructor; forwards to the positional method.
function CIFAR100(; split = :train, Tx = Float32, dir = nothing)
    return CIFAR100(Tx, split; dir)
end
# Load the requested CIFAR-100 partition from the registered DataDep and
# assemble the dataset struct (features, coarse/fine targets, metadata).
function CIFAR100(Tx::Type, split::Symbol=:train; dir=nothing)
    DEPNAME = "CIFAR100"
    TRAINSET_FILENAME = joinpath("cifar-100-binary", "train.bin")
    TESTSET_FILENAME = joinpath("cifar-100-binary", "test.bin")
    COARSE_FILENAME = joinpath("cifar-100-binary", "coarse_label_names.txt")
    FINE_FILENAME = joinpath("cifar-100-binary", "fine_label_names.txt")
    # 500 train + 100 test images per class × 100 classes.
    TRAINSET_SIZE = 50_000
    TESTSET_SIZE = 10_000

    @assert split ∈ (:train, :test)
    if split == :train
        file_path = datafile(DEPNAME, TRAINSET_FILENAME, dir)
        images, labels_c, labels_f = CIFAR100Reader.readdata(file_path, TRAINSET_SIZE)
    else
        file_path = datafile(DEPNAME, TESTSET_FILENAME, dir)
        images, labels_c, labels_f = CIFAR100Reader.readdata(file_path, TESTSET_SIZE)
    end
    # Convert raw bytes to the requested element type (e.g. Float32 in [0,1]).
    features = bytes_to_type(Tx, images)
    targets = (coarse = labels_c, fine = labels_f)

    metadata = Dict{String, Any}()
    metadata["class_names_coarse"] = readlines(datafile(DEPNAME, COARSE_FILENAME, dir))
    metadata["class_names_fine"] = readlines(datafile(DEPNAME, FINE_FILENAME, dir))
    metadata["n_observations"] = size(features)[end]

    return CIFAR100(metadata, split, features, targets)
end
# CIFAR-100 stores images in the same layout as CIFAR-10, so reuse its conversion.
convert2image(::Type{<:CIFAR100}, x) = convert2image(CIFAR10, x)
# DEPRECATED INTERFACE, REMOVE IN v0.7 (or 0.6.x)
# Shim that emulates the old functional API (CIFAR100.traindata() etc.) by
# returning deprecation closures from property access on the *type* object.
function Base.getproperty(::Type{CIFAR100}, s::Symbol)
    if s == :traintensor
        @warn "CIFAR100.traintensor() is deprecated, use `CIFAR100(split=:train).features` instead." maxlog=2
        traintensor(T::Type=N0f8; kws...) = traintensor(T, :; kws...)
        traintensor(i; kws...) = traintensor(N0f8, i; kws...)
        function traintensor(T::Type, i; dir=nothing)
            CIFAR100(; split=:train, Tx=T, dir)[i][1]
        end
        return traintensor
    elseif s == :testtensor
        @warn "CIFAR100.testtensor() is deprecated, use `CIFAR100(split=:test).features` instead."  maxlog=2
        testtensor(T::Type=N0f8; kws...) = testtensor(T, :; kws...)
        testtensor(i; kws...) = testtensor(N0f8, i; kws...)
        function testtensor(T::Type, i; dir=nothing)
            CIFAR100(; split=:test, Tx=T, dir)[i][1]
        end
        return testtensor
    elseif s == :trainlabels
        @warn "CIFAR100.trainlabels() is deprecated, use `CIFAR100(split=:train).targets` instead." maxlog=2
        trainlabels(; kws...) = trainlabels(:; kws...)
        function trainlabels(i; dir=nothing)
            # Old API returned (coarse, fine) as two values.
            yc, yf = CIFAR100(; split=:train, dir)[i][2]
            yc, yf
        end
        return trainlabels
    elseif s == :testlabels
        @warn "CIFAR100.testlabels() is deprecated, use `CIFAR100(split=:test).targets` instead." maxlog=2
        testlabels(; kws...) = testlabels(:; kws...)
        function testlabels(i; dir=nothing)
            yc, yf = CIFAR100(; split=:test, dir)[i][2]
            yc, yf
        end
        return testlabels
    elseif s == :traindata
        @warn "CIFAR100.traindata() is deprecated, use `CIFAR100(split=:train)[]` instead." maxlog=2
        traindata(T::Type=N0f8; kws...) = traindata(T, :; kws...)
        traindata(i; kws...) = traindata(N0f8, i; kws...)
        function traindata(T::Type, i; dir=nothing)
            x, (yc, yf) = CIFAR100(; split=:train, Tx=T, dir)[i]
            x, yc, yf
        end
        return traindata
    elseif s == :testdata
        @warn "CIFAR100.testdata() is deprecated, use `CIFAR100(split=:test)[]` instead." maxlog=2
        testdata(T::Type=N0f8; kws...) = testdata(T, :; kws...)
        testdata(i; kws...) = testdata(N0f8, i; kws...)
        function testdata(T::Type, i; dir=nothing)
            x, (yc, yf) = CIFAR100(; split=:test, Tx=T, dir)[i]
            x, yc, yf
        end
        return testdata
    elseif s == :convert2image
        @warn "CIFAR100.convert2image(x) is deprecated, use `convert2image(CIFAR100, x)` instead"
        return x -> convert2image(CIFAR100, x)
    elseif s == :classnames_fine
        @warn "CIFAR100.classnames_fine() is deprecated, use `CIFAR100().metadata[\"class_names_fine\"]` instead"
        return () -> CIFAR100().metadata["class_names_fine"]
    elseif s == :classnames_coarse
        @warn "CIFAR100.classnames_coarse() is deprecated, use `CIFAR100().metadata[\"class_names_coarse\"]` instead"
        return () -> CIFAR100().metadata["class_names_coarse"]
    else
        # Any other symbol falls through to the real type fields.
        return getfield(CIFAR100, s)
    end
end
| [
27,
456,
62,
30783,
29,
15,
198,
8818,
11593,
15003,
834,
66,
361,
283,
3064,
3419,
198,
220,
220,
220,
5550,
13137,
10067,
796,
366,
34,
5064,
1503,
3064,
1,
628,
220,
220,
220,
7881,
7,
6601,
12156,
7,
198,
220,
220,
220,
220,
... | 2.292286 | 3,565 |
# Exercises Modia's tearing pass on a sequence of small DAE models; each case
# targets a different structural situation (see the per-model comments).
module TestTearing

println("TestTearing: Tests tearing algorithm of the symbolic handling.")

using Modia

# Desired:
#   using ModiaMath: plot
#
# In order that these packages need not to be defined in the user environment, they are included via Modia:
using Modia.ModiaMath: plot

# Tearing1
# No tearing is performed since coefficients of the block equations are not 1 or -1.
@model Tearing1 begin
    x1 = Float(size=())
    x2 = Float(size=())
    x3 = Float(start=1.0)
@equations begin
    2.0*x1 + 3.0*x2 = x3
    -1.4*x1 + 5.0*x2 = 2.0*x3
    der(x3) = -x3
    end
end
result = simulate(Tearing1, 1.0; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("x1", "x2", "x3"))

# Tearing1B
# Like Tearing1, but the first equation has a unit coefficient on x2.
@model Tearing1B begin
    x1 = Float(size=())
    x2 = Float(size=())
    x3 = Float(start=1.0)
@equations begin
    2.0*x1 + x2 = x3
    -1.4*x1 + 5.0*x2 = 2.0*x3
    der(x3) = -x3
    end
end
result = simulate(Tearing1B, 1.0; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("x1", "x2", "x3"))

# Tearing2
# A correct solution is:
#   x2 := (x3 - 1.0) - 2.0 * (sin)(x1)
#   -1.4*sin(x1) + 5.0*cos(x2) = 2.0*x3
#   der(x3) = -x3
#
@model Tearing2 begin
    x1 = Float(size=())
    x2 = Float(size=())
    x3 = Float(start=1.0)
@equations begin
    2.0*sin(x1) + x2 = x3-1.0
    -1.4*sin(x1) + 5.0*cos(x2) = 2.0*x3
    der(x3) = -x3
    end
end
result = simulate(Tearing2, 0.3; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("x1", "x2", "x3"))

# Tearing3
# Variables that are explicitely solved due to tearing: x1, x2
#
@model Tearing3 begin
    x1 = Float(size=())
    x2 = Float(size=())
    x3 = Float(size=())
    x4 = Float(start=1.0)
@equations begin
    2.0*sin(x1) + x2 = x3-1.0 - 0.01*sin(x4)
    -1.4*sin(x1) + 5.0*cos(x2) + (0.02*x3)^3 = 2.0*x4
    sin(0.1*x4) = abs(x3) - x1
    der(x4) = -x4
    end
end
result = simulate(Tearing3, 0.3; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("x1", "x2", "x3", "x4"))

# Tearing4
# Variables that are explicitely solved due to tearing: x1, x2
#
@model Tearing4 begin
    x1 = Float(size=())
    x2 = Float(size=())
    x3 = Float(size=())
    x4 = Float(size=())
    x5 = Float(start=1.0)
@equations begin
    2.0*sin(x1) + x2 = x3-1.0 - 0.01*sin(x4)
    -1.4*sin(x1) + 5.0*cos(x2) + (0.02*x3)^3 = 2.0*x4
    sin(0.1*x4) = abs(x3) - x1
    -2*x4 + 3.0*x1 -3*x2 = 0.0
    der(x5) = -x5
    end
end
result = simulate(Tearing4, 0.3; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("x1", "x2", "x3", "x4", "x5"))

# TearingCombined: the four independent subsystems above in one model, so
# tearing must handle several algebraic blocks simultaneously.
@model TearingCombined begin
    x1 = Float(size=())
    x2 = Float(size=())
    x3 = Float(start=1.0)
    x11 = Float(size=())
    x12 = Float(size=())
    x13 = Float(start=1.0)
    x21 = Float(size=())
    x22 = Float(size=())
    x23 = Float(start=1.0)
    x31 = Float(size=())
    x32 = Float(size=())
    x33 = Float(start=1.0)
    x34 = Float(start=1.0)
    x35 = Float(size=())
@equations begin
    2.0*x1 + 3.0*x2 = x3
    -1.4*x1 + 5.0*x2 = 2.0*x3
    der(x3) = -x3
    2.0*x11 + x12 = x13
    -1.4*x11 + 5.0*x12 = 2.0*x13
    der(x13) = -x13
    2.0*sin(x21) + x22 = x23-1.0
    -1.4*sin(x21) + 5.0*cos(x22) = 2.0*x23
    der(x23) = -x23
    2.0*sin(x31) + x32 = x33-1.0
    -1.4*sin(x35) + 5.0*cos(x34) = 2.0*x33
    x34 = 1.0*x32
    2*x35 = 2*x31
    der(x33) = -x33
    end
end
result = simulate(TearingCombined, 1.0; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("x1", "x2", "x3"))

# Tearing5
# RemoveAuxiliary should remove equation "2*x1 = 4*x2"
# Tearing should then eliminate x1 = 2*x2
#
# The log shows, that all this works. However, the generated code
# has a state vector of [x1, x2, x3, x4]. Since tearing can compute x1 from x2,
# it would be possible that the state vector has only elements [x2,x3,x4].
# Otherwise, tearing has basically no effect, because the system of equations
# was not reduced in the generated code.
@model Tearing5 begin
    x1 = Float(size=())
    x2 = Float(size=())
    x3 = Float(size=())
    x4 = Float(start=1.0)
@equations begin
    x1 = 2*x2
    2*x1 = 4*x2
    x1 + x2 = x3 + x4
    3.1*x2 + 1.2*x3 + 1.3*x4 = 0
    der(x4) = -x4
    end
end
result = simulate(Tearing5, 0.3; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("x1", "x2", "x3", "x4"), figure=5)

# Tearing6: two capacitors in parallel — an index reduction / singularity
# removal case combined with tearing.
@model Tearing6 begin
    C1 = 1e-3
    C2 = 2e-3
    u1 = Float(size=(), start=1.0)
    u2 = Float(size=(), state=false)
    i1 = Float(size=())
    v1 = Float(size=())
    v0 = Float(size=())
@equations begin
    C1*der(u1) = i1
    C2*der(u2) = -i1
    u1 = v1 - v0
    u2 = v1 - v0
    v0 = 0
    end
end
result = simulate(Tearing6, 1.0; logTranslation=true, logSimulation=true, tearing=true, removeSingularities=true)
plot(result, ("u1", "u2", "i1", "v1", "v0"), figure=6)

end
| [
21412,
6208,
51,
6648,
198,
198,
35235,
7203,
14402,
51,
6648,
25,
30307,
24447,
11862,
286,
262,
18975,
9041,
19570,
198,
198,
3500,
3401,
544,
628,
198,
2,
2935,
1202,
25,
198,
2,
220,
220,
1262,
3401,
544,
37372,
25,
7110,
198,
2... | 2.050521 | 2,494 |
<reponame>moble/Spherical.jl
"""Algorithm for computing H, as given by arxiv:1403.7698
H is related to Wigner's (small) d via
dₗⁿᵐ = ϵₙ ϵ₋ₘ Hₗⁿᵐ,
where
⎧ 1 for k≤0
ϵₖ = ⎨
⎩ (-1)ᵏ for k>0
H has various advantages over d, including the fact that it can be efficiently
and robustly valculated via recurrence relations, and the following symmetry
relations:
H^{m', m}_n(β) = H^{m, m'}_n(β)
H^{m', m}_n(β) = H^{-m', -m}_n(β)
H^{m', m}_n(β) = (-1)^{n+m+m'} H^{-m', m}_n(π - β)
H^{m', m}_n(β) = (-1)^{m+m'} H^{m', m}_n(-β)
Because of these symmetries, we only need to evaluate at most 1/4 of all the
elements.
"""
"""
    nm_index(n, m)

Return the flat (1-based) index of the pair `[n, m]` in an array ordered as

    [[n, m] for n in 0:n_max for m in -n:n]
"""
function nm_index(n, m)
    # n² entries precede row n; within the row, (m + n) entries precede m;
    # add 1 for 1-based indexing: n² + (m + n) + 1 == n*(n + 1) + m + 1.
    return n * (n + 1) + m + 1
end
"""
    nabsm_index(n, absm)

Return the flat (1-based) index of the pair `[n, abs(m)]` in an array ordered as

    [[n, m] for n in 0:n_max for m in 0:n]
"""
function nabsm_index(n, absm)
    # Rows 0..n-1 contribute the triangular number n*(n+1)/2 of entries;
    # `absm` offsets into row n; add 1 for 1-based indexing.
    preceding = (n * (n + 1)) ÷ 2
    return preceding + absm + 1
end
"""
    nmpm_index(n, mp, m)

Return the flat (1-based) index of the triple `[n, mp, m]` in an array ordered as

    [[n, mp, m] for n in 0:n_max for mp in -n:n for m in -n:n]
"""
function nmpm_index(n, mp, m)
    # Closed-form offset: Σ_{n′<n}(2n′+1)² + (mp+n)*(2n+1) + (m+n), written
    # with a numerator divisible by 3 so integer division is exact.
    numerator = ((4n + 6) * n + 6mp + 5) * n + 3 * (m + mp)
    return numerator ÷ 3 + 1
end
@inbounds function _step_1!(w::WignerMatrixCalculator)
    """If n=0 set H_{0}^{0,0}=1."""
    # Seed of all subsequent recurrences: H⁰₍₀,₀₎ = 1 at the first wedge slot.
    w.Hwedge[1] = 1
end
@inbounds function _step_2!(w::WignerMatrixCalculator, expiβ::Complex)
    """Compute values H^{0,m}_{n}(β) for m=0,...,n and H^{0,m}_{n+1}(β) for m=0,...,n+1

    Uses Eq. (32) of Gumerov-Duraiswami (2014) [arxiv:1403.7698]:

        H^{0,m}_{n}(β) = (-1)^m √((n-|m|)! / (n+|m|)!) P^{|m|}_{n}(cos β)
                       = (-1)^m P̄^{|m|}_{n}(cos β) / √(k (2n+1))

    Here, k=1 for m=0, and k=2 for m>0, and

        P̄ = √{k(2n+1)(n-m)!/(n+m)!} P

    We use the "fully normalized" associated Legendre functions (fnALF) P̄ because,
    as explained by Xing et al. (2020) [https://doi.org/10.1007/s00190-019-01331-0],
    it is possible to compute these values very efficiently and accurately, while
    also delaying the onset of overflow and underflow.

    NOTE: Though not specified in arxiv:1403.7698, there is not enough information
    for step 4 unless we also use symmetry to set H^{1,0}_{n} here.  Similarly,
    step 5 needs additional information, which depends on setting H^{0, -1}_{n}
    from its symmetric equivalent H^{0, 1}_{n} in this step.
    """
    n_max, mp_max, TW = ℓₘₐₓ(w), m′ₘₐₓ(w), T(w)
    Hwedge, Hextra, Hv = w.Hwedge, w.Hextra, w.Hv
    cosβ = expiβ.re
    sinβ = expiβ.im
    sqrt3 = √TW(3)
    # The general expressions for the constants are Eq. (13) of Xing et al.:
    #
    #   aₙ = √((2n+1)/TW(2n-1))
    #   bₙ = √(2*(n-1)*(2n+1)/TW(n*(2n-1)))
    #   cₙₘ = √(((n+m)*(n-m)*(2n+1)) / TW(2n-1)) / n
    #   dₙₘ = √(((n-m)*(n-m-1)*(2n+1)) / TW(2n-1)) / (2n)
    #   eₙₘ = √(((n+m)*(n+m-1)*(2n+1)) / TW(2n-1)) / (2n)
    #
    # Below, I factor aₙ out of each of these expressions, along with 1/2n where
    # relevant, to avoid divisions.
    #
    # We initialize with Eq. (14), then step through with Eq. (12), to compute all
    # values of P̄.  Finally, we normalize everything again to compute the H
    # values.
    if n_max > 0
        # n = 1
        n0n_index = WignerHindex(1, 0, 1, mp_max)
        Hwedge[n0n_index] = sqrt3 * sinβ
        Hwedge[n0n_index-1] = sqrt3 * cosβ
        # n = 2, ..., n_max+1
        # Row n = n_max+1 does not fit in the wedge; it goes to Hextra.
        for n in 2:n_max+1
            if n <= n_max
                n0n_index = WignerHindex(n, 0, n, mp_max)
                H = Hwedge
            else
                n0n_index = n + 1
                H = Hextra
            end
            nm10nm1_index = WignerHindex(n-1, 0, n-1, mp_max)
            inv2n = inv(TW(2n))
            aₙ = √((2n+1)/TW(2n-1))
            bₙ = aₙ * √((2*(n-1))/TW(n))
            # m = n
            eₙₘ = inv2n * √TW((2n)*(2n+1))
            H[n0n_index] = sinβ * eₙₘ * Hwedge[nm10nm1_index]
            # m = n-1
            eₙₘ = inv2n * √TW((2n-2)*(2n+1))
            cₙₘ = 2inv2n * √TW(2n+1)
            H[n0n_index-1] = cosβ * cₙₘ * Hwedge[nm10nm1_index] + sinβ * eₙₘ * Hwedge[nm10nm1_index-1]
            # m = n-2, ..., 2
            for i in 2:n-2
                # m = n-i
                cₙₘ = 2inv2n * aₙ * √TW((2n-i)*i)
                dₙₘ = inv2n * aₙ * √TW(i*(i-1))
                eₙₘ = inv2n * aₙ * √TW((2n-i)*(2n-i-1))
                H[n0n_index-i] = (
                    cosβ * cₙₘ * Hwedge[nm10nm1_index-i+1]
                    - sinβ * (
                        dₙₘ * Hwedge[nm10nm1_index-i+2]
                        - eₙₘ * Hwedge[nm10nm1_index-i]
                    )
                )
            end
            # m = 1
            cₙₘ = 2inv2n * aₙ * √TW((n+1)*(n-1))
            dₙₘ = inv2n * aₙ * √TW((n-1)*(n-2))
            eₙₘ = inv2n * aₙ * √TW(2n*(n+1))
            H[n0n_index-n+1] = (
                cosβ * cₙₘ * Hwedge[nm10nm1_index-n+2]
                - sinβ * (
                    dₙₘ * Hwedge[nm10nm1_index-n+3]
                    - eₙₘ * Hwedge[nm10nm1_index-n+1]
                )
            )
            # m = 0
            cₙₘ = aₙ
            dₙₘ = inv2n * aₙ * √TW(n*(n-1))
            eₙₘ = dₙₘ
            H[n0n_index-n] = (
                aₙ * cosβ * Hwedge[nm10nm1_index-n+1]
                - bₙ * sinβ * Hwedge[nm10nm1_index-n+2] / 2
            )
            # Supply extra edge cases as noted in docstring
            if n <= n_max
                Hv[nm_index(n, 1)] = Hwedge[WignerHindex(n, 0, 1, mp_max)]
                Hv[nm_index(n, 0)] = Hwedge[WignerHindex(n, 0, 1, mp_max)]
            end
        end
        # Supply extra edge cases as noted in docstring
        Hv[nm_index(1, 1)] = Hwedge[WignerHindex(1, 0, 1, mp_max)]
        Hv[nm_index(1, 0)] = Hwedge[WignerHindex(1, 0, 1, mp_max)]
        # Normalize, changing P̄ to H values
        for n in 1:n_max+1
            if n <= n_max
                n00_index = WignerHindex(n, 0, 0, mp_max)
                H = Hwedge
            else
                n00_index = 1
                H = Hextra
            end
            # const0 applies to m = 0 (k = 1); const1 to m > 0 (k = 2).
            const0 = inv(√TW(2n+1))
            const1 = inv(√TW(4n+2))
            H[n00_index] *= const0
            for m in 1:n
                H[n00_index+m] *= const1
            end
            if n <= n_max
                Hv[nm_index(n, 1)] *= -const1
                Hv[nm_index(n, 0)] *= -const1
            end
        end
    end
end
@inbounds function _step_3!(w::WignerMatrixCalculator, expiβ::Complex)
    """Use relation (41) to compute H^{1,m}_{n}(β) for m=1,...,n.  Using symmetry and shift
    of the indices this relation can be written as

        b^{0}_{n+1} H^{1, m}_{n} =   (b^{−m−1}_{n+1} (1−cosβ))/2 H^{0, m+1}_{n+1}
                                   − (b^{ m−1}_{n+1} (1+cosβ))/2 H^{0, m−1}_{n+1}
                                   − a^{m}_{n} sinβ H^{0, m}_{n+1}
    """
    avalues = a(w)
    bvalues = b(w)
    n_max = ℓₘₐₓ(w)
    mp_max = m′ₘₐₓ(w)
    Hwedge = w.Hwedge
    Hextra = w.Hextra
    cosβ = expiβ.re
    sinβ = expiβ.im
    # Precomputed half-angle factors (1 ± cosβ)/2 from the relation above.
    cosβ₊ = (1+cosβ)/2
    cosβ₋ = (1-cosβ)/2
    if n_max > 0 && mp_max > 0
        for n in 1:n_max
            # m = 1, ..., n
            i1 = WignerHindex(n, 1, 1, mp_max)
            # Row n+1 lives in Hextra when it exceeds the wedge (see _step_2!).
            if n+1 <= n_max
                i2 = WignerHindex(n+1, 0, 0, mp_max)
                H2 = Hwedge
            else
                i2 = 1
                H2 = Hextra
            end
            i3 = nm_index(n+1, 0)
            i4 = nabsm_index(n, 1)
            inverse_b5 = inv(bvalues[i3])
            for i in 0:n-1
                b6 = bvalues[-i+i3-2]
                b7 = bvalues[i+i3]
                a8 = avalues[i+i4]
                Hwedge[i+i1] = inverse_b5 * (
                    (
                        b6 * cosβ₋ * H2[i+i2+2]
                        - b7 * cosβ₊ * H2[i+i2]
                    )
                    - a8 * sinβ * H2[i+i2+1]
                )
            end
        end
    end
end
@inbounds function _step_4!(w::WignerMatrixCalculator)
    """Recursively compute H^{m'+1, m}_{n}(β) for m'=1,...,n−1, m=m',...,n using relation (50) resolved
    with respect to H^{m'+1, m}_{n}:

      d^{m'}_{n} H^{m'+1, m}_{n} =   d^{m'−1}_{n} H^{m'−1, m}_{n}
                                   − d^{m−1}_{n} H^{m', m−1}_{n}
                                   + d^{m}_{n} H^{m', m+1}_{n}

    (where the last term drops out for m=n).
    """
    dvalues, n_max, mp_max = d(w), ℓₘₐₓ(w), m′ₘₐₓ(w)
    Hwedge, Hv = w.Hwedge, w.Hv
    if n_max > 0 && mp_max > 0
        for n in 2:n_max
            for mp in 1:min(n, mp_max)-1
                # m = m', ..., n-1
                # i1 = WignerHindex(n, mp+1, mp, mp_max)
                i1 = WignerHindex(n, mp+1, mp+1, mp_max) - 1
                i2 = WignerHindex(n, mp-1, mp, mp_max)
                # i3 = WignerHindex(n, mp, mp-1, mp_max)
                i3 = WignerHindex(n, mp, mp, mp_max) - 1
                i4 = WignerHindex(n, mp, mp+1, mp_max)
                i5 = nm_index(n, mp)
                i6 = nm_index(n, mp-1)
                inverse_d5 = inv(dvalues[i5])
                d6 = dvalues[i6]
                # i = 0 (m = m') reads/writes the valence buffer Hv, since
                # those entries fall outside the stored wedge.
                let i=0
                    d7 = dvalues[i+i6]
                    d8 = dvalues[i+i5]
                    Hv[i+nm_index(n, mp+1)] = inverse_d5 * (
                        d6 * Hwedge[i+i2]
                        - d7 * Hv[i+nm_index(n, mp)]
                        + d8 * Hwedge[i+i4]
                    )
                end
                for i in 1:n-mp-1
                    d7 = dvalues[i+i6]
                    d8 = dvalues[i+i5]
                    Hwedge[i+i1] = inverse_d5 * (
                        d6 * Hwedge[i+i2]
                        - d7 * Hwedge[i+i3]
                        + d8 * Hwedge[i+i4]
                    )
                end
                # m = n
                let i=n-mp
                    Hwedge[i+i1] = inverse_d5 * (
                        d6 * Hwedge[i+i2]
                        - dvalues[i+i6] * Hwedge[i+i3]
                    )
                end
            end
        end
    end
end
@inbounds function _step_5!(w::WignerMatrixCalculator)
    """Recursively compute H^{m'−1, m}_{n}(β) for m'=−1,...,−n+1, m=−m',...,n using relation (50)
    resolved with respect to H^{m'−1, m}_{n}:

      d^{m'−1}_{n} H^{m'−1, m}_{n} =   d^{m'}_{n} H^{m'+1, m}_{n}
                                     + d^{m−1}_{n} H^{m', m−1}_{n}
                                     − d^{m}_{n} H^{m', m+1}_{n}

    (where the last term drops out for m=n).

    NOTE: Although arxiv:1403.7698 specifies the loop over mp to start at -1, I
    find it necessary to start at 0, or there will be missing information.  This
    also requires setting the (m',m)=(0,-1) components before beginning this loop.
    """
    dvalues, n_max, mp_max = d(w), ℓₘₐₓ(w), m′ₘₐₓ(w)
    Hwedge, Hv = w.Hwedge, w.Hv
    if n_max > 0 && mp_max > 0
        for n in 0:n_max
            for mp in 0:-1:1-min(n, mp_max)
                # m = -m', ..., n-1
                # i1 = WignerHindex(n, mp-1, -mp, mp_max)
                i1 = WignerHindex(n, mp-1, -mp+1, mp_max) - 1
                # i2 = WignerHindex(n, mp+1, -mp, mp_max)
                i2 = WignerHindex(n, mp+1, -mp+1, mp_max) - 1
                # i3 = WignerHindex(n, mp, -mp-1, mp_max)
                i3 = WignerHindex(n, mp, -mp, mp_max) - 1
                i4 = WignerHindex(n, mp, -mp+1, mp_max)
                i5 = nm_index(n, mp-1)
                i6 = nm_index(n, mp)
                i7 = nm_index(n, -mp-1)
                i8 = nm_index(n, -mp)
                inverse_d5 = inv(dvalues[i5])
                d6 = dvalues[i6]
                # i = 0 (m = -m') uses the valence buffer Hv for entries
                # outside the stored wedge; mp == 0 additionally reads Hv.
                let i=0
                    d7 = dvalues[i+i7]
                    d8 = dvalues[i+i8]
                    if mp == 0
                        Hv[i+nm_index(n, mp-1)] = inverse_d5 * (
                            d6 * Hv[i+nm_index(n, mp+1)]
                            + d7 * Hv[i+nm_index(n, mp)]
                            - d8 * Hwedge[i+i4]
                        )
                    else
                        Hv[i+nm_index(n, mp-1)] = inverse_d5 * (
                            d6 * Hwedge[i+i2]
                            + d7 * Hv[i+nm_index(n, mp)]
                            - d8 * Hwedge[i+i4]
                        )
                    end
                end
                for i in 1:n+mp-1
                    d7 = dvalues[i+i7]
                    d8 = dvalues[i+i8]
                    Hwedge[i+i1] = inverse_d5 * (
                        d6 * Hwedge[i+i2]
                        + d7 * Hwedge[i+i3]
                        - d8 * Hwedge[i+i4]
                    )
                end
                # m = n
                let i=n+mp
                    Hwedge[i+i1] = inverse_d5 * (
                        d6 * Hwedge[i+i2]
                        + dvalues[i+i7] * Hwedge[i+i3]
                    )
                end
            end
        end
    end
end
"""
    H!(w, expiβ)

Compute (a quarter of) the H matrix, returning the wedge of stored values.

Runs steps 1–5 of the Gumerov-Duraiswami recurrence (see the `_step_*!`
functions above) on the calculator `w`, evaluating at angle β given as
`expiβ = exp(i*β)`.

# Arguments
- `w::WignerMatrixCalculator`: workspace holding the precomputed recurrence
  coefficients and the output buffers (`Hwedge`, `Hextra`, `Hv`).
- `expiβ::Complex`: value of exp(i*β) at which to evaluate the H matrix.

# Returns
- `w.Hwedge`: the 1-dimensional array of H values.

!!! warning
    The returned array is the calculator's internal buffer, not a copy.
    Calling `H!` again on the same `w` overwrites it; `copy` the result first
    if you need to keep it.  It is likewise not safe to share one `w` between
    threads.

# Notes
H is related to Wigner's (small) d via

    dₗⁿᵐ = ϵₙ ϵ₋ₘ Hₗⁿᵐ,

where

         ⎧ 1 for k≤0
    ϵₖ = ⎨
         ⎩ (-1)ᵏ for k>0

H has various advantages over d, including the fact that it can be efficiently
and robustly valculated via recurrence relations, and the following symmetry
relations:

    H^{m', m}_n(β) = H^{m, m'}_n(β)
    H^{m', m}_n(β) = H^{-m', -m}_n(β)
    H^{m', m}_n(β) = (-1)^{n+m+m'} H^{-m', m}_n(π - β)
    H^{m', m}_n(β) = (-1)^{m+m'} H^{m', m}_n(-β)

Because of these symmetries, we only need to evaluate at most 1/4 of all the
elements.
"""
function H!(w::WignerMatrixCalculator, expiβ::Complex)
    _step_1!(w)
    _step_2!(w, expiβ)
    _step_3!(w, expiβ)
    _step_4!(w)
    _step_5!(w)
    w.Hwedge
end
| [
27,
7856,
261,
480,
29,
39949,
293,
14,
4561,
37910,
13,
20362,
198,
37811,
2348,
42289,
329,
14492,
367,
11,
355,
1813,
416,
610,
87,
452,
25,
1415,
3070,
13,
22,
39357,
198,
198,
39,
318,
3519,
284,
370,
570,
263,
338,
357,
1747... | 1.581861 | 9,449 |
"""
    Simulation

A simulation composes the observed trace and the backend solve of Algorithm 7.1.
A simulation instance is returned from an `optimize` call.
"""
struct Simulation
    model::Model      # problem specification being optimized
    driver::Driver    # algorithm options controlling the run
    trace::BlockOptTrace        # per-iteration records (timers, counters, logs)
    backend::BlockOptBackend    # numerical state of Algorithm 7.1

    function Simulation(model::Model, driver::Driver)
        # TODO: Handle ill-formed input
        new(model, driver, BlockOptTrace(model, driver), BlockOptBackend(model, driver))
    end
end
# Field accessors use `getfield` because `Base.getproperty` is restricted
# for `Simulation` (see below); `model`/`driver` are read via the trace.
trace(s::Simulation) = getfield(s, :trace)

model(s::Simulation) = model(trace(s))

driver(s::Simulation) = driver(trace(s))
# Timer/counter queries, all forwarded to the trace.
"""
    trs_timer(s::Simulation)

The elapsed time simulation `s` has been in `trs_solve(s)`.
"""
trs_timer(s::Simulation) = trs_timer(trace(s))


"""
    trs_counter(s::Simulation)

The count of trust-region subproblem solves for simulation `s`.
"""
trs_counter(s::Simulation) = trs_counter(trace(s))


"""
    ghs_timer(s::Simulation)

The elapsed time simulation `s` has been in `gHS(s)`.
"""
ghs_timer(s::Simulation) = ghs_timer(trace(s))


"""
    ghs_counter(s::Simulation)

The count of `gHS` evaluations for simulation `s`.
"""
ghs_counter(s::Simulation) = ghs_counter(trace(s))
# Record a value under `field` in the trace, and query the weave verbosity.
weave!(s::Simulation, field, val) = weave!(trace(s), field, val)

weave_level(s::Simulation) = weave_level(trace(s))
"""
    f_vals(s::Simulation)

A vector holding objective values ``f(xₖ)`` for each successful iterate ``xₖ``
of simulation `s`.
"""
f_vals(s::Simulation) = f_vals(trace(s))


"""
    ∇f_norms(s::Simulation)

A vector holding normed gradient values ``||∇f(xₖ)||₂`` for each successful iterate ``xₖ``
of simulation `s`.
"""
∇f_norms(s::Simulation) = ∇f_norms(trace(s))


"""
    p_norms(s::Simulation)

A vector holding the distance ``||pₖ||₂`` of each successful step ``pₖ``
of simulation `s`.
"""
p_norms(s::Simulation) = p_norms(trace(s))


"""
    Δ_vals(s::Simulation)

A vector holding the trust-region radius passed to `trs_small` in TRS.jl
during each successful step of simulation `s`.
"""
Δ_vals(s::Simulation) = Δ_vals(trace(s))


"""
    ρ_vals(s::Simulation)

A vector storing the ratio of actual reduction to model reduction of
each successful step of simulation `s`.
"""
ρ_vals(s::Simulation) = ρ_vals(trace(s))
"""
    weave(args::Simulation...)

Generates a Weave.jl report of the simulation args.

The report is written to a timestamped `trace_…` subdirectory of the first
simulation's model directory.
"""
function weave(args::Simulation...)
    println(pwd())  # NOTE(review): looks like a debugging leftover — confirm before removing
    Weave.weave(
        joinpath(dirname(pathof(BlockOpt)), "lib/trace.jmd");
        args = args,
        out_path = mkpath(
            joinpath(directory(model(first(args))), "trace_$(trunc(now(), Minute))"),
        ),
    )
end
# IO stream used for logging, forwarded from the trace.
io(s::Simulation) = io(trace(s))

# Configured logging verbosity, forwarded from the trace.
log_level(s::Simulation) = log_level(trace(s))

# Logging helpers at each severity; all forward to the trace's logger.
info!(s::Simulation, args...) = info!(trace(s), args...)
debug!(s::Simulation, args...) = debug!(trace(s), args...)
warn!(s::Simulation, args...) = warn!(trace(s), args...)
error!(s::Simulation, args...) = error!(trace(s), args...)

# Accessor for the numerical backend component (bypasses restricted getproperty).
backend(s::Simulation) = getfield(s, :backend)

# Current objective value fₖ, forwarded from the backend.
fₖ(s::Simulation) = fₖ(backend(s))

# Current gradient norm ||∇fₖ||, forwarded from the backend.
∇fₖ_norm(s::Simulation) = ∇fₖ_norm(backend(s))

# Norm of the most recent step pₖ, forwarded from the backend.
pₖ_norm(s::Simulation) = pₖ_norm(backend(s))

# Current trust-region radius, forwarded from the backend.
Δₖ(s::Simulation) = Δₖ(backend(s))

# Most recent actual/model reduction ratio, forwarded from the backend.
ρ(s::Simulation) = ρ(backend(s))

# Block dotted-field access; `@restrict` raises on any property read so users
# must go through the accessor functions above.
Base.getproperty(s::Simulation, sym::Symbol) = @restrict Simulation
Base.propertynames(s::Simulation) = ()
"""
    initialize(s::Simulation)

Performs each step up to the preliminary update
to obtain ``H₀``. Lines 1-6 of `Algorithm 7.1`.
"""
function initialize(s::Simulation)
    # Backend does the numerical setup (initial point, gradient, H₀).
    initialize(backend(s))
    # The setup performs one gHS evaluation; account for it.
    increment!(ghs_counter(s))
    # Record the initial objective, gradient norm, and radius on the trace.
    weave!(s, f_vals, fₖ(s))
    weave!(s, ∇f_norms, ∇fₖ_norm(s))
    weave!(s, Δ_vals, Δₖ(s))
    info!(s, "Simulating:", s)
    nothing
end
"""
    terminal(s::Simulation)

True if the state of `s` is terminal, as judged by the backend's stopping
test given the current number of trust-region subproblem evaluations.
"""
terminal(s::Simulation) = terminal(backend(s), evaluations(trs_counter(s)))
"""
    build_trs(s::Simulation)

Build arguments for the trs_small call to TRS.jl.
"""
function build_trs(s::Simulation)
    build_trs(backend(s))
    nothing
end

"""
    solve_trs(s::Simulation)

Solve ``aₖ`` in Equation (5.5).
"""
function solve_trs(s::Simulation)
    # Time the subproblem solve and count it toward the TRS budget.
    on!(trs_timer(s))
    solve_trs(backend(s))
    off!(trs_timer(s))
    increment!(trs_counter(s))
    nothing
end

"""
    build_trial(s::Simulation)

Build trial iterate, evaluate the objective at the trial location, and compute ``ρ``.
"""
function build_trial(s::Simulation)
    build_trial(backend(s))
    nothing
end

"""
    update_Δₖ(s::Simulation)

Updates the radius Δₖ after each trust-region subproblem solve.
"""
function update_Δₖ(s::Simulation)
    update_Δₖ(backend(s))
    nothing
end
"""
    accept_trial(s::Simulation)

Observe the value of ``ρ``, accept positive values, and then update ``xₖ, fₖ``.

Returns `true` when the backend accepts the step; on acceptance the new
objective value, radius, step norm, and ρ are recorded on the trace.
"""
function accept_trial(s::Simulation)
    if accept_trial(backend(s))
        weave!(s, f_vals, fₖ(s))
        weave!(s, Δ_vals, Δₖ(s))
        weave!(s, p_norms, pₖ_norm(s))
        weave!(s, ρ_vals, ρ(s))
        return true
    end
    return false
end
"""
    pflag(s::Simulation)

The preliminary secant update flag of the Driver of `s`; the default value is false.
"""
function pflag(s::Simulation)
    return pflag(backend(s))
end

"""
    secantQN(s::Simulation)

Performs the standard secant update for `s`'s inverse Hessian approximation ``Hₖ``.
The QN formula used is given by the Driver of `s`.
"""
function secantQN(s::Simulation)
    secantQN(backend(s))
    nothing
end

"""
    update_Sₖ(s::Simulation)

Updates the ``2w-1`` sample directions of simulation `s`.
See: `S_update_a`, `S_update_b`, `S_update_c`, `S_update_d`, `S_update_e`, `S_update_f`.
"""
function update_Sₖ(s::Simulation)
    update_Sₖ(backend(s))
    nothing
end

"""
    gHS(s::Simulation)

See Algorithm ``3.1``
"""
function gHS(s::Simulation)
    # Time the gHS evaluation, count it, and record the fresh gradient norm.
    on!(ghs_timer(s))
    gHS(backend(s))
    off!(ghs_timer(s))
    increment!(ghs_counter(s))
    weave!(s, ∇f_norms, ∇fₖ_norm(s))
    nothing
end

"""
    blockQN(s::Simulation)

Performs a block update for `s`'s inverse Hessian approximation ``Hₖ``.
The QN formula used is given by the Driver of `s`.
"""
function blockQN(s::Simulation)
    blockQN(backend(s))
    nothing
end
"""
    optimize!(simulation::Simulation)

Run the trust-region iteration on `simulation` until `terminal` reports the
stopping test is met, returning the (mutated) simulation.
"""
function optimize!(simulation::Simulation)
    initialize(simulation)
    build_trs(simulation)
    while !terminal(simulation)
        solve_trs(simulation)
        build_trial(simulation)
        if accept_trial(simulation)
            # On acceptance: optional preliminary secant update, then refresh
            # sample directions, gradient/Hessian-sample data, and the block
            # QN approximation before rebuilding the next subproblem.
            if pflag(simulation)
                secantQN(simulation)
            end
            update_Sₖ(simulation)
            gHS(simulation)
            blockQN(simulation)
            build_trs(simulation)
        end
        # Radius is updated on every iteration, accepted or not.
        update_Δₖ(simulation)
    end
    info!(simulation, "Terminating:", trace(simulation))
    return simulation
end
| [
37811,
198,
8890,
1741,
198,
198,
32,
18640,
552,
4629,
262,
6515,
12854,
290,
262,
30203,
8494,
286,
978,
42289,
767,
13,
16,
13,
198,
32,
18640,
4554,
318,
4504,
422,
281,
4600,
40085,
1096,
63,
869,
13,
220,
198,
37811,
198,
7249... | 2.374826 | 2,876 |
# Incomplete stub: intended to sample `f` on a Chebyshev grid and build/store a
# model (the TODO says via a Python call). Currently only computes the grid
# points; `f`, `ϵsub`, `ϵ`, `cellL`, `thickness`, and `filename` are unused.
function make_model(f, ϵsub, ϵ, cellL, thickness, order, lb, ub, filename)
    # TODO: call out to Python
    pts = chebpoints(order, lb, ub)
end
"""
    get_model(order, lb, ub, filename)

Read complex samples from `filename` — one `"re im"` pair per line — and
return a Chebyshev interpolant built with `chebinterp` over the box
`[lb, ub]`. (`order` is currently unused; kept for interface compatibility.)
"""
function get_model(order, lb, ub, filename)
    # Parse a single "re im" line into a complex number.
    parseline(line) = begin
        dat = split(line)
        parse(Float64, dat[1]) + parse(Float64, dat[2]) * im
    end
    # `open ... do` guarantees the file handle is closed even if parsing throws
    # (the original leaked the handle).
    vals = open(filename) do f
        [parseline(line) for line in eachline(f)]
    end
    return chebinterp(vals, lb, ub)
end
# rrule for the Chebyshev polynomial functor (ChainRulesCore reverse rule).
# TODO: support chebjacobian (or explicitly don't support it)
# TODO: support x real
function rrule(c::ChebPoly, x::AbstractVector)
    # Projector mapping cotangents back onto the type/structure of `x`.
    project_x = ProjectTo(x)
    # chebgradient returns the value and the gradient at x in one pass.
    y, Δy = chebgradient(c, x)
    # No tangent w.r.t. the functor itself; vector-Jacobian product for x.
    pullback(∂y) = NoTangent(), project_x(∂y * Δy')
    y, pullback
end
| [
8818,
787,
62,
19849,
7,
69,
11,
18074,
113,
7266,
11,
18074,
113,
11,
2685,
43,
11,
20735,
11,
1502,
11,
18360,
11,
20967,
11,
29472,
8,
198,
220,
220,
220,
1303,
16926,
46,
25,
869,
503,
284,
11361,
198,
220,
220,
220,
43344,
... | 2.380795 | 302 |
# Topology data structures and iterators for triangulated surfaces.
module SurfaceTopology
using GeometryTypes
# Shared primitives plus the alternative topology representations:
# plain, face-based, cached, and edge-based data structures.
include("primitives.jl")
include("plainds.jl")
include("faceds.jl")
include("cachedds.jl")
include("edgeds.jl")
# Topology representation types
export FaceDS, CachedDS, EdgeDS
# One-ring / neighborhood iterators
export FaceRing, VertexRing, EdgeRing
# Global element iterators
export Edges,Faces
end # module
| [
21412,
20321,
9126,
1435,
628,
198,
3500,
2269,
15748,
31431,
198,
198,
17256,
7203,
19795,
20288,
13,
20362,
4943,
198,
17256,
7203,
25638,
9310,
13,
20362,
4943,
198,
17256,
7203,
24903,
82,
13,
20362,
4943,
198,
17256,
7203,
66,
2317,
... | 2.966292 | 89 |
"""
    form(f::Function, args...; attrs...) :: HTMLString

Render an HTML `<form>` element whose children are produced by calling `f`.
Attributes are normalized by `attr`, which supplies a default `enctype` of
"multipart/form-data" when none is given.
"""
function form(f::Function, args...; attrs...) :: HTMLString
  normal_element(f, "form", [args...], attr(attrs...))
end
"""
$TYPEDSIGNATURES

Render an HTML `<form>` element wrapping the given `children` markup.
Attributes are normalized by `attr` (default `enctype` is "multipart/form-data").
"""
function form(children::Union{String,Vector{String}} = "", args...; attrs...) :: HTMLString
  normal_element(children, "form", [args...], attr(attrs...))
end
"""
    attr(attrs...)

Collect form attributes into a `Pair{Symbol,Any}` vector, appending
`:enctype => "multipart/form-data"` unless the caller already supplied an
`:enctype` entry. Returns the attribute vector.
"""
function attr(attrs...)
  normalized = Pair{Symbol,Any}[attrs...]
  has_enctype = any(p -> first(p) == :enctype, normalized)
  has_enctype || push!(normalized, :enctype => "multipart/form-data")
  normalized
end
8818,
1296,
7,
69,
3712,
22203,
11,
26498,
986,
26,
708,
3808,
23029,
7904,
11532,
10100,
198,
220,
3487,
62,
30854,
7,
69,
11,
366,
687,
1600,
685,
22046,
986,
4357,
708,
81,
7,
1078,
3808,
986,
4008,
198,
437,
198,
198,
37811,
1... | 2.671958 | 189 |
using YaoExtensions, Yao
using Test, Random
using Optim: LBFGS, optimize
using Optim
"""
    learn_u4(u::AbstractBlock; niter=100)

Learn a general U4 gate. The optimizer is LBFGS.

Fits the parameters of a `general_U4` ansatz so that its operator fidelity
with the target block `u` is maximized, and returns the trained ansatz.
"""
function learn_u4(u::AbstractBlock; niter=100)
    ansatz = general_U4()
    params = parameters(ansatz)  # NOTE(review): unused — verify whether needed
    println("initial loss = $(operator_fidelity(u,ansatz))")
    # Minimize the negative fidelity; the gradient closure writes into G using
    # the adjoint of operator_fidelity.
    optimize(x->-operator_fidelity(u, dispatch!(ansatz, x)),
        (G, x) -> (G .= -operator_fidelity'(u, dispatch!(ansatz, x))[2]),
        parameters(ansatz),
        LBFGS(),
        Optim.Options(iterations=niter))
    println("final fidelity = $(operator_fidelity(u,ansatz))")
    return ansatz
end
# Demo: learn a random 2-qubit unitary (seeded for reproducibility).
# NOTE(review): `using Random` is already loaded above — duplicate import.
using Random
Random.seed!(2)
u = matblock(rand_unitary(4))
c = learn_u4(u; niter=150)
| [
3500,
37826,
11627,
5736,
11,
37826,
198,
3500,
6208,
11,
14534,
198,
3500,
30011,
25,
22199,
37,
14313,
11,
27183,
198,
3500,
30011,
198,
198,
37811,
198,
220,
220,
220,
2193,
62,
84,
19,
7,
84,
3712,
23839,
46912,
26,
299,
2676,
2... | 2.403785 | 317 |
# Demo: label propagation on the two-circles data set, comparing results for
# several max-iteration budgets.
num_unlabel_samples = 800
Mat_Label, labels, Mat_Unlabel = loadCircleData(num_unlabel_samples)

# Four evenly spaced iteration budgets in [1, 70].
# (`linspace` was removed in Julia 1.0; `range` + broadcast `round` replaces
# the old `round(linspace(1, 70, 4))`.)
iter = round.(Int, range(1, 70; length = 4))

# Predicted labels for each iteration budget, one column per run.
res = []
for i in iter
    unlabel_data_labels = label_propagation(Mat_Label, Mat_Unlabel, labels, kernel_type = "knn", knn_num_neighbors = 10, max_iter = i)
    push!(res, unlabel_data_labels)
end
res = reduce(hcat, res)

show_example(Mat_Label, labels, Mat_Unlabel, res)
| [
22510,
62,
403,
18242,
62,
82,
12629,
796,
10460,
220,
220,
198,
19044,
62,
33986,
11,
14722,
11,
6550,
62,
3118,
18242,
796,
3440,
31560,
293,
6601,
7,
22510,
62,
403,
18242,
62,
82,
12629,
8,
220,
198,
2676,
796,
2835,
7,
21602,
... | 2.454545 | 165 |
<reponame>logankilpatrick/PopGen.jl<filename>src/Read.jl
### GenePop parsing ###
"""
    genepop(infile::String; digits::Int64 = 3, popsep::Any = "POP", numpops::Int64, marker = "snp")

Load a Genepop format file into memory as a PopObj object.
### Arguments
- `infile` : path to Genepop file
- `digits` : number of digits denoting each allele
- `popsep` : word that separates populations in `infile` (default: "POP")
- `numpops` : number of populations in `infile` (used for checking parser)
- `marker` : "snp" (default) or "msat"
### File must follow standard Genepop formatting:
- First line is a comment (and skipped)
- Loci are listed after first line as one-per-line without commas or in single comma-separated row
- A line with a particular keyword (default "POP") must delimit populations
- File is tab or space delimited
## Example
`waspsNY = genepop("wasp_hive.gen", digits = 3, popsep = "POP", numpops = 2);`
### Genepop file example:
Wasp populations in New York \n
Locus1 \n
Locus2 \n
Locus3 \n
POP \n
Oneida_01, 250230 564568 110100 \n
Oneida_02, 252238 568558 100120 \n
Oneida_03, 254230 564558 090100 \n
POP \n
Newcomb_01, 254230 564558 080100 \n
Newcomb_02, 000230 564558 090080 \n
Newcomb_03, 254230 000000 090100 \n
Newcomb_04, 254230 564000 090120 \n
"""
function genepop(
    infile::String;
    digits::Int64 = 3,
    popsep::Any = "POP",
    numpops::Int64,
    marker = "snp",
)
    println("\n", "Input File : ", abspath(infile))
    # SNP alleles fit in Int8; microsatellites need Int16.
    if lowercase(marker) == "snp"
        geno_type = Int8
    else
        geno_type = Int16
    end
    # Drop the comment line, then split the remaining lines on the population
    # separator. NOTE(review): relies on `split(::Vector, sep)` — confirm this
    # method is available on the targeted Julia version.
    gpop = split(open(readlines, infile)[2:end], popsep)
    if length(gpop) - 1 != numpops
        error("incorrect number of populations detected, see docstring for formatting
            expected : $numpops
            detected : $(length(gpop)-1) ")
    end
    # Locus names: either one comma-separated row or one name per line.
    if length(gpop[1]) == 1 # loci horizontally stacked
        locinames = strip.(split(gpop[1] |> join, ",") |> Array{String,1})
        replace!(locinames, "." => "_")
    else # loci vertically stacked
        locinames = replace(gpop[1], "." => "_")
    end
    # Map locus name => vector of genotypes (filled below, one per sample).
    d = Dict(string(i) => [] for i in locinames)
    popid = []
    indnames = []
    for i = 2:length(gpop)
        # Population index (1-based) repeated once per sample in this section.
        append!(popid, fill(i - 1, length(gpop[i])))
        for j = 1:length(gpop[i])
            phasedloci = []
            # Sample name is the first comma/tab-separated token.
            push!(indnames, split(strip(gpop[i][j]), r"\,|\t")[1])
            unphasedloci = split(strip(gpop[i][j]), r"\s|\t")[2:end] |>
                           Array{String,1}
            replace!(unphasedloci, "-9" => "0"^digits) #just in case -9 = missing
            for locus in unphasedloci
                # Split each genotype string into `digits`-wide alleles, parse,
                # and store as a sorted tuple.
                phasedlocus = parse.(
                    geno_type,
                    [join(i) for i in Iterators.partition(locus, digits)],
                ) |> sort |> Tuple
                push!(phasedloci, phasedlocus)
            end
            for (loc, geno) in zip(locinames, phasedloci)
                push!(d[loc], geno)
            end
        end
    end
    ploidy = length.(d[locinames[1]]) # lazy finding of ploidy from single locus
    # Replace all-zero genotypes (either encoding) with `missing`.
    for (loc, ploid) in zip(locinames, ploidy)
        miss_geno = fill(0, ploid) |> Tuple
        msat_miss_geno = ("0")
        replace!(d[loc], miss_geno => missing)
        replace!(d[loc], msat_miss_geno => missing)
    end
    # typesafe genotype DataFrame
    loci_df = DataFrame([Symbol(i) => Array{Union{Tuple,Missing},1}(d[i]) for i in locinames])
    samples_df = DataFrame(
        name = string.(indnames),
        population = string.(popid),
        ploidy = Int8.(ploidy),
        longitude = fill(missing, length(indnames)),
        latitude = fill(missing, length(indnames)),
    )
    PopObj(samples_df, loci_df)
end
# Alternative line-by-line reader
## NOT EXPORTED
#=
function gpop2(infile::String; digits::Int64 = 3, popsep::Any = "POP", numpops::Int64)
println("\n", "Input File : ", abspath(infile))
popid = []
indnames = []
locinames = []
d = Dict()
linenum = 1
popcount = 0
open(infile) do file
for ln in eachline(file)
if popcount == 0
if linenum == 1
linenum += 1
continue
elseif linenum == 2
if occursin(",", ln) == true #loci are horizontally stacked
append!(locinames, strip.(split(ln |> join, ",") |> Array{String,1}))
replace!(locinames, "." => "_")
linenum += 1
continue
else # loci are vertically stacked
push!(locinames, ln)
linenum += 1
continue
end
else
if ln == popsep
popcount += 1
continue
else
push!(locinames, ln)
continue
end
end
end
if ln == popsep
popcount += 1
continue
else
phasedloci = []
push!(indnames, split(strip(ln), r"\,|\t")[1])
push!(popid, popcount)
unphasedloci = split(strip(ln), r"\s|\t")[2:end] |> Array{String,1}
for locus in unphasedloci
phasedlocus = parse.(Int16,[join(i) for i in Iterators.partition(locus,digits)]) |> sort |> Tuple
push!(phasedloci, phasedlocus)
end
if locinames[1] ∉ keys(d)
[d[i] = [] for i in locinames]
end
for (loc,geno) in zip(locinames, phasedloci)
push!(d[loc], geno)
end
end
end
end
ploidy = length.(d[locinames[1]]) # lazy finding of ploidy from single locus
for (loc, ploid) in zip(locinames, ploidy)
miss_geno = fill(0,ploid) |> Tuple
replace!(d[loc], miss_geno => missing)
end
loci_df = DataFrame([i = Array{Union{Tuple, Missing},1}(d[i]) for i in locinames])
names!(loci_df, Symbol.(locinames))
samples_df = DataFrame(name = string.(indnames),
population = popid,
ploidy = Int8.(ploidy),
longitude = fill(missing,length(indnames)),
latitude = fill(missing,length(indnames)))
PopObj(samples_df, loci_df)
end
=#
### CSV parsing ###
"""
    csv(infile::String; delim::Union{Char,String,Regex}, digits::Int64 = 3, location::Bool = false)

Load a CSV-type file into memory as a PopObj object
### Arguments
- `infile` : path to CSV file
- `delim` : delimiter characters. default is comma (","), can be space (" "), tab ("\\t"), etc.
- `digits` : number of digits denoting each allele
- `marker` : "snp" (default) or "msat"
- `location` : decimal degrees longitude/latitude provided as values 3/4
### File formatting:
- Loci names must be first row
- Individuals names must be first value in row
- Population ID's must be second value in row
- [Optional] longitude (x) values third value in row, latitude (y) fourth
## Example
`lizardsCA = Read.csv("CA_lizards.csv", delim = ",", digits = 3);`
### Formatting example:
Locus1,Locus2,Locus3 \n
sierra_01,mountain,001001,002002,001001 \n
sierra_02,mountain,001001,001001,001002 \n
snbarb_03,coast,001001,001001,001002 \n
snbarb_02,coast,001001,001001,001001 \n
snbarb_03,coast,001002,001001,001001 \n
"""
function csv(
    infile::String;
    delim::Union{Char,String,Regex} = ",",
    digits::Int = 3,
    marker = "snp",
    location::Bool = false,
)
    println("\n", "Input File : ", abspath(infile))
    popid = []
    indnames = []
    locx = []
    locy = []
    locinames = []
    d = Dict()
    linenum = 1
    # SNP alleles fit in Int8; microsatellites need Int16.
    if lowercase(marker) == "snp"
        geno_type = Int8
    else
        geno_type = Int16
    end
    file = open(infile, "r") #do file
    for ln in eachline(file)
        # Header row: locus names (sanitize "." which clashes with DataFrames).
        if linenum == 1
            loci_raw = split(ln, delim)
            loci_safe = replace(loci_raw, "." => "_")
            append!(locinames, loci_safe)
            # Comprehension used for its side effect: one empty column per locus.
            [d[string(i)] = [] for i in locinames]
            linenum += 1
            continue
        end
        if location == false
            tmp = split(ln, delim) |> Array{String,1}
            # phase genotypes by ploidy
            phasedloci = []
            for locus in tmp[3:end]
                phasedlocus = parse.(
                    geno_type,
                    [join(i) for i in Iterators.partition(locus, digits)],
                ) |> sort |> Tuple
                push!(phasedloci, phasedlocus)
            end
            for (loc, geno) in zip(locinames, phasedloci)
                push!(d[loc], geno)
            end
            push!(indnames, tmp[1])
            push!(popid, tmp[2])
            push!(locx, missing)
            push!(locy, missing)
        else
            # With coordinates: columns 3/4 are longitude/latitude, genotypes
            # start at column 5.
            tmp = split(ln, delim) |> Array{String,1}
            replace!(tmp, "-9" => "0"^digits) #just in case -9 = missing
            phasedloci = []
            for locus in tmp[5:end]
                # NOTE(review): this branch hard-codes Int16 instead of using
                # `geno_type`, unlike the no-location branch — verify intent.
                phasedlocus = parse.(
                    Int16,
                    [join(i) for i in Iterators.partition(locus, digits)],
                ) |> sort |> Tuple
                push!(phasedloci, phasedlocus)
            end
            for (loc, geno) in zip(locinames, phasedloci)
                push!(d[loc], geno)
            end
            push!(indnames, tmp[1])
            push!(popid, tmp[2])
            push!(locx, parse.(Float64, tmp[3]))
            push!(locy, parse.(Float64, tmp[4]))
        end
    end
    close(file)
    ploidy = length.(d[locinames[1]]) # lazy finding of ploidy from single locus
    # Replace all-zero genotypes (either encoding) with `missing`.
    for (loc, ploid) in zip(locinames, ploidy)
        miss_geno = fill(0, ploid) |> Tuple
        msat_miss_geno = ("0")
        replace!(d[loc], miss_geno => missing)
        replace!(d[loc], msat_miss_geno => missing)
    end
    # typesafe genotype DataFrame
    loci_df = DataFrame([Symbol(i) => Array{Union{Tuple,Missing},1}(d[i]) for i in locinames])
    samples_df = DataFrame(
        name = string.(indnames),
        population = string.(popid),
        ploidy = Int8.(ploidy),
        longitude = locx,
        latitude = locy,
    )
    PopObj(samples_df, loci_df)
end
### VCF parsing ###
"""
    vcf(infile::String)

Load a VCF file into memory as a PopObj object. Population and [optional]
location information need to be provided separately.
- `infile` : path to VCF file
"""
function vcf(infile::String)
    vcf_file = VCF.Reader(open(infile, "r"))

    # get sample names from header
    sample_names = header(vcf_file).sampleID

    # fill in pop/lat/long with missing
    population = fill(missing, length(sample_names))
    lat = fill(missing, length(sample_names))
    long = fill(missing, length(sample_names))

    # locus names ("<chrom>_<pos>", sanitized) and locus => genotypes map
    locinames = String[]
    d = Dict()

    for record in vcf_file
        # "." and "|" clash with DataFrames column naming; replace with "_".
        chr_safe = replace(VCF.chrom(record), "." => "_")
        chr_safer = replace(chr_safe, "|" => "_")
        pos = VCF.pos(record) |> string
        push!(locinames, chr_safer*"_"*pos)
        geno_raw = [split(i, ('/', '|')) for i in VCF.genotype(record, :, "GT")] |> sort
        # change missing data "." to "-1"
        geno_corr_miss = [replace(i, "." => "-1") for i in geno_raw]
        # convert everything to an integer
        geno_int = [parse.(Int8, i) for i in geno_corr_miss]
        # add 1 to shift genos so 0 is 1 and -1 is 0
        geno_shift = [i .+ Int8(1) for i in geno_int]
        geno_final = [replace(i, 0 => missing) for i in geno_shift]
        geno_tuple = [Tuple(i) for i in geno_final]
        d[locinames[end]] = geno_tuple
    end
    close(vcf_file)

    # lazy finding of ploidy from the first locus
    ploidy = length.(d[locinames[1]])

    # typesafe genotype DataFrame, built Pair-wise for consistency with
    # `genepop`/`csv` (replaces the deprecated `names!` two-step construction)
    loci_df = DataFrame([Symbol(i) => Array{Union{Tuple,Missing},1}(d[i]) for i in locinames])
    samples_df = DataFrame(name = sample_names,
                        population = population,
                        ploidy = Int8.(ploidy),
                        latitude = lat,
                        longitude = long)
    PopObj(samples_df, loci_df)
end
| [
27,
7856,
261,
480,
29,
6404,
962,
346,
29615,
14,
16979,
13746,
13,
20362,
27,
34345,
29,
10677,
14,
5569,
13,
20362,
198,
21017,
13005,
16979,
32096,
44386,
198,
37811,
198,
220,
220,
220,
2429,
538,
404,
7,
259,
7753,
3712,
10100,
... | 1.9652 | 6,408 |
<reponame>ErikQQY/ClimateMachine.jl<gh_stars>100-1000
import ..ShallowWater: forcing_term!
# Coupled-mode forcing for the shallow-water model: accumulate the coupler-
# provided tendency `A.Gᵁ` into the momentum state `S.U`. `Q` and `t` are part
# of the forcing interface but unused here.
@inline function forcing_term!(::SWModel, ::Coupled, S, Q, A, t)
    S.U += A.Gᵁ
    return nothing
end
| [
27,
7856,
261,
480,
29,
36,
12602,
48,
48,
56,
14,
37649,
37573,
13,
20362,
27,
456,
62,
30783,
29,
3064,
12,
12825,
198,
11748,
11485,
2484,
12154,
19184,
25,
10833,
62,
4354,
0,
198,
198,
31,
45145,
2163,
10833,
62,
4354,
0,
7,
... | 2.373494 | 83 |
<filename>test/runtests.jl
using RandomMatrix, LinearAlgebra
using Test
# Smoke tests: constructors return something for basic inputs.
@test randDiagonal(2) !== nothing
@test randTriangular(1:3,3,upper=false,Diag=false) !== nothing
# A generic random matrix has complex eigenvalues; a Hermitian one is real.
@test !isreal(randMatrix(10)|>eigvals)
@test isreal(randHermitian(3)|>eigvals)
@test randHermitian(ComplexNormal(im,2),3,diag=Elliptic(0.1,c=im,R=9)) !==nothing
# Marchenko–Pastur density vanishes at 0.
@test pdf(MarchenkoPastur(rand()),0) == 0
27,
34345,
29,
9288,
14,
81,
2797,
3558,
13,
20362,
198,
3500,
14534,
46912,
11,
44800,
2348,
29230,
198,
3500,
6208,
198,
198,
31,
9288,
43720,
18683,
27923,
7,
17,
8,
5145,
855,
2147,
198,
31,
9288,
43720,
14824,
21413,
7,
16,
25,... | 2.45098 | 153 |
<reponame>emmt/Cairo.jl
## header to provide surface and context
using Cairo
c = CairoRGBSurface(256,256);
cr = CairoContext(c);

save(cr);
set_source_rgb(cr,0.8,0.8,0.8);    # light gray
rectangle(cr,0.0,0.0,256.0,256.0); # background
fill(cr);
restore(cr);

save(cr);

## original example, following here
# Clip to a centered circle, then fill the whole canvas (only the disk shows).
arc(cr, 128.0, 128.0, 76.8, 0, 2 * pi);
clip(cr);

new_path(cr); # current path is not consumed by cairo_clip()
rectangle(cr, 0, 0, 256, 256);
fill(cr);
# Draw a green X across the canvas; the clip restricts it to the disk.
set_source_rgb(cr, 0, 1, 0);
move_to(cr, 0, 0);
line_to(cr, 256, 256);
move_to(cr, 256, 0);
line_to(cr, 0, 256);
set_line_width(cr, 10.0);
stroke(cr);

## mark picture with current date
restore(cr);
move_to(cr,0.0,12.0);
set_source_rgb(cr, 0,0,0);
show_text(cr,Libc.strftime(time()));
write_to_png(c,"sample_clip.png");
| [
27,
7856,
261,
480,
29,
368,
16762,
14,
34,
18131,
13,
20362,
198,
2235,
13639,
284,
2148,
4417,
290,
4732,
198,
3500,
23732,
198,
66,
796,
23732,
48192,
4462,
333,
2550,
7,
11645,
11,
11645,
1776,
198,
6098,
796,
23732,
21947,
7,
6... | 2.234957 | 349 |
# Read the image format written by RADMC3D.
# Read the ascii text in `image.out` and parse things into a 3 dimensional matrix (x, y, lambda)
# The first four lines are format information
# iformat # = 1 (2 is local observer)
# im_nx im_ny #number of pixels in x and y directions
# nlam # number of images at different wavelengths
# pixsize_x pixsize_y # size of the pixels in cm
# lambda[1] ... lambda[nlam + 1] # wavelengths (um) correspending to images
# pixels, ordered from left to right (increasing x) in the inner loop, and from bottom to top (increasing y) in the outer loop. And wavelength is the outermost loop.
"The image module contains various data types for reading and holding images produced by the radiative transfer programs (via `RADMC-3D`), as well as routines for processing these images."
module image

export imread, imToSky, imToSpec, SkyImage, -
export taureadImg, taureadPos

using ..constants

# import Images # The Images.jl package, not affiliated w/ DiskJockey
# using Dierckx
# NOTE(review): `integrateSpec` below uses `Spline1D`/`integrate` from Dierckx,
# whose `using` is commented out here — confirm where it is loaded.

import Base.- # extend this for Image

# Define an image type, which can store the data as well as pixel spacing
abstract type Image end
"
    RawImage(data, pixsize_x, pixsize_y, lams)
Hold the raw output from `RADMC-3D` in a 3D array (npix_y, npix_x, nlam).
RawImage reflects the RADMC convention that both x and y are increasing
with array index. This means that to display the image as RADMC intends it,
you must set the first array element to the lower left corner."
mutable struct RawImage <: Image
    data::Array{Float64,3} # intensities [ergs/s/cm^2/Hz/ster], indexed [y, x, lam]
    pixsize_x::Float64 # pixel width [cm]
    pixsize_y::Float64 # pixel height [cm]
    lams::Vector{Float64} # wavelengths [μm]
end
"SkyImage is a holder that has both RA and DEC increasing with array index
This convention is necessary for the FFT step
However, to display this image in the traditional sky convention (North up,
East to the left), you must set the first array element to the lower left
corner *and* flip the array along the RA axis: `fliplr(data)` or flipdim(data, 2)"
mutable struct SkyImage <: Image
    data::Array{Float64,3} # intensities [Jy/pixel], indexed [dec, ra, lam]
    ra::Vector{Float64} # relative right ascension offsets [arcsec]
    dec::Vector{Float64} # relative declination offsets [arcsec]
    lams::Vector{Float64} # wavelengths [μm]
end
"TausurfImage is designed to hold the results of the RADMC-3D `tausurf` operation. This is
The distance in cm above or below the plane tangent to the observer, which intersects the origin of the model.

From the RADMC3D manual:
The image output file image.out will now contain, for each pixel, the position along the ray in centimeters where τ = τs. The zero point is the surface perpendicular to the direction of observation, going through the pointing position (which is, by default the origin (0, 0, 0)). Positive values mean that the surface is closer to the observer than the plane, while negative values mean that the surface is behind the plane.

So for this datastructure, it's the same thing as RawImage, just instead of intensity, we have distance above/below plane."
mutable struct TausurfImg <: Image
    data::Array{Float64,3} # cm above/behind central projected plane of disk
    pixsize_x::Float64 # pixel width [cm]
    pixsize_y::Float64 # pixel height [cm]
    lams::Vector{Float64} # wavelengths [μm]
end
"Encapsulates the 3D position of the pixels representing the tau=1 surface, in the same datashape as the image.
For each pixel, this is the x, y, or z position."
mutable struct TausurfPos
    data_x::Array{Float64,3} # x position of the τ=1 surface per pixel [cm]
    data_y::Array{Float64,3} # y position [cm]
    data_z::Array{Float64,3} # z position [cm]
    lams::Vector{Float64} # wavelengths [μm]
end
"Subtraction for `SkyImage`s: pixel-wise difference of two images that share
the same sky coordinates (wavelengths are taken from the first image)."
function -(img1::SkyImage, img2::SkyImage)
    @assert isapprox(img1.ra, img2.ra) "Images must have same RA coordinates."
    @assert isapprox(img1.dec, img2.dec) "Images must have same DEC coordinates."
    return SkyImage(img1.data - img2.data, img1.ra, img1.dec, img1.lams)
end
"SkyImage constructor for just a single frame: wraps a 2D matrix into the
(ny, nx, 1) shape and a one-element wavelength vector."
SkyImage(data::Matrix{Float64}, ra::Vector{Float64}, dec::Vector{Float64}, lam::Float64) =
    SkyImage(reshape(data, tuple(size(data)..., 1)), ra, dec, [lam])
"
    imread(file=\"image.out\")
Read a RADMC-3D image file (default=image.out) and return it as a RawImage
object, which contains the intensities in erg s^-1 cm^-2 Hz^-1 ster^-1, the
pixel sizes in cm, and the wavelengths (in microns) corresponding to the images."
function imread(file = "image.out")
    # `open ... do` guarantees the handle is closed even if parsing throws.
    open(file, "r") do fim
        iformat = parse(Int, readline(fim)) # = 1 (2 would be local observer)
        im_nx, im_ny = split(readline(fim))
        im_nx = parse(Int, im_nx)
        im_ny = parse(Int, im_ny)
        nlam = parse(Int, readline(fim))
        pixsize_x, pixsize_y = split(readline(fim))
        pixsize_x = parse(Float64, pixsize_x)
        pixsize_y = parse(Float64, pixsize_y)

        # Read the wavelength array [μm]
        lams = Array{Float64}(undef, nlam)
        for i = 1:nlam
            lams[i] = parse(Float64, readline(fim))
        end

        # Pixels are ordered left to right (increasing x) in the inner loop and
        # bottom to top (increasing y) in the outer loop; wavelength is the
        # outermost loop (RADMC manual A.15). We pack as data[y, x, lam], so
        # display with origin=lower.
        data = Array{Float64}(undef, im_ny, im_nx, nlam)
        for k = 1:nlam
            readline(fim) # blank separator line before each frame
            for j = 1:im_ny
                for i = 1:im_nx
                    data[j,i,k] = parse(Float64, readline(fim))
                end
            end
        end

        # Units are *intensity* [erg cm−2 s−1 Hz−1 ster−1].
        return RawImage(data, pixsize_x, pixsize_y, lams)
    end
end
"
    taureadImg(file=\"image_tausurf.out\")
Like imread, but for tausurf. Pixels that have no ``\\tau`` surface are set to `NaN`."
function taureadImg(file = "image_tausurf.out")
    # `open ... do` guarantees the handle is closed even if parsing throws.
    open(file, "r") do fim
        iformat = parse(Int, readline(fim))
        im_nx, im_ny = split(readline(fim))
        im_nx = parse(Int, im_nx)
        im_ny = parse(Int, im_ny)
        nlam = parse(Int, readline(fim))
        pixsize_x, pixsize_y = split(readline(fim))
        pixsize_x = parse(Float64, pixsize_x)
        pixsize_y = parse(Float64, pixsize_y)

        # Read the wavelength array (post-0.7 `undef` constructor, consistent
        # with `imread`; the old `Array{Float64}(nlam)` form errors on Julia ≥ 1.0)
        lams = Array{Float64}(undef, nlam)
        for i = 1:nlam
            lams[i] = parse(Float64, readline(fim))
        end

        data = Array{Float64}(undef, im_ny, im_nx, nlam)
        for k = 1:nlam
            readline(fim) # blank separator line before each frame
            for j = 1:im_ny
                for i = 1:im_nx
                    val = parse(Float64, readline(fim))
                    # RADMC-3D writes -1e91 when a pixel has no tau=1 surface;
                    # store NaN there instead.
                    if isapprox(val, -1e91)
                        data[j,i,k] = NaN
                    else
                        data[j,i,k] = val
                    end
                end
            end
        end

        return TausurfImg(data, pixsize_x, pixsize_y, lams)
    end
end
"Read the (x,y,z) positions of the ``\\tau=1`` pixels."
function taureadPos(file = "tausurface_3d.out")
    # `open ... do` guarantees the handle is closed even if parsing throws.
    open(file, "r") do fim
        iformat = parse(Int, readline(fim))
        im_nx, im_ny = split(readline(fim))
        im_nx = parse(Int, im_nx)
        im_ny = parse(Int, im_ny)
        nlam = parse(Int, readline(fim))

        # Read the wavelength array (post-0.7 `undef` constructor, consistent
        # with `imread`)
        lams = Array{Float64}(undef, nlam)
        for i = 1:nlam
            lams[i] = parse(Float64, readline(fim))
        end

        data_x = Array{Float64}(undef, im_ny, im_nx, nlam)
        data_y = Array{Float64}(undef, im_ny, im_nx, nlam)
        data_z = Array{Float64}(undef, im_ny, im_nx, nlam)

        # In contrast to the other image formats, there is only a single blank
        # line before the pixel data, not one between each wavelength.
        readline(fim) # blank separator line
        for k = 1:nlam
            for j = 1:im_ny
                for i = 1:im_nx
                    val_x, val_y, val_z = split(readline(fim))
                    val_x = parse(Float64, val_x)
                    val_y = parse(Float64, val_y)
                    val_z = parse(Float64, val_z)
                    # -1e91 in any coordinate flags "no tau=1 surface" => NaN
                    if isapprox(val_x, -1e91)
                        data_x[j,i,k] = NaN
                        data_y[j,i,k] = NaN
                        data_z[j,i,k] = NaN
                    else
                        data_x[j,i,k] = val_x
                        data_y[j,i,k] = val_y
                        data_z[j,i,k] = val_z
                    end
                end
            end
        end

        return TausurfPos(data_x, data_y, data_z, lams)
    end
end
"
    imToSky(img::RawImage, dpc::Float64)
Convert a `RawImage` to a `SkyImage`. Assumes dpc is parsecs."
function imToSky(img::RawImage, dpc::Float64)
    # The RawImage is oriented with North up and East increasing to the left.
    # this means that for the RawImage, the delta RA array goes from + to -
    # However, the SkyImage actually requires RA (ll) in increasing form.
    # Therefore we flip along the RA axis, fliplr(data) or flipdim(data, 2)

    # convert from ergs/s/cm^2/Hz/ster to to Jy/ster
    conv = 1e23 # [Jy/ster]

    # Flip across RA dimension
    dataJy = reverse(img.data, dims = 2) .* conv

    (im_ny, im_nx) = size(dataJy)[1:2] # y and x dimensions of the image

    # The locations of pixel centers in cm
    # if n_x = 16, goes [-7.5, -6.5, ..., -0.5, 0.5, ..., 6.5, 7.5] * pixsize
    xx = ((Float64[i for i = 0:im_nx - 1] .+ 0.5) .- im_nx / 2.) * img.pixsize_x
    yy = ((Float64[i for i = 0:im_ny - 1] .+ 0.5) .- im_ny / 2.) * img.pixsize_y

    # The locations of the pixel centers in relative arcseconds
    # Note both RA and DEC increase with array index.
    ra = xx ./ (AU * dpc)
    dec = yy ./ (AU * dpc)

    return SkyImage(dataJy, ra, dec, img.lams)
end
"Integrate every frame of a `SkyImage` over the sky to produce a spatially-
integrated spectrum: a matrix whose first column is wavelength and second is flux."
function imToSpec(img::SkyImage)
    # SkyImage pixels are Jy/ster; the pixel solid angle [ster] converts
    # each one to Jy/pixel.
    dRA = abs(img.ra[2] - img.ra[1]) * arcsec
    dDEC = abs(img.dec[2] - img.dec[1]) * arcsec
    # Sum the per-pixel fluxes over both sky axes, one total per channel.
    flux = vec(sum(img.data .* dRA .* dDEC, dims = (1, 2)))
    return hcat(img.lams, flux)
end
"Calculate the integrated line flux"
function integrateSpec(spec::Matrix{Float64}, lam0::Float64)
    # First column is wl, second is flux
    wl = spec[:,1]  # NOTE(review): unused — `vs` is recomputed from spec[:,1] below
    fl = spec[:,2]
    # Convert wl to velocities [km/s] relative to the line center lam0
    vs = c_kms * (spec[:,1] .- lam0) / lam0
    # Ensure velocities are ascending before fitting the spline.
    if vs[2] - vs[1] < 0
        reverse!(vs)
        reverse!(fl)
    end
    # NOTE(review): Spline1D/integrate come from Dierckx, whose `using` is
    # commented out at the top of this module — confirm it is loaded elsewhere.
    spl = Spline1D(vs, fl)
    tot = integrate(spl, vs[1], vs[end])
    return tot
end
"Storage for zeroth moment map: a frequency-integrated image."
mutable struct ZerothMoment <: Image
    data::Array{Float64,2} # [Jy · Hz / pixel]
    ra::Vector{Float64} # [arcsec]
    dec::Vector{Float64} # [arcsec]
end
"
    convert(::Type{ZerothMoment}, img::SkyImage)
Convert a `SkyImage` to a `ZerothMoment` map by summing over the frequency axis."
function convert(::Type{ZerothMoment}, img::SkyImage)
    # Sum along the frequency axis. `dropdims`/`dims=` replaces the pre-1.0
    # `squeeze(sum(img.data, (3)), 3)` form (cf. `imToSpec` above).
    data = dropdims(sum(img.data, dims = 3), dims = 3)
    return ZerothMoment(data, img.ra, img.dec)
end
end # model
| [
2,
4149,
262,
2939,
5794,
3194,
416,
33540,
9655,
18,
35,
13,
198,
198,
2,
4149,
262,
355,
979,
72,
2420,
287,
4600,
9060,
13,
448,
63,
290,
21136,
1243,
656,
257,
513,
38517,
17593,
357,
87,
11,
331,
11,
37456,
8,
198,
198,
2,
... | 2.425889 | 5,060 |
<filename>sample.jl
using GeneratorsX
# A generator that flattens one level of nesting: yields each element `x`
# of each inner collection `y` in `xs`.
@generator function f(xs)
    for y in xs
        for x in y
            @yield x
        end
    end
end
# Materialize the flattened sequence: [1, 2, 3, 4, 5]
collect(f([[1], [2, 3], [4, 5]]))
| [
27,
34345,
29,
39873,
13,
20362,
198,
3500,
2980,
2024,
55,
198,
31,
8612,
1352,
2163,
277,
7,
34223,
8,
198,
220,
220,
220,
329,
331,
287,
2124,
82,
198,
220,
220,
220,
220,
220,
220,
220,
329,
2124,
287,
331,
198,
220,
220,
22... | 1.913978 | 93 |
module CommonSubexpressions

export @cse, cse

# Mutable state threaded through common-subexpression elimination.
struct Cache
    args_to_symbol::Dict{Symbol, Symbol}  # combined call signature => gensym holding its value
    disqualified_symbols::Set{Symbol}     # symbols that were assigned to, so may change value
    setup::Vector{Expr}                   # hoisted `sym = expr` assignments, in order
end

# Fresh, empty cache.
Cache() = Cache(Dict{Symbol,Symbol}(), Set{Symbol}(), Vector{Expr}())
function add_element!(cache::Cache, name, expr::Expr)
sym = gensym(expr.args[1])
cache.args_to_symbol[name] = sym
push!(cache.setup, :($sym = $(expr)))
sym
end
disqualify!(cache::Cache, x) = nothing
disqualify!(cache::Cache, s::Symbol) = push!(cache.disqualified_symbols, s)
disqualify!(cache::Cache, expr::Expr) = foreach(arg -> disqualify!(cache, arg), expr.args)
# fallback for non-Expr arguments
combine_subexprs!(setup, expr, warn_enabled::Bool) = expr
const standard_expression_forms = Set{Symbol}(
(:call,
:block,
:comprehension,
:(=>),
:(:),
:(&),
:(&&),
:(|),
:(||),
:tuple,
:for,
:ref,
:macrocall,
Symbol("'")))
const assignment_expression_forms = Set{Symbol}(
(:(=),
:(+=),
:(-=),
:(*=),
:(/=)))
function combine_subexprs!(cache::Cache, expr::Expr, warn_enabled::Bool)
if expr.head == :function
# We can't continue CSE through a function definition, but we can
# start over inside the body of the function:
for i in 2:length(expr.args)
expr.args[i] = combine_subexprs!(expr.args[i], warn_enabled)
end
elseif expr.head == :line
# nothing
elseif expr.head in assignment_expression_forms
disqualify!(cache, expr.args[1])
for i in 2:length(expr.args)
expr.args[i] = combine_subexprs!(cache, expr.args[i], warn_enabled)
end
elseif expr.head == :generator
for i in vcat(2:length(expr.args), 1)
expr.args[i] = combine_subexprs!(cache, expr.args[i], warn_enabled)
end
elseif expr.head in standard_expression_forms
for (i, child) in enumerate(expr.args)
expr.args[i] = combine_subexprs!(cache, child, warn_enabled)
end
if expr.head == :call
for (i, child) in enumerate(expr.args)
expr.args[i] = combine_subexprs!(cache, child, warn_enabled)
end
if all(!isa(arg, Expr) && !(arg in cache.disqualified_symbols) for arg in expr.args)
combined_args = Symbol(expr.args...)
if !haskey(cache.args_to_symbol, combined_args)
sym = add_element!(cache, combined_args, expr)
else
sym = cache.args_to_symbol[combined_args]
end
return sym
else
end
end
else
warn_enabled && warn("CommonSubexpressions can't yet handle expressions of this form: $(expr.head)")
end
return expr
end
combine_subexprs!(x, warn_enabled::Bool = true) = x
function combine_subexprs!(expr::Expr, warn_enabled::Bool)
cache = Cache()
expr = combine_subexprs!(cache, expr, warn_enabled)
Expr(:block, cache.setup..., expr)
end
macro cse(expr, warn_enabled::Bool = true)
result = combine_subexprs!(expr, warn_enabled)
# println(result)
esc(result)
end
cse(expr, warn_enabled::Bool = true) = combine_subexprs!(copy(expr), warn_enabled)
end
| [
21412,
8070,
7004,
42712,
507,
198,
198,
39344,
2488,
66,
325,
11,
269,
325,
198,
198,
7249,
34088,
198,
220,
220,
220,
26498,
62,
1462,
62,
1837,
23650,
3712,
35,
713,
90,
13940,
23650,
11,
38357,
92,
198,
220,
220,
220,
40650,
62,... | 2.191561 | 1,493 |
<filename>src/transition_parsing/systems/listbased.jl
"""
ListBasedNonProjective()
Transition system for list-based non-projective dependency parsing.
Described in Nivre 2008, "Algorithms for Deterministic Incremental Dependency Parsing."
"""
struct ListBasedNonProjective <: AbstractTransitionSystem end
initconfig(s::ListBasedNonProjective, graph::DependencyTree) =
ListBasedNonProjectiveConfig(graph)
initconfig(s::ListBasedNonProjective, deptype, words) =
ListBasedNonProjectiveConfig{deptype}(words)
projective_only(::ListBasedNonProjective) = false
transition_space(::ListBasedNonProjective, labels=[]) =
isempty(labels) ? [LeftArc(), RightArc(), NoArc(), Shift()] :
[LeftArc.(labels)..., RightArc.(labels)..., NoArc(), Shift()]
struct ListBasedNonProjectiveConfig{T} <: AbstractParserConfiguration{T}
λ1::Vector{Int} # right-headed
λ2::Vector{Int} # left-headed
β::Vector{Int}
A::Vector{T}
end
function ListBasedNonProjectiveConfig{T}(words::Vector{String}) where {T}
λ1 = [0]
λ2 = Int[]
β = 1:length(words)
A = [unk(T, id, w) for (id,w) in enumerate(words)]
ListBasedNonProjectiveConfig{T}(λ1, λ2, β, A)
end
function ListBasedNonProjectiveConfig{T}(gold::DependencyTree) where {T}
λ1 = [0]
λ2 = Int[]
β = 1:length(gold)
A = [dep(token, head=-1) for token in gold]
ListBasedNonProjectiveConfig{T}(λ1, λ2, β, A)
end
ListBasedNonProjectiveConfig(gold::DependencyTree) =
ListBasedNonProjectiveConfig{eltype(gold)}(gold)
buffer(cfg::ListBasedNonProjectiveConfig) = cfg.β
token(cfg::ListBasedNonProjectiveConfig, i) = iszero(i) ? root(deptype(cfg)) :
i == -1 ? noval(deptype(cfg)) :
cfg.A[i]
tokens(cfg::ListBasedNonProjectiveConfig) = cfg.A
tokens(cfg::ListBasedNonProjectiveConfig, is) = [token(cfg, i) for i in is]
function leftarc(cfg::ListBasedNonProjectiveConfig, args...; kwargs...)
λ1, i = cfg.λ1[1:end-1], cfg.λ1[end]
j, β = cfg.β[1], cfg.β[2:end]
A = copy(cfg.A)
i != 0 && (A[i] = dep(A[i], args...; head=j, kwargs...))
ListBasedNonProjectiveConfig(λ1, [i ; cfg.λ2], [j ; β], A)
end
function rightarc(cfg::ListBasedNonProjectiveConfig, args...; kwargs...)
λ1, i = cfg.λ1[1:end-1], cfg.λ1[end]
j, β = cfg.β[1], cfg.β[2:end]
A = copy(cfg.A)
A[j] = dep(A[j], args...; head=i, kwargs...)
ListBasedNonProjectiveConfig(λ1, [i ; cfg.λ2], [j ; β], A)
end
function noarc(cfg::ListBasedNonProjectiveConfig)
λ1, i = cfg.λ1[1:end-1], cfg.λ1[end]
λ2, β, A = cfg.λ2, cfg.β, cfg.A
ListBasedNonProjectiveConfig(λ1, [i ; λ2], β, A)
end
function shift(cfg::ListBasedNonProjectiveConfig)
λ1, λ2 = cfg.λ1, cfg.λ2
i, β = cfg.β[1], cfg.β[2:end]
ListBasedNonProjectiveConfig([λ1 ; λ2 ; i], Int[], β, cfg.A)
end
function isfinal(cfg::ListBasedNonProjectiveConfig)
return all(a -> head(a) != -1, tokens(cfg)) && length(cfg.λ1) == length(cfg.A) + 1 &&
length(cfg.λ2) == 0 && length(cfg.β) == 0
end
"""
static_oracle(::ListBasedNonProjectiveConfig, tree)
Return a training oracle function which returns gold transition
operations from a parser configuration with reference to `graph`.
"""
function static_oracle(cfg::ListBasedNonProjectiveConfig, tree, arc=untyped)
l = i -> arc(tree[i])
if length(cfg.λ1) >= 1 && length(cfg.β) >= 1
i, λ1 = cfg.λ1[end], cfg.λ1[1:end-1]
j, β = cfg.β[1], cfg.β[2:end]
if !iszero(i) && head(tree, i) == j
return LeftArc(l(i)...)
elseif head(tree, j) == i
return RightArc(l(j)...)
end
j_deps = dependents(tree, j)
if (!(any(x -> x < j, j_deps) && j_deps[1] < i)) && !(head(tree, j) < i)
return Shift()
end
end
if length(cfg.λ1) == 0
return Shift()
end
return NoArc()
end
# todo?
possible_transitions(cfg::ListBasedNonProjectiveConfig, graph::DependencyTree, arc=untyped) =
TransitionOperator[static_oracle(cfg, graph, arc)]
==(cfg1::ListBasedNonProjectiveConfig, cfg2::ListBasedNonProjectiveConfig) =
cfg1.λ1 == cfg2.λ1 && cfg1.λ2 == cfg2.λ2 && cfg1.β == cfg2.β && cfg1.A == cfg2.A
function Base.show(io::IO, c::ListBasedNonProjectiveConfig)
λ1 = join(c.λ1, ",")
λ2 = join(c.λ2, ",")
β = join(c.β, ",")
print(io, "ListBasedNonProjectiveConfig([$λ1],[$λ2],[$β])\n$(join([join([id(t),form(t),head(t)],'\t') for t in tokens(c)],'\n'))")
end
| [
27,
34345,
29,
10677,
14,
7645,
653,
62,
79,
945,
278,
14,
10057,
82,
14,
4868,
3106,
13,
20362,
198,
37811,
198,
220,
220,
220,
7343,
15001,
15419,
16775,
425,
3419,
198,
198,
8291,
653,
1080,
329,
1351,
12,
3106,
1729,
12,
16302,
... | 2.214321 | 2,025 |
type class_nlo <: internal_AbstractNLPEvaluator
# linear program with non-linear objective
# min f(x)
# A*x = b
# x >= 0
_n::Int64 # number of variables
_m::Int64 # number of constraints
_A::SparseMatrixCSC{Float64,Int64}
_b::Array{Float64,1}
obj::class_nl_function
function class_nlo(A::SparseMatrixCSC{Float64,Int64},b::Array{Float64,1}, obj::class_nl_function)
(m, n) = size(A)
return new(n,m,A,b,obj);
end
end
################
# METHODS
################
# evaluate objective
function internal_eval_c(nlo::class_nlo, x::Array{Float64,1})
return nlo.obj.value(x);
end
# evalutate constraints
function internal_eval_a(nlo::class_nlo, x::Array{Float64,1})
return nlo._A * x - nlo._b;
end
# evaluate gradient of constraints
function internal_eval_jac_a(nlo::class_nlo, x::Array{Float64,1}) # J
return nlo._A;
end
# hessian of lagrangian
function internal_eval_hesslag_prod(nlo::class_nlo, x::Array{Float64,1}, y::Array{Float64,1})
return nlo.obj.hessian(x)
end
# gradient of lagrangian
function internal_eval_gradlag(nlo::class_nlo, x::Array{Float64,1}, y::Array{Float64,1})
return internal_eval_gradc(nlo, x) - nlo._A' * y;
end
# gradient of f
function internal_eval_gradc(nlo::class_nlo, x::Array{Float64,1})
return nlo.obj.gradient(x);
end
# (\nabla g * x - g)
function internal_eval_b(nlo::class_nlo, x::Array{Float64,1})
return nlo._b;
end
function n(nlo::class_nlo)
return nlo._n;
end
function m(nlo::class_nlo)
return nlo._m;
end
| [
4906,
1398,
62,
77,
5439,
1279,
25,
5387,
62,
23839,
32572,
11401,
2100,
84,
1352,
198,
220,
1303,
14174,
1430,
351,
1729,
12,
29127,
9432,
198,
220,
1303,
949,
277,
7,
87,
8,
198,
220,
1303,
317,
9,
87,
796,
275,
198,
220,
1303,
... | 2.4219 | 621 |
<reponame>fqiang/MJPlayGround.jl
using MJPlayGround
using Base.Test
# write your own tests here
@test 1 == 1
a=2;
a
| [
27,
7856,
261,
480,
29,
69,
80,
15483,
14,
43421,
11002,
35539,
13,
20362,
198,
3500,
33974,
11002,
35539,
198,
3500,
7308,
13,
14402,
198,
198,
2,
3551,
534,
898,
5254,
994,
198,
31,
9288,
352,
6624,
352,
198,
198,
64,
28,
17,
26... | 2.510638 | 47 |
"""
perpendicular_vector(vec)
Compute a vector perpendicular to `vec` by switching the two elements with
largest absolute value, flipping the sign of the second largest, and setting the
remaining element to zero.
"""
function perpendicular_vector(vec::SVector{3})
T = eltype(vec)
# find indices of the two elements of vec with the largest absolute values:
absvec = abs.(vec)
ind1 = argmax(absvec) # index of largest element
tmin = typemin(T)
@inbounds absvec2 = @SVector [ifelse(i == ind1, tmin, absvec[i]) for i = 1 : 3] # set largest element to typemin(T)
ind2 = argmax(absvec2) # index of second-largest element
# perp[ind1] = -vec[ind2], perp[ind2] = vec[ind1], set remaining element to zero:
@inbounds perpind1 = -vec[ind2]
@inbounds perpind2 = vec[ind1]
tzero = zero(T)
perp = @SVector [ifelse(i == ind1, perpind1, ifelse(i == ind2, perpind2, tzero)) for i = 1 : 3]
end
@noinline length_error(v, len) =
throw(DimensionMismatch("Expected length $len, got length $(length(v))"))
@inline function check_length(v, len)
if length(v) != len
length_error(v, len)
end
end
function skew(v::AbstractVector)
check_length(v, 3)
@SMatrix [0 -v[3] v[2];
v[3] 0 -v[1];
-v[2] v[1] 0]
end
function vee(S::AbstractMatrix)
return @SVector [S[3,2], S[1,3], S[2,1]]
end
"""
The element type for a rotation matrix with a given angle type is composed of
trigonometric functions of that type.
"""
Base.@pure rot_eltype(angle_type) = typeof(sin(zero(angle_type))) | [
37811,
198,
220,
220,
220,
47190,
62,
31364,
7,
35138,
8,
198,
198,
7293,
1133,
257,
15879,
47190,
284,
4600,
35138,
63,
416,
15430,
262,
734,
4847,
351,
198,
28209,
4112,
1988,
11,
33097,
262,
1051,
286,
262,
1218,
4387,
11,
290,
4... | 2.52818 | 621 |
mutable struct NothingXMLElement <: MyXMLElement
el::Nothing
NothingXMLElement() = new(nothing)
end
function make_xml(::NothingXMLElement)
# do nothing
end
| [
76,
18187,
2878,
10528,
37643,
2538,
1732,
1279,
25,
2011,
37643,
2538,
1732,
198,
220,
220,
220,
1288,
3712,
18465,
198,
220,
220,
220,
10528,
37643,
2538,
1732,
3419,
796,
649,
7,
22366,
8,
198,
437,
198,
198,
8818,
787,
62,
19875,
... | 2.864407 | 59 |
module MaxHelpingHandMultiRoomStrawberry
using ..Ahorn, Maple
@mapdef Entity "MaxHelpingHand/MultiRoomStrawberry" MultiRoomStrawberry(x::Integer, y::Integer,
name::String="multi_room_strawberry", winged::Bool=false, moon::Bool=false, checkpointID::Integer=-1, order::Integer=-1)
const placements = Ahorn.PlacementDict(
"Multi-Room Strawberry (max480's Helping Hand)" => Ahorn.EntityPlacement(
MultiRoomStrawberry
)
)
# winged, moon
sprites = Dict{Tuple{Bool, Bool}, String}(
(false, false) => "collectables/strawberry/normal00",
(true, false) => "collectables/strawberry/wings01",
(false, true) => "collectables/moonBerry/normal00",
(true, true) => "collectables/moonBerry/normal00"
)
function Ahorn.selection(entity::MultiRoomStrawberry)
x, y = Ahorn.position(entity)
moon = get(entity.data, "moon", false)
winged = get(entity.data, "winged", false)
sprite = sprites[(winged, moon)]
return Ahorn.getSpriteRectangle(sprite, x, y)
end
function Ahorn.renderAbs(ctx::Ahorn.Cairo.CairoContext, entity::MultiRoomStrawberry, room::Maple.Room)
x, y = Ahorn.position(entity)
moon = get(entity.data, "moon", false)
winged = get(entity.data, "winged", false)
sprite = sprites[(winged, moon)]
Ahorn.drawSprite(ctx, sprite, x, y)
end
end
| [
171,
119,
123,
21412,
5436,
12621,
13886,
12885,
29800,
41178,
1273,
1831,
8396,
198,
198,
3500,
11485,
10910,
1211,
11,
21249,
198,
198,
31,
8899,
4299,
20885,
366,
11518,
12621,
13886,
12885,
14,
29800,
41178,
1273,
1831,
8396,
1,
15237... | 2.649194 | 496 |
<reponame>jona125/ImagineInterface.jl
using ImagineInterface, ImagineFormat
ais = parse_ai("t.ai")
# Grab one particular signal
piezo = getname(ais, "axial piezo monitor")
# piezo is just a reference, the data are loaded on-demand. This lets you
# work with long recordings.
# Extract values in physical units, which here represent the position
data = get_samples(piezo)
# We can also get them in their original voltage units...
datav = get_samples(piezo; sampmap=:volts)
# ...or even in the raw Int16 format
dataraw = get_samples(piezo; sampmap=:raw)
# Let's display this signal
using ImaginePlots # you have to install this and its dependencies manually
using Plots
plot(piezo)
stimuli = getname(ais, "stimuli")
# This channel is just noise, but if it had had a sequence of TTL pulses
# this would give us a list of scan #s at which the pulses start
stimstarts = find_pulse_starts(stimuli; sampmap=:volts)
| [
27,
7856,
261,
480,
29,
73,
4450,
11623,
14,
25153,
39317,
13,
20362,
198,
3500,
18450,
39317,
11,
18450,
26227,
198,
198,
15152,
796,
21136,
62,
1872,
7203,
83,
13,
1872,
4943,
198,
198,
2,
25339,
530,
1948,
6737,
198,
21749,
10872,
... | 3.275986 | 279 |
# ===============================
# Written by AAE
# <NAME>, Winter 2014
# simulkade.com
# ===============================
# =============================== SOLVERS ===================================
function solveLinearPDE(m::MeshStructure, M::SparseMatrixCSC{Float64, Int64}, RHS::Array{Float64,1})
N=m.dims
x=M\RHS # until the problem is solved with Julia "\" solver
phi = CellValue(m, reshape(full(x), tuple(N+2...)))
phi
end
function solvePDE(m::MeshStructure, M::SparseMatrixCSC{Float64, Int64}, RHS::Array{Float64,1})
N=m.dims
x=M\RHS # until the problem is solved with Julia "\" solver
phi = CellValue(m, reshape(full(x), tuple(N+2...)))
phi
end
function solveMUMPSLinearPDE(m::MeshStructure, M::SparseMatrixCSC{Float64, Int64}, RHS::Array{Float64,1})
N=m.dims
x=solveMUMPS(M,RHS) # until the problem is solved with Julia "\" solver
phi = CellValue(m, reshape(full(x), tuple(N+2...)))
phi
end
function solveExplicitPDE(phi_old::CellValue, dt::Real, RHS::Array{Float64,1},
BC::BoundaryCondition)
d = phi_old.domain.dimension
N = phi_old.domain.dims
phi_val=reshape(phi_old.value[:]+dt*RHS, tuple(N+2...))
if (d==1) || (d==1.5)
phi_val= phi_val[2:N[1]+1]
elseif (d==2) || (d==2.5) || (d==2.8)
phi_val= phi_val[2:N[1]+1, 2:N[2]+1]
elseif (d==3) || (d==3.2)
phi_val= phi_val[2:N[1]+1, 2:N[2]+1, 2:N[3]+1]
end
return createCellVariable(phi_old.domain, phi_val, BC)
end
function solveExplicitPDE(phi_old::CellValue, dt::Real, RHS::Array{Float64,1},
BC::BoundaryCondition, alfa::CellValue)
d = phi_old.domain.dimension
N = phi_old.domain.dims
phi_val=reshape(phi_old.value[:]+dt*RHS./alfa.value[:], tuple(N+2...))
if (d==1) || (d==1.5)
phi_val= phi_val[2:N[1]+1]
elseif (d==2) || (d==2.5) || (d==2.8)
phi_val= phi_val[2:N[1]+1, 2:N[2]+1]
elseif (d==3) || (d==3.2)
phi_val= phi_val[2:N[1]+1, 2:N[2]+1, 2:N[3]+1]
end
return createCellVariable(phi_old.domain, phi_val, BC)
end
# =========================== Visualization =================================
function visualizeCells(phi::CellValue)
d=phi.domain.dimension
if d==1 || d==1.5
x = [phi.domain.facecenters.x[1]; phi.domain.cellcenters.x; phi.domain.facecenters.x[end]]
phi = [0.5*(phi.value[1]+phi.value[2]); phi.value[2:end-1]; 0.5*(phi.value[end-1]+phi.value[end])]
plot(x, phi)
elseif d==2 || d==2.5
x = [phi.domain.facecenters.x[1]; phi.domain.cellcenters.x; phi.domain.facecenters.x[end]]
y = [phi.domain.facecenters.y[1]; phi.domain.cellcenters.y; phi.domain.facecenters.y[end]]
phi0 = Base.copy(phi.value)
phi0[:,1] = 0.5*(phi0[:,1]+phi0[:,2])
phi0[1,:] = 0.5*(phi0[1,:]+phi0[2,:])
phi0[:,end] = 0.5*(phi0[:,end]+phi0[:,end-1])
phi0[end,:] = 0.5*(phi0[end,:]+phi0[end-1,:])
phi0[1,1] = phi0[1,2]
phi0[1,end] = phi0[1,end-1]
phi0[end,1] = phi0[end,2]
phi0[end,end] = phi0[end,end-1]
pcolor(x, y, phi0')
elseif d==2.8
x = [phi.domain.facecenters.x[1]; phi.domain.cellcenters.x; phi.domain.facecenters.x[end]]
y = [phi.domain.facecenters.y[1]; phi.domain.cellcenters.y; phi.domain.facecenters.y[end]]
phi0 = Base.copy(phi.value)
phi0[:,1] = 0.5*(phi0[:,1]+phi0[:,2])
phi0[1,:] = 0.5*(phi0[1,:]+phi0[2,:])
phi0[:,end] = 0.5*(phi0[:,end]+phi0[:,end-1])
phi0[end,:] = 0.5*(phi0[end,:]+phi0[end-1,:])
phi0[1,1] = phi0[1,2]
phi0[1,end] = phi0[1,end-1]
phi0[end,1] = phi0[end,2]
phi0[end,end] = phi0[end,end-1]
subplot(111, polar="true")
pcolor(y, x, phi0)
elseif d==3
Nx = phi.domain.dims[1]
Ny = phi.domain.dims[2]
Nz = phi.domain.dims[3]
x=[phi.domain.facecenters.x[1]; phi.domain.cellcenters.x; phi.domain.facecenters.x[end]]
y=Array(Float64,1,Ny+2)
y[:]=[phi.domain.facecenters.y[1]; phi.domain.cellcenters.y; phi.domain.facecenters.y[end]]
z=Array(Float64,1,1,Nz+2)
z[:]=[phi.domain.facecenters.z[1]; phi.domain.cellcenters.z; phi.domain.facecenters.z[end]]
phi0 = Base.copy(phi.value)
phi0[:,1,:]=0.5*(phi0[:,1,:]+phi0[:,2,:])
phi0[:,end,:]=0.5*(phi0[:,end-1,:]+phi0[:,end,:])
phi0[:,:,1]=0.5*(phi0[:,:,1]+phi0[:,:,1])
phi0[:,:,end]=0.5*(phi0[:,:,end-1]+phi0[:,:,end])
phi0[1,:,:]=0.5*(phi0[1,:,:]+phi0[2,:,:])
phi0[end,:,:]=0.5*(phi0[end-1,:,:]+phi0[end,:,:])
vmin = minimum(phi0)
vmax = maximum(phi0)
a=ones(Nx+2,Ny+2,Nz+2)
X = x.*a
Y = y.*a
Z = z.*a
s=mayavis.pipeline[:scalar_field](X,Y,Z,phi0)
mayavis.pipeline[:image_plane_widget](s, plane_orientation="x_axes", slice_index=0, vmin=vmin, vmax=vmax)
mayavis.pipeline[:image_plane_widget](s, plane_orientation="y_axes", slice_index=0, vmin=vmin, vmax=vmax)
mayavis.pipeline[:image_plane_widget](s, plane_orientation="z_axes", slice_index=0, vmin=vmin, vmax=vmax)
mayavis.pipeline[:image_plane_widget](s, plane_orientation="z_axes", slice_index=floor(Integer,Nz/2.0), vmin=vmin, vmax=vmax)
mayavis.outline()
# # 6 surfaces
# # surfaces 1,2 (x=x[1], x=x[end])
# Y=repmat(y,1,Nz)
# Z=repmat(z,1,Ny)
# mayavis.mesh(x[1]*ones(Ny,Nz),Y,Z',scalars=squeeze(phi.value[2,2:end-1,2:end-1],1), vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.mesh(x[end]*ones(Ny,Nz),Y,Z',scalars=squeeze(phi.value[end-1,2:end-1,2:end-1],1), vmin=vmin, vmax=vmax, opacity=0.8)
#
# # surfaces 3,4 (y=y[1], y=y[end]
# X = repmat(x,1,Nz)
# Z = repmat(z,1,Nx)
# mayavis.mesh(X,y[1]*ones(Nx,Nz),Z',scalars=squeeze(phi.value[2:end-1,2,2:end-1],2), vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.mesh(X,y[end]*ones(Nx,Nz),Z',scalars=squeeze(phi.value[2:end-1,end-1,2:end-1],2), vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.axes()
#
# # surfaces 5,6 (z=z[1], z=z[end]
# X = repmat(x,1,Ny)
# Y = repmat(y,1,Nx)
# mayavis.mesh(X,Y',z[1]*ones(Nx,Ny),scalars=phi.value[2:end-1,2:end-1,2], vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.mesh(X,Y',z[end]*ones(Nx,Ny),scalars=phi.value[2:end-1,2:end-1,end-1], vmin=vmin, vmax=vmax, opacity=0.8)
mayavis.colorbar()
mayavis.axes()
mshot= mayavis.screenshot()
mayavis.show()
return mshot
elseif d==3.2
Nx = phi.domain.dims[1]
Ny = phi.domain.dims[2]
Nz = phi.domain.dims[3]
r=[phi.domain.facecenters.x[1]; phi.domain.cellcenters.x; phi.domain.facecenters.x[end]]
theta = Array(Float64,1,Ny+2)
theta[:]=[phi.domain.facecenters.y[1]; phi.domain.cellcenters.y; phi.domain.facecenters.y[end]]
z=Array(Float64,1,1,Nz+2)
z[:]=[phi.domain.facecenters.z[1]; phi.domain.cellcenters.z; phi.domain.facecenters.z[end]]
a=ones(Nx+2,Ny+2,Nz+2)
R=r.*a
TH = theta.*a
Z = z.*a
X=R.*cos(TH)
Y=R.*sin(TH)
phi0 = Base.copy(phi.value)
phi0[:,1,:]=0.5*(phi0[:,1,:]+phi0[:,2,:])
phi0[:,end,:]=0.5*(phi0[:,end-1,:]+phi0[:,end,:])
phi0[:,:,1]=0.5*(phi0[:,:,1]+phi0[:,:,1])
phi0[:,:,end]=0.5*(phi0[:,:,end-1]+phi0[:,:,end])
phi0[1,:,:]=0.5*(phi0[1,:,:]+phi0[2,:,:])
phi0[end,:,:]=0.5*(phi0[end-1,:,:]+phi0[end,:,:])
vmin = minimum(phi0)
vmax = maximum(phi0)
# 6 surfaces
# surfaces 1,2 (x=x[1], x=x[end])
mayavis.mesh(squeeze(X[floor(Integer,Nx/2.0),:,:],1),squeeze(Y[floor(Integer,Nx/2.0),:,:],1),squeeze(Z[floor(Integer,Nx/2.0),:,:],1),
scalars=squeeze(phi0[floor(Integer,Nx/2.0)+1,:,:],1), vmin=vmin, vmax=vmax, opacity=0.8)
mayavis.mesh(squeeze(X[Nx,:,:],1),squeeze(Y[Nx,:,:],1),squeeze(Z[Nx,:,:],1),
scalars=squeeze(phi0[Nx+2,:,:],1), vmin=vmin, vmax=vmax, opacity=0.8)
# surfaces 3,4 (y=y[1], y=y[end]
mayavis.mesh(squeeze(X[:,floor(Integer,Ny/2.0),:],2),squeeze(Y[:,floor(Integer,Ny/2.0),:],2),squeeze(Z[:,floor(Integer,Ny/2.0),:],2),
scalars=squeeze(phi0[:,floor(Integer,Ny/2.0)+1,:],2), vmin=vmin, vmax=vmax, opacity=0.8)
mayavis.mesh(squeeze(X[:,Ny,:],2),squeeze(Y[:,Ny,:],2),squeeze(Z[:,Ny,:],2),
scalars=squeeze(phi0[:,Ny+2,:],2), vmin=vmin, vmax=vmax, opacity=0.8)
# surfaces 5,6 (z=z[1], z=z[end]
mayavis.mesh(X[:,:,floor(Integer,Nz/2.0)],Y[:,:,floor(Integer,Nz/2.0)],Z[:,:,floor(Integer,Nz/2.0)],
scalars=phi0[:,:,floor(Integer,Nz/2.0)+1], vmin=vmin, vmax=vmax, opacity=0.8)
mayavis.mesh(X[:,:,Nz],Y[:,:,Nz],Z[:,:,Nz],
scalars=phi0[:,:,Nz+1], vmin=vmin, vmax=vmax, opacity=0.8)
mayavis.colorbar()
mayavis.axes()
mshot=mayavis.screenshot()
mayavis.show()
return mshot
end
end
########################## Visualize Vectors #####################
function visualizeCellVectors(phi::CellVector)
d=phi.domain.dimension
if d==1 || d==1.5
println("Vector visualization works only in 2D and 3D")
elseif d==2 || d==2.5
x = phi.domain.cellcenters.x
y = phi.domain.cellcenters.y
quiver(repmat(x, 1, length(y)), repmat(y', length(x), 1), phi.xvalue, phi.yvalue)
elseif d==2.8
x = phi.domain.cellcenters.x
y = phi.domain.cellcenters.y'
subplot(111, polar="true")
quiver(repmat(y, length(x), 1), repmat(x, 1, length(y)),
phi.xvalue.*cos(y)-phi.yvalue.*sin(y), phi.xvalue.*sin(y)+phi.yvalue.*cos(y))
elseif d==3
Nx = phi.domain.dims[1]
Ny = phi.domain.dims[2]
Nz = phi.domain.dims[3]
x=phi.domain.cellcenters.x
y=Array(Float64,1,Ny)
y[:]=phi.domain.cellcenters.y
z=Array(Float64,1,1,Nz)
z[:]=phi.domain.cellcenters.z
#vmin = minimum(phi.xvalue)
#vmax = maximum(phi0)
a=ones(Nx,Ny,Nz)
X = x.*a
Y = y.*a
Z = z.*a
mayavis.quiver3d(X,Y,Z, phi.xvalue, phi.yvalue, phi.zvalue)
mayavis.outline()
# # 6 surfaces
# # surfaces 1,2 (x=x[1], x=x[end])
# Y=repmat(y,1,Nz)
# Z=repmat(z,1,Ny)
# mayavis.mesh(x[1]*ones(Ny,Nz),Y,Z',scalars=squeeze(phi.value[2,2:end-1,2:end-1],1), vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.mesh(x[end]*ones(Ny,Nz),Y,Z',scalars=squeeze(phi.value[end-1,2:end-1,2:end-1],1), vmin=vmin, vmax=vmax, opacity=0.8)
#
# # surfaces 3,4 (y=y[1], y=y[end]
# X = repmat(x,1,Nz)
# Z = repmat(z,1,Nx)
# mayavis.mesh(X,y[1]*ones(Nx,Nz),Z',scalars=squeeze(phi.value[2:end-1,2,2:end-1],2), vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.mesh(X,y[end]*ones(Nx,Nz),Z',scalars=squeeze(phi.value[2:end-1,end-1,2:end-1],2), vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.axes()
#
# # surfaces 5,6 (z=z[1], z=z[end]
# X = repmat(x,1,Ny)
# Y = repmat(y,1,Nx)
# mayavis.mesh(X,Y',z[1]*ones(Nx,Ny),scalars=phi.value[2:end-1,2:end-1,2], vmin=vmin, vmax=vmax, opacity=0.8)
# mayavis.mesh(X,Y',z[end]*ones(Nx,Ny),scalars=phi.value[2:end-1,2:end-1,end-1], vmin=vmin, vmax=vmax, opacity=0.8)
mayavis.colorbar()
mayavis.axes()
mshot= mayavis.screenshot()
mayavis.show()
return mshot
elseif d==3.2
Nx = phi.domain.dims[1]
Ny = phi.domain.dims[2]
Nz = phi.domain.dims[3]
r=phi.domain.cellcenters.x
theta = Array(Float64,1,Ny)
theta[:]=phi.domain.cellcenters.y
z=Array(Float64,1,1,Nz)
z[:]=phi.domain.cellcenters.z
a=ones(Nx,Ny,Nz)
R=r.*a
TH = theta.*a
Z = z.*a
X=R.*cos(TH)
Y=R.*sin(TH)
#vmin = minimum(phi0)
#vmax = maximum(phi0)
# 6 surfaces
# surfaces 1,2 (x=x[1], x=x[end])
mayavis.quiver3d(X,Y,Z, phi.xvalue.*cos(TH)-phi.yvalue.*sin(TH),
phi.xvalue.*sin(TH)+phi.yvalue.*cos(TH), phi.zvalue)
mayavis.colorbar()
mayavis.axes()
mshot=mayavis.screenshot()
mayavis.show()
return mshot
end
end
| [
2,
36658,
25609,
855,
198,
2,
22503,
416,
317,
14242,
198,
2,
1279,
20608,
22330,
10633,
1946,
198,
2,
985,
12171,
671,
13,
785,
198,
2,
36658,
25609,
855,
198,
198,
2,
36658,
25609,
855,
36817,
28884,
46111,
855,
198,
8818,
8494,
1... | 1.921644 | 5,692 |
<reponame>efaulhaber/Trixi.jl<filename>test/test_examples_3d_curved.jl
module TestExamples3DCurved
using Test
using Trixi
include("test_trixi.jl")
# pathof(Trixi) returns /path/to/Trixi/src/Trixi.jl, dirname gives the parent directory
EXAMPLES_DIR = joinpath(pathof(Trixi) |> dirname |> dirname, "examples", "3d")
@testset "Curved mesh" begin
@testset "elixir_advection_basic_curved.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_basic_curved.jl"),
l2 = [0.00013446460962856976],
linf = [0.0012577781391462928])
end
@testset "elixir_advection_free_stream_curved.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_advection_free_stream_curved.jl"),
l2 = [1.830875777528287e-14],
linf = [7.491784970170556e-13],
atol = 8e-13, # required to make tests pass on Windows
)
end
@testset "elixir_euler_source_terms_curved.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_source_terms_curved.jl"),
l2 = [0.01032310150257373, 0.009728768969448439, 0.009728768969448494, 0.009728768969448388, 0.015080412597559597],
linf = [0.034894790428615874, 0.033835365548322116, 0.033835365548322116, 0.03383536554832034, 0.06785765131417065])
end
@testset "elixir_euler_free_stream_curved.jl" begin
@test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream_curved.jl"),
l2 = [2.8815700334367128e-15, 9.361915278236651e-15, 9.95614203619935e-15, 1.6809941842374106e-14, 1.4815037041566735e-14],
linf = [4.1300296516055823e-14, 2.0444756998472258e-13, 1.0133560657266116e-13, 3.1896707497480747e-13, 6.092903959142859e-13])
end
end
end # module
| [
27,
7856,
261,
480,
29,
891,
2518,
5976,
263,
14,
51,
8609,
72,
13,
20362,
27,
34345,
29,
9288,
14,
9288,
62,
1069,
12629,
62,
18,
67,
62,
22019,
1079,
13,
20362,
198,
21412,
6208,
27730,
18,
9697,
333,
1079,
198,
198,
3500,
6208,... | 2.11625 | 800 |
function k_tr{T}(x::T)
if(isnan(x) || abs(x)<= one(T))
return one(Float64)
else
return zero(Float64)
end
end
function k_bt{T}(x::T)
if isnan(x)
one(Float64)
end
float(max(one(T)-abs(x), zero(T)))
end
function k_pr{T}(x::T)
if isnan(x)
one(Float64)
end
ax = abs(x)
if(ax > one(T))
zero(Float64)
elseif ax <= .5
float(1 - 6 * x^2 + 6 * ax^3)
else
float(2 * (1-ax)^3)
end
end
function k_qs{T <: Number}(x::T)
if isnan(x)
one(Float64)
end
if(isequal(x, zero(eltype(x))))
one(Float64)
else
return (25/(12*π²*x^2))*(sin(sixπ*x/5)/(sixπ*x/5)-cos(sixπ*x/5))
end
end
function k_th{T <: Number}(x::T)
if isnan(x)
one(Float64)
end
ax = abs(x)
if(ax < one(T))
(1 + cos(π*x))/2
else
zero(Float64)
end
end
##############################################################################
##
## Optimal band-width
##
##############################################################################
type TruncatedKernel <: HAC
kernel::Function
bw::Function
end
type BartlettKernel <: HAC
kernel::Function
bw::Function
end
type ParzenKernel <: HAC
kernel::Function
bw::Function
end
type TukeyHanningKernel <: HAC
kernel::Function
bw::Function
end
type QuadraticSpectralKernel <: HAC
kernel::Function
bw::Function
end
type VARHAC <: HAC
imax::Int64
ilag::Int64
imodel::Int64
end
typealias TRK TruncatedKernel
typealias BTK BartlettKernel
typealias PRK ParzenKernel
typealias THK TukeyHanningKernel
typealias QSK QuadraticSpectralKernel
TruncatedKernel() = TRK(k_tr, optimalbw_ar_one)
BartlettKernel() = BTK(k_bt, optimalbw_ar_one)
ParzenKernel() = PRK(k_pr, optimalbw_ar_one)
TukeyHanningKernel() = THK(k_th, optimalbw_ar_one)
QuadraticSpectralKernel() = QSK(k_qs, optimalbw_ar_one)
TruncatedKernel(bw::Number) = TRK(k_tr, (x, k) -> float(bw))
BartlettKernel(bw::Number) = BTK(k_bt, (x, k) -> float(bw))
ParzenKernel(bw::Number) = PRK(k_pr, (x, k) -> float(bw))
TukeyHanningKernel(bw::Number) = THK(k_th, (x, k) -> float(bw))
QuadraticSpectralKernel(bw::Number) = QSK(k_qs, (x, k) -> float(bw))
VARHAC() = VARHAC(2, 2, 1)
VARHAC(imax::Int64) = VARHAC(imax, 2, 1)
function bandwidth(k::HAC, X::AbstractMatrix)
return floor(k.bw(X, k))
end
function bandwidth(k::QuadraticSpectralKernel, X::AbstractMatrix)
return k.bw(X, k)
end
kernel(k::HAC, x::Real) = k.kernel(x)
function Γ(X::AbstractMatrix, j::Int64)
T, p = size(X)
Q = zeros(eltype(X), p, p)
if j>=0
for h=1:p, s = 1:h
for t = j+1:T
@inbounds Q[s, h] = Q[s, h] + X[t, s]*X[t-j, h]
end
end
else
for h=1:p, s = 1:h
for t = -j+1:T
@inbounds Q[s,h] = Q[s ,h] + X[t+j, s]*X[t,h]
end
end
end
return Q
end
vcov(X::AbstractMatrix, k::VARHAC) = varhac(X, k.imax, k.ilag, k.imodel)
function vcov(X::AbstractMatrix, k::HAC; prewhite=true)
n, p = size(X)
!prewhite || ((X, D) = pre_white(X))
bw = bandwidth(k, X)
Q = zeros(eltype(X), p, p)
for j=-bw:bw
Base.BLAS.axpy!(kernel(k, j/bw), Γ(X, @compat Int(j)), Q)
end
Base.LinAlg.copytri!(Q, 'U')
if prewhite
Q[:] = D*Q*D'
end
return scale!(Q, 1/n)
end
function vcov(X::AbstractMatrix, k::QuadraticSpectralKernel; prewhite=true)
n, p = size(X)
!prewhite || ((X, D) = pre_white(X))
bw = bandwidth(k, X)
Q = zeros(eltype(X), p, p)
for j=-n:n
Base.BLAS.axpy!(kernel(k, j/bw), Γ(X, @compat Int(j)), Q)
end
Base.LinAlg.copytri!(Q, 'U')
if prewhite
Q[:] = D*Q*D'
end
return scale!(Q, 1/n)
end
vcov(x::DataFrameRegressionModel, k::HAC; args...) = vcov(x.model, k; args...)
function vcov(ll::LinPredModel, k::HAC; args...)
B = meat(ll, k; args...)
A = bread(ll)
scale!(A*B*A, 1/nobs(ll))
end
function meat(l::LinPredModel, k::HAC; args...)
u = wrkresidwts(l.rr)
X = ModelMatrix(l)
z = X.*u
vcov(z, k; args...)
end
| [
8818,
479,
62,
2213,
90,
51,
92,
7,
87,
3712,
51,
8,
198,
220,
220,
220,
611,
7,
271,
12647,
7,
87,
8,
8614,
2352,
7,
87,
8,
27,
28,
530,
7,
51,
4008,
198,
220,
220,
220,
220,
220,
220,
220,
1441,
530,
7,
43879,
2414,
8,
... | 1.883988 | 2,267 |
<reponame>chenwilliam77/RiskAdjustedLinearizations
using RiskAdjustedLinearizations, FastGaussQuadrature, Test
@testset "Gauss-Hermite Quadrature for Expectations of Functions of Independent Normally Distributed Random Variables/Vectors" begin
f(x) = x # calculate the expected value
g(x) = 1. # calculate the probability
ϵᵢ, wᵢ = RiskAdjustedLinearizations.standard_normal_gausshermite(3)
true_eps, true_wts = gausshermite(3)
@test ϵᵢ == true_eps .* sqrt(2.)
@test wᵢ == true_wts ./ sqrt(π)
mean51 = gausshermite_expectation(f, 5., 1., 5)
mean01 = gausshermite_expectation(f, 0., 1., 5)
mean53 = gausshermite_expectation(f, 5., 3., 5)
mean03 = gausshermite_expectation(f, 0., 3., 5)
prob = gausshermite_expectation(g, -5., .1)
@test mean51 ≈ 5.
@test mean53 ≈ 5.
@test isapprox(mean01, 0., atol = 1e-14)
@test isapprox(mean03, 0., atol = 1e-14)
@test prob ≈ 1.
h1(x) = x[1]
h2(x) = x[2]
prob1 = gausshermite_expectation(g, [.5, 5.], [1., 1.], 5)
mean11 = gausshermite_expectation(h1, [.5, 5.], [1., 1.], 5)
mean21 = gausshermite_expectation(h2, [.5, 5.], [1., 1.], 5)
prob2 = gausshermite_expectation(g, [5., -1.], [1., 1.], (5, 5))
mean12 = gausshermite_expectation(h1, [.5, 5.], [1., 1.], (5, 5))
mean22 = gausshermite_expectation(h2, [.5, 5.], [1., 1.], (5, 5))
prob3 = gausshermite_expectation(g, [5., -1., 2.], [1., 1., 2.], [5, 5, 3])
mean13 = gausshermite_expectation(h1, [.5, 5., 2.], [1., 1., 1.], [5, 5, 3])
mean23 = gausshermite_expectation(h2, [.5, 5., 2.], [1., 1., 1.], [5, 5, 3])
@test prob1 ≈ prob2 ≈ prob3 ≈ 1.
@test mean11 ≈ mean12 ≈ mean13 ≈ .5
@test mean21 ≈ mean22 ≈ mean23 ≈ 5.
end
nothing
| [
27,
7856,
261,
480,
29,
6607,
10594,
1789,
3324,
14,
49,
1984,
39668,
276,
14993,
451,
4582,
198,
3500,
19602,
39668,
276,
14993,
451,
4582,
11,
12549,
35389,
1046,
4507,
41909,
1300,
11,
6208,
198,
198,
31,
9288,
2617,
366,
35389,
10... | 2.113939 | 825 |
<reponame>SebastianM-C/LabReports.jl
using LabReports
using Test
@testset "LabReports.jl" begin
folder = "fake_data"
reference_folder = "reference_data"
to_rename = joinpath("fake_data", "200", "200_C&D_3.4e-3")
renamed = joinpath("fake_data", "200", "200_C&D_3.4e-3_D")
data = @test_logs (:info, "Renamed $to_rename to $renamed") find_files(folder)
reference_data = find_files(reference_folder)
@test length(data) == length(reference_data) == 3
@testset "Find $k" for k in keys(data)
for (datafile, ref) in zip(data[k], reference_data[k])
@test basename(datafile.filename) == basename(ref.filename)
@test basename(datafile.savename) == basename(ref.savename)
@test datafile.units == ref.units
@test datafile.legend_units == ref.legend_units
@test datafile.idx == ref.idx
end
end
include("originlab.jl")
ENV["UNITFUL_FANCY_EXPONENTS"] = false
process_data("CV", data, select=(:Scan, ==, 2))
process_data("EIS", data, select=(Symbol("-Phase (°)"), >, 0))
process_data("C&D", data, insert_D=(0, 1.4, 0, 0, 0), continue_col=Symbol("Time (s)"))
for ((root,dirs,files),(ref_root,ref_dirs,ref_files)) in zip(walkdir(folder), walkdir(reference_folder))
@test dirs == ref_dirs
@test length(files) == length(ref_files)
@test files == ref_files
@testset "File comparison for $file" for (file, ref_file) in zip(files, ref_files)
f = read(joinpath(root, file), String)
r = read(joinpath(ref_root, ref_file), String)
@test f == r
end
end
@test filevalue(data["CV"][1]) == "14000"
@test files_with_val(data["CV"], "14000") == [data["CV"][1]]
vals = Dict("15"=>Set(["2e-3","8.9e-5"]), "200"=>Set(["3.4e-3","8.9e-5","7.4e-2"]))
@test filevalues(data["C&D"]) == vals
include("analysis.jl")
# Cleanup
clear(folder, r".*\.dat")
mv(renamed, to_rename)
end
| [
27,
7856,
261,
480,
29,
50,
1765,
459,
666,
44,
12,
34,
14,
17822,
37844,
13,
20362,
198,
3500,
3498,
37844,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
17822,
37844,
13,
20362,
1,
2221,
198,
220,
220,
220,
9483,
796,
366,
307... | 2.201764 | 907 |
<gh_stars>1-10
capa = 10
c = Channel(capa)
function f(c)
for i in 1:10
put!(c, i)
end
end
function g(c)
for i in c
sleep(0.5)
println("From g: ", i)
end
println("Finish")
end
@async f(c)
@async g(c)
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
11128,
64,
796,
838,
198,
66,
796,
11102,
7,
11128,
64,
8,
198,
198,
8818,
277,
7,
66,
8,
198,
220,
220,
220,
329,
1312,
287,
352,
25,
940,
198,
220,
220,
220,
220,
220,
220,
220,
12... | 1.782609 | 138 |
using HDF5
function test_hdf5_output_layer(backend::Backend, T, eps)
println("-- Testing HDF5 Output Layer on $(typeof(backend)){$T}...")
############################################################
# Prepare Data for Testing
############################################################
tensor_dim = abs(rand(Int)) % 4 + 2
dims = tuple((abs(rand(Int, tensor_dim)) % 8 + 1)...)
println(" > $dims")
input = rand(T, dims)
input_blob = make_blob(backend, input)
output_fn = string(Mocha.temp_filename(), ".hdf5")
open(output_fn, "w") do file
# create an empty file
end
layer = HDF5OutputLayer(bottoms=[:input], datasets=[:foobar], filename=output_fn)
@test_throws ErrorException setup(backend, layer, Blob[input_blob], Blob[NullBlob()])
layer = HDF5OutputLayer(bottoms=[:input], datasets=[:foobar],
filename=output_fn, force_overwrite=true)
state = setup(backend, layer, Blob[input_blob], Blob[NullBlob()])
# repeat 3 times
forward(backend, state, Blob[input_blob])
forward(backend, state, Blob[input_blob])
forward(backend, state, Blob[input_blob])
shutdown(backend, state)
expected_output = cat(tensor_dim, input, input, input)
got_output = h5open(output_fn, "r") do h5
read(h5, "foobar")
end
@test size(expected_output) == size(got_output)
@test eltype(expected_output) == eltype(got_output)
@test all(abs(expected_output-got_output) .< eps)
rm(output_fn)
end
function test_hdf5_output_layer(backend::Backend)
test_hdf5_output_layer(backend, Float32, 1e-5)
test_hdf5_output_layer(backend, Float64, 1e-10)
end
if test_cpu
test_hdf5_output_layer(backend_cpu)
end
if test_gpu
test_hdf5_output_layer(backend_gpu)
end
| [
3500,
5572,
37,
20,
198,
198,
8818,
1332,
62,
71,
7568,
20,
62,
22915,
62,
29289,
7,
1891,
437,
3712,
7282,
437,
11,
309,
11,
304,
862,
8,
198,
220,
44872,
7203,
438,
23983,
5572,
37,
20,
25235,
34398,
319,
29568,
4906,
1659,
7,
... | 2.690852 | 634 |
struct IterativeSolvers_LOBPCG{Tv} <: AbstractEigenMethod{Tv}
precond::Matrix{Tv}
IterativeSolvers_LOBPCG{Tv}(h::AbstractMatrix{Tv}, nev = 1) where {Tv} =
new(rand(Tv, size(h,1), nev))
end
IterativeSolvers_LOBPCG(h::AbstractMatrix{Tv}, nev = 1) where {Tv} =
IterativeSolvers_LOBPCG{Tv}(h, nev)
function (d::Diagonalizer{<:IterativeSolvers_LOBPCG, Tv})(nev::Integer;
side = upper, precond = true, kw...) where {Tv}
if size(d.method.precond) != (size(d.matrix, 1), nev)
d.method = IterativeSolvers_LOBPCG(d.matrix, nev) # reset preconditioner
end
largest = ifelse(isfinite(d.point), side === upper , d.point > 0)
result = IterativeSolvers.lobpcg(d.lmap, largest, d.method.precond; kw...)
λs, ϕs = result.λ, result.X
isfinite(d.point) && (λs .= 1 ./ λs .+ d.point)
precond && foreach(i -> (d.method.precond[i] = ϕs[i]), eachindex(d.method.precond))
return Eigen(real(λs), ϕs)
end | [
7249,
40806,
876,
36949,
690,
62,
43,
9864,
5662,
38,
90,
51,
85,
92,
1279,
25,
27741,
36,
9324,
17410,
90,
51,
85,
92,
198,
220,
220,
220,
3718,
623,
3712,
46912,
90,
51,
85,
92,
198,
220,
220,
220,
40806,
876,
36949,
690,
62,
... | 2.127803 | 446 |
<gh_stars>0
__precompile__()
module DictFiles
using Blosc, FunctionalData, Reexport, Compat
using HDF5
@reexport using JLD
export DictFile, dictopen, dictread, dictwrite, close, compact
export getindex, get, getkey, at, setindex!, delete!, blosc, deblosc, serialized, deserialize
export mmap
export haskey, isdict, keys, values, exists
import Base: getindex, get, getkey, setindex!, delete!, haskey, keys, values
macro onpid(pid, a)
quote
r = @fetchfrom $pid try
$a
catch e
e
end
isa(r, Exception) ? rethrow(r) : r
end
end
function blosc(a; kargs...)
io = IOBuffer()
serialize(io, a)
Any[:blosc_compressed, compress(takebuf_array(io); kargs...)]
end
function deblosc(a)
if isa(a, Array) && length(a) > 0 && a[1] == :blosc_compressed
Base.deserialize(IOBuffer(decompress(UInt8,a[2])))
else
a
end
end
function serialized(a; kargs...)
io = IOBuffer()
serialize(io, a)
Any[:dictfiles_serializeditem, takebuf_array(io)]
end
function deserialize(a)
if (isa(a, Array) || isa(a, Tuple)) && length(a) > 0 && a[1] == :dictfiles_serializeditem
Base.deserialize(IOBuffer(snd(a)))
else
a
end
end
#####################################################
## DictFile, dictfile
defaultmode(filename) = !existsfile(filename) || (uperm(filename) & 0x2 > 0) ? "r+" : "r"
type DictFile
jld::JLD.JldFile
ref::@compat(Future)
basekey::Tuple
pid
function DictFile(filename::AbstractString, mode::AbstractString = defaultmode(filename); compress = false)
exists(f) = (s = stat(filename); s.inode != 0 && s.size > 0)
if mode == "r" && !exists(filename)
error("DictFile: file $filename does not exist")
end
if mode == "r+" && !exists(filename)
mode = "w"
end
try
ref = @spawnat myid() jldopen(filename, mode, compress = false, mmaparrays = false)
a = new(fetch(ref),ref,(), myid())
return a
catch e
println("DictFile: error while trying to open file $filename")
Base.display_error(e, catch_backtrace())
rethrow(e)
end
end
function DictFile(fid::JLD.JldFile, basekey::Tuple)
ref = @spawnat myid() fid
r = new(fid, ref, basekey, myid()); finalizer(r, x -> close(x))
r
end
end
DictFile(filename::AbstractString, mode::AbstractString = defaultmode(filename), k...) = DictFile(DictFile(filename, mode), k...)
function DictFile(a::DictFile, k...)
d = a.jld[makekey(a,k)]
if !(typeof(d) <: JLD.JldGroup)
error("DictFile: Try to get proxy DictFile for key $k but that is not a JldGroup")
end
DictFile(a.jld, tuple(a.basekey..., k...))
end
function dictread(filename, key...)
dictopen(filename) do a
a[key...]
end
end
function dictwrite(v::Dict, args...)
dictopen(args...) do a
a[] = v
end
end
function dictopen(f::Function, args...)
fid = DictFile(args...)
try
f(fid)
finally
close(fid)
end
end
#####################################################
## close
import Base.close
function close(a::DictFile)
if isempty(a.basekey)
@onpid a.pid close(a.jld)
end
end
#####################################################
## getindex, setindex!
function makekey(a::DictFile, k::Tuple)
if any(map(x->isa(x,Function), k))
error("DictFiles.makekey: keys cannot contains functions. Key tuple was: $k")
end
reprs = [Base.map(repr, a.basekey)..., Base.map(repr, k)...]
reprs = @p map reprs replace "/" "{{__Dictfiles_slash}}"
key = "/"*join(reprs, "/")
#@show key
key
end
function getindex(a::DictFile, k...)
@onpid a.pid begin
key = makekey(a, k)
if !isempty(k) && !exists(a.jld, key)
error("DictFile: key $k does not exist")
end
if isempty(k)
k2 = keys(a)
return Dict(zip(k2, [getindex(a,x) for x in k2]))
elseif typeof(a.jld[key]) <: JLD.JldGroup
k2 = keys(a, k...)
d2 = DictFile(a, k...)
return Dict(zip(k2, map(x->getindex(d2,x), k2)))
else
return deblosc(DictFiles.deserialize(deblosc(read(a.jld, key))))
end
end
end
function setindex!(a::DictFile, v::Dict, k...; kargs...)
@onpid a.pid begin
if isempty(k)
map(x->delete!(a,x), keys(a))
flush(a.jld.plain)
end
map(x->setindex!(a, v[x], tuple(k...,x)...), keys(v); kargs...)
end
end
function setindex!(a::DictFile, v::Void, k...; kargs...)
end
function setindex!(a::DictFile, v, k...; kargs...)
@onpid a.pid begin
if isempty(k)
error("DictFile: cannot use empty key $k here")
end
key = makekey(a, k)
#@show "in setindex" k key
for i in 1:length(k)
subkey = makekey(a, k[1:i])
if exists(a.jld, subkey) && !(typeof(a.jld[subkey]) <: JLD.JldGroup)
delete!(a, k[1:i]...)
flush(a.jld.plain)
break
end
end
if exists(a.jld, key)
#@show "in setindex, deleting" key
delete!(a, k...)
flush(a.jld.plain)
if exists(a.jld, key)
error("i thought we deleted this?")
end
end
# map(showinfo, v)
write(a.jld, key, v; kargs...)
flush(a.jld.plain)
end
end
#####################################################
## get, getkey
get(a::DictFile, default, k...) = haskey(a, k...) ? a[k...] : default
getkey(a::DictFile, default, k...) = haskey(a, k...) ? k : default
import FunctionalData.at
at(a::DictFile, args...) = getkey(a, args...)
#####################################################
## delete!
import Base.delete!
function delete!(a::DictFile, k...)
@onpid a.pid begin
key = makekey(a,k)
#@show "deleting" key
if exists(a.jld, key)
HDF5.o_delete(a.jld, key)
#HDF5.o_delete(a.jld,"/_refs"*key)
flush(a.jld.plain)
end
end
end
#####################################################
## mmap
function mmap(a::DictFile, k...)
@onpid a.pid begin
dataset = a.jld[makekey(a, k)]
if ismmappable(dataset.plain)
return readmmap(dataset.plain)
else
error("DictFile: The dataset for $k does not support mmapping")
end
end
end
#####################################################
## haskey, keys, values
import Base.haskey
function haskey(a::DictFile, k...)
@onpid a.pid exists(a.jld, makekey(a, k))
end
function isdict(a::DictFile, k...)
@onpid a.pid begin
key = makekey(a,k);
e = exists(a.jld, key)
e && typeof(a.jld[key]) <: JLD.JldGroup
end
end
import Base.keys
function parsekey(a)
a = @p replace a "{{__Dictfiles_slash}}" "/"
r = parse(a)
r = isa(r,QuoteNode) ? Base.unquoted(r) : r
try
if !isa(r, Symbol)
r2 = eval(r)
if isa(r2, Tuple)
r = r2
end
end
catch e
Base.display_error(e, catch_backtrace())
end
r
end
function sortkeys(a)
if all(x -> isa(x, Real), a)
ind = sortperm(a)
else
ind = sortperm(map(string, a));
end
a[ind]
end
function keys(a::DictFile)
@onpid a.pid begin
b = isempty(a.basekey) ? a.jld : a.jld[makekey(a,())]
sortkeys([parsekey(x) for x in names(b)])
end
end
function keys(a::DictFile, k...)
@onpid a.pid begin
key = makekey(a,k)
if !exists(a.jld, key)
return Any[]
end
g = a.jld[key]
if !(isa(g,JLD.JldGroup))
return Any[]
end
sortkeys([parsekey(x) for x in setdiff(names(a.jld[key]), [:id, :file, :plain])])
end
end
import Base.values
values(a::DictFile, k...) = [a[k..., x] for x in keys(a, k...)]
#####################################################
## dump
import Base.dump
dump(a::DictFile) = dump(STDOUT, a)
function dump(io::IO, a::DictFile, maxdepth::Int = typemax(Int))
function printkey(k, maxdepth, indent = 0)
#@show k makekey(k) indent keys(a, k...)
subkeys = sort(keys(a, k...))
println(repeat(" ",indent), k[end], length(subkeys)>0 ? ":" : "")
if indent<maxdepth
Base.map(x-> printkey(tuple(k...,x), maxdepth, indent+1), subkeys)
end
end
Base.map(x->printkey(tuple(x), maxdepth), sort(keys(a)))
end
#####################################################
## compact
function compact(filename::AbstractString)
tmpfilename = tempname()
dictopen(filename) do from
dictopen(tmpfilename,"w") do to
function copykey(k)
if isdict(from, k...)
map(x->copykey(tuple(k..., x)), keys(from, k...))
assert(isempty(setdiff(keys(from, k...), keys(to, k...))))
else
to[k...] = from[k...]
end
end
[copykey(tuple(x)) for x in keys(from)]
end
end
mv(tmpfilename, filename, remove_destination=true)
end
include("snapshot.jl")
end
| [
27,
456,
62,
30783,
29,
15,
198,
834,
3866,
5589,
576,
834,
3419,
198,
198,
21412,
360,
713,
25876,
198,
3500,
1086,
17500,
11,
44224,
6601,
11,
797,
39344,
11,
3082,
265,
198,
3500,
5572,
37,
20,
198,
31,
631,
87,
634,
1262,
449,... | 2.09154 | 4,468 |
<filename>src/noise_interfaces/common.jl
DiffEqBase.has_reinit(i::AbstractNoiseProcess) = true
function DiffEqBase.reinit!(W::Union{NoiseProcess,NoiseApproximation},dt;
t0 = W.t[1],
erase_sol = true,
setup_next = false)
if erase_sol
resize!(W.t,1)
resize!(W.W,1)
if W.Z != nothing
resize!(W.Z,1)
end
end
W.curt = t0
W.dt = dt
if typeof(W) <: NoiseApproximation
reinit!(W.source1)
if W.source2 != nothing
reinit!(W.source2)
end
end
if isinplace(W)
W.curW .= first(W.W)
if W.Z != nothing
W.curZ .= first(W.Z)
end
else
W.curW = first(W.W)
if W.Z != nothing
W.curZ = first(W.Z)
end
end
if typeof(W) <: NoiseProcess
while !isempty(W.S₁)
pop!(W.S₁) # Get a reset for this stack?
end
ResettableStacks.reset!(W.S₂)
end
setup_next && setup_next_step!(W)
return nothing
end
function DiffEqBase.reinit!(W::AbstractNoiseProcess,dt;
t0 = W.t[1],
erase_sol = true,
setup_next = false)
W.curt = t0
W.dt = dt
if typeof(W) <: NoiseGrid
if isinplace(W)
W.curW .= W.W[1]
if W.Z != nothing
W.curZ .= W.Z[1]
end
else
W.curW = W.W[1]
if W.Z != nothing
W.curZ = W.Z[1]
end
end
W.step_setup = true
end
setup_next && setup_next_step!(W)
return nothing
end
function Base.reverse(W::AbstractNoiseProcess)
if typeof(W) <: NoiseGrid
backwardnoise = NoiseGrid(reverse(W.t),reverse(W.W))
else
W.save_everystep = false
backwardnoise = NoiseWrapper(W, reverse=true)
end
return backwardnoise
end
| [
27,
34345,
29,
10677,
14,
3919,
786,
62,
3849,
32186,
14,
11321,
13,
20362,
198,
28813,
36,
80,
14881,
13,
10134,
62,
260,
15003,
7,
72,
3712,
23839,
2949,
786,
18709,
8,
796,
2081,
198,
8818,
10631,
36,
80,
14881,
13,
260,
15003,
... | 1.873134 | 938 |
module types
export Body, Moon, Command_Module, EarthMoonSystem
type Body{T}
mass::T
velocity::Vector{T}
radius::T
position::Vector{T}
end
typealias Moon Body
type Command_Module{T}
mass::T
velocity::Vector{T}
radius::T
position::Vector{T}
positionE::Vector{T}
positionH::Vector{T}
velocityE::Vector{T}
velocityH::Vector{T}
end
type EarthMoonSystem
time::Float64
earth::Body
moon::Moon
command_module::Command_Module
end
end
| [
21412,
3858,
198,
198,
39344,
12290,
11,
6869,
11,
9455,
62,
26796,
11,
3668,
31640,
11964,
198,
198,
4906,
12290,
90,
51,
92,
198,
220,
220,
220,
2347,
3712,
51,
198,
220,
220,
220,
15432,
3712,
38469,
90,
51,
92,
198,
220,
220,
... | 2.487437 | 199 |
### A Pluto.jl notebook ###
# v0.19.6
using Markdown
using InteractiveUtils
# This Pluto notebook uses @bind for interactivity. When running this notebook outside of Pluto, the following 'mock version' of @bind gives bound variables a default value (instead of an error).
macro bind(def, element)
quote
local iv = try Base.loaded_modules[Base.PkgId(Base.UUID("6e696c72-6542-2067-7265-42206c756150"), "AbstractPlutoDingetjes")].Bonds.initial_value catch; b -> missing; end
local el = $(esc(element))
global $(esc(def)) = Core.applicable(Base.get, el) ? Base.get(el) : iv(el)
el
end
end
# ╔═╡ 685479e8-1ad5-48d8-b9fe-f2cf8a672700
using AstroImages, PlutoUI
# ╔═╡ 59e1675f-9426-4bc4-88cc-e686ed90b6b5
md"""
Download a FITS image and open it.
Apply `restrict` to downscale 2x for faster rendering.
"""
# ╔═╡ d1e5947b-2c1a-46fc-ab8f-feeba03453e7
img = AstroImages.restrict(
AstroImage(download("http://www.astro.uvic.ca/~wthompson/astroimages/fits/656nmos.fits"))
);
# ╔═╡ c9ebe984-4630-47c1-a941-795293f5b3c1
md"""
Display options
"""
# ╔═╡ a3e81f3f-203b-47b7-ac60-b4267eddfad4
md"""
| parameter | value |
|-----------|-------|
|`cmap` | $( @bind cmap Select([:magma, :turbo, :ice, :viridis, :seaborn_icefire_gradient, "red"]) ) |
|`clims`| $( @bind clims Select([Percent(99.5), Percent(95), Percent(80), Zscale(), (0, 400)]) ) |
| `stretch` | $( @bind stretch Select([identity, asinhstretch, logstretch, sqrtstretch, powstretch, powerdiststretch, squarestretch])) |
| `contrast` | $(@bind contrast Slider(0:0.1:2.0, default=1.0)) |
| `bias` | $(@bind bias Slider(0:0.1:1.0, default=0.5)) |
"""
# ╔═╡ 2315ffec-dc49-413a-b0d6-1bcce2addd76
imview(img; cmap, clims, stretch, contrast, bias)
# ╔═╡ d2bd2f13-ed23-42c5-9317-5b48ec3a8bb7
md"""
## `implot`
Uncomment the following cells to use `Plots` instead.
"""
# ╔═╡ fe6b5b76-8b77-4bfc-a2e8-bcc0b78ad764
# using Plots
# ╔═╡ f557784e-828c-415e-abb0-964b3a9fe8ef
# implot(img; cmap, clims, stretch, contrast, bias)
# ╔═╡ Cell order:
# ╠═685479e8-1ad5-48d8-b9fe-f2cf8a672700
# ╟─59e1675f-9426-4bc4-88cc-e686ed90b6b5
# ╠═d1e5947b-2c1a-46fc-ab8f-feeba03453e7
# ╟─c9ebe984-4630-47c1-a941-795293f5b3c1
# ╟─a3e81f3f-203b-47b7-ac60-b4267eddfad4
# ╠═2315ffec-dc49-413a-b0d6-1bcce2addd76
# ╟─d2bd2f13-ed23-42c5-9317-5b48ec3a8bb7
# ╠═fe6b5b76-8b77-4bfc-a2e8-bcc0b78ad764
# ╠═f557784e-828c-415e-abb0-964b3a9fe8ef
| [
21017,
317,
32217,
13,
20362,
20922,
44386,
198,
2,
410,
15,
13,
1129,
13,
21,
198,
198,
3500,
2940,
2902,
198,
3500,
21365,
18274,
4487,
198,
198,
2,
770,
32217,
20922,
3544,
2488,
21653,
329,
9427,
3458,
13,
1649,
2491,
428,
20922,
... | 1.990871 | 1,205 |
<reponame>astrieanna/raft.jl<filename>src/raft.jl<gh_stars>0
module Raft
using ProtoBuf
include("./common_types.jl")
include("./leader.jl")
include("./candidate.jl")
include("./follower.jl")
end
| [
27,
7856,
261,
480,
29,
459,
5034,
7697,
14,
1617,
13,
20362,
27,
34345,
29,
10677,
14,
1617,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
21412,
7567,
701,
198,
3500,
45783,
33,
3046,
198,
198,
17256,
7,
1911,
14,
11321,
62,
19199... | 2.444444 | 81 |
<reponame>JuliaQuant/QuantLib.jl
type SwapForwardBasisSystem <: MarketModelBasisSystem
rateTimes::Vector{Float64}
exerciseTimes::Vector{Float64}
currentIndex::Int
rateIndex::Vector{Int}
evolution::EvolutionDescription
end
function SwapForwardBasisSystem(rateTimes::Vector{Float64}, exerciseTimes::Vector{Float64})
evolution = EvolutionDescription(rateTimes, exerciseTimes)
rateIndex = Vector{Int}(length(exerciseTimes))
j = 1
for i in eachindex(exerciseTimes)
while j <= length(rateTimes) && rateTimes[j] < exerciseTimes[i]
j += 1
end
rateIndex[i] = j
end
return SwapForwardBasisSystem(rateTimes, exerciseTimes, -1, rateIndex, evolution)
end
reset!(bs::SwapForwardBasisSystem) = bs.currentIndex = 1
next_step!(bs::SwapForwardBasisSystem, ::CurveState) = bs.currentIndex += 1
function number_of_functions(bs::SwapForwardBasisSystem)
n = length(bs.exerciseTimes)
sizes = fill(10, n)
if bs.rateIndex[n] == length(bs.rateIndex) - 3
sizes[end] = 6
end
if bs.rateIndex[n] == length(bs.rateIndex) - 2
sizes[end] = 3
end
return sizes
end
function set_values!(bs::SwapForwardBasisSystem, currentState::CurveState, results::Vector{Float64})
rateIndex = bs.rateIndex[bs.currentIndex - 1]
if rateIndex < length(bs.rateTimes) - 2
resize!(results, 10)
x = forward_rate(currentState, rateIndex)
y = coterminal_swap_rate(currentState, rateIndex + 1)
z = discount_ratio(currentState, rateIndex, length(bs.rateTimes))
results[1] = 1.0
results[2] = x
results[3] = y
results[4] = z
results[5] = x*y
results[6] = y*z
results[7] = z*x
results[8] = x*x
results[9] = y*y
results[10] = z*z
else
if rateIndex == length(bs.rateTimes) - 2
x = forward_rate(currentState, rateIndex)
y = forward_rate(currentState, rateIndex + 1)
resize!(results, 6)
results[1] = 1.0
results[2] = x
results[3] = y
results[4] = x*x
results[5] = x*y
results[6] = y*y
else
x = forward_rate(currentState, rateIndex)
resize!(results, 3)
results[1] = 1.0
results[2] = x
results[3] = x*x
end
end
return results
end
clone(bs::SwapForwardBasisSystem) = SwapForwardBasisSystem(copy(bs.rateTimes), copy(bs.exerciseTimes), bs.currentIndex, copy(bs.rateIndex), clone(bs.evolution))
| [
27,
7856,
261,
480,
29,
16980,
544,
24915,
14,
24915,
25835,
13,
20362,
198,
4906,
48408,
39746,
15522,
271,
11964,
1279,
25,
5991,
17633,
15522,
271,
11964,
198,
220,
2494,
28595,
3712,
38469,
90,
43879,
2414,
92,
198,
220,
5517,
28595... | 2.525134 | 935 |
function fibsq04()
#
# test polynomial suggested by Goedecker
#
n = 4;
p = fib(n)
p = conv(p,p)
z = [-.7748041132154339;
-.07637893113374573-.8147036471703865*im; -.07637893113374573+.8147036471703865*im;
1.927561975482925]
z = [z 2*ones(n)]
p, PolyZeros(z)
end
| [
8818,
12900,
31166,
3023,
3419,
198,
2,
198,
2,
220,
1332,
745,
6213,
49070,
5220,
416,
1514,
18654,
15280,
198,
2,
198,
220,
220,
220,
299,
796,
604,
26,
198,
220,
220,
220,
279,
796,
12900,
7,
77,
8,
198,
220,
220,
220,
279,
7... | 1.821429 | 168 |
<reponame>zgornel/j4pr.jl
#################################################################################################
# Functions needed for the manipulation of datacell labels
#
# Desired functionality:
# - adding labels to existing DataCell
# - removing labels from existing DataCell
# - changing some/all labels from existing DataCell
# - remap labels to something else : e.g. from "apple" to "bear" etc using regexpz
# - extract column(s) from data and transform them into labels
#
# Observation: All functions return new objects (they do not modify the existing ones)
#################################################################################################
# Remove labels from DataCells
# - Can be done by creating a new datacell(old_datacell.x) or datacell(old_datacell.x, old_datacell.y[:,...])
# Change labels from existing datacell
# - Extract current labels, modify accordingly and create new datacell re-using the data and new labels.
"""
addlabels(x, labels)
Creates a new datacell by adding `labels<:AbstractArray` to the existing labels
of data cell `x`.
"""
addlabels(x::T where T<:CellData, labels::S where S<:AbstractArray) = datacell(getx!(x), dcat(gety!(x), labels), name=x.tinfo)
"""
unlabel(data)
Removes labels (if any) from the data cell and returns a new datacell with the
data contents only.
"""
unlabel(x::T where T<:CellData) = datacell(getx(x); name = x.tinfo)
"""
labelize(x, f [,idx ,remove])
labelize(x, idx [,remove])
labelize(x, v [,remove])
Adds labels (also known as 'targets') to `x::DataCell`. If `x` is not provided, returns a fixed `Function cell` that when piped a DataCell into,
it will add labels/targets and return it.
# Arguments
* `f` is a `targets`-related function (e.g. `targets(f,...)` ) that it is applied to the existing labels of `x` if `idx` is not specified or to the
variables of `x` indicated by `idx`
* `idx` specifies variables in `x`; can be anything that can be used as a variable index in a `DataCell`
* `v` a vector that will become the new targets of `x`
* `remove` defaults to `true` and specifies whether any existing labels/targets are to be kept or, if `idx` is present, whether to remove the
variables from `x` from which the new labels/targets were obtained.
"""
labelize(f::T where T<:Function, remove::Bool=true) = FunctionCell(labelize, (f,remove), "Data labeler: f=$f, remove=$remove")
labelize(v::T where T<:AbstractArray, remove::Bool=true) = FunctionCell(labelize, (v,remove), "Data labeler: preloaded targets, remove=$remove")
labelize(idx::T where T, remove::Bool=true) = FunctionCell(labelize, (idx,remove), "Data labeler: idx=$idx remove=$remove")
labelize(f::T where T<:Function, idx::S where S, remove::Bool=true) = FunctionCell(labelize, (f,idx,remove), "Data labeler: f=$f idx=$idx remove=$remove")
labelize(x::T where T<:CellDataU, f::Function, remove::Bool=true) =
error("[labelize] Targets or the indices of variables from which to create targets required.")
labelize(x::T where T<:CellData, f::Function, remove::Bool=true) = begin
if remove
datacell(getx(x), targets(f, gety(x)), name=x.tinfo) # replace labels
else
datacell(getx(x), dcat(gety(x), targets(f, gety(x))), name=x.tinfo) # add to existing labels
end
end
labelize(x::T where T<:CellData, v::S where S<:AbstractArray, remove::Bool=true) = begin
if remove
return datacell( getx(x), getobs(v), name=x.tinfo) # replace labels
else
return datacell( getx(x), dcat(gety(x), getobs(v)), name=x.tinfo ) # add to existing labels
end
end
labelize(x::T where T<:CellData, idx::S where S, remove::Bool=true) = labelize(x, identity, idx, remove)
labelize(x::T where T<:CellData, f::Function, idx::S where S, remove::Bool=true) = begin
labels = targets(f, getx!(varsubset(x,idx)))
if remove
return datacell( getobs(_variable_(x, setdiff(1:nvars(x),idx))), labels, name=x.tinfo)
else
return datacell( getx(x), labels, name=x.tinfo )
end
end
| [
27,
7856,
261,
480,
29,
89,
70,
1211,
417,
14,
73,
19,
1050,
13,
20362,
198,
29113,
29113,
29113,
2,
198,
2,
40480,
2622,
329,
262,
17512,
286,
4818,
558,
297,
14722,
198,
2,
198,
2,
2935,
1202,
11244,
25,
198,
2,
220,
220,
220,... | 2.918919 | 1,369 |
module ReactiveMPMathTest
using Test
using ReactiveMP
using Random
@testset "Math" begin
@testset "tiny/huge" begin
@test typeof(tiny) === TinyNumber
@test typeof(huge) === HugeNumber
@test convert(Float32, tiny) == 1f-6
@test convert(Float64, tiny) == 1e-12
@test convert(BigFloat, tiny) == big"1e-24"
@test convert(Float32, huge) == 1f+6
@test convert(Float64, huge) == 1e+12
@test convert(BigFloat, huge) == big"1e+24"
@test @inferred clamp(1f0, tiny, huge) == 1f0
@test @inferred clamp(0f0, tiny, huge) == 1f-6
@test @inferred clamp(1f13, tiny, huge) == 1f+6
@test @inferred clamp(1.0, tiny, huge) == 1.0
@test @inferred clamp(0.0, tiny, huge) == 1e-12
@test @inferred clamp(1e13, tiny, huge) == 1e12
@test @inferred clamp(big"1.0", tiny, huge) == big"1.0"
@test @inferred clamp(big"0.0", tiny, huge) == big"1e-24"
@test @inferred clamp(big"1e25", tiny, huge) == big"1e+24"
for a in (1, 1.0, 0, 0.0, 1f0, 0f0, Int32(0), Int32(1), big"1", big"1.0", big"0", big"0.0")
T = typeof(a)
for v in [ tiny, huge ]
V = typeof(v)
for op in [ +, -, *, /, >, >=, <, <= ]
@test @inferred op(a, v) == op(a, convert(promote_type(T, V), v))
@test @inferred op(v, a) == op(convert(promote_type(T, V), v), a)
@test @inferred op(a, v) == op(a, v)
@test @inferred op(v, a) == op(v, a)
end
@test v <= (@inferred clamp(a, v, Inf)) <= Inf
@test zero(a) <= (@inferred clamp(a, zero(a), v)) <= v
for size in [ 3, 5 ]
for array in [ fill(a, (size, )), fill(a, (size, size)) ]
for op in [ +, -, *, /, >, >=, <, <= ]
@test @inferred op.(array, v) == op.(array, convert(promote_type(T, V), v))
@test @inferred op.(v, array) == op.(convert(promote_type(T, V), v), array)
@test @inferred op.(array, v) == op.(array, v)
@test @inferred op.(v, array) == op.(v, array)
end
@test @inferred clamp.(array, v, Inf) == clamp.(array, v, Inf)
@test @inferred clamp.(array, zero(array), v) == clamp.(array, zero(array), v)
end
end
end
end
end
end
end | [
21412,
797,
5275,
44,
5868,
776,
14402,
198,
198,
3500,
6208,
198,
3500,
797,
5275,
7378,
198,
3500,
14534,
198,
198,
31,
9288,
2617,
366,
37372,
1,
2221,
628,
220,
220,
220,
2488,
9288,
2617,
366,
44152,
14,
40878,
1,
2221,
220,
62... | 1.807827 | 1,431 |
using Random, Plots, Distributions, Statistics
# Example solution for exercise 8.8 using Julia 1.4.1
#
# My solution has similar shape with the book, but different start state value
# under the greedy policy. I am not sure where goes wrong, probably in the
# reward calculation? But my results are similar to all the other people's
# results which I found online (see below reference implementation). So just
# take my solution as one of the references, don't treat it absolutely correct.
#
# By @burmecia at GitHub, 15 May 2020
#
# Other reference implementation:
#
# 1. https://github.com/ShangtongZhang/reinforcement-learning-an-introduction/blob/master/chapter08/trajectory_sampling.py
# 2. https://github.com/JuliaReinforcementLearning/ReinforcementLearningAnIntroduction.jl/blob/b5c718a891a4b3db4fae177b8b33ca506df1ecea/notebooks/Chapter08_Trajectory_Sampling.ipynb
# 3. https://github.com/enakai00/rl_book_solutions/blob/master/Chapter08/Exercise_8_8_Solution.ipynb
stat_len = 10000
T = 200000
ε = 0.1
act_len = 2
term_prob = 0.1
tick_num = 100
# make transition matrix, (state, action) -> (next_state, reward)
# element is b tuples: [(next_state, reward),...]
function make_transition_matrix(b)
trans = [
map(s -> (s, randn()), rand(1:stat_len, b))
for s = 1:stat_len, a = 1:act_len
]
# terminal state, transit to itself with zero reward
trans[stat_len, 1] = [(stat_len, 0) for _ = 1:b]
trans[stat_len, 2] = [(stat_len, 0) for _ = 1:b]
trans
end
# take action, observe the next state and reward
function next_state(s, a, trans)
if rand() < term_prob
return stat_len, 0
end
sample(trans[s, a])
end
# evaluate start state value under greedy policy
function start_state_value(Q, trans)
n = 200
returns = zeros(Float64, n)
for i = 1:n
s = 1
while s != stat_len
a = argmax(Q[s, :])
s, r = next_state(s, a, trans)
returns[i] += r
end
end
mean(returns)
end
function run_uniform(b, trans)
Q = zeros(Float64, stat_len, act_len)
interval = T ÷ tick_num
vπ_s0 = zeros(Float64, tick_num + 1)
for t = 0:T
s = t % stat_len + 1
a = (t % (stat_len * act_len)) ÷ stat_len + 1
next_states = trans[s, a]
Q[s, a] =
(1 - term_prob) *
mean(map(ns -> (ns[2] + maximum(Q[ns[1], :])), next_states))
if t % interval == 0
vπ_s0[t ÷ interval + 1] = start_state_value(Q, trans)
end
end
vπ_s0
end
function run_on_policy(b, trans)
Q = zeros(Float64, stat_len, act_len)
interval = T ÷ tick_num
vπ_s0 = zeros(Float64, tick_num + 1)
s = 1
for t = 0:T
if rand() < ε
a = rand(1:act_len)
else
a = argmax(Q[s, :])
end
next_states = trans[s, a]
Q[s, a] =
(1 - term_prob) *
mean(map(ns -> (ns[2] + maximum(Q[ns[1], :])), next_states))
s, r = next_state(s, a, trans)
if s == stat_len
s = 1
end
if t % interval == 0
vπ_s0[t ÷ interval + 1] = start_state_value(Q, trans)
end
end
vπ_s0
end
function experiment(b_list, p, subplot)
task_cnt = 100
interval = T ÷ tick_num
x = 0:interval:T
for b in b_list
uniform = zeros(Float64, task_cnt, length(x))
on_policy = zeros(Float64, task_cnt, length(x))
println("start subplot $(subplot), b=$(b)")
for i = 1:task_cnt
if i % 10 == 0
println(" task $(i)")
end
trans = make_transition_matrix(b)
uniform[i, :] = run_uniform(b, trans)
on_policy[i, :] = run_on_policy(b, trans)
end
uniform = mean(uniform, dims = 1)
on_policy = mean(on_policy, dims = 1)
plot!(
p,
x,
uniform[1, :],
subplot = subplot,
title = "$(stat_len) states",
label = "b=$(b), uniform",
)
plot!(
p,
x,
on_policy[1, :],
subplot = subplot,
label = "b=$(b), on-policy",
)
end
end
function main()
global stat_len, T
p = plot(
xlabel = "Computation time, in expected updates",
ylabel = "Start state value",
legend = :best,
size = (600, 800),
layout = (2, 1),
)
stat_len, T = 1000, 20000
experiment([1, 3, 10], p, 1)
stat_len, T = 10000, 200000
experiment([1, 3], p, 2)
display(p)
end
# uncomment the main function below to start learning
#main()
| [
3500,
14534,
11,
1345,
1747,
11,
46567,
507,
11,
14370,
198,
198,
2,
17934,
4610,
329,
5517,
807,
13,
23,
1262,
22300,
352,
13,
19,
13,
16,
198,
2,
198,
2,
2011,
4610,
468,
2092,
5485,
351,
262,
1492,
11,
475,
1180,
923,
1181,
1... | 2.097253 | 2,221 |
<reponame>elavia/liquid_spheroid
#
#
# Script for calculation of far-field pattern in the liquid case (sphere)
#

# `writedlm` lives in the stdlib DelimitedFiles module since Julia 0.7;
# without this import the final save step throws UndefVarError.
using DelimitedFiles

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User configurable parameters
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Physical parameters
rho10 = 5.00 ;       # Density ratio (medium 1 / medium 0)
k_0 = 4 ;            # Wave number in media 0
k_1 = 6 ;            # Wave number in media 1
a = 1 ;              # Sphere radius
theta_inc = pi/4 ;   # Incidence angle in radians
# Software parameters
grid_size = 200 ;    # Angular grid size
N = 150 ;            # Number of terms in the series solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Calculation
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Far-field pattern calculation (`Pattern_LiquidSphere` is defined elsewhere
# in this project).
Pattern = Pattern_LiquidSphere( rho10, k_0, k_1, a, grid_size, N, theta_inc ) ;
# Saving to disk (tab-separated)
writedlm( "Out.Pattern.dat", Pattern , '\t' ) ;
27,
7856,
261,
480,
29,
417,
40543,
14,
39250,
62,
2777,
258,
3882,
198,
197,
2,
198,
197,
2,
197,
7391,
329,
17952,
286,
1290,
12,
3245,
3912,
287,
262,
8122,
1339,
357,
2777,
1456,
8,
198,
197,
2,
628,
197,
2,
220,
27156,
2715... | 3.326087 | 276 |
<gh_stars>1-10
using Plots, Test

# Regression test for per-subplot attribute "slicing": a 1×4 row matrix of
# axis attributes distributes one entry to each subplot in layout order.
@testset "Subplot sclicing" begin
    pl = @test_nowarn plot(
        rand(4, 8),
        layout = 4,
        yscale = [:identity :identity :log10 :log10],
    )
    @test pl[1][:yaxis][:scale] == :identity
    @test pl[2][:yaxis][:scale] == :identity
    @test pl[3][:yaxis][:scale] == :log10
    @test pl[4][:yaxis][:scale] == :log10
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
1345,
1747,
11,
6208,
198,
198,
31,
9288,
2617,
366,
7004,
29487,
264,
565,
6345,
1,
2221,
198,
220,
220,
220,
458,
796,
2488,
9288,
62,
2197,
1501,
7110,
7,
198,
220,
220,
220,
22... | 2.043716 | 183 |
<filename>src/blinreg.jl
"""
    blinreg(y, x)

Ordinary least-squares regression of `y` on `x`; an intercept column is
added internally. Prints coefficient estimates, standard errors, odds,
p-values, the equation variance, R², AIC and BIC, and returns

    b, seb, odds, pvals, s, R2, RSS, AIC, BIC

where
- `y` = dependent variable vector
- `x` = matrix of independent variables (no intercept in x)
"""
function blinreg(y,x)
    # add intercept
    n = length(y)
    X = [ones(n) x]
    # OLS coefficients via the normal equations
    b = (X'*X) \ X'*y
    resids = y - X*b
    RSS = sum(resids.^2)
    # NOTE(review): this is the ML variance estimate RSS/n, not the unbiased
    # RSS/(n-k) — confirm this choice is intentional (AIC/BIC below use it).
    s2 = RSS/n
    covb = s2.*inv(X'X)
    seb = sqrt.(diag(covb))
    k = length(b)
    ### Correct formulas for AIC and BIC
    BIC = n*log(s2) + k*log(n)
    AIC = 2*k + n*log(s2)
    odds = zeros(k)
    pvals = zeros(k)
    # |t|-statistics per coefficient
    that = abs.(b)./seb
    for i in 1:k
        odds[i] = (1.0 + (that[i]^2)/(n-k))^((n-k+1)/2.0)
        # one-sided p-value from a t distribution with n-k dof
        pvals[i] = 1.0 - cdf(TDist(n-k), that[i])
    end
    println("  coeffs = ", round.(b, digits=3))
    println("   s.e's = ", round.(seb, digits=3))
    println("    odds = ", round.(odds, digits=3))
    println("p-values = ", round.(pvals, digits=4))
    println("s^2 (eqn. variance) = ",round(s2, digits=6))
    # compute R^2
    tss = sum((y .- mean(y)).^2)
    R2 = 1 - s2*n/tss
    println("Rsquared = ",round(R2, digits=3))
    println("AIC = ", round(AIC, digits = 2), " BIC = ", round(BIC, digits = 2),)
    s = sqrt(s2)
    return b, seb, odds, pvals, s, R2, RSS, AIC, BIC
end
## example of use:
#x = randn(20)
#y = 1 + 1.*x .+ randn(20)
#b, seb, odds, pvals, s, R2, RSS, AIC, BIC = blinreg(y,x)
| [
27,
34345,
29,
10677,
14,
2436,
259,
2301,
13,
20362,
198,
198,
37811,
198,
29127,
20683,
2163,
198,
220,
3082,
1133,
763,
14822,
13,
7746,
11,
264,
13,
68,
338,
11,
16022,
18074,
225,
11,
12820,
421,
1144,
198,
198,
26060,
25,
628,... | 2.104294 | 652 |
<gh_stars>0
using BCTRNN
using DiffEqSensitivity
using OrdinaryDiffEq
import DiffEqFlux: FastChain, FastDense
import Flux: ClipValue, ADAM
# Not in Project.toml
using Plots
gr()
include("half_cheetah_data_loader.jl")
# Train an LTC (liquid time-constant) network on half-cheetah sequence data.
# `solver`/`sensealg` choose the ODE solver and adjoint sensitivity method;
# `model_size` sets the number of LTC neurons. Returns the optimization
# result together with the model. Extra solver options pass through `kwargs`.
function train_cheetah(epochs, solver=nothing; sensealg=nothing,
  T=Float32, model_size=5, batchsize=1, seq_len=32, normalise=true,
  kwargs...)

  train_dl, test_dl, _, _ = get_2d_dl(T; batchsize, seq_len, normalise=true)

  @show size(first(train_dl)[1])
  @show size(first(train_dl)[1][1])

  # half-cheetah observations: 17 features in, 17 out
  f_in = 17
  f_out = 17
  n_neurons = model_size
  n_sens = n_neurons
  n_out = n_neurons

  model = FastChain(BCTRNN.Mapper(f_in),
                    BCTRNN.LTC(f_in, n_neurons, solver, sensealg; n_sens, n_out),
                    FastDense(n_out, f_out))

  cb = BCTRNN.MyCallback(T; ecb=mycb, nepochs=epochs, nsamples=length(train_dl))
  #opt = GalacticOptim.Flux.Optimiser(ClipValue(0.5), ADAM(0.02))
  # gradient clipping + ADAM, with parameters clamped to the model's bounds
  opt = BCTRNN.ClampBoundOptim(BCTRNN.get_bounds(model,T)..., ClipValue(T(0.8)), ADAM(T(0.005)))
  BCTRNN.optimize(model, BCTRNN.loss_seq, cb, opt, train_dl, epochs, T), model
end
#1173.351351 seconds (1.02 G allocations: 65.414 GiB, 1.82% gc time, 0.51% compilation time)
# Run 1: explicit Tsit5 solver, reverse-mode tape adjoint.
train_cheetah(30, Tsit5(); sensealg=InterpolatingAdjoint(autojacvec=ReverseDiffVJP(true)), model_size=8, batchsize=10, abstol=1e-4, reltol=1e-3
)
# Run 2: stiffness-switching solver with a smaller model.
train_cheetah(30, AutoTsit5(Rosenbrock23(autodiff=false)); sensealg=InterpolatingAdjoint(autojacvec=ReverseDiffVJP(true)), model_size=6, batchsize=10, abstol=1e-4, reltol=1e-3
)
27,
456,
62,
30783,
29,
15,
198,
3500,
347,
4177,
49,
6144,
198,
3500,
10631,
36,
80,
50,
40545,
198,
3500,
14230,
3219,
28813,
36,
80,
198,
11748,
10631,
36,
80,
37,
22564,
25,
12549,
35491,
11,
12549,
35,
1072,
198,
11748,
1610,
... | 2.18705 | 695 |
# build the lookup
# Per-slot symbols; 0 means "empty".
code = [0, 1, 2, 3, 4]
using Base.Iterators
# All 4-tuples over `code` (lazy cartesian product, 5^4 combinations).
code4 = product(code, code, code, code)
using DataFrames
"""
    makeu(code4)

Canonicalise a 4-tuple of codes: zeros stay zero, and each distinct
nonzero value is relabelled 1, 2, … in order of first appearance, so
tuples that differ only by a permutation of labels map to the same
vector. Returns a `Vector`.
"""
function makeu(code4)
    canon = [code4...]
    relabel = Dict{Int, Int}()
    next_label = 1
    for i in 1:4
        v = canon[i]
        v == 0 && continue          # empty slot: leave untouched
        if !haskey(relabel, v)
            relabel[v] = next_label # first appearance gets the next label
            next_label += 1
        end
        canon[i] = relabel[v]
    end
    canon
end
# Canonicalise every tuple and keep only the distinct patterns.
ucode4 = collect([makeu(c) for c in code4]) |> unique
# NOTE(review): `move!` is defined elsewhere; presumably it maps a canonical
# state vector to its successor — confirm against its definition.
res = [move!([ucode4...]) for ucode4 in ucode4]
# Stack states (a) and successors (b) as row matrices.
a = mapreduce(x->transpose([x...]), vcat, ucode4)
b = mapreduce(x->transpose([x...]), vcat, res)
using CSV, DataFrames
df = DataFrame()
# Columns ok1..okN: state columns followed by successor columns.
for (i, c) in enumerate(eachcol(hcat(a,b)))
    df[!, Symbol("ok"*string(i))] = c
end
df
CSV.write("d:/data/ok.csv", df)
| [
2,
1382,
262,
35847,
201,
198,
8189,
796,
685,
15,
11,
352,
11,
362,
11,
513,
11,
604,
60,
201,
198,
201,
198,
3500,
7308,
13,
29993,
2024,
201,
198,
201,
198,
8189,
19,
796,
1720,
7,
8189,
11,
2438,
11,
2438,
11,
2438,
8,
201... | 1.97644 | 382 |
<reponame>UnofficialJuliaMirrorSnapshots/NumericExtensions.jl-d47e95ee-b316-5a26-aa50-c41fa0b627f1<filename>src/statistics.jl
# Reduction functions related to statistics
###################
#
# Varm & Stdm
#
###################
# varm
"""
    varm(x, mu)

Sample variance of `x` about a supplied center `mu`, with the
Bessel-corrected (n-1) denominator. Errors on empty input.
"""
function varm(x::ContiguousRealArray, mu::Real)
    isempty(x) && error("varm: empty array is not allowed.")
    n = length(x)
    # accumulate squared deviations in a single pass
    @inbounds acc = abs2(x[1] - mu)
    for idx = 2:n
        @inbounds acc += abs2(x[idx] - mu)
    end
    return acc / (n - 1)
end
# Column kernel: variance of each column of an m×n slab `x` about the
# per-column centers `mu`, written into `dst[1:n]`.
# NOTE: `{R<:Real}` is pre-0.6 parametric-method syntax (`where R<:Real` today).
function _varm_eachcol!{R<:Real}(m::Int, n::Int, dst::ContiguousArray{R}, x, mu)
    c = inv(m - 1)   # Bessel normalisation, hoisted out of the loop
    o = 0            # linear offset of the current column in `x`
    for j = 1:n
        s2 = zero(R)
        @inbounds mu_j = mu[j]
        for i = 1:m
            @inbounds s2 += abs2(x[o + i] - mu_j)
        end
        @inbounds dst[j] = s2 * c
        o += m
    end
end
# Row kernel: variance across the n columns for each of the m rows, about
# per-row centers `mu`. Accumulates into `dst` in three phases — first
# column initialises, middle columns add, last column adds and rescales —
# so `dst` doubles as the accumulator (no scratch buffer needed).
function _varm_eachrow!(m::Int, n::Int, dst::ContiguousRealArray, x, mu)
    o::Int = 0
    # phase 1: first column initialises dst
    for i = 1:m
        @inbounds dst[i] = abs2(x[o + i] - mu[i])
    end
    o += m
    # phase 2: columns 2 .. n-1 accumulate
    for j = 2:n-1
        for i = 1:m
            @inbounds dst[i] += abs2(x[o + i] - mu[i])
        end
        o += m
    end
    # phase 3: last column accumulates and applies the (n-1) normalisation
    c = inv(n - 1)
    for i = 1:m
        @inbounds v = dst[i] + abs2(x[o + i] - mu[i])
        dst[i] = v * c
    end
end
# In-place variance about `mu` along dimension `dim`, writing into `dst`.
# Dispatches to the column kernel for dim == 1; otherwise treats `x` as
# k slabs of m×n and applies the row kernel slab by slab via offset_view.
function varm!(dst::ContiguousRealArray, x::ContiguousRealArray, mu::ContiguousRealArray, dim::Int)
    !isempty(x) || error("varm!: empty array is not allowed.")
    nd = ndims(x)
    1 <= dim <= nd || error("varm: invalid value of dim.")
    shp = size(x)
    rlen = prod(Base.reduced_dims(shp, dim))
    length(dst) == length(mu) == rlen || error("Inconsistent argument dimensions.")
    if dim == 1
        m = shp[1]
        n = succ_length(shp, dim)
        _varm_eachcol!(m, n, dst, x, mu)
    else
        m = prec_length(shp, dim)   # product of dims before `dim`
        n = shp[dim]
        k = succ_length(shp, dim)   # product of dims after `dim`
        _varm_eachrow!(m, n, dst, x, mu)
        if k > 1
            # remaining slabs: advance result offset by m, data offset by m*n
            mn = m * n
            ro = m
            ao = mn
            for l = 2 : k
                _varm_eachrow!(m, n,
                    offset_view(dst, ro, m), offset_view(x, ao, m, n),
                    offset_view(mu, ro, m))
                ro += m
                ao += mn
            end
        end
    end
    dst
end
# Allocating variant: variance about `mu` along `dim`, result element type
# is the float promotion of the inputs.
function varm(x::ContiguousRealArray, mu::ContiguousRealArray, dim::Int)
    rsiz = Base.reduced_dims(size(x), dim)
    length(mu) == prod(rsiz) || error("Inconsistent argument dimensions.")
    R = fptype(promote_type(eltype(x), eltype(mu)))
    varm!(Array(R, rsiz), x, mu, dim)
end
# stdm
# Standard deviation about a given center: sqrt of the varm family
# (sqrt! mutates the varm result in place for the dim variants).
stdm(x::ContiguousRealArray, mu::Real) = sqrt(varm(x, mu))
stdm(x::ContiguousRealArray, mu::ContiguousArray, dim::Int) = sqrt!(varm(x, mu, dim))
function stdm!(dst::ContiguousRealArray, x::ContiguousRealArray, mu::ContiguousRealArray, dim::Int)
    sqrt!(varm!(dst, x, mu, dim))
end
###################
#
#  Var & Std
#
###################
# Variance about the sample mean.
var(x::ContiguousRealArray) = varm(x, mean(x))
# In-place variance along `dim`. For dim == 1 each column's mean is computed
# inside `var` per column; otherwise the means are precomputed and varm! used.
function var!(dst::ContiguousRealArray, x::ContiguousRealArray, dim::Int)
    !isempty(dst) || error("var: empty array is not allowed.")
    nd = ndims(x)
    1 <= dim <= nd || error("var: invalid value of dim.")
    shp = size(x)
    if dim == 1
        m = shp[1]
        n = succ_length(shp, 1)
        ao = 0
        for j in 1 : n
            dst[j] = var(offset_view(x, ao, m))
            ao += m
        end
    else
        varm!(dst, x, mean(x, dim), dim)
    end
    dst
end
function var(x::ContiguousRealArray, dim::Int)
    var!(Array(fptype(eltype(x)), Base.reduced_dims(size(x), dim)), x, dim)
end
# std
# Standard deviation: sqrt of the var family.
std(x::ContiguousRealArray) = sqrt(var(x))
std(x::ContiguousRealArray, dim::Int) = sqrt!(var(x, dim))
std!(dst::ContiguousRealArray, x::ContiguousRealArray, dim::Int) = sqrt!(var!(dst, x, dim))
###################
#
# LogFunsumexp
#
###################
"""
    logsumexp(x)

Numerically stable `log(sum(exp, x))`: shifts by the maximum before
exponentiating so no term overflows. Errors on empty input.
"""
function logsumexp(x::ContiguousRealArray)
    isempty(x) && error("logsumexp: empty array not allowed.")
    shift = maximum(x)
    return log(sumfdiff(ExpFun(), x, shift)) + shift
end
# Column kernel: logsumexp of each column of an m×n slab, into dst[1:n].
function _logsumexp_eachcol!(m::Int, n::Int, dst::ContiguousRealArray, x::ContiguousRealArray)
    o = 0
    for j in 1 : n
        # compute max
        u = x[o + 1]
        for i in 2 : m
            @inbounds xi = x[o + i]
            if xi > u
                u = xi
            end
        end
        # sum exp
        @inbounds s = exp(x[o + 1] - u)
        for i in 2 : m
            @inbounds s += exp(x[o + i] - u)
        end
        # compute log
        dst[j] = log(s) + u
        o += m
    end
end
# Row kernel: logsumexp across the n columns of each of the m rows.
# `u` is an m-element scratch buffer holding per-row running maxima;
# `dst` doubles as the exp-sum accumulator before the final log.
function _logsumexp_eachrow!(m::Int, n::Int, dst::ContiguousRealArray,
                             u::ContiguousRealArray, x::ContiguousRealArray)
    # compute max
    for i in 1 : m
        @inbounds u[i] = x[i]
    end
    o = m
    for j in 2 : n
        for i in 1 : m
            @inbounds ui = u[i]
            @inbounds xi = x[o+i]
            if xi > ui
                @inbounds u[i] = xi
            end
        end
        o += m
    end
    # sum exp
    for i in 1 : m
        @inbounds dst[i] = exp(x[i] - u[i])
    end
    o = m
    for j in 2 : n
        for i in 1 : m
            @inbounds dst[i] += exp(x[o + i] - u[i])
        end
        o += m
    end
    # compute log
    for i in 1 : m
        @inbounds dst[i] = log(dst[i]) + u[i]
    end
end
# Dispatcher: logsumexp along `dim` into `dst`, slab by slab for dim > 1.
# NOTE: `{R<:Real,T<:Real}` is pre-0.6 parametric-method syntax.
function logsumexp!{R<:Real,T<:Real}(dst::ContiguousArray{R}, x::ContiguousRealArray{T}, dim::Int)
    !isempty(x) || error("logsumexp!: empty array not allowed.")
    nd = ndims(x)
    1 <= dim <= nd || error("logsumexp!: invalid value of dim.")
    shp = size(x)
    if dim == 1
        m = shp[1]
        n = succ_length(shp, dim)
        _logsumexp_eachcol!(m, n, dst, x)
    else
        m = prec_length(shp, dim)
        n = shp[dim]
        k = succ_length(shp, dim)
        u = Array(T, m)   # shared scratch buffer, reused across slabs
        _logsumexp_eachrow!(m, n, dst, u, x)
        if k > 1
            mn = m * n
            ro = m
            ao = mn
            for l = 2 : k
                _logsumexp_eachrow!(m, n, offset_view(dst, ro, m), u, offset_view(x, ao, m, n))
                ro += m
                ao += mn
            end
        end
    end
    dst
end
# Allocating variant along `dim`.
function logsumexp{T<:Real}(x::ContiguousArray{T}, dim::Int)
    logsumexp!(Array(fptype(T), Base.reduced_dims(size(x), dim)), x, dim)
end
###################
#
# Softmax
#
###################
# In-place softmax of vector `x` into `dst`, shifted by max(x) so the
# exponentials cannot overflow. Errors on empty or mismatched inputs.
function softmax!(dst::ContiguousRealArray, x::ContiguousRealArray)
    isempty(x) && error("softmax!: empty array is not allowed.")
    n = length(x)
    length(dst) == n || error("Inconsistent argument dimensions.")
    shift = maximum(x)
    # pass 1: exponentiate into dst while accumulating the normaliser
    @inbounds total = dst[1] = exp(x[1] - shift)
    for i in 2 : n
        @inbounds total += (dst[i] = exp(x[i] - shift))
    end
    # pass 2: scale by the reciprocal (one division instead of n)
    scale = inv(total)
    for i in 1 : n
        @inbounds dst[i] *= scale
    end
    return dst
end
# Allocating vector softmax (result uses the float type of eltype(x)).
softmax(x::ContiguousArray) = softmax!(Array(fptype(eltype(x)), size(x)), x)
# Column kernel: softmax of each m-length column of an m×n slab.
function _softmax_eachcol!(m::Int, n::Int, dst::ContiguousRealArray, x::ContiguousRealArray)
    o = 0
    for j in 1 : n
        softmax!(offset_view(dst, o, m), offset_view(x, o, m))
        o += m
    end
end
# Row kernel: softmax across the n columns of each of the m rows.
# `u` is a 2m scratch buffer: u[1:m] holds per-row maxima, u[m+1:2m] the
# per-row exp-sums (taken as a view and reciprocated in place via rcp!).
function _softmax_eachrow!(m::Int, n::Int, dst::ContiguousRealArray, u::ContiguousRealArray, x::ContiguousRealArray)
    # compute max
    for i in 1 : m
        @inbounds u[i] = x[i]
    end
    o = m
    for j in 2 : n
        for i in 1 : m
            @inbounds ui = u[i]
            @inbounds xi = x[o + i]
            if xi > ui
                @inbounds u[i] = xi
            end
        end
        o += m
    end
    # compute sum
    s = view(u, m+1:2*m)
    for i in 1 : m
        @inbounds s[i] = dst[i] = exp(x[i] - u[i])
    end
    o = m
    for j in 2 : n
        for i in 1 : m
            @inbounds s[i] += (dst[o + i] = exp(x[o + i] - u[i]))
        end
        o += m
    end
    rcp!(s)   # s[i] := 1 / s[i]
    o = 0
    for j in 1 : n
        for i in 1 : m
            @inbounds dst[o + i] .*= s[i]
        end
        o += m
    end
end
# Dispatcher: softmax along `dim` into `dst`, slab by slab for dim > 1.
# NOTE: `{T<:Real}` is pre-0.6 parametric-method syntax.
function softmax!{T<:Real}(dst::ContiguousRealArray, x::ContiguousArray{T}, dim::Int)
    !isempty(x) || error("softmax!: empty array is not allowed.")
    nd = ndims(x)
    1 <= dim <= nd || error("softmax!: invalid value for the dim argument.")
    shp = size(x)
    if dim == 1
        m = shp[1]
        n = succ_length(shp, dim)
        _softmax_eachcol!(m, n, dst, x)
    else
        m = prec_length(shp, dim)
        n = shp[dim]
        k = succ_length(shp, dim)
        u = Array(fptype(T), 2*m)   # shared scratch, reused across slabs
        _softmax_eachrow!(m, n, dst, u, x)
        if k > 1
            mn = m * n
            o = mn
            for l = 2 : k
                _softmax_eachrow!(m, n, offset_view(dst, o, m, n), u, offset_view(x, o, m, n))
                o += mn
            end
        end
    end
    dst
end
# Allocating variant along `dim`.
softmax(x::ContiguousRealArray, dim::Int) = softmax!(Array(fptype(eltype(x)), size(x)), x, dim)
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
43826,
20910,
14,
45,
39223,
11627,
5736,
13,
20362,
12,
67,
2857,
68,
3865,
1453,
12,
65,
33400,
12,
20,
64,
2075,
12,
7252,
1120,
12,
66,
3901,
13331,
15,
65,
49856,
... | 1.887048 | 4,648 |
<gh_stars>0
"""
    compute(n, k)

Expected count out of 7 groups that are represented in a draw, computed as
`7 * (1 - P(a fixed group is missed))`, where the miss probability is the
product of `1 - k/d` for `d` from `6n ÷ 7 + 1` to `n`. Rounded to 9 digits.
"""
function compute(n::Int, k::Int)::Float64
    first_term = 6n ÷ 7 + 1
    miss_prob = prod(1 - k / d for d in first_term:n)
    return round(7 * (1 - miss_prob), digits = 9)
end
27,
456,
62,
30783,
29,
15,
198,
5589,
1133,
7,
77,
3712,
5317,
11,
479,
3712,
5317,
2599,
25,
43879,
2414,
796,
2835,
7,
22,
7,
16,
532,
40426,
7,
16,
764,
12,
479,
24457,
357,
21,
77,
6184,
115,
767,
1343,
352,
25,
77,
4008,... | 1.980392 | 51 |
<filename>src/json_parser.jl
using TrussMorph
tm = TrussMorph
import JSON
using Dates
using Printf
# Parse a 2D truss model from a JSON file into a `Truss` (nodes X, elements T,
# supports S, material mp). When `parse_morph` is true the file's "morph_data"
# dict is returned as well (empty Dict otherwise).
function parse_truss_json(file_path::String; parse_morph=false)
    data = Dict()
    open(file_path, "r") do f
        data_txt = read(f, String)
        data = JSON.parse(data_txt)
    end
    dim = data["dimension"]
    n_nodes = data["node_num"]
    n_elements = data["element_num"]
    # get material properties
    # pressure: kN/cm^2 -> kN/m^2
    # density: kN/m^3
    mp = tm.MaterialProperties(data["material_properties"]["material_name"],
                               data["material_properties"]["youngs_modulus"] * 1e4,
                               data["material_properties"]["shear_modulus"] * 1e4,
                               data["material_properties"]["poisson_ratio"],
                               data["material_properties"]["density"])
    X = Matrix{Float64}(undef, n_nodes,2)
    T = Matrix{Int}(undef, n_elements,2)
    fix_node_ids = []
    # get node coord (2D: X, Y only)
    for i=1:n_nodes
        X[i,:] = hcat(data["node_list"][i]["point"]["X"],
                      data["node_list"][i]["point"]["Y"])
                      # data["node_list"][i]["point"]["Z"])
        if 1 == data["node_list"][i]["is_grounded"]
            push!(fix_node_ids, i)
        end
    end
    # get fixities: 2D uses dofs (x, y, rot-z) = indices 1, 2, 6 of the 6-dof list
    fix_dof = [1,2,6]
    if 2 != dim
        fix_dof = 1:1:6
    end
    # S rows: [node_id, fixity flags...]
    S = Matrix{Int}(undef, length(fix_node_ids),length(fix_dof)+1)
    for i=1:length(fix_node_ids)
        S[i,1] = fix_node_ids[i]
        S[i,2:end] = data["node_list"][fix_node_ids[i]]["fixities"][fix_dof]'
    end
    # get element node ids (JSON ids are 0-based; shift to 1-based)
    for i=1:n_elements
        T[i,:] = (data["element_list"][i]["end_node_ids"] .+ 1)'
    end
    morph_data = Dict()
    if parse_morph
        morph_data = data["morph_data"]
        # @show morph_data
    end
    return Truss(X, T, S, mp), morph_data
end
end
# Parse point loads from a JSON file. Returns an n×(1+node_dof) matrix:
# column 1 is the (1-based) node id, then Fx, Fy (and Mz when node_dof == 3).
# Only 2D load files are supported.
function parse_load_json(file_path::String, node_dof::Int)
    data = Dict()
    open(file_path, "r") do f
        data_txt = read(f, String)
        data = JSON.parse(data_txt)
    end
    dim = data["dimension"]
    n_load_nodes = length(data["point_load_list"])
    @assert(dim == 2)
    Load = zeros(n_load_nodes, 1+node_dof)
    for i=1:n_load_nodes
        # JSON node ids are 0-based; shift to 1-based
        Load[i,1] = data["point_load_list"][i]["applied_node_id"] + 1
        if 2 == dim
            Load[i,2] = data["point_load_list"][i]["Fx"]
            Load[i,3] = data["point_load_list"][i]["Fy"]
            if 3 == node_dof
                Load[i,4] = data["point_load_list"][i]["Mz"]
            end
        end
    end
    @assert(n_load_nodes > 0)
    return Load
    # TODO: include_self_weight
end
# Serialize every frame of a morph path to its own JSON model file in a
# subdirectory named "<start>-<end>". Topology (T), supports (S) and material
# come from `t0`; only node positions change per frame. sE/wE/totE are the
# per-frame smoothness/weight/total energies; sumE is the path total.
function save_morph_path_json(morph_path::Array{Matrix{Float64}}, file_dir::String, st_file_name::String, end_file_name::String, t0::TrussMorph.Truss, sE::Vector{Float64}, wE::Vector{Float64}, totE::Vector{Float64}, sumE::Float64, parm_smooth::Float64, parm_weight::Float64)
    # strip ".json" from both file names to build the result directory name
    pure_st_file_name = SubString(st_file_name, 1:length(st_file_name)-length(".json"))
    pure_end_file_name = SubString(end_file_name, 1:length(end_file_name)-length(".json"))
    result_file_name = pure_st_file_name * "-" * pure_end_file_name
    f_file_dir = joinpath(file_dir, result_file_name)
    if !ispath(f_file_dir)
        mkpath(f_file_dir)
    end
    # same material (pressures converted back from kN/m^2 to kN/cm^2)
    mp_data = Dict()
    mp_data["material_name"] = t0.mp.name
    mp_data["youngs_modulus"] = t0.mp.E * 1e-4
    mp_data["youngs_modulus_unit"] = "kN/cm2"
    mp_data["shear_modulus"] = t0.mp.G * 1e-4
    mp_data["shear_modulus_unit"] = "kN/cm2"
    mp_data["tensile_yeild_stress"] = "N/A"
    mp_data["tensile_yeild_stress_unit"] = "kN/cm2"
    mp_data["density"] = t0.mp.ρ
    mp_data["density_unit"] = "kN/m3"
    mp_data["poisson_ratio"] = t0.mp.μ
    mp_data["radius"] = "N/A"
    mp_data["radius_unit"] = "centimeter"
    # same topology
    topo_data = Dict[]
    for e=1:size(t0.T,1)
        e_data = Dict()
        e_data["end_node_ids"] = t0.T[e,:]
        e_data["element_id"] = e-1
        e_data["layer_id"] = 0
        push!(topo_data, e_data)
    end
    # morph data
    morph_data = Dict()
    morph_data["smooth_parameter"] = parm_smooth
    morph_data["weight_parameter"] = parm_weight
    morph_data["time_in_path"] = string(0,"/",0)
    morph_data["weight_energy"] = 0.0
    morph_data["smoothness_energy"] = 0.0
    morph_data["tot_energy"] = 0.0
    plen = length(morph_path)
    for i=1:plen
        data = Dict()
        data["model_name"] = result_file_name * "_mp" * string(i,"-",plen)
        data["model_type"]= "2D_frame"
        data["unit"] = "meter"
        data["generate_time"] = string(Dates.now())
        data["dimension"] = size(morph_path[i],2)
        # NOTE(review): this aliases the shared dict (see "#deepcopy" hint);
        # safe only because JSON.json is called before the next mutation — confirm.
        i_morph_data = morph_data #deepcopy
        i_morph_data["time_in_path"] = string(i,"/",plen)
        i_morph_data["smoothness_energy"] = sE[i]
        i_morph_data["weight_energy"] = wE[i]
        i_morph_data["tot_energy"] = totE[i]
        i_morph_data["sum_energy"] = sumE
        data["morph_data"] = i_morph_data
        data["node_num"] = size(morph_path[i],1)
        data["element_num"] = size(t0.T,1)
        data["material_properties"] = mp_data
        data["node_list"] = Dict[]
        for j=1:size(morph_path[i],1)
            pt_data = Dict()
            pt_data["point"] = Dict()
            pt_data["point"]["X"] = morph_path[i][j,1]
            pt_data["point"]["Y"] = morph_path[i][j,2]
            pt_data["node_id"] = j-1
            pt_fix = findall(x->x==j, t0.S[:,1])
            pt_data["is_grounded"] = !isempty(pt_fix)
            if !isempty(pt_fix)
                @assert(length(pt_fix) == 1)
                pt_data["fixities"] = ones(Int, 6)
                # NOTE(review): suspect bugs here — (a) the condition compares the
                # node COUNT (size(...,1)) to 2 where the spatial dimension
                # (size(...,2)) seems intended; (b) S column 1 is the node id and
                # fixity flags live in columns 2:4 (cf. parse_truss_json), yet
                # column indices 1..3 are read; (c) pt_fix is a Vector, so
                # t0.S[pt_fix,1] is a 1-element array assigned to a scalar slot.
                # Verify against callers before changing.
                if 2 == size(morph_path[i],1)
                    pt_data["fixities"][1] = t0.S[pt_fix,1]
                    pt_data["fixities"][2] = t0.S[pt_fix,2]
                    pt_data["fixities"][6] = t0.S[pt_fix,3]
                end
            else
                pt_data["fixities"] = []
            end
            push!(data["node_list"], pt_data)
        end
        data["element_list"] = topo_data
        # write one JSON file per frame, zero-padded index in the name
        stringdata = JSON.json(data)
        result_json_name = result_file_name * "_" * string(@sprintf("%03d",i),"-",plen) * ".json"
        open(joinpath(f_file_dir,result_json_name), "w") do f
            write(f, stringdata)
        end
    end
end
| [
27,
34345,
29,
10677,
14,
17752,
62,
48610,
13,
20362,
198,
3500,
833,
1046,
44,
13425,
198,
17209,
796,
833,
1046,
44,
13425,
198,
11748,
19449,
198,
3500,
44712,
198,
3500,
12578,
69,
198,
198,
8818,
21136,
62,
2213,
1046,
62,
17752... | 1.933999 | 3,303 |
using ImageIO
using Test
using ColorTypes
using FixedPointNumbers
using ImageCore
using Logging
using Random
#logger = ConsoleLogger(stdout, Logging.Debug)
logger = ConsoleLogger(stdout, Logging.Info)
global_logger(logger)
# scratch directory for round-trip image files
tmpdir = joinpath(@__DIR__,"temp")
@testset "ImageIO.jl" begin
    # Write your own tests here.
    @testset "libtiff" begin
        @test ImageIO.ModTIFF.version() == "LIBTIFF, Version 4.0.10\nCopyright (c) 1988-1996 <NAME>\nCopyright (c) 1991-1996 Silicon Graphics, Inc."
    end
    @testset "libpng" begin
        isdir(tmpdir) && rm(tmpdir, recursive = true)
        mkdir(tmpdir)
        # 4D input is not a valid PNG image and must error
        img = rand(Bool, 5, 5, 5, 5)
        filepath = joinpath(tmpdir, "5x5x5x5.png")
        @test_throws ErrorException ModPNG.writeimage(filepath, img)
        # write/read round trips: result should match the Gray{N0f8} conversion
        @testset "Binary Image" begin
            a = rand(Bool, 11, 10)
            filepath = joinpath(tmpdir, "binary1.png")
            ModPNG.writeimage(filepath, a)
            b1 = ModPNG.readimage(filepath)
            @test b1 == convert(Array{Gray{N0f8}}, a)
            a = bitrand(5,5)
            filepath = joinpath(tmpdir, "binary2.png")
            ModPNG.writeimage(filepath, a)
            b2 = ModPNG.readimage(filepath)
            @test b2 == convert(Array{Gray{N0f8}}, a)
            a = colorview(Gray, a)
            filepath = joinpath(tmpdir, "binary3.png")
            ModPNG.writeimage(filepath, a)
            b3 = ModPNG.readimage(filepath)
            @test b3 == convert(Array{Gray{N0f8}}, a)
        end
        @testset "Gray image" begin
            gray = vcat(fill(Gray(1.0), 10, 10), fill(Gray(0.5), 10, 10), fill(Gray(0.0), 10, 10))
            filepath = joinpath(tmpdir, "gray1.png")
            ModPNG.writeimage(filepath, gray)
            g1 = ModPNG.readimage(filepath)
            @test g1 == convert(Array{Gray{N0f8}}, gray)
            gray = rand(Gray{N0f8}, 5, 5)
            filepath = joinpath(tmpdir, "gray2.png")
            ModPNG.writeimage(filepath, gray)
            g2 = ModPNG.readimage(filepath)
            @test g2 == gray
        end
        @testset "Color - RGB" begin
            # deterministic gradients instead of rand so failures reproduce
            #rgb8 = rand(RGB{N0f8}, 10, 5)
            rgb8 = reshape(range(RGB{N0f8}(1,0,0),RGB{N0f8}(0,1,1), length=10*5), 10, 5)
            filepath = joinpath(tmpdir, "rgb_n0f8.png")
            ModPNG.writeimage(filepath, rgb8)
            r1 = ModPNG.readimage(filepath)
            @test r1 == rgb8
            #rgb16 = rand(RGB{N0f16}, 10, 5)
            rgb16 = reshape(range(RGB{N0f16}(1,0,0),RGB{N0f16}(0,1,1), length=10*5), 10, 5)
            filepath = joinpath(tmpdir, "rgb_n0f16.png")
            ModPNG.writeimage(filepath, rgb16)
            r2 = ModPNG.readimage(filepath)
            ModPNG.writeimage(joinpath(tmpdir, "rgb_n0f16_resave.png"), r2)
            @test r2 == rgb16
        end
        @testset "Alpha" begin
            # RGBA
            r = RGBA(1.0,0.0,0.0, 0.2)
            g = RGBA(0.0,1.0,0.0, 0.8)
            b = RGBA(0.0,0.0,1.0, 1.0)
            rgba1 = vcat(fill(r, 50,100), fill(g, 50,100), fill(b, 50,100))
            filepath = joinpath(tmpdir, "rgba1.png")
            ModPNG.writeimage(filepath, rgba1)
            r1 = ModPNG.readimage(filepath)
            @test r1 == rgba1
            # GrayA
            r = GrayA(1.0, 0.25)
            g = GrayA(0.5, 0.5)
            b = GrayA(0.0, 0.75)
            graya = vcat(fill(r, 50,100), fill(g, 50,100), fill(b, 50,100))
            filepath = joinpath(tmpdir, "graya1.png")
            ModPNG.writeimage(filepath, graya)
            g1 = ModPNG.readimage(filepath)
            @test g1 == convert(Array{GrayA{N0f8}}, graya)
        end
        # TODO implement palette
    end
    @testset "libjpeg" begin
        @test unsafe_string(ImageIO.ModJPEG.tjGetErrorStr()) == "No error"
    end
end
# try
#     rm(tmpdir, recursive = true)
# catch
#     @error "Unable to remove temp directory at: $(tmpdir)"
# end
| [
3500,
7412,
9399,
198,
3500,
6208,
198,
3500,
5315,
31431,
198,
3500,
10832,
12727,
49601,
198,
3500,
7412,
14055,
198,
3500,
5972,
2667,
198,
3500,
14534,
198,
198,
2,
6404,
1362,
796,
24371,
11187,
1362,
7,
19282,
448,
11,
5972,
2667,... | 1.858089 | 2,114 |
<filename>examples/random_circles.jl
using OdinSon
using Distributions
# Mathematica equivalent being reproduced:
# Graphics[Table[Circle[RandomReal[10, 2]], {100}]]
# 100 unit circles with centers uniform on [0, 10]^2, unfilled.
f1 = Canvas([Circle(rand(Uniform(0, 10), 2), 1, style=Style(fill=nothing)) for i = 1:100])
render(f1) # the render in the Mathematica notebook is implicit
# Same figure via mapslices over a 100×2 matrix of centers (row-wise).
f2 =Canvas(mapslices(p->Circle(p, 1, style=Style(fill=nothing)), rand(Uniform(0, 10), 100, 2), 2))
render(f2)
# So very verbose in comparison. Though if instead we had Mathematica-style
# implicit defaults, it would be very close — but those defaults don't feel
# natural to me, except perhaps having style not fill by default.
#=
```
Graphics[Table[Circle[RandomReal[10, 2]], {100}]]
Canvas([Circle(rand(Uniform(0, 10), 2)) for i = 1:100])
```
=#
| [
27,
34345,
29,
1069,
12629,
14,
25120,
62,
66,
343,
5427,
13,
20362,
198,
3500,
19758,
31056,
198,
3500,
46567,
507,
198,
198,
2,
19840,
58,
10962,
58,
31560,
293,
58,
29531,
15633,
58,
940,
11,
362,
60,
4357,
1391,
3064,
92,
11907,... | 2.853846 | 260 |
using PyPlot
"""
    histplot(x)
    histplot(x, n)

Plot a histogram of the values in `x`; the two-argument form uses `n` bins.
Thin alias for PyPlot's `hist`.
"""
# NOTE(review): `plt[:hist]` is the legacy PyCall property syntax; newer
# PyCall/PyPlot also accept `plt.hist` — confirm the PyCall version in use.
histplot = plt[:hist]
| [
3500,
9485,
43328,
198,
198,
37811,
198,
63,
10034,
29487,
7,
87,
8,
63,
7110,
257,
1554,
21857,
286,
262,
3815,
287,
4600,
87,
63,
290,
198,
63,
10034,
29487,
7,
87,
11,
77,
8,
63,
3607,
257,
7110,
351,
4600,
77,
63,
41701,
13,... | 2.482759 | 58 |
## ----------------  Bounded learning

"""
	$(SIGNATURES)

For a single college (not a ModelObject).
Each college is endowed with `maxLearn`. Once a student has learned this much, learning productivity falls to 0 (or a constant).

`dh = exp(aScale * a) * studyTime ^ timeExp * A`

The functional form for `A` is governed by the `tfpSpec`. It depends on how much has been learned.

The way learning is defined is governed by `learnRelativeToH0`. 
- false: `h learned = h - h0`
- true: `h learned = (h / h0 - 1)`. Then a college limits the percentage increase in the h endowment.

Options should be type parameters +++

A potential alternative with better scaling would be
`A = hExp .* ( log(maxLearn) .- log.(max.(1.0, h learned)) )`
`hExp` governs the slope. `maxLearn` governs the intercept. But the shape is fixed.
"""
mutable struct HcProdFctBounded <: AbstractHcProdFct
    minTfp  ::  Double   # Additive minimum tfp
    tfp  ::  Double
    maxLearn  ::  Double
    timeExp  ::  Double
    # Curvature: how strongly does learning decline as (h-h0) → maxLearn
    hExp  ::  Double
    # Depreciation rate
    deltaH :: Double
    # Ability scale
    aScale  ::  Double
    # Fixed time cost per course
    timePerCourse  ::  Double
    # Study time per course minimum (this is assigned when study time very low)
    minTimePerCourse  ::  Double
    # Learning as percentage of endowment? Or as (h - h0).
    learnRelativeToH0 :: Bool
    # TFP can be computed in several ways. See `base_tfp`.
    tfpSpec :: AbstractTfpSpec
end
## -------------  All colleges

# Calibration switches shared across colleges: each `calX` flag marks
# whether parameter `X` is calibrated or held fixed at its default.
Base.@kwdef mutable struct HcProdBoundedSwitches <: AbstractHcProdSwitches
    # Same exponents on time and h. If yes, ignore `hExp`.
    minTfp :: Double = 1.0
    calMinTfp :: Bool = true
    tfp :: Double = 1.0
    calTfpBase :: Bool = true
    sameExponents :: Bool = true
    timeExp :: Double = 0.6
    calTimeExp :: Bool = true
    hExp :: Double = 0.9
    # hExpLb :: Double = 0.5
    calHExp :: Bool = true
    deltaH :: Double = 0.0
    calDeltaH :: Bool = false
    aScale :: Double = 0.2
    calAScale :: Bool = true
    # Learning as percentage of endowment?
    learnRelativeToH0 :: Bool = false
    # TFP from (max learning - learning)
    tfpSpec :: AbstractTfpSpec = TfpMaxLearnMinusLearn()
end

"""
	$(SIGNATURES)

Since all colleges share some parameters, we need a model object that keeps 
track of parameters that are common or differ by college.
"""
mutable struct HcProdBoundedSet <: AbstractHcProdSet
    objId :: ObjectId
    switches :: HcProdBoundedSwitches
    nc :: CollInt

    # Calibrated parameters
    minTfp :: Double
    tfp :: Double
    maxLearnV :: BoundedVector
    timeExp :: Double
    hExp :: Double
    deltaH :: Double
    # Ability scale
    aScale  ::  Double
    # Fixed time cost per course
    timePerCourse  ::  Double
    # Study time per course minimum (this is assigned when study time very low)
    minTimePerCourse  ::  Double

    pvec :: ParamVector
end
## H production: Bounded learning
# Accessors: per-college max learning, learning definition flag, and TFP
# spec, dispatched on fct / switches / set.
max_learn(hs :: HcProdBoundedSet, ic) = ModelParams.values(hs.maxLearnV, ic);
max_learn(h :: HcProdFctBounded) = h.maxLearn;

learning_relative_to_h0(h :: HcProdFctBounded) = h.learnRelativeToH0;
learning_relative_to_h0(h :: HcProdBoundedSwitches) = h.learnRelativeToH0;
learning_relative_to_h0(h :: HcProdBoundedSet) = learning_relative_to_h0(h.switches);

tfp_spec(h :: HcProdFctBounded) = h.tfpSpec;
tfp_spec(h :: HcProdBoundedSwitches) = h.tfpSpec;
tfp_spec(h :: HcProdBoundedSet) = tfp_spec(h.switches);
## ----------  Construction

# Initialize with defaults
# Builds the HcProdBoundedSet: one Param per calibrated scalar (value,
# default, lower bound, upper bound, calibrate-flag), a BoundedVector for
# per-college max learning, and the ParamVector collecting them all.
function make_hc_prod_set(objId :: ObjectId, nc :: Integer,
    switches :: HcProdBoundedSwitches)

    st = symbol_table(); # eventually use preconstructed +++
    @assert validate_hprod(switches);
    pTimePerCourse = init_time_per_course();
    pMaxLearn = init_max_learn(objId, switches, nc);

    minTfp = switches.minTfp;
    pMinTfp = Param(:minTfp, "Min tfp", "A_{min}",
        minTfp, minTfp, 0.0, 2.0, switches.calMinTfp);

    tfpBase = switches.tfp;
    pTfpBase = Param(:tfp, ldescription(:hTfpNeutral), lsymbol(:hTfpNeutral),
        tfpBase, tfpBase, 0.1, 2.0, switches.calTfpBase);

    timeExp = switches.timeExp;
    pTimeExp = Param(:timeExp, ldescription(:hTimeExp), lsymbol(:hTimeExp),
        timeExp, timeExp, 0.2, 0.9, switches.calTimeExp);

    deltaH = delta_h(switches);
    pDeltaH = Param(:deltaH, ldescription(:ddh), lsymbol(:ddh),
        deltaH, deltaH, 0.0, 0.5, cal_delta_h(switches));

    # Governs slope inside of TFP (should be inside of TFP spec +++)
    hExp = switches.hExp;
    pHExp = Param(:hExp, "TFP slope coefficient", lsymbol(:hHExp),
        hExp, hExp, gma_range(tfp_spec(switches))..., switches.calHExp);

    aScale = switches.aScale;
    tfpSpec = tfp_spec(switches);
    pAScale = Param(:aScale, ldescription(:hAScale), lsymbol(:hAScale),
        aScale, aScale, gma_range(tfpSpec)..., switches.calAScale);

    pvec = ParamVector(objId, [pMinTfp, pTfpBase, pTimeExp, pHExp, pDeltaH, pAScale, pTimePerCourse]);

    # Min study time required per course. Should never bind.
    minTimePerCourse =
        hours_per_week_to_mtu(0.1 / data_to_model_courses(1));

    h = HcProdBoundedSet(objId, switches, nc,
        minTfp, tfpBase, pMaxLearn,
        timeExp, hExp, deltaH, aScale,
        pTimePerCourse.value, minTimePerCourse, pvec);
    @assert validate_hprod_set(h)
    return h
end
# Initialize the per-college BoundedVector of max-learning values.
# The calibration upper bound is tighter (3.0) when learning is measured
# relative to the endowment (h/h0 - 1) than when it is measured as (h - h0),
# where the bound is 5.0.
function init_max_learn(objId :: ObjectId, switches, nc :: Integer)
    ownId = make_child_id(objId, :tfpV);
    dMaxLearnV = fill(0.2, nc);
    ub = learning_relative_to_h0(switches) ? 3.0 : 5.0;
    # increasing across colleges, bounded in [0.2, ub]
    b = BoundedVector(ownId, ParamVector(ownId), :increasing, 0.2, ub, dMaxLearnV);
    set_pvector!(b; description = ldescription(:maxLearn),
        symbol = lsymbol(:maxLearn));
    return b
end
# Convenience constructor for tests: 4 colleges, small depreciation.
make_test_hc_bounded_set(; learnRelativeToH0 = true,
    tfpSpec = TfpMaxLearnMinusLearn()) = 
    make_hc_prod_set(ObjectId(:HProd), 4,  
        HcProdBoundedSwitches(
            deltaH = 0.05, 
            learnRelativeToH0 = learnRelativeToH0,
            tfpSpec = tfpSpec
            ));

# Make h production function for one college
function make_h_prod(hs :: HcProdBoundedSet, iCollege :: Integer)
    return HcProdFctBounded(hs.minTfp, hs.tfp, 
        max_learn(hs, iCollege),
        time_exp(hs), h_exp(hs), 
        delta_h(hs), hs.aScale, 
        hs.timePerCourse, hs.minTimePerCourse, 
        learning_relative_to_h0(hs), tfp_spec(hs));
end

# Single-college test instance; `gma` is the midpoint of the spec's valid range.
function make_test_hprod_bounded(; 
    learnRelativeToH0 = true, tfpSpec = TfpMaxLearnMinusLearn())
    minTfp = 0.7;
    gma = sum(gma_range(tfpSpec)) / 2;
    hS = HcProdFctBounded(minTfp, 0.6, 3.1, 0.7, gma, 0.1, 0.3, 0.01, 0.005, 
        learnRelativeToH0, tfpSpec);
    @assert validate_hprod(hS);
    return hS
end
## ----------  One college

# Sanity checks on a single college's production function parameters.
function validate_hprod(hS :: HcProdFctBounded)
    isValid = (max_learn(hS) > 0.05)  &&  (0.0 < time_exp(hS) ≤ 1.0);
    gmaMin, gmaMax = gma_range(tfp_spec(hS));
    gma = h_exp(hS);
    isValid = isValid && (gmaMin <= gma <= gmaMax);
    return isValid
end


"""
	$(SIGNATURES)

H produced (before shock is realized). Nonnegative.

# Arguments
- nTriedV
    number of courses attempted this period.
- h0V
    h endowments, so that `hV - h0V` is learning.
"""
function dh(hS :: HcProdFctBounded, abilV, hV, h0V, timeV, nTriedV)
    # study time per course, floored at the minimum
    sTimeV = study_time_per_course(hS, timeV, nTriedV);
    # deltaHV = (max_learn(hS) ^ h_exp(hS) .- learned_h(hS, hV, h0V) .^ h_exp(hS));
    # tfpV = hS.tfp .* max.(0.0, deltaHV) .^ (1.0 / h_exp(hS));
    return nTriedV .* base_tfp(hS, hV, h0V) .* (sTimeV .^ hS.timeExp) .* 
        exp.(hS.aScale .* abilV);
end


## ----------  TFP specs

# Base TFP: the term in front of (sTime ^ beta * exp(ability))
# Additive floor `minTfp` plus the spec-dependent, learning-dependent part.
function base_tfp(hS :: HcProdFctBounded, hV, h0V)
    tfpSpec = tfp_spec(hS);
    learnV = learned_h(hS, hV, h0V);
    tfpV = hS.minTfp .+ hS.tfp .* tfp(tfpSpec, learnV, max_learn(hS), h_exp(hS));
    return tfpV
end

# Expected range of TFP
function tfp_range(hS :: HcProdFctBounded)
    return hS.minTfp .+ tfp(hS) .* 
        tfp_range(tfp_spec(hS), h_exp(hS), max_learn(hS));
end

# Learned h, scaled for the production function
# Either percentage gain over the endowment or the level gain (h - h0),
# never negative.
function learned_h(hS :: HcProdFctBounded, hV, h0V)
    if learning_relative_to_h0(hS)
        dh = max.(0.0, hV .- h0V) ./ h0V;
    else
        dh = max.(0.0, hV .- h0V);
    end
    return dh
end
# function show_string(hS :: HcProdFctBounded)
#     fs = Formatting.FormatExpr("dh = {1:.2f} h ^ {2:.2f} t ^ {3:.2f}  exp({3:.2f} a)");
#     return format(fs, hS.tfp, h_exp(hS), hS.timeExp, hS.aScale);
# end

# Compact one-line display for a single college's production function.
function Base.show(io :: IO, hS :: HcProdFctBounded)
    maxLearn = round(max_learn(hS), digits = 2);
    print(io, "H prod fct: Bounded learning < $maxLearn");
end


## ---------------------  For all colleges

function Base.show(io :: IO, switches :: HcProdBoundedSwitches)
    print(io, "H production: bounded learning.");
end

# Two-column settings table (label, value), extended by the TFP spec's rows.
function settings_table(h :: HcProdBoundedSwitches)
    ddh = delta_h(h);
    cal_delta_h(h)  ?  deprecStr = "calibrated"  :  deprecStr = "fixed at $ddh";
    h.learnRelativeToH0  ?  learnStr = "h/h0 - 1"  :  learnStr = "h - h0";
    ownSettings = [
        "H production function"  "Bounded learning";
        "Depreciation"  deprecStr
        "Learning of the form"  learnStr
    ];
    return vcat(ownSettings, settings_table(h.tfpSpec))
end

# Settings as [section, key, value] triples (here: the dh equation).
function settings_list(h :: HcProdBoundedSwitches, st)
    eqnHChange = ["H production function", "eqnHChange", eqn_hchange(h)];
    return [eqnHChange]
end

# Switches currently have no invalid configurations.
function validate_hprod(s :: HcProdBoundedSwitches)
    isValid = true;
    return isValid
end

function validate_hprod_set(h :: HcProdBoundedSet)
    isValid = (h.nc > 1)  &&  (h.timeExp > 0.0)  &&  
        (h.aScale > 0.0)  &&  (h.timePerCourse > 0.0);
    isValid = isValid  &&  (1.0 > delta_h(h) >= 0.0);
    return isValid
end

# LaTeX fragment for the dh equation (macros defined in the paper preamble).
function eqn_hchange(h :: HcProdBoundedSwitches)
    "\\hTfp \\sTimePerCourse^{\\hTimeExp} e^{\\hAScale \\abil}"
end
# -------------- | [
2235,
34400,
220,
347,
6302,
4673,
198,
198,
37811,
198,
220,
220,
220,
29568,
46224,
47471,
8,
198,
198,
1890,
257,
2060,
4152,
357,
1662,
257,
9104,
10267,
737,
198,
10871,
4152,
318,
44134,
351,
4600,
9806,
20238,
44646,
4874,
257,
... | 2.361124 | 4,306 |
<filename>src/plotting/all_plots.jl<gh_stars>0
################################################################################
# Copyright 2021, <NAME> #
################################################################################
function plot_result_caseA(path_to_caseA_csv::String, which_plot::String, ymin::Float64, ymax::Float64)
casea = CSV.read(path_to_caseA_csv)
casea_wls = casea |> Query.@filter(_.criterion .== "wls") |> DataFrames.DataFrame
casea_rwls = casea |> Query.@filter(_.criterion .== "rwls") |> DataFrames.DataFrame
casea_rwlav = casea |> Query.@filter(_.criterion .== "rwlav") |> DataFrames.DataFrame
ACP_df_wls = casea_wls |> Query.@filter(_.eq_model .== "rACP") |> DataFrames.DataFrame
ACR_df_wls = casea_wls |> Query.@filter(_.eq_model .== "rACR") |> DataFrames.DataFrame
IVR_df_wls = casea_wls |> Query.@filter(_.eq_model .== "rIVR") |> DataFrames.DataFrame
ACP_df_rwls = casea_rwls |> Query.@filter(_.eq_model .== "rACP") |> DataFrames.DataFrame
ACR_df_rwls = casea_rwls |> Query.@filter(_.eq_model .== "rACR") |> DataFrames.DataFrame
IVR_df_rwls = casea_rwls |> Query.@filter(_.eq_model .== "rIVR") |> DataFrames.DataFrame
ACP_df_rwlav = casea_rwlav |> Query.@filter(_.eq_model .== "rACP") |> DataFrames.DataFrame
ACR_df_rwlav = casea_rwlav |> Query.@filter(_.eq_model .== "rACR") |> DataFrames.DataFrame
IVR_df_rwlav = casea_rwlav |> Query.@filter(_.eq_model .== "rIVR") |> DataFrames.DataFrame
if which_plot == "time"
p1 = Plots.scatter([ACP_df_wls.n_bus, ACR_df_wls.n_bus, IVR_df_wls.n_bus], [ACP_df_wls.solve_time, ACR_df_wls.solve_time, IVR_df_wls.solve_time], markershape=[:circle :rect :utriangle], label=["ACP" "ACR" "IVR"], ylabel="Solve time [s]",
yscale=:log, legend=false, ylims=(ymin, ymax), title="WLS")
p2 = Plots.scatter([ACP_df_rwls.n_bus, ACR_df_rwls.n_bus, IVR_df_rwls.n_bus], [ACP_df_rwls.solve_time, ACR_df_rwls.solve_time, IVR_df_rwls.solve_time], markershape=[:circle :rect :utriangle], label=["ACP" "ACR" "IVR"],
legend=:bottomright, yscale=:log, xlabel="number of buses [-]", ylims=(ymin, ymax), yaxis=nothing, title="rWLS")
p3 = Plots.scatter([ACP_df_rwlav.n_bus, ACR_df_rwlav.n_bus, IVR_df_rwlav.n_bus], [ACP_df_rwlav.solve_time, ACR_df_rwlav.solve_time, IVR_df_rwlav.solve_time], markershape=[:circle :rect :utriangle],
legend=false, yscale=:log, ylims=(ymin, ymax), yaxis=nothing, title="rWLAV")
Plots.plot(p1, p2, p3, layout = (1,3))
elseif which_plot == "error_ph1"
Plots.scatter([ACR_df_rwlav.n_bus, ACR_df_rwlav.n_bus], [ACR_df_rwlav.err_max_1, ACR_df_rwlav.err_avg_1], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Number of buses [-]",
legend=:topright, title="Error plot for case study A - Phase 1", ylims = (ymin, ymax))
elseif which_plot == "error_ph2"
Plots.scatter([ACR_df_rwlav.n_bus, ACR_df_rwlav.n_bus], [ACR_df_rwlav.err_max_2, ACR_df_rwlav.err_avg_2], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Number of buses [-]",
legend=:topright, title="Error plot for case study A - Phase 2", ylims = (ymin, ymax))
elseif which_plot == "error_ph3"
Plots.scatter([ACR_df_rwlav.n_bus, ACR_df_rwlav.n_bus], [ACR_df_rwlav.err_max_3, ACR_df_rwlav.err_avg_3], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Number of buses [-]",
legend=:topright, title="Error plot for case study A - Phase 3", ylims = (ymin, ymax))
else
display("ERROR: plot type $which_plot in argument `which_plot` not recognized. Possibilities are: \"time\", \"error_ph1\", \"error_ph2\", \"error_ph3\"")
end
end
function plot_result_caseB(path_to_caseB_csv::String, which_plot::String)
caseb = CSV.read(path_to_caseB_csv)
LDF_df = caseb |> Query.@filter(_.eq_model .== "LD3F") |> DataFrames.DataFrame
IVR_df = caseb |> Query.@filter(_.eq_model .== "rIVR") |> DataFrames.DataFrame
if which_plot == "time"
Plots.scatter([IVR_df.n_bus, LDF_df.n_bus], [IVR_df.solve_time, LDF_df.solve_time], markershape=[:utriangle :diamond], label=["IVR" "LD3F"], ylabel="Solve time [s]", xlabel="Number of buses [-]",
legend=:bottomright, title="$which_plot plot for case study B", yscale=:log)
elseif which_plot == "error_ph1"
Plots.scatter([LDF_df.n_bus, LDF_df.n_bus], [LDF_df.err_max_1, LDF_df.err_avg_1], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Number of buses [-]",
legend=:topright, title="Error plot for case study B - Phase 1")
elseif which_plot == "error_ph2"
Plots.scatter([LDF_df.n_bus, LDF_df.n_bus], [LDF_df.err_max_2, LDF_df.err_avg_2], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Number of buses [-]",
legend=:topright, title="Error plot for case study B - Phase 2")
elseif which_plot == "error_ph3"
Plots.scatter([LDF_df.n_bus, LDF_df.n_bus], [LDF_df.err_max_3, LDF_df.err_avg_3], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Number of buses [-]",
legend=:topright, title="Error plot for case study B - Phase 3")
else
display("ERROR: plot type $which_plot in argument `which_plot` not recognized. Possibilities are: \"time\", \"error_ph1\", \"error_ph2\", \"error_ph3\"")
end
end
function plot_result_caseC(path_to_caseC_csv::String)
casec = CSV.read(path_to_caseC_csv)
linIVR_df = casec |> Query.@filter(_.eq_model .== "linIVR") |> DataFrames.DataFrame
rIVR_df = casec |> Query.@filter(_.eq_model .== "rIVR") |> DataFrames.DataFrame
Plots.scatter([linIVR_df.n_bus, rIVR_df.n_bus], [linIVR_df.solve_time, rIVR_df.solve_time], markershape=[:circle :utriangle], label=["IVR - linear" "IVR - nonlinear"], ylabel="Solve time [s]", xlabel="Number of buses [-]",
legend=:bottomright, title="Plot for case study C", yscale=:log)
end
function plot_result_caseD(path_to_caseD_csv::String, which_plot::String)
caseD = CSV.read(path_to_caseD_csv)
x = (caseD.n_meas.-3)/(3*55)*100
if which_plot == "error_ph1"
Plots.scatter([x, x], [caseD.err_max_1, caseD.err_avg_1], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Measured users [%]",
legend=:bottomleft, title="Error plot for case study D - Phase 1", yscale=:log)
elseif which_plot == "error_ph2"
Plots.scatter([x, x], [caseD.err_max_2, caseD.err_avg_2], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Measured users [%]",
legend=:bottomleft, title="Error plot for case study D - Phase 2", yscale=:log)
elseif which_plot == "error_ph3"
Plots.scatter([x, x], [caseD.err_max_3, caseD.err_avg_3], markershape=[:circle :utriangle], label=["max. abs. error" "avg. abs. error"], ylabel="Absolute error ϵ [p.u.]", xlabel="Measured users [%]",
legend=:bottomleft, title="Error plot for case study D - Phase 3", yscale=:log)
else
display("ERROR: plot type $which_plot in argument `which_plot` not recognized. Possibilities are: \"error_ph1\", \"error_ph2\", \"error_ph3\"")
end
end
| [
27,
34345,
29,
10677,
14,
29487,
889,
14,
439,
62,
489,
1747,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
29113,
29113,
14468,
198,
2,
220,
15069,
33448,
11,
1279,
20608,
29,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.274567 | 3,409 |
# Get the summary of some numbers in fibonacci sequence.
v = [1,1]
s = 0
while (v[end] < 4000000)
if v[end]%2==0 s+=v[end] end
push!(v, v[end]+v[end-1])
end
println(s)
# 4613732
| [
2,
3497,
262,
10638,
286,
617,
3146,
287,
12900,
261,
44456,
8379,
13,
198,
85,
796,
685,
16,
11,
16,
60,
198,
82,
796,
657,
198,
4514,
357,
85,
58,
437,
60,
1279,
604,
10535,
8,
198,
220,
220,
220,
611,
410,
58,
437,
60,
4,
... | 2.113636 | 88 |
"""
    objective_min_cost_TNEP(pm::_PM.AbstractPowerModel)

Select and build the minimum-cost TNEP objective matching the protection
scheme requested in `pm.setting`: `"FSprotection"` or `"NSprotection"` uses
the FSNS formulation, `"Permanentloss"` the permanent-loss formulation.
If none of the flags is set, no objective is attached.
"""
function objective_min_cost_TNEP(pm::_PM.AbstractPowerModel)
    opts = pm.setting
    wants_fsns = opts["FSprotection"] == true || opts["NSprotection"] == true
    if wants_fsns
        objective_min_cost_TNEP_FSNS(pm)
    elseif opts["Permanentloss"] == true
        objective_min_cost_TNEP_PL(pm)
    end
end
"""
    objective_min_cost_TNEP_nocl(pm::_PM.AbstractPowerModel)

Variant of [`objective_min_cost_TNEP`](@ref) that dispatches to the
"no curtailment limit" (`_nocl`) objective builders: FSNS when
`"FSprotection"` or `"NSprotection"` is enabled, permanent-loss when
`"Permanentloss"` is enabled. Does nothing if no flag is set.
"""
function objective_min_cost_TNEP_nocl(pm::_PM.AbstractPowerModel)
    opts = pm.setting
    wants_fsns = opts["FSprotection"] == true || opts["NSprotection"] == true
    if wants_fsns
        objective_min_cost_TNEP_FSNS_nocl(pm)
    elseif opts["Permanentloss"] == true
        objective_min_cost_TNEP_PL_nocl(pm)
    end
end
"""
    objective_min_cost_TNEP_FSNS_nocl(pm::_PM.AbstractPowerModel)

Build the minimum-cost TNEP objective for the FSNS protection formulation
without a curtailment limit. Creates auxiliary cost variables (investment,
generation, FFR/FCR reserves, contingency cost, per-year curtailment),
ties them to the model with constraints, and sets the `Min` objective.
Reserve requirements per base network are the maximum over that network's
contingencies, linearised with binary selectors `Zff` and a big-M of 100.
"""
function objective_min_cost_TNEP_FSNS_nocl(pm::_PM.AbstractPowerModel)
    # Scenario bookkeeping supplied through the model settings.
    base_nws = pm.setting["base_list"]
    cont_nws = pm.setting["Cont_list"]
    Total_sample = pm.setting["Total_sample"]
    curt_gen = pm.setting["curtailed_gen"]
    max_curt = pm.setting["max_curt"]
    year_base = pm.setting["year_base"]
    total_year = pm.setting["total_yr"]
    gen_cost = Dict()
    FFR_cost = Dict()
    FCR_cost = Dict()
    fail_prob = Dict()
    # Auxiliary cost variables, registered in _PM.var so they can be reported.
    Inv_cost = _PM.var(pm)[:Inv_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    Gen_cost = _PM.var(pm)[:Gen_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FFRReserves = _PM.var(pm)[:FFR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FCRReserves = _PM.var(pm)[:FCR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    Cont = _PM.var(pm)[:Cont] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    # One curtailment variable per planning year.
    Curt = _PM.var(pm)[:curt] = JuMP.@variable(pm.model, [i in 1:total_year], start = 0, lower_bound = 0)
    # Attach the auxiliary variables to the solution builder.
    sol_component_value_mod_wonw(pm, :Inv_cost, Inv_cost)
    sol_component_value_mod_wonw(pm, :Gen_cost, Gen_cost)
    sol_component_value_mod_wonw(pm, :Curt, Curt)
    sol_component_value_mod_wonw(pm, :FFR_Reserves, FFRReserves)
    sol_component_value_mod_wonw(pm, :FCR_Reserves, FCRReserves)
    sol_component_value_mod_wonw(pm, :Cont, Cont)
    # Collect reserve prices and (linearised) generator cost expressions per network.
    for (n, nw_ref) in _PM.nws(pm)
        for (r, reserves) in nw_ref[:reserves]
            FFR_cost[(n,r)] = reserves["Cf"]
            FCR_cost[(n,r)] = reserves["Cg"]
        end
        for (i,gen) in nw_ref[:gen]
            pg = _PM.var(pm, n, :pg, i)
            if length(gen["cost"]) == 1
                gen_cost[(n,i)] = gen["cost"][1]
            elseif length(gen["cost"]) == 2
                gen_cost[(n,i)] = gen["cost"][1]*pg + gen["cost"][2]
            elseif length(gen["cost"]) == 3
                # 3-coefficient case uses only linear + constant terms
                # (quadratic coefficient gen["cost"][1] is not applied).
                gen_cost[(n,i)] = gen["cost"][2]*pg + gen["cost"][3]
            else
                gen_cost[(n,i)] = 0.0
            end
        end
    end
    # Reserve procurement cost per (base, contingency) pair; converter id 2 is
    # the reserve provider here — NOTE(review): hard-coded index, confirm.
    FFR_list = Dict()
    FCR_list = Dict()
    for (b,c,br) in cont_nws
        FFR_list[(b,c)] = FFR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pff, 2)+
                                    FCR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pgg, 2)
        # a_ub, a_lb = _IM.variable_domain(_PM.var(pm, c, :Pff, 2))
        # display("a_ub:$a_ub a_lb:$a_lb")
    end
    # Binary selector: exactly one contingency per base network determines the
    # reserve requirement (big-M linearisation of a maximum, M = 100).
    Zff = _PM.var(pm)[:Zff] = Dict((b, c) => JuMP.@variable(pm.model, base_name = "zff[$(string(b)),$(string(c))]", binary = true, start = 0) for (b, c, br) in cont_nws)
    sol_component_value_mod_wonw(pm, :Zff, Zff)
    #maximum out of onshore converters
    FFRReserves_max = _PM.var(pm)[:FFRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FFRReserves_max", start = 0)
    FCRReserves_max = _PM.var(pm)[:FCRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FCRReserves_max", start = 0)
    for b in base_nws
        sol_component_value_mod(pm, b, :FFRReserves_max, FFRReserves_max[b])
        sol_component_value_mod(pm, b, :FCRReserves_max, FCRReserves_max[b])
    end
    base_cont= Dict()
    for b in base_nws
        # [item for item in a if item[2] == 4]
        base_cont = [tt for tt in cont_nws if tt[1] == b]
        ####_PM.var(pm)[:Zff] = JuMP.@variable(pm.model, [(z,w) in base_cont], base_name="zff", binary = true, start = 0 ) #just one variable, how to create multiple with an index
        # `display` prints each constraint when built (debug output).
        for (z,w) in base_cont
            display(JuMP.@constraint(pm.model, FFRReserves_max[b]  >= FFR_list[(z,w)] ))
            display(JuMP.@constraint(pm.model, FFRReserves_max[b]  <= FFR_list[(z,w)] + 100*(1-Zff[(z,w)])))
        end
        display(JuMP.@constraint(pm.model, sum(Zff[(z,w)] for (z, w) in base_cont) == 1 ) )
    end
    Scale = 8760*5/Total_sample # 5 for no. of gap years between two time steps
    # multiperiod
    # Investment cost of candidate converters and DC branches (network 1 holds
    # the candidate sets; the outer sum repeats it once per network —
    # NOTE(review): looks like an intentional per-period multiplier, confirm).
    JuMP.@constraint(pm.model, Inv_cost == sum(sum(conv["cost"]*_PM.var(pm, 1, :conv_ne, i) for (i,conv) in _PM.nws(pm)[1][:convdc_ne]) for (n, nw_ref) in _PM.nws(pm)) +
                                                                sum(sum(branch["cost"]*_PM.var(pm, 1, :branchdc_ne, i) for (i,branch) in _PM.nws(pm)[1][:branchdc_ne]) for (n, nw_ref) in _PM.nws(pm)) )
    JuMP.@constraint(pm.model, Gen_cost == sum(sum(Scale/10^6*gen_cost[(b,i)] for (i,gen) in _PM.nws(pm)[b][:gen]) for b in base_nws) )
    JuMP.@constraint(pm.model, FFRReserves == Scale*sum(FFRReserves_max[b] for b in base_nws) )
    JuMP.@constraint(pm.model, FCRReserves == Scale*sum(FCRReserves_max[b] for b in base_nws) )
    # display(JuMP.@constraint(pm.model, FCRReserves == sum(FCR_cost[(c,2)]*100*Scale/10^6*_PM.var(pm, c, :Pgg, 2) for (b,c,br) in cont_nws) ) )
    # Expected contingency cost: failure probability times the generation-cost
    # difference between contingency and base network (generator id 3 —
    # NOTE(review): hard-coded, confirm).
    display(JuMP.@constraint(pm.model, Cont == sum(_PM.ref(pm, b, :branchdc_ne, br)["fail_prob"]*_PM.var(pm, b, :branchdc_ne, br)*
    (Scale/10^6*gen_cost[(c,3)] - Scale/10^6*gen_cost[(b,3)]) for (b,c,br) in cont_nws) ) )
    # Per-year relative curtailment of the tracked generators, capped at max_curt.
    curtailment = Dict()
    capacity = Dict()
    for b in base_nws
        for c in curt_gen
            curtailment[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c)) - _PM.var(pm, b, :pg, c)
            capacity[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c))
        end
    end
    for y = 1:total_year
        JuMP.@constraint(pm.model, Curt[y] == sum(sum(curtailment[(b,c)] for c in curt_gen) for b in year_base[y]) / sum(sum(capacity[(b,c)] for c in curt_gen) for b in year_base[y]) )
        JuMP.@constraint(pm.model, Curt[y] <= max_curt)
    end
    # NOTE(review): `+ Cont - Gen_cost` means Gen_cost cancels out of the
    # objective (it is still reported as a variable) — confirm intentional.
    return JuMP.@objective(pm.model, Min, Inv_cost + Gen_cost + FFRReserves + FCRReserves + Cont - Gen_cost)
end
"""
    objective_min_cost_TNEP_FSNS(pm::_PM.AbstractPowerModel)

Build the minimum-cost TNEP objective for the FSNS protection formulation
with scenario weights: investment cost of candidate DC branches/converters
plus weighted generation cost plus weighted FFR/FCR reserve procurement.
Auxiliary variables (`Inv_cost`, `Gen_cost`, `FFR_Reserves`, `FCR_Reserves`,
`Cont`, `curt`) are created and attached to the solution builder. Reserve
requirements per base network are the maximum over its contingencies,
linearised with binary selectors `Zff` and a big-M of 100.
"""
function objective_min_cost_TNEP_FSNS(pm::_PM.AbstractPowerModel)
    display("objective_min_cost_TNEP_FSNS")
    # Scenario bookkeeping supplied through the model settings.
    base_nws = pm.setting["base_list"]       # base (pre-contingency) network ids
    cont_nws = pm.setting["Cont_list"]       # (base nw, contingency nw, branch) triples
    Total_sample = pm.setting["Total_sample"]
    curt_gen = pm.setting["curtailed_gen"]   # generators whose curtailment is tracked
    max_curt = pm.setting["max_curt"]        # per-year cap (constraint currently disabled)
    year_base = pm.setting["year_base"]      # base nws belonging to each planning year
    total_year = pm.setting["total_yr"]
    weights = pm.setting["weights"]          # scenario weight per base network
    gen_cost = Dict()
    FFR_cost = Dict()
    FCR_cost = Dict()
    # Auxiliary cost variables, registered in _PM.var so they can be reported.
    Inv_cost = _PM.var(pm)[:Inv_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    Gen_cost = _PM.var(pm)[:Gen_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FFRReserves = _PM.var(pm)[:FFR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FCRReserves = _PM.var(pm)[:FCR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    # BUG FIX: `curt` was declared as a scalar variable but is indexed per
    # planning year as `Curt[y]` below; declare it as a vector, matching
    # objective_min_cost_TNEP_FSNS_nocl.
    Curt = _PM.var(pm)[:curt] = JuMP.@variable(pm.model, [i in 1:total_year], start = 0, lower_bound = 0)
    Cont = _PM.var(pm)[:Cont] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    # Attach the auxiliary variables to the solution builder.
    sol_component_value_mod_wonw(pm, :Inv_cost, Inv_cost)
    sol_component_value_mod_wonw(pm, :Gen_cost, Gen_cost)
    sol_component_value_mod_wonw(pm, :FFR_Reserves, FFRReserves)
    sol_component_value_mod_wonw(pm, :FCR_Reserves, FCRReserves)
    sol_component_value_mod_wonw(pm, :Cont, Cont)
    sol_component_value_mod_wonw(pm, :Curt, Curt)
    # Collect reserve prices and (linearised) generator cost expressions per network.
    for (n, nw_ref) in _PM.nws(pm)
        for (r, reserves) in nw_ref[:reserves]
            FFR_cost[(n,r)] = reserves["Cf"]
            FCR_cost[(n,r)] = reserves["Cg"]
        end
        for (i,gen) in nw_ref[:gen]
            pg = _PM.var(pm, n, :pg, i)
            if length(gen["cost"]) == 1
                gen_cost[(n,i)] = gen["cost"][1]
            elseif length(gen["cost"]) == 2
                gen_cost[(n,i)] = gen["cost"][1]*pg + gen["cost"][2]
            elseif length(gen["cost"]) == 3
                # 3-coefficient case uses only linear + constant terms.
                gen_cost[(n,i)] = gen["cost"][2]*pg + gen["cost"][3]
            else
                gen_cost[(n,i)] = 0.0
            end
        end
    end
    # Reserve procurement cost per (base, contingency) pair; converter id 2 is
    # the reserve provider here — NOTE(review): hard-coded index, confirm.
    FFR_list = Dict()
    for (b,c,br) in cont_nws
        FFR_list[(b,c)] = FFR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pff, 2)+
                          FCR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pgg, 2)
    end
    # Binary selector: exactly one contingency per base network determines the
    # reserve requirement (big-M linearisation of a maximum, M = 100).
    Zff = _PM.var(pm)[:Zff] = Dict((b, c) => JuMP.@variable(pm.model, base_name = "zff[$(string(b)),$(string(c))]", binary = true, start = 0) for (b, c, br) in cont_nws)
    sol_component_value_mod_wonw(pm, :Zff, Zff)
    FFRReserves_max = _PM.var(pm)[:FFRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FFRReserves_max", start = 0)
    FCRReserves_max = _PM.var(pm)[:FCRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FCRReserves_max", start = 0)
    for b in base_nws
        sol_component_value_mod(pm, b, :FFRReserves_max, FFRReserves_max[b])
        sol_component_value_mod(pm, b, :FCRReserves_max, FCRReserves_max[b])
    end
    for b in base_nws
        base_cont = [tt for tt in cont_nws if tt[1] == b]
        for (z,w) in base_cont
            JuMP.@constraint(pm.model, FFRReserves_max[b] >= FFR_list[(z,w)] )
            JuMP.@constraint(pm.model, FFRReserves_max[b] <= FFR_list[(z,w)] + 100*(1-Zff[(z,w)]))
        end
        JuMP.@constraint(pm.model, sum(Zff[(z,w)] for (z, w) in base_cont) == 1 )
    end
    # Hours per year times a 10-year horizon (comment previously said 5).
    Scale = 8760*10
    # Investment cost of candidate converters and DC branches (candidate sets
    # live in network 1; the outer sum repeats the term once per network).
    JuMP.@constraint(pm.model, Inv_cost == sum(sum(conv["cost"]*_PM.var(pm, 1, :conv_ne, i) for (i,conv) in _PM.nws(pm)[1][:convdc_ne]) for (n, nw_ref) in _PM.nws(pm)) +
                               sum(sum(branch["cost"]*_PM.var(pm, 1, :branchdc_ne, i) for (i,branch) in _PM.nws(pm)[1][:branchdc_ne]) for (n, nw_ref) in _PM.nws(pm)) )
    # Scenario-weighted generation and reserve costs (scaled to M-units).
    JuMP.@constraint(pm.model, Gen_cost == sum(weights[b]*sum(Scale/10^6*gen_cost[(b,i)] for (i,gen) in _PM.nws(pm)[b][:gen]) for b in base_nws) )
    JuMP.@constraint(pm.model, FFRReserves == Scale*sum(weights[b]*FFRReserves_max[b] for b in base_nws) )
    JuMP.@constraint(pm.model, FCRReserves == Scale*sum(weights[b]*FCRReserves_max[b] for b in base_nws) )
    # Per-year relative curtailment of the tracked generators; the cap via
    # `max_curt` is intentionally left disabled in this variant.
    curtailment = Dict()
    capacity = Dict()
    for b in base_nws
        for c in curt_gen
            curtailment[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c)) - _PM.var(pm, b, :pg, c)
            capacity[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c))
        end
    end
    for y = 1:total_year
        JuMP.@constraint(pm.model, Curt[y] == sum(sum(curtailment[(b,c)] for c in curt_gen) for b in year_base[y]) / sum(sum(capacity[(b,c)] for c in curt_gen) for b in year_base[y]) )
        # JuMP.@constraint(pm.model, Curt[y] <= max_curt)
    end
    # `Cont` is reported but not part of the objective in this formulation.
    return JuMP.@objective(pm.model, Min, Inv_cost + FFRReserves + FCRReserves + Gen_cost)
end
"""
    objective_min_cost_TNEP_PL_nocl(pm::_PM.AbstractPowerModel)

Build the minimum-cost TNEP objective for the permanent-loss formulation
without a curtailment limit: investment, generation, FFR/FCR reserve and
expected contingency costs. Auxiliary variables are created and attached to
the solution builder; the `Min` objective is returned.
"""
function objective_min_cost_TNEP_PL_nocl(pm::_PM.AbstractPowerModel)
    # Scenario bookkeeping supplied through the model settings.
    base_nws = pm.setting["base_list"]       # base (pre-contingency) network ids
    cont_nws = pm.setting["Cont_list"]       # (base nw, contingency nw, branch) triples
    Total_sample = pm.setting["Total_sample"]
    curt_gen = pm.setting["curtailed_gen"]   # generators whose curtailment is tracked
    max_curt = pm.setting["max_curt"]        # per-year cap (constraint currently disabled)
    year_base = pm.setting["year_base"]
    total_year = pm.setting["total_yr"]
    weights = pm.setting["weights"]          # read for setting validation; unused below
    gen_cost = Dict()
    FFR_cost = Dict()
    FCR_cost = Dict()
    # Auxiliary cost variables, registered in _PM.var so they can be reported.
    Inv_cost = _PM.var(pm)[:Inv_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    Gen_cost = _PM.var(pm)[:Gen_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FFRReserves = _PM.var(pm)[:FFR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FCRReserves = _PM.var(pm)[:FCR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    Cont = _PM.var(pm)[:Cont] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    Curt = _PM.var(pm)[:curt] = JuMP.@variable(pm.model, [i in 1:total_year], start = 0, lower_bound = 0)
    # Attach the auxiliary variables to the solution builder.
    sol_component_value_mod_wonw(pm, :Inv_cost, Inv_cost)
    sol_component_value_mod_wonw(pm, :Gen_cost, Gen_cost)
    sol_component_value_mod_wonw(pm, :Curt, Curt)
    sol_component_value_mod_wonw(pm, :FFR_Reserves, FFRReserves)
    sol_component_value_mod_wonw(pm, :FCR_Reserves, FCRReserves)
    sol_component_value_mod_wonw(pm, :Cont, Cont)
    # Collect reserve prices and (linearised) generator cost expressions per network.
    for (n, nw_ref) in _PM.nws(pm)
        for (r, reserves) in nw_ref[:reserves]
            FFR_cost[(n,r)] = reserves["Cf"]
            FCR_cost[(n,r)] = reserves["Cg"]
        end
        for (i,gen) in nw_ref[:gen]
            pg = _PM.var(pm, n, :pg, i)
            if length(gen["cost"]) == 1
                gen_cost[(n,i)] = gen["cost"][1]
            elseif length(gen["cost"]) == 2
                gen_cost[(n,i)] = gen["cost"][1]*pg + gen["cost"][2]
            elseif length(gen["cost"]) == 3
                # 3-coefficient case uses only linear + constant terms.
                gen_cost[(n,i)] = gen["cost"][2]*pg + gen["cost"][3]
            else
                gen_cost[(n,i)] = 0.0
            end
        end
    end
    # Hourly samples scaled to a 5-year gap between time steps.
    Scale = 8760/Total_sample*5
    # Investment cost of candidate converters and DC branches (candidate sets
    # live in network 1; the outer sum repeats the term once per network).
    JuMP.@constraint(pm.model, Inv_cost == sum(sum(conv["cost"]*_PM.var(pm, 1, :conv_ne, i) for (i,conv) in _PM.nws(pm)[1][:convdc_ne]) for (n, nw_ref) in _PM.nws(pm)) +
                               sum(sum(branch["cost"]*_PM.var(pm, 1, :branchdc_ne, i) for (i,branch) in _PM.nws(pm)[1][:branchdc_ne]) for (n, nw_ref) in _PM.nws(pm)) )
    JuMP.@constraint(pm.model, Gen_cost == sum(sum(Scale/10^6*gen_cost[(b,i)] for (i,gen) in _PM.nws(pm)[b][:gen]) for b in base_nws) )
    # Reserve procurement from converter id 2 of each contingency network —
    # NOTE(review): hard-coded index, confirm.
    JuMP.@constraint(pm.model, FFRReserves == sum(FFR_cost[(c,2)]*100*Scale/10^6* _PM.var(pm, c, :Pff, 2) for (b,c,br) in cont_nws) )
    JuMP.@constraint(pm.model, FCRReserves == sum(FCR_cost[(c,2)]*100*Scale/10^6*_PM.var(pm, c, :Pgg, 2) for (b,c,br) in cont_nws) )
    # Expected contingency cost: failure probability times the generation-cost
    # difference between contingency and base network (generator id 3 —
    # NOTE(review): hard-coded, confirm).
    JuMP.@constraint(pm.model, Cont == sum(_PM.ref(pm, b, :branchdc_ne, br)["fail_prob"]*_PM.var(pm, b, :branchdc_ne, br)*
    (Scale/10^6*gen_cost[(c,3)] - Scale/10^6*gen_cost[(b,3)]) for (b,c,br) in cont_nws) )
    # Per-year relative curtailment of the tracked generators; the cap via
    # `max_curt` is intentionally left disabled in this variant.
    curtailment = Dict()
    capacity = Dict()
    for b in base_nws
        for c in curt_gen
            curtailment[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c)) - _PM.var(pm, b, :pg, c)
            capacity[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c))
        end
    end
    for y = 1:total_year
        JuMP.@constraint(pm.model, Curt[y] == sum(sum(curtailment[(b,c)] for c in curt_gen) for b in year_base[y]) / sum(sum(capacity[(b,c)] for c in curt_gen) for b in year_base[y]) )
        # JuMP.@constraint(pm.model, Curt[y] <= max_curt)
    end
    # BUG FIX: the objective was wrapped in `display(...)`, so the function
    # returned `nothing`; return the objective directly, matching
    # objective_min_cost_TNEP_FSNS_nocl.
    return JuMP.@objective(pm.model, Min, Inv_cost + Gen_cost + FFRReserves + FCRReserves + Cont )
end
"""
    objective_min_cost_TNEP_PL(pm::_PM.AbstractPowerModel)

Build the minimum-cost TNEP objective for the permanent-loss formulation
with scenario weights: investment cost plus weighted generation cost plus
weighted FFR/FCR reserve procurement. Reserve requirements per base network
are the maximum over its contingencies, linearised with binary selectors
`Zff` and a big-M of 100. Returns the `Min` objective.
"""
function objective_min_cost_TNEP_PL(pm::_PM.AbstractPowerModel)
    # Scenario bookkeeping supplied through the model settings.
    base_nws = pm.setting["base_list"]       # base (pre-contingency) network ids
    cont_nws = pm.setting["Cont_list"]       # (base nw, contingency nw, branch) triples
    Total_sample = pm.setting["Total_sample"]
    curt_gen = pm.setting["curtailed_gen"]   # generators whose curtailment is tracked
    max_curt = pm.setting["max_curt"]        # per-year cap (constraint currently disabled)
    year_base = pm.setting["year_base"]
    total_year = pm.setting["total_yr"]
    weights = pm.setting["weights"]          # scenario weight per base network
    gen_cost = Dict()
    FFR_cost = Dict()
    FCR_cost = Dict()
    # Auxiliary cost variables, registered in _PM.var so they can be reported.
    Inv_cost = _PM.var(pm)[:Inv_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    Gen_cost = _PM.var(pm)[:Gen_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FFRReserves = _PM.var(pm)[:FFR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    FCRReserves = _PM.var(pm)[:FCR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    # BUG FIX: `curt` was declared as a scalar variable but is indexed per
    # planning year as `Curt[y]` below; declare it as a vector, matching
    # the *_nocl variants.
    Curt = _PM.var(pm)[:curt] = JuMP.@variable(pm.model, [i in 1:total_year], start = 0, lower_bound = 0)
    Cont = _PM.var(pm)[:Cont] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
    # Attach the auxiliary variables to the solution builder.
    sol_component_value_mod_wonw(pm, :Inv_cost, Inv_cost)
    sol_component_value_mod_wonw(pm, :Gen_cost, Gen_cost)
    sol_component_value_mod_wonw(pm, :FFR_Reserves, FFRReserves)
    sol_component_value_mod_wonw(pm, :FCR_Reserves, FCRReserves)
    sol_component_value_mod_wonw(pm, :Cont, Cont)
    sol_component_value_mod_wonw(pm, :Curt, Curt)
    # Collect reserve prices and (linearised) generator cost expressions per network.
    for (n, nw_ref) in _PM.nws(pm)
        for (r, reserves) in nw_ref[:reserves]
            FFR_cost[(n,r)] = reserves["Cf"]
            FCR_cost[(n,r)] = reserves["Cg"]
        end
        for (i,gen) in nw_ref[:gen]
            pg = _PM.var(pm, n, :pg, i)
            if length(gen["cost"]) == 1
                gen_cost[(n,i)] = gen["cost"][1]
            elseif length(gen["cost"]) == 2
                gen_cost[(n,i)] = gen["cost"][1]*pg + gen["cost"][2]
            elseif length(gen["cost"]) == 3
                # 3-coefficient case uses only linear + constant terms.
                gen_cost[(n,i)] = gen["cost"][2]*pg + gen["cost"][3]
            else
                gen_cost[(n,i)] = 0.0
            end
        end
    end
    # Reserve procurement cost per (base, contingency) pair; converter id 2 is
    # the reserve provider here — NOTE(review): hard-coded index, confirm.
    FFR_list = Dict()
    for (b,c,br) in cont_nws
        FFR_list[(b,c)] = FFR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pff, 2)+
                          FCR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pgg, 2)
    end
    # Binary selector: exactly one contingency per base network determines the
    # reserve requirement (big-M linearisation of a maximum, M = 100).
    Zff = _PM.var(pm)[:Zff] = Dict((b, c) => JuMP.@variable(pm.model, base_name = "zff[$(string(b)),$(string(c))]", binary = true, start = 0) for (b, c, br) in cont_nws)
    sol_component_value_mod_wonw(pm, :Zff, Zff)
    FFRReserves_max = _PM.var(pm)[:FFRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FFRReserves_max", start = 0)
    FCRReserves_max = _PM.var(pm)[:FCRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FCRReserves_max", start = 0)
    for b in base_nws
        sol_component_value_mod(pm, b, :FFRReserves_max, FFRReserves_max[b])
        sol_component_value_mod(pm, b, :FCRReserves_max, FCRReserves_max[b])
    end
    for b in base_nws
        base_cont = [tt for tt in cont_nws if tt[1] == b]
        for (z,w) in base_cont
            JuMP.@constraint(pm.model, FFRReserves_max[b] >= FFR_list[(z,w)] )
            JuMP.@constraint(pm.model, FFRReserves_max[b] <= FFR_list[(z,w)] + 100*(1-Zff[(z,w)]))
        end
        JuMP.@constraint(pm.model, sum(Zff[(z,w)] for (z, w) in base_cont) == 1 )
    end
    # Hours per year times a 10-year horizon (comment previously said 5).
    Scale = 8760*10
    # Investment cost of candidate converters and DC branches (candidate sets
    # live in network 1; the outer sum repeats the term once per network).
    JuMP.@constraint(pm.model, Inv_cost == sum(sum(conv["cost"]*_PM.var(pm, 1, :conv_ne, i) for (i,conv) in _PM.nws(pm)[1][:convdc_ne]) for (n, nw_ref) in _PM.nws(pm)) +
                               sum(sum(branch["cost"]*_PM.var(pm, 1, :branchdc_ne, i) for (i,branch) in _PM.nws(pm)[1][:branchdc_ne]) for (n, nw_ref) in _PM.nws(pm)) )
    # Scenario-weighted generation and reserve costs (scaled to M-units).
    JuMP.@constraint(pm.model, Gen_cost == sum(weights[b]*sum(Scale/10^6*gen_cost[(b,i)] for (i,gen) in _PM.nws(pm)[b][:gen]) for b in base_nws) )
    JuMP.@constraint(pm.model, FFRReserves == Scale*sum(weights[b]*FFRReserves_max[b] for b in base_nws) )
    JuMP.@constraint(pm.model, FCRReserves == Scale*sum(weights[b]*FCRReserves_max[b] for b in base_nws) )
    # Per-year relative curtailment of the tracked generators; the cap via
    # `max_curt` is intentionally left disabled in this variant.
    curtailment = Dict()
    capacity = Dict()
    for b in base_nws
        for c in curt_gen
            curtailment[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c)) - _PM.var(pm, b, :pg, c)
            capacity[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c))
        end
    end
    for y = 1:total_year
        JuMP.@constraint(pm.model, Curt[y] == sum(sum(curtailment[(b,c)] for c in curt_gen) for b in year_base[y]) / sum(sum(capacity[(b,c)] for c in curt_gen) for b in year_base[y]) )
        # JuMP.@constraint(pm.model, Curt[y] <= max_curt)
    end
    # BUG FIX: the objective was wrapped in `display(...)`, so the function
    # returned `nothing`; return the objective directly, matching the
    # sibling formulations. `Cont` is reported but not part of the objective.
    return JuMP.@objective(pm.model, Min, Inv_cost + Gen_cost + FFRReserves + FCRReserves)
end
"""
    objective_min_cost_TNEP_FSNS_rev1(pm::_PM.AbstractPowerModel)

Set the multi-network TNEP (transmission expansion planning) objective,
revision 1 of the FSNS formulation: minimize
`FFRReserves + FCRReserves + Inv_cost - Gen_cost`.
Reads its configuration from `pm.setting` and attaches auxiliary JuMP
variables and constraints to `pm.model`.

NOTE(review): several settings read below (`Total_sample`, `max_curt`,
`year_base`, `total_yr`) and several objects (`Curt`, `Cont`, `fail_prob`,
`FCR_list`, `curtailment`, `capacity`) are created but never used in this
revision — presumably leftovers from the commented-out curtailment and
contingency terms; confirm before removing.
"""
function objective_min_cost_TNEP_FSNS_rev1(pm::_PM.AbstractPowerModel)
# --- configuration pulled from pm.setting ---
base_nws = pm.setting["base_list"]
cont_nws = pm.setting["Cont_list"]
Total_sample = pm.setting["Total_sample"]
curt_gen = pm.setting["curtailed_gen"]
max_curt = pm.setting["max_curt"]
year_base = pm.setting["year_base"]
total_year = pm.setting["total_yr"]
gen_cost = Dict()
FFR_cost = Dict()
FCR_cost = Dict()
fail_prob = Dict()
# Objective components as auxiliary nonnegative variables, registered in _PM.var
Inv_cost = _PM.var(pm)[:Inv_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
Gen_cost = _PM.var(pm)[:Gen_cost] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
FFRReserves = _PM.var(pm)[:FFR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
FCRReserves = _PM.var(pm)[:FCR_Reserves] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
Curt = _PM.var(pm)[:curt] = JuMP.@variable(pm.model, start = 0)
Cont = _PM.var(pm)[:Cont] = JuMP.@variable(pm.model, start = 0, lower_bound = 0)
weights = pm.setting["weights"]
# Register the auxiliary variables in the solution builder
sol_component_value_mod_wonw(pm, :Inv_cost, Inv_cost)
sol_component_value_mod_wonw(pm, :Gen_cost, Gen_cost)
sol_component_value_mod_wonw(pm, :FFR_Reserves, FFRReserves)
sol_component_value_mod_wonw(pm, :FCR_Reserves, FCRReserves)
sol_component_value_mod_wonw(pm, :Cont, Cont)
# Collect reserve prices (Cf = FFR, Cg = FCR) and per-generator cost
# expressions for every network, keyed by (network, component id).
for (n, nw_ref) in _PM.nws(pm)
for (r, reserves) in nw_ref[:reserves]
FFR_cost[(n,r)] = reserves["Cf"]
FCR_cost[(n,r)] = reserves["Cg"]
end
for (i,gen) in nw_ref[:gen]
pg = _PM.var(pm, n, :pg, i)
# Cost vector length decides the polynomial form (constant / linear / affine)
if length(gen["cost"]) == 1
gen_cost[(n,i)] = gen["cost"][1]
elseif length(gen["cost"]) == 2
gen_cost[(n,i)] = gen["cost"][1]*pg + gen["cost"][2]
elseif length(gen["cost"]) == 3
gen_cost[(n,i)] = gen["cost"][2]*pg + gen["cost"][3]
else
gen_cost[(n,i)] = 0.0
end
end
end
FFR_list = Dict()
FCR_list = Dict()
# Per-contingency reserve cost expression (FFR + FCR terms) for each
# (base network b, contingency network c, branch br) triple.
for (b,c,br) in cont_nws
FFR_list[(b,c)] = FFR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pff, 2)+
FCR_cost[(c,2)]*100/10^6*_PM.var(pm, c, :Pgg, 2)
# a_ub, a_lb = _IM.variable_domain(_PM.var(pm, c, :Pff, 2))
# display("a_ub:$a_ub a_lb:$a_lb")
end
# Binary selector: picks, per base network, the contingency whose reserve
# cost defines the maximum (see the big-M constraints below).
Zff = _PM.var(pm)[:Zff] = Dict((b, c) => JuMP.@variable(pm.model, base_name = "zff[$(string(b)),$(string(c))]", binary = true, start = 0) for (b, c, br) in cont_nws)
sol_component_value_mod_wonw(pm, :Zff, Zff)
# _IM.sol_component_value(pm, nw, :reserves, :Zff, base_nws, Zff)
FFRReserves_max = _PM.var(pm)[:FFRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FFRReserves_max", start = 0)
FCRReserves_max = _PM.var(pm)[:FCRReserves_max] = JuMP.@variable(pm.model, [b in base_nws], base_name ="FCRReserves_max", start = 0)
for b in base_nws
sol_component_value_mod(pm, b, :FFRReserves_max, FFRReserves_max[b])
sol_component_value_mod(pm, b, :FCRReserves_max, FCRReserves_max[b])
end
base_cont= Dict()
# Big-M (M = 100) linearization of FFRReserves_max[b] = max over the
# contingencies of base network b of FFR_list; exactly one Zff is active.
for b in base_nws
# [item for item in a if item[2] == 4]
base_cont = [tt for tt in cont_nws if tt[1] == b]
####_PM.var(pm)[:Zff] = JuMP.@variable(pm.model, [(z,w) in base_cont], base_name="zff", binary = true, start = 0 ) #just one variable, how to create multiple with an index
for (z,w) in base_cont
JuMP.@constraint(pm.model, FFRReserves_max[b] >= FFR_list[(z,w)] )
JuMP.@constraint(pm.model, FFRReserves_max[b] <= FFR_list[(z,w)] + 100*(1-Zff[(z,w)]))
end
JuMP.@constraint(pm.model, sum(Zff[(z,w)] for (z, w) in base_cont) == 1 )
end
Scale = 8760*10 # 5 for no. of gap years between two time steps
# Investment cost: candidate DC converters and DC branches (candidates are
# taken from network 1).
JuMP.@constraint(pm.model, Inv_cost == sum(sum(conv["cost"]*_PM.var(pm, 1, :conv_ne, i) for (i,conv) in _PM.nws(pm)[1][:convdc_ne]) for (n, nw_ref) in _PM.nws(pm)) +
sum(sum(branch["cost"]*_PM.var(pm, 1, :branchdc_ne, i) for (i,branch) in _PM.nws(pm)[1][:branchdc_ne]) for (n, nw_ref) in _PM.nws(pm)) )
# NOTE(review): display() prints the constraint at model-build time —
# debugging output, consider removing.
display(JuMP.@constraint(pm.model, Gen_cost == sum(weights[b]*sum(Scale/10^6*gen_cost[(b,i)] for i in curt_gen) for b in base_nws) ) )
# display(JuMP.@constraint(pm.model, FFRReserves == sum(FFR_cost[(c,2)]*100*Scale/10^6* _PM.var(pm, c, :Pff, 2) for (b,c,br) in cont_nws) ))
JuMP.@constraint(pm.model, FFRReserves == Scale*sum(weights[b]*FFRReserves_max[b] for b in base_nws) )
JuMP.@constraint(pm.model, FCRReserves == Scale*sum(weights[b]*FCRReserves_max[b] for b in base_nws) )
# JuMP.@constraint(pm.model, Cont == sum(weights[b]*_PM.ref(pm, b, :branchdc_ne, br)["fail_prob"]*_PM.var(pm, b, :branchdc_ne, br)*
# (Scale/10^6*(gen_cost[(c,1)] + gen_cost[(c,2)]) - Scale/10^6*(gen_cost[(b,1)]+gen_cost[(b,2)]) ]) for (b,c,br) in cont_nws) )
curtailment = Dict()
capacity = Dict()
# for b in base_nws
# for c in curt_gen
# curtailment[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c)) - _PM.var(pm, b, :pg, c)
# # curtailment[(b,c)] = _PM.var(pm, b, :pg, c)
# capacity[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c))
# end
# end
# curtailment = Dict()
# capacity = Dict()
# for b in base_nws
# for c in curt_gen
# curtailment[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c)) - _PM.var(pm, b, :pg, c)
# capacity[(b,c)] = JuMP.upper_bound(_PM.var(pm, b, :pg, c))
# end
# end
# for y = 1:total_year
# JuMP.@constraint(pm.model, Curt[y] == sum(sum(curtailment[(b,c)] for c in curt_gen) for b in year_base[y]) / sum(sum(capacity[(b,c)] for c in curt_gen) for b in year_base[y]) )
# JuMP.@constraint(pm.model, Curt[y] <= max_curt)
# end
display("objective_min_cost_TNEP_FSNS_rev1")
# NOTE(review): Gen_cost enters with a minus sign here, unlike the variant
# above which adds it to the minimization — confirm this is intended.
return JuMP.@objective(pm.model, Min, FFRReserves + FCRReserves + Inv_cost - Gen_cost)
end
| [
8818,
9432,
62,
1084,
62,
15805,
62,
46559,
8905,
7,
4426,
3712,
62,
5868,
13,
23839,
13434,
17633,
8,
198,
220,
220,
220,
611,
9114,
13,
33990,
14692,
10652,
42846,
8973,
6624,
2081,
8614,
9114,
13,
33990,
14692,
8035,
42846,
8973,
6... | 1.890038 | 15,960 |
"""
tfill(v, ::Val{D}) where D
Returns a tuple of length `D` that contains `D` times the object `v`.
In contrast to `tuple(fill(v,D)...)` which returns the same result, this function is type-stable.
"""
function tfill(v, ::Val{D}) where D
t = tfill(v, Val{D-1}())
(v,t...)
end
tfill(v,::Val{0}) = ()
tfill(v,::Val{1}) = (v,)
tfill(v,::Val{2}) = (v,v)
tfill(v,::Val{3}) = (v,v,v)
"""
get_val_parameter(::Val{T}) where T
get_val_parameter(::Type{Val{T}}) where T
Returns `T`.
"""
function get_val_parameter(::Val{T}) where T
T
end
function get_val_parameter(::Type{Val{T}}) where T
T
end
| [
198,
37811,
198,
220,
220,
220,
256,
20797,
7,
85,
11,
7904,
7762,
90,
35,
30072,
810,
360,
198,
198,
35561,
257,
46545,
286,
4129,
4600,
35,
63,
326,
4909,
4600,
35,
63,
1661,
262,
2134,
4600,
85,
44646,
198,
818,
6273,
284,
4600... | 2.261993 | 271 |
# Common supertype of the two Pardiso-backed LU factorizations below
# (upstream pardiso-project.org solver and Intel MKL Pardiso).
abstract type AbstractPardisoLU{Tv,Ti} <: AbstractLUFactorization{Tv,Ti} end

# LU factorization handle backed by the pardiso-project.org solver.
mutable struct PardisoLU{Tv, Ti} <: AbstractPardisoLU{Tv,Ti}
# Matrix to be factorized; `nothing` until a matrix is attached.
A::Union{ExtendableSparseMatrix{Tv,Ti},Nothing}
# Underlying Pardiso solver state object.
ps::Pardiso.PardisoSolver
# Hash of the sparsity pattern of `A` at the last factorization
# (0 = not factorized yet); compared by `update!` to decide whether a
# fresh symbolic analysis is needed.
phash::UInt64
end

# Keyword constructor: create an empty factorization handle and apply
# default or user-supplied Pardiso parameters (see `default_initialize!`).
function PardisoLU{Tv,Ti}(;iparm=nothing,dparm=nothing,mtype=nothing) where {Tv,Ti}
fact=PardisoLU{Tv,Ti}(nothing,Pardiso.PardisoSolver(),0)
default_initialize!(fact,iparm,dparm,mtype)
end
"""
```
PardisoLU(;valuetype=Float64,
indextype=Int64,
iparm::Vector,
dparm::Vector,
mtype::Int)
PardisoLU(matrix; iparm,dparm,mtype)
```
LU factorization based on pardiso. For using this, you need to issue `using Pardiso`
and have the pardiso library from [pardiso-project.org](https://pardiso-project.org)
[installed](https://github.com/JuliaSparse/Pardiso.jl#pardiso-60).
The optional keyword arguments `mtype`, `iparm` and `dparm` are
(Pardiso internal parameters)[https://github.com/JuliaSparse/Pardiso.jl#readme].
Forsetting them, one can also access the `PardisoSolver` e.g. like
```
using Pardiso
plu=PardisoLU()
Pardiso.set_iparm!(plu.ps,5,13.0)
```
"""
PardisoLU(;valuetype::Type=Float64, indextype::Type=Int64, kwargs...)=PardisoLU{valuetype,indextype}(;kwargs...)
#############################################################################################
# LU factorization handle backed by the MKL Pardiso solver.
mutable struct MKLPardisoLU{Tv, Ti} <: AbstractPardisoLU{Tv,Ti}
# Matrix to be factorized; `nothing` until a matrix is attached.
A::Union{ExtendableSparseMatrix{Tv,Ti},Nothing}
# Underlying MKL Pardiso solver state object.
ps::Pardiso.MKLPardisoSolver
# Sparsity-pattern hash at the last factorization (0 = none yet).
phash::UInt64
end

# Keyword constructor; note MKL Pardiso takes no `dparm` vector, so
# `nothing` is forwarded in its place.
function MKLPardisoLU{Tv,Ti}(;iparm=nothing,mtype=nothing) where {Tv,Ti}
fact=MKLPardisoLU{Tv,Ti}(nothing,Pardiso.MKLPardisoSolver(),0)
default_initialize!(fact, iparm,nothing,mtype)
end
"""
```
MKLPardisoLU(;valuetype=Float64,
indextype=Int64,
iparm::Vector,
mtype::Int)
MKLPardisoLU(matrix; iparm, mtype)
```
LU factorization based on pardiso. For using this, you need to issue `using Pardiso`.
This version uses the early 2000's fork in Intel's MKL library.
The optional keyword arguments `mtype` and `iparm` are
(Pardiso internal parameters)[https://github.com/JuliaSparse/Pardiso.jl#readme].
For setting them you can also access the `PardisoSolver` e.g. like
```
using Pardiso
plu=MKLPardisoLU()
Pardiso.set_iparm!(plu.ps,5,13.0)
```
"""
MKLPardisoLU(;valuetype::Type=Float64, indextype::Type=Int64,kwargs...)=MKLPardisoLU{valuetype,indextype}(;kwargs...)
##########################################################################################
"""
    default_initialize!(fact, iparm, dparm, mtype)

Initialize the Pardiso solver wrapped by `fact` and return `fact`.

- `mtype`: Pardiso matrix type. When `nothing`, it is derived from the
  element type `Tv`: `Pardiso.COMPLEX_NONSYM` for complex elements,
  `Pardiso.REAL_NONSYM` otherwise.
- `iparm` / `dparm`: optional vectors of Pardiso integer/double
  parameters; entries are copied into the solver, truncated to the
  solver's own parameter vector length.
"""
function default_initialize!(fact::AbstractPardisoLU{Tv,Ti}, iparm, dparm, mtype) where {Tv, Ti}
    # Determine the matrix type from the element type unless given explicitly.
    if !isnothing(mtype)
        my_mtype = mtype
    elseif Tv <: Complex
        my_mtype = Pardiso.COMPLEX_NONSYM
    else
        my_mtype = Pardiso.REAL_NONSYM
    end
    # Bug fix: previously `Pardiso.REAL_NONSYM` was passed unconditionally
    # here, silently ignoring both a user-supplied `mtype` and complex
    # element types (the computed `my_mtype` was dead).
    Pardiso.set_matrixtype!(fact.ps, my_mtype)
    if !isnothing(iparm)
        for i = 1:min(length(iparm), length(fact.ps.iparm))
            Pardiso.set_iparm!(fact.ps, i, iparm[i])
        end
    end
    if !isnothing(dparm)
        for i = 1:min(length(dparm), length(fact.ps.dparm))
            Pardiso.set_dparm!(fact.ps, i, dparm[i])
        end
    end
    fact
end
# (Re)compute the factorization of `lufact.A`.
# If the sparsity pattern changed since the last call (pattern-hash
# mismatch), the previous Pardiso data is released and a full symbolic
# analysis + numeric factorization is run; otherwise only the numeric
# factorization phase is repeated.
function update!(lufact::AbstractPardisoLU{Tv,Ti}) where {Tv, Ti}
ps=lufact.ps
# Move any pending entries of the extendable matrix into its CSC storage.
flush!(lufact.A)
Acsc=lufact.A.cscmatrix
if lufact.phash!=lufact.A.phash
# Pattern changed: release internal memory, then redo analysis + factorization.
Pardiso.set_phase!(ps, Pardiso.RELEASE_ALL)
Pardiso.pardiso(ps, Tv[], Acsc, Tv[])
Pardiso.set_phase!(ps, Pardiso.ANALYSIS_NUM_FACT)
lufact.phash=lufact.A.phash
else
# Same pattern: numeric factorization only.
Pardiso.set_phase!(ps, Pardiso.NUM_FACT)
end
Pardiso.fix_iparm!(ps, :N)
Pardiso.pardiso(ps, Tv[], Acsc, Tv[])
lufact
end
# Solve `lufact.A * u = v` using the previously computed Pardiso
# factorization, writing the solution into `u` and returning it.
function LinearAlgebra.ldiv!(u::AbstractArray{T,1} where T, lufact::AbstractPardisoLU, v::AbstractArray{T,1} where T)
    solver = lufact.ps
    csc = lufact.A.cscmatrix
    # Solve phase with iterative refinement; `u` receives the solution.
    Pardiso.set_phase!(solver, Pardiso.SOLVE_ITERATIVE_REFINE)
    Pardiso.pardiso(solver, u, csc, v)
    return u
end
# In-place solve overwriting `v` with the solution of `fact.A * x = v`.
function LinearAlgebra.ldiv!(fact::AbstractPardisoLU, v::AbstractArray{T,1} where T)
    return ldiv!(v, fact, copy(v))
end
# Generate the `PardisoLU(matrix; kwargs...)` / `MKLPardisoLU(matrix; kwargs...)`
# convenience constructors via the `@makefrommatrix` macro (defined
# elsewhere in this package).
@eval begin
@makefrommatrix PardisoLU
@makefrommatrix MKLPardisoLU
end
| [
397,
8709,
2099,
27741,
47,
446,
26786,
41596,
90,
51,
85,
11,
40533,
92,
1279,
25,
27741,
43,
36820,
11218,
1634,
90,
51,
85,
11,
40533,
92,
886,
198,
198,
76,
18187,
2878,
350,
446,
26786,
41596,
90,
51,
85,
11,
16953,
92,
1279,... | 2.21246 | 1,878 |
<reponame>bmharsha/KernelFunctions.jl
"""
ExponentiatedKernel()
Exponentiated kernel.
# Definition
For inputs ``x, x' \\in \\mathbb{R}^d``, the exponentiated kernel is defined as
```math
k(x, x') = \\exp(x^\\top x').
```
"""
struct ExponentiatedKernel <: SimpleKernel end
# Scalar kernel evaluation given the precomputed inner product x'y.
function kappa(::ExponentiatedKernel, xᵀy::Real)
    return exp(xᵀy)
end

# The base metric feeding `kappa` is the dot product of the inputs.
metric(::ExponentiatedKernel) = DotProduct()

# Declares the kernel compatible with the package's Kronecker machinery.
iskroncompatible(::ExponentiatedKernel) = true

function Base.show(io::IO, ::ExponentiatedKernel)
    return print(io, "Exponentiated Kernel")
end
| [
27,
7856,
261,
480,
29,
20475,
71,
945,
3099,
14,
42,
7948,
24629,
2733,
13,
20362,
198,
37811,
198,
220,
220,
220,
5518,
3471,
12931,
42,
7948,
3419,
198,
198,
16870,
3471,
12931,
9720,
13,
198,
198,
2,
30396,
198,
198,
1890,
17311... | 2.606218 | 193 |
<filename>src/FastArrayOps.jl
module FastArrayOps
import Base.LinAlg: BlasReal, BlasComplex, BlasFloat, BlasInt, BlasChar
const libblas = Base.libblas_name
export fast_scale!, unsafe_fast_scale!,
fast_add!, unsafe_fast_add!,
fast_addscal!, unsafe_fast_addscal!,
fast_copy!, unsafe_fast_copy!,
fast_fill!, unsafe_fast_fill!
export fast_check1, fast_check2, fast_check3, nmax2nel, nel2nmax, fast_args2range, fast_range2args
# WARNING: FastArrayOps.jl gets overwritten by FastArrayOps_src.jl when running make.jl
## CONSTANTS
# Dispatch thresholds: below the relevant `NLIM_*` value the pure-Julia
# loop is used, above it the BLAS / memcpy path is taken (values are
# presumably benchmark-derived in make.jl — TODO confirm).
# `typemax(Int)` effectively disables the BLAS path for that operation.
const NLIM_SCALE = 13
const NLIM_SCALE_OOP1 = 80
const NLIM_SCALE_OOP2 = 100000
const NLIM_SCALEARR = typemax(Int)
const NLIM_SCALEARR_OOP = typemax(Int)
const NLIM_ADD = typemax(Int)
const NLIM_ADD_OOP = typemax(Int)
const NLIM_ADDARR = 13
const NLIM_ADDARR_OOP1 = 30
const NLIM_ADDARR_OOP2 = 100000
const NLIM_ADDARRSCAL = 13
const NLIM_ADDARRSCAL_OOP1 = 30
const NLIM_ADDARRSCAL_OOP2 = 100000
const NLIM_COPY1 = 80
const NLIM_COPY2 = 100000
const NLIM_FILL = 13
## UTILS
# Number of elements visited when starting at index `i` and stepping by
# `inc` (sign ignored) without passing `nmax`; 0 when `i` already exceeds
# `nmax`.
function nmax2nel(i::Int, inc::Int, nmax::Int)
    @assert 0 < i
    if nmax < i
        return 0
    end
    return div(nmax - i, abs(inc)) + 1
end
# Largest index reached when starting at `i` and taking `nel` steps of
# size `abs(inc)`; returns `i - 1` (an empty span) for negative `nel`.
function nel2nmax(i::Int, inc::Int, nel::Int)
    @assert 0 < i
    if nel < 0
        return i - 1
    end
    return i + (nel - 1) * abs(inc)
end
# Convert (first index `i`, increment `inc`, count `n`) into the
# equivalent index range; a negative increment yields the reversed
# (descending) range over the same indices.
function fast_args2range(i::Int, inc::Int, n::Int)
    @assert 0 < i
    s = abs(inc)
    r = i:s:nel2nmax(i, s, n)
    return inc < 0 ? reverse(r) : r
end
# Inverse of `fast_args2range`: decompose a range into
# (first index, increment, count). For a descending range the smallest
# index (its last element) is reported as the start.
function fast_range2args(r::Range)
    inc = step(r)
    start = inc > 0 ? first(r) : last(r)
    return (start, inc, length(r))
end
# Validate a single strided-access specification: positive increment,
# positive start index, and the last touched index within bounds.
# Returns 0 on success, throws otherwise.
function fast_check1(x, ix, incx, n)
    if incx <= 0
        throw(ArgumentError("non-positive increment"))
    end
    if ix <= 0 || ix + (n - 1) * incx > length(x)
        throw(BoundsError())
    end
    return 0
end
# Validate a pair of strided-access specifications (increments may be
# negative but not zero). Returns 0 on success, throws otherwise.
function fast_check2(x, ix, incx, y, iy, incy, n)
    if incx == 0 || incy == 0
        throw(ArgumentError("zero increment"))
    end
    if ix <= 0 || iy <= 0
        throw(BoundsError())
    end
    if ix + (n - 1) * abs(incx) > length(x)
        throw(BoundsError())
    end
    if iy + (n - 1) * abs(incy) > length(y)
        throw(BoundsError())
    end
    return 0
end
# Validate three strided-access specifications at once (increments may be
# negative but not zero). Returns 0 on success, throws otherwise.
function fast_check3(x, ix, incx, y, iy, incy, z, iz, incz, n)
    if incx == 0 || incy == 0 || incz == 0
        throw(ArgumentError("zero increment"))
    end
    if ix <= 0 || iy <= 0 || iz <= 0
        throw(BoundsError())
    end
    if ix + (n - 1) * abs(incx) > length(x)
        throw(BoundsError())
    end
    if iy + (n - 1) * abs(incy) > length(y)
        throw(BoundsError())
    end
    if iz + (n - 1) * abs(incz) > length(z)
        throw(BoundsError())
    end
    return 0
end
# utils for 0 fill
# Map a floating-point element type to the integer type with the same
# bit width, so a value can be reinterpreted for bit-pattern inspection.
# NOTE(review): `Complex64`/`Complex128` are pre-0.7 names for
# `ComplexF32`/`ComplexF64`; this file targets old Julia throughout.
inttype(::Type{Float32}) = Int32
inttype(::Type{Float64}) = Int64
inttype(::Type{Complex64}) = Int64
inttype(::Type{Complex128}) = Int128
# Reinterpret a one-element array as a one-element Array{T,1} without
# copying, by calling the runtime's internal `jl_reshape_array`
# (old-Julia parametric-method syntax; used because plain `reinterpret`
# does not cover these element-type pairs here).
function fast_reinterpret1{T<:Number}(::Type{T}, a::Array)
# Only a single element is ever reinterpreted by this helper.
@assert length(a) == 1
ccall(:jl_reshape_array, Array{T,1}, (Any, Any, Any), Array{T,1}, a, (1,))
end
# are float 0s zero bits?
# Determine whether the all-zero bit pattern encodes floating-point zero
# for every supported element type, by reinterpreting a zero of each type
# as an integer and checking that the accumulated bits are all zero.
function zerobits()
    acc = reinterpret(inttype(Float32), convert(Float32, 0))
    acc += reinterpret(inttype(Float64), convert(Float64, 0))
    acc += fast_reinterpret1(inttype(Complex64), [convert(Complex64, 0)])[1]
    acc += fast_reinterpret1(inttype(Complex128), [convert(Complex128, 0)])[1]
    return acc == 0
end
# Computed once at load time: true iff the all-zero bit pattern is
# floating-point zero for all supported types (presumably consulted by
# the fill routines to allow memset-style zero fills — not visible in
# this chunk, confirm against fast_fill!).
const ZEROFLOAT = zerobits()
for (fscal, fcopy, faxpy, ftbmv, fsbmv, elty) in (
(:dscal_, :dcopy_, :daxpy_, :dtbmv_, :dsbmv_, :Float64),
(:sscal_, :scopy_, :saxpy_, :stbmv_, :ssbmv_, :Float32),
(:zscal_, :zcopy_, :zaxpy_, :ztbmv_, :zsbmv_, :Complex128),
(:cscal_, :ccopy_, :caxpy_, :ctbmv_, :csbmv_, :Complex64))
## SCALE METHODS
for (f, isunsafe) in ( (:fast_scale!, false), (:unsafe_fast_scale!, true) )
@eval begin
# x = a*x
# =======
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, a::$elty, n::Int)
$isunsafe || begin
# src: fast_check1.jl
0 < incx || throw(ArgumentError("non-positive increment"))
0 < ix || throw(BoundsError())
ix+(n-1)*incx <= length(x) || throw(BoundsError())
end
if n < $NLIM_SCALE*incx
# src: scalarr1_for.jl
incx = abs(incx)
@inbounds for i = ix:incx:ix-1+n*incx
x[i] = *( a, x[i] )
end
else
# src: blas_scale.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
ccall(($(string(fscal)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), px, &(incx))
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set1_inc1.jl
incx = 1
# src: fast_check1.jl
0 < incx || throw(ArgumentError("non-positive increment"))
0 < ix || throw(BoundsError())
ix+(n-1)*incx <= length(x) || throw(BoundsError())
end
if n < $NLIM_SCALE #*incx
# src: scalarr1_for_inc1.jl
@inbounds for i = ix:ix-1+n
x[i] = *( a, x[i] )
end
else
# src: set1_inc1.jl
incx = 1
# src: blas_scale.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
ccall(($(string(fscal)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), px, &(incx))
end
return x
end
# x = a*y
# =======
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
mul = max(abs(incx), abs(incy))
if n < $NLIM_SCALE_OOP1*mul || n*mul > $NLIM_SCALE_OOP2
# src: scalarr1_foroop.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
@inbounds for i = 0:n-1
x[ix+i*incx] = *( a, y[iy+i*incy] )
end
else
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
# src: blas_scale.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
ccall(($(string(fscal)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), px, &(incx))
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inceq.jl
incy = incx
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
mul = abs(incx)
if n < $NLIM_SCALE_OOP1*mul || n*mul > $NLIM_SCALE_OOP2
# src: scalarr1_foroop_inceq.jl
incx = abs(incx)
d = iy - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = *( a, y[d+i] )
end
else
# src: set2_inceq.jl
incy = incx
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
# src: blas_scale.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
ccall(($(string(fscal)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), px, &(incx))
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inc1.jl
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_SCALE_OOP1 || n > $NLIM_SCALE_OOP2
# src: scalarr1_foroop_inc1.jl
d = iy - ix
@inbounds for i = ix:ix-1+n
x[i] = *( a, y[d+i] )
end
else
# src: set2_inc1.jl
incx = 1
incy = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
# src: blas_scale.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
ccall(($(string(fscal)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), px, &(incx))
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_SCALE_OOP1 || n > $NLIM_SCALE_OOP2
# src: scalarr1_foroop_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = *( a, y[i] )
end
else
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
# src: blas_scale.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
ccall(($(string(fscal)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), px, &(incx))
end
return x
end
# x = x.*y
# ========
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, n::Int)
$isunsafe || begin
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: arr2xy_for.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
@inbounds for i = 0:n-1
x[ix+i*incx] = *( x[ix+i*incx], y[iy+i*incy] )
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, n::Int)
$isunsafe || begin
# src: set2_inceq.jl
incy = incx
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: arr2xy_for_inceq.jl
incx = abs(incx)
d = iy - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = *( x[i], y[d+i] )
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, n::Int)
$isunsafe || begin
# src: set2_inc1.jl
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: arr2xy_for_inc1.jl
d = iy - ix
@inbounds for i = ix:ix-1+n
x[i] = *( x[i], y[d+i] )
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, n::Int)
$isunsafe || begin
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: arr2xy_for_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = *( x[i], y[i] )
end
return x
end
# x = y.*z
# ========
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, z::Array{$elty}, iz::Int, incz::Int, n::Int)
$isunsafe || begin
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
# src: arr2yz_foroop.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
incz < 0 && (iz = iz+(n-1)*abs(incz))
@inbounds for i = 0:n-1
x[ix+i*incx] = *( y[iy+i*incy], z[iz+i*incz] )
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, z::Array{$elty}, iz::Int, n::Int)
$isunsafe || begin
# src: set3_inceq.jl
incy = incx
incz = incx
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
# src: arr2yz_foroop_inceq.jl
incx = abs(incx)
dy = iy - ix
dz = iz - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = *( y[dy+i], z[dz+i] )
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, z::Array{$elty}, iz::Int, n::Int)
$isunsafe || begin
# src: set3_inc1.jl
incx = 1
incy = 1
incz = 1
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
# src: arr2yz_foroop_inc1.jl
dy = iy - ix
dz = iz - ix
@inbounds for i = ix:ix-1+n
x[i] = *( y[dy+i], z[dz+i] )
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, z::Array{$elty}, n::Int)
$isunsafe || begin
# src: set3_inc1ieq.jl
iy = ix
iz = ix
incx = 1
incy = 1
incz = 1
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
# src: arr2yz_foroop_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = *( y[i], z[i] )
end
return x
end
end # eval begin
end # for
## ADD METHODS
for (f, isunsafe) in ( (:fast_add!, false), (:unsafe_fast_add!, true) )
@eval begin
# x = x + a
# =======
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, a::$elty, n::Int)
$isunsafe || begin
# src: fast_check1.jl
0 < incx || throw(ArgumentError("non-positive increment"))
0 < ix || throw(BoundsError())
ix+(n-1)*incx <= length(x) || throw(BoundsError())
end
# src: scalarr1_for.jl
incx = abs(incx)
@inbounds for i = ix:incx:ix-1+n*incx
x[i] = +( a, x[i] )
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set1_inc1.jl
incx = 1
# src: fast_check1.jl
0 < incx || throw(ArgumentError("non-positive increment"))
0 < ix || throw(BoundsError())
ix+(n-1)*incx <= length(x) || throw(BoundsError())
end
# src: scalarr1_for_inc1.jl
@inbounds for i = ix:ix-1+n
x[i] = +( a, x[i] )
end
return x
end
# x = y + a
# =======
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: scalarr1_foroop.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
@inbounds for i = 0:n-1
x[ix+i*incx] = +( a, y[iy+i*incy] )
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inceq.jl
incy = incx
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: scalarr1_foroop_inceq.jl
incx = abs(incx)
d = iy - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = +( a, y[d+i] )
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inc1.jl
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: scalarr1_foroop_inc1.jl
d = iy - ix
@inbounds for i = ix:ix-1+n
x[i] = +( a, y[d+i] )
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: scalarr1_foroop_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = +( a, y[i] )
end
return x
end
# x = x + y
# =========
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, n::Int)
$isunsafe || begin
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARR #*mul # || n*mul > $NLIM_SCALEARR
# src: arr2xy_for.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
@inbounds for i = 0:n-1
x[ix+i*incx] = +( x[ix+i*incx], y[iy+i*incy] )
end
else
a = 1
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, n::Int)
$isunsafe || begin
# src: set2_inceq.jl
incy = incx
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARR
# src: arr2xy_for_inceq.jl
incx = abs(incx)
d = iy - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = +( x[i], y[d+i] )
end
else
a = 1
# src: set2_inceq.jl
incy = incx
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, n::Int)
$isunsafe || begin
# src: set2_inc1.jl
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARR
# src: arr2xy_for_inc1.jl
d = iy - ix
@inbounds for i = ix:ix-1+n
x[i] = +( x[i], y[d+i] )
end
else
a = 1
# src: set2_inc1.jl
incx = 1
incy = 1
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, n::Int)
$isunsafe || begin
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARR
# src: arr2xy_for_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = +( x[i], y[i] )
end
else
a = 1
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# x = y + z
# =========
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, z::Array{$elty}, iz::Int, incz::Int, n::Int)
$isunsafe || begin
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARR_OOP1 || n > $NLIM_ADDARR_OOP2 #*mul # || n*mul > $NLIM_SCALEARR
# src: arr2yz_foroop.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
incz < 0 && (iz = iz+(n-1)*abs(incz))
@inbounds for i = 0:n-1
x[ix+i*incx] = +( y[iy+i*incy], z[iz+i*incz] )
end
else
a = 1
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, z::Array{$elty}, iz::Int, n::Int)
$isunsafe || begin
# src: set3_inceq.jl
incy = incx
incz = incx
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARR_OOP1 || n > $NLIM_ADDARR_OOP2
# src: arr2yz_foroop_inceq.jl
incx = abs(incx)
dy = iy - ix
dz = iz - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = +( y[dy+i], z[dz+i] )
end
else
a = 1
# src: set3_inceq.jl
incy = incx
incz = incx
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, z::Array{$elty}, iz::Int, n::Int)
$isunsafe || begin
# src: set3_inc1.jl
incx = 1
incy = 1
incz = 1
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARR_OOP1 || n > $NLIM_ADDARR_OOP2
# src: arr2yz_foroop_inc1.jl
dy = iy - ix
dz = iz - ix
@inbounds for i = ix:ix-1+n
x[i] = +( y[dy+i], z[dz+i] )
end
else
a = 1
# src: set3_inc1.jl
incx = 1
incy = 1
incz = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, z::Array{$elty}, n::Int)
$isunsafe || begin
# src: set3_inc1ieq.jl
iy = ix
iz = ix
incx = 1
incy = 1
incz = 1
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARR_OOP1 || n > $NLIM_ADDARR_OOP2
# src: arr2yz_foroop_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = +( y[i], z[i] )
end
else
a = 1
# src: set3_inc1ieq.jl
iy = ix
iz = ix
incx = 1
incy = 1
incz = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
end # eval begin
end # for
## ADDSCAL METHODS
for (f, isunsafe) in ( (:fast_addscal!, false), (:unsafe_fast_addscal!, true) )
@eval begin
# x = x + a*y
# =========
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL #*mul # || n*mul > $NLIM_SCALEARR
# src: addarrscal_for.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
@inbounds for i = 0:n-1
x[ix+i*incx] = x[ix+i*incx] + y[iy+i*incy]*a
end
else
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inceq.jl
incy = incx
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL
# src: addarrscal_for_inceq.jl
incx = abs(incx)
d = iy - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = x[i] + y[d+i]*a
end
else
# src: set2_inceq.jl
incy = incx
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inc1.jl
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL
# src: addarrscal_for_inc1.jl
d = iy - ix
@inbounds for i = ix:ix-1+n
x[i] = x[i] + y[d+i]*a
end
else
# src: set2_inc1.jl
incx = 1
incy = 1
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, a::$elty, n::Int)
$isunsafe || begin
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL
# src: addarrscal_for_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = x[i] + y[i]*a
end
else
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# x = y + a*z
# =========
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, z::Array{$elty}, iz::Int, incz::Int, a::$elty, n::Int)
$isunsafe || begin
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL_OOP1 || n > $NLIM_ADDARRSCAL_OOP2 #*mul # || n*mul > $NLIM_SCALEARR
# src: addarrscal_foroop.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
incz < 0 && (iz = iz+(n-1)*abs(incz))
@inbounds for i = 0:n-1
x[ix+i*incx] = y[iy+i*incy] + z[iz+i*incz]*a
end
else
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, z::Array{$elty}, iz::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set3_inceq.jl
incy = incx
incz = incx
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL_OOP1 || n > $NLIM_ADDARRSCAL_OOP2
# src: addarrscal_foroop_inceq.jl
incx = abs(incx)
dy = iy - ix
dz = iz - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = y[dy+i] + z[dz+i]*a
end
else
# src: set3_inceq.jl
incy = incx
incz = incx
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, z::Array{$elty}, iz::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set3_inc1.jl
incx = 1
incy = 1
incz = 1
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL_OOP1 || n > $NLIM_ADDARRSCAL_OOP2
# src: addarrscal_foroop_inc1.jl
dy = iy - ix
dz = iz - ix
@inbounds for i = ix:ix-1+n
x[i] = y[dy+i] + z[dz+i]*a
end
else
# src: set3_inc1.jl
incx = 1
incy = 1
incz = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, z::Array{$elty}, a::$elty, n::Int)
$isunsafe || begin
# src: set3_inc1ieq.jl
iy = ix
iz = ix
incx = 1
incy = 1
incz = 1
# src: fast_check3.jl
(0 != incx && 0 != incy && 0 != incz) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy && 0 < iz) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
iz+(n-1)*abs(incz) <= length(z) || throw(BoundsError())
end
if n < $NLIM_ADDARRSCAL_OOP1 || n > $NLIM_ADDARRSCAL_OOP2
# src: addarrscal_foroop_inc1ieq.jl
@inbounds for i = ix:ix-1+n
x[i] = y[i] + z[i]*a
end
else
# src: set3_inc1ieq.jl
iy = ix
iz = ix
incx = 1
incy = 1
incz = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
y, iy, incy = z, iz, incz
# src: blas_axpy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(faxpy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), &(a), py, &(incy), px, &(incx))
end
return x
end
end # eval begin
end # for
## COPY METHODS
for (f, isunsafe) in ( (:fast_copy!, false), (:unsafe_fast_copy!, true) )
@eval begin
# x = y
# =====
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, incy::Int, n::Int)
$isunsafe || begin
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
mul = max(abs(incx), abs(incy))
if n < $NLIM_COPY1*mul || n*mul > $NLIM_COPY2
# src: copy_foroop.jl
incx < 0 && (ix = ix+(n-1)*abs(incx))
incy < 0 && (iy = iy+(n-1)*abs(incy))
@inbounds for i = 0:n-1
x[ix+i*incx] = y[iy+i*incy]
end
else
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
end
return x
end
# inceq
function ($f)(x::Array{$elty}, ix::Int, incx::Int, y::Array{$elty}, iy::Int, n::Int)
$isunsafe || begin
# src: set2_inceq.jl
incy = incx
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
mul = abs(incx)
if n < $NLIM_COPY1*mul || n*mul > $NLIM_COPY2
# src: copy_foroop_inceq.jl
incx = abs(incx)
d = iy - ix
@inbounds for i = ix:incx:ix+(n-1)*incx
x[i] = y[d+i]
end
else
# src: set2_inceq.jl
incy = incx
# src: blas_copy.jl
px = convert(Ptr{$(elty)},x) + (ix-1)*sizeof($(elty))
py = convert(Ptr{$(elty)},y) + (iy-1)*sizeof($(elty))
ccall(($(string(fcopy)),libblas), Void,
(Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}, Ptr{$(elty)}, Ptr{BlasInt}),
&(n), py, &(incy), px, &(incx))
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, iy::Int, n::Int)
$isunsafe || begin
# src: set2_inc1.jl
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: set2_inc1.jl
incx = 1
incy = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
return x
end
# inc1ieq
function ($f)(x::Array{$elty}, ix::Int, y::Array{$elty}, n::Int)
$isunsafe || begin
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: fast_check2.jl
(0 != incx && 0 != incy) || throw(ArgumentError("zero increment"))
(0 < ix && 0 < iy) || throw(BoundsError())
ix+(n-1)*abs(incx) <= length(x) || throw(BoundsError())
iy+(n-1)*abs(incy) <= length(y) || throw(BoundsError())
end
# src: set2_inc1ieq.jl
iy = ix
incx = 1
incy = 1
# src: c_memcpy.jl
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
py = convert(Ptr{$(elty)},y) + (iy-1)*selty
ccall(:memcpy, Ptr{Void}, (Ptr{Void}, Ptr{Void}, Uint),
px, py, n*selty)
return x
end
end # eval begin
end # for
## FILL METHODS
for (f, isunsafe) in ( (:fast_fill!, false), (:unsafe_fast_fill!, true) )
@eval begin
# x = a
# =======
# general
function ($f)(x::Array{$elty}, ix::Int, incx::Int, a::$elty, n::Int)
$isunsafe || begin
# src: fast_check1.jl
0 < incx || throw(ArgumentError("non-positive increment"))
0 < ix || throw(BoundsError())
ix+(n-1)*incx <= length(x) || throw(BoundsError())
end
# src: fill_foroop.jl
incx = abs(incx)
@inbounds for i = ix:incx:ix-1+n*incx
x[i] = a
end
return x
end
# inc1
function ($f)(x::Array{$elty}, ix::Int, a::$elty, n::Int)
$isunsafe || begin
# src: set1_inc1.jl
incx = 1
# src: fast_check1.jl
0 < incx || throw(ArgumentError("non-positive increment"))
0 < ix || throw(BoundsError())
ix+(n-1)*incx <= length(x) || throw(BoundsError())
end
if a == 0 && n > $NLIM_FILL && ZEROFLOAT
# src: set1_inc1.jl
incx = 1
# src: c_memset.jl
a::Int32 = a
selty = sizeof($(elty))
px = convert(Ptr{$(elty)},x) + (ix-1)*selty
ccall(:memset, Ptr{Void}, (Ptr{Void}, Int32, Csize_t),
px, a, n*selty)
else
# src: fill_foroop_inc1.jl
@inbounds for i = ix:ix-1+n
x[i] = a
end
end
return x
end
end # eval begin
end # for
end # for
end # module
| [
27,
34345,
29,
10677,
14,
22968,
19182,
41472,
13,
20362,
198,
21412,
12549,
19182,
41472,
198,
11748,
7308,
13,
14993,
2348,
70,
25,
1086,
292,
15633,
11,
1086,
292,
5377,
11141,
11,
1086,
292,
43879,
11,
1086,
292,
5317,
11,
1086,
2... | 1.970044 | 21,465 |
<gh_stars>0
# This file is a part of JuliaFEM.
# License is MIT: see https://github.com/JuliaFEM/FEMCoupling.jl/blob/master/LICENSE
using FEMCoupling: get_C
using Base.Test
@testset "Plain strain kinematic Coupling" begin
# plane strain problem
# Square shaped plane strain element(8x8) + one single node(2x2)
K=zeros(10,10)
K[1:8,1:8]=
[500 225 -350 75 -250 -225 100 -75;
225 500 -75 100 -225 -250 75 -350;
-350 -75 500 -225 100 75 -250 225;
75 100 -225 500 -75 -350 225 -250;
-250 -225 100 -75 500 225 -350 75;
-225 -250 75 -350 225 500 -75 100;
100 75 -250 225 -350 -75 500 -225;
-75 -350 225 -250 75 100 -225 500]
# Removing fixed DOFs (1 and 2) from nodes 1 and 4
K=K[1:end .!=1, 1:end .!=1]
K=K[1:end .!=1, 1:end .!=1]
K=K[1:end .!=5, 1:end .!=5]
K=K[1:end .!=5, 1:end .!=5]
# Force vector
f=zeros(10,1)
f[9,1]=1200
# Removing fixed DOFs
f=f[1:end .!= 1]
f=f[1:end .!= 1]
f=f[1:end .!= 5]
f=f[1:end .!= 5]
############### Calculations by hand
# Making the C matrix by hand
A=zeros(4,10)
A[1,3]-=1; A[1,9]=1; A[2,4]=-1; A[2,10]=1;
A[3,5]-=1; A[3,9]=1; A[4,6]=-1; A[4,10]=1
# Eliminating fixed dofs (1,2) from nodes 1 and 4
A=A[1:end , 1:end .!=1 ]
A=A[1:end , 1:end .!=1 ]
A=A[1:end , 1:end .!=5 ]
A=A[1:end , 1:end .!=5 ]
# Renaming variables to match with get_C.jl
C_expected=A
g_expected=zeros(4,1)
D_expected=zeros(4,4)
# Assembly for solving
K_expected= [K C_expected';
C_expected D_expected]
f_expected= [f;
g_expected]
u_expected = K_expected\f_expected
############### Calculating C,D and g with get_C.jl
# get_C(refnode,slaves,dofs,ndofs,K_size)
K_size=size(K,1)
C,D,g= FEMCoupling.get_C(5,[2,3],[1,2],2,10)
# Removing fixed DOFs
C=C[1:end , 1:end .!=1 ]
C=C[1:end , 1:end .!=1 ]
C=C[1:end , 1:end .!=5 ]
C=C[1:end , 1:end .!=5 ]
KK=[K C';
C D]
ff=[f;
g]
u = lufact(KK) \ full(ff)
@test isapprox(C,C_expected,rtol=0.0001)
@test isapprox(D,D_expected,rtol=0.0001)
@test isapprox(g,g_expected,rtol=0.0001)
@test isapprox(ff,f_expected,rtol=0.0001)
@test isapprox(KK,K_expected,rtol=0.0001)
@test isapprox(u,u_expected,rtol=0.0001)
# If the last test passes, all other tests will pass too.
# Other tests are made to help tracing why the last test doesn't pass.
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
770,
2393,
318,
257,
636,
286,
22300,
37,
3620,
13,
198,
2,
13789,
318,
17168,
25,
766,
3740,
1378,
12567,
13,
785,
14,
16980,
544,
37,
3620,
14,
37,
3620,
34,
280,
11347,
13,
20362,
14,
2436,... | 2.009683 | 1,136 |
<filename>src/block_extension/Diff.jl<gh_stars>0
export Diff
"""
Diff{GT, N} <: TagBlock{GT, N}
Diff(block) -> Diff
Mark a block as quantum differentiable.
"""
struct Diff{GT, N} <: TagBlock{GT, N}
content::GT
function Diff(content::AbstractBlock{N}) where {N}
@warn "Diff block has been deprecated, please use `Yao.AD.NoParams` to block non-differential parameters."
new{typeof(content), N}(content)
end
end
content(cb::Diff) = cb.content
chcontent(cb::Diff, blk::AbstractBlock) = Diff(blk)
YaoBlocks.PropertyTrait(::Diff) = YaoBlocks.PreserveAll()
apply!(reg::AbstractRegister, db::Diff) = apply!(reg, content(db))
mat(::Type{T}, df::Diff) where T = mat(T, df.content)
Base.adjoint(df::Diff) = chcontent(df, content(df)')
function YaoBlocks.print_annotation(io::IO, df::Diff)
printstyled(io, "[∂] "; bold=true, color=:yellow)
end
#### interface #####
export markdiff
"""
markdiff(mode::Symbol, block::AbstractBlock) -> AbstractBlock
markdiff(mode::Symbol) -> Function
automatically mark differentiable items in a block tree as differentiable.
"""
function markdiff end
# for QC
markdiff(block::Union{RotationGate, CPhaseGate}) = Diff(block)
# escape control blocks.
markdiff(block::ControlBlock) = block
function markdiff(blk::AbstractBlock)
blks = subblocks(blk)
isempty(blks) ? blk : chsubblocks(blk, markdiff.(blks))
end
YaoBlocks.AD.mat_back!(::Type{T}, db::Diff, adjm::AbstractMatrix, collector) where T = AD.mat_back!(T, content(db), adjm, collector)
| [
27,
34345,
29,
10677,
14,
9967,
62,
2302,
3004,
14,
28813,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
39344,
10631,
198,
37811,
198,
220,
220,
220,
10631,
90,
19555,
11,
399,
92,
1279,
25,
17467,
12235,
90,
19555,
11,
399,
92,
19... | 2.741007 | 556 |
<filename>julia/run.jl<gh_stars>0
using HetaSimulator, Plots
p = load_platform(".", rm_out=false)
scenarios = read_scenarios("data-mumenthaler-2000/scenarios.csv")
add_scenarios!(p, scenarios)
data = read_measurements("data-mumenthaler-2000/data.csv")
data_scn = Dict()
data_scn[:scn1] = filter(:scenario => ==(:scn1), data)
data_scn[:scn2] = filter(:scenario => ==(:scn2), data)
data_scn[:scn3] = filter(:scenario => ==(:scn3), data)
loss_add = Dict()
loss_add[:scn1] = 2*sum(log.(data_scn[:scn1][:,"prob.sigma"])) + length(data_scn[:scn1][:,"prob.sigma"])*log(2π)
loss_add[:scn2] = 2*sum(log.(data_scn[:scn2][:,"prob.sigma"])) + length(data_scn[:scn1][:,"prob.sigma"])*log(2π)
loss_add[:scn3] = 2*sum(log.(data_scn[:scn3][:,"prob.sigma"])) + length(data_scn[:scn1][:,"prob.sigma"])*log(2π)
add_measurements!(p, data)
# initial plot
sim(p) |> plot
sim(p, parameters_upd = [:Vmax=>0.3, :ke=>0., :k_a=>5.]) |> plot
# fitting 1 best -63.45
params_df_1 = read_parameters("./julia/parameters-1.csv")
res_fit_1 = fit(p, params_df_1; scenarios=[:scn1], ftol_abs=1e-6, ftol_rel=0.)
res_fit_1 = fit(p, optim(res_fit_1); scenarios=[:scn1], ftol_abs=1e-6, ftol_rel=0.)
fig = sim(p, scenarios=[:scn1], parameters_upd=optim(res_fit_1)) |> plot
# savefig(fig, "diagnostics/scn1_best.png")
# fitting 2 best -63.64
params_df_2 = read_parameters("./julia/parameters-2.csv")
res_fit_2 = fit(p, params_df_2; scenarios=[:scn2], ftol_abs=1e-6, ftol_rel=0.)
res_fit_2 = fit(p, optim(res_fit_2); scenarios=[:scn2], ftol_abs=1e-6, ftol_rel=0.)
fig = sim(p, scenarios=[:scn2], parameters_upd=optim(res_fit_2)) |> plot
# savefig(fig, "diagnostics/scn2_best.png")
# fitting 3 best -63.92
params_df_3 = read_parameters("./julia/parameters-3.csv")
res_fit_3 = fit(p, params_df_3; scenarios=[:scn3], ftol_abs=1e-6, ftol_rel=0.)
res_fit_3 = fit(p, optim(res_fit_3); scenarios=[:scn3], ftol_abs=1e-6, ftol_rel=0.)
fig = sim(p, scenarios=[:scn3], parameters_upd=optim(res_fit_3)) |> plot
# savefig(fig, "diagnostics/scn3_best.png")
############################## Identification ##########################
using LikelihoodProfiler, CSV
chi_level = 3.84
p_optim_1 = optim(res_fit_1)
p_optim_2 = optim(res_fit_2)
p_optim_3 = optim(res_fit_3)
sim_scn1 = sim(p.scenarios[:scn1], parameters_upd=p_optim_1)
sim_scn2 = sim(p.scenarios[:scn2], parameters_upd=p_optim_2)
sim_scn3 = sim(p.scenarios[:scn3], parameters_upd=p_optim_3)
p_names = Dict(
:scn1 => first.(p_optim_1),
:scn2 => first.(p_optim_2),
:scn3 => first.(p_optim_3),
)
function loss_func(params::Vector{P}, scen) where P<: Pair
sim_res = sim(p.scenarios[scen]; parameters_upd=params, reltol=1e-6, abstol=1e-8)
#sim_res = last(sim_vec[1])
return loss(sim_res, sim_res.scenario.measurements) - loss_add[scen]
end
function loss_func(pvals::Vector{N}, scen) where N <: Number
# @show pvals
@assert length(p_names[scen]) == length(pvals) "Number of params values doesn't match params names"
params = [pn => pv for (pn,pv) in zip(p_names[scen],pvals)]
loss_func(params,scen)
end
loss_scn1(params) = loss_func(params, :scn1)
loss_scn2(params) = loss_func(params, :scn2)
loss_scn3(params) = loss_func(params, :scn3)
function scan_func(params::Vector{P}, timepoint, scen) where P<: Pair
sim_vec = sim(p; parameters_upd=params, scenarios=[scen])
return last(sim_vec[1])(timepoint)[:BrAC]
end
function scan_func(pvals::Vector{N}, timepoint, scen) where N <: Number
@assert length(p_names[scen]) == length(pvals) "Number of params values doesn't match params names"
params = [pn => pv for (pn,pv) in zip(p_names[scen],pvals)]
scan_func(params, timepoint, scen)
end
scan_scn1(params, timepoint) = scan_func(params, timepoint, :scn1)
scan_scn2(params, timepoint) = scan_func(params, timepoint, :scn2)
scan_scn3(params, timepoint) = scan_func(params, timepoint, :scn3)
saveat_1 = saveat(p.scenarios[:scn1])
saveat_2 = saveat(p.scenarios[:scn2])
saveat_3 = saveat(p.scenarios[:scn3])
p_ident_1 = [get_interval(
last.(p_optim_1),
i,
loss_scn1,
:CICO_ONE_PASS,
theta_bounds = fill((1e-10,1e10), length(p_names[:scn1])),
scan_bounds=((last.(p_optim_1)[i])/1e4, (last.(p_optim_1)[i])*1e4),
scan_tol=1e-5,
#scale = fill(:log, length(p_names[:scn1])),
loss_crit = loss_scn1(p_optim_1) + chi_level
) for i in eachindex(p_names[:scn1])]
plb_1 = [iv.result[1].value for iv in p_ident_1]
pub_1 = [iv.result[2].value for iv in p_ident_1]
pliter_1 = [iv.result[1].counter for iv in p_ident_1]
puiter_1 = [iv.result[2].counter for iv in p_ident_1]
df = DataFrame(params = p_names[:scn1], optim = last.(p_optim_1), lower = plb_1, upper = pub_1, liter = pliter_1, uiter = puiter_1)
df.lower = replace(df.lower, nothing => missing)
df.upper = replace(df.upper, nothing => missing)
CSV.write("./julia/scn1_intervals.csv", df)
p_ident_2 = [get_interval(
last.(p_optim_2),
i,
loss_scn2,
:CICO_ONE_PASS,
#theta_bounds = fill((-10.,10.), length(p_names[:scn2])),
#scale = fill(:log, length(p_names[:scn2])),
loss_crit = loss_scn2(p_optim_2) + chi_level
) for i in eachindex(p_names[:scn2])]
p_ident_3 = [get_interval(
last.(p_optim_3),
i,
loss_scn3,
:CICO_ONE_PASS,
scale = fill(:log, length(p_names[:scn3])),
loss_crit = loss_scn3(p_optim_3) + chi_level
) for i in eachindex(p_names[:scn3])]
BrAC_ident_1 = [get_interval(
last.(p_optim_1),
params->scan_scn1(params,t),
loss_scn1,
:CICO_ONE_PASS,
scale = fill(:log, length(p_names[:scn1])),
loss_crit = loss_scn1(p_optim_1) + chi_level
) for t in saveat_1]
BrAC_ident_2 = [get_interval(
last.(p_optim_2),
params->scan_scn2(params,t),
loss_scn2,
:CICO_ONE_PASS,
scale = fill(:log, length(p_names[:scn2])),
loss_crit = loss_scn2(p_optim_2) + chi_level
) for t in saveat_2]
BrAC_ident_3 = [get_interval(
last.(p_optim_3),
params->scan_scn3(params,t),
loss_scn3,
:CICO_ONE_PASS,
scale = fill(:log, length(p_names[:scn3])),
loss_crit = loss_scn3(p_optim_3) + chi_level
) for t in saveat_3]
lb_1 = [iv.result[1].value for iv in BrAC_ident_1]
ub_1 = [iv.result[2].value for iv in BrAC_ident_1]
liter_1 = [iv.result[1].counter for iv in BrAC_ident_1]
uiter_1 = [iv.result[2].counter for iv in BrAC_ident_1]
df = DataFrame(times = saveat_1, lower = lb_1, upper = ub_1, liter = liter_1, uiter = uiter_1)
df.lower = replace(df.lower, nothing => missing)
df.upper = replace(df.upper, nothing => missing)
CSV.write("./julia/scn1_conf_band.csv", df)
BrAC_1 = sim_scn1.(saveat_1, :BrAC)
plot(sim_scn1, show_measurements=false, vars=[:BrAC])
scatter!(data_scn[:scn1].t, data_scn[:scn1].measurement, yerror=data_scn[:scn1][!,"prob.sigma"], label = "Measurements")
plot!(saveat_1, lb_1, fillrange = ub_1, fillalpha = 0.35, c = 1, label = "Confidence band")
savefig("./julia/conf_band.png")
lb_2 = [iv.result[1].value for iv in BrAC_ident_2]
ub_2 = [iv.result[2].value for iv in BrAC_ident_2]
BrAC_2 = sim_scn2.(saveat_2, :BrAC)
plot(sim_scn2, show_measurements=false, ribbon = (BrAC_2-lb_1,ub_2-BrAC_2), fc=:orange, fa=0.7)
lb_3 = [iv.result[1].value for iv in BrAC_ident_3]
ub_3 = [iv.result[2].value for iv in BrAC_ident_3]
BrAC_3 = sim_scn3.(saveat_3, :BrAC)
plot(sim_scn3, show_measurements=false, ribbon = (BrAC_3-lb_3,ub_3-BrAC_3), fc=:orange, fa=0.7)
### Validation band
t_scn1 = data_scn[:scn1].t
sigma_scn1 = data_scn[:scn1][!,"prob.sigma"]
sim_scn1 = sim(p.scenarios[:scn1]; parameters_upd=p_optim_1, reltol=1e-6, abstol=1e-8)
BrAC_scn1 = sim_scn1.(t_scn1, :BrAC)
# Validation-profile objective for scenario 1 at measurement index `i`.
# `params` carries the model parameters followed by one free prediction value.
function valid_obj1(params, i)
    # trailing element: the free prediction value at the i-th time point
    prediction = last(params)
    upd = [pn => pv for (pn, pv) in zip(p_names[:scn1], params[1:end-1])]
    res = sim(p.scenarios[:scn1]; parameters_upd=upd, reltol=1e-6, abstol=1e-8)
    simulated = res(t_scn1[i])[:BrAC]
    # fitting loss + standardized squared deviation of the free prediction,
    # shifted by the scenario-specific additive constant
    return loss(res, res.scenario.measurements) + (prediction - simulated)^2/(sigma_scn1[i])^2 - loss_add[:scn1]
end
# Pointwise validation band: at every measurement time, profile the free
# prediction (appended as the last parameter) jointly with the model parameters.
valid_1 = []
for i in eachindex(t_scn1)
    println(" Calculating CI for $(t_scn1[i]) timepoint")
    push!(valid_1,
        get_interval(
            # initial point: optimal parameters plus the simulated BrAC value
            [last.(p_optim_1); BrAC_scn1[i]],
            # index of the profiled component (the appended prediction);
            # assumes length(p_names[:scn1]) == 4 — TODO confirm
            5,
            p->valid_obj1(p,i),
            :CICO_ONE_PASS,
            theta_bounds = fill((1e-8,1e8), length(p_names[:scn1])+1),
            scan_bounds=(1e-7,1e7),
            scan_tol=1e-5,
            scale = fill(:log, length(p_names[:scn1])+1),
            loss_crit = loss_scn1(p_optim_1) + chi_level)
    )
end
lb_1 = [iv.result[1].value for iv in valid_1]
ub_1 = [iv.result[2].value for iv in valid_1]
liter_1 = [iv.result[1].counter for iv in valid_1]
uiter_1 = [iv.result[2].counter for iv in valid_1]
df = DataFrame(times = t_scn1, lower = lb_1, upper = ub_1, liter = liter_1, uiter = uiter_1)
df.lower = replace(df.lower, nothing => missing)
df.upper = replace(df.upper, nothing => missing)
# NOTE(review): "bans" in the file name looks like a typo for "band(s)" —
# confirm nothing downstream reads this path before renaming.
CSV.write("./julia/scn1_valid_bans.csv", df)
# Manual clamping of the lower bound at the first and last five time points
# (presumably where the profile is unidentifiable) — verify these indices.
lb_1[1] = 0.0
lb_1[end-4:end] .= 0.0
plot(sim_scn1, show_measurements=false, vars=[:BrAC])
scatter!(data_scn[:scn1].t, data_scn[:scn1].measurement, yerror=data_scn[:scn1][!,"prob.sigma"], label = "Measurements")
plot!(t_scn1, lb_1, fillrange = ub_1, fillalpha = 0.35, c = 1, label = "Validation band")
savefig("./julia/valid_band.png")
27,
34345,
29,
73,
43640,
14,
5143,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
367,
17167,
8890,
8927,
11,
1345,
1747,
198,
198,
79,
796,
3440,
62,
24254,
7203,
33283,
42721,
62,
448,
28,
9562,
8,
198,
198,
1416,
268,
13010... | 2.155152 | 4,183 |
export TabularRandomPolicy

"""
    TabularRandomPolicy(prob::Array{Float64, 2})

A stochastic tabular policy: row `s` of `prob` holds the action
distribution for state `s`.
"""
struct TabularRandomPolicy <: AbstractPolicy
    prob::Array{Float64,2}
end

# Sample an action for state `s` according to the stored distribution.
function (policy::TabularRandomPolicy)(s)
    return sample(Weights(policy.prob[s, :]))
end

# Observations are reduced to their state before sampling.
(policy::TabularRandomPolicy)(obs::Observation) = policy(get_state(obs))

# Action distribution for state `s` (non-allocating view).
get_prob(policy::TabularRandomPolicy, s) = @view policy.prob[s, :]
# Probability of action `a` in state `s`.
get_prob(policy::TabularRandomPolicy, s, a) = policy.prob[s, a]
39344,
16904,
934,
29531,
36727,
198,
198,
37811,
198,
220,
220,
220,
16904,
934,
29531,
36727,
7,
1676,
65,
3712,
19182,
90,
43879,
2414,
11,
362,
30072,
198,
198,
63,
1676,
65,
63,
8477,
262,
6082,
286,
4028,
329,
1123,
1181,
13,
... | 2.573034 | 178 |
export elu, relu, selu, sigm, invx
using AutoGrad: AutoGrad, @primitive
"""
    elu(x)

Exponential linear unit: return `x` for `x ≥ 0` and `exp(x) - 1` otherwise.

Reference: Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) (https://arxiv.org/abs/1511.07289).
"""
function elu(x::T) where T
    return x >= 0 ? x : exp(x) - T(1)
end
# Scalar backward rule expressed through the forward output y = elu(x):
# the derivative is 1 for y >= 0 and y + 1 (= exp(x)) for y < 0.
eluback(dy::T,y::T) where T = (y >= 0 ? dy : dy * (T(1)+y))
# Register first- and second-order gradients with AutoGrad.
@primitive elu(x),dy,y eluback.(dy,y)
@primitive eluback(dy,y),ddx (ddx.*((y.>=0).+(y.<0).*(y.+1))) (ddx.*dy.*(y.<0))
"""
    relu(x)

Rectified linear unit: return `max(0,x)`.

References:
* [<NAME>, 2010](https://icml.cc/Conferences/2010/abstracts.html#432). Rectified Linear Units Improve Restricted Boltzmann Machines. ICML.
* [Glorot, <NAME> Bengio, 2011](http://proceedings.mlr.press/v15/glorot11a). Deep Sparse Rectifier Neural Networks. AISTATS.
"""
function relu(x::T) where T
    return max(x, T(0))
end
# Backward rule in terms of the forward output: gradient passes where y > 0.
reluback(dy::T,y::T) where T = (y>0 ? dy : T(0))
@primitive relu(x),dy,y reluback.(dy,y)
# Second-order rule: d(reluback)/d(dy) is the 0/1 mask; no y-gradient.
@primitive reluback(dy,y),ddx (ddx.*(y.>0)) nothing
"""
    selu(x)

Scaled exponential linear unit: return `λ01 * (x > 0 ? x : α01 * (exp(x)-1))`
where `λ01=1.0507009873554805` and `α01=1.6732632423543778`.

Reference: Self-Normalizing Neural Networks (https://arxiv.org/abs/1706.02515).
"""
function selu(x::T) where T
    if x >= 0
        return T(λ01) * x
    else
        return T(λα01) * (exp(x) - T(1))
    end
end
# Backward rule via the forward output: slope λ01 on the linear branch,
# y + λα01 (= λ01*α01*exp(x)) on the exponential branch.
seluback(dy::T,y::T) where T = (y >= 0 ? dy * T(λ01) : dy * (y + T(λα01)))
@primitive selu(x),dy,y seluback.(dy,y)
@primitive seluback(dy,y),ddx (T=eltype(y); ddx.*((y.>=0).*T(λ01).+(y.<0).*(y.+T(λα01)))) (ddx.*dy.*(y.<0))
# SELU fixed-point constants (closed forms in the trailing comments);
# λα01 caches the product λ01 * α01 used in selu/seluback.
const λ01 = 1.0507009873554805 # (1-erfc(1/sqrt(2))*sqrt(exp(1)))*sqrt(2pi)*(2*erfc(sqrt(2))*exp(2)+pi*erfc(1/sqrt(2))^2*exp(1)-2*(2+pi)*erfc(1/sqrt(2))*sqrt(exp(1))+pi+2)^(-0.5)
const α01 = 1.6732632423543778 # -sqrt(2/pi)/(erfc(1/sqrt(2))*exp(1/2)-1)
const λα01 = 1.7580993408473773 # λ01 * α01
"""
    sigm(x)

Logistic sigmoid: return `1/(1+exp(-x))`.

Reference: Numerically stable sigm implementation from http://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick.
"""
function sigm(x::T) where T
    if x >= 0
        return T(1) / (T(1) + exp(-x))
    else
        z = exp(x)
        return z / (T(1) + z)
    end
end
# Backward rule via the forward output: d(sigm)/dx = y*(1-y).
sigmback(dy::T,y::T) where T = (dy*y*(T(1)-y))
@primitive sigm(x),dy,y sigmback.(dy,y)
@primitive sigmback(dy,y),ddx ddx.*y.*(1 .- y) ddx.*dy.*(1 .- 2 .* y)
# Deprecated reciprocal helper kept for backward compatibility; warns once.
function invx(x)
    @warn "invx() is deprecated, please use 1/x instead" maxlog=1
    return 1/x
end
| [
39344,
1288,
84,
11,
823,
84,
11,
384,
2290,
11,
264,
17225,
11,
800,
87,
198,
3500,
11160,
42731,
25,
11160,
42731,
11,
2488,
19795,
1800,
628,
198,
37811,
198,
220,
220,
220,
1288,
84,
7,
87,
8,
198,
198,
13615,
4600,
7,
87,
1... | 2.024 | 1,125 |
# Depth-first search below `node`: push every descendant element whose tag
# equals `label` onto `nodeArray`. Matching subtrees are not entered further.
function extract_node_list!(node::XMLElement, nodeArray::Array{XMLElement,1}, label::String)
    for child in collect(child_elements(node))
        if name(child) == label
            push!(nodeArray, child)
        else
            extract_node_list!(child, nodeArray, label)
        end
    end
end
# Return the <speciesReference> children of the first <listOfReactants>
# element found beneath `node` (documents are expected to contain exactly one).
function extract_reactant_list(node::XMLElement)
    list_nodes = Array{XMLElement,1}()
    extract_node_list!(node, list_nodes, "listOfReactants")
    species_array = Array{XMLElement,1}()
    append!(species_array, child_elements(list_nodes[1]))
    return species_array
end
# Return the <speciesReference> children of the first <listOfProducts>
# element found beneath `node` (documents are expected to contain exactly one).
function extract_product_list(node::XMLElement)
    list_nodes = Array{XMLElement,1}()
    extract_node_list!(node, list_nodes, "listOfProducts")
    species_array = Array{XMLElement,1}()
    append!(species_array, child_elements(list_nodes[1]))
    return species_array
end
# Build one VLMetabolicReaction per <reaction> element in the tree, encoding
# each side of the reaction as a "stoich*species+stoich*species+..." phrase.
function build_metabolic_reaction_object_array(tree_root::XMLElement)::Array{VLMetabolicReaction,1}
    # collect every <reaction> element in the model tree -
    reaction_nodes = Array{XMLElement,1}()
    extract_node_list!(tree_root, reaction_nodes, "reaction")

    # helper: render speciesReference nodes as a '+'-joined phrase
    # (an empty list yields the empty string, as before)
    phrase(refs) = join(("$(attribute(r, "stoichiometry"))*$(attribute(r, "species"))" for r in refs), "+")

    reaction_object_array = Array{VLMetabolicReaction,1}()
    for reaction_node in reaction_nodes
        robject = VLMetabolicReaction()
        robject.reaction_name = attribute(reaction_node, "id")
        robject.ec_number = "[]" # EC numbers are not carried in this input
        robject.reversible = attribute(reaction_node, "reversible")
        robject.left_phrase = phrase(extract_reactant_list(reaction_node))
        robject.right_phrase = phrase(extract_product_list(reaction_node))
        push!(reaction_object_array, robject)
    end
    return reaction_object_array
end
# Package the canned TXTL header text shipped with the package as a
# buffer-type VLProgramComponent named `filename`.
function build_txtl_program_component(tree_root::XMLElement, filename::String)::VLProgramComponent
    # initialize -
    program_component = VLProgramComponent()

    # load the header text for TXTL -
    path_to_impl = "$(path_to_package)/distribution/julia/include/TXTL-Header-Section.txt"
    buffer = include_function(path_to_impl)

    # FIX: flatten the line array with join instead of mutating a local
    # inside a comprehension evaluated only for its side effect.
    flat_buffer = join(buffer)

    # add data to program_component -
    program_component.filename = filename
    program_component.buffer = flat_buffer
    program_component.type = :buffer

    # return -
    return program_component
end
# Package the canned GRN header text shipped with the package as a
# buffer-type VLProgramComponent named `filename`.
function build_grn_program_component(tree_root::XMLElement, filename::String)::VLProgramComponent
    # initialize -
    program_component = VLProgramComponent()

    # load the header text for the GRN section -
    path_to_impl = "$(path_to_package)/distribution/julia/include/GRN-Header-Section.txt"
    buffer = include_function(path_to_impl)

    # FIX: flatten the line array with join instead of mutating a local
    # inside a comprehension evaluated only for its side effect.
    flat_buffer = join(buffer)

    # add data to program_component -
    program_component.filename = filename
    program_component.buffer = flat_buffer
    program_component.type = :buffer

    # return -
    return program_component
end
# Package the canned global header text shipped with the package as a
# buffer-type VLProgramComponent named `filename`.
function build_global_header_program_component(tree_root::XMLElement, filename::String)::VLProgramComponent
    # initialize -
    program_component = VLProgramComponent()

    # load the global header text -
    path_to_impl = "$(path_to_package)/distribution/julia/include/Global-Header-Section.txt"
    buffer = include_function(path_to_impl)

    # FIX: flatten the line array with join instead of mutating a local
    # inside a comprehension evaluated only for its side effect.
    flat_buffer = join(buffer)

    # add data to program_component -
    program_component.filename = filename
    program_component.buffer = flat_buffer
    program_component.type = :buffer

    # return -
    return program_component
end
# Render the #METABOLISM section of a VFF file: a commented header followed by
# one CSV-style record per reaction extracted from the SBML tree.
function build_metabolism_program_component(tree_root::XMLElement, filename::String)::VLProgramComponent
    # initialize -
    buffer = Array{String,1}()
    program_component = VLProgramComponent()

    # header information -
    +(buffer, "// ***************************************************************************** //\n")
    +(buffer, "#METABOLISM::START\n")
    +(buffer, "// Metabolism record format:\n")
    +(buffer, "// reaction_name (unique), [{; delimited set of ec numbers | []}],reactant_string,product_string,reversible\n")
    +(buffer, "//\n")
    +(buffer, "// Rules:\n")
    +(buffer, "// The reaction_name field is unique, and metabolite symbols can not have special chars or spaces\n")
    +(buffer, "//\n")
    +(buffer, "// Example:\n")
    +(buffer, "// R_A_syn_2,[6.3.4.13],M_atp_c+M_5pbdra+M_gly_L_c,M_adp_c+M_pi_c+M_gar_c,false\n")
    +(buffer, "//\n")
    +(buffer, "// Stochiometric coefficients are pre-pended to metabolite symbol, for example:\n")
    +(buffer, "// R_adhE,[1.2.1.10; 1.1.1.1],M_accoa_c+2*M_h_c+2*M_nadh_c,M_coa_c+M_etoh_c+2*M_nad_c,true\n")
    +(buffer, "\n")

    # one record per reaction -
    reaction_object_array = build_metabolic_reaction_object_array(tree_root)
    for reaction_object::VLMetabolicReaction in reaction_object_array
        # FIX: the record string is now built locally each iteration; the old
        # code pre-declared tmp_string and reset it to an empty Array after
        # every push (a type-unstable dead store).
        record = "$(reaction_object.reaction_name),$(reaction_object.ec_number),$(reaction_object.left_phrase),$(reaction_object.right_phrase),$(reaction_object.reversible)\n"
        +(buffer, record)
    end

    # close the section -
    +(buffer, "\n")
    +(buffer, "#METABOLISM::STOP\n")
    +(buffer, "// ***************************************************************************** //\n")

    # FIX: flatten with join instead of a side-effect comprehension.
    program_component.filename = filename
    program_component.buffer = join(buffer)
    program_component.type = :buffer

    # return -
    return program_component
end
8818,
7925,
62,
17440,
62,
4868,
0,
7,
17440,
3712,
37643,
2538,
1732,
11,
10139,
19182,
3712,
19182,
90,
37643,
2538,
1732,
11,
16,
5512,
6167,
3712,
10100,
8,
628,
220,
220,
220,
1303,
651,
3988,
286,
10139,
532,
198,
220,
220,
22... | 2.558325 | 3,009 |
<reponame>pagnani/ArDCA.jl
# Symbolic permutation orderings accepted by `ardca` via the `permorder` keyword.
const allpermorder = [:NATURAL, :ENTROPIC, :REV_ENTROPIC, :RANDOM]
"""
    ardca(Z::Array{Ti,2},W::Vector{Float64}; kwds...)

Auto-regressive analysis on the L×M alignment `Z` (numerically encoded in 1,…,21), and the `M`-dimensional normalized
weight vector `W`.
Return two `struct`: `::ArNet` (containing the inferred hyperparameters) and `::ArVar`

Optional arguments:
* `lambdaJ::Real=0.01` coupling L₂ regularization parameter (lagrange multiplier)
* `lambdaH::Real=0.01` field L₂ regularization parameter (lagrange multiplier)
* `epsconv::Real=1.0e-5` convergence value in minimzation
* `maxit::Int=1000` maximum number of iteration in minimization
* `verbose::Bool=true` set to `false` to stop printing convergence info on `stdout`
* `method::Symbol=:LD_LBFGS` optimization strategy see [`NLopt.jl`](https://github.com/JuliaOpt/NLopt.jl) for other options
* `permorder::Union{Symbol,Vector{Ti}}=:ENTROPIC` permutation order. Possible values are `:NATURAL,:ENTROPIC,:REV_ENTROPIC,:RANDOM` or a custom permutation vector

# Examples
```
julia> arnet, arvar= ardca(Z,W,lambdaJ=0,lambdaH=0,permorder=:REV_ENTROPIC,epsconv=1e-12);
```
"""
function ardca(Z::Array{Ti,2},W::Vector{Float64};
                lambdaJ::Real=0.01,
                lambdaH::Real=0.01,
                epsconv::Real=1.0e-5,
                maxit::Int=1000,
                verbose::Bool=true,
                method::Symbol=:LD_LBFGS,
                permorder::Union{Symbol,Vector{Int}}=:ENTROPIC
                ) where Ti <: Integer
    # validate inputs -
    checkpermorder(permorder)
    all(x -> x > 0, W) || throw(DomainError("vector W should normalized and with all positive elements"))
    isapprox(sum(W), 1) || throw(DomainError("sum(W) ≠ 1. Consider normalizing the vector W"))
    N, M = size(Z)
    # FIX: the original overwrote M with length(W), silently masking any
    # mismatch between the alignment width and the weight vector; fail fast.
    length(W) == M || throw(DimensionMismatch("length(W) = $(length(W)) does not match size(Z,2) = $M"))
    q = Int(maximum(Z)) # alphabet size inferred from the encoding
    aralg = ArAlg(method, verbose, epsconv, maxit)
    arvar = ArVar(N, M, q, lambdaJ, lambdaH, Z, W, permorder)
    θ, psval = minimize_arnet(aralg, arvar)
    Base.GC.gc() # something wrong with SharedArrays on Mac
    ArNet(θ, arvar), arvar
end
"""
    ardca(filename::String; kwds...)

Run [`ardca`](@ref) on the fasta alignment in `filename`
Return two `struct`: `::ArNet` (containing the inferred hyperparameters) and `::ArVar`

Optional arguments:
* `max_gap_fraction::Real=0.9` maximum fraction of insert in the sequence
* `remove_dups::Bool=true` if `true` remove duplicated sequences
* `theta=:auto` if `:auto` compute reweighint automatically. Otherwise set a `Float64` value `0 ≤ theta ≤ 1`
* `lambdaJ::Real=0.01` coupling L₂ regularization parameter (lagrange multiplier)
* `lambdaH::Real=0.01` field L₂ regularization parameter (lagrange multiplier)
* `epsconv::Real=1.0e-5` convergence value in minimzation
* `maxit::Int=1000` maximum number of iteration in minimization
* `verbose::Bool=true` set to `false` to stop printing convergence info on `stdout`
* `method::Symbol=:LD_LBFGS` optimization strategy see [`NLopt.jl`](https://github.com/JuliaOpt/NLopt.jl) for other options
* `permorder::Union{Symbol,Vector{Ti}}=:ENTROPIC` permutation order. Possible
values are `:NATURAL,:ENTROPIC,:REV_ENTROPIC,:RANDOM` or a custom permutation
vector

# Examples
```
julia> arnet, arvar = ardca("pf14.fasta", permorder=:ENTROPIC)
```
"""
function ardca(filename::String;
            theta::Union{Symbol,Real}=:auto,
            max_gap_fraction::Real=0.9,
            remove_dups::Bool=true,
            kwds...)
    # read and reweight the alignment, normalize the weights, then delegate
    W, Z, _, _, _ = read_fasta(filename, max_gap_fraction, theta, remove_dups)
    W ./= sum(W)
    return ardca(Z, W; kwds...)
end
# Validate a symbolic `permorder`; accepted values are listed in `allpermorder`.
function checkpermorder(po::Symbol)
    # FIX: corrected the typo "iplemented" in the error message.
    po ∈ allpermorder || error("permorder :$po not implemented: only $allpermorder are defined")
end
# A vector `permorder` must be a valid permutation of 1:length(po).
function checkpermorder(po::Vector{Ti}) where Ti <: Integer
    return isperm(po) || error("permorder is not a permutation")
end
# Fit the autoregressive parameters site by site. Each site in 1:N-1 solves an
# independent NLopt problem (threaded); results are packed into the flat
# vector θ as consecutive per-site slices of length site*q² + q.
function minimize_arnet(alg::ArAlg, var::ArVar{Ti}) where Ti
    @extract var : N q q2
    @extract alg : epsconv maxit method
    vecps = Vector{Float64}(undef,N - 1) # per-site objective minima
    θ = Vector{Float64}(undef, ((N*(N-1))>>1)*q2 + (N-1)*q)
    Threads.@threads for site in 1:N-1
        x0 = zeros(Float64, site * q2 + q)
        opt = Opt(method, length(x0))
        # identical tolerance `epsconv` applied to both x and the objective,
        # absolute and relative
        ftol_abs!(opt, epsconv)
        xtol_rel!(opt, epsconv)
        xtol_abs!(opt, epsconv)
        ftol_rel!(opt, epsconv)
        maxeval!( opt, maxit)
        min_objective!(opt, (x, g) -> optimfunwrapper(x, g, site, var))
        elapstime = @elapsed (minf, minx, ret) = optimize(opt, x0)
        alg.verbose && @printf("site = %d\tpl = %.4f\ttime = %.4f\t", site, minf, elapstime)
        alg.verbose && println("status = $ret")
        vecps[site] = minf
        # offset of this site's slice in θ; sites write disjoint ranges,
        # so the threaded loop is race-free
        offset = div(site*(site-1),2)*q2 + (site-1)*q + 1
        θ[offset:offset+site * q2 + q - 1] .= minx
    end
    return θ, vecps
end
# Adapter giving NLopt's (x, grad) objective-callback signature access to the
# site index and problem data.
function optimfunwrapper(x::Vector, g::Vector, site, var)
    # FIX: removed the unreachable `g === nothing` guard — `g` is typed
    # `::Vector`, so `nothing` can never reach this method.
    return pslikeandgrad!(x, g, site, var)
end
# Weighted negative pseudo-log-likelihood of the site's conditional model plus
# L₂ regularization; the gradient is written into `grad`. Returns the total.
function pslikeandgrad!(x::Vector{Float64}, grad::Vector{Float64}, site::Int, arvar::ArVar)
    @extract arvar : N M q q2 lambdaJ lambdaH Z W IdxZ
    LL = length(x)
    # gradient of the regularizer: coupling entries first (LL-q), fields last (q)
    for i = 1:LL - q
        grad[i] = 2.0 * lambdaJ * x[i]
    end
    for i = (LL - q + 1):LL
        grad[i] = 2.0 * lambdaH * x[i]
    end
    pseudolike = 0.0
    vecene = zeros(Float64, q)
    expvecenesumnorm = zeros(Float64, q)
    @inbounds for m in 1:M
        izm = view(IdxZ, :, m)
        zsm = Z[site+1,m] # the i index of P(x_i|x_1,...,x_i-1) corresponds here to i+1
        fillvecene!(vecene, x, site, izm, q, N)
        lnorm = logsumexp(vecene)
        # softmax of the conditional energies
        expvecenesumnorm .= @. exp(vecene - lnorm)
        pseudolike -= W[m] * (vecene[ zsm ] - lnorm)
        sq2 = site * q2
        # accumulate the coupling gradient: expectation minus observed symbol
        @avx for i in 1:site
            for s in 1:q
                grad[ izm[i] + s ] += W[m] * expvecenesumnorm[s]
            end
            grad[ izm[i] + zsm ] -= W[m]
        end
        # same for the field block at offset sq2
        @avx for s = 1:q
            grad[ sq2 + s ] += W[m] * expvecenesumnorm[s]
        end
        grad[ sq2 + zsm ] -= W[m]
    end
    # last expression adds the L₂ penalty and is the function's return value
    pseudolike += l2norm_asym(x, arvar)
end
# Fill `vecene[l]` with the conditional energy of symbol `l` at `site`:
# the sum of the coupling entries of `x` selected by `IdxSeq` plus the field.
function fillvecene!(vecene::Vector{Float64}, x::Vector{Float64}, site::Int, IdxSeq::AbstractArray{Int,1}, q::Int, N::Int)
    q2 = q^2
    sq2 = site * q2 # offset of the field block in `x`
    @inbounds for l in 1:q
        scra = 0.0
        @avx for i in 1:site
            scra += x[IdxSeq[i] + l]
        end
        scra += x[sq2 + l] # sum H
        vecene[l] = scra
    end
end
# Numerically stable log(sum(exp.(X))): shift by the maximum before
# exponentiating; return the maximum itself when it is not finite.
function logsumexp(X::Vector)
    m = maximum(X)
    if !isfinite(m)
        return float(m)
    end
    acc = 0.0
    for v in X
        acc += exp(v - m)
    end
    return m + log(acc)
end
# Asymmetric L₂ penalty: lambdaJ weights the coupling block (all but the last
# q entries of `vec`), lambdaH the field block (the last q entries).
function l2norm_asym(vec::Array{Float64,1}, arvar::ArVar)
    @extract arvar : q N lambdaJ lambdaH
    L = length(vec)
    couplings = 0.0
    @inbounds @avx for i = 1:(L - q)
        couplings += vec[i] * vec[i]
    end
    fields = 0.0
    @inbounds @avx for i = (L - q + 1):L
        fields += vec[i] * vec[i]
    end
    return lambdaJ * couplings + lambdaH * fields
end
27,
7856,
261,
480,
29,
79,
4660,
3216,
14,
3163,
35,
8141,
13,
20362,
198,
9979,
477,
16321,
2875,
796,
685,
25,
34259,
4261,
1847,
11,
1058,
3525,
49,
3185,
2149,
11,
1058,
2200,
53,
62,
3525,
49,
3185,
2149,
11,
1058,
49,
6981,... | 2.171943 | 3,222 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.