content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
<filename>scripts/code_replica_experiment.jl
using DrWatson, GPUAcceleratedTracking, CUDA, Tracking, GNSSSignals, StructArrays, ProgressMeter;
import Tracking: Hz, ms;
@quickactivate "GPUAcceleratedTracking"
N = 2048:32:262_144
err_rel = zeros(length(N))
@showprogress 0.5 for (idx, num_samples) in enumerate(N)
# num_samples = 2_048
num_ants = 1
num_correlators = 3
enable_gpu = Val(true)
system = GPSL1(use_gpu = Val(true));
# system_h = GPSL1(use_gpu = Val(false));
codes = system.codes
# codes_text_mem_simple = CuTexture(
# CuTextureArray(codes)
# )
codes_text_mem = CuTexture(
CuTextureArray(codes),
address_mode = CUDA.ADDRESS_MODE_WRAP,
interpolation = CUDA.NearestNeighbour(),
normalized_coordinates = true
)
code_frequency = get_code_frequency(system)
code_length = get_code_length(system)
start_code_phase = 0.0f0
carrier_phase = 0.0f0
carrier_frequency = 1500Hz
prn = 1
# Generate the signal;
signal, sampling_frequency = gen_signal(system, prn, carrier_frequency, num_samples, num_ants = NumAnts(num_ants), start_code_phase = start_code_phase, start_carrier_phase = carrier_phase)
# Generate correlator;
correlator = EarlyPromptLateCorrelator(NumAnts(num_ants), NumAccumulators(num_correlators))
correlator_sample_shifts = get_correlator_sample_shifts(system, correlator, sampling_frequency, 0.5)
num_of_shifts = correlator_sample_shifts[end] - correlator_sample_shifts[1]
# Generate blank code and carrier replica, and downconverted signal;
# code_replica_cpu = zeros(Float32, num_samples + num_of_shifts)
code_replica = CUDA.zeros(Float32, num_samples + num_of_shifts)
code_replica_text_mem = CUDA.zeros(Float32, num_samples + num_of_shifts)
# Generate CUDA kernel tuning parameters;
threads_per_block = 768
blocks_per_grid = cld.(num_samples, threads_per_block)
@cuda threads=threads_per_block blocks=blocks_per_grid gen_code_replica_texture_mem_kernel!(
code_replica_text_mem,
codes_text_mem, # texture memory codes
code_frequency,
sampling_frequency,
start_code_phase,
prn,
num_samples + num_of_shifts,
correlator_sample_shifts[1],
code_length
)
@cuda threads=threads_per_block blocks=blocks_per_grid gen_code_replica_kernel!(
code_replica,
codes, # texture memory codes
code_frequency,
sampling_frequency,
start_code_phase,
prn,
num_samples + num_of_shifts,
correlator_sample_shifts[1],
code_length
)
# Tracking.gen_code_replica!(code_replica_cpu, system, code_frequency, sampling_frequency, start_code_phase, 1, num_samples, correlator_sample_shifts, prn)
# code_replica_h = Array(code_replica)
# code_replica_text_mem_h = Array(code_replica_text_mem)
# signal = StructArray{ComplexF32}((ones(Float32, num_samples), zeros(Float32, num_samples) ))
# code_phases = get_code_frequency(system) / sampling_frequency .* (0:num_samples-1) .+ start_code_phase
# spread_signal = StructArray(signal .* system_h.codes[1 .+ mod.(floor.(Int, code_phases), get_code_length(system)), prn])
# accums_true = Tracking.correlate(correlator, spread_signal, code_replica_cpu, correlator_sample_shifts, 1, num_samples)
# accums = Tracking.correlate(correlator, spread_signal, code_replica_h, correlator_sample_shifts, 1, num_samples)
# accums_text_mem = Tracking.correlate(correlator, spread_signal, code_replica_text_mem_h, correlator_sample_shifts, 1, num_samples)
# err_rel = sum(abs.(code_replica - code_replica_text_mem)) / num_samples
err_rel[idx] = sum(abs.(code_replica - code_replica_text_mem)) / num_samples
end
x = vec(collect(N / 0.001)) # convert to Hz
data = vec(100 .* err_rel)
data_bar = mean(data)
data_med = median(data)
data_max = maximum(data)
using CairoMakie
fig = Figure(font = "Times New Roman")
ax = Axis(
fig,
xlabel = "Sampling Frequency [Hz]",
ylabel = "Relative Code Phase Error [%]",
xscale = log10,
title = "Relative code phase error of the texture memory code replica generation for 1 ms GPS L1 C/A signal",
# xlim = [0 400_000],
xminorgridvisible = true,
xminorticksvisible = true,
xminorticks = IntervalsBetween(9),
# yticks = (10.0 .^(-5:1:-3)),
# yticks = (10.0 .^(-5:1:-3)),
xticklabelsize = 18,
yticklabelsize = 18
)
xlims!(ax, 10^6, 5*10^8)
# string_data_bar = "$(round(data_bar, sigdigits=3))%"
# string_data_max = "$(round(data_max, sigdigits=3))%"
# string_data_med = "$(round(data_med, sigdigits=3))%"
# # textmu = "μ = " * string_data_bar
# # textmax = "max = " * string_data_max
# textmed = "median = " * string_data_med
lines!(ax, x,data)
# hlines!(ax, data_bar, color = :dimgrey, linestyle = :dash)
# hlines!(ax, data_med, color = :dimgrey, linestyle = :dash)
# hlines!(ax, data_max, color = :dimgrey, linestyle = :dash)
# text!(textmu, position = (9*10^8, 0.1+data_bar), align = (:right, :baseline))
# text!(textmax, position = (9*10^8, data_max - 0.2), align = (:right, :baseline))
# text!(textmed, position = (5*10^8, 0.5+data_med), align = (:center, :baseline))
fig[1,1] = ax
fig
@quickactivate "GPUAcceleratedTracking"
raw_data_df = collect_results(datadir("benchmarks/codereplica"))
sort!(raw_data_df, :num_samples)
samples = unique(Vector{Int64}(raw_data_df[!, :num_samples]))
algorithm_names = unique(Vector{String}(raw_data_df[!, :algorithm]))
samples = unique(Vector{Int64}(raw_data_df[!, :num_samples]))
x = samples ./ 0.001 # convert to Hz
algorithm_names = unique(Vector{String}(raw_data_df[!, :algorithm]))
# fig = Figure(
# # resolution = (1000, 700),
# font = "Times New Roman"
# )
ax2 = Axis(
fig,
xlabel = "Sampling Frequency [Hz]",
ylabel = "Generation Time [s]",
xscale = log10,
yscale = log10,
title = "Comparison between global memory and texture memory code replica generation for 1 ms GPS L1 C/A signal",
xminorgridvisible = true,
xminorticksvisible = true,
xminorticks = IntervalsBetween(9),
# yticks = (10.0 .^(-5:1:-3)),
xticklabelsize = 18,
yticklabelsize = 18
)
xlims!(ax2, 10^6, 5*10^8)
ylims!(ax2, 1.0e-5, 3.0e-3)
lin = Array{Lines}(undef, length(algorithm_names));
sca = Array{Scatter}(undef, length(algorithm_names));
markers = [:circle, :rect]
for (idx, name) = enumerate(algorithm_names)
time = 10 ^ (-9) * vec((
raw_data_df |>
@filter(
_.algorithm == name
) |> DataFrame
).Minimum)
lin[idx] = lines!(
ax2,
x,
time
)
sca[idx] = scatter!(
ax2,
x,
time,
marker = markers[idx],
markersize = 15
)
end
realtime = lines!(ax2, [10^6, 5 * 10^8], [10 ^ (-3), 10 ^ (-3)], color=:grey, linestyle=:dashdot)
fig[2,1] = ax2
fig
elements = [[lin[1] sca[1]], [lin[2] sca[2]]]
labels = ["Global Memory", "Texture Memory"]
axislegend(ax2, elements, labels, "Code Replication Algorithms", position = :lt)
fig
# save( plotsdir("benchmark_textmem.pdf"), fig)
save(plotsdir("code_phase.pdf"), fig) | [
27,
34345,
29,
46521,
14,
8189,
62,
35666,
3970,
62,
23100,
3681,
13,
20362,
198,
3500,
1583,
54,
13506,
11,
11362,
12832,
7015,
515,
2898,
5430,
11,
29369,
5631,
11,
37169,
11,
15484,
5432,
11712,
874,
11,
32112,
3163,
20477,
11,
183... | 2.368508 | 3,042 |
<filename>src/uncore/pmu.jl
abstract type PMUType end
pmutype(::T) where {T} = error("`pmutype` not defined for arguments of type $T")
# Defaults
_unitstatus(x::PMUType, i...) = error("`unitstatus` undefined for $(typeof(x))")
_unitcontrol(x::PMUType, i...) = error("`unitcontrol` undefined for $(typeof(x))")
_counter(x::PMUType, i...) = error("`counter` undefined for $(typeof(x))")
_control(x::PMUType, i...) = error("`control` undefined for $(typeof(x))")
_extras(x::PMUType, i...) = error("`extras` undefined for $(typeof(x))")
writetype(::PMUType) = UInt32
numcounters(x::PMUType) = error("`numcounters` undefined for $(typeof(x))")
numbytes(x::PMUType) = sizeof(UInt64) * numcounters(x)
unpack(_) = ()
unitstatus(x, i...) = _unitstatus(pmutype(x), indexzero.((unpack(x)..., i...))...)
unitcontrol(x, i...) = _unitcontrol(pmutype(x), indexzero.((unpack(x)..., i...))...)
counter(x, i...) = _counter(pmutype(x), indexzero.((unpack(x)..., i...))...)
control(x, i...) = _control(pmutype(x), indexzero.((unpack(x)..., i...))...)
extras(x, i...) = _extras(pmutype(x), indexzero.((unpack(x)..., i...))...)
writetype(x) = writetype(pmutype(x))
numcounters(x) = numcounters(pmutype(x))
numbytes(x) = numbytes(pmutype(x))
### Integrated Memory Controller
struct IMC{T<:AbstractCPU} <: PMUType end
_unitstatus(::IMC{SkylakeServer}) = IndexZero(0xF8)
_unitcontrol(::IMC{SkylakeServer}) = IndexZero(0xF4)
_counter(::IMC{SkylakeServer}, i::IndexZero) = IndexZero(0xA0 + value(i) * 0x8)
_control(::IMC{SkylakeServer}, i::IndexZero) = IndexZero(0xD8 + value(i) * 0x4)
numcounters(::IMC) = 4
# For now, only read the fixed counters for IceLake servers.
# There are 4 such counters, starting at address 0x2290 and they are
# DRAM Read, DRAM Write, PM Read, and PM Write respectively
_counter(::IMC{IcelakeServer}, i) = IndexZero(0x2290 + value(i) * 0x8)
### CHA Counters
struct CHA <: PMUType end
_unitstatus(::CHA, i) = IndexZero(0xE07 + value(i) * 0x10)
_unitcontrol(::CHA, i) = IndexZero(0xE00 + value(i) * 0x10)
_counter(::CHA, cha, i) = IndexZero(0xE08 + value(cha) * 0x10 + value(i))
_control(::CHA, cha, i) = IndexZero(0xE01 + value(cha) * 0x10 + value(i))
_extras(::CHA, cha, i) = IndexZero(0xE05 + value(cha) * 0x10 + value(i))
writetype(::CHA) = UInt64
numcounters(::CHA) = 4
# Customize for various types Specialize
abstract type AbstractUncorePMU end
##### IMC Uncore PMU
# PMU implementation for monitoring the integrated memory controller
struct IMCUncorePMU <: AbstractUncorePMU
# A handle to the underlying
handle::Handle
end
unwrap(x::IMCUncorePMU) = x.handle
pmutype(::IMCUncorePMU) = IMC{SkylakeServer}()
Base.close(x::IMCUncorePMU) = close(x.handle)
# IceLake IMC PMU
# For now - only return the free-running counters
struct IMCUncoreICX <: AbstractUncorePMU
mmio::MMIO
end
unwrap(x::IMCUncoreICX) = x.mmio
pmutype(::IMCUncoreICX) = IMC{IcelakeServer}()
Base.close(::IMCUncoreICX) = nothing
##### CHA Uncore PMU
# PMU implementation for monitoring the CHA
struct CHAUncorePMU <: AbstractUncorePMU
# We hold on to a single handle for the MSR path, shared by all PMUs
handle::Handle
# The number of this CHA
cha::IndexZero{Int}
buffer::Vector{UInt8}
# Allow passing a buffer, or manually create one
function CHAUncorePMU(handle::Handle, cha, buffer = zeros(UInt8, numbytes(CHA())))
resize!(buffer, numbytes(CHA()))
return new(handle, indexzero(cha), buffer)
end
end
unwrap(x::CHAUncorePMU) = x.handle
pmutype(::CHAUncorePMU) = CHA()
unpack(x::CHAUncorePMU) = (x.cha,)
Base.close(x::CHAUncorePMU) = close(x.handle)
#####
##### Low level accessing functions
#####
function setunitstatus!(U::AbstractUncorePMU, v)
write(unwrap(U), convert(writetype(U), v), unitstatus(U))
end
function getunitstatus(U::AbstractUncorePMU)
return read(unwrap(U), UInt32, unitstatus(U))
end
function setunitcontrol!(U::AbstractUncorePMU, v)
write(unwrap(U), convert(writetype(U), v), unitcontrol(U))
end
function getunitcontrol(U::AbstractUncorePMU)
return read(unwrap(U), UInt32, unitcontrol(U))
end
function setcontrol!(U::AbstractUncorePMU, counter, v)
return write(unwrap(U), convert(writetype(U), v), control(U, counter))
end
function getcontrol(U::AbstractUncorePMU, i)
return read(unwrap(U), UInt32, control(U, i))
end
function getcounter(U::AbstractUncorePMU, i)
return CounterValue(read(unwrap(U), UInt64, counter(U, i)))
end
function setextra!(U::AbstractUncorePMU, i, v)
write(unwrap(U), convert(writetype(U), v), extras(U, i))
end
function getextra(U::AbstractUncorePMU, i)
return read(unwrap(U), UInt32, extras(U, i))
end
#####
##### Some higher level functions
#####
function getallcounters(U::AbstractUncorePMU)
return ntuple(i -> getcounter(U, i), Val(numcounters(U)))
end
function reset!(U::AbstractUncorePMU)
# Write to the unit control to clear all counters and control registers
val = setbits(zero(writetype(U)), (0, 1, 8, 16, 17))
setunitcontrol!(U, val)
end
function enable!(U::AbstractUncorePMU)
val = setbits(zero(writetype(U)), (16, 17))
setunitcontrol!(U, val)
end
| [
27,
34345,
29,
10677,
14,
403,
7295,
14,
4426,
84,
13,
20362,
198,
397,
8709,
2099,
3122,
3843,
2981,
886,
198,
198,
4426,
315,
2981,
7,
3712,
51,
8,
810,
1391,
51,
92,
796,
4049,
7203,
63,
4426,
315,
2981,
63,
407,
5447,
329,
7... | 2.561219 | 2,001 |
# Helper functions to read Harwell-Boeing and Rutherford-Boeing data.
function decode_int_fmt(fmt :: AbstractString)
if fmt[1] == '('
fmt = uppercase(fmt[2:end-1])
end
return map(s -> isempty(s) ? 1 : parse(Int, s), split(fmt, 'I'))
end
function decode_real_fmt(fmt :: AbstractString)
fmt = join(split(fmt)) # Remove all white spaces.
if fmt[1] == '('
fmt = uppercase(fmt[2:end-1])
end
scale = "0"
if (',' in fmt) # Process scale factor, e.g., 1P,5D16.9
scale, fmt = split(fmt, ',')
scale, _ = split(scale, 'P')
elseif ('P' in fmt)
scale, fmt = split(fmt, 'P')
end
scale = parse(Int, scale)
fmt1 = split(fmt, '.')[1]
if occursin('E', fmt1)
(npl, len) = map(s -> isempty(s) ? 1 : parse(Int, s), split(fmt1, 'E'))
elseif occursin('D', fmt1)
(npl, len) = map(s -> isempty(s) ? 1 : parse(Int, s), split(fmt1, 'D'))
elseif occursin('F', fmt1)
(npl, len) = map(s -> isempty(s) ? 1 : parse(Int, s), split(fmt1, 'F'))
else
error("Malformed real format")
end
return (npl, len, scale)
end
function standardize_real(number_as_str :: AbstractString)
s = join(split(number_as_str)) # for numbers in the form "0.24555165E 00".
# change ".16000000+006" to ".16000000e+006". The first char could be +/-.
if !any(occursin.(['E', 'D', 'e', 'd'], s))
if occursin('+', s[2:end])
s = s[1:1] * join(split(s[2:end], '+'), "e+")
elseif occursin('-', s[2:end])
s = s[1:1] * join(split(s[2:end], '-'), "e-")
end
end
return s
end
function read_array(io :: IO, n :: Int, fmt :: AbstractString; is_complex :: Bool=false)
if 'I' in fmt
scale = 0
(npl, len) = decode_int_fmt(fmt)
conv = s -> parse(Int, s)
typ = Int
else
(npl, len, scale) = decode_real_fmt(fmt)
conv = s -> parse(Float64, s)
typ = Float64
if is_complex
n *= 2
end
end
x = zeros(typ, n)
for j = 1 : div(n, npl)
if typ == Float64
line = join(split(uppercase(readline(io)), 'D'), 'e')
else
line = readline(io)
end
chunk = [line[len*(i-1)+1:len*i] for i = 1 : npl]
if typ == Float64
chunk = map(standardize_real, chunk)
end
x[npl * (j-1) + 1 : npl * j] = map(conv, chunk)
end
rem = mod(n, npl)
if rem > 0
if typ == Float64
line = join(split(uppercase(readline(io)), 'D'), 'e')
else
line = readline(io)
end
chunk = [line[len*(i-1)+1:len*i] for i = 1 : rem]
if typ == Float64
chunk = map(standardize_real, chunk)
end
x[end-rem+1 : end] = map(conv, chunk)
end
if scale != 0
x /= 10.0^scale
end
return is_complex ? [ComplexF64(x[i], x[i+1]) for i = 1 : 2 : n-1] : x
end
function sortsparse!(colptr :: Vector{Ti}, rowind :: Vector{Ti}, values :: Vector{Tf}) where {Ti <: Integer, Tf <: Number}
# ensure row indices are sorted in each column
ncol = length(colptr) - 1
for col = 1 : ncol
colbeg = colptr[col]
colend = colptr[col + 1] - 1
rows = rowind[colbeg:colend]
if !issorted(rows)
p = sortperm(rows)
rowind[colbeg:colend] = rows[p]
values[colbeg:colend] = values[colbeg:colend][p]
end
end
end
| [
2,
5053,
525,
5499,
284,
1100,
2113,
4053,
12,
33,
2577,
278,
290,
49767,
12,
33,
2577,
278,
1366,
13,
198,
198,
8818,
36899,
62,
600,
62,
69,
16762,
7,
69,
16762,
7904,
27741,
10100,
8,
198,
220,
611,
46996,
58,
16,
60,
6624,
7... | 2.201117 | 1,432 |
<reponame>UnofficialJuliaMirrorSnapshots/POMDPModels.jl-355abbd5-f08e-5560-ac9e-8b5f2592a0ca<filename>test/crying.jl<gh_stars>0
using Test
using POMDPModels
# using POMDPSimulators
using POMDPTesting
using POMDPs
using POMDPModelTools
using BeliefUpdaters
using Random
let
problem = BabyPOMDP()
# starve policy
# when the baby is never fed, the reward for starting in the hungry state should be -100
sim = RolloutSimulator(eps=0.0001)
ib = nothing
policy = Starve()
r = simulate(sim, problem, policy, updater(policy), ib, true)
@test r ≈ -100.0 atol=0.01
# test gen(::o,...)
o = gen(DDNNode(:o), problem, true, MersenneTwister(1))
@test o == 1
# test vec
ov = convert_s(Array{Float64}, true, problem)
@test ov == [1.]
o = convert_s(Bool, ov, problem)
@test o == true
POMDPTesting.probability_check(problem)
bu = DiscreteUpdater(problem)
bp = update(bu,
initialize_belief(bu, BoolDistribution(0.0)),
false,
true)
@test pdf(bp, true) ≈ 0.47058823529411764 atol=0.0001
r = simulate(sim, problem, policy, DiscreteUpdater(problem), BoolDistribution(1.0))
@test r ≈ -100.0 atol=0.01
end
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
43826,
20910,
14,
47,
2662,
6322,
5841,
1424,
13,
20362,
12,
28567,
6485,
67,
20,
12,
69,
2919,
68,
12,
2816,
1899,
12,
330,
24,
68,
12,
23,
65,
20,
69,
1495,
5892,
6... | 2.291045 | 536 |
<reponame>darwinproject/CBIOMES-Processing.jl
# byproducts.jl
"""
StartWorkers(nwrkrs::Int)
Start workers if needed.
"""
function StartWorkers(nwrkrs::Int)
set_workers = nwrkrs
nworkers() < set_workers ? addprocs(set_workers) : nothing
nworkers()
end
"""
TaskDriver(indx,fn)
Broacast / distribute task (fn; e.g. loop_task1) over indices (indx; e.g. file indices)
Examples:
```
using CbiomesProcessing, Distributed, SparseArrays
TaskDriver(1,CbiomesProcessing.loop_task1)
StartWorkers(4)
@everywhere using CbiomesProcessing, SparseArrays
TaskDriver(1:4,CbiomesProcessing.loop_task1)
```
Visualize results:
```
using FortranFiles, Plots
k=1
recl=720*360*4
fil="diags_interp/ETAN/ETAN.0000000732.data"
f = FortranFile(fil,"r",access="direct",recl=recl,convert="big-endian")
tmp=read(f,rec=k,(Float32,(720,360))); close(f)
heatmap(tmp)
```
"""
function TaskDriver(indx::Union{UnitRange{Int},Array{Int,1},Int},fn::Function)
i=collect(indx)
length(i)>1 ? i=distribute(i) : nothing
isa(i,DArray) ? println(i.indices) : nothing
fn.(i)
end
"""
MetaFileRead(filIn::String)
Reads a meta file generated by MITgcm
"""
function MetaFileRead(FileName::String)
MetaFile=FileName[1:end-5]*".meta"
f = open(MetaFile)
lines = readlines(f)
close(f)
MetaFile=Dict("MetaFile" => MetaFile)
while !isempty(lines)
line=popfirst!(lines)
i0=findfirst(isequal('='), line)
i1=findfirst(isequal(';'), line)
!isnothing(i0) ? nam=strip(line[1:i0-1]) : nam=""
val=nothing
#show(line)
if nam=="dimList"
#special case: dimList
val=fill(0.,(MetaFile["nDims"],3))
for ii=1:MetaFile["nDims"]
line=popfirst!(lines)
tmp1=split(line,",")
#tmp1=map(x->(v = tryparse(Int,x); ismissing(v) ? 0.0 : v),tmp1)
val[ii,:]=parse.(Int,tmp1[1:3])
end
line=popfirst!(lines)
elseif nam=="fldList"
#special case: fldList
line=popfirst!(lines)
tmp1=split(line,"'")
val=String.(tmp1[2:2:end])
line=popfirst!(lines)
elseif nam=="dataprec"
#sepcial case: dataprec
tmp1=split(line)
tmp1[4]=="'float32'" ? val=Float32 : val=Float64
elseif nam=="nDims"
#sepcial case: nDims
tmp1=split(line[i0+1:i1-1])
val=parse(Int64,tmp1[2])
end
#
if ~isnothing(val)
tmp2=Dict(nam => val)
MetaFile=merge(MetaFile,tmp2)
end
end
return MetaFile
end
"""
MatrixInterp(in::Array{T,N},MTRX,siz) where {T,N}
Interpolate `in` using `MTRX` to grid of size `siz`.
"""
function MatrixInterp(in::Array{T,N},MTRX::SparseMatrixCSC,siz) where {T,N}
#input
l=size(in,1)*size(in,2);
m=size(in,3);
tmp1=reshape(in,l,m)
tmp0=Float64.(.!(isnan.(tmp1)))
tmp1[isnan.(tmp1)].=0.
siz=siz[1],siz[2],m
#matrix product
tmp0=MTRX*tmp0
tmp1=MTRX*tmp1
tmp1=tmp1./tmp0
#this may be redundant:
tmp1[tmp0 .== 0.] .= NaN
#output
out=reshape(tmp1,siz)
m==1 ? out=dropdims(out,dims=3) : nothing
return out
end
| [
27,
7856,
261,
480,
29,
27455,
5404,
16302,
14,
34,
3483,
2662,
1546,
12,
18709,
278,
13,
20362,
198,
2,
416,
29498,
13,
20362,
198,
198,
37811,
198,
220,
220,
220,
7253,
12468,
364,
7,
77,
18351,
74,
3808,
3712,
5317,
8,
198,
198... | 1.958962 | 1,657 |
#
# Example of a medium-scale graphene calculation. Only suitable for running
# on a cluster or machine with large memory.
#src tags: long
#
using DFTK
kgrid = [12, 12, 4]
Tsmear = 0.0009500431544769484
Ecut = 15
lattice = [4.659533614391621 -2.3297668071958104 0.0;
0.0 4.035274479829987 0.0;
0.0 0.0 15.117809010356462]
C = ElementPsp(:C, psp=load_psp("hgh/pbe/c-q4"))
atoms = [C => [[0.0, 0.0, 0.0], [0.33333333333, 0.66666666667, 0.0]]]
model = model_DFT(lattice, atoms, [:gga_x_pbe, :gga_c_pbe];
temperature=Tsmear, smearing=Smearing.Gaussian())
basis = PlaneWaveBasis(model, Ecut, kgrid=kgrid)
# Run SCF
n_bands = 6
scfres = self_consistent_field(basis; n_bands=n_bands)
# Print obtained energies
println()
display(scfres.energies)
| [
2,
198,
2,
17934,
286,
257,
7090,
12,
9888,
42463,
17952,
13,
5514,
11080,
329,
2491,
198,
2,
319,
257,
13946,
393,
4572,
351,
1588,
4088,
13,
198,
2,
10677,
15940,
25,
890,
198,
2,
198,
198,
3500,
360,
9792,
42,
198,
198,
74,
2... | 2.153846 | 364 |
<gh_stars>0
#!/usr/bin/env julia
#load path to qjulia home directory
push!(LOAD_PATH, joinpath(@__DIR__, "..", "core"))
push!(LOAD_PATH, joinpath(@__DIR__, "..", "libs/quda-routines"))
push!(LOAD_PATH, joinpath(@__DIR__, "..", "libs/scidac-routines"))
push!(LOAD_PATH, joinpath(@__DIR__, "..", "main/fields"))
import QJuliaBlas
import QJuliaReduce
import QJuliaUtils
import QJuliaEnums
import QJuliaInterface
import QJuliaGaugeUtils
import QJuliaComms
import QJuliaSolvers
import QUDARoutines
import SCIDACRoutines
using Random
using LinearAlgebra
using MPI
#create function/type alias
double = Float64
float = Float64
load_config_from_file = "/home/astrel/Configs/wl_5p5_x2p38_um0p4125_cfg_1000.lime"
##############################################################################################
[QJuliaUtils.gridsize_from_cmdline[i] = 1 for i = 1:length(QJuliaUtils.gridsize_from_cmdline)]
QJuliaUtils.get_rank_order("col")
#initialize MPI
MPI.Init()
QUDARoutines.initCommsGridQuda_qj(length(QJuliaUtils.gridsize_from_cmdline), QJuliaUtils.gridsize_from_cmdline, QJuliaUtils.lex_rank_from_coords_t_c, C_NULL)
QUDARoutines.initQuda_qj(0)
Random.seed!(2019)
solve_unit_source = true
const lx = 16
const ly = 16
const lz = 16
const lt = 64
const ls = 1
const dim = 4
const vol = lx*ly*lz*lt*ls
#field latt point sizes
const ssize = 12
const gsize = 9
const splen = vol*ssize
const gflen = vol*gsize
const sp_real_len = 2*vol*ssize
const sp_real_parity_len = Int(sp_real_len / 2)
sp_in = Vector{Complex{Float64}}(undef, splen)
sp_ou = Vector{Complex{Float64}}(undef, splen)
gauge = Matrix{Complex{Float64}}(undef, gflen, 4)
if solve_unit_source == false
QJuliaUtils.gen_random_spinor!(sp_in)
else
QJuliaUtils.gen_unit_spinor!(sp_ou)
end
gauge_param = QJuliaInterface.QJuliaGaugeParam_qj()
gauge_param.X = (lx, ly, lz, lt)
gauge_param.cpu_prec = QJuliaEnums.QJULIA_DOUBLE_PRECISION
gauge_param.t_boundary = QJuliaEnums.QJULIA_PERIODIC_T
gauge_param.gtype = QJuliaEnums.QJULIA_WILSON_LINKS
gauge_param.anisotropy = 2.38
gauge_param.cuda_prec = QJuliaEnums.QJULIA_DOUBLE_PRECISION
gauge_param.reconstruct = QJuliaEnums.QJULIA_RECONSTRUCT_12
gauge_param.cuda_prec_sloppy = QJuliaEnums.QJULIA_SINGLE_PRECISION
gauge_param.reconstruct_sloppy = QJuliaEnums.QJULIA_RECONSTRUCT_12
gauge_param.cuda_prec_precondition = QJuliaEnums.QJULIA_DOUBLE_PRECISION
gauge_param.reconstruct_precondition = QJuliaEnums.QJULIA_RECONSTRUCT_12
gauge_param.reconstruct_refinement_sloppy = QJuliaEnums.QJULIA_RECONSTRUCT_12
gauge_param.cuda_prec_refinement_sloppy = QJuliaEnums.QJULIA_HALF_PRECISION
#println("======= Gauge parameters =======")
#QJuliaInterface.printQudaGaugeParam_qj(gauge_param)
#load configuration from file or generate random one:
gauge_load_type = 1
if load_config_from_file != ""
Xdims = Vector{Cint}(undef, 4)
for i in 1:length(Xdims); Xdims[i] = gauge_param.X[i] ; end
qio_prec = Cint(8) #gauge_param.cuda_prec
SCIDACRoutines.QMPInitComms_qj(0, C_NULL, QJuliaUtils.gridsize_from_cmdline)
SCIDACRoutines.read_gauge_field_qj(load_config_from_file, gauge, qio_prec, Xdims, 0, C_NULL)
gauge_load_type = 2
end
QJuliaGaugeUtils.construct_gauge_field!(gauge, gauge_load_type, gauge_param)
gauge_param.gtype = QJuliaEnums.QJULIA_SU3_LINKS #currently cannot set QJULIA_WILSON_LINKS (=QJULIA_SU3_LINKS) for QUDA
x_face_size = gauge_param.X[2]*gauge_param.X[3]*Int(gauge_param.X[4]/2);
y_face_size = gauge_param.X[1]*gauge_param.X[3]*Int(gauge_param.X[4]/2);
z_face_size = gauge_param.X[1]*gauge_param.X[2]*Int(gauge_param.X[4]/2);
t_face_size = gauge_param.X[1]*gauge_param.X[2]*Int(gauge_param.X[3]/2);
gauge_param.ga_pad = max(x_face_size, y_face_size, z_face_size, t_face_size);
QUDARoutines.loadGaugeQuda_qj(gauge, gauge_param)
#Check plaquette
plaq = Array{Cdouble, 1}(undef, 3)
QUDARoutines.plaqQuda_qj(plaq)
println("Computed plaquette is ", plaq[1], ", (spatial = ", plaq[2], ", temporal = ", plaq[3], ")")
mass = -0.4125
#mass = -0.95
inv_param = QJuliaInterface.QJuliaInvertParam_qj()
inv_param.residual_type = QJuliaEnums.QJULIA_L2_RELATIVE_RESIDUAL
#println("======= Invert parameters =======")
#QJuliaInterface.printQudaInvertParam_qj(inv_param)
inv_param.mass = mass
inv_param.kappa = 1.0 / (2.0 * (1.0 + 3.0/gauge_param.anisotropy + mass))
inv_param.maxiter = 200
inv_param.tol = 1e-9
inv_param.cuda_prec = QJuliaEnums.QJULIA_DOUBLE_PRECISION
inv_param.cuda_prec_sloppy = QJuliaEnums.QJULIA_SINGLE_PRECISION
inv_param.cuda_prec_precondition = QJuliaEnums.QJULIA_HALF_PRECISION
inv_param.solution_type = QJuliaEnums.QJULIA_MATPC_SOLUTION
#inv_param.inv_type = QJuliaEnums.QJULIA_PIPEPCG_INVERTER
inv_param.inv_type = QJuliaEnums.QJULIA_FCG_INVERTER
println("Kappa = ", inv_param.kappa)
mdagm(out, inp) = QUDARoutines.MatDagMatQuda_qj(out, inp, inv_param)
mat(out, inp) = QUDARoutines.MatQuda_qj(out, inp, inv_param)
Doe(out, inp) = QUDARoutines.dslashQuda_qj(out, inp, inv_param, QJuliaEnums.QJULIA_EVEN_PARITY)
Deo(out, inp) = QUDARoutines.dslashQuda_qj(out, inp, inv_param, QJuliaEnums.QJULIA_ODD_PARITY )
# Setup preconditioner
precond_param = QJuliaInterface.QJuliaInvertParam_qj()
precond_param.residual_type = QJuliaEnums.QJULIA_L2_RELATIVE_RESIDUAL
#precond_param.inv_type = QJuliaEnums.QJULIA_PIPECG_INVERTER
#precond_param.inv_type = QJuliaEnums.QJULIA_INVALID_INVERTER
precond_param.inv_type = QJuliaEnums.QJULIA_LANMR_INVERTER #wroks for naive and fails for pipelined
precond_param.dslash_type_precondition = QJuliaEnums.QJULIA_WILSON_DSLASH
precond_param.kappa = 1.0 / (2.0 * (1 + 3/gauge_param.anisotropy + mass))
precond_param.cuda_prec = QJuliaEnums.QJULIA_DOUBLE_PRECISION
precond_param.cuda_prec_sloppy = QJuliaEnums.QJULIA_SINGLE_PRECISION
precond_param.cuda_prec_precondition = QJuliaEnums.QJULIA_DOUBLE_PRECISION
precond_param.solution_type = QJuliaEnums.QJULIA_MATPC_SOLUTION
precond_param.maxiter = precond_param.inv_type == QJuliaEnums.QJULIA_PCG_INVERTER ? 30 : 10
precond_param.Nsteps = 1
mdagmPre(out, inp) = QUDARoutines.MatDagMatQuda_qj(out, inp, precond_param)
pre_solv_param = QJuliaSolvers.QJuliaSolverParam_qj()
pre_solv_param.inv_type = precond_param.inv_type
pre_solv_param.tol = 1e-2
#
pre_solv_param.maxiter = precond_param.maxiter
pre_solv_param.Nsteps = 1
pre_solv_param.global_reduction = false
K(out, inp) = QJuliaSolvers.solve(out, inp, mdagmPre, mdagmPre, pre_solv_param)
x_even = view(reinterpret(double, sp_ou), 1:sp_real_parity_len)
x_odd = view(reinterpret(double, sp_ou), sp_real_parity_len+1:sp_real_len)
b_even = view(reinterpret(double, sp_in), 1:sp_real_parity_len)
b_odd = view(reinterpret(double, sp_in), sp_real_parity_len+1:sp_real_len)
tmpl_src_norm = norm(sp_ou)
if solve_unit_source == true
mat(sp_in, sp_ou)
sp_ou .=@. 0.0
end
init_src_norm = norm(sp_in)
println("Initial source norm:: ", init_src_norm, " , template src norm is: ", tmpl_src_norm)
#Auxiliary field
tmp = Vector{double}(undef, sp_real_len)
t_even = view(tmp, 1:sp_real_parity_len)
t_odd = view(tmp, sp_real_parity_len+1:sp_real_len)
#random intial guess
#QJuliaUtils.gen_random_spinor!(sp_ou, splen)
#prepare source/solution:
if inv_param.matpc_type == QJuliaEnums.QJULIA_MATPC_EVEN_EVEN
# src = b_e + k D_eo b_o
Deo(t_even, b_odd)
x_odd .=@. b_even + inv_param.kappa*t_even
end
#
init_prec_src_norm = norm(x_odd)
#
println("Initial precondtioned source norm:: ", init_prec_src_norm, " , requested tolerance: ", inv_param.tol)
solv_param = QJuliaSolvers.QJuliaSolverParam_qj()
# Set up parameters
solv_param.inv_type = inv_param.inv_type
solv_param.inv_type_precondition = precond_param.inv_type
solv_param.tol = inv_param.tol
#
solv_param.maxiter = inv_param.maxiter
solv_param.delta = 1e-2
solv_param.nKrylov = 4 #8 is very good for unit source
solv_param.Nsteps = 2
if precond_param.inv_type != QJuliaEnums.QJULIA_INVALID_INVERTER
QJuliaSolvers.solve(x_even, x_odd, mdagm, mdagm, solv_param, K)
else
QJuliaSolvers.solve(x_even, x_odd, mdagm, mdagm, solv_param)
end
#compute true residual:
r = t_odd
mdagm(r, x_even)
r .=@. x_odd - r
r2 = dot(r, r)
println("True residual norm: ", sqrt(r2))
#reconstruct source/solution:
if inv_param.matpc_type == QJuliaEnums.QJULIA_MATPC_EVEN_EVEN
# x_o = b_o + k D_oe x_e
Doe(t_odd, x_even)
x_odd .=@. b_odd + inv_param.kappa*t_odd
end
if solve_unit_source == true
QJuliaUtils.gen_unit_spinor!(sp_in)
sp_ou .=@. sp_in - sp_ou
error_norm = norm(sp_ou)
println("Solution error: ", error_norm)
end
QUDARoutines.endQuda_qj()
MPI.Finalize()
| [
27,
456,
62,
30783,
29,
15,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
474,
43640,
198,
198,
2,
2220,
3108,
284,
10662,
73,
43640,
1363,
8619,
198,
14689,
0,
7,
35613,
62,
34219,
11,
4654,
6978,
7,
31,
834,
34720,
834,
11,
366,
... | 2.104389 | 4,215 |
<reponame>cscherrer/Mitosis.jl
logpdf0(x, P) = logdensity(Gaussian{(:Σ,)}(P), x)
struct Message{T,S}
q0::S
q::T
end
message(q0, q) = Message(q0, q)
message() = nothing
function backward(::BF, k::Union{AffineGaussianKernel,LinearGaussianKernel}, q::Gaussian{(:μ,:Σ)})
ν, Σ = q.μ, q.Σ
B, β, Q = params(k)
B⁻¹ = inv(B)
νp = B⁻¹*(ν - β)
Σp = B⁻¹*(Σ + Q)*B⁻¹'
q0 = Gaussian{(:μ,:Σ)}(νp, Σp)
message(q0, q), q0
end
function backward(::BF, k::ConstantGaussianKernel, q::Gaussian{(:F,:Γ)})
message(nothing, q), nothing
end
function backward(::BF, k::Union{AffineGaussianKernel,LinearGaussianKernel}, q::Gaussian{(:F,:Γ)})
@unpack F, Γ = q
# Theorem 7.1 [Automatic BFFG]
B, β, Q = params(k)
Σ = inv(Γ) # requires invertibility of Γ
K = B'*inv(Σ + Q)
ν̃ = Σ*F - β
Fp = K*ν̃
Γp = K*B
q0 = Gaussian{(:F,:Γ)}(Fp, Γp)
message(q0, q), q0
end
function backward(::BF, k::Union{AffineGaussianKernel,LinearGaussianKernel}, y)
# Theorem 7.1 [Automatic BFFG]
B, β, Q = params(k)
K = B'/Q
Fp = K*(y - β)
Γp = K*B
q0 = Gaussian{(:F,:Γ)}(Fp, Γp)
message(q0, Leaf(y)), q0
end
backward(method::BFFG, k::Union{AffineGaussianKernel,LinearGaussianKernel}, q::Leaf; kargs...) = backward(method, k, q[]; kargs...)
backward(method::BFFG, k, q::Leaf; kargs...) = backward(method, k, q[]; kargs...)
function backward(::BFFG, k::Union{AffineGaussianKernel,LinearGaussianKernel}, q::WGaussian{(:F,:Γ,:c)}; unfused=false)
@unpack F, Γ, c = q
# Theorem 7.1 [Automatic BFFG]
B, β, Q = params(k)
Σ = inv(Γ) # requires invertibility of Γ
K = B'/(Σ + Q)
ν̃ = Σ*F - β
Fp = K*ν̃
Γp = K*B
# Corollary 7.2 [Automatic BFFG]
if !unfused
cp = c - logdet(B)
else
cp = c - logdensity0(Gaussian{(:F,:Γ)}(Fp, Γp)) + logpdf0(ν̃, Σ + Q)
end
q0 = WGaussian{(:F,:Γ,:c)}(Fp, Γp, cp)
message(q0, q), q0
end
function backward(::BFFG, k::Union{AffineGaussianKernel,LinearGaussianKernel}, y; unfused=false)
# Theorem 7.1 [Automatic BFFG]
B, β, Q = params(k)
K = B'/Q
Fp = K*(y - β)
Γp = K*B
# Corollary 7.2 [Automatic BFFG]
if !unfused
cp = -logdet(B)
else
cp = logpdf0(y - β, Q)
end
q0 = WGaussian{(:F,:Γ,:c)}(Fp, Γp, cp)
message(q0, Leaf(y)), Leaf(q0)
end
function forward(::BF, k::Union{AffineGaussianKernel,LinearGaussianKernel}, m::Message{<:Gaussian{(:F,:Γ)}})
@unpack F, Γ = m.q
B, β, Q = params(k)
Q⁻ = inv(Q)
Qᵒ = inv(Q⁻ + Γ)
Bᵒ = Qᵒ*Q⁻*B
βᵒ = Qᵒ*(Q⁻*β + F)
kernel(Gaussian; μ=AffineMap(Bᵒ, βᵒ), Σ=ConstantMap(Qᵒ))
end
function forward(::BF, k::ConstantGaussianKernel, m::Message{<:Gaussian{(:F,:Γ)}})
@unpack F, Γ = m.q
β, Q = params(k)
Q⁻ = inv(Q)
Qᵒ = inv(Q⁻ + Γ)
βᵒ = Qᵒ*(Q⁻*β + F)
kernel(Gaussian; μ=ConstantMap(βᵒ), Σ=ConstantMap(Qᵒ))
end
function forward(::BFFG, k::Union{AffineGaussianKernel,LinearGaussianKernel}, m::Message{<:WGaussian{(:F,:Γ,:c)}})
    # Guided forward kernel for the weighted variant: identical fusion to the
    # BF case; the weight correction is identically 0 for affine kernels.
    @unpack F, Γ, c = m.q
    B, β, Q = params(k)
    Q⁻ = inv(Q)
    Qᵒ = inv(Q⁻ + Γ)
    #μᵒ = Qᵒ*(Q⁻*(B*x + β) + F )
    Bᵒ = Qᵒ*Q⁻*B
    βᵒ = Qᵒ*(Q⁻*β + F)
    kernel(WGaussian; μ=AffineMap(Bᵒ, βᵒ), Σ=ConstantMap(Qᵒ), c=ConstantMap(0.0))
end
# Forward for a weighted sample: propagate the bare value `x[]` and carry the
# sample's log-weight `x.ll` through unchanged.
function forward(bffg::BFFG, k::Kernel, m::Message, x::Weighted)
    p = forward_(bffg, k, m, x[])
    weighted(p, x.ll)
end
# Unweighted values go straight to the implementation.
forward(bffg::BFFG, k::Kernel, m::Message, x) = forward_(bffg, k, m, x)
function forward_(::BFFG, k::GaussKernel, m::Message{<:WGaussian{(:F,:Γ,:c)}}, x)
    # Guided forward transition for a general Gaussian kernel: fuse the
    # backward message (F, Γ) with the kernel evaluated at the current point
    # `x`, and compute the log-weight correction `c` (Proposition 7.3).
    @unpack F, Γ, c = m.q
    c1 = c
    μ, Q = k.ops
    # Proposition 7.3.
    Q⁻ = inv(Q(x))
    Qᵒ = inv(Q⁻ + Γ)
    μᵒ = Qᵒ*(Q⁻*(μ(x)) + F)
    # The commented lines below are the derivation of the simplified weight
    # expression used at the end (μ̃/Q̃ denote the proposal kernel).
    # Q̃⁻ = inv(Q̃(x))
    # Q̃ᵒ = inv(Q̃⁻ + Γ)
    # μ̃ᵒ = Q̃ᵒ*(Q̃⁻*(μ̃(x)) + F)
    # c = logpdf0(μ(x), Q(x)) - logpdf0(μ̃(x), Q̃(x))
    # c += logpdf0(μ̃ᵒ, Q̃ᵒ) - logpdf0(μᵒ, Qᵒ)
    # == logpdf0(μ(x) - Γ\F, Q(x) + inv(Γ)) - logpdf0(μ̃(x) - Γ\F, Q̃(x) + inv(Γ))
    # logdensity(m.q0, x) - c1 == logpdf0(μ̃(x) - Γ\F, Q̃(x) + inv(Γ))
    c = logpdf0(μ(x) - Γ\F, Q(x) + inv(Γ)) - logdensity(m.q0, x) + c1
    WGaussian{(:μ,:Σ,:c)}(μᵒ, Qᵒ, c)
end
function backward(::BFFG, ::Copy, args::Union{Leaf{<:WGaussian{(:μ,:Σ,:c)}},WGaussian{(:μ,:Σ,:c)}}...; unfused=true)
    # Fuse weighted Gaussian messages (moment form) at a Copy node by
    # converting to information form, where fusion is additive in (F, Γ, c).
    # NOTE(review): the `unfused` keyword is immediately overwritten and thus
    # has no effect — confirm whether this is intentional.
    unfused = false
    F, H, c = params(convert(WGaussian{(:F,:Γ,:c)}, args[1]))
    # Non-leaf inputs contribute an extra normalisation term.
    args[1] isa Leaf || (c += logdensity0(Gaussian{(:F,:Γ)}(F, H)))
    for b in args[2:end]
        F2, H2, c2 = params(convert(WGaussian{(:F,:Γ,:c)}, b))
        F += F2
        H += H2
        c += c2
        b isa Leaf|| (c += logdensity0(Gaussian{(:F,:Γ)}(F2, H2)))
    end
    # Normalisation correction for the fused potential.
    Δ = -logdensity(Gaussian{(:F,:Γ)}(F, H), 0F)
    message(), convert(WGaussian{(:μ,:Σ,:c)}, WGaussian{(:F,:Γ,:c)}(F, H, Δ + c))
end
function backward(::Union{BFFG,BF}, ::Copy, a::Gaussian{(:F,:Γ)}, args...)
    # Fuse Gaussian messages at a Copy node: in information form both the
    # information vector F and information matrix Γ are simply additive.
    Ftot, Htot = params(a)
    for g in args
        Fg, Hg = params(g::Gaussian{(:F,:Γ)})
        Ftot += Fg
        Htot += Hg
    end
    return message(), Gaussian{(:F,:Γ)}(Ftot, Htot)
end
function backward(::BFFG, ::Copy, a::Union{Leaf{<:WGaussian{(:F,:Γ,:c)}}, WGaussian{(:F,:Γ,:c)}}, args...; unfused=true)
    # Fuse weighted Gaussian messages already in information form.
    # NOTE(review): `unfused` is overwritten immediately and has no effect.
    unfused = false
    F, H, c = params(convert(WGaussian{(:F,:Γ,:c)}, a))
    # NOTE(review): this method uses `logdensity(…, 0F)` where the (:μ,:Σ,:c)
    # variant above uses `logdensity0(…)` — confirm the two agree.
    a isa Leaf || (c += logdensity(Gaussian{(:F,:Γ)}(F, H), 0F))
    for b in args
        F2, H2, c2 = params(convert(WGaussian{(:F,:Γ,:c)}, b))
        F += F2
        H += H2
        c += c2
        b isa Leaf || (c += logdensity(Gaussian{(:F,:Γ)}(F2, H2), 0F2))
    end
    # Normalisation correction for the fused potential.
    Δ = -logdensity(Gaussian{(:F,:Γ)}(F, H), 0F)
    message(), WGaussian{(:F,:Γ,:c)}(F, H, Δ + c)
end
# Forward through an affine kernel when the backward message is a Leaf (value
# pinned by an observation): the transition is deterministic, and the sample's
# log-weight `x.ll` is kept.
function forward(::BFFG, k::Union{AffineGaussianKernel,LinearGaussianKernel}, m::Message{<:Leaf}, x::Weighted)
    y = m.q
    Dirac(weighted(y[], x.ll))
end
# Forward through a two-way Copy node: duplicate the weighted sample.
# NOTE(review): `weighted(x[])` is called without a log-weight argument here,
# unlike `weighted(p, x.ll)` above — confirm the second copy is meant to carry
# a default weight.
function forward(::BFFG, ::Copy{2}, _, x::Weighted)
    MeasureTheory.Dirac((x, weighted(x[])))
end
| [
27,
7856,
261,
480,
29,
66,
1416,
372,
11751,
14,
43339,
5958,
13,
20362,
198,
6404,
12315,
15,
7,
87,
11,
350,
8,
796,
2604,
43337,
7,
35389,
31562,
90,
7,
25,
138,
96,
35751,
92,
7,
47,
828,
2124,
8,
198,
198,
7249,
16000,
9... | 1.764564 | 3,313 |
using NumericalMethodsforEngineers, DataFrames, Plots
pyplot(size=(700,700))
# Example 5.1: Lagrangian polynomial interpolation through four data points,
# evaluated at two query points (xi) and on a dense grid (xint) for plotting.
ProjDir = dirname(@__FILE__)
cd(ProjDir) #do
x = [1.0, 3.0, 6.0, 5.0]
y = [1.0, 5.0, 10.0, 9.0]
xi = [2.0, 4.5]
# `dfin` echoes the input points; `dfxi` holds the interpolated query values.
(dfin, dfxi) = lagrangianpolynomial(length(x), x, y, xi)
xint = 1:0.1:5
(dfin, dfxint) = lagrangianpolynomial(length(x), x, y, collect(xint))
dfin |> display
println()
dfxi |> display
println()
dfxint |> display
println()
# Interpolated curve over the dense grid; inputs as circles, queries as stars.
p = plot(dfxint[:xi], dfxint[:yi], line=(:path, 1), label="interpolated curve")
scatter!(p, dfin[:x], dfin[:y], marker=(:circle, 4), label="input points", color=:blue)
scatter!(p, dfxi[:xi], dfxi[:yi], marker=(:star, 8), color=:red, label="interpolated points")
plot(p)
savefig("ex.5.1.png")
gui()
#end | [
3500,
399,
6975,
605,
46202,
1640,
28620,
11,
6060,
35439,
11,
1345,
1747,
198,
9078,
29487,
7,
7857,
16193,
9879,
11,
9879,
4008,
198,
198,
2964,
73,
35277,
796,
26672,
3672,
7,
31,
834,
25664,
834,
8,
198,
10210,
7,
2964,
73,
3527... | 2.103261 | 368 |
function check(i, j)
    # True when the 0-based cells `i` and `j` of a 9×9 sudoku grid constrain
    # each other: same row, same column, or same 3×3 box.
    rowa, cola = div(i, 9), mod(i, 9)
    rowb, colb = div(j, 9), mod(j, 9)
    if rowa == rowb || cola == colb
        return true
    end
    return div(rowa, 3) == div(rowb, 3) && div(cola, 3) == div(colb, 3)
end
# Precomputed 81×81 peer table: lookup[i,j] is true when 1-based cells i and j
# constrain each other (`check` expects 0-based indices, hence i-1, j-1).
# Note lookup[i,i] is true; the solver only queries it for empty cells, where
# grid[i] == 0, so the self-pairing is harmless.
const lookup = zeros(Bool, 81, 81)
for i in 1:81
    for j in 1:81
        lookup[i,j] = check(i-1, j-1)
    end
end
"""
    solve_sudoku(callback, grid)

Depth-first backtracking sudoku solver over a flat 81-cell `grid`
(row-major, 0 marks an empty cell). Every complete solution is passed to
`callback(grid)`; the grid is restored to its input state on return.
Relies on the file-level `lookup` peer table.
"""
function solve_sudoku(callback::Function, grid::Array{Int64})
    (function solve()
        for i in 1:81
            if grid[i] == 0
                # Fix: the original used `Dict{Int64, Void}`; `Void` was
                # renamed `Nothing` in Julia 1.0, so that code no longer runs.
                # A Bool table of used digits is simpler and equivalent.
                used = falses(9)
                for j in 1:81
                    if lookup[i,j]
                        v = grid[j]
                        v > 0 && (used[v] = true)
                    end
                end
                # Try every digit not yet used by a peer, then backtrack.
                for k in 1:9
                    if !used[k]
                        grid[i] = k
                        solve()
                    end
                end
                grid[i] = 0
                return
            end
        end
        # No empty cell left: the grid is a complete solution.
        callback(grid)
    end)()
end
function display(grid)
    # Pretty-print the flat grid: a gap after every 3rd value, a newline after
    # every 9th (row end), and a blank line after every 27th (band end).
    for (pos, val) in enumerate(grid)
        print(val, " ")
        pos % 3 == 0 && print(" ")
        pos % 9 == 0 && print("\n")
        pos % 27 == 0 && print("\n")
    end
end
# Puzzle definition: 81 cells in row-major order, 0 marks an empty cell.
grid = Int64[
    5, 3, 0, 0, 2, 4, 7, 0, 0,
    0, 0, 2, 0, 0, 0, 8, 0, 0,
    1, 0, 0, 7, 0, 3, 9, 0, 2,
    0, 0, 8, 0, 7, 2, 0, 4, 9,
    0, 2, 0, 9, 8, 0, 0, 7, 0,
    7, 9, 0, 0, 0, 0, 0, 8, 0,
    0, 0, 0, 0, 3, 0, 5, 0, 6,
    9, 6, 0, 0, 1, 0, 3, 0, 0,
    0, 5, 0, 6, 9, 0, 0, 1, 0,
]

# Print every completed solution.
solve_sudoku(display, grid)
| [
8818,
2198,
7,
72,
11,
474,
8,
198,
220,
220,
220,
4686,
11,
545,
796,
2659,
7,
72,
11,
860,
828,
953,
7,
72,
11,
860,
8,
198,
220,
220,
220,
474,
67,
11,
474,
76,
796,
2659,
7,
73,
11,
860,
828,
953,
7,
73,
11,
860,
8,
... | 1.548096 | 998 |
using Distributions
using PyPlot
using LsqFit
using CSV, DataFrames, DataFramesMeta
using StatsBase
""" Functions for results of Fig. 2E """
# Run:
# mean_thrs_delay_1, mean_thrs_delay_3 = SinergiafMRI_datafit.get_state_visits_bootstrapped()
# Bootstrap (200 rounds, resampling subjects with replacement within each
# graph group) the per-state learning-onset estimates, write summary CSVs,
# and return the bootstrap means for graphs 1 and 3.
function get_state_visits_bootstrapped(;
    # filestr = "_Sim_",
    filestr = ""
    )
    # Real:
    data_path = "./mcmc_rl_fit/projects/fmri/data/SARSPEICZVG_all_fMRI.csv"
    # Simulated:
    # data_path = "./mcmc_rl_fit/projects/fmri/data_sim/SARSPEICZVG_all_fMRI_Sim_exporder_SurpAC_run2.csv"
    max_episodes_per_subj = 40
    (all_data, all_stats) = RL_Fit.load_SARSPEI(data_path;max_episodes_per_subj=max_episodes_per_subj)
    all_subject_ids = all_stats.all_subject_ids
    println("all_subject_ids: $all_subject_ids")
    nrOfBootstrapRounds = 200
    # One length-7 vector of per-state threshold delays per bootstrap round.
    threshold_delay_matrices_1 = Array{Array{Float64,1}, 1}(undef, nrOfBootstrapRounds)
    [threshold_delay_matrices_1[i] = zeros(Float64, 7) for i in 1:nrOfBootstrapRounds]
    threshold_delay_matrices_3 = Array{Array{Float64,1}, 1}(undef, nrOfBootstrapRounds)
    [threshold_delay_matrices_3[i] = zeros(Float64, 7) for i in 1:nrOfBootstrapRounds]
    #
    # threshold_delay_matrices_nofitting_1 = Array{Array{Float64,1}, 1}(undef, nrOfBootstrapRounds)
    # [threshold_delay_matrices_nofitting_1[i] = zeros(Float64, 7) for i in 1:nrOfBootstrapRounds]
    # threshold_delay_matrices_nofitting_3 = Array{Array{Float64,1}, 1}(undef, nrOfBootstrapRounds)
    # [threshold_delay_matrices_nofitting_3[i] = zeros(Float64, 7) for i in 1:nrOfBootstrapRounds]
    # Fixed assignment of subjects to the two graph conditions.
    graph_1_subjIDs = [4, 5, 6, 8, 10, 12, 15, 16, 19, 21]
    graph_3_subjIDs = [1, 2, 3, 7, 9, 11, 13, 14, 17, 18, 20]
    for i in 1:nrOfBootstrapRounds
        # Sample with replacement
        bootstrapped_subject_ids_graph_1 = sample(graph_1_subjIDs, length(graph_1_subjIDs), replace=true)
        bootstrapped_subject_ids_graph_3 = sample(graph_3_subjIDs, length(graph_3_subjIDs), replace=true)
        bootstrapped_subject_ids = vcat(bootstrapped_subject_ids_graph_1, bootstrapped_subject_ids_graph_3)
        # Get graph_visits
        graph_1_visits, graph_3_visits = get_state_visits_singleround(all_data,
                        bootstrapped_subject_ids, max_episodes_per_subj = max_episodes_per_subj)
        threshold_delay_matrices_1[i] = get_state_fittedthreshold(1, graph_1_visits)
        threshold_delay_matrices_3[i] = get_state_fittedthreshold(3, graph_3_visits)
        # threshold_delay_matrices_nofitting_1[i] = get_state_threshold_nofitting(1, graph_1_visits)
        # threshold_delay_matrices_nofitting_3[i] = get_state_threshold_nofitting(3, graph_3_visits)
    end
    # Summarise each graph's rounds and persist the statistics.
    mean_thrs_delay_1, std_thrs_delay_1, stderror_thrs_delay_1 = get_bootstrapped_stats(threshold_delay_matrices_1)
    mean_thrs_delay_3, std_thrs_delay_3, stderror_thrs_delay_3 = get_bootstrapped_stats(threshold_delay_matrices_3)
    write_bootstrapped_results_csv(1, mean_thrs_delay_1, std_thrs_delay_1, stderror_thrs_delay_1, filestr = filestr)
    write_bootstrapped_results_csv(3, mean_thrs_delay_3, std_thrs_delay_3, stderror_thrs_delay_3, filestr = filestr)
    # ------------------------------------------
    # mean_thrs_delay_nofitting_1, std_thrs_delay_nofitting_1, stderror_thrs_delay_nofitting_1 = get_bootstrapped_stats(threshold_delay_matrices_nofitting_1)
    # mean_thrs_delay_nofitting_3, std_thrs_delay_nofitting_3, stderror_thrs_delay_nofitting_3 = get_bootstrapped_stats(threshold_delay_matrices_nofitting_3)
    #
    # write_bootstrapped_results_csv(1, mean_thrs_delay_nofitting_1, std_thrs_delay_nofitting_1, stderror_thrs_delay_nofitting_1, filestr=filestr*"nofitting_")
    # write_bootstrapped_results_csv(3, mean_thrs_delay_nofitting_3, std_thrs_delay_nofitting_3, stderror_thrs_delay_nofitting_3, filestr=filestr*"nofitting_")
    mean_thrs_delay_1, mean_thrs_delay_3#, mean_thrs_delay_nofitting_1, mean_thrs_delay_nofitting_3
end
""" Similar to get_state_visits() """
function get_state_visits_singleround(all_data, subject_ids; max_episodes_per_subj = 40)
graph_1_visits = zeros(Int64, max_episodes_per_subj,7, 2)
graph_3_visits = zeros(Int64, max_episodes_per_subj,7, 2)
# @show subject_ids
for subj_id in subject_ids
# @show subj_id
subj_data = RL_Fit.filter_by_person(all_data, subj_id)
graph_id = subj_data[1,Int(RL_Fit.c_graph_version)]
all_episodes = unique(subj_data[:,Int(RL_Fit.c_episode)])
trajectory_count_per_state = zeros(Int64, 7) #count for each state individually how many times it participated in a rewarded episode
# per subject, do not count (sum), just set to 0 or 1.
state_action_indicator = zeros(Int64, max_episodes_per_subj, 7, 2)
for episode in all_episodes
for s in 2:7
# check if state s has "contributed" to the current episode. if yes, count this trajectory.
if any( (subj_data[:,Int(RL_Fit.c_episode)] .== episode) .& (subj_data[:,Int(RL_Fit.c_state)] .== s) )
trajectory_count_per_state[s] += 1
# now count the value of actions. distinguish two cases:
# for some states, the two actions are equally valid. In this case count the action
# for other states, there is a correct (1) and a wrong(-1) action
# action correct:
if any( (subj_data[:,Int(RL_Fit.c_episode)] .== episode) .& (subj_data[:,Int(RL_Fit.c_state)] .== s) .& (subj_data[:,Int(RL_Fit.c_action_value)] .== 0))
# action A or action B?
if any( (subj_data[:,Int(RL_Fit.c_episode)] .== episode) .& (subj_data[:,Int(RL_Fit.c_state)] .== s) .& (subj_data[:,Int(RL_Fit.c_action)] .== 1))
# A
state_action_indicator[trajectory_count_per_state[s], s, 1] = 1 # do not +=1
end
# note: it's not an ELSE here. we check for both and count both options at most once
if any( (subj_data[:,Int(RL_Fit.c_episode)] .== episode) .& (subj_data[:,Int(RL_Fit.c_state)] .== s) .& (subj_data[:,Int(RL_Fit.c_action)] .== 2))
# B
state_action_indicator[trajectory_count_per_state[s], s, 2] = 1 # do not +=1
end
else
# action correct or wrong?
if any( (subj_data[:,Int(RL_Fit.c_episode)] .== episode) .& (subj_data[:,Int(RL_Fit.c_state)] .== s) .& (subj_data[:,Int(RL_Fit.c_action_value)] .== +1))
# correct
state_action_indicator[trajectory_count_per_state[s], s, 2] = 1 # do not +=1
end
# note: it's not an ELSE here. we check for both and count both options at most once
if any( (subj_data[:,Int(RL_Fit.c_episode)] .== episode) .& (subj_data[:,Int(RL_Fit.c_state)] .== s) .& (subj_data[:,Int(RL_Fit.c_action_value)] .== -1))
# wrong
state_action_indicator[trajectory_count_per_state[s], s, 1] = 1 # do not +=1
end
end
end
end # end all states
end # end all episodes
# @show state_action_indicator
if graph_id == 1
graph_1_visits += state_action_indicator
elseif graph_id == 3
graph_3_visits += state_action_indicator
else
error("unknown graph id: $graph_id")
end
end # end all subject
graph_1_visits, graph_3_visits
end
# Fit an exponential learning curve per state and return, for each state, the
# (fractional) episode index at which the fitted curve reaches
# `learning_threshold`. States whose two actions are equally good (equal
# minimal distances) keep a delay of 0; out-of-range fits are clamped into
# [0, last usable episode].
function get_state_fittedthreshold(graph_id::Int,
                            graph_state_counts;
                            learning_threshold=0.8,
                            min_visits_requirement = 2,
                            savepath = "/Users/vasia/Desktop/")
    dfresults = []
    graph_info = get_graph_info()
    states, minimal_dist, state_txt, graph = get_this_graph_info(graph_info, graph_id)
    init_vals_exp = [0.5, 0.]
    threshold_delay_matrix = zeros(length(states)+1)
    for s = states
        # println(s)
        counts_per_episode_actionwrong = vec(graph_state_counts[:,s,1])
        counts_per_episode_actioncorrect = vec(graph_state_counts[:,s,2])
        summed_counts_per_episode = counts_per_episode_actionwrong.+counts_per_episode_actioncorrect
        # Only episodes with enough visits enter the learning curve.
        episodes_min_visits = findall(summed_counts_per_episode .>= min_visits_requirement)
        # @show episodes_min_visits
        ratio = counts_per_episode_actioncorrect[episodes_min_visits] ./ summed_counts_per_episode[episodes_min_visits]
        weight = (summed_counts_per_episode[episodes_min_visits])
        nr_x = length(ratio)
        x_data = 1:nr_x
        y_data = ratio
        # markersize = 1+2*sqrt(weight[i]),
        threshold_delay = 0.
        weight = Float64.(weight)
        # Only states with a uniquely optimal action have a learning curve.
        if minimal_dist[1,s] != minimal_dist[2,s]
            fit = curve_fit(model_exp, x_data, y_data, weight, init_vals_exp) # Weighted cost function
            y_hat = model_exp(x_data, fit.param)
            # Invert the fitted curve at the threshold value.
            threshold_delay = inv_model_exp(learning_threshold, fit.param)
        end
        # Negative fit result: depending on the curve's sign, the state was
        # learned from the start (0) or never reached threshold (last episode).
        if threshold_delay < 0.
            if fit.param[2] > 0.
                threshold_delay = 0.
            else
                threshold_delay = Float64(episodes_min_visits[end])
            end
            # @show threshold_delay
            # println("----------")
        end
        if threshold_delay > Float64(episodes_min_visits[end])
            threshold_delay = Float64(episodes_min_visits[end])
        end
        threshold_delay_matrix[s] = threshold_delay
    end
    threshold_delay_matrix
end
# NOTE: NOT USED!!!
"""
Threshold-delay estimate without curve fitting: for each state with a uniquely
optimal action, the delay is the first (min-visit-filtered) episode at which
the fraction of correct actions reaches `learning_threshold`; when the
threshold is never reached the delay is clamped to the last usable episode.
States with two equally good actions keep a delay of 0.
"""
function get_state_threshold_nofitting(graph_id::Int,
                            graph_state_counts;
                            learning_threshold=0.8,
                            min_visits_requirement = 2,
                            savepath = "/Users/vasia/Desktop/")
    graph_info = get_graph_info()
    states, minimal_dist, state_txt, graph = get_this_graph_info(graph_info, graph_id)
    threshold_delay_matrix = zeros(length(states)+1)
    for s = states
        counts_per_episode_actionwrong = vec(graph_state_counts[:,s,1])
        counts_per_episode_actioncorrect = vec(graph_state_counts[:,s,2])
        summed_counts_per_episode = counts_per_episode_actionwrong.+counts_per_episode_actioncorrect
        # Only episodes with enough visits enter the learning curve.
        episodes_min_visits = findall(summed_counts_per_episode .>= min_visits_requirement)
        ratio = counts_per_episode_actioncorrect[episodes_min_visits] ./ summed_counts_per_episode[episodes_min_visits]
        threshold_delay = 0.
        if minimal_dist[1,s] != minimal_dist[2,s]
            # Fix: the threshold was hard-coded to 0.8, silently ignoring the
            # `learning_threshold` keyword; the default keeps old behavior.
            threshold_delay = findfirst(x -> x >= learning_threshold, ratio)
        end
        # findfirst returns `nothing` when the threshold is never reached.
        if isnothing(threshold_delay)
            threshold_delay = Float64(episodes_min_visits[end])
        end
        threshold_delay_matrix[s] = threshold_delay
    end
    threshold_delay_matrix
end
function get_bootstrapped_stats(threshold_delay_matrices)
    # Stack the per-round threshold vectors into a matrix, one column per
    # bootstrap round, and summarise across rounds (dims=2).
    stacked = hcat(threshold_delay_matrices...)
    round_mean = mean(stacked, dims=2)
    round_std = std(stacked, dims=2)
    @show size(threshold_delay_matrices,1)
    # Standard error: std over the square root of the number of rounds.
    round_stderror = std(stacked, dims=2) ./ sqrt(size(threshold_delay_matrices,1))
    round_mean, round_std, round_stderror
end
# Write a single-row CSV with, per state, the bootstrapped mean/std/stderror
# of the threshold delay, the state's two possible goal distances, and the
# graph id. Output goes to a freshly created directory (makehomesavepath).
function write_bootstrapped_results_csv(graph_id::Int64,
                    mean_thrs_delay,
                    std_thrs_delay,
                    stderror_thrs_delay;
                    filestr="",
                    # filestr = "_Sim_",
                    # savepath = "./"
                    )
    savepath = makehomesavepath("learningcurvebars_bootstrap")
    mkdir(savepath)
    graph_info = get_graph_info()
    states, minimal_dist, state_txt, graph = get_this_graph_info(graph_info, graph_id)
    dfresults = DataFrame()
    for s = states
        # Distances are shifted by 1 and sorted so the column reads "a-or-b".
        min_dist = sort(minimal_dist[:,s] .- 1.)
        dfresults[!, Symbol(string(s)*"_"*state_txt[s]*"_thresholddelay_mean")] = [mean_thrs_delay[s]]
        dfresults[!, Symbol(string(s)*"_"*state_txt[s]*"_thresholddelay_std")] = [std_thrs_delay[s]]
        dfresults[!, Symbol(string(s)*"_"*state_txt[s]*"_thresholddelay_stderror")] = [stderror_thrs_delay[s]]
        dfresults[!, Symbol(string(s)*"_"*state_txt[s]*"_dist1vsdist2")] = [string(Int(min_dist[1]))*"-or-"*string(Int(min_dist[2]))]
        dfresults[!, Symbol(string(s)*"_"*state_txt[s]*"_graphid")] = [graph_id]
    end
    CSV.write(joinpath(savepath, "learningcurvebar_bootstrap_"*filestr*"G" * string(graph_id) *".csv"), dfresults,
        delim = " ");
    # dfresults
end
| [
3500,
46567,
507,
198,
3500,
9485,
43328,
198,
3500,
406,
31166,
31805,
198,
3500,
44189,
11,
6060,
35439,
11,
6060,
35439,
48526,
198,
3500,
20595,
14881,
198,
198,
37811,
40480,
329,
2482,
286,
12138,
13,
362,
36,
37227,
198,
2,
5660,... | 2.041609 | 6,537 |
<gh_stars>10-100
export JITEventListener, GDBRegistrationListener, IntelJITEventListener,
OProfileJITEventListener, PerfJITEventListener
# Thin wrapper around an LLVM JIT event listener reference.
# NOTE(review): `@checked` presumably validates the ref on construction —
# confirm in its definition elsewhere in the package.
@checked struct JITEventListener
    ref::API.LLVMJITEventListenerRef
end
# Allow passing a JITEventListener directly to ccall sites expecting the ref.
Base.unsafe_convert(::Type{API.LLVMJITEventListenerRef}, listener::JITEventListener) = listener.ref
# Constructors for the listener kinds exposed by the LLVM C API.
GDBRegistrationListener() = JITEventListener(LLVM.API.LLVMCreateGDBRegistrationListener())
IntelJITEventListener() = JITEventListener(LLVM.API.LLVMCreateIntelJITEventListener())
OProfileJITEventListener() = JITEventListener(LLVM.API.LLVMCreateOProfileJITEventListener())
PerfJITEventListener() = JITEventListener(LLVM.API.LLVMCreatePerfJITEventListener())
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
39344,
449,
2043,
9237,
33252,
11,
402,
11012,
47133,
33252,
11,
8180,
41,
2043,
9237,
33252,
11,
198,
220,
220,
220,
220,
220,
220,
440,
37046,
41,
2043,
9237,
33252,
11,
2448,
69,
41,
... | 2.935622 | 233 |
"""
hubbard_dispersion(k)
Dispersion relation for [`HubbardMom1D`](@ref). Returns `-2cos(k)`.
See also [`continuum_dispersion`](@ref).
"""
hubbard_dispersion(k) = -2cos(k)
"""
continuum_dispersion(k)
Dispersion relation for [`HubbardMom1D`](@ref). Returns `k^2`.
See also [`hubbard_dispersion`](@ref).
"""
continuum_dispersion(k) = k^2
"""
HubbardMom1D(address; u=1.0, t=1.0, dispersion=hubbard_dispersion)
Implements a one-dimensional Bose Hubbard chain in momentum space.
```math
\\hat{H} = \\sum_{k} ϵ_k n_k + \\frac{u}{M}\\sum_{kpqr} a^†_{r} a^†_{q} a_p a_k δ_{r+q,p+k}
```
# Arguments
* `address`: the starting address, defines number of particles and sites.
* `u`: the interaction parameter.
* `t`: the hopping strength.
* `dispersion`: defines ``ϵ_k =``` t*dispersion(k)`
- [`hubbard_dispersion`](@ref): ``ϵ_k = -2t \\cos(k)``
- [`continuum_dispersion`](@ref): ``ϵ_k = tk^2``
# See also
* [`HubbardReal1D`](@ref)
* [`ExtendedHubbardReal1D`](@ref)
"""
# Type parameters: TT eltype, M number of modes, AD address type; U and T are
# *value* parameters (interaction and hopping strength, see the constructor
# and the Val-based getproperty methods below).
struct HubbardMom1D{TT,M,AD<:AbstractFockAddress,U,T} <: AbstractHamiltonian{TT}
    add::AD # default starting address, should have N particles and M modes
    ks::SVector{M,TT} # values for k
    kes::SVector{M,TT} # values for kinetic energy
end
function HubbardMom1D(
    add::Union{BoseFS,FermiFS2C};
    u=1.0, t=1.0, dispersion = hubbard_dispersion,
)
    M = num_modes(add)
    # Promote so U and T share a common floating-point type.
    U, T = promote(float(u), float(t))
    step = 2π/M
    # Momentum grid: M equally spaced points; the offset differs for odd/even
    # M so the grid stays within the first Brillouin zone.
    if isodd(M)
        start = -π*(1+1/M) + step
    else
        start = -π + step
    end
    kr = range(start; step = step, length = M)
    ks = SVector{M}(kr)
    # kes = SVector{M}(-2T*cos.(kr))
    # Kinetic energies: t times the chosen dispersion at each grid point.
    kes = SVector{M}(T .* dispersion.(kr))
    return HubbardMom1D{typeof(U),M,typeof(add),U,T}(add, ks, kes)
end
function Base.show(io::IO, h::HubbardMom1D)
    # Constructor-style representation, e.g. `HubbardMom1D(addr; u=1.0, t=1.0)`.
    print(io, "HubbardMom1D(", h.add, "; u=", h.u, ", t=", h.t, ")")
end
# The default starting address is the one the Hamiltonian was built from.
starting_address(h::HubbardMom1D) = h.add
# Real-parameter Hamiltonians are Hermitian linear operators.
LOStructure(::Type{<:HubbardMom1D{<:Real}}) = IsHermitian()
# Expose the value-type-parameters u and t as properties via Val dispatch.
Base.getproperty(h::HubbardMom1D, s::Symbol) = getproperty(h, Val(s))
Base.getproperty(h::HubbardMom1D, ::Val{:ks}) = getfield(h, :ks)
Base.getproperty(h::HubbardMom1D, ::Val{:kes}) = getfield(h, :kes)
Base.getproperty(h::HubbardMom1D, ::Val{:add}) = getfield(h, :add)
Base.getproperty(h::HubbardMom1D{<:Any,<:Any,<:Any,U}, ::Val{:u}) where {U} = U
Base.getproperty(h::HubbardMom1D{<:Any,<:Any,<:Any,<:Any,T}, ::Val{:t}) where {T} = T
# Accessor for the momentum grid.
ks(h::HubbardMom1D) = getfield(h, :ks)
"""
num_singly_doubly_occupied_sites(address)
Returns the number of singly and doubly occupied sites for a bosonic bit string address.
# Example
```jldoctest
julia> Hamiltonians.num_singly_doubly_occupied_sites(BoseFS{3,3}((1, 1, 1)))
(3, 0)
julia> Hamiltonians.num_singly_doubly_occupied_sites(BoseFS{3,3}((2, 0, 1)))
(2, 1)
```
"""
function num_singly_doubly_occupied_sites(b::BoseFS)
    # Walk the occupied modes once; `n` is the occupation number of a mode.
    # Every occupied mode counts as "singly"; modes with n > 1 also as "doubly".
    singlies = 0
    doublies = 0
    for (n, _, _) in occupied_modes(b)
        singlies += 1
        doublies += n > 1
    end
    return singlies, doublies
end
function num_singly_doubly_occupied_sites(onrep::AbstractArray)
    # Fast path when the occupation-number representation is already at hand:
    # a site counts as "singly" occupied when n > 0 and additionally as
    # "doubly" occupied when n > 1.
    singlies = count(>(0), onrep)
    doublies = count(>(1), onrep)
    return singlies, doublies
end
# standard interface function
function num_offdiagonals(ham::HubbardMom1D, add::BoseFS)
    singlies, doublies = num_singly_doubly_occupied_sites(add)
    return num_offdiagonals(ham, add, singlies, doublies)
end
# 4-argument version
@inline function num_offdiagonals(ham::HubbardMom1D, add::BoseFS, singlies, doublies)
    M = num_modes(ham)
    # Each ordered pair of distinct occupied modes has M-2 momentum-conserving
    # targets; each doubly occupied mode has M-1.
    return singlies * (singlies - 1) * (M - 2) + doublies * (M - 1)
end
@inline function num_offdiagonals(ham::HubbardMom1D, add::FermiFS2C{N1,N2}) where {N1,N2}
    M = num_modes(ham)
    # One particle from each component, M-1 momentum transfers.
    return N1 * N2 * (M - 1)
end
"""
momentum_transfer_diagonal(H, map::OccupiedModeMap)
Compute diagonal interaction energy term.
# Example
```jldoctest
julia> a = BoseFS{6,5}((1,2,3,0,0))
BoseFS{6,5}((1, 2, 3, 0, 0))
julia> H = HubbardMom1D(a);
julia> Hamiltonians.momentum_transfer_diagonal(H, OccupiedModeMap(a))
5.2
```
"""
@inline function momentum_transfer_diagonal(
    h::HubbardMom1D{<:Any,M,<:BoseFS}, map
) where {M}
    # Bosonic interaction prefactor u/2M times the map-level diagonal term.
    return h.u / 2M * momentum_transfer_diagonal(map)
end
@inline function momentum_transfer_diagonal(
    h::HubbardMom1D{<:Any,M,<:FermiFS2C}, map_a, map_b
) where {M}
    # Two-component fermionic case: cross-component interaction only.
    return h.u / 2M * momentum_transfer_diagonal(map_a, map_b)
end
@inline function diagonal_element(h::HubbardMom1D, add::BoseFS)
    map = OccupiedModeMap(add)
    # Kinetic energy (occupations dotted with kes) plus interaction diagonal.
    return dot(h.kes, map) + momentum_transfer_diagonal(h, map)
end
@inline function diagonal_element(h::HubbardMom1D, add::FermiFS2C)
    map_a = OccupiedModeMap(add.components[1])
    map_b = OccupiedModeMap(add.components[2])
    # Kinetic energy of both components plus cross-component interaction.
    return dot(h.kes, map_a) + dot(h.kes, map_b) +
        momentum_transfer_diagonal(h, map_a, map_b)
end
@inline function get_offdiagonal(
    ham::HubbardMom1D{<:Any,M,A}, add::A, chosen, map=OccupiedModeMap(add)
) where {M,A<:BoseFS}
    # Delegate the excitation bookkeeping; scale by the interaction prefactor.
    add, onproduct = momentum_transfer_excitation(add, chosen, map)
    return add, ham.u/(2*M)*onproduct
end
@inline function get_offdiagonal(
    ham::HubbardMom1D{<:Any,M,A}, add::A, chosen,
    map_a=OccupiedModeMap(add.components[1]), map_b=OccupiedModeMap(add.components[2])
) where {M,A<:FermiFS2C}
    add_a, add_b = add.components
    new_add_a, new_add_b, onproduct = momentum_transfer_excitation(
        add_a, add_b, chosen, map_a, map_b
    )
    # NOTE(review): prefactor here is u/M (vs u/2M for bosons) — presumably
    # because the two components are distinguishable; confirm against theory.
    return CompositeFS(new_add_a, new_add_b), ham.u/M * onproduct
end
###
### offdiagonals
###
"""
    OffdiagonalsBoseMom1D

Specialized [`AbstractOffdiagonals`](@ref) that keeps track of singly and doubly occupied
sites in current address.
"""
struct OffdiagonalsBoseMom1D{
    A<:BoseFS,T,H<:AbstractHamiltonian{T},O<:OccupiedModeMap
} <: AbstractOffdiagonals{A,T}
    hamiltonian::H
    address::A
    length::Int
    map::O
end
function offdiagonals(h::HubbardMom1D, a::BoseFS)
    map = OccupiedModeMap(a)
    singlies = length(map)
    # Doubly occupied modes are those with occupation number ≥ 2.
    doublies = count(i -> i.occnum ≥ 2, map)
    num = num_offdiagonals(h, a, singlies, doublies)
    return OffdiagonalsBoseMom1D(h, a, num, map)
end
function Base.getindex(s::OffdiagonalsBoseMom1D{A,T}, i)::Tuple{A,T} where {A,T}
    # Lazily computes the i-th (address, matrix element) pair.
    @boundscheck begin
        1 ≤ i ≤ s.length || throw(BoundsError(s, i))
    end
    new_address, matrix_element = get_offdiagonal(s.hamiltonian, s.address, i, s.map)
    return (new_address, matrix_element)
end
Base.size(s::OffdiagonalsBoseMom1D) = (s.length,)
# Lazy offdiagonals for the two-component fermionic case; caches one
# OccupiedModeMap per component.
struct OffdiagonalsFermiMom1D2C{
    F<:FermiFS2C,T,H<:AbstractHamiltonian{T},O1,O2
} <: AbstractOffdiagonals{F,T}
    hamiltonian::H
    address::F
    length::Int
    map_a::O1
    map_b::O2
end
function offdiagonals(h::HubbardMom1D, f::FermiFS2C)
    comp_a, comp_b = f.components
    map_a = OccupiedModeMap(comp_a)
    map_b = OccupiedModeMap(comp_b)
    num = num_offdiagonals(h, f)
    return OffdiagonalsFermiMom1D2C(h, f, num, map_a, map_b)
end
Base.size(s::OffdiagonalsFermiMom1D2C) = (s.length,)
function Base.getindex(s::OffdiagonalsFermiMom1D2C{A,T}, i)::Tuple{A,T} where {A,T}
    # Lazily computes the i-th (address, matrix element) pair.
    # Fix: the lower bound used to read `i ≤ i` (a tautology), letting
    # non-positive indices through; now matches the Bose version (`1 ≤ i`).
    @boundscheck begin
        1 ≤ i ≤ s.length || throw(BoundsError(s, i))
    end
    new_address, matrix_element = get_offdiagonal(
        s.hamiltonian, s.address, i, s.map_a, s.map_b
    )
    return (new_address, matrix_element)
end
###
### momentum
###
# Momentum observable for HubbardMom1D: diagonal in the momentum basis.
struct MomentumMom1D{T,H<:AbstractHamiltonian{T}} <: AbstractHamiltonian{T}
    ham::H
end
# Fix: the previous method `LOStructure(::Type{MomentumMom1D{H,T}}) where
# {H,T<:Real}` named the parameters in the wrong order, constraining the
# *Hamiltonian* parameter to `T<:Real`, so it never matched. Written here in
# the same form as the HubbardMom1D method above.
LOStructure(::Type{<:MomentumMom1D{<:Real}}) = IsHermitian()
num_offdiagonals(ham::MomentumMom1D, add) = 0
diagonal_element(mom::MomentumMom1D, add) = mod1(onr(add)⋅ks(mom.ham) + π, 2π) - π # fold into (-π, π]
momentum(ham::HubbardMom1D) = MomentumMom1D(ham)
| [
37811,
198,
220,
220,
220,
12575,
23024,
62,
6381,
79,
6900,
7,
74,
8,
198,
7279,
79,
6900,
8695,
329,
685,
63,
16066,
23024,
29252,
16,
35,
63,
16151,
31,
5420,
737,
16409,
4600,
12,
17,
6966,
7,
74,
8,
44646,
198,
198,
6214,
6... | 2.239726 | 3,504 |
<reponame>jagot/AtomicLevels.jl<gh_stars>1-10
module AtomicLevels
using UnicodeFun
using Formatting
using Parameters
using BlockBandedMatrices
using WignerSymbols
using HalfIntegers
using Combinatorics
# Package source, loaded in dependency order.
include("common.jl")
include("unicode.jl")
include("parity.jl")
include("orbitals.jl")
include("relativistic_orbitals.jl")
include("spin_orbitals.jl")
include("configurations.jl")
include("excited_configurations.jl")
include("terms.jl")
include("allchoices.jl")
include("jj_terms.jl")
include("intermediate_terms.jl")
include("couple_terms.jl")
include("csfs.jl")
include("jj2lsj.jl")
include("levels.jl")
# Utility helpers kept out of the main namespace.
module Utils
include("utils/print_states.jl")
end
# Deprecations
@deprecate jj2lsj(args...) jj2ℓsj(args...)
end # module
| [
27,
7856,
261,
480,
29,
73,
363,
313,
14,
2953,
10179,
4971,
82,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
28976,
4971,
82,
198,
198,
3500,
34371,
24629,
198,
3500,
18980,
889,
198,
3500,
40117,
198,
3500,
9726,
... | 2.780303 | 264 |
module ARCSolver

export main, simple

using Reexport
include("grids.jl")
@reexport using .Grids

include("render.jl")
@reexport using .Render

include("solve.jl")
@reexport using .Solve

include("diff.jl")
@reexport using .Diff

using Images, ImageView

# Load all ARC tasks, render each task's first IO-pair diff grid to a PNG
# under out/diffs/, and print memory footprints of the intermediate data.
function main()
    tasks = load_tasks()
    # warmstart
    print("warmstarting...")
    # First call compiles diff_grids/to_img so the @time loops below measure
    # steady-state performance.
    to_img(diff_grids(tasks[14].ios[1]...))
    println("done")

    diffgrids = Vector{ARCDiffGrid}(undef, length(tasks))
    @time for i in 1:length(tasks)
        println(i)
        diffgrids[i] = diff_grids(tasks[i].ios[1]...)
    end
    @time for (i,(grid,task)) in enumerate(zip(diffgrids,tasks))
        println(i)
        Images.save("out/diffs/$(splitpath(task.path)[end]).png",to_img(grid))
    end
    println(sizeof(diffgrids))
    println(sizeof(tasks))
end

# Render the diff grid of a single hard-coded task (quick smoke test).
function simple()
    task = load_tasks(n=20)[14]
    dg = diff_grids(task.ios[1]...)
    to_img(dg)
end

end
| [
21412,
5923,
7902,
14375,
198,
198,
39344,
1388,
11,
2829,
198,
198,
3500,
797,
39344,
198,
17256,
7203,
2164,
2340,
13,
20362,
4943,
198,
31,
631,
87,
634,
1262,
764,
8642,
2340,
198,
198,
17256,
7203,
13287,
13,
20362,
4943,
198,
31... | 2.280788 | 406 |
# Model
include_model("hopper")
mb = 3.0 # body mass
ml = 0.3 # leg mass
Jb = 0.75 # body inertia
Jl = 0.075 # leg inertia
model = Hopper{Discrete, FixedTime}(n, m, d,
mb, ml, Jb, Jl,
0.25, g,
qL, qU,
uL, uU,
nq,
nu,
nc,
nf,
nb,
ns,
idx_u,
idx_λ,
idx_b,
idx_ψ,
idx_η,
idx_s)
model_ft = free_time_model(model)
# stair traj
@load joinpath(@__DIR__, "hopper_stair.jld2") qm um γm bm ψm ηm μm hm
# Stair
# Contact gap (signed distance) function: height of the foot above the
# terrain. The terrain is a single stair of height 3 × 0.25 covering x < 0.125,
# and flat ground elsewhere.
function ϕ_func(model::Hopper, q)
    k = kinematics(model, q)
    if k[1] < 0.125
        return @SVector [k[2] - 3 * 0.25]
    else
        return @SVector [k[2]]
    end
end
# Horizon
T = 80
# Time step
# tf = 0.75
h = hm #tf / (T - 1)
# Bounds
_uu = Inf * ones(model_ft.m)
_uu[model_ft.idx_u] .= Inf#10.0#model_ft.uU
_uu[end] = h#3.0 * h
_ul = zeros(model_ft.m)
_ul[model_ft.idx_u] .= -Inf#10.0 #model_ft.uL
_ul[end] = h#0.2 * h
ul, uu = control_bounds(model_ft, T, _ul, _uu)
# Initial and final states
z_h = 3 * 0.25
q1 = [0.0, 0.5 + z_h, 0.0, 0.5]
q11 = [0.0, 0.5 + z_h + 0.125, 0.0, 0.25]
qm1 = [0.125, 0.5 + z_h + 0.25, -0.5 * π, 0.25]
qm2 = [0.25, 0.5 + z_h + 0.125, -1.5 * π, 0.25]
qm3 = [0.375, 0.5 + z_h + 0.0625, -2.0 * π, 0.5]
qT = [0.5, 0.5, -2.0 * π, 0.5]
ql1 = linear_interpolation(q1, q11, 14)
ql2 = linear_interpolation(q11, qm1, 15)
ql3 = linear_interpolation(qm1, qm2, 15)
ql4 = linear_interpolation(qm2, qm3, 15)
ql5 = linear_interpolation(qm3, qT, 14)
ql6 = linear_interpolation(qT, qT, 14)
q_ref = [ql1...,
ql2[2:end]...,
ql3[2:end]...,
ql4[2:end]...,
ql5[2:end]...,
ql6[2:end]...]
θr = range(0.0, stop = -2.0 * π, length = (58 - 14))
for (i, t) = enumerate(15:58)
q_ref[t][3] = θr[i]
end
# model_ft.qU[2] = 2.5
xl, xu = state_bounds(model_ft, T,
[model_ft.qL; model_ft.qL],
[model_ft.qU; model_ft.qU],
x1 = [q1; q1],
xT = [qT; qT])
# Objective
include_objective(["velocity", "nonlinear_stage", "control_velocity"])
qp = 0.0 * [0.01; 0.01; 1.0; 1.0]
obj_tracking = quadratic_time_tracking_objective(
[Diagonal([qp; qp]) for t = 1:T],
[Diagonal([1.0e-1, 1.0e-2,
1.0e-5 * ones(model_ft.nc)..., 1.0e-5 * ones(model_ft.nb)...,
zeros(model_ft.m - model_ft.nu - model_ft.nc - model_ft.nb - 1)..., 0.0])
for t = 1:T-1],
[[qT; qT] for t = 1:T],
[zeros(model_ft.m) for t = 1:T],
1.0)
obj_contact_penalty = PenaltyObjective(1.0e5, model_ft.m - 1)
obj_velocity = velocity_objective(
[(t > 20 && t < 60) ? Diagonal(1.0e-2 * [1.0; 1.0; 100.0; 100.0]) : Diagonal(1.0e-2 * [1.0; 1.0; 1.0; 1.0]) for t = 1:T-1],
model_ft.nq,
h = h,
idx_angle = collect([3]))
obj_ctrl_vel = control_velocity_objective(Diagonal([1.0e-1 * ones(model_ft.nu);
1.0e-3 * ones(model_ft.nc + model_ft.nb);
zeros(model_ft.m - model_ft.nu - model_ft.nc - model_ft.nb)]))
# Nonlinear stage cost: penalises horizontal foot velocity outside the
# t ∈ [40, 60] window and always tracks the reference configuration q_ref[t].
# Uses file-level `model`, `h`, `q_ref`, and `T`.
function l_stage(x, u, t)
    J = 0.0
    # Foot positions at the two configurations stacked in the state.
    _q1 = view(x, 1:4)
    p1 = kinematics(model, _q1)
    _q2 = view(x, 4 .+ (1:4))
    p2 = kinematics(model, _q2)
    # Finite-difference foot velocity over one time step.
    v = (p2 - p1) ./ h
    if t < 40
        J += 1000.0 * v[1]^2.0
    end
    if t > 60
        J += 1000.0 * v[1]^2.0
    end
    if true#t > 5 #|| (t > 20 && t < T)
        J += (_q1 - q_ref[t])' * Diagonal([100.0; 100.0; 1000.0; 1000.0]) * (_q1 - q_ref[t])
    end
    return J
end
# Terminal-cost variant: reuse the stage cost at the final index T.
l_stage(x) = l_stage(x, nothing, T)
obj_stage = nonlinear_stage_objective(l_stage, l_stage)
obj = MultiObjective([obj_tracking,
obj_contact_penalty,
obj_velocity,
obj_stage,
obj_ctrl_vel])
# Constraints
include_constraints(["free_time", "contact", "stage"])
con_free_time = free_time_constraints(T)
con_contact = contact_constraints(model_ft, T)
p1_ref = kinematics(model, q1)
pT_ref = kinematics(model, qT)
# Equality constraint: pin the foot to its initial position p1_ref (applied
# over the first 10 time steps, see t_idx1 below).
function pinned1!(c, x, u, t)
    q = view(x, 1:4)
    c[1:2] = p1_ref - kinematics(model, q)
    nothing
end
# Equality constraint: pin the foot to its final position pT_ref (applied
# over the last 10 time steps).
function pinnedT!(c, x, u, t)
    q = view(x, 4 .+ (1:4))
    c[1:2] = pT_ref - kinematics(model, q)
    nothing
end
# Writes the foot's horizontal position into c[1]; registered with inequality
# index (1:1) below — presumably keeping the foot on one side of the stair
# edge during the first 40 steps (confirm sign convention).
function no_foot_slip!(c, x, u, t)
    q = view(x, 1:4)
    c[1] = kinematics(model, q)[1]
end
# Constraint assembly: pin the foot over the first and last 10 knot points,
# and apply the foot constraint over the first 40 knot points.
n_stage = 2
t_idx1 = vcat([t for t = 1:10]...)
t_idxT = vcat([(T - 10 + 1):T]...)
con_pinned1 = stage_constraints(pinned1!, 2, (1:0), t_idx1)
con_pinnedT = stage_constraints(pinnedT!, 2, (1:0), t_idxT)
con_no_slip = stage_constraints(no_foot_slip!, 1, (1:1), collect(1:40))
con = multiple_constraints([con_free_time, con_contact, con_pinned1, con_pinnedT, con_no_slip])#, con_loop])

# Problem
prob = trajectory_optimization_problem(model_ft,
    obj,
    T,
    xl = xl,
    xu = xu,
    ul = ul,
    uu = uu,
    con = con)

# Trajectory initialization
x0 = configuration_to_state(q_ref) # linear interpolation on state
u0 = [[1.0e-2 * rand(model_ft.m-1); h] for t = 1:T-1] # random controls

# Pack trajectories into vector
z0 = pack(x0, u0, prob)

#NOTE: may need to run examples multiple times to get good trajectories
# Solve nominal problem
@time z̄, info = solve(prob, copy(z0),
    nlp = :ipopt,
    tol = 1.0e-3, c_tol = 1.0e-3, mapl = 5,
    time_limit = 60)
@show check_slack(z̄, prob)
x̄, ū = unpack(z̄, prob)
tf, t, h̄ = get_time(ū)

# Split the stacked decision vector: physical controls and the contact-model
# variables indexed by the model (presumably impulses/slacks — verify against
# the model definition).
q = state_to_configuration(x̄)
u = [u[model.idx_u] for u in ū]
γ = [u[model.idx_λ] for u in ū]
b = [u[model.idx_b] for u in ū]
ψ = [u[model.idx_ψ] for u in ū]
η = [u[model.idx_η] for u in ū]
# NOTE(review): after this line h̄ is a scalar; later `h̄[1]` still works
# because Julia scalars support 1-based indexing.
h̄ = mean(h̄)

# @save joinpath(pwd(), "examples/trajectories/hopper_vertical_gait.jld2") z̄ x̄ ū h̄ q u γ b

# Visualization of the nominal trajectory
include(joinpath(pwd(), "models/visualize.jl"))
vis = Visualizer()
open(vis)
visualize!(vis, model_ft,
    q,
    Δt = h̄[1],
    scenario = :vertical)
setobject!(vis["box"], GeometryBasics.HyperRectangle(Vec(0.0, 0.0, 0.0),
    Vec(0.25, 0.5, 3 * 0.25)), MeshPhongMaterial(color = RGBA(0.5, 0.5, 0.5, 1.0)))
settransform!(vis["box"], Translation(-0.125, -0.25, 0))

using Plots
# Reference configurations (black) vs optimized configurations (red).
plot(hcat(q_ref...)[1:4, :]', color = :black , width = 2.0)
plot!(hcat(q...)[1:4, :]', color = :red, width = 1.0)
plot(hcat(u...)', linetype = :steppost)

# Save
hm = h̄
μm = model.μ
qm, um, γm, bm, ψm, ηm = q, u, γ, b, ψ, η
@save joinpath(@__DIR__, "hopper_tall_flip.jld2") qm um γm bm ψm ηm μm hm

# composite stairs + flip
@load joinpath(@__DIR__, "hopper_stair.jld2") qm um γm bm ψm ηm μm hm
"""
    step_repeat(q, u, γ, b, ψ, η, T; steps = 2, rise = 0.25)

Tile one optimized gait cycle `steps` times to build a longer trajectory.
Each repetition is shifted horizontally by the displacement of one cycle
(`q[T+1][1] - q[2][1]`) and vertically by `rise` per step (the stair height;
the previously hard-coded `0.25` is kept as the default for backward
compatibility). Returns the concatenated `(q, u, γ, b, ψ, η)` trajectories.
"""
function step_repeat(q, u, γ, b, ψ, η, T; steps = 2, rise = 0.25)
    # Deep-copy the inputs so the push!es below never mutate the caller's data.
    qm = [deepcopy(q)...]
    um = [deepcopy(u)...]
    γm = [deepcopy(γ)...]
    bm = [deepcopy(b)...]
    ψm = [deepcopy(ψ)...]
    ηm = [deepcopy(η)...]

    stride = zero(qm[1])
    for i = 1:(steps-1)
        # Accumulate one full-cycle horizontal advance and one stair rise.
        stride[1] += q[T+1][1] - q[2][1]
        stride[2] += rise
        for t = 1:T-1
            # Skip the first two configurations of the cycle (they overlap the
            # previous repetition's tail) and shift the rest by the stride.
            push!(qm, q[t+2] + stride)
            push!(um, u[t])
            push!(γm, γ[t])
            push!(bm, b[t])
            push!(ψm, ψ[t])
            push!(ηm, η[t])
        end
    end

    return qm, um, γm, bm, ψm, ηm
end
# Repeat the stair gait three times and save the composite trajectory.
qm, um, γm, bm, ψm, ηm = step_repeat(qm, um, γm, bm, ψm, ηm, T, steps = 3)
@save joinpath(@__DIR__, "hopper_stairs_3.jld2") qm um γm bm ψm ηm μm hm
# @load joinpath(@__DIR__, "hopper_stairs_3.jld2") qm um γm bm ψm ηm μm hm

# Three stacked boxes form the staircase in the visualizer.
setobject!(vis["box1"], GeometryBasics.HyperRectangle(Vec(0.0, 0.0, 0.0),
    Vec(0.25, 0.5, 0.25)), MeshPhongMaterial(color = RGBA(0.5, 0.5, 0.5, 1.0)))
settransform!(vis["box1"], Translation(0.125, -0.25, 0))
setobject!(vis["box2"], GeometryBasics.HyperRectangle(Vec(0.0, 0.0, 0.0),
    Vec(0.25, 0.5, 2 * 0.25)), MeshPhongMaterial(color = RGBA(0.5, 0.5, 0.5, 1.0)))
settransform!(vis["box2"], Translation(0.125 + 0.25, -0.25, 0))
setobject!(vis["box3"], GeometryBasics.HyperRectangle(Vec(0.0, 0.0, 0.0),
    Vec(0.25, 0.5, 3 * 0.25)), MeshPhongMaterial(color = RGBA(0.5, 0.5, 0.5, 1.0)))
settransform!(vis["box3"], Translation(0.125 + 2 * 0.25, -0.25, 0))

# Append the "tall flip" trajectory after the stairs, shifted to start where
# the stair trajectory ended.
tall_flip = load(joinpath(@__DIR__, "hopper_tall_flip.jld2"))
qm_f, um_f, γm_f, bm_f, ψm_f, ηm_f, μm_f, hm_f = tall_flip["qm"], tall_flip["um"], tall_flip["γm"], tall_flip["bm"], tall_flip["ψm"], tall_flip["ηm"], tall_flip["μm"], tall_flip["hm"]
str = zero(qm[1])
str[1] = qm[end][1]
# Hold the first flip pose for 10 knot points (t is fixed at 1).
for i = 1:10
    t = 1
    push!(qm, qm_f[t+2] + str)
    push!(um, um_f[t])
    push!(γm, γm_f[t])
    push!(bm, bm_f[t])
    push!(ψm, ψm_f[t])
    push!(ηm, ηm_f[t])
end
# Then play the whole flip trajectory.
for t = 1:length(um_f)
    push!(qm, qm_f[t+2] + str)
    push!(um, um_f[t])
    push!(γm, γm_f[t])
    push!(bm, bm_f[t])
    push!(ψm, ψm_f[t])
    push!(ηm, ηm_f[t])
end
@save joinpath(@__DIR__, "hopper_stairs_3_flip.jld2") qm um γm bm ψm ηm μm hm

visualize!(vis, model_ft,
    qm,
    Δt = h̄[1],
    scenario = :stairs)
# Camera setup for the recorded view.
setprop!(vis["/Cameras/default/rotated/<object>"], "zoom", 20)
settransform!(vis["/Cameras/default"],
    compose(Translation(0.0, -90.0, -1.0),LinearMap(RotZ(-0.5 * π))))
| [
2,
9104,
198,
17256,
62,
19849,
7203,
8873,
2848,
4943,
198,
198,
2022,
796,
513,
13,
15,
1303,
1767,
2347,
198,
4029,
796,
657,
13,
18,
220,
1303,
1232,
2347,
198,
41,
65,
796,
657,
13,
2425,
1303,
1767,
48482,
198,
41,
75,
796,
... | 1.831924 | 4,611 |
"""
    makealltrans(N, n, Ω, basis="Hermite")

Build the per-dimension n-field transforms for a PGPE calculation by calling
`nfieldtrans` once per spatial dimension. `N` holds the mode numbers and `Ω`
the trap frequencies; the dimension is inferred from `length(N)`.

Returns `(x, wx, Tx)` in 1D, `(x, wx, Tx, y, wy, Ty)` in 2D, and
`(x, wx, Tx, y, wy, Ty, z, wz, Tz)` in 3D. Throws an `ArgumentError` for any
other dimension (previously this silently returned `nothing`).
"""
function makealltrans(N, n, Ω, basis="Hermite")
    dim = length(N)
    if dim == 1
        Nx = N[1]
        ωx = Ω[1]
        # n-field transforms for PGPE
        x, wx, Tx = nfieldtrans(Nx, n, ω=ωx, basis=basis)
        return x, wx, Tx
    elseif dim == 2
        Nx, Ny = N
        ωx, ωy = Ω
        x, wx, Tx = nfieldtrans(Nx, n, ω=ωx, basis=basis)
        y, wy, Ty = nfieldtrans(Ny, n, ω=ωy, basis=basis)
        return x, wx, Tx, y, wy, Ty
    elseif dim == 3
        Nx, Ny, Nz = N
        ωx, ωy, ωz = Ω
        x, wx, Tx = nfieldtrans(Nx, n, ω=ωx, basis=basis)
        y, wy, Ty = nfieldtrans(Ny, n, ω=ωy, basis=basis)
        z, wz, Tz = nfieldtrans(Nz, n, ω=ωz, basis=basis)
        return x, wx, Tx, y, wy, Ty, z, wz, Tz
    else
        throw(ArgumentError("makealltrans supports dimensions 1-3, got dim=$dim"))
    end
end
| [
8818,
787,
439,
7645,
7,
45,
11,
77,
11,
138,
102,
11,
12093,
271,
2625,
48523,
578,
4943,
198,
27740,
28,
13664,
7,
45,
8,
198,
361,
5391,
855,
16,
198,
220,
399,
87,
796,
399,
58,
16,
60,
198,
220,
18074,
231,
87,
796,
7377,... | 1.773109 | 357 |
import Distributions: logpdf, pdf
"""
Equal-variance signal detection theory (SDT) model with discriminability `d`
and criterion (response bias) `c`. Field types are left generic so the model
composes with generic parameter types.
"""
struct SDT{T1,T2} <: ContinuousUnivariateDistribution
    d::T1  # discriminability (d′)
    c::T2  # criterion / response bias
end
# Convenience methods: splat `(hits, fas, Nd)` stored in a vector or tuple
# into the 4-argument logpdf method below.
logpdf(d::SDT, data::Vector{Int64}) = logpdf(d, data...)
logpdf(d::SDT, data::Tuple{Vararg{Int64}}) = logpdf(d, data...)
# Joint log-likelihood of `hits` and `fas` (false alarms) out of `Nd` trials
# under the equal-variance Gaussian SDT model.
function logpdf(d::SDT, hits, fas, Nd)
    # Hit rate Φ(d/2 − c) and false-alarm rate Φ(−d/2 − c).
    θhit = cdf(Normal(0, 1), d.d / 2 - d.c)
    θfa = cdf(Normal(0, 1), -d.d / 2 - d.c)
    # Independent binomial likelihoods for the two response counts.
    return logpdf(Binomial(Nd, θhit), hits) + logpdf(Binomial(Nd, θfa), fas)
end
pdf(d::SDT, data::Vector{Int64}) = exp(logpdf(d, data...))
| [
11748,
46567,
507,
25,
2604,
12315,
11,
37124,
198,
198,
7249,
9834,
51,
90,
51,
16,
11,
51,
17,
92,
1279,
25,
45012,
3118,
42524,
20344,
3890,
198,
220,
220,
220,
288,
3712,
51,
16,
198,
220,
220,
220,
269,
3712,
51,
17,
198,
4... | 2.048327 | 269 |
# This code is based on the gridap hyperelasticity demo: https://gridap.github.io/Tutorials/dev/pages/t005_hyperelasticity/
# Here I expanded it to 3D and added Makie based model visualisation.
# Note this code currently requires: ] add Makie@0.15.2 GLMakie@0.4.6
using Gridap
using Gridap.Visualization
using Gridap.ReferenceFEs
using Gridap.Geometry
using FileIO
using LineSearches: BackTracking
using GLMakie, GeometryBasics
using Colors, ColorSchemes
# Geometry and BC parameters
sample_dim = [1,1,1] #Sample dimensions
numElem = [5,5,5] #Number of elements in each direction
disp_max = 0.3 #Maximum displacement
disp_inc = disp_max/10 #Desired displacement increment per step
degree = 2 #Mesh order

# Material parameters (Lamé parameters of the Neo-Hookean model below)
const λ = 100.0
const μ = 1.0

# Deformation Gradient
F(∇u) = one(∇u) + ∇u'
#Jacobian = volume ratio
J(F) = sqrt(det(C(F)))
#Green-Lagrange strain
#E(F) = 0.5*( F'*F - one(F) ) #Green-Lagrange strain
# Linearized Green-Lagrange strain increment along ∇du.
dE(∇du,∇u) = 0.5*( ∇du⋅F(∇u) + (∇du⋅F(∇u))' )
# Right Cauchy-green deformation tensor
C(F) = (F')⋅F
# Hyperelastic constitutive law for the Neo hookean material
# Second Piola-Kirchhoff stress: S = μ(I − C⁻¹) + λ log(J) C⁻¹.
function S(∇u)
    Cinv = inv(C(F(∇u))) #Inverse of C i.e. B
    μ*(one(∇u)-Cinv) + λ*log(J(F(∇u)))*Cinv
end

# Directional derivative of S along the strain increment _dE; used for the
# material part of the tangent stiffness.
function dS(∇du,∇u)
    Cinv = inv(C(F(∇u)))
    _dE = dE(∇du,∇u)
    λ*(Cinv⊙_dE)*Cinv + 2*(μ-λ*log(J(F(∇u))))*Cinv⋅_dE⋅(Cinv')
end
# Cauchy stress tensor
σ(∇u) = (1.0/J(F(∇u)))*F(∇u)⋅S(∇u)⋅(F(∇u))'

# Model: structured hexahedral mesh of the sample box.
domain = (0,sample_dim[1],0,sample_dim[2],0,sample_dim[3])
partition = (numElem[1],numElem[2],numElem[3])
model = CartesianDiscreteModel(domain,partition)

# Define new boundaries: opposite faces get Dirichlet tags (fixed / driven).
labels = get_face_labeling(model)
add_tag_from_tags!(labels,"diri_0",[1,3,5,7,13,15,17,19,25])
add_tag_from_tags!(labels,"diri_1",[2,4,6,8,14,16,18,20,26])

# Setup integration
Ω = Triangulation(model)
dΩ = Measure(Ω,degree)

# Weak form: residual plus material and geometric stiffness contributions.
res(u,v) = ∫( (dE∘(∇(v),∇(u))) ⊙ (S∘∇(u)) )*dΩ
jac_mat(u,du,v) = ∫( (dE∘(∇(v),∇(u))) ⊙ (dS∘(∇(du),∇(u))) )*dΩ
jac_geo(u,du,v) = ∫( ∇(v) ⊙ ( (S∘∇(u))⋅∇(du) ) )*dΩ
jac(u,du,v) = jac_mat(u,du,v) + jac_geo(u,du,v)

# Construct the FEspace (vector-valued linear Lagrangian elements).
reffe = ReferenceFE(lagrangian,VectorValue{3,Float64},1)
V = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags = ["diri_0", "diri_1"])

# Setup non-linear solver (Newton with back-tracking line search).
nls = NLSolver(show_trace=true,method=:newton,linesearch=BackTracking())
solver = FESolver(nls)
# Solve one displacement increment: rebuild the trial space with Dirichlet
# data g0 (fixed face) and g1 (face driven to disp_x), assemble the FE
# operator, and warm-start from the previous solution x0 and solver cache.
# Returns (free dof values, FE solution, updated cache).
function run(x0,disp_x,step,nsteps,cache)
    g0 = VectorValue(0.0,0.0,0.0)
    g1 = VectorValue(disp_x,0.0,0.0)
    U = TrialFESpace(V,[g0,g1])

    #FE problem
    op = FEOperator(res,jac,U,V)

    println("\n+++ Solving for disp_x $disp_x in step $step of $nsteps +++\n")

    uh = FEFunction(U,x0)
    uh, cache = solve!(uh,solver,op,cache)

    return get_free_dof_values(uh), uh, cache
end
# Ramp the applied displacement from 0 to disp_max in load steps of roughly
# disp_inc, re-solving at each step and recording the nodal displacement
# field. Entry 1 holds the zero (undeformed) state; entry step+1 holds step's
# solution.
function runs(disp_max,disp_inc)
    nsteps = ceil(Int,abs(disp_max)/disp_inc)
    x0 = zeros(Float64,num_free_dofs(V))
    nodalDisplacements = Vector{Vector{VectorValue{3, Float64}}}(undef,nsteps+1)
    cache = nothing
    for step in 1:nsteps
        disp_x = step * disp_max / nsteps
        x0, uh, cache = run(x0,disp_x,step,nsteps,cache)
        # Interpolated nodal values of the displacement field for plotting.
        vd = visualization_data(Ω,"",cellfields=["u"=>uh])
        nodalDisplacements[step+1] = vd[1].nodaldata["u"]
    end
    nodalDisplacements[1]=nodalDisplacements[2].*0 #Add zeros for initial state
    return nodalDisplacements
end
function nodesToPointset(V)
    # Convert Gridap coordinate entries into Makie-compatible points. The
    # output eltype is pinned to Point{3,Float32}, so the assignment below
    # also performs the Float64 -> Float32 narrowing.
    P = Vector{GeometryBasics.Point{3, Float32}}(undef, length(V))
    for (idx, node) in enumerate(V)
        P[idx] = convert(GeometryBasics.Point, convert(Tuple{Float64, Float64, Float64}, node))
    end
    return P
end
# Build a Makie-compatible surface description (points, quad faces, face-type
# labels) from a Gridap hexahedral triangulation.
function convertToFacePointSet(Ω)
    #TO DO: Implement other element types, hex->quads only shown here
    #Get gridap element and node descriptions
    # E=Ω.cell_node_ids[:] #Elements
    # V=Ω.node_coords[:] #Nodes/Vertices
    vd=visualization_data(Ω,"");
    grid = vd[1].grid
    E = get_cell_node_ids(grid)
    V = get_node_coordinates(grid)

    #Get faces and convert to QuadFace type
    # Each hexahedron contributes 6 quad faces; faces of the same kind
    # (top/bottom/side1/side2/front/back) occupy contiguous blocks of length
    # size(E,1) so they can be labeled per kind below.
    F=Vector{QuadFace{Int64}}(undef,size(E,1)*6)
    for q=1:1:size(E,1)
        F[q]=convert(QuadFace{Int64},E[q][[1,2,4,3],1]) #top
        F[q+size(E,1)*1]=convert(QuadFace{Int64},E[q][[5,6,8,7],1]) #bottom
        F[q+size(E,1)*2]=convert(QuadFace{Int64},E[q][[1,2,6,5],1]) #side 1
        F[q+size(E,1)*3]=convert(QuadFace{Int64},E[q][[4,3,7,8],1]) #side 2
        F[q+size(E,1)*4]=convert(QuadFace{Int64},E[q][[2,4,8,6],1]) #front
        F[q+size(E,1)*5]=convert(QuadFace{Int64},E[q][[3,1,5,7],1]) #back
    end

    #Create face type labels (1..6 = which face group the quad belongs to)
    faceTypeLabel=[ones(Int64,size(E,1))*1;
                   ones(Int64,size(E,1))*2;
                   ones(Int64,size(E,1))*3;
                   ones(Int64,size(E,1))*4;
                   ones(Int64,size(E,1))*5;
                   ones(Int64,size(E,1))*6;]
    P=nodesToPointset(V)
    return P,F, faceTypeLabel
end
#Do the work!
# Run the load-stepping solve; yields one nodal displacement field per step.
nodalDisplacements = runs(disp_max,disp_inc)
#Create makie compatible face and point set
pointSet,faceSet,faceTypeLabel=convertToFacePointSet(Ω)
function getCoordStep(Ω,nodalDisplacement)
    # Deformed configuration = reference node coordinates + displacement
    # field, converted to Makie points.
    grid = visualization_data(Ω,"")[1].grid
    deformed = get_node_coordinates(grid) + nodalDisplacement
    return nodesToPointset(deformed)
end
function getMagnitude(U)
    # Per-node Euclidean norm of 3-component displacement entries; used as
    # the scalar color field for the mesh plot.
    mags = zeros(length(U))
    for (idx, disp) in enumerate(U)
        mags[idx] = sqrt(disp[1]^2 + disp[2]^2 + disp[3]^2)
    end
    return mags
end
# Initial (undeformed) geometry for the first render.
pointSet=getCoordStep(Ω,nodalDisplacements[1])
#Gather face and point set as GeometryBasics mesh
M=GeometryBasics.Mesh(pointSet,faceSet)
nodalColor=getMagnitude(nodalDisplacements[1])

#Visualize mesh
fig = Figure()
# Slider selects the load step; the `lift`ed observables below re-compute the
# colors, mesh, and title whenever it changes.
sl_step = Slider(fig[2, 1], range = 1:1:size(nodalDisplacements,1), startvalue = size(nodalDisplacements,1))
nodalColor = lift(sl_step.value) do stepIndex
    getMagnitude(nodalDisplacements[stepIndex])
end
M = lift(sl_step.value) do stepIndex
    GeometryBasics.Mesh(getCoordStep(Ω,nodalDisplacements[stepIndex]),faceSet)
end
titleString = lift(sl_step.value) do stepIndex
    "Step: "*string(stepIndex-1)
end
ax=Axis3(fig[1, 1], aspect = :data, xlabel = "X", ylabel = "Y", zlabel = "Z", title = titleString)
hp=poly!(M, strokewidth=1,shading=false,color=nodalColor, transparency=false, overdraw=false,
    colormap = (RGB(255.0, 215.0, 0.0)/255,RGB(0.0, 87.0, 183.0)/255),colorrange=(0,disp_max))
Colorbar(fig[1, 2],hp.plots[1],label = "Displacement magnitude [mm]")
fig
# Alternative styling kept for reference:
# ax=Axis3(fig[1, 1], aspect = :data, xlabel = "X", ylabel = "Y", zlabel = "Z", title = titleString)
# hp=poly!(M, strokewidth=3,shading=true,color=nodalColor, transparency=false, overdraw=false
# ,colormap = Reverse(:Spectral),colorrange=(0,0.8))
# Colorbar(fig[1, 2],hp.plots[1],label = "Displacement magnitude [mm]")
# fig
| [
2,
770,
2438,
318,
1912,
319,
262,
10706,
499,
20606,
2411,
3477,
414,
13605,
25,
3740,
1378,
25928,
499,
13,
12567,
13,
952,
14,
51,
44917,
82,
14,
7959,
14,
31126,
14,
83,
22544,
62,
71,
2981,
2411,
3477,
414,
14,
198,
2,
3423,
... | 2.127701 | 3,101 |
{"score": 8.04, "timestamp": 1580207216.0, "score_count": 256261}
{"score": 8.06, "timestamp": 1567156859.0, "score_count": 246192}
{"score": 8.06, "timestamp": 1566888606.0, "score_count": 245781}
{"score": 8.06, "timestamp": 1565672254.0, "score_count": 244871}
{"score": 8.06, "timestamp": 1565469084.0, "score_count": 244871}
{"score": 8.06, "timestamp": 1565467411.0, "score_count": 244871}
{"score": 8.06, "timestamp": 1565143938.0, "score_count": 244453}
{"score": 8.06, "timestamp": 1565141737.0, "score_count": 244453}
{"score": 8.06, "timestamp": 1565136435.0, "score_count": 244453}
{"score": 8.06, "timestamp": 1565087565.0, "score_count": 244453}
{"score": 8.06, "timestamp": 1564853335.0, "score_count": 244453}
{"score": 8.06, "timestamp": 1564844609.0, "score_count": 244453}
{"score": 8.06, "timestamp": 1564796503.0, "score_count": 244453}
{"score": 8.06, "timestamp": 1564529822.0, "score_count": 244155}
{"score": 8.06, "timestamp": 1564455235.0, "score_count": 244155}
{"score": 8.07, "timestamp": 1561629271.0, "score_count": 242413}
{"score": 8.07, "timestamp": 1553317857.0, "score_count": 237858}
{"score": 8.11, "timestamp": 1516251698.0, "score_count": 213492}
{"score": 8.18, "timestamp": 1475839536.0, "score_count": 179925}
{"score": 8.18, "timestamp": 1475839535.0, "score_count": 179925}
{"score": 8.18, "timestamp": 1475839530.0, "score_count": 179925}
{"score": 8.18, "timestamp": 1475839525.0, "score_count": 179925}
{"score": 8.18, "timestamp": 1475839521.0, "score_count": 179925}
{"score": 8.18, "timestamp": 1475839515.0, "score_count": 179925}
{"score": 8.18, "timestamp": 1475839516.0, "score_count": 179925}
{"score": 8.21, "timestamp": 1462139731.0, "score_count": 166609}
{"score": 8.21, "timestamp": 1460755342.0, "score_count": 165208}
{"score": 8.25, "timestamp": 1444982827.0, "score_count": 150171}
{"score": 8.25, "timestamp": 1442286846.0, "score_count": 147979}
{"score": 8.25, "timestamp": 1441705769.0, "score_count": 147531}
{"score": 8.26, "timestamp": 1439553727.0, "score_count": 145625}
{"score": 8.06, "timestamp": 1564357491.0, "score_count": 244155}
{"score": 8.06, "timestamp": 1563037380.0, "score_count": 243384}
{"score": 8.06, "timestamp": 1562902104.0, "score_count": 243384}
{"score": 8.06, "timestamp": 1562190526.0, "score_count": 242743}
{"score": 8.06, "timestamp": 1561844942.0, "score_count": 242743}
{"score": 8.06, "timestamp": 1561836533.0, "score_count": 242743}
{"score": 8.07, "timestamp": 1561523365.0, "score_count": 242413}
{"score": 8.07, "timestamp": 1561253409.0, "score_count": 242413}
{"score": 8.07, "timestamp": 1561164753.0, "score_count": 242103}
{"score": 8.07, "timestamp": 1560997683.0, "score_count": 242103}
{"score": 8.07, "timestamp": 1560892794.0, "score_count": 242103}
{"score": 8.07, "timestamp": 1560828587.0, "score_count": 242103}
{"score": 8.07, "timestamp": 1560817712.0, "score_count": 242103}
{"score": 8.07, "timestamp": 1560545428.0, "score_count": 241838}
{"score": 8.07, "timestamp": 1560398612.0, "score_count": 241838}
{"score": 8.07, "timestamp": 1560290569.0, "score_count": 241838}
{"score": 8.07, "timestamp": 1560246048.0, "score_count": 241838}
{"score": 8.07, "timestamp": 1560193280.0, "score_count": 241838}
{"score": 8.07, "timestamp": 1560056740.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1560031181.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1560019574.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1559962538.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1559956315.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1559953411.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1559946789.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1559943966.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1559940608.0, "score_count": 241711}
{"score": 8.07, "timestamp": 1559869766.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559843427.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559838796.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559785813.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559773367.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559760117.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559701080.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559678935.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559626276.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559620876.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559404640.0, "score_count": 241418}
{"score": 8.07, "timestamp": 1559323906.0, "score_count": 241207}
{"score": 8.07, "timestamp": 1559259561.0, "score_count": 241207}
{"score": 8.07, "timestamp": 1559185494.0, "score_count": 241207}
{"score": 8.07, "timestamp": 1559064963.0, "score_count": 241207}
{"score": 8.07, "timestamp": 1559003064.0, "score_count": 241027}
{"score": 8.07, "timestamp": 1558980303.0, "score_count": 241027}
{"score": 8.07, "timestamp": 1558883092.0, "score_count": 241027}
{"score": 8.07, "timestamp": 1558826329.0, "score_count": 241027}
{"score": 8.07, "timestamp": 1558803746.0, "score_count": 241027}
{"score": 8.07, "timestamp": 1558710714.0, "score_count": 240893}
{"score": 8.07, "timestamp": 1558567358.0, "score_count": 240893}
{"score": 8.07, "timestamp": 1558464124.0, "score_count": 240703}
{"score": 8.07, "timestamp": 1558463519.0, "score_count": 240703}
{"score": 8.07, "timestamp": 1558457680.0, "score_count": 240703}
{"score": 8.07, "timestamp": 1558047513.0, "score_count": 240579}
{"score": 8.07, "timestamp": 1557581323.0, "score_count": 240309}
{"score": 8.07, "timestamp": 1557518565.0, "score_count": 240309}
{"score": 8.07, "timestamp": 1557369276.0, "score_count": 240186}
{"score": 8.07, "timestamp": 1557282456.0, "score_count": 240186}
{"score": 8.07, "timestamp": 1557100566.0, "score_count": 239873}
{"score": 8.07, "timestamp": 1557097815.0, "score_count": 239873}
{"score": 8.07, "timestamp": 1557077344.0, "score_count": 239873}
{"score": 8.07, "timestamp": 1557019008.0, "score_count": 239873}
{"score": 8.07, "timestamp": 1556833177.0, "score_count": 239873}
{"score": 8.07, "timestamp": 1556832100.0, "score_count": 239873}
{"score": 8.07, "timestamp": 1556655929.0, "score_count": 239729}
{"score": 8.07, "timestamp": 1556638052.0, "score_count": 239729}
{"score": 8.07, "timestamp": 1556632641.0, "score_count": 239729}
{"score": 8.07, "timestamp": 1556508052.0, "score_count": 239729}
{"score": 8.07, "timestamp": 1556396947.0, "score_count": 239609}
{"score": 8.07, "timestamp": 1556311418.0, "score_count": 239609}
{"score": 8.07, "timestamp": 1556236238.0, "score_count": 239609}
{"score": 8.07, "timestamp": 1556141272.0, "score_count": 239444}
{"score": 8.07, "timestamp": 1556057467.0, "score_count": 239444}
{"score": 8.07, "timestamp": 1555967573.0, "score_count": 239444}
{"score": 8.07, "timestamp": 1555446393.0, "score_count": 239162}
{"score": 8.07, "timestamp": 1555378287.0, "score_count": 238978}
{"score": 8.07, "timestamp": 1555205983.0, "score_count": 238978}
{"score": 8.07, "timestamp": 1555017773.0, "score_count": 238832}
{"score": 8.07, "timestamp": 1554949617.0, "score_count": 238832}
{"score": 8.07, "timestamp": 1554929234.0, "score_count": 238832}
{"score": 8.07, "timestamp": 1554586524.0, "score_count": 238529}
{"score": 8.07, "timestamp": 1554413105.0, "score_count": 238529}
{"score": 8.07, "timestamp": 1554412442.0, "score_count": 238529}
{"score": 8.07, "timestamp": 1554392756.0, "score_count": 238529}
{"score": 8.07, "timestamp": 1554059339.0, "score_count": 238316}
{"score": 8.07, "timestamp": 1553975343.0, "score_count": 238180}
{"score": 8.07, "timestamp": 1553969103.0, "score_count": 238180}
{"score": 8.07, "timestamp": 1553823754.0, "score_count": 238180}
{"score": 8.07, "timestamp": 1553805854.0, "score_count": 238180}
{"score": 8.07, "timestamp": 1553634444.0, "score_count": 238023}
{"score": 8.07, "timestamp": 1553633529.0, "score_count": 238023}
{"score": 8.07, "timestamp": 1553557633.0, "score_count": 238023}
{"score": 8.07, "timestamp": 1553402886.0, "score_count": 237858}
{"score": 8.07, "timestamp": 1553319046.0, "score_count": 237858}
{"score": 8.07, "timestamp": 1553317869.0, "score_count": 237858}
{"score": 8.07, "timestamp": 1553313735.0, "score_count": 237858}
{"score": 8.07, "timestamp": 1553289134.0, "score_count": 237858}
{"score": 8.07, "timestamp": 1553216620.0, "score_count": 237690}
{"score": 8.07, "timestamp": 1553135565.0, "score_count": 237690}
{"score": 8.07, "timestamp": 1553130946.0, "score_count": 237690}
{"score": 8.07, "timestamp": 1553128587.0, "score_count": 237690}
{"score": 8.07, "timestamp": 1552987210.0, "score_count": 237543}
{"score": 8.07, "timestamp": 1552782150.0, "score_count": 237543}
{"score": 8.07, "timestamp": 1552773730.0, "score_count": 237543}
{"score": 8.07, "timestamp": 1552747773.0, "score_count": 237543}
{"score": 8.07, "timestamp": 1552680529.0, "score_count": 237422}
{"score": 8.07, "timestamp": 1552599311.0, "score_count": 237422}
{"score": 8.07, "timestamp": 1552528857.0, "score_count": 237422}
{"score": 8.07, "timestamp": 1552508398.0, "score_count": 237422}
{"score": 8.07, "timestamp": 1552421589.0, "score_count": 237260}
{"score": 8.07, "timestamp": 1552363442.0, "score_count": 237260}
{"score": 8.07, "timestamp": 1552339350.0, "score_count": 237260}
{"score": 8.07, "timestamp": 1552254323.0, "score_count": 237260}
{"score": 8.07, "timestamp": 1552230176.0, "score_count": 237260}
{"score": 8.07, "timestamp": 1552184939.0, "score_count": 237067}
{"score": 8.07, "timestamp": 1552147466.0, "score_count": 237067}
{"score": 8.07, "timestamp": 1552100757.0, "score_count": 237067}
{"score": 8.07, "timestamp": 1552091195.0, "score_count": 237067}
{"score": 8.07, "timestamp": 1552012358.0, "score_count": 237067}
{"score": 8.07, "timestamp": 1551907103.0, "score_count": 236892}
{"score": 8.07, "timestamp": 1551580898.0, "score_count": 236688}
{"score": 8.07, "timestamp": 1551313462.0, "score_count": 236533}
{"score": 8.07, "timestamp": 1551224789.0, "score_count": 236533}
{"score": 8.07, "timestamp": 1551221944.0, "score_count": 236533}
{"score": 8.07, "timestamp": 1551215674.0, "score_count": 236533}
{"score": 8.07, "timestamp": 1551139812.0, "score_count": 236533}
{"score": 8.07, "timestamp": 1551128162.0, "score_count": 236533}
{"score": 8.07, "timestamp": 1551061505.0, "score_count": 236317}
{"score": 8.07, "timestamp": 1551059095.0, "score_count": 236317}
{"score": 8.07, "timestamp": 1551045165.0, "score_count": 236317}
{"score": 8.07, "timestamp": 1551025052.0, "score_count": 236317}
{"score": 8.07, "timestamp": 1551019828.0, "score_count": 236317}
{"score": 8.07, "timestamp": 1550977088.0, "score_count": 236317}
{"score": 8.07, "timestamp": 1550944896.0, "score_count": 236317}
{"score": 8.07, "timestamp": 1550697923.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550696168.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550627487.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550621987.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550618620.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550539980.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550511362.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550450092.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550448923.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550425432.0, "score_count": 236101}
{"score": 8.07, "timestamp": 1550283921.0, "score_count": 235909}
{"score": 8.07, "timestamp": 1550272963.0, "score_count": 235909}
{"score": 8.07, "timestamp": 1550193416.0, "score_count": 235909}
{"score": 8.08, "timestamp": 1550017054.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1550006592.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1550005032.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1549945599.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1549930154.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1549925479.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1549907089.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1549846558.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1549752038.0, "score_count": 235703}
{"score": 8.08, "timestamp": 1548612648.0, "score_count": 234973}
{"score": 8.08, "timestamp": 1547324635.0, "score_count": 234000}
{"score": 8.08, "timestamp": 1545418256.0, "score_count": 232888}
{"score": 8.08, "timestamp": 1545349675.0, "score_count": 232677}
{"score": 8.08, "timestamp": 1545087179.0, "score_count": 232677}
{"score": 8.08, "timestamp": 1539895330.0, "score_count": 229769}
{"score": 8.09, "timestamp": 1536798557.0, "score_count": 227840}
{"score": 8.09, "timestamp": 1534942311.0, "score_count": 226665}
{"score": 8.09, "timestamp": 1530985863.0, "score_count": 224287}
{"score": 8.09, "timestamp": 1529074145.0, "score_count": 223706}
{"score": 8.09, "timestamp": 1528939429.0, "score_count": 223665}
{"score": 8.09, "timestamp": 1528923676.0, "score_count": 223665}
{"score": 8.1, "timestamp": 1526547588.0, "score_count": 222352}
{"score": 8.1, "timestamp": 1525237024.0, "score_count": 221360}
{"score": 8.1, "timestamp": 1522273227.0, "score_count": 218643}
{"score": 8.11, "timestamp": 1520558798.0, "score_count": 217156}
{"score": 8.15, "timestamp": 1492438459.0, "score_count": 193604}
{"score": 8.15, "timestamp": 1492432480.0, "score_count": 193604}
{"score": 8.16, "timestamp": 1487116102.0, "score_count": 189287}
{"score": 8.16, "timestamp": 1485886390.0, "score_count": 188256}
{"score": 8.16, "timestamp": 1484582824.0, "score_count": 187070}
{"score": 8.16, "timestamp": 1484582656.0, "score_count": 187070}
{"score": 8.19, "timestamp": 1472985550.0, "score_count": 177231}
{"score": 8.19, "timestamp": 1472985549.0, "score_count": 177231}
{"score": 8.19, "timestamp": 1471570309.0, "score_count": 175883}
{"score": 8.19, "timestamp": 1468025360.0, "score_count": 172390}
{"score": 8.2, "timestamp": 1466066175.0, "score_count": 170252}
{"score": 8.2, "timestamp": 1465515181.0, "score_count": 169701}
{"score": 8.2, "timestamp": 1464843282.0, "score_count": 169062}
{"score": 8.21, "timestamp": 1463801656.0, "score_count": 168059}
{"score": 8.21, "timestamp": 1461846842.0, "score_count": 166352}
{"score": 8.21, "timestamp": 1461581561.0, "score_count": 166105}
{"score": 8.21, "timestamp": 1461255784.0, "score_count": 165782}
{"score": 8.21, "timestamp": 1460535721.0, "score_count": 165024}
{"score": 8.21, "timestamp": 1459930972.0, "score_count": 164366}
{"score": 8.21, "timestamp": 1459324804.0, "score_count": 163636}
{"score": 8.22, "timestamp": 1458292227.0, "score_count": 162454}
{"score": 8.22, "timestamp": 1458175372.0, "score_count": 162332}
{"score": 8.22, "timestamp": 1458121649.0, "score_count": 162292}
{"score": 8.22, "timestamp": 1457606419.0, "score_count": 161708}
{"score": 8.22, "timestamp": 1457499231.0, "score_count": 161534}
{"score": 8.22, "timestamp": 1456808459.0, "score_count": 160941}
{"score": 8.22, "timestamp": 1455635742.0, "score_count": 159820}
{"score": 8.22, "timestamp": 1454983486.0, "score_count": 159250}
{"score": 8.22, "timestamp": 1454372576.0, "score_count": 158720}
{"score": 8.23, "timestamp": 1453763673.0, "score_count": 158171}
{"score": 8.23, "timestamp": 1453156506.0, "score_count": 157575}
{"score": 8.23, "timestamp": 1452411517.0, "score_count": 156753}
{"score": 8.23, "timestamp": 1452983213.0, "score_count": 157398}
{"score": 8.23, "timestamp": 1452077136.0, "score_count": 156348}
{"score": 8.23, "timestamp": 1451761815.0, "score_count": 155917}
{"score": 8.23, "timestamp": 1451612800.0, "score_count": 155774}
{"score": 8.23, "timestamp": 1451035607.0, "score_count": 155138}
{"score": 8.24, "timestamp": 1450404652.0, "score_count": 154564}
{"score": 8.24, "timestamp": 1449791564.0, "score_count": 153961}
{"score": 8.24, "timestamp": 1448603222.0, "score_count": 153050}
{"score": 8.24, "timestamp": 1448598965.0, "score_count": 153047}
{"score": 8.24, "timestamp": 1447988532.0, "score_count": 152602}
{"score": 8.24, "timestamp": 1446774591.0, "score_count": 151652}
{"score": 8.24, "timestamp": 1446153694.0, "score_count": 151152}
{"score": 8.25, "timestamp": 1445549203.0, "score_count": 150651}
{"score": 8.25, "timestamp": 1444934965.0, "score_count": 150121}
{"score": 8.25, "timestamp": 1443245388.0, "score_count": 148725}
{"score": 8.25, "timestamp": 1441518519.0, "score_count": 147377}
{"score": 8.25, "timestamp": 1440160182.0, "score_count": 146200}
{"score": 8.26, "timestamp": 1439549050.0, "score_count": 145619}
| [
4895,
26675,
1298,
807,
13,
3023,
11,
366,
16514,
27823,
1298,
1315,
1795,
1238,
4761,
1433,
13,
15,
11,
366,
26675,
62,
9127,
1298,
17759,
30057,
92,
198,
4895,
26675,
1298,
807,
13,
3312,
11,
366,
16514,
27823,
1298,
1315,
3134,
131... | 2.360307 | 7,044 |
import UUIDs
# This function is based off of a similar function here:
# https://github.com/JuliaRegistries/RegistryCI.jl/blob/master/src/RegistryCI.jl
"""
    gather_stdlib_uuids()

Return the set of UUIDs of all standard libraries known to Pkg.
"""
function gather_stdlib_uuids()
    # `Pkg.Types.stdlib` was renamed to `Pkg.Types.stdlibs` in newer Julia
    # versions; use whichever is defined so this works across versions.
    stdlib_dict = isdefined(Pkg.Types, :stdlibs) ? Pkg.Types.stdlibs() : Pkg.Types.stdlib()
    return Set{UUIDs.UUID}(keys(stdlib_dict))
end
| [
11748,
471,
27586,
82,
198,
198,
2,
770,
2163,
318,
1912,
572,
286,
257,
2092,
2163,
994,
25,
198,
2,
3740,
1378,
12567,
13,
785,
14,
16980,
544,
8081,
32995,
14,
8081,
4592,
25690,
13,
20362,
14,
2436,
672,
14,
9866,
14,
10677,
1... | 2.728261 | 92 |
<filename>src/Backends/Hive.jl
module HiveLoader
# Octo backend for Hive (https://github.com/JuliaDatabases/Hive.jl v0.3.0).

using Hive # HiveSession HiveAuth
using Octo.Repo: ExecuteResult

# Backend state: holds the active Hive session, or `nothing` when
# disconnected.
const current = Dict{Symbol, Any}(
    :sess => nothing,
)

# Return the active session (may be `nothing`).
current_sess() = current[:sess]

# db_connect
function db_connect(; host::String="localhost", port::Integer=10000, auth::HiveAuth=HiveAuth(), tprotocol::Symbol=:binary)
    session = HiveSession(host, port, auth; tprotocol=tprotocol)
    current[:sess] = session
end

# db_disconnect
function db_disconnect()
    session = current_sess()
    session isa HiveSession || return
    Hive.close(session)
    current[:sess] = nothing
end

# query: run `sql` and materialize the result set as a vector of NamedTuples
# keyed by the result schema's column names.
function query(sql::String)
    session = current_sess()
    rs = Hive.result(Hive.execute(session, sql))
    sch = Hive.schema(rs)
    column_names = tuple(Symbol.(getproperty.(sch.columns, :columnName))...)
    recs = reduce(vcat, Hive.records(rs))
    rows = NamedTuple{column_names}.(recs)
    Hive.close(rs)
    rows
end

function query(prepared::String, vals::Vector) # throw UnsupportedError
    throw(UnsupportedError("needs to be implemented"))
end

# execute: run `sql` for its side effects; the Hive result is discarded.
function execute(sql::String)::ExecuteResult
    session = current_sess()
    Hive.execute(session, sql)
    ExecuteResult()
end

function execute(prepared::String, vals::Vector)::ExecuteResult # throw UnsupportedError
    throw(UnsupportedError("needs to be implemented"))
end

function execute(prepared::String, nts::Vector{<:NamedTuple})::ExecuteResult # throw UnsupportedError
    throw(UnsupportedError("needs to be implemented"))
end

end # module Octo.Backends.HiveLoader
| [
27,
34345,
29,
10677,
14,
7282,
2412,
14,
39,
425,
13,
20362,
198,
21412,
33235,
17401,
198,
198,
2,
3740,
1378,
12567,
13,
785,
14,
16980,
544,
27354,
18826,
14,
39,
425,
13,
20362,
410,
15,
13,
18,
13,
15,
198,
3500,
33235,
1303... | 2.715719 | 598 |
# Reshuffle `t` so that all indices but the last sit in the codomain.
function _permute_front(t::AbstractTensorMap) # make TensorMap{S,N₁+N₂-1,1}
    I = TensorKit.allind(t) # = (1:N₁+N₂...,)
    if BraidingStyle(sectortype(t)) isa SymmetricBraiding
        permute(t, Base.front(I), (I[end],))
    else
        # Non-symmetric braiding requires explicit crossing levels; use the
        # natural index order.
        levels = I
        braid(t, levels, Base.front(I), (I[end],))
    end
end
# Reshuffle `t` so that only the first index sits in the codomain.
function _permute_tail(t::AbstractTensorMap) # make TensorMap{S,1,N₁+N₂-1}
    I = TensorKit.allind(t) # = (1:N₁+N₂...,)
    if BraidingStyle(sectortype(t)) isa SymmetricBraiding
        permute(t, (I[1],), Base.tail(I))
    else
        # Non-symmetric braiding requires explicit crossing levels.
        levels = I
        braid(t, levels, (I[1],), Base.tail(I))
    end
end
# Reshuffle `t1` so its codomain/domain index split matches that of `t2`.
function _permute_as(t1::AbstractTensorMap, t2::AbstractTensorMap)
    if BraidingStyle(sectortype(t1)) isa SymmetricBraiding
        permute(t1, TensorKit.codomainind(t2), TensorKit.domainind(t2))
    else
        # Bug fix: `levels` was computed but never passed, so the braided
        # branch called `braid` without the crossing levels that the sibling
        # helpers (_permute_front/_permute_tail) supply. Also qualify
        # `allind` for consistency with those helpers.
        levels = TensorKit.allind(t1)
        braid(t1, levels, TensorKit.codomainind(t2), TensorKit.domainind(t2))
    end
end
# First and last index spaces of a tensor map.
_firstspace(t::AbstractTensorMap) = space(t, 1)
_lastspace(t::AbstractTensorMap) = space(t, numind(t))
"
Returns spin operators Sx,Sy,Sz,Id for spin s
"
function spinmatrices(s::Union{Rational{Int},Int})
N = Int(2*s)
Sx=zeros(Defaults.eltype,N+1,N+1)
Sy=zeros(Defaults.eltype,N+1,N+1)
Sz=zeros(Defaults.eltype,N+1,N+1)
for row=1:(N+1)
for col=1:(N+1)
term=sqrt((s+1)*(row+col-1)-row*col)/2.0
if (row+1==col)
Sx[row,col]+=term
Sy[row,col]-=1im*term
end
if(row==col+1)
Sx[row,col]+=term
Sy[row,col]+=1im*term
end
if(row==col)
Sz[row,col]+=s+1-row
end
end
end
return Sx,Sy,Sz,one(Sx)
end
# Wrap the dense spin-s matrices in TensorMaps over a ComplexSpace of
# matching dimension (no symmetry sectors).
function nonsym_spintensors(s)
    Sxm, Sym, Szm = spinmatrices(s)
    ps = ComplexSpace(size(Sxm, 1))

    Sx = TensorMap(Sxm, ps, ps)
    Sy = TensorMap(Sym, ps, ps)
    Sz = TensorMap(Szm, ps, ps)

    return Sx, Sy, Sz, one(Sx)
end
#given a hamiltonian with unit legs on the side, decompose it using svds to form a "localmpo"
# Recursive SVD splitting: the base case (4 indices) yields a single on-site
# tensor; otherwise the two leftmost physical indices (plus the matching
# domain index) are split off via tsvd and the remainder is decomposed
# recursively. Each returned tensor is permuted to the (1,2)×(4,3) MPO
# index convention used throughout.
function decompose_localmpo(inpmpo::AbstractTensorMap{PS,N1,N2}) where {PS,N1,N2}
    numind=N1+N2
    if(numind==4)
        return [permute(inpmpo,(1,2),(4,3))]
    end

    leftind=(1,2,Int(numind/2+1))
    otherind=(ntuple(x->x+2,Val{Int((N1+N2)/2)-2}())..., ntuple(x->x+Int(numind/2+1),Val{Int((N1+N2)/2)-1}())...)

    (U,S,V) = tsvd(inpmpo,leftind,otherind)
    # Absorb the singular values into the left factor.
    T=U*S
    T=permute(T,(1,2),(4,3))
    return [T;decompose_localmpo(V)]
end
function add_util_leg(tensor::AbstractTensorMap{S,N1,N2}) where {S,N1,N2}
#ntuple(x->x,Val{3+4}())
util=Tensor(ones,eltype(tensor),oneunit(space(tensor,1)))
tensor1=util*permute(tensor,(),ntuple(x->x,Val{N1+N2}()))
return permute(tensor1,ntuple(x->x,Val{N1+N2+1}()),())*util'
end
| [
8818,
4808,
16321,
1133,
62,
8534,
7,
83,
3712,
23839,
51,
22854,
13912,
8,
1303,
787,
309,
22854,
13912,
90,
50,
11,
45,
158,
224,
223,
10,
45,
158,
224,
224,
12,
16,
11,
16,
92,
198,
220,
220,
220,
314,
796,
309,
22854,
20827,... | 1.839974 | 1,531 |
<reponame>gcleroux/SnakeAI.jl
# Run one environment step of the training loop: observe, act, learn from the
# immediate transition, store it, and — on episode end — replay-train and reset.
# Returns `done` so the caller can count finished episodes.
function train!(agent::AbstractAgent, game::SnakeAI.Game)
    # Get the current step
    old_state = SnakeAI.get_state(game)
    # Get the predicted move for the state
    move = get_action(agent, old_state)
    SnakeAI.send_inputs!(game, move)
    # Play the step
    reward, done, score = SnakeAI.play_step!(game)
    new_state = SnakeAI.get_state(game)
    # Train the short memory
    train_short_memory(agent, old_state, move, reward, new_state, done)
    # Remember
    remember(agent, old_state, move, reward, new_state, done)
    if done
        # Episode over: train on replay memory, then reset the game
        train_long_memory(agent)
        SnakeAI.reset!(game)
        agent.n_games += 1
        if score > agent.record
            agent.record = score
            # save_model(joinpath(MODELS_PATH, "model_$(agent.n_games).bson"), agent.model)
        end
    end
    return done
end
# Append one (state, action, reward, next_state, done) transition to the agent's
# replay buffer. `reward` and `done` are stored as 1-element vectors so that
# transitions batch uniformly later; `done` is stored as 0/1 integers.
function remember(
    agent::AbstractAgent,
    state::S,
    action::S,
    reward::T,
    next_state::S,
    done::Bool
) where {T<:Integer,S<:AbstractArray{<:T}}
    transition = (state, action, [reward], next_state, convert.(Int, [done]))
    push!(agent.memory.data, transition)
end
# "Short memory" training: immediately fit the agent on the single transition
# that was just observed, delegating straight to `update!`.
function train_short_memory(
    agent::AbstractAgent, state::S, action::S, reward::T, next_state::S, done::Bool
) where {T<:Integer,S<:AbstractArray{<:T}}
    return update!(agent, state, action, reward, next_state, done)
end
# "Long memory" (experience replay) training: fit the agent on a batch sampled
# from the replay buffer — or the whole buffer if it is still small.
function train_long_memory(agent::AbstractAgent)
    buffer = agent.memory.data
    batch = length(buffer) > BATCH_SIZE ? sample(buffer, BATCH_SIZE) : buffer
    # Regroup the batch of 5-tuples into per-field collections:
    # (states, actions, rewards, next_states, dones).
    states, actions, rewards, next_states, dones =
        map(f -> getfield.(batch, f), fieldnames(eltype(batch)))
    update!(agent, states, actions, rewards, next_states, dones)
end
# ϵ-greedy action selection: the exploration threshold shrinks linearly with the
# number of games played; below it a random move is taken, otherwise the model's
# argmax. Returns a one-hot Vector{Int} of length 3.
function get_action(agent::SnakeAgent, state::AbstractArray{<:Integer}; rand_range=1:200)
    agent.ϵ = 80 - agent.n_games
    onehot = zeros(Int, 3)
    if rand(rand_range) < agent.ϵ
        # Explore: uniformly random move.
        onehot[rand(1:3)] = 1
    else
        # Exploit: greedy move from the model's scores.
        scores = agent.model(state)
        onehot[Flux.onecold(scores)] = 1
    end
    return onehot
end
# One Q-learning update of the agent's model. Accepts either a single transition
# (arrays) or a batch (arrays of arrays); `α` is the discount applied to the
# next-state value estimate. Targets are built per Q-learning:
#   Q(s,a) ← r + α·max_a' Q(s',a')   (no bootstrap term when the episode ended).
function update!(
    agent::SnakeAgent,
    state::Union{A,AA},
    action::Union{A,AA},
    reward::Union{T,AA},
    next_state::Union{A,AA},
    done::Union{Bool,AA};
    α::Float32=0.9f0 # Step size
) where {T<:Integer,A<:AbstractArray{<:T},AA<:AbstractArray{A}}
    # Batching the states and converting data to Float32 (done implicitly otherwise)
    state = Flux.batch(state) |> x -> convert.(Float32, x)
    next_state = Flux.batch(next_state) |> x -> convert.(Float32, x)
    action = Flux.batch(action) |> x -> convert.(Float32, x)
    reward = Flux.batch(reward) |> x -> convert.(Float32, x)
    done = Flux.batch(done)
    # Model's prediction for next state (outside the gradient: treated as constant)
    y = agent.model(next_state)
    # Get the model's params for back propagation
    ps = Flux.params(agent.model)
    # Calculate the gradients
    gs = Flux.gradient(ps) do
        # Forward pass
        ŷ = agent.model(state)
        # Creating buffer to allow mutability when calculating gradients
        # (Zygote.Buffer permits indexed writes inside the gradient closure)
        Rₙ = Buffer(ŷ, size(ŷ))
        # Adjusting values of current state with next state's knowledge
        for idx in 1:length(done)
            # Copy preds into buffer
            Rₙ[:, idx] = ŷ[:, idx]
            Qₙ = reward[idx]
            if done[idx] == false
                # Bootstrap with the discounted best next-state value
                Qₙ += α * maximum(y[:, idx])
            end
            # Adjusting the expected reward for selected move
            Rₙ[argmax(action[:, idx]), idx] = Qₙ
        end
        # Calculate the loss (copy materializes the Buffer for the loss fn)
        agent.criterion(ŷ, copy(Rₙ))
    end
    # Update model weights
    Flux.Optimise.update!(agent.opt, ps, gs)
end
| [
27,
7856,
261,
480,
29,
70,
22902,
22193,
14,
49795,
20185,
13,
20362,
198,
8818,
4512,
0,
7,
25781,
3712,
23839,
36772,
11,
983,
3712,
49795,
20185,
13,
8777,
8,
198,
220,
220,
220,
1303,
3497,
262,
1459,
2239,
198,
220,
220,
220,
... | 2.324274 | 1,619 |
# Script: build a constraint-based metabolic model from a VFF reaction file and
# serialize it to a BSON system-model file. Each builder returns a Result-like
# value that `check` unwraps (raising on failure).
using VLConstraintBasedModelGenerationUtilities
# setup path to protein sequence file -
# NOTE(review): absolute, machine-specific paths — consider making these relative
# to the project (e.g. via joinpath(@__DIR__, ...)).
path_to_vff_file = "/Users/jeffreyvarner/Desktop/julia_work/VLConstraintBasedModelGenerationUtilities.jl/test/data/Test.vff"
path_to_system_model_file = "/Users/jeffreyvarner/Desktop/julia_work/VLConstraintBasedModelGenerationUtilities.jl/test/data/Test.bson"
# let's build the reaction table -
metabolic_reaction_table = build_metabolic_reaction_table(path_to_vff_file) |> check
# build the stm -
stm = build_stoichiometric_matrix(metabolic_reaction_table) |> check
# build the species bounds array -
species_table = build_species_table(metabolic_reaction_table) |> check
species_bounds_array = build_species_bounds_array(species_table) |> check
# build the flux bounds array -
flux_bounds_array = build_flux_bounds_array(metabolic_reaction_table) |> check
# write the system model file -
result = write_system_model_file(path=path_to_system_model_file, stoichiometric_matrix=stm,
    flux_bounds_array=flux_bounds_array, species_bounds_array=species_bounds_array)
| [
3500,
569,
43,
3103,
2536,
2913,
15001,
17633,
8645,
341,
18274,
2410,
198,
198,
2,
9058,
3108,
284,
7532,
8379,
2393,
532,
198,
6978,
62,
1462,
62,
85,
487,
62,
7753,
796,
12813,
14490,
14,
73,
14822,
4364,
7785,
1008,
14,
36881,
1... | 2.917582 | 364 |
using Printf
using BenchmarkTools
# Explicit finite-difference solver for the 2-D heat equation on [0,π]² with
# Dirichlet boundary T = 1, writing Tecplot-format snapshots to "Julia.dat"
# every 500 steps and at the final time. Loop-based reference implementation.
function heat()
    N = 1001
    T0 = Matrix{Float64}(undef, N, N)
    T1 = Matrix{Float64}(undef, N, N)
    x = Matrix{Float64}(undef, N, N)
    y = Matrix{Float64}(undef, N, N)
    a = 0
    b = π;
    dx = (b - a) / (N - 1)
    # Coordinate grids (column-major friendly: j outer, i inner).
    for j = 1:N
        for i = 1:N
            x[i, j] = (i - 1) * dx
        end
    end
    for j = 1:N
        for i = 1:N
            y[i, j] = (j - 1) * dx
        end
    end
    dt = dx
    tmax = (10 - mod(10, dt)) / dt
    k = 0.25 * dx            # diffusivity
    alpha = k * dt / dx^2    # stencil coefficient k·dt/dx²
    # Dirichlet boundary T = 1 on all four edges of both buffers.
    for i = 1:N
        T1[1, i] = 1.0
        T1[N, i] = 1.0
        T1[i, 1] = 1.0
        T1[i, N] = 1.0
        T0[1, i] = 1.0
        T0[N, i] = 1.0
        T0[i, 1] = 1.0
        T0[i, N] = 1.0
    end
    t = 0 * dt
    vel = open("Julia.dat", "w")
    # Helper: write one Tecplot zone (header + x, y, T field values) at time `t`.
    function write_snapshot(t)
        write(vel, "title =\"ZoneTime_", string(t), "\"", " \n")
        write(vel, "variables = \"x\", \"y\", \"T\"", " \n")
        write(vel, "zone T=\"Zone_" * string(t) * "\" i=", @sprintf("%d", N), " j=", @sprintf("%d", N), " \n")
        for j = 1:N
            for i = 1:N
                write(vel, @sprintf("%1.9e", x[i, j]), " ", @sprintf("%1.9e", y[i, j]), " ", @sprintf("%1.9e", T0[i, j]), " \n")
            end
        end
    end
    write_snapshot(t)
    for step = 1:tmax
        for j = 2:N-1
            for i = 2:N-1
                T1[i, j] = T0[i, j] + alpha * ((T0[i+1, j] - 2 * T0[i, j] + T0[i-1, j]) +
                                               (T0[i, j+1] - 2 * T0[i, j] + T0[i, j-1]))
            end
        end
        T0 .= T1
        if mod(step, 500) == 0
            # BUG FIX: `t` was never advanced here, so every intermediate
            # snapshot was labelled with t = 0. Use the actual solution time.
            t = step * dt
            write_snapshot(t)
        end
    end
    t = tmax * dt
    write_snapshot(t)
    close(vel)
end
# Vectorized variant of `heat()`: identical explicit heat-equation solve, but
# grid setup and the update stencil use broadcasting instead of explicit loops.
# Writes Tecplot-format snapshots to "Julia.dat".
function heat_Vec()
    N = 1001
    T0 = Matrix{Float64}(undef, N, N)
    T1 = Matrix{Float64}(undef, N, N)
    x = Matrix{Float64}(undef, N, N)
    y = Matrix{Float64}(undef, N, N)
    a = 0
    b = π;
    dx = (b - a) / (N - 1)
    # Coordinate grids via broadcast: x varies along rows, y along columns.
    x[:, 1] = (0:N-1) * dx
    x[:, :] .= x[:, 1]
    y[1, :] = (0:N-1) * dx
    y[:, :] .= y[1, :]
    dt = dx
    tmax = (10 - mod(10, dt)) / dt
    k = 0.25 * dx            # diffusivity
    alpha = k * dt / dx^2    # stencil coefficient k·dt/dx²
    # Dirichlet boundary T = 1 on all four edges of both buffers.
    T1[1, :] .= 1.0
    T1[N, :] .= 1.0
    T1[:, 1] .= 1.0
    T1[:, N] .= 1.0
    T0[1, :] .= 1.0
    T0[N, :] .= 1.0
    T0[:, 1] .= 1.0
    T0[:, N] .= 1.0
    t = 0 * dt
    vel = open("Julia.dat", "w")
    # Helper: write one Tecplot zone (header + x, y, T field values) at time `t`.
    function write_snapshot(t)
        write(vel, "title =\"ZoneTime_", string(t), "\"", " \n")
        write(vel, "variables = \"x\", \"y\", \"T\"", " \n")
        write(vel, "zone T=\"Zone_" * string(t) * "\" i=", @sprintf("%d", N), " j=", @sprintf("%d", N), " \n")
        for j = 1:N
            for i = 1:N
                write(vel, @sprintf("%1.9e", x[i, j]), " ", @sprintf("%1.9e", y[i, j]), " ", @sprintf("%1.9e", T0[i, j]), " \n")
            end
        end
    end
    write_snapshot(t)
    for step = 1:tmax
        T1[2:N-1, 2:N-1] .= T0[2:N-1, 2:N-1] .+ alpha .* ((T0[3:N, 2:N-1] .- 2 .* T0[2:N-1, 2:N-1] .+ T0[1:N-2, 2:N-1]) .+
                                                          (T0[2:N-1, 3:N] .- 2 .* T0[2:N-1, 2:N-1] .+ T0[2:N-1, 1:N-2]))
        T0 .= T1
        if mod(step, 500) == 0
            # BUG FIX: `t` was never advanced here, so every intermediate
            # snapshot was labelled with t = 0. Use the actual solution time.
            t = step * dt
            write_snapshot(t)
        end
    end
    t = tmax * dt
    write_snapshot(t)
    close(vel)
end
# Output-free variant of `heat()` for benchmarking the time-stepping kernel alone.
# FIX: the original also filled x/y coordinate grids that were never used in this
# variant (there is no file output); that dead O(N²) work has been removed.
function heat_wof()
    N = 1001
    T0 = Matrix{Float64}(undef, N, N)
    T1 = Matrix{Float64}(undef, N, N)
    a = 0
    b = π;
    dx = (b - a) / (N - 1)
    dt = dx
    tmax = (10 - mod(10, dt)) / dt
    k = 0.25 * dx            # diffusivity
    alpha = k * dt / dx^2    # stencil coefficient k·dt/dx²
    # Dirichlet boundary T = 1 on all four edges of both buffers.
    for i = 1:N
        T1[1, i] = 1.0
        T1[N, i] = 1.0
        T1[i, 1] = 1.0
        T1[i, N] = 1.0
        T0[1, i] = 1.0
        T0[N, i] = 1.0
        T0[i, 1] = 1.0
        T0[i, N] = 1.0
    end
    for step = 1:tmax
        for j = 2:N-1
            for i = 2:N-1
                T1[i, j] = T0[i, j] + alpha * ((T0[i+1, j] - 2 * T0[i, j] + T0[i-1, j]) +
                                               (T0[i, j+1] - 2 * T0[i, j] + T0[i, j-1]))
            end
        end
        T0 .= T1
    end
end
# Output-free vectorized variant (benchmark of the broadcast kernel alone).
# FIX: removed the x/y coordinate grids that were computed but never used in
# this no-output variant.
function heat_Vec_wof()
    N = 1001
    T0 = Matrix{Float64}(undef, N, N)
    T1 = Matrix{Float64}(undef, N, N)
    a = 0
    b = π;
    dx = (b - a) / (N - 1)
    dt = dx
    tmax = (10 - mod(10, dt)) / dt
    k = 0.25 * dx            # diffusivity
    alpha = k * dt / dx^2    # stencil coefficient k·dt/dx²
    # Dirichlet boundary T = 1 on all four edges of both buffers.
    T1[1, :] .= 1.0
    T1[N, :] .= 1.0
    T1[:, 1] .= 1.0
    T1[:, N] .= 1.0
    T0[1, :] .= 1.0
    T0[N, :] .= 1.0
    T0[:, 1] .= 1.0
    T0[:, N] .= 1.0
    for step = 1:tmax
        T1[2:N-1, 2:N-1] .= T0[2:N-1, 2:N-1] .+ alpha .* ((T0[3:N, 2:N-1] .- 2 .* T0[2:N-1, 2:N-1] .+ T0[1:N-2, 2:N-1]) .+
                                                          (T0[2:N-1, 3:N] .- 2 .* T0[2:N-1, 2:N-1] .+ T0[2:N-1, 1:N-2]))
        T0 .= T1
    end
end
# Benchmark all four variants (loop vs. vectorized × with/without file output).
# Note: the first two rewrite "Julia.dat" on every benchmark sample.
results1 = @benchmark heat()
results2 = @benchmark heat_Vec()
results3 = @benchmark heat_wof()
results4 = @benchmark heat_Vec_wof()
| [
3500,
12578,
69,
201,
198,
3500,
25187,
4102,
33637,
201,
198,
8818,
4894,
3419,
201,
198,
220,
220,
220,
399,
796,
1802,
16,
201,
198,
220,
220,
220,
309,
15,
796,
24936,
90,
43879,
2414,
92,
7,
917,
891,
11,
45,
11,
45,
8,
201... | 1.486057 | 4,339 |
<reponame>henrystoldt/fvCFD
######################### Global Time Stepping ###########################
# First-order explicit Euler step: u ← u + R(u)·dt, then re-derive primitives.
function forwardEuler(mesh::Mesh, fluxResidualFn, sln::SolutionState, boundaryConditions, fluid::Fluid, dt)
    sln.fluxResiduals = fluxResidualFn(mesh, sln, boundaryConditions, fluid)
    @fastmath sln.cellState .+= sln.fluxResiduals.*dt
    @fastmath decodeSolution_3D(sln, fluid)
    return sln
end
# Second-order midpoint Runge-Kutta step. NOTE: `solutionState2` shares its
# flux/primitive arrays with `sln` (only cellState is the halfway copy), so the
# intermediate decode/residual evaluation writes into sln's buffers by design.
function RK2_Mid(mesh, fluxResidualFn, sln, boundaryConditions, fluid::Fluid, dt)
    fluxResiduals1 = fluxResidualFn(mesh, sln, boundaryConditions, fluid)
    halfwayEstimate = sln.cellState .+ fluxResiduals1.*dt/2
    solutionState2 = SolutionState(halfwayEstimate, sln.cellFluxes, sln.cellPrimitives, sln.fluxResiduals, sln.faceFluxes)
    decodeSolution_3D(solutionState2, fluid)
    sln.fluxResiduals = fluxResidualFn(mesh, solutionState2, boundaryConditions, fluid)
    sln.cellState .+= sln.fluxResiduals.*dt
    decodeSolution_3D(sln, fluid)
    return sln
end
# Classic fourth-order Runge-Kutta step (four residual evaluations, weighted
# 1-2-2-1). The same aliasing note as RK2_Mid applies to `lastSolutionState`.
function RK4(mesh, fluxResidualFn, sln, boundaryConditions, fluid::Fluid, dt)
    fluxResiduals1 = fluxResidualFn(mesh, sln, boundaryConditions, fluid)
    halfwayEstimate = sln.cellState .+ fluxResiduals1*dt/2
    lastSolutionState = SolutionState(halfwayEstimate, sln.cellFluxes, sln.cellPrimitives, sln.fluxResiduals, sln.faceFluxes)
    decodeSolution_3D(lastSolutionState, fluid)
    fluxResiduals2 = fluxResidualFn(mesh, lastSolutionState, boundaryConditions, fluid)
    halfwayEstimate2 = sln.cellState .+ fluxResiduals2*dt/2
    lastSolutionState.cellState = halfwayEstimate2
    decodeSolution_3D(lastSolutionState, fluid)
    fluxResiduals3 = fluxResidualFn(mesh, lastSolutionState, boundaryConditions, fluid)
    finalEstimate1 = sln.cellState .+ fluxResiduals3*dt
    lastSolutionState.cellState = finalEstimate1
    decodeSolution_3D(lastSolutionState, fluid)
    fluxResiduals4 = fluxResidualFn(mesh, lastSolutionState, boundaryConditions, fluid)
    sln.cellState .+= (fluxResiduals1 .+ 2*fluxResiduals2 .+ 2*fluxResiduals3 .+ fluxResiduals4 )*(dt/6)
    decodeSolution_3D(sln, fluid)
    return sln
end
# Third-order strong-stability-preserving Runge-Kutta (Shu-Osher) step,
# recognizable by the 3/4-1/4 and 1/3-2/3 convex combinations.
function ShuOsher(mesh, fluxResidualFn, sln, boundaryConditions, fluid::Fluid, dt)
    fluxResiduals1 = fluxResidualFn(mesh, sln, boundaryConditions, fluid)
    endEstimate = sln.cellState .+ fluxResiduals1.*dt
    lastSolutionState = SolutionState(endEstimate, sln.cellFluxes, sln.cellPrimitives, sln.fluxResiduals, sln.faceFluxes)
    decodeSolution_3D(lastSolutionState, fluid)
    fluxResiduals2 = fluxResidualFn(mesh, lastSolutionState, boundaryConditions, fluid)
    estimate2 = (3/4).*sln.cellState .+ (1/4).*(endEstimate .+ fluxResiduals2.*dt)
    lastSolutionState.cellState = estimate2
    decodeSolution_3D(lastSolutionState, fluid)
    fluxResiduals3 = fluxResidualFn(mesh, lastSolutionState, boundaryConditions, fluid)
    sln.cellState .= (1/3).*sln.cellState .+ (2/3).*(estimate2 .+ dt.*fluxResiduals3)
    decodeSolution_3D(sln, fluid)
    return sln
end
######################### Local Time Stepping ###########################
# Incomplete, will be commented more fully once it produces nice solutions and the implementation is finalized
# Local-time-stepping Euler: dt arrives holding the target CFL in dt[1]; it is
# converted in place to a per-cell time step, smoothed twice, then applied.
function LTSEuler(mesh, fluxResidualFn, sln, boundaryConditions, fluid::Fluid, dt)
    targetCFL = dt[1]
    fluxResiduals = fluxResidualFn(mesh, sln, boundaryConditions, fluid)
    CFL!(dt, mesh, sln, fluid, 1)
    dt .= targetCFL ./ dt
    smoothTimeStep!(dt, mesh, 0.1)
    smoothTimeStep!(dt, mesh, 0.1)
    sln.cellState .+= fluxResiduals .* dt
    decodeSolution_3D(sln, fluid)
    return sln
end
# Diffuse the per-cell time-step field `dt` across interior faces so that
# neighbouring cells advance at similar rates. Each face exchanges a time "flux"
# proportional to the dt difference and the face area; only reductions
# (negative corrections) are applied, so no cell's time step ever grows.
function smoothTimeStep!(dt, mesh::Mesh, diffusionCoefficient=0.2)
    nCells, nFaces, nBoundaries, nBdryFaces = unstructuredMeshInfo(mesh)
    corrections = zeros(nCells)
    faceAreaSums = zeros(nCells)
    # Interior faces only (boundary faces assumed stored last — TODO confirm).
    for face in 1:nFaces-nBdryFaces
        owner = mesh.faces[face][1]
        neighbour = mesh.faces[face][2]
        faceArea = mag(mesh.fAVecs[face])
        flux = (dt[owner] - dt[neighbour]) * faceArea
        faceAreaSums[owner] += faceArea
        faceAreaSums[neighbour] += faceArea
        corrections[owner] -= flux
        corrections[neighbour] += flux
    end
    corrections .*= (diffusionCoefficient ./ faceAreaSums)
    # Keep only decreases; never increase a cell's time step.
    corrections .= min.(corrections, 0)
    dt .+= corrections
end
#TODO: For implicit methods, need to compute the flux Jacobians at each edge, instead of just the fluxes
# Use Jacobians as coefficients in matrix representing timestepping equations
# Then solve with GMRES or some other matrix solver
| [
27,
7856,
261,
480,
29,
831,
563,
301,
727,
83,
14,
69,
85,
22495,
35,
198,
14468,
7804,
2,
8060,
3862,
2441,
2105,
1303,
14468,
7804,
2235,
198,
8818,
2651,
36,
18173,
7,
76,
5069,
3712,
37031,
11,
28462,
4965,
312,
723,
37,
77,
... | 2.517808 | 1,825 |
################################################################################
#
# AlgAssRelOrd
#
################################################################################
# S is the element type of the base field of the algebra, T the fractional ideal
# type of this field
# Order in an algebra over a number field. Type parameters:
#   S — element type of the base field, T — fractional ideal type, U — algebra type.
# Most fields are caches filled lazily (hence loosely typed / undefined at start).
mutable struct AlgAssRelOrd{S, T, U} <: Ring
  algebra::U
  dim::Int
  pseudo_basis#::Vector{Tuple{AbsAlgAssElem{S}, T}}
  basis_matrix::Generic.MatSpaceElem{S}
  basis_mat_inv::Generic.MatSpaceElem{S}
  basis_pmatrix::PMat{S, T}

  disc # an integral ideal in the base field

  ismaximal::Int                   # 0 Not known
                                   # 1 Known to be maximal
                                   # 2 Known to not be maximal

  trred_matrix::Generic.MatSpaceElem{S}

  inv_coeff_ideals::Vector{T}

  isnice::Bool
  nice_order#Tuple{AlgAssAbsOrd, T}
  nice_order_ideal::T

  # Bare constructor: caches start unset; flags start at "unknown".
  function AlgAssRelOrd{S, T, U}(A::AbsAlgAss{S}) where {S, T, U}
    z = new{S, T, U}()
    z.algebra = A
    z.dim = dim(A)
    z.ismaximal = 0
    z.isnice = false
    return z
  end

  # Construct from a basis pseudo-matrix (matrix part mirrored separately).
  function AlgAssRelOrd{S, T, U}(A::U, M::PMat{S, T}) where {S, T, U}
    z = AlgAssRelOrd{S, T, U}(A)
    z.basis_pmatrix = M
    z.basis_matrix = M.matrix
    return z
  end

  # Construct from a plain basis matrix (pseudo-matrix derived from it).
  function AlgAssRelOrd{S, T, U}(A::U, M::Generic.MatSpaceElem{S}) where {S, T, U}
    z = AlgAssRelOrd{S, T, U}(A)
    z.basis_matrix = M
    z.basis_pmatrix = pseudo_matrix(M)
    return z
  end
end
################################################################################
#
# AlgAssRelOrdElem
#
################################################################################
# Element of an AlgAssRelOrd. Stores the underlying algebra element plus (lazily
# computed) coordinates with respect to the order's (pseudo-)basis.
mutable struct AlgAssRelOrdElem{S, T, U} <: RingElem
  parent::AlgAssRelOrd{S, T, U}
  elem_in_algebra::AbsAlgAssElem{S}
  coordinates::Vector{S}
  has_coord::Bool     # whether `coordinates` has been computed yet

  # Zero element of the order; coordinates left undefined.
  function AlgAssRelOrdElem{S, T, U}(O::AlgAssRelOrd{S, T, U}) where {S, T, U}
    z = new{S, T, U}()
    z.parent = O
    z.elem_in_algebra = zero(algebra(O))
    z.coordinates = Vector{S}(undef, degree(O))
    z.has_coord = false
    return z
  end

  # Wrap an algebra element; coordinates not yet computed.
  function AlgAssRelOrdElem{S, T, U}(O::AlgAssRelOrd{S, T, U}, a::AbsAlgAssElem{S}) where {S, T, U}
    z = new{S, T, U}()
    z.parent = O
    z.elem_in_algebra = a
    z.coordinates = Vector{S}(undef, degree(O))
    z.has_coord = false
    return z
  end

  # Wrap an algebra element with known coordinates.
  function AlgAssRelOrdElem{S, T, U}(O::AlgAssRelOrd{S, T, U}, a::AbsAlgAssElem{S}, arr::Vector{S}) where {S, T, U}
    z = new{S, T, U}()
    z.parent = O
    z.elem_in_algebra = a
    z.coordinates = arr
    z.has_coord = true
    return z
  end
end
################################################################################
#
# AlgAssRelOrdIdl
#
################################################################################
# (Fractional) ideal in an algebra over a number field, with lazily filled
# caches for basis data, left/right orders, and norms relative to orders.
mutable struct AlgAssRelOrdIdl{S, T, U}
  algebra::U

  pseudo_basis::Vector{Tuple{AbsAlgAssElem{S}, T}}
  # The basis matrices are in the BASIS of the ALGEBRA!
  basis_pmatrix::PMat{S, T}
  basis_matrix::Generic.MatSpaceElem{S}
  basis_mat_inv::Generic.MatSpaceElem{S}

  # Basis pseudo-matrices with respect to orders
  basis_pmatrix_wrt::Dict{AlgAssRelOrd{S, T}, PMat{S, T}}

  # Left and right order:
  # The largest orders of which the ideal is a left resp. right ideal.
  left_order::AlgAssRelOrd{S, T, U}
  right_order::AlgAssRelOrd{S, T, U}

  # Any order contained in the left or right order, that is, an order of which
  # the ideal is a (possibly fractional) ideal.
  order::AlgAssRelOrd{S, T, U}

  # isleft and isright with respect to `order`
  isleft::Int                      # 0 Not known
                                   # 1 Known to be a left ideal
                                   # 2 Known not to be a left ideal
  isright::Int                     # as for isleft

  iszero::Int                      # 0: don't know, 1: known to be zero, 2: known to be not zero

  norm::Dict{AlgAssRelOrd{S, T, U}, T} # The ideal has different norms with respect
                                       # to different orders
  normred::Dict{AlgAssRelOrd{S, T, U}, T}

  # Bare constructor: flags "unknown", norm caches empty.
  function AlgAssRelOrdIdl{S, T, U}(A::AbsAlgAss{S}) where {S, T, U}
    z = new{S, T, U}()
    z.algebra = A
    z.isleft = 0
    z.isright = 0
    z.iszero = 0
    z.basis_pmatrix_wrt = Dict{AlgAssRelOrd{S, T, U}, PMat{S, T}}()
    z.norm = Dict{AlgAssRelOrd{S, T, U}, T}()
    z.normred = Dict{AlgAssRelOrd{S, T, U}, T}()
    return z
  end

  # Construct from a basis pseudo-matrix.
  function AlgAssRelOrdIdl{S, T, U}(A::AbsAlgAss{S}, M::PMat{S, T}) where {S, T, U}
    z = AlgAssRelOrdIdl{S, T, U}(A)
    z.basis_pmatrix = M
    z.basis_matrix = M.matrix
    return z
  end
end
| [
29113,
29113,
14468,
198,
2,
198,
2,
220,
978,
70,
8021,
6892,
35422,
198,
2,
198,
29113,
29113,
14468,
198,
198,
2,
311,
318,
262,
5002,
2099,
286,
262,
2779,
2214,
286,
262,
37139,
11,
309,
262,
13390,
282,
7306,
198,
2,
2099,
2... | 2.262766 | 2,017 |
"
Iterating over an AbstractGroup is the same as iterating over the set.
"
abstract type AbstractGroup end
# TODO: convert `Set` to `AbstractSet` where possible to support OrderedSets et al
"""
Structure consisting of a set and a binary operation. No constraints are put on either expression.
"""
struct Groupoid{T} <: AbstractGroup
    set::Set{T}
    operation::Function
end
"""
Group.
The following axioms must hold. Assuming a group G = (S, ∘), then
- Closure: the group must be closed under the binary operation.
    ∀x,y ∈ S: x ∘ y ∈ S
- Associativity:
    ∀x,y,z ∈ S: x ∘ (y ∘ z) = (x ∘ y) ∘ z
- Identity:
    ∃e ∈ S: ∀x ∈ S, x ∘ e = x = e ∘ x
- Inverses:
    ∀x ∈ S, ∃ x⁻¹ ∈ S : x ∘ x⁻¹ = e = x⁻¹ ∘ x
"""
struct Group{T} <: AbstractGroup where {T}
    set::Set{T}
    operation::Function
    # Validation raises on failure, so `new` is only reached for genuine groups.
    Group{T}(set, operation) where T = if _validate_group(Groupoid(set, operation)) new(set, operation) end
end
Group(set::Set{T}, operation::Function) where {T} = Group{T}(set, operation)
"""
Subgroup of a group.
Create using SubGroup(group, subset).
Must be closed under the group operation, include the identity element and include inverses for each element.
"""
struct SubGroup{T} <: AbstractGroup where {T}
    group::Group{T}
    set::Set{T}
    operation::Function
end
# Validating constructor: checks subset containment, closure and inverses
# (identity existence is implied by get_identity_element succeeding).
function SubGroup(group::Group{T}, subset::Set{<:T})::SubGroup{T} where {T}
    @assert subset ⊆ group.set
    g = Groupoid(subset, group.operation)
    assert_closure(g)
    _assert_inverses(get_identity_element(g), g.set, g.operation)
    return SubGroup(group, subset, group.operation)
end
abstract type AbstractGroupAction end
# Unvalidated carrier used internally while a GroupAction is being checked.
struct _GroupActionLike{T} <: AbstractGroupAction where {T}
    group::Group{<:T}
    set::Set{<:T}
    action::Function
end
# Validated group action; construct via the checking GroupAction(...) function.
struct GroupAction{T} <: AbstractGroupAction where {T}
    group::Group{<:T}
    set::Set{<:T}
    action::Function
    #GroupAction{T}
end
# TODO: fix convert below, then simplify code as necessary
# import Base: convert
# convert(T::Set{S}, x::Array{S, 1}) where {S} = T(_ for _ in x)
# convert(Set, Array{Int, 1}([1]))
# Left coset x∘S = { x ∘ s : s ∈ S } of `set` under `operation`.
function coset(left::T, set::Set{T}, operation::Function)::Set{T} where {T}
    return Set(operation(left, member) for member in set)
end
# Right coset S∘x = { s ∘ x : s ∈ S } of `set` under `operation`.
function coset(set::Set{T}, right::T, operation::Function)::Set{T} where {T}
    return Set(operation(member, right) for member in set)
end
# Coset overloads for any AbstractGroup: delegate to the Set-based methods,
# using the group's own carrier set and operation.
coset(left, group::AbstractGroup) = coset(left, group.set, group.operation)
coset(group::AbstractGroup, right) = coset(group.set, right, group.operation)
# Elementwise composition L∘R = { l ∘ r : l ∈ L, r ∈ R } of two sets.
function set_composition(left_set::Set{<:T}, right_set::Set{<:T}, operation::Function)::Set where {T}
    # `map` is not defined on sets, so collect the products via a generator.
    products = (operation(l, r) for l in left_set for r in right_set)
    return Set(products)
end
# Assert that composing `set` with `closed_in_set` stays inside `closed_in_set`.
# NOTE: uses @assert for validation by design throughout this file (would be
# disabled at higher optimization levels).
function assert_closure(set::Set, closed_in_set::Set, operation::Function)::Nothing
    # We only check left closure.
    @assert set_composition(set, closed_in_set, operation) ⊆ closed_in_set
end
# Group overload: the group's set must be closed under its own operation.
function assert_closure(group::AbstractGroup)::Nothing
    assert_closure(group.set, group.set, group.operation)
end
# Group-action overload: acting with any group element must map X into X.
function assert_closure(ga::AbstractGroupAction)::Nothing
    assert_closure(ga.group.set, ga.set, ga.action)
end
# Exhaustive O(|S|³) associativity check; throws AssertionError (with a
# descriptive message) at the first violating triple.
function assert_associativity(set::Set, operation::Function)::Nothing
    for x in set, y in set, z in set
        a = operation(x, operation(y, z))
        b = operation(operation(x, y), z)
        @assert a == b "$x ∘ ($y ∘ $z) = $a ≠ $b = ($x ∘ $y) ∘ $z"
    end
end
# Group overload: check associativity of the group's operation over its set.
assert_associativity(group::AbstractGroup)::Nothing =
    assert_associativity(group.set, group.operation)
# Check that `element` is a two-sided identity for `operation` on `set`;
# throws AssertionError at the first counterexample.
function _assert_identity_element(element::T, set::Set{T}, operation::Function)::Nothing where {T}
    for x in set
        @assert x == operation(element, x) == operation(x, element)
    end
end
# Scan the group's set for the (unique, if it exists) two-sided identity.
# Throws AssertionError when no identity is found.
function get_identity_element(group::AbstractGroup) # TODO: return type
    for candidate in group.set
        try
            _assert_identity_element(candidate, group.set, group.operation)
            return candidate
        catch err
            # BUG FIX: `catch AssertionError` bound ANY thrown value to a local
            # variable named AssertionError instead of filtering by type; only
            # swallow the expected validation failure here.
            err isa AssertionError || rethrow()
            continue
        end
    end
    throw(AssertionError("No identity element found"))
end
# Check that every element has both a left and a right inverse: the identity
# must appear in each element's left and right coset with the full set.
function _assert_inverses(identity_element::T, set::Set{T}, operation::Function)::Nothing where {T}
    for item in set
        @assert identity_element in coset(item, set, operation)
        @assert identity_element in coset(set, item, operation)
    end
end
# Run the full battery of group-axiom checks; each raises on failure, so
# reaching the end means the (set, operation) pair is a genuine group.
function _validate_group(group::AbstractGroup)::Bool
    # Validation
    assert_closure(group)
    assert_associativity(group)
    e = get_identity_element(group) # This raises on failure
    _assert_inverses(e, group.set, group.operation)
    return true
end
# Field-by-field structural equality: true iff every named field of `x` equals
# the corresponding field of `y` (both must share the concrete type `T`).
function naive_isequal(x::T, y::T) where T
    return all(getfield(x, f) == getfield(y, f) for f in fieldnames(T))
end
# Equality, iteration and length overloads so groups behave like their sets.
# NOTE(review): `==` is overloaded without a matching `hash` method — Sets/Dicts
# keyed by groups may behave inconsistently; consider adding `Base.hash`.
import Base: ==
==(x::T, y::T) where T<:AbstractGroup = naive_isequal(x, y)
# ==(x::Group, y::Group) = naive_isequal(x, y)
# ==(x::SubGroup, y::SubGroup) = naive_isequal(x, y)
# A SubGroup equals a group when they share the same carrier set and operation.
function ==(x::AbstractGroup, y::SubGroup)::Bool
    if y.set == x.set && x.operation == y.operation
        return true
    end
    return false
end
==(x::SubGroup, y::AbstractGroup) = ==(y, x)
# Prevent `MethodError: ==(::SubGroup{Int64}, ::SubGroup{Int64}) is ambiguous`
==(x::SubGroup, y::SubGroup) = naive_isequal(x, y)
import Base: iterate
# Iterating a group iterates its carrier set.
iterate(group::AbstractGroup) = iterate(group.set)
iterate(group::AbstractGroup, t::T) where {T} = iterate(group.set, t)
import Base: length
length(x::AbstractGroup) = length(x.set)
"""
N is a _normal_ subgroup of G if ∀g ∈ G, gN = Ng
N is a normal subgroup of G iff N is a subgroup of G and N is a union of conjugacy classes of G
"""
function isnormal(subgroup::SubGroup)
    for g in subgroup.group
        # FIX: the two cosets were computed into locals and then recomputed
        # from scratch inside the comparison; compute each once and reuse.
        left_coset = coset(g, subgroup)
        right_coset = coset(subgroup, g)
        if left_coset != right_coset
            return false
        end
    end
    return true
end
# A group is cyclic iff at least one single element generates the whole group.
function iscyclic(group::AbstractGroup)
    # Idiomatic replacement for the verbose `length(...) > 0 → true/false` form.
    return !isempty(find_generators(group))
end
# Quotient group G/N: elements are the cosets of the normal subgroup N, and the
# operation is setwise composition of cosets under the original operation.
function quotient_group(subgroup::SubGroup)
    @assert isnormal(subgroup) "Subgroup must be normal"
    # The set here is equal to the "partition" of the group into cosets created by group elements with the subgroup
    f(x::Set, y::Set) = set_composition(x, y, subgroup.operation)
    return Group(Set(coset(g, subgroup) for g in subgroup.group), f)
end
# Cyclic subgroup ⟨generator⟩ = {e, g, g², …}: keep multiplying by `generator`
# until the identity recurs.
function generate_subgroup(group::AbstractGroup, generator)::SubGroup
    e = get_identity_element(group)
    # BUG FIX: `Set([e generator])` built a 1×2 matrix via hcat before collecting
    # its entries; that breaks for non-scalar group elements. Use a plain vector.
    generated_set = Set([e, generator])
    p = group.operation(generator, generator)
    while p != e
        push!(generated_set, p)
        p = group.operation(p, generator)
    end
    SubGroup(group, generated_set, group.operation)
end
# All elements whose cyclic subgroup is the entire group.
function find_generators(group::AbstractGroup)::Set
    return filter(candidate -> generate_subgroup(group, candidate) == group, group.set)
end
# Cayley (multiplication) table: returns the element ordering plus the nested
# vector `inside` where inside[i][j] = elements[i] ∘ elements[j].
# NOTE: ordering follows Set iteration order, which is unspecified.
function _cayley_table(group::AbstractGroup) # TODO: move to utils
    elements = [x for x in group.set]
    inside = map(x->
        map(y->group.operation(x, y),
            elements
        ), elements)
    return elements, inside
end
# _cayley_table((generate_subgroup(G, 6) |> quotient_group))[2]
"""
Conjugate x by g, i.e. perform gxg⁻¹
"""
function conjugate(group::AbstractGroup, x::T, g::T) where {T}
    group.operation(group.operation(g, x), inv(group, g))
end
"""
Return { gxg⁻¹: ∀g ∈ G}
"""
function conjugacy_class(group::AbstractGroup, x)::Set
    Set(conjugate(group, x, g) for g in group)
end
"""
Return the distinct conjugacy classes in `group`.
The set of distinct conjugacy classes forms a partition of the group.
"""
function conjugacy_classes(group::AbstractGroup)::Set
    Set(conjugacy_class(group, x) for x in group)
end
"""
Obtain the inverse of `x` in `group`.

Returns `missing` when no inverse exists (i.e. the input is not a group element
with an inverse under this operation).
"""
# NOTE(review): this shadows Base.inv rather than extending it (no
# `import Base: inv`), so `Base.inv` is unavailable in this namespace.
function inv(group::AbstractGroup, x)
    e = get_identity_element(group)
    for h in group
        if group.operation(x, h) == group.operation(h, x) == e
            return h
        end
    end
    return missing # TODO: missing or nothing?
end
abstract type AbstractHomomorphism end
"""
An isomorphism ϕ: (G, ∘) → (H, ⋆) is a mapping which satisfies the following properties
- ϕ is one-to-one and onto
See also: Homomorphism
"""
struct Isomorphism <: AbstractHomomorphism
    from_group::AbstractGroup
    to_group::AbstractGroup
    mapping::Dict
end
"""
A homomorphism ϕ: (G, ∘) → (H, ⋆) is a mapping which satisfies the following property:
∀x,y ∈ G, ϕ(x ∘ y) = ϕ(x) ⋆ ϕ(y) (i.e. it preserves composites)
"""
struct Homomorphism <: AbstractHomomorphism
    from_group::AbstractGroup
    to_group::AbstractGroup
    mapping::Dict
end
# Order of a group: the cardinality of its carrier set.
order(group::AbstractGroup) = length(group.set)
# Order of an element: the size of the cyclic subgroup it generates.
order(group::AbstractGroup, x) = length(generate_subgroup(group, x))
# Attempt to construct an isomorphism a → b as a Dict. Handles two cases:
# the identity automorphism, and cyclic groups of equal order (map generator to
# generator and walk the powers in lockstep).
# NOTE(review): the power loop runs order(a) times after seeding the Dict, so it
# wraps around and finally re-assigns the identity — harmless but worth a look.
# NOTE(review): `throw` of a plain String is unusual; an ErrorException
# (`error("Not implemented")`) would be more idiomatic.
function get_isomorphism(a::AbstractGroup, b::AbstractGroup)
    if a == b # Automorphism
        return Dict(k=>k for k in a)
    end
    if iscyclic(a) && iscyclic(b) && order(a) == order(b) # Cyclic subgroups of the same order
        gen_a, gen_b = find_generators(a) |> first, find_generators(b) |> first
        mapping = Dict(gen_a => gen_b)
        p_a = gen_a
        p_b = gen_b
        for i in 1:order(a)
            p_a = a.operation(p_a, gen_a)
            p_b = b.operation(p_b, gen_b)
            mapping[p_a] = p_b
        end
        return mapping
    end
    throw("Not implemented")
end
"""
Assert the following property, where mapping=ϕ, from=(G, ∘), to=(H, ⋆)
    ∀ x,y ∈ G; ϕ(x ∘ y) = ϕ(x) ⋆ ϕ(y)
"""
function _assert_homomorphism_property(mapping::Dict, from::AbstractGroup, to::AbstractGroup)
    for x in from
        for y in from
            @assert mapping[from.operation(x, y)] == to.operation(mapping[x], mapping[y])
        end
    end
end
"""
Let (G, ∘) be a group, X be a set and ^ be a group action.
    ∀ g,h ∈ G; ∀x in X; g ^ (h ^ x) = (g ∘ h) ^ x
"""
function _assert_homomorphism_property(ga::AbstractGroupAction)
    for g in ga.group
        for h in ga.group
            for x in ga.set
                # compatibility of the action with the group operation
                @assert ga.action(g, ga.action(h, x)) == ga.action(ga.group.operation(g, h), x)
            end
        end
    end
end
# Apply `ϕ` elementwise (broadcast) over a vector or set of values.
function _transform(ϕ::Function, arr::Union{Vector, AbstractSet})::Union{Vector, AbstractSet}
    return broadcast(ϕ, arr)
end
# Dict overload: treat the dictionary as the mapping x ↦ ϕ[x].
function _transform(ϕ::Dict, arr::Union{Vector, AbstractSet})::Union{Vector, AbstractSet}
    return _transform(key -> ϕ[key], arr)
end
"""
Let G be a group and ϕ: G → H be a homomorphism. Then,
    Ker ϕ = {g ∈ G: ϕ(g) = e_H}
Let V, W be vector subspaces and t: V → W be a linear transformation. Then,
    Ker t = {v⃗ ∈ V: ϕ(v⃗) = 0⃗}
"""
function kernel(transformation::Union{Function, Dict}, from::Union{AbstractSet, AbstractGroup}, identity)
    elements = [g for g in from]
    transformed = _transform(transformation, elements)
    # Keep exactly those elements landing on the identity of the codomain.
    return Set(elements[transformed .== identity])
end
# Image of a set under a transformation (function or Dict mapping).
function image(transformation, from::AbstractSet)::Set
    return Set(_transform(transformation, from))
end
# Group overload: image of the group's carrier set.
function image(transformation, from::AbstractGroup)::Set
    return Set(_transform(transformation, from.set))
end
# Validating constructor for GroupAction: checks closure, that the identity acts
# trivially, and the action/operation compatibility law, before wrapping.
function GroupAction(group::Group{<:T}, set::Set{<:T}, action::Function)::GroupAction{T} where {T}
    ga = _GroupActionLike(group, set, action)
    assert_closure(ga)
    e = get_identity_element(ga.group)
    for x in ga.set
        @assert ga.action(e, x) == x
    end
    _assert_homomorphism_property(ga)
    return GroupAction{T}(group, set, action)
end
"""
Orb x = {∀g ∈ G, g ^ x}
"""
# NOTE(review): broadcasting `ga.action.(ga.group, x)` relies on the group type
# being broadcastable; Base's fallback treats unknown types as scalars — confirm
# this works, or collect the group's set first.
function orbit(ga::AbstractGroupAction, x)::Set # TODO: define type
    @assert x in ga.set
    return ga.action.(ga.group, x) |> Set
end
"""
Get the set of all orbits for the group action
"""
function orbits(ga::AbstractGroupAction)::Set
    return Set(orbit(ga, x) for x in ga.set)
end
"""
Stab x = {g ∈ G: g^x = x}
"""
function stabilizer(ga::AbstractGroupAction, x)
    @assert x in ga.set
    v = [g for g in ga.group.set]
    t = ga.action.(v, x)
    stable_mask = t .== x
    return Set(v[stable_mask])
end
# function stabilizers(ga::AbstractGroupAction)
#     return Set(stabilizer(ga, x) for x in ga.set)
# end
"""
Fix g = {x ∈ X: g^x = x}
"""
function fixed_set(ga::AbstractGroupAction, g)
    @assert g in ga.group.set
    members = [x for x in ga.set]
    acted = ga.action.(g, members)
    # Keep exactly the points left unchanged by g.
    return Set(members[acted .== members])
end
| [
198,
1,
198,
220,
220,
220,
40806,
803,
625,
281,
27741,
13247,
318,
262,
976,
355,
11629,
803,
625,
262,
900,
13,
198,
1,
198,
397,
8709,
2099,
27741,
13247,
886,
198,
198,
2,
16926,
46,
25,
10385,
4600,
7248,
63,
284,
4600,
2383... | 2.477052 | 5,142 |
<gh_stars>0
# Ospa dist
# Pairwise OSPA distance matrix between two collections of point clouds:
# entry (i, j) is ospa_dist(pca1[i], pca2[j], c).
function ospa_dist(pca1::Vector{Pointcloud},
                   pca2::Vector{Pointcloud},
                   c::S
                   ) where {S <: Real}
    dmat = Matrix{Float64}(undef, length(pca1), length(pca2))
    for (i, cloud1) in enumerate(pca1), (j, cloud2) in enumerate(pca2)
        dmat[i, j] = ospa_dist(cloud1, cloud2, c)
    end
    return dmat
end
function ospa_dist(pc1::Pointcloud,
pc2::Pointcloud,
c::S
) where {S <: Real}
if size(pc1)[1] > size(pc2)[1]
return ospa_dist(pc2, pc1, c)
end
dmat = p2dist(pc1, pc2)
assignments = hungarian(dmat)[1]
cost = sum([min(dmat[i, assignments[i]], c) for i=1:size(pc1)[1] if assignments[i] != 0])
1/size(pc2)[1]*(cost + c*(size(pc2)[1] - size(pc1)[1])) |> sqrt
end
# For each measurement cloud, compute the optimal (Hungarian) assignment of the
# barycenter's points to that measurement's points.
function optimal_assignments(barycenter::Pointcloud,
                             measurements::Vector{Pointcloud}
                             )
    return [hungarian(p2dist(barycenter, m))[1] for m in measurements]
end
# Matrix of pairwise Euclidean distances between the rows of `x` and the rows
# of `y`: result[i, j] == ‖x[i, :] - y[j, :]‖₂.
# Uses views to avoid copying each row, and scalar `sqrt` (the broadcast dot in
# the original was applied to a scalar).
function p2dist(x, y)
    return [sqrt(sum((@view(x[i, :]) .- @view(y[j, :])) .^ 2))
            for i in 1:size(x, 1), j in 1:size(y, 1)]
end
# Self-distance matrix: pairwise distances among the rows of `x`.
p2dist(x) = p2dist(x, x)
| [
27,
456,
62,
30783,
29,
15,
198,
2,
440,
2777,
64,
1233,
198,
198,
8818,
267,
2777,
64,
62,
17080,
7,
79,
6888,
16,
3712,
38469,
90,
12727,
17721,
5512,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.778561 | 709 |
<reponame>mkg33/Catalyst.jl<gh_stars>0
using Catalyst

# SIR-like network: infection (rate α), recovery (rate β), and an S^2-rate
# removal of R.
rn = @reaction_network begin
  α, S + I --> 2I
  β, I --> R
  S^2, R --> 0
end α β

# Smoke checks: the network renders to a graph, and the graph saves as PNG.
gr = Graph(rn)
fname = Base.Filesystem.tempname()
savegraph(gr, fname, "png")
| [
27,
7856,
261,
480,
29,
76,
10025,
2091,
14,
21979,
21470,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
48238,
198,
35906,
796,
2488,
260,
2673,
62,
27349,
2221,
198,
220,
220,
220,
26367,
11,
311,
1343,
314,
14610,
362,
40,
... | 2.382609 | 115 |
<filename>src/plot_recipes/recipes_populations.jl
import ..UncertainValues: UncertainScalarPopulation
using RecipesBase
#@recipe f(::Type{UncertainScalarPopulation{T}}, x::UncertainScalarPopulation{T}) where {T} =
# rand(x, 10000)
# Plot recipe: visualize an uncertain scalar population by drawing 10_000
# random samples from it and letting downstream recipes plot the sample.
@recipe function f(p::UncertainScalarPopulation{T}) where T
    @series begin
        rand(p, 10000)
    end
end
# Plot recipe for a vector of populations: emit one series per population;
# each element is then dispatched to the single-population recipe.
@recipe function f(populations::Vector{UncertainScalarPopulation{T}}) where {T}
    for p in populations
        @series begin
            p
        end
    end
end
27,
34345,
29,
10677,
14,
29487,
62,
8344,
18636,
14,
8344,
18636,
62,
12924,
5768,
13,
20362,
198,
11748,
11485,
3118,
39239,
40161,
25,
28304,
1425,
3351,
282,
283,
45251,
198,
198,
3500,
44229,
14881,
628,
198,
2,
31,
29102,
431,
2... | 2.448598 | 214 |
export pointwise_log_likelihoods
# Warning emitted when an MCMC sample array looks transposed (more parameters
# than draws). Fixed: missing space after the period ("samples.This").
const ARRAY_DIMS_WARNING = "The supplied array of mcmc samples indicates you have more
parameters than mcmc samples. This is possible, but highly unusual. Please check that your
array of mcmc samples has the following dimensions: [n_samples,n_parms,n_chains]."
"""
pointwise_log_likelihoods(
ll_fun::Function,
samples::AbstractArray{<:Real,3},
data;
splat::Bool=true
)
Compute the pointwise log likelihood.
# Arguments
- $LIKELIHOOD_FUNCTION_ARG
- `samples::AbstractArray`: A three dimensional array of MCMC samples. Here, the first
dimension should indicate the iteration of the MCMC ; the second dimension should
indicate the parameter ; and the third dimension represents the chains.
- `data`: A vector of data used to estimate the parameters of the model.
- `splat`: If `true` (default), `f` must be a function of `n` different parameters.
Otherwise, `f` is assumed to be a function of a single parameter vector.
# Returns
- `Array`: A three dimensional array of pointwise log-likelihoods.
"""
function pointwise_log_likelihoods(
ll_fun::Function,
samples::AbstractArray{<:Union{Real, Missing}, 3},
data;
splat::Bool=true,
)
n_posterior, n_parms, n_chains = size(samples)
if n_parms > n_posterior
@info ARRAY_DIMS_WARNING
end
if splat
fun = (p, d) -> ll_fun(p..., d)
else
fun = (p, d) -> ll_fun(p, d)
end
n_posterior, _, n_chains = size(samples)
n_data = length(data)
pointwise_lls = similar(samples, n_data, n_posterior, n_chains)
for index in CartesianIndices(pointwise_lls)
datum, iteration, chain = Tuple(index)
pointwise_lls[datum, iteration, chain] = fun(
samples[iteration, :, chain], data[datum]
)
end
return pointwise_lls
end
# Matrix form: interpret `samples` as draws × parameters, expand to a
# three-dimensional array using `chain_index`, and delegate to the array method.
function pointwise_log_likelihoods(
    ll_fun::Function,
    samples::AbstractMatrix{<:Union{Real, Missing}},
    data;
    chain_index::AbstractVector{<:Integer}=_assume_one_chain(samples),
    kwargs...,
)
    samples_array = _convert_to_array(samples, chain_index)
    return pointwise_log_likelihoods(ll_fun, samples_array, data)
end
| [
39344,
966,
3083,
62,
6404,
62,
2339,
11935,
82,
198,
198,
9979,
5923,
30631,
62,
35,
3955,
50,
62,
31502,
796,
366,
464,
14275,
7177,
286,
285,
11215,
66,
8405,
9217,
345,
423,
517,
220,
198,
17143,
7307,
621,
285,
11215,
66,
8405,... | 2.595041 | 847 |
<reponame>n-kishaloy/FinanceLib.jl
import FinanceLib
import Dates
@testset "FinanceLib " begin
@testset "tv" begin
@test FinanceLib.yearFrac(Dates.Date(2027,2,12), Dates.Date(2018,2,12)) ≈ -8.999315537303216
@test FinanceLib.invYearFrac(Dates.Date(2027,2,12), -8.999315537303216) == Dates.Date(2018,2,12)
@test FinanceLib.disFactAnnual(0.07) == 0.9345794392523364
@test FinanceLib.disFact(0.09, 3) == 0.7721834800610642
@test FinanceLib.fwdDisFact((0.07, 1), (0.09, 3)) == 0.8262363236653387
@test FinanceLib.xdisFact(0.09, Dates.Date(2015,3,15), Dates.Date(2018,10,8)) == 0.7353328680759499
@test FinanceLib.tMul(0.06/12, -120.0) == 0.5496327333641637
@test FinanceLib.tMul(0.06, -10.0, 12.0) == 0.5496327333641637
@test FinanceLib.rateGwth(7.35, 8.52, 5.0) == -0.029111071029244595
@test FinanceLib.periodGwth(100.0,50.0,0.07) == 10.244768351058712
end
@testset "pv" begin
@test FinanceLib.pv(10_000_000., 0.09, 5.0) == 6_499_313.862983453
@test FinanceLib.pv(12_704_891.6109538, 0.06, 4.0, 12.0) ≈ 10_000_000.
@test FinanceLib.pvr(10_000_000., 1.09, 5.0) == 6_499_313.862983453
@test FinanceLib.pvc(11_735.108709918102, 0.08, 2.0) == 10_000
end
@testset "fv" begin
@test FinanceLib.fv(6_499_313.862983453, 0.09, 5.0) == 10_000_000.0
@test FinanceLib.fv(10_000_000.0, 0.06, 4.0, 12.0) ≈ 12_704_891.6109538
@test FinanceLib.fvc(10_000., 0.08, 2.0) == 11_735.108709918102
end
@testset "annuity" begin
@test FinanceLib.pvAnnuity(1000.0, 0.12, 5.0) == 3_604.776202345007
@test FinanceLib.pvAnnuity(7.33764573879378, 0.08, 30.0, 12.0) == 1000
@test FinanceLib.pvAnnuity(100.0, 0.05) == 2000.0
@test FinanceLib.fvAnnuity(1000.0, 0.05, 5.0) == 5_525.631250000007
@test FinanceLib.fvAnnuity(2000.0, 0.24, 5.0, 3.0) == 54_304.2278549568
@test FinanceLib.pmt(3_604.776202345007, 0.12, 5.0) == 1000.0
@test FinanceLib.pmt(1000.0, 0.08, 30.0, 12.0) == 7.33764573879378
@test FinanceLib.fmt(5_525.631250000007, 0.05, 5.0) == 1000
@test FinanceLib.fmt(54_304.2278549568, 0.24, 5.0, 3.0) == 2000
@test FinanceLib.pv(FinanceLib.pvAnnuity(10.0^6,.05,30.0),0.05,9.0) == 9_909_218.99605011
end
@testset "effective rates" begin
@test FinanceLib.effR(0.08, 2.0) ≈ 0.0816
@test FinanceLib.expR(0.08, 2.0) == 0.07844142630656266
@test FinanceLib.expR(0.08) == 0.0769610411361284
@test FinanceLib.nomR(FinanceLib.effR(0.08, 4), 4) ≈ 0.08
FinanceLib.pvc(20,FinanceLib.expR(0.07,4),4.25) == FinanceLib.pvr(20,1+FinanceLib.effR(0.07,4),4.25)
eT = FinanceLib.RateCurve{FinanceLib.NomRate}([0.0016, 0.0021, 0.0027, 0.0033, 0.0037, 0.0041], 2)
eR = FinanceLib.effR(eT)
@test eR.rate[1] ≈ 0.0016006400
@test eR.rate[2] ≈ 0.0021011025
@test eR.rate[3] ≈ 0.0027018225
@test eR.rate[4] ≈ 0.0033027225
@test eR.rate[5] ≈ 0.0037034225
@test eR.rate[6] ≈ 0.0041042025
eN = FinanceLib.nomR(eR)
@test eN.rate[1] ≈ 0.0016
@test eN.rate[2] ≈ 0.0021
@test eN.rate[3] ≈ 0.0027
@test eN.rate[4] ≈ 0.0033
@test eN.rate[5] ≈ 0.0037
@test eN.rate[6] ≈ 0.0041
eZ = FinanceLib.nomR(FinanceLib.effR(FinanceLib.expR(eT))) # N - X - E - N
eY = FinanceLib.nomR(FinanceLib.expR(FinanceLib.effR(eT))) # N - E - X - N
eW = FinanceLib.nomR(FinanceLib.expR(eT)) # N - X - N
@test eZ.rate[3] == eW.rate[3]
@test eY.rate[4] == eW.rate[4]
@test eZ.rate[2] == eY.rate[2]
@test eZ.rate[5] == eY.rate[5]
@test eZ.rate[1] == eW.rate[1]
end
@testset "npv" begin
@test FinanceLib.npv(0.05, [0.0:1.0:4.0;],[1000.,2000.0,4000.0,5000.0,6000.0],-1.45)==14709.923338335731
@test FinanceLib.npv(0.08, [0.25,6.25,3.5,4.5,1.25], [-6.25,1.2,1.25,3.6,2.5], -0.45) == 0.36962283798505946
@test FinanceLib.npv(0.08, zip([0.25,6.25,3.5,4.5,1.25],[-6.25,1.2,1.25,3.6,2.5]), -0.45) == 0.36962283798505946
@test FinanceLib.npv(0.08, [0.25,6.25,3.5,4.5,1.25], [-6.25,1.2,1.25,3.6,2.5], 6.25) == 0.619010419015909
@test FinanceLib.irr([0.125,0.29760274,0.49760274,0.55239726,0.812671233], [-10.25,-2.5,3.5,9.5,1.25]) ≈ 0.31813386476788824
ts = collect(zip([0.125,0.29760274,0.49760274,0.55239726,0.812671233], [-10.25,-2.5,3.5,9.5,1.25])) :: FinanceLib.PeriodSeries
@test FinanceLib.irr(ts) ≈ 0.31813386476788824
@test FinanceLib.irr(zip([0.125,0.29760274,0.49760274,0.55239726,0.812671233], [-10.25,-2.5,3.5,9.5,1.25])) ≈ 0.31813386476788824
@test FinanceLib.xnpv(0.08, [Dates.Date(2012,2,25), Dates.Date(2012,6,28),
Dates.Date(2013,2,15), Dates.Date(2014,9,18), Dates.Date(2015,2,20)],
[-15, 5, 25, -10, 50], Dates.Date(2012,1,10) ) == 44.15557928534869
@test FinanceLib.xnpv(0.08, zip([Dates.Date(2012,2,25), Dates.Date(2012,6,28),
Dates.Date(2013,2,15), Dates.Date(2014,9,18), Dates.Date(2015,2,20)],
[-15, 5, 25, -10, 50.]), Dates.Date(2012,1,10) ) == 44.15557928534869
@test FinanceLib.xirr([Dates.Date(2012,2,25), Dates.Date(2012,6,28),
Dates.Date(2013,2,15), Dates.Date(2014,9,18), Dates.Date(2015,2,20)],
[-115, 5, 25, -10, 200] ) == 0.2783166029306355
td = collect(zip([Dates.Date(2012,2,25), Dates.Date(2012,6,28),
Dates.Date(2013,2,15), Dates.Date(2014,9,18), Dates.Date(2015,2,20)],
[-115, 5, 25, -10, 200]))
td1 = FinanceLib.dateToPeriodSeries(Dates.Date(2010,05,12), td)
@test td1[3][1] == 2.7652292950034223
@test FinanceLib.irr(td1) == 0.2783166029306353
@test FinanceLib.xirr(td) == 0.2783166029306355
@test FinanceLib.xirr(zip([Dates.Date(2012,2,25), Dates.Date(2012,6,28),
Dates.Date(2013,2,15), Dates.Date(2014,9,18), Dates.Date(2015,2,20)],
[-115, 5, 25, -10, 200]) ) == 0.2783166029306355
@test FinanceLib.npv(FinanceLib.PeriodSeries([(0.5,0.05), (1.25, 0.0575), (2, 0.0485), (3.5, 0.0625), (4.25, 0.055)]), [-150, 20, 15, 80, 100], 0.3) == 31.530253870718543
@test FinanceLib.xnpv(FinanceLib.DateSeries([(Dates.Date(2014,9,20),0.05), (Dates.Date(2015,2,1), 0.0575), (Dates.Date(2016,10,5), 0.0485), (Dates.Date(2017,12,5), 0.0625), (Dates.Date(2019,1,5), 0.055)]), [-150, 20, 15, 80, 100], Dates.Date(2014,2,15)) == 29.323165765999597
end
@testset "Sharpe" begin
@test FinanceLib.sharpe(1.58,9.26,22.36) ≈ 0.3434704830053667
end
@testset "Rates" begin
@test FinanceLib.discFactorToNominalRate(FinanceLib.DiscountFactor([0.9524, 0.89, 0.8163, 0.735],1)).rate[3] == 0.0699990723472752
dsc = FinanceLib.discFactorToNominalRate(FinanceLib.DiscountFactor([ 0.99920063949, 0.99790330288, 0.99596091045, 0.99342713542, 0.99080111671, 0.98778777227 ],2)).rate
@test dsc[1] ≈ 0.0016
@test dsc[2] ≈ 0.0021
@test dsc[3] ≈ 0.0027
@test dsc[4] ≈ 0.0033
@test dsc[5] ≈ 0.0037
@test dsc[6] ≈ 0.0041
@test FinanceLib.estimR(FinanceLib.RateCurve{FinanceLib.NomRate}([0.05, 0.06, 0.07, 0.08], 2), 1.5) == 0.07
@test FinanceLib.estimR(FinanceLib.RateCurve{FinanceLib.NomRate}([0.05, 0.06, 0.07, 0.08], 2), 1.2) == 0.064
end
end
include("FixedIncomes/runtests.jl")
include("Derivatives/runtests.jl")
include("Statements/runtests.jl")
| [
27,
7856,
261,
480,
29,
77,
12,
31501,
282,
726,
14,
37,
14149,
25835,
13,
20362,
198,
11748,
15007,
25835,
198,
11748,
44712,
198,
198,
31,
9288,
2617,
366,
37,
14149,
25835,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.925748 | 3,744 |
<filename>src/lib/broadcast.jl<gh_stars>0
# .-'''-. _..._
# ' _ \ _______ .-'_..._''.
# /| / /` '. \ \ ___ `'. .' .' '.\
# || . | \ ' ' |--.\ \ / .'
# || .-,.--. | ' | ' | | \ ' . ' .|
# || __ | .-. |\ \ / / __ | | | '| | __ .' |_
# ||/'__ '. | | | | `. ` ..' /.:--.'. | | | || | .:--.'. _ .' |
# |:/` '. '| | | | '-...-'`/ | \ | | | ' .'. ' / | \ | .' |'--. .-'
# || | || | '- `" __ | | | |___.' /' \ '. .`" __ | | . | / | |
# ||\ / '| | .'.''| | /_______.'/ '. `._____.-'/ .'.''| | .'.'| |// | |
# |/\'..' / | | / / | |_\_______|/ `-.______ / / / | |_.'.'.-' / | '.'
# ' `'-'` |_| \ \._,\ '/ ` \ \._,\ '/.' \_.' | /
# `--' `" `--' `" `'-'
using Base.Broadcast
using Base.Broadcast: Broadcasted, AbstractArrayStyle, DefaultArrayStyle, broadcasted,
instantiate, materialize, flatten, combine_eltypes, _broadcast_getindex
using ForwardDiff: Dual
trim(x, Δ) = reshape(Δ, ntuple(i -> size(Δ, i), Val(ndims(x))))
# Reduce a broadcast gradient `Δ` back to the shape of the broadcast input `x`.
# Cases: shapes already match → pass through; equal lengths (trailing singleton
# dims) → reshape via `trim`; otherwise sum over the dimensions along which `x`
# was expanded during broadcasting (its size-1 dims).
unbroadcast(x::AbstractArray, Δ) =
  size(x) == size(Δ) ? Δ :
  length(x) == length(Δ) ? trim(x, Δ) :
  trim(x, sum(Δ, dims = ntuple(i -> size(x, i) == 1 ? i : ndims(Δ)+1, Val(ndims(Δ)))))
# A scalar input receives the total of all gradient contributions.
unbroadcast(x::Number, Δ) = sum(Δ)
# Seed a real value as a ForwardDiff.Dual carrying partials `p`;
# non-Real values pass through untouched.
dual(x, p) = x
dual(x::Real, p) = Dual(x, p)
# Value type carried by a Dual number type.
dualtype(::Type{Dual{G,T,P}}) where {G,T,P} = T
# Lift `f` to operate on Dual numbers: the i-th argument is seeded with a unit
# partial in slot i, so one evaluation of `f` yields the value together with
# the partial derivative w.r.t. every argument.
function dual_function(f::F) where F
  function (args::Vararg{Any,N}) where N
    ds = map(args, ntuple(identity,Val(N))) do x, i
      dual(x, ntuple(j -> i==j, Val(N)))
    end
    return f(ds...)
  end
end
dualify(bc::Broadcasted{S}) where S = Broadcasted{S}(dual_function(bc.f), bc.args, bc.axes)
# Evaluate a dual-valued Broadcasted, writing primal values into `dest` and the
# k-th partial of each element into `grads[k]` (one gradient array per argument).
function broadcast_gradient!(bc::Broadcasted, dest::AbstractArray, grads::Vararg{Any})
  @simd for I in eachindex(bc)
    @inbounds begin
      out = bc[I]
      dest[I] = out.value
      map((g, p) -> g[I] = p, grads, out.partials.values)
    end
  end
end
# Allocate the output and one gradient array per broadcast argument, then fill
# them all in a single fused pass.
function broadcast_gradient(bc::Broadcasted, ::Type{T}) where T
  primal = similar(bc, T)
  partials = map(_ -> similar(bc, T), bc.args)
  broadcast_gradient!(bc, primal, partials...)
  return primal, partials
end
# Forward-mode broadcast with pullback: evaluate the flattened broadcast on
# dual numbers, returning the primal result `y` and a closure `back(Δ)` that
# maps an output sensitivity to per-argument gradients (via `unbroadcast`).
# Fixed: removed the unused type parameters `where {F,N}` left over from an
# earlier signature.
@inline function ∇broadcast(bc′::Broadcasted)
  bc = dualify(instantiate(flatten(bc′)))
  T = combine_eltypes(bc.f, bc.args)
  y, gs = broadcast_gradient(bc, dualtype(T))
  back(Δ) = map((x, d) -> unbroadcast(x, Δ.*d), bc.args, gs)
  return y, back
end
# Zero-dimensional (scalar) broadcast: evaluate eagerly on duals and return the
# scalar value plus a pullback yielding one gradient per argument.
function ∇broadcast(bc::Broadcasted{<:AbstractArrayStyle{0}})
  out = dualify(instantiate(flatten(bc)))[]
  return out.value, Δ -> map(x -> x*Δ, out.partials.values)
end
using Base: tail
# Rebuild a nested argument structure from a flat tuple of gradients `xs`:
# each method consumes as many elements of `xs` as the shape of `x` requires
# and returns (rebuilt-structure, remaining-elements).
_unflatten(x, xs) = first(xs), tail(xs)
# An empty tuple consumes nothing.
_unflatten(x::Tuple{}, xs) = (), xs
# Tuples: rebuild head then tail, threading the remaining gradients through.
function _unflatten(x::Tuple, xs)
  t1, xs1 = _unflatten(first(x), xs)
  t2, xs2 = _unflatten(tail(x), xs1)
  (t1, t2...), xs2
end
# Broadcasted nodes: rebuild the args; `f` and `axes` carry no gradient.
function _unflatten(bc::Broadcasted, xs)
  t, xs′ = _unflatten(bc.args, xs)
  (args=t,f=nothing,axes=nothing), xs′
end
# Public entry point: return only the rebuilt structure.
unflatten(x, xs) = _unflatten(x, xs)[1]
# Gradient rule for `broadcasted`: the lazy node is its own forward result;
# the pullback routes Δ.args to the arguments (nothing for the function `f`).
@grad function broadcasted(f, args...)
  broadcasted(f, args...), Δ -> (nothing, Δ.args...)
end
# Gradient rule for `materialize`: run the dual-number broadcast, then unflatten
# the per-argument gradients back into the Broadcasted argument structure.
@grad function materialize(bc::Broadcasted{<:DefaultArrayStyle})
  let (y, back) = ∇broadcast(bc)
    y, Δ -> (unflatten(bc, back(Δ)),)
  end
end
| [
27,
34345,
29,
10677,
14,
8019,
14,
36654,
2701,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
764,
12,
7061,
... | 1.835768 | 1,985 |
<filename>src/categorical_algebra/FinSets.jl
""" The category of finite sets and functions, and its skeleton.
"""
module FinSets
export FinSet, FinFunction, FinDomFunction, TabularSet, TabularLimit,
force, is_indexed, preimage,
JoinAlgorithm, SmartJoin, NestedLoopJoin, SortMergeJoin, HashJoin,
SubFinSet, SubOpBoolean
using AutoHashEquals
using DataStructures: OrderedDict, IntDisjointSets, union!, find_root!
using Reexport
import StaticArrays
using StaticArrays: StaticVector, SVector, SizedVector, similar_type
import Tables, PrettyTables
@reexport using ..Sets
using ...GAT, ...Theories, ...CSetDataStructures, ...Graphs
using ..FinCats, ..FreeDiagrams, ..Limits, ..Subobjects
import ...Theories: Ob, meet, ∧, join, ∨, top, ⊤, bottom, ⊥
import ..Categories: ob, hom, dom, codom, compose, id, ob_map, hom_map
import ..FinCats: force, ob_generators, hom_generators, graph, is_discrete
using ..FinCats: dicttype
import ..Limits: limit, colimit, universal, pushout_complement,
can_pushout_complement
import ..Subobjects: Subobject, SubobjectLattice
using ..Sets: IdentityFunction, SetFunctionCallable
# Finite sets
#############
""" Finite set.
A finite set has abstract type `FinSet{S,T}`. The second type parameter `T` is
the element type of the set and the first parameter `S` is the collection type,
which can be a subtype of `AbstractSet` or another Julia collection type. In
addition, the skeleton of the category **FinSet** is the important special case
`S = Int`. The set ``{1,…,n}`` is represented by the object `FinSet(n)` of type
`FinSet{Int,Int}`.
"""
abstract type FinSet{S,T} <: SetOb{T} end
# Idempotent constructor: an existing finite set is returned unchanged.
FinSet(set::FinSet) = set
""" Finite set of the form ``{1,…,n}`` for some number ``n ≥ 0``.
"""
@auto_hash_equals struct FinSetInt <: FinSet{Int,Int}
  n::Int  # cardinality; the set represented is {1,…,n}
end
# Register both the fully parameterized and the bare Int constructor.
FinSet{Int,Int}(n::Int) = FinSetInt(n)
FinSet(n::Int) = FinSetInt(n)
Base.iterate(set::FinSetInt, args...) = iterate(1:set.n, args...)
Base.length(set::FinSetInt) = set.n
# Fixed argument order: `x in set` lowers to `in(x, set)`, so the element comes
# first. The previous signature `in(set::FinSetInt, elem)` was never selected
# for `x in set` and membership silently fell back to the generic
# iteration-based method.
Base.in(elem, set::FinSetInt) = in(elem, 1:set.n)
Base.show(io::IO, set::FinSetInt) = print(io, "FinSet($(set.n))")
""" Finite set given by Julia collection.
The underlying collection should be a Julia iterable of definite length. It may
be, but is not required to be, set-like (a subtype of `AbstractSet`).
"""
@auto_hash_equals struct FinSetCollection{S,T} <: FinSet{S,T}
  collection::S  # any definite-length Julia iterable; need not be set-like
end
# Infer the element-type parameter from the wrapped collection.
FinSetCollection(collection::S) where S =
  FinSetCollection{S,eltype(collection)}(collection)
# Vectors and sets of T wrap directly into FinSet form.
FinSet(collection::S) where {T, S<:Union{AbstractVector{T},AbstractSet{T}}} =
  FinSetCollection{S,T}(collection)
Base.iterate(set::FinSetCollection, args...) = iterate(set.collection, args...)
Base.length(set::FinSetCollection) = length(set.collection)
# Fixed argument order: `in` takes the candidate element first so that
# `x in set` dispatches here instead of the generic iteration fallback.
Base.in(elem, set::FinSetCollection) = in(elem, set.collection)
function Base.show(io::IO, set::FinSetCollection)
  print(io, "FinSet(")
  show(io, set.collection)
  print(io, ")")
end
""" Finite set whose elements are rows of a table.
The underlying table should be compliant with Tables.jl. For the sake of
uniformity, the rows are provided as named tuples, which assumes that the table
is not "extremely wide". This should not be a major limitation in practice but
see the Tables.jl documentation for further discussion.
"""
@auto_hash_equals struct TabularSet{Table,Row} <: FinSet{Table,Row}
table::Table
function TabularSet(table::Table) where Table
schema = Tables.schema(table)
new{Table,NamedTuple{schema.names,Tuple{schema.types...}}}(table)
end
end
# A named tuple of columns is interpreted as a table.
FinSet(nt::NamedTuple) = TabularSet(nt)
# Iterate rows as named tuples via the Tables.jl interface.
Base.iterate(set::TabularSet, args...) =
  iterate(Tables.namedtupleiterator(set.table), args...)
Base.length(set::TabularSet) = Tables.rowcount(set.table)
Base.collect(set::TabularSet) = Tables.rowtable(set.table)
function Base.show(io::IO, set::TabularSet)
  print(io, "TabularSet(")
  show(io, set.table)
  print(io, ")")
end
function Base.show(io::IO, ::MIME"text/plain", set::TabularSet{T}) where T
print(io, "$(length(set))-element TabularSet{$T}")
if !get(io, :compact, false)
println(io, ":")
PrettyTables.pretty_table(io, set.table, nosubheader=true)
end
end
function Base.show(io::IO, ::MIME"text/html", set::TabularSet)
println(io, "<div class=\"tabular-set\">")
println(io, "$(length(set))-element TabularSet")
PrettyTables.pretty_table(io, set.table, backend=Val(:html), standalone=false,
nosubheader=true)
println(io, "</div>")
end
# Discrete categories
#--------------------
""" Discrete category on a finite set.
The only morphisms in a discrete category are the identities, which are here
identified with the objects.
"""
@auto_hash_equals struct DiscreteCat{Ob,S<:FinSet{<:Any,Ob}} <: FinCat{Ob,Ob}
  set::S  # the objects; the only morphisms are the identities
end
DiscreteCat(n::Integer) = DiscreteCat(FinSet(n))
FinCat(s::Union{FinSet,Integer}) = DiscreteCat(s)
ob_generators(C::DiscreteCat) = C.set
hom_generators(::DiscreteCat) = ()
is_discrete(::DiscreteCat) = true
graph(C::DiscreteCat{Int,FinSetInt}) = Graph(length(C.set))
# Identity morphisms are identified with the objects themselves.
dom(C::DiscreteCat{T}, f) where T = f::T
codom(C::DiscreteCat{T}, f) where T = f::T
id(C::DiscreteCat{T}, x) where T = x::T
# Only composites of equal identities exist in a discrete category.
compose(C::DiscreteCat{T}, f, g) where T = (f::T == g::T) ? f :
  error("Nontrivial composite in discrete category: $f != $g")
hom_map(F::FinDomFunctor{<:DiscreteCat}, x) = id(codom(F), ob_map(F, x))
Base.show(io::IO, C::DiscreteCat{Int,FinSetInt}) =
  print(io, "FinCat($(length(C.set)))")
# Finite functions
##################
""" Function between finite sets.
The function can be defined implicitly by an arbitrary Julia function, in which
case it is evaluated lazily, or explictly by a vector of integers. In the vector
representation, the function (1↦1, 2↦3, 3↦2, 4↦3), for example, is represented
by the vector [1,3,2,3].
This type is mildly generalized by [`FinDomFunction`](@ref).
"""
const FinFunction{S, S′, Dom <: FinSet{S}, Codom <: FinSet{S′}} =
SetFunction{Dom,Codom}
FinFunction(f::Function, dom, codom) =
SetFunctionCallable(f, FinSet(dom), FinSet(codom))
FinFunction(::typeof(identity), args...) =
IdentityFunction((FinSet(arg) for arg in args)...)
FinFunction(f::AbstractDict, args...) =
FinFunctionDict(f, (FinSet(arg) for arg in args)...)
function FinFunction(f::AbstractVector{Int}, args...; index=false)
if index == false
FinDomFunctionVector(f, (FinSet(arg) for arg in args)...)
else
index = index == true ? nothing : index
IndexedFinFunctionVector(f, args...; index=index)
end
end
FinFunction(f::AbstractVector{Int}; kw...) =
FinFunction(f, FinSet(isempty(f) ? 0 : maximum(f)); kw...)
Sets.show_type_constructor(io::IO, ::Type{<:FinFunction}) =
print(io, "FinFunction")
""" Function out of a finite set.
This class of functions is convenient because it is exactly the class that can
be represented explicitly by a vector of values from the codomain.
"""
const FinDomFunction{S, Dom<:FinSet{S}, Codom<:SetOb} = SetFunction{Dom,Codom}
FinDomFunction(f::Function, dom, codom) =
SetFunctionCallable(f, FinSet(dom), codom)
FinDomFunction(::typeof(identity), args...) =
IdentityFunction((FinSet(arg) for arg in args)...)
FinDomFunction(f::AbstractDict, args...) = FinDomFunctionDict(f, args...)
function FinDomFunction(f::AbstractVector, args...; index=false)
if index == false
FinDomFunctionVector(f, args...)
else
index = index == true ? nothing : index
IndexedFinDomFunctionVector(f, args...; index=index)
end
end
Sets.show_type_constructor(io::IO, ::Type{<:FinDomFunction}) =
print(io, "FinDomFunction")
# Note: Cartesian monoidal structure is implemented generically for Set but
# cocartesian only for FinSet.
@cocartesian_monoidal_instance FinSet FinFunction
Ob(C::FinCat{Int}) = FinSet(length(ob_generators(C)))
Ob(F::Functor{<:FinCat{Int}}) = FinDomFunction(collect_ob(F), Ob(codom(F)))
# Vector-based functions
#-----------------------
""" Function in **Set** represented by a vector.
The domain of this function is always of type `FinSet{Int}`, with elements of
the form ``{1,...,n}``.
"""
struct FinDomFunctionVector{T,V<:AbstractVector{T}, Codom<:SetOb{T}} <:
    FinDomFunction{Int,FinSetInt,Codom}
  func::V      # func[i] is the image of i; the domain is implicitly 1:length(func)
  codom::Codom
end
# Without an explicit codomain, use the type-level set of T.
FinDomFunctionVector(f::AbstractVector{T}) where T =
  FinDomFunctionVector(f, TypeSet{T}())
function FinDomFunctionVector(f::AbstractVector, dom::FinSet{Int}, codom)
  # The explicit domain is only validated against the vector, not stored.
  length(f) == length(dom) ||
    error("Length of vector $f does not match domain $dom")
  FinDomFunctionVector(f, codom)
end
dom(f::FinDomFunctionVector) = FinSet(length(f.func))
(f::FinDomFunctionVector)(x) = f.func[x]
function Base.show(io::IO, f::FinDomFunctionVector)
  print(io, "FinDomFunction($(f.func), ")
  Sets.show_domains(io, f)
  print(io, ")")
end
# Evaluate a lazily defined function into explicit vector form.
force(f::FinDomFunction{Int}) = FinDomFunctionVector(map(f, dom(f)), codom(f))
force(f::FinDomFunctionVector) = f
Base.collect(f::SetFunction) = force(f).func
""" Function in **FinSet** represented explicitly by a vector.
"""
const FinFunctionVector{S,T,V<:AbstractVector{T}} =
FinDomFunctionVector{T,V,<:FinSet{S,T}}
Base.show(io::IO, f::FinFunctionVector) =
print(io, "FinFunction($(f.func), $(length(dom(f))), $(length(codom(f))))")
Sets.do_compose(f::FinFunctionVector, g::FinDomFunctionVector) =
FinDomFunctionVector(g.func[f.func], codom(g))
# Indexed vector-based functions
#-------------------------------
""" Indexed function out of a finite set of type `FinSet{Int}`.
Works in the same way as the special case of [`IndexedFinFunctionVector`](@ref),
except that the index is typically a dictionary, not a vector.
"""
struct IndexedFinDomFunctionVector{T,V<:AbstractVector{T},Index,Codom<:SetOb{T}} <:
FinDomFunction{Int,FinSetInt,Codom}
func::V
index::Index
codom::Codom
end
IndexedFinDomFunctionVector(f::AbstractVector{T}; kw...) where T =
IndexedFinDomFunctionVector(f, TypeSet{T}(); kw...)
function IndexedFinDomFunctionVector(f::AbstractVector{T}, codom::SetOb{T};
index=nothing) where T
if isnothing(index)
index = Dict{T,Vector{Int}}()
for (i, x) in enumerate(f)
push!(get!(index, x) do; Int[] end, i)
end
end
IndexedFinDomFunctionVector(f, index, codom)
end
Base.:(==)(f::Union{FinDomFunctionVector,IndexedFinDomFunctionVector},
g::Union{FinDomFunctionVector,IndexedFinDomFunctionVector}) =
# Ignore index when comparing for equality.
f.func == g.func && codom(f) == codom(g)
function Base.show(io::IO, f::IndexedFinDomFunctionVector)
print(io, "FinDomFunction($(f.func), ")
Sets.show_domains(io, f)
print(io, ", index=true)")
end
dom(f::IndexedFinDomFunctionVector) = FinSet(length(f.func))
force(f::IndexedFinDomFunctionVector) = f
(f::IndexedFinDomFunctionVector)(x) = f.func[x]
""" Whether the given function is indexed, i.e., supports efficient preimages.
"""
is_indexed(f::SetFunction) = false
is_indexed(f::IdentityFunction) = true
is_indexed(f::IndexedFinDomFunctionVector) = true
is_indexed(f::FinDomFunctionVector{T,<:AbstractRange{T}}) where T = true
""" The preimage (inverse image) of the value y in the codomain.
"""
preimage(f::IdentityFunction, y) = SVector(y)
preimage(f::FinDomFunction, y) = [ x for x in dom(f) if f(x) == y ]
preimage(f::IndexedFinDomFunctionVector, y) = get_preimage_index(f.index, y)
@inline get_preimage_index(index::AbstractDict, y) = get(index, y, 1:0)
@inline get_preimage_index(index::AbstractVector, y) = index[y]
preimage(f::FinDomFunctionVector{T,<:AbstractRange{T}}, y::T) where T =
# Both `in` and `searchsortedfirst` are specialized for AbstractRange.
y ∈ f.func ? SVector(searchsortedfirst(f.func, y)) : SVector{0,Int}()
""" Indexed function between finite sets of type `FinSet{Int}`.
Indexed functions store both the forward map ``f: X → Y``, as a vector of
integers, and the backward map ``f: Y → X⁻¹``, as a vector of vectors of
integers, accessible through the [`preimage`](@ref) function. The backward map
is called the *index*. If it is not supplied through the keyword argument
`index`, it is computed when the object is constructed.
This type is mildly generalized by [`IndexedFinDomFunctionVector`](@ref).
"""
const IndexedFinFunctionVector{V,Index} =
IndexedFinDomFunctionVector{Int,V,Index,FinSetInt}
function IndexedFinFunctionVector(f::AbstractVector{Int}; index=nothing)
codom = isnothing(index) ? (isempty(f) ? 0 : maximum(f)) : length(index)
IndexedFinFunctionVector(f, codom; index=index)
end
function IndexedFinFunctionVector(f::AbstractVector{Int}, codom; index=nothing)
codom = FinSet(codom)
if isnothing(index)
index = [ Int[] for j in codom ]
for (i, j) in enumerate(f)
push!(index[j], i)
end
elseif length(index) != length(codom)
error("Index length $(length(index)) does not match codomain $codom")
end
IndexedFinDomFunctionVector(f, index, codom)
end
Base.show(io::IO, f::IndexedFinFunctionVector) =
print(io, "FinFunction($(f.func), $(length(dom(f))), $(length(codom(f))), index=true)")
# For now, we do not preserve or compose indices, only the function vectors.
Sets.do_compose(f::Union{FinFunctionVector,IndexedFinFunctionVector},
g::Union{FinDomFunctionVector,IndexedFinDomFunctionVector}) =
FinDomFunctionVector(g.func[f.func], codom(g))
# Dict-based functions
#---------------------
""" Function in **Set** represented by a dictionary.
The domain is a `FinSet{S}` where `S` is the type of the dictionary's `keys`
collection.
"""
@auto_hash_equals struct FinDomFunctionDict{K,D<:AbstractDict{K},Codom<:SetOb} <:
FinDomFunction{D,FinSet{AbstractSet{K},K},Codom}
func::D
codom::Codom
end
FinDomFunctionDict(d::AbstractDict{K,V}) where {K,V} =
FinDomFunctionDict(d, TypeSet{V}())
dom(f::FinDomFunctionDict) = FinSet(keys(f.func))
(f::FinDomFunctionDict)(x) = f.func[x]
function Base.show(io::IO, f::F) where F <: FinDomFunctionDict
Sets.show_type_constructor(io, F)
print(io, "(")
show(io, f.func)
print(io, ", ")
Sets.show_domains(io, f, domain=false)
print(io, ")")
end
force(f::FinDomFunction) =
FinDomFunctionDict(Dict(x => f(x) for x in dom(f)), codom(f))
force(f::FinDomFunctionDict) = f
""" Function in **FinSet** represented by a dictionary.
"""
const FinFunctionDict{K,D<:AbstractDict{K},Codom<:FinSet} =
FinDomFunctionDict{K,D,Codom}
FinFunctionDict(d::AbstractDict, codom::FinSet) = FinDomFunctionDict(d, codom)
FinFunctionDict(d::AbstractDict{K,V}) where {K,V} =
FinDomFunctionDict(d, FinSet(Set(values(d))))
Sets.do_compose(f::FinFunctionDict{K,D}, g::FinDomFunctionDict) where {K,D} =
FinDomFunctionDict(dicttype(D)(x => g.func[y] for (x,y) in pairs(f.func)),
codom(g))
# Limits
########
# Terminal object: the singleton FinSet(1) with the empty cone.
limit(Xs::EmptyDiagram{<:FinSet{Int}}) = Limit(Xs, SMultispan{0}(FinSet(1)))

universal(lim::Limit{<:FinSet{Int},<:EmptyDiagram}, cone::SMultispan{0}) =
  ConstantFunction(1, apex(cone), FinSet(1))

limit(Xs::SingletonDiagram{<:FinSet{Int}}) = limit(Xs, SpecializeLimit())

# Binary product: {1..m} × {1..n} linearized via Cartesian indices.
function limit(Xs::ObjectPair{<:FinSet{Int}})
  m, n = length.(Xs)
  indices = CartesianIndices((m, n))
  π1 = FinFunction(i -> indices[i][1], m*n, m)
  π2 = FinFunction(i -> indices[i][2], m*n, n)
  Limit(Xs, Span(π1, π2))
end

function universal(lim::Limit{<:FinSet{Int},<:ObjectPair}, cone::Span)
  f, g = cone
  m, n = length.(codom.(cone))
  indices = LinearIndices((m, n))
  FinFunction(i -> indices[f(i),g(i)], apex(cone), ob(lim))
end
# n-ary product via Cartesian indexing of the factor cardinalities.
function limit(Xs::DiscreteDiagram{<:FinSet{Int}})
  ns = length.(Xs)
  indices = CartesianIndices(Tuple(ns))
  n = prod(ns)
  πs = [FinFunction(i -> indices[i][j], n, ns[j]) for j in 1:length(ns)]
  Limit(Xs, Multispan(FinSet(n), πs))
end

function universal(lim::Limit{<:FinSet{Int},<:DiscreteDiagram}, cone::Multispan)
  ns = length.(codom.(cone))
  indices = LinearIndices(Tuple(ns))
  FinFunction(i -> indices[(f(i) for f in cone)...], apex(cone), ob(lim))
end

# Equalizer of two parallel maps: the subset where they agree, as an inclusion.
function limit(pair::ParallelPair{<:FinSet{Int}})
  f, g = pair
  m = length(dom(pair))
  eq = FinFunction(filter(i -> f(i) == g(i), 1:m), m)
  Limit(pair, SMultispan{1}(eq))
end

# Equalizer of any number of parallel maps; all must agree with the first.
function limit(para::ParallelMorphisms{<:FinSet{Int}})
  @assert !isempty(para)
  f1, frest = para[1], para[2:end]
  m = length(dom(para))
  eq = FinFunction(filter(i -> all(f1(i) == f(i) for f in frest), 1:m), m)
  Limit(para, SMultispan{1}(eq))
end

function universal(lim::Limit{<:FinSet{Int},<:ParallelMorphisms},
                   cone::SMultispan{1})
  # The equalizer inclusion ι is sorted, so factor h through it by binary search.
  ι = collect(incl(lim))
  h = only(cone)
  FinFunction(Int[only(searchsorted(ι, h(i))) for i in dom(h)], length(ι))
end
""" Limit of finite sets with a reverse mapping or index into the limit set.
This type provides a fallback for limit algorithms that do not come with a
specialized algorithm to apply the universal property of the limit. In such
cases, you can explicitly construct a mapping from tuples of elements in the
feet of the limit cone to elements in the apex of the cone.
The index is constructed the first time it is needed. Thus there is no extra
cost to using this type if the universal property will not be invoked.
"""
mutable struct FinSetIndexedLimit{Ob<:FinSet,Diagram,Cone<:Multispan{Ob}} <:
    AbstractLimit{Ob,Diagram}
  diagram::Diagram
  cone::Cone
  index::Union{AbstractDict,Nothing}  # built lazily; see make_limit_index
end

# By default the reverse index is deferred until the universal property is used.
FinSetIndexedLimit(diagram, cone::Multispan) =
  FinSetIndexedLimit(diagram, cone, nothing)
# Build the reverse map from tuples of leg values to apex elements.
function make_limit_index(cone::Multispan{<:FinSet})
  πs = Tuple(legs(cone))
  index = Dict{Tuple{map(eltype∘codom, πs)...}, eltype(apex(cone))}()
  for x in apex(cone)
    index[map(π -> π(x), πs)] = x
  end
  return index
end

function universal(lim::FinSetIndexedLimit, cone::Multispan)
  # Lazily construct the index on first use (see FinSetIndexedLimit docstring).
  if isnothing(lim.index)
    lim.index = make_limit_index(lim.cone)
  end
  fs = Tuple(legs(cone))
  FinFunction(Int[lim.index[map(f -> f(x), fs)] for x in apex(cone)],
              apex(cone), ob(lim))
end
""" Algorithm for limit of cospan or multicospan with feet being finite sets.
In the context of relational databases, such limits are called *joins*. The
trivial join algorithm is [`NestedLoopJoin`](@ref), which is algorithmically
equivalent to the generic algorithm `ComposeProductEqualizer`. The algorithms
[`HashJoin`](@ref) and [`SortMergeJoin`](@ref) are usually much faster. If you
are unsure what algorithm to pick, use [`SmartJoin`](@ref).
"""
abstract type JoinAlgorithm <: LimitAlgorithm end

""" Meta-algorithm for joins that attempts to pick an appropriate algorithm.
"""
struct SmartJoin <: JoinAlgorithm end

# Default join entry point; callers may select a specific algorithm via `alg`.
function limit(cospan::Multicospan{<:SetOb,<:FinDomFunction{Int}};
               alg::LimitAlgorithm=ComposeProductEqualizer())
  limit(cospan, alg)
end
# Heuristic join: special-case constant legs, otherwise fall back to HashJoin.
function limit(cospan::Multicospan{<:SetOb,<:FinDomFunction{Int}}, ::SmartJoin)
# Handle the important special case where one of the legs is a constant
# (function out of a singleton set). In this case, we just need to take a
# product of preimages of the constant value.
funcs = legs(cospan)
i = findfirst(f -> length(dom(f)) == 1, funcs)
if !isnothing(i)
c = funcs[i](1)
# Restrict every other leg to the preimage of the constant value `c`.
ιs = map(deleteat(funcs, i)) do f
FinFunction(preimage(f, c), dom(f))
end
# With a single remaining leg there is nothing to multiply.
x, πs = if length(ιs) == 1
dom(only(ιs)), ιs
else
prod = product(map(dom, ιs))
ob(prod), map(compose, legs(prod), ιs)
end
# Reinsert a (constant) projection for the singleton leg at position `i`.
πs = insert(πs, i, ConstantFunction(1, x, FinSet(1)))
return FinSetIndexedLimit(cospan, Multispan(πs))
end
# In the general case, for now we always just do a hash join, although
# sort-merge joins can sometimes be faster.
limit(cospan, HashJoin())
end
# Non-mutating `deleteat`/`insert` helpers that work uniformly on static and
# ordinary vectors. Static vectors carry their length in the type, so they
# require the StaticArrays versions rather than `deleteat!`/`insert!`.
deleteat(vec::StaticVector, i) = StaticArrays.deleteat(vec, i)
deleteat(vec::Vector, i) = deleteat!(copy(vec), i)
# `typejoin` widens the element type so the inserted value always fits.
insert(vec::StaticVector{N,T}, i, x::S) where {N,T,S} =
StaticArrays.insert(similar_type(vec, typejoin(T,S))(vec), i, x)
insert(vec::Vector{T}, i, x::S) where {T,S} =
insert!(collect(typejoin(T,S), vec), i, x)
""" [Nested-loop join](https://en.wikipedia.org/wiki/Nested_loop_join) algorithm.
This is the naive algorithm for computing joins.
"""
struct NestedLoopJoin <: JoinAlgorithm end
function limit(cospan::Multicospan{<:SetOb,<:FinDomFunction{Int}},
::NestedLoopJoin)
# A nested-loop join is algorithmically the same as `ComposeProductEqualizer`,
# but for completeness and performance we give a direct implementation here.
funcs = legs(cospan)
ns = map(length, feet(cospan))
πs = map(_ -> Int[], funcs)
# Enumerate the full product of the feet, keeping the tuples on which all
# legs agree (quadratic-and-worse; fine only for small inputs).
for I in CartesianIndices(Tuple(ns))
values = map((f, i) -> f(I[i]), funcs, eachindex(funcs))
if all(==(values[1]), values)
for i in eachindex(πs)
push!(πs[i], I[i])
end
end
end
cone = Multispan(map((π,f) -> FinFunction(π, dom(f)), πs, funcs))
FinSetIndexedLimit(cospan, cone)
end
""" [Sort-merge join](https://en.wikipedia.org/wiki/Sort-merge_join) algorithm.
"""
struct SortMergeJoin <: JoinAlgorithm end
function limit(cospan::Multicospan{<:SetOb,<:FinDomFunction{Int}},
::SortMergeJoin)
funcs = map(collect, legs(cospan))
sorts = map(sortperm, funcs)
# `values[i]` holds the codomain value at the front of leg i's current run;
# `ranges[i]` indexes (into the sort permutation) that run of equal values.
values = similar_mutable(funcs, eltype(apex(cospan)))
ranges = similar_mutable(funcs, UnitRange{Int})
# Advance leg i to its next run of equal values, updating `values[i]` and
# storing the run's index range (an empty range once past the end).
function next_range!(i::Int)
f, sort = funcs[i], sorts[i]
n = length(f)
start = last(ranges[i]) + 1
ranges[i] = if start <= n
val = values[i] = f[sort[start]]
stop = start + 1
while stop <= n && f[sort[stop]] == val; stop += 1 end
start:(stop - 1)
else
start:n
end
end
πs = map(_ -> Int[], funcs)
# Initialize every leg at its first run (0:0 makes `start` come out as 1).
for i in eachindex(ranges)
ranges[i] = 0:0
next_range!(i)
end
# Multiway merge: when all fronts agree, emit the product of the current
# runs and advance every leg; otherwise advance the leg with the smallest
# front value.
while !any(isempty, ranges)
if all(==(values[1]), values)
indices = CartesianIndices(Tuple(ranges))
for i in eachindex(πs)
append!(πs[i], (sorts[i][I[i]] for I in indices))
next_range!(i)
end
else
next_range!(argmin(values))
end
end
cone = Multispan(map((π,f) -> FinFunction(π, length(f)), πs, funcs))
FinSetIndexedLimit(cospan, cone)
end
# Like `similar`, but guaranteeing the result supports `setindex!`.
similar_mutable(x::AbstractVector, T::Type) = similar(x, T)
function similar_mutable(x::StaticVector{N}, T::Type) where N
# `similar` always returns an `MVector` but `setindex!(::MVector, args...)`
# only works when the element type is a bits-type.
isbitstype(T) ? similar(x, T) : SizedVector{N}(Vector{T}(undef, N))
end
""" [Hash join](https://en.wikipedia.org/wiki/Hash_join) algorithm.
"""
# Usually the fastest algorithm here; see `limit(::Multicospan, ::HashJoin)`.
struct HashJoin <: JoinAlgorithm end
function limit(cospan::Multicospan{<:SetOb,<:FinDomFunction{Int}}, ::HashJoin)
  # Standard hash-join terminology: the *probe* input is only iterated over,
  # so it need not be indexed, whereas every *build* input must be indexed.
  #
  # Choose as probe the unindexed leg with the largest domain. Already-indexed
  # legs score -1, so one of them is picked only when all legs are indexed, in
  # which case the first wins.
  fs = legs(cospan)
  scores = map(f -> is_indexed(f) ? -1 : length(dom(f)), fs)
  probe_idx = argmax(scores)
  probe = fs[probe_idx]
  builds = map(ensure_indexed, deleteat(fs, probe_idx))
  πs_build, π_probe = hash_join(builds, probe)
  FinSetIndexedLimit(cospan, Multispan(insert(πs_build, probe_idx, π_probe)))
end
# Multiway hash join: probe `probe` against the indexed `builds`, returning
# the projection vectors for the builds and for the probe (as FinFunctions).
function hash_join(builds::AbstractVector{<:FinDomFunction{Int}},
probe::FinDomFunction{Int})
π_builds, πp = map(_ -> Int[], builds), Int[]
for y in dom(probe)
val = probe(y)
# For each build, the elements mapping to `val` (fast via build indices).
preimages = map(build -> preimage(build, val), builds)
n_preimages = Tuple(map(length, preimages))
n = prod(n_preimages)
if n > 0
# Emit the full product of matching tuples, and `y` repeated to match.
indices = CartesianIndices(n_preimages)
for j in eachindex(π_builds)
πb, xs = π_builds[j], preimages[j]
append!(πb, (xs[I[j]] for I in indices))
end
append!(πp, (y for i in 1:n))
end
end
(map(FinFunction, π_builds, map(dom, builds)), FinFunction(πp, dom(probe)))
end
# Single-build special case: delegate to the binary join and wrap the result.
function hash_join(builds::StaticVector{1,<:FinDomFunction{Int}},
                   probe::FinDomFunction{Int})
  π_build, π_probe = hash_join(builds[1], probe)
  (SVector((π_build,)), π_probe)
end

# Binary hash join: for each probe element, emit every matching build element.
function hash_join(build::FinDomFunction{Int}, probe::FinDomFunction{Int})
  build_out, probe_out = Int[], Int[]
  for y in dom(probe)
    matches = preimage(build, probe(y))
    isempty(matches) && continue
    append!(build_out, matches)
    append!(probe_out, (y for _ in eachindex(matches)))
  end
  (FinFunction(build_out, dom(build)), FinFunction(probe_out, dom(probe)))
end
# Return `f` unchanged if it already carries a preimage index; otherwise
# rebuild it with `index=true` so that `preimage` lookups are fast.
ensure_indexed(f::FinFunction{Int,Int}) = is_indexed(f) ? f :
FinFunction(collect(f), codom(f), index=true)
ensure_indexed(f::FinDomFunction{Int}) = is_indexed(f) ? f :
FinDomFunction(collect(f), index=true)
# Limit (join) of a bipartite free diagram of finite sets: preprocess with
# equalizers and pairings, then greedily perform pullbacks and recurse.
function limit(d::BipartiteFreeDiagram{Ob,Hom}) where
{Ob<:SetOb, Hom<:FinDomFunction{Int}}
# As in a pullback, this method assumes that all objects in layer 2 have
# incoming morphisms.
@assert !any(isempty(incident(d, v, :tgt)) for v in vertices₂(d))
d_original = d
# For uniformity, e.g. when pairing below, ensure that all objects in layer 2
# are type sets.
if !all(x isa TypeSet for x in ob₂(d))
d = map(d, ob₁=identity, ob₂=ensure_type_set, hom=ensure_type_set_codom)
end
# It is generally optimal to compute all equalizers (self joins) first, so as
# to reduce the sizes of later pullbacks (joins) and products (cross joins).
d, ιs = equalize_all(d)
# Layer-2 vertices with a single incoming edge impose no constraint.
rem_vertices₂!(d, [v for v in vertices₂(d) if
length(incident(d, v, :tgt)) == 1])
# Perform all pairings before computing any joins.
d = pair_all(d)
# Having done this preprocessing, if there are any nontrivial joins, perform
# one of them and recurse; otherwise, we have at most a product to compute.
#
# In the binary case (`nv₁(d) == 2`), the preprocessing guarantees that there
# is at most one nontrivial join, so there are no choices to make. When there
# are multiple possible joins, do the one with smallest base cardinality
# (product of sizes of relations to join). This is a simple greedy heuristic.
# For more control over the order of the joins, create a UWD schedule.
if nv₂(d) == 0
# FIXME: Shouldn't need FinSetIndexedLimit in these special cases.
if nv₁(d) == 1
FinSetIndexedLimit(d_original, SMultispan{1}(ιs[1]))
else
πs = legs(product(SVector(ob₁(d)...)))
FinSetIndexedLimit(d_original, Multispan(map(compose, πs, ιs)))
end
else
# Select the join to perform.
v = argmin(map(vertices₂(d)) do v
edges = incident(d, v, :tgt)
@assert length(edges) >= 2
prod(e -> length(dom(hom(d, e))), edges)
end)
# Compute the pullback (inner join).
join_edges = incident(d, v, :tgt)
to_join = src(d, join_edges)
to_keep = setdiff(vertices₁(d), to_join)
pb = pullback(SVector(hom(d, join_edges)...), alg=SmartJoin())
# Create a new bipartite diagram with joined vertices.
d_joined = BipartiteFreeDiagram{Ob,Hom}()
copy_parts!(d_joined, d, V₁=to_keep, V₂=setdiff(vertices₂(d),v), E=edges(d))
joined = add_vertex₁!(d_joined, ob₁=apex(pb))
# Re-route the remaining edges of the joined vertices through the pullback.
for (u, π) in zip(to_join, legs(pb))
for e in setdiff(incident(d, u, :src), join_edges)
set_subparts!(d_joined, e, src=joined, hom=π⋅hom(d,e))
end
end
rem_edges!(d_joined, join_edges)
# Recursively compute the limit of the new diagram.
lim = limit(d_joined)
# Assemble limit cone from cones for pullback and reduced limit.
πs = Vector{Hom}(undef, nv₁(d))
for (i, u) in enumerate(to_join)
πs[u] = compose(last(legs(lim)), legs(pb)[i], ιs[u])
end
for (i, u) in enumerate(to_keep)
πs[u] = compose(legs(lim)[i], ιs[u])
end
FinSetIndexedLimit(d_original, Multispan(πs))
end
end
# Widen a finite set to the type set over its element type (no-op for TypeSet).
ensure_type_set(s::FinSet) = TypeSet(eltype(s))
ensure_type_set(s::TypeSet) = s
# Recast a function so that its codomain is a TypeSet, preserving any index.
ensure_type_set_codom(f::FinFunction) =
SetFunctionCallable(f, dom(f), TypeSet(eltype(codom(f))))
ensure_type_set_codom(f::IndexedFinFunctionVector) =
IndexedFinDomFunctionVector(f.func, index=f.index)
ensure_type_set_codom(f::FinDomFunction) = f
""" Compute all possible equalizers in a bipartite free diagram.
The result is a new bipartite free diagram that has the same vertices but is
*simple*, i.e., has no multiple edges. The list of inclusion morphisms into
layer 1 of the original diagram is also returned.
"""
function equalize_all(d::BipartiteFreeDiagram{Ob,Hom}) where {Ob,Hom}
d_simple = BipartiteFreeDiagram{Ob,Hom}()
copy_parts!(d_simple, d, V₂=vertices₂(d))
ιs = map(vertices₁(d)) do u
# Collect outgoing edges of u, key-ed by target vertex.
out_edges = OrderedDict{Int,Vector{Int}}()
for e in incident(d, u, :src)
push!(get!(out_edges, tgt(d,e)) do; Int[] end, e)
end
# Equalize all sets of parallel edges out of u.
# Each successive equalizer further restricts the running inclusion ι.
ι = id(ob₁(d, u))
for es in values(out_edges)
if length(es) > 1
fs = SVector((ι⋅f for f in hom(d, es))...)
ι = incl(equalizer(fs)) ⋅ ι
end
end
# One representative edge per target survives, precomposed with ι.
add_vertex₁!(d_simple, ob₁=dom(ι)) # == u
for (v, es) in pairs(out_edges)
add_edge!(d_simple, u, v, hom=ι⋅hom(d, first(es)))
end
ι
end
(d_simple, ιs)
end
""" Perform all possible pairings in a bipartite free diagram.
The resulting diagram has the same layer 1 vertices but a possibly reduced set
of layer 2 vertices. Layer 2 vertices are merged when they have exactly the same
multiset of adjacent vertices.
"""
function pair_all(d::BipartiteFreeDiagram{Ob,Hom}) where {Ob,Hom}
d_paired = BipartiteFreeDiagram{Ob,Hom}()
copy_parts!(d_paired, d, V₁=vertices₁(d))
# Construct mapping to V₂ vertices from multisets of adjacent V₁ vertices.
outmap = OrderedDict{Vector{Int},Vector{Int}}()
for v in vertices₂(d)
push!(get!(outmap, sort(inneighbors(d, v))) do; Int[] end, v)
end
for (srcs, tgts) in pairs(outmap)
# Incoming edges of each target, aligned by their source vertex.
in_edges = map(tgts) do v
sort(incident(d, v, :tgt), by=e->src(d,e))
end
if length(tgts) == 1
# Nothing to pair: copy the lone vertex and its edges over unchanged.
v = add_vertex₂!(d_paired, ob₂=ob₂(d, only(tgts)))
add_edges!(d_paired, srcs, fill(v, length(srcs)),
hom=hom(d, only(in_edges)))
else
# Merge the targets into a product and pair the parallel edges.
prod = product(SVector(ob₂(d, tgts)...))
v = add_vertex₂!(d_paired, ob₂=ob(prod))
for (i,u) in enumerate(srcs)
f = pair(prod, hom(d, getindex.(in_edges, i)))
add_edge!(d_paired, u, v, hom=f)
end
end
end
d_paired
end
""" Limit of general diagram of FinSets computed by product-then-filter.
See `Limits.CompositePullback` for a very similar construction.
"""
struct FinSetCompositeLimit{Ob<:FinSet, Diagram,
Cone<:Multispan{Ob}, Prod<:Product{Ob},
Incl<:FinFunction} <: AbstractLimit{Ob,Diagram}
diagram::Diagram
cone::Cone
prod::Prod # Product of all objects in the diagram.
incl::Incl # Inclusion for the "multi-equalizer" in general formula.
end
# Reduce a free diagram to a functor out of its free category.
limit(d::FreeDiagram{<:FinSet{Int}}) = limit(FinDomFunctor(d))
function limit(F::Functor{<:FinCat{Int},<:TypeCat{<:FinSet{Int}}})
# Uses the general formula for limits in Set (Leinster, 2014, Basic Category
# Theory, Example 5.1.22 / Equation 5.16). This method is simple and direct,
# but extremely inefficient!
J = dom(F)
prod = product(map(x -> ob_map(F, x), ob_generators(J)))
n, πs = length(ob(prod)), legs(prod)
# Keep only the product elements on which every generating morphism commutes.
ι = FinFunction(filter(1:n) do i
all(hom_generators(J)) do f
s, t, h = dom(J, f), codom(J, f), hom_map(F, f)
h(πs[s](i)) == πs[t](i)
end
end, n)
cone = Multispan(dom(ι), map(x -> ι⋅πs[x], ob_generators(J)))
FinSetCompositeLimit(F, cone, prod, ι)
end
# Factor a cone through the composite limit: factor through the product, then
# locate each image within the inclusion of the "multi-equalizer" (whose
# elements, produced by `filter(1:n, ...)`, are in increasing order, so a
# binary search applies).
function universal(lim::FinSetCompositeLimit, cone::Multispan{<:FinSet{Int}})
  incl_elems = collect(lim.incl)
  through_prod = universal(lim.prod, cone)
  images = Int[only(searchsorted(incl_elems, through_prod(i)))
               for i in dom(through_prod)]
  FinFunction(images, apex(cone), ob(lim))
end
""" Limit of finite sets viewed as a table.
Any limit of finite sets can be canonically viewed as a table
([`TabularSet`](@ref)) whose columns are the legs of the limit cone and whose
rows correspond to elements of the limit object. To construct this table from an
already computed limit, call `TabularLimit(::AbstractLimit; ...)`. The column
names of the table are given by the optional argument `names`.
In this tabular form, applying the universal property of the limit is trivial
since it is just tupling. Thus, this representation can be useful when the
original limit algorithm does not support efficient application of the universal
property. On the other hand, this representation has the disadvantage of
generally making the element type of the limit set more complicated.
"""
const TabularLimit = Limit{<:TabularSet}
function TabularLimit(lim::AbstractLimit; names=nothing)
πs = legs(lim)
# Default to positional column names 1:k when none are supplied.
names = isnothing(names) ? (1:length(πs)) : names
names = Tuple(column_name(name) for name in names)
table = TabularSet(NamedTuple{names}(Tuple(map(collect, πs))))
# Each leg of the cone becomes a column accessor on the table's rows.
cone = Multispan(table, map(πs, eachindex(πs)) do π, i
FinFunction(row -> Tables.getcolumn(row, i), table, codom(π))
end)
Limit(lim.diagram, cone)
end
# The universal property is just tupling the cone legs' values into a row.
function universal(lim::Limit{<:TabularSet{Table,Row}},
cone::Multispan) where {Table,Row}
fs = Tuple(legs(cone))
FinFunction(x -> Row(map(f -> f(x), fs)), apex(cone), ob(lim))
end
# Normalize a column identifier to a `Symbol`.
column_name(name) = Symbol(name)
# Integer identifiers get the same default names as DataFrames.jl ("x1", "x2", …).
column_name(i::Integer) = Symbol("x", i)
# Colimits
##########
# Colimits in Skel(FinSet)
#-------------------------
# Initial object: the empty set with an empty cocone.
colimit(Xs::EmptyDiagram{<:FinSet{Int}}) = Colimit(Xs, SMulticospan{0}(FinSet(0)))
function universal(colim::Initial{<:FinSet{Int}}, cocone::SMulticospan{0})
cod = apex(cocone)
# The unique map out of the empty set, with the requested codomain.
FinDomFunction(SVector{0,eltype(cod)}(), cod)
end
colimit(Xs::SingletonDiagram{<:FinSet{Int}}) = colimit(Xs, SpecializeColimit())
# Binary coproduct 1:m + 1:n = 1:(m+n), the summands as contiguous blocks.
function colimit(Xs::ObjectPair{<:FinSet{Int}})
m, n = length.(Xs)
ι1 = FinFunction(1:m, m, m+n)
ι2 = FinFunction(m+1:m+n, n, m+n)
Colimit(Xs, Cospan(ι1, ι2))
end
# Copairing [f,g]: concatenate the value vectors of the two maps.
function universal(colim::BinaryCoproduct{<:FinSet{Int}}, cocone::Cospan)
f, g = cocone
FinDomFunction(vcat(collect(f), collect(g)), ob(colim), apex(cocone))
end
# Coproduct of finitely many finite sets: concatenate into 1:n, including each
# summand as a contiguous block of indices.
function colimit(Xs::DiscreteDiagram{<:FinSet{Int}})
  sizes = length.(Xs)
  total = sum(sizes)
  offsets = [0; cumsum(sizes)]
  inclusions = [FinFunction((1:sizes[j]) .+ offsets[j], sizes[j], total)
                for j in eachindex(sizes)]
  Colimit(Xs, Multicospan(FinSet(total), inclusions))
end
# Copair a family of maps out of a coproduct by concatenating their values.
function universal(colim::Coproduct{<:FinSet{Int}}, cocone::Multicospan)
  cod = apex(cocone)
  vals = mapreduce(collect, vcat, cocone, init=eltype(cod)[])
  FinDomFunction(vals, ob(colim), cod)
end
# Coequalizer of a parallel pair f, g: glue f(i) with g(i) for every i in a
# union-find over the codomain, then project onto the quotient set.
function colimit(pair::ParallelPair{<:FinSet{Int}})
  f, g = pair
  partition = IntDisjointSets(length(codom(pair)))
  foreach(i -> union!(partition, f(i), g(i)), 1:length(dom(pair)))
  Colimit(pair, SMulticospan{1}(quotient_projection(partition)))
end
""" Coequalizer of any number of parallel morphisms between finite sets.

Unions `f1(i)` with `f(i)` for every other morphism `f` in a disjoint-set
forest over the codomain, then projects onto the quotient.
"""
function colimit(para::ParallelMorphisms{<:FinSet{Int}})
  # Validate input explicitly: `@assert` is for internal invariants and may be
  # disabled at higher optimization levels, so it must not guard user input.
  isempty(para) &&
    throw(ArgumentError("Coequalizer requires at least one morphism"))
  f1, frest = para[1], para[2:end]
  m, n = length(dom(para)), length(codom(para))
  sets = IntDisjointSets(n)
  for i in 1:m
    for f in frest
      union!(sets, f1(i), f(i))
    end
  end
  Colimit(para, SMulticospan{1}(quotient_projection(sets)))
end
# Factor a cocone leg through the coequalizer via the quotient projection.
universal(coeq::Coequalizer{<:FinSet{Int}}, cocone::SMulticospan{1}) =
  pass_to_quotient(proj(coeq), only(cocone))

""" Create projection map π: X → X/∼ from partition of X.
"""
function quotient_projection(sets::IntDisjointSets)
  reps = map(i -> find_root!(sets, i), 1:length(sets))
  roots = sort(unique(reps))
  # Relabel each representative by its rank among the sorted roots, so the
  # quotient set is 1:length(roots).
  labels = [searchsortedfirst(roots, r) for r in reps]
  FinFunction(labels, length(roots))
end
""" Given h: X → Y, pass to quotient q: X/~ → Y under projection π: X → X/~.
"""
function pass_to_quotient(π::FinFunction{Int,Int}, h::FinFunction{Int,Int})
  @assert dom(π) == dom(h)
  # 0 is a safe sentinel for "unassigned": FinFunction values lie in 1:n.
  q = zeros(Int, length(codom(π)))
  for i in dom(h)
    class = π(i)
    value = h(i)
    if q[class] == 0
      q[class] = value
    elseif q[class] != value
      error("Quotient map of colimit is ill-defined")
    end
  end
  any(==(0), q) && error("Projection map is not surjective")
  FinFunction(q, codom(h))
end
# Variant for maps into arbitrary sets: use `Some`/`nothing` instead of a
# numeric sentinel, since codomain elements may be any type.
function pass_to_quotient(π::FinFunction{Int,Int}, h::FinDomFunction{Int})
  @assert dom(π) == dom(h)
  T = eltype(codom(h))
  q = Vector{Union{Some{T},Nothing}}(nothing, length(codom(π)))
  for i in dom(h)
    class = π(i)
    value = h(i)
    if isnothing(q[class])
      q[class] = Some(value)
    elseif something(q[class]) != value
      error("Quotient map of colimit is ill-defined")
    end
  end
  any(isnothing, q) && error("Projection map is not surjective")
  FinDomFunction(map(something, q), codom(h))
end
# Default algorithm for (multi-)pushouts of finite sets.
function colimit(span::Multispan{<:FinSet{Int}})
colimit(span, ComposeCoproductCoequalizer())
end
""" Colimit of general diagram of FinSets computed by coproduct-then-quotient.
See `Limits.CompositePushout` for a very similar construction.
"""
struct FinSetCompositeColimit{Ob<:FinSet, Diagram,
Cocone<:Multicospan{Ob}, Coprod<:Coproduct{Ob},
Proj<:FinFunction} <: AbstractColimit{Ob,Diagram}
diagram::Diagram
cocone::Cocone
coprod::Coprod # Coproduct of all objects in the diagram.
proj::Proj # Projection for the "multi-coequalizer" in general formula.
end
function colimit(d::BipartiteFreeDiagram{<:FinSet{Int}})
# As in a pushout, this method assumes that all objects in layer 1 have
# outgoing morphisms so that they can be excluded from the coproduct.
@assert !any(isempty(incident(d, u, :src)) for u in vertices₁(d))
coprod = coproduct(ob₂(d))
n, ιs = length(ob(coprod)), legs(coprod)
sets = IntDisjointSets(n)
# For every layer-1 object, glue the images of each of its elements under
# consecutive pairs of outgoing maps (pairwise gluing suffices by transitivity
# of the union-find).
for u in vertices₁(d)
out_edges = incident(d, u, :src)
for (e1, e2) in zip(out_edges[1:end-1], out_edges[2:end])
h1, h2 = hom(d, e1), hom(d, e2)
ι1, ι2 = ιs[tgt(d, e1)], ιs[tgt(d, e2)]
for i in ob₁(d, u)
union!(sets, ι1(h1(i)), ι2(h2(i)))
end
end
end
π = quotient_projection(sets)
cocone = Multicospan(codom(π), [ ιs[i]⋅π for i in vertices₂(d) ])
FinSetCompositeColimit(d, cocone, coprod, π)
end
# Reduce a free diagram to a functor out of its free category.
colimit(d::FreeDiagram{<:FinSet{Int}}) = colimit(FinDomFunctor(d))
function colimit(F::Functor{<:FinCat{Int},<:TypeCat{<:FinSet{Int}}})
# Uses the general formula for colimits in Set (Leinster, 2014, Basic Category
# Theory, Example 5.2.16).
J = dom(F)
coprod = coproduct(map(x -> ob_map(F, x), ob_generators(J)))
n, ιs = length(ob(coprod)), legs(coprod)
sets = IntDisjointSets(n)
# Identify each element with its image under every generating morphism.
for f in hom_generators(J)
s, t, h = dom(J, f), codom(J, f), hom_map(F, f)
for i in dom(h)
union!(sets, ιs[s](i), ιs[t](h(i)))
end
end
π = quotient_projection(sets)
cocone = Multicospan(codom(π), map(x -> ιs[x]⋅π, ob_generators(J)))
FinSetCompositeColimit(F, cocone, coprod, π)
end
# Factor a cocone through the composite colimit: copair out of the coproduct,
# then pass to the quotient.
function universal(colim::FinSetCompositeColimit, cocone::Multicospan)
  copairing = universal(colim.coprod, cocone)
  pass_to_quotient(colim.proj, copairing)
end
# Colimits with names
#--------------------
""" Compute colimit of finite sets whose elements are meaningfully named.
This situation seems to be mathematically uninteresting but is practically
important. The colimit is computed by reduction to the skeleton of **FinSet**
(`FinSet{Int}`) and the names are assigned afterwards, following some reasonable
conventions and add tags where necessary to avoid name clashes.
"""
struct NamedColimit <: ColimitAlgorithm end
# Dispatch hook: diagrams of named finite sets default to the named algorithm.
function colimit(::Type{<:Tuple{<:FinSet{<:Any,T},<:FinFunction}}, d) where
{T <: Union{Symbol,AbstractString}}
colimit(d, NamedColimit())
end
function colimit(d::FixedShapeFreeDiagram{<:FinSet{<:Any,T},Hom},
alg::NamedColimit) where {T,Hom}
# Reducing to the case of bipartite free diagrams is a bit lazy, but at least
# using `SpecializeColimit` below should avoid some gross inefficiencies.
colimit(BipartiteFreeDiagram{FinSet{<:Any,T},Hom}(d), alg)
end
function colimit(d::BipartiteFreeDiagram{<:FinSet{<:Any,T}}, ::NamedColimit) where T
# Compute colimit of diagram in the skeleton of FinSet (`FinSet{Int}`).
# Note: no performance would be gained by using `DisjointSets{T}` from
# DataStructures.jl because it is just a wrapper around `IntDisjointSets` that
# internally builds the very same indices that we use below.
sets₁_skel = map(set -> skeletize(set, index=false), ob₁(d))
sets₂_skel = map(set -> skeletize(set, index=true), ob₂(d))
funcs = map(edges(d)) do e
skeletize(hom(d,e), sets₁_skel[src(d,e)], sets₂_skel[tgt(d,e)])
end
# Rebuild the diagram over skeletal sets and take its colimit there.
d_skel = BipartiteFreeDiagram{FinSetInt,eltype(funcs)}()
add_vertices₁!(d_skel, nv₁(d), ob₁=dom.(sets₁_skel))
add_vertices₂!(d_skel, nv₂(d), ob₂=dom.(sets₂_skel))
add_edges!(d_skel, src(d), tgt(d), hom=funcs)
colim_skel = colimit(d_skel, SpecializeColimit())
# Assign elements/names to the colimit set.
elems = Vector{T}(undef, length(apex(colim_skel)))
for (ι, Y) in zip(colim_skel, sets₂_skel)
for i in dom(Y)
elems[ι(i)] = Y(i)
end
end
# The vector should already be filled, but to reduce arbitrariness we prefer
# names from the layer 1 sets whenever possible. For example, when computing a
# pushout, we prefer names from the apex of cospan to names from the feet.
for (u, X) in zip(vertices₁(d_skel), sets₁_skel)
e = first(incident(d_skel, u, :src))
f, ι = hom(d_skel, e), legs(colim_skel)[tgt(d_skel, e)]
for i in dom(X)
elems[ι(f(i))] = X(i)
end
end
# Eliminate clashes in provisional list of names.
unique_by_tagging!(elems)
# Rebuild the inclusions as name-to-name maps into the named colimit set.
ιs = map(colim_skel, sets₂_skel) do ι, Y
FinFunction(Dict(Y(i) => elems[ι(i)] for i in dom(Y)), FinSet(elems))
end
Colimit(d, Multicospan(FinSet(elems), ιs))
end
# Present a finite set skeletally: the function sending 1:n to its elements.
# `index=true` builds a preimage index for fast reverse lookups.
function skeletize(set::FinSet; index::Bool=false)
# FIXME: We should support `unique_index` and it should be used here.
FinDomFunction(collect(set), set, index=index)
end
# Transport f: X → Y along the skeletizations of its (co)domain.
function skeletize(f::FinFunction, X, Y)
FinFunction(i -> only(preimage(Y, f(X(i)))), dom(X), dom(Y))
end
""" Make list of elements unique by adding tags if necessary.
If the elements are already unique, they will not be mutated. Every occurrence
of a repeated element is tagged, including the first.
"""
function unique_by_tagging!(elems::AbstractVector{T}; tag=default_tag) where T
  # counts[x] == 0 means x occurs exactly once; otherwise it holds the next
  # tag number to hand out for x.
  counts = Dict{T,Int}()
  for x in elems
    counts[x] = haskey(counts, x) ? 1 : 0
  end
  for (i, x) in enumerate(elems)
    j = counts[x]
    iszero(j) && continue
    tagged = tag(x, j)
    @assert !haskey(counts, tagged) # Tagged names must not collide with originals.
    elems[i] = tagged
    counts[x] = j + 1
  end
  return elems
end
# Default tagging scheme: append "#<count>" to the name.
default_tag(x::Symbol, t) = Symbol(string(x), "#", t)
default_tag(x::AbstractString, t) = x * "#" * string(t)
# Pushout complements
#--------------------
""" Compute a pushout complement of finite sets, if possible.
Given functions ``l: I → L`` and ``m: L → G`` to form a pushout square
l
L ← I
m ↓ ↓k
G ← K
g
define the set ``K := G / m(L / l(I))`` and take ``g: K ↪ G`` to be the
inclusion. Then the map ``k: I → K`` is determined by the map ``l⋅m: I → G``
from the requirement that the square commutes.
Pushout complements exist only if the identification condition is satisfied. An
error will be raised if the pushout complement cannot be constructed. To check
this in advance, use [`can_pushout_complement`](@ref).
"""
function pushout_complement(pair::ComposablePair{<:FinSet{Int}})
l, m = pair
I, L, G = dom(l), codom(l), codom(m)
# Construct inclusion g: K ↪ G.
l_image = Set(collect(l))
# Images in G of the "deleted" part of L (elements outside l's image).
m_image = Set([ m(x) for x in L if x ∉ l_image ])
g = FinFunction([x for x in G if x ∉ m_image], G)
K = dom(g)
# Construct morphism k: I → K using partial inverse of g.
g_inv = Dict{Int,Int}(zip(collect(g), K))
k = FinFunction(map(I) do x
y = m(l(x))
get(g_inv, y) do; error("Identification failed for domain element $x") end
end, I, K)
return ComposablePair(k, g)
end
# A pushout complement exists iff both parts of the identification condition
# are empty.
can_pushout_complement(pair::ComposablePair{<:FinSet{Int}}) =
all(isempty, id_condition(pair))
""" Check identification condition for pushout complement of finite sets.
The identification condition says that the functions do not map (1) both a
deleted item and a preserved item in L to the same item in G or (2) two distinct
deleted items to the same item. It is trivially satisfied for injective functions.
Returns pair of iterators of
(1) a nondeleted item that maps to a deleted item in G
(2) a pair of distinct items in L that are deleted yet mapped to the same
item in G.
"""
function id_condition(pair::ComposablePair{<:FinSet{Int}})
l, m = pair
l_image = Set(collect(l))
# Complement of l's image: the elements of L slated for deletion.
l_imageᶜ = [ x for x in codom(l) if x ∉ l_image ]
m_image = Set(map(m, l_imageᶜ))
((i for i in l_image if m(i) ∈ m_image),
((i, j) for i in eachindex(l_imageᶜ)
for j in i+1:length(l_imageᶜ)
if m(l_imageᶜ[i]) == m(l_imageᶜ[j])))
end
# Subsets
#########
""" Subset of a finite set.
"""
const SubFinSet{S,T} = Subobject{<:FinSet{S,T}}
# Coerce `f` (vector of elements, callable, etc.) into an inclusion map.
Subobject(X::FinSet, f) = Subobject(FinFunction(f, X))
SubFinSet(X, f) = Subobject(FinFunction(f, X))
force(A::SubFinSet{Int}) = Subobject(force(hom(A)))
Base.collect(A::SubFinSet) = collect(hom(A))
Base.sort(A::SubFinSet) = SubFinSet(ob(A), sort(collect(A)))
# NOTE(review): `BitVector <: AbstractVector{Bool}`, so this union is
# redundant but harmless; kept for explicitness.
const AbstractBoolVector = Union{AbstractVector{Bool},BitVector}
""" Subset of a finite set represented as a boolean vector.
This is the subobject classifier representation since `Bool` is the subobject
classifier for `Set`.
"""
@auto_hash_equals struct SubFinSetVector{S<:FinSet} <: Subobject{S}
set::S
predicate::AbstractBoolVector # predicate[i] is true iff element i belongs to the subset
function SubFinSetVector(X::S, pred::AbstractBoolVector) where S<:FinSet
length(pred) == length(X) ||
error("Size of predicate $pred does not equal size of object $X")
new{S}(X, pred)
end
end
# Wrap a boolean predicate vector as a subobject of `X`; with no explicit set,
# the ambient set is inferred from the predicate's length.
Subobject(X::FinSet, pred::AbstractBoolVector) = SubFinSetVector(X, pred)
SubFinSet(pred::AbstractBoolVector) = Subobject(FinSet(length(pred)), pred)

ob(A::SubFinSetVector) = A.set
# The inclusion map picks out the `true` positions of the predicate.
hom(A::SubFinSetVector) = FinFunction(findall(A.predicate), A.set)
predicate(A::SubFinSetVector) = A.predicate

# Generic fallback: recover the boolean predicate from the inclusion map.
function predicate(A::SubFinSet)
  incl = hom(A)
  pred = falses(length(codom(incl)))
  foreach(x -> pred[incl(x)] = true, dom(incl))
  return pred
end
# Lattice of subobjects of a finite set: elementwise boolean operations for
# meet/join; top/bottom via (co)limits.
@instance SubobjectLattice{FinSet,SubFinSet} begin
@import ob
meet(A::SubFinSet, B::SubFinSet) = meet(A, B, SubOpBoolean())
join(A::SubFinSet, B::SubFinSet) = join(A, B, SubOpBoolean())
top(X::FinSet) = top(X, SubOpWithLimits())
bottom(X::FinSet) = bottom(X, SubOpWithLimits())
end
""" Algorithm to compute subobject operations using elementwise boolean logic.
"""
struct SubOpBoolean <: SubOpAlgorithm end
# Intersection/union as elementwise AND/OR of the predicate vectors.
meet(A::SubFinSet{Int}, B::SubFinSet{Int}, ::SubOpBoolean) =
SubFinSet(predicate(A) .& predicate(B))
join(A::SubFinSet{Int}, B::SubFinSet{Int}, ::SubOpBoolean) =
SubFinSet(predicate(A) .| predicate(B))
top(X::FinSet{Int}, ::SubOpBoolean) = SubFinSet(trues(length(X)))
bottom(X::FinSet{Int}, ::SubOpBoolean) = SubFinSet(falses(length(X)))
end
| [
27,
34345,
29,
10677,
14,
66,
2397,
12409,
62,
282,
29230,
14,
18467,
50,
1039,
13,
20362,
198,
37811,
383,
6536,
286,
27454,
5621,
290,
5499,
11,
290,
663,
18328,
13,
198,
37811,
198,
21412,
4463,
50,
1039,
198,
39344,
4463,
7248,
... | 2.538146 | 18,272 |
<gh_stars>1-10
"""
ForceDirectedLayout
The fields are, in order:
- `move`, a tuple to specify whether moves on the x and y axes are allowed
- `k`, a tuple (kₐ,kᵣ) giving the strength of attraction and repulsion
- `exponents`, a tuple (a,b,c,d) giving the exponents for the attraction and
repulsion functions
- `gravity`, the strength of attraction towards the center, set to `0.0` as a
default
- `δ`, a floating point constant regulating the attractive force of interaction
strength -- when set to its default value of 0.0, all edges have the same
attraction
- `degree`, a boolean to specificy whether the nodes repel one another according
to their degree
The various coefficients are used to decide how strongly nodes will *attract* or
*repel* one another, as a function of their distance Δ. Specifically, the
default is that connected nodes will attract one another proportionally to
(Δᵃ)×(kₐᵇ), with a=2 and b=-1, and all nodes repel one another proportionally to
(Δᶜ)×(kᵣᵈ) with c=-1 and d=2.
The parameterization for the Fruchterman-Rheingold layout is the default one,
particularly if kₐ=kᵣ. The Force Atlas 2 parameters are kₐ=1 (or b=0), kᵣ set to
any value, a=1, c=-1, d=1. Note that in all cases, the gravity is a multiplying
constant of the resulting attraction force, so it will also be sensitive to
these choices. The `FruchtermanRheingold` and `ForceAtlas2` functions will
return a `ForceDirectedLayout` -- as this object is mutable, you can replace the
exponents at any time.
The δ parameter is particularly important for probabilistic networks, as these
tend to have *all* their interactions set to non-zero values. As such, setting a
value of δ=1 means that the interactions only attract as much as they are
probable.
"""
mutable struct ForceDirectedLayout
move::Tuple{Bool,Bool} # whether moves along the (x, y) axes are allowed
k::Tuple{Float64,Float64} # (kₐ, kᵣ): attraction and repulsion constants
exponents::Tuple{Float64,Float64,Float64,Float64} # (a, b, c, d) as documented above
gravity::Float64 # strength of attraction towards the plot center
δ::Float64 # exponent on interaction strength in the attraction force
degree::Bool # whether repulsion scales with the nodes' degrees
end
"""
ForceDirectedLayout(ka::Float64, kr::Float64; gravity::Float64=0.75)
Construct a `ForceDirectedLayout` with attraction constant `ka` and repulsion
constant `kr`, movement allowed on both axes, the Fruchterman-Rheingold
exponents (a=2, b=-1, c=-1, d=2), interaction-strength exponent δ=0.0, and
degree-based repulsion enabled.
"""
ForceDirectedLayout(ka::Float64, kr::Float64; gravity::Float64=0.75) = ForceDirectedLayout((true,true), (ka,kr), (2.0, -1.0, -1.0, 2.0), gravity, 0.0, true)
"""
    FruchtermanRheingold(k::Float64; gravity::Float64=0.75)

Fruchterman-Rheingold layout: a `ForceDirectedLayout` in the "strict" sense
where the attraction and repulsion constants coincide (kᵣ = kₐ = k). This
function exists to make the code more explicit, since the default
`ForceDirectedLayout` already uses the Fruchterman-Rheingold exponents.
"""
function FruchtermanRheingold(k::Float64; gravity::Float64=0.75)
  return ForceDirectedLayout(k, k; gravity=gravity)
end

"""
    ForceAtlas2(k::Float64; gravity::Float64=0.75)

Force Atlas 2 layout: attraction proportional to the distance, with kₐ fixed
to 1 so that `k` is the *relative* repulsion, and repulsion proportional to
the inverse of the distance.
"""
function ForceAtlas2(k::Float64; gravity::Float64=0.75)
  return ForceDirectedLayout((true, true), (1.0, k), (1.0, 0.0, -1.0, 1.0), gravity, 0.0, true)
end

"""
    SpringElectric(k::Float64; gravity::Float64=0.75)

Spring-electric layout: attraction proportional to the distance, repulsion
proportional to the inverse of the squared distance.
"""
function SpringElectric(k::Float64; gravity::Float64=0.75)
  return ForceDirectedLayout((true, true), (k, k), (1.0, 1.0, -2.0, 1.0), gravity, 0.0, true)
end
"""
Stops the movement of a node position by resetting both velocity components.
"""
function stop!(n::NodePosition)
  n.vx = n.vy = 0.0
end
"""
Repel two nodes, pushing them apart along the line joining them. The repulsion
function `fr` maps the distance Δ to a force magnitude; movement on each axis
is applied only if enabled in `LA.move`.
"""
function repel!(LA::T, n1::NodePosition, n2::NodePosition, fr) where {T <: ForceDirectedLayout}
  δx = n1.x - n2.x
  δy = n1.y - n2.y
  Δ = sqrt(δx^2.0 + δy^2.0)
  # Avoid a division by zero when the two nodes coincide.
  Δ = Δ == 0.0 ? 0.0001 : Δ
  # The force magnitude per unit displacement is the same on both axes;
  # compute it once instead of re-evaluating `fr(Δ)/Δ` up to four times.
  f = fr(Δ) / Δ
  if LA.move[1]
    n1.vx += δx * f
    n2.vx -= δx * f
  end
  if LA.move[2]
    n1.vy += δy * f
    n2.vy -= δy * f
  end
end
"""
Attract two connected nodes, pulling them together along the line joining
them. The attraction function `fa` maps the distance Δ to a force magnitude;
movement on each axis is applied only if enabled in `LA.move`.
"""
function attract!(LA::T, n1::NodePosition, n2::NodePosition, fa) where {T <: ForceDirectedLayout}
  δx = n1.x - n2.x
  δy = n1.y - n2.y
  Δ = sqrt(δx^2.0 + δy^2.0)
  # Coincident nodes exert no attraction (and we must not divide by zero).
  if !iszero(Δ)
    # Compute the per-unit force once instead of re-evaluating `fa(Δ)/Δ`
    # up to four times.
    f = fa(Δ) / Δ
    if LA.move[1]
      n1.vx -= δx * f
      n2.vx += δx * f
    end
    if LA.move[2]
      n1.vy -= δy * f
      n2.vy += δy * f
    end
  end
end
"""
Update the position of a node: move it along its accumulated velocity, with
the displacement capped at 0.01 units, then reset the velocity.
"""
function update!(n::NodePosition)
  speed = sqrt(n.vx^2.0 + n.vy^2.0)
  if !iszero(speed)
    cap = min(speed, 0.01)
    n.x += n.vx / speed * cap
    n.y += n.vy / speed * cap
  end
  stop!(n)
end
"""
position!(LA::ForceDirectedLayout, L::Dict{K,NodePosition}, N::T) where {T <: EcologicalNetworks.AbstractEcologicalNetwork} where {K}
One iteration of the force-directed layout routine. Because these algorithms can
take some time to converge, it may be useful to stop every 500 iterations to
have a look at the results. Note that to avoid oscillations, the maximum
displacement at any given time is set to 0.01 units.
These layouts tend to have O(N³) complexity, where N is the number of nodes in
the network. This is because repulsion required to do (N×(N-1))/2 visits on
pairs of nodes, and an optimal layout usually requires s×N steps to converge.
With the maximal displacement set to 0.01, we have found that k ≈ 100 gives
acceptable results. This will depend on the complexity of the network, and its
connectance, as well as the degree and edge strengths distributions.
"""
function position!(LA::ForceDirectedLayout, L::Dict{K,NodePosition}, N::T) where {T <: EcologicalNetworks.AbstractEcologicalNetwork} where {K}
degdistr = degree(N)
# Exponents and forces - the attraction and repulsion functions are
# (Δᵃ)×(kₐᵇ) and (Δᶜ)×(kᵣᵈ)
a,b,c,d = LA.exponents
ka, kr = LA.k
fa(x) = (x^a)*(ka^b)
fr(x) = (x^c)*(kr^d)
plotcenter = NodePosition(0.0, 0.0, 0.0, 0.0)
# Gravity pulls every node towards the plot center, and each unordered pair
# of nodes repels exactly once (j > i), optionally scaled by (degree+1)
# products when `LA.degree` is set.
for (i, s1) in enumerate(species(N))
attract!(LA, L[s1], plotcenter, (x) -> LA.gravity*fa(x))
for (j, s2) in enumerate(species(N))
if j > i
if LA.degree
repel!(LA, L[s1], L[s2], (x) -> (degdistr[s1]+1)*(degdistr[s2]+1)*fr(x))
else
repel!(LA, L[s1], L[s2], fr)
end
end
end
end
# Connected nodes attract, weighted by interaction strength to the power δ.
for int in interactions(N)
# We can do Bool^δ and it returns the Bool, so that's tight
attract!(LA, L[int.from], L[int.to], (x) -> N[int.from, int.to]^LA.δ*fa(x))
end
# Apply the accumulated velocities (capped displacement) and reset them.
for s in species(N)
update!(L[s])
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
37811,
198,
220,
220,
220,
5221,
13470,
276,
32517,
198,
198,
464,
7032,
389,
11,
287,
1502,
25,
198,
198,
12,
4600,
21084,
47671,
257,
46545,
284,
11986,
1771,
6100,
319,
262,
2124,
290,
... | 2.39918 | 2,683 |
<reponame>tawheeler/AutoDrivers.jl
# Gaussian Mixture Regressor: predicts p(action | observation) from a joint
# Gaussian mixture over (action, observation) via the conditional-Gaussian
# identity μ₁₋₂ = μ₁ + Σ₁₂ Σ₂₂⁻¹ (x₂ - μ₂) = A x₂ + b.
# NOTE(review): legacy Julia 0.5/0.6 syntax (`type` instead of `mutable struct`),
# consistent with the rest of this file.
type GMR{M<:MvNormal}
    # μ₁₋₂ = μ₁ + Σ₁₂ * Σ₂₂⁻¹ * (x₂ - μ₂) = A*x₂ + b
    vec_A::Vector{Matrix{Float64}} # [n_components [ntargets×nindicators]]
    vec_b::Vector{Vector{Float64}} # [n_components [ntargets]]
    # pdf(p), all pre-computed. Used to compute βⱼ(p)
    mixture_Obs::MixtureModel{Multivariate,Continuous,M} # p(obs), all pre-computed, should never be edited
    # pdf(a|p), means μⱼ_ₚ and weights βⱼ(p) are functions of p and must be updated every time, covariance is constant
    mixture_Act_given_Obs::MixtureModel{Multivariate,Continuous,M}
end
function GMR{M<:MvNormal}(mix::MixtureModel{Multivariate,Continuous,M}, n_targets::Int=2)
    #=
    Construct a Gaussian Mixture Regressor using a Gaussian mixture over both
    the features and the actions, and the features are at the end of the input

    The first `n_targets` entries of each component are the action (target)
    dimensions; the remaining entries are the observed features.
    =#
    weights = probs(mix.prior) # [n_components]
    n_components = length(weights)
    n_indicators = length(mix) - n_targets # (currently unused)
    vec_A = Array(Matrix{Float64}, n_components) # μ₁₋₂ = μ₁ + Σ₁₂ * Σ₂₂⁻¹ * (x₂ - μ₂) = A*x₂ + b
    vec_b = Array(Vector{Float64}, n_components)
    vec_G = Array(MvNormal, n_components)
    vec_H = Array(MvNormal, n_components)
    for i = 1 : n_components
        # Split the joint mean/covariance into action (ₐ) and observation (ₚ) blocks.
        μ = mix.components[i].μ
        μₐ = μ[1:n_targets]
        μₚ = μ[n_targets+1:end]
        Σ = full(mix.components[i].Σ)
        Σₐₐ = Σ[1:n_targets,1:n_targets]
        Σₐₚ = Σ[1:n_targets,n_targets+1:end]
        # Project onto the nearest SPD matrix before inverting, guarding
        # against numerical asymmetry / indefiniteness.
        Σₚₚ = nearestSPD(Σ[n_targets+1:end,n_targets+1:end])
        iΣₚₚ = inv(Σₚₚ)
        A = Σₐₚ * iΣₚₚ
        vec_A[i] = A
        vec_b[i] = vec(μₐ - A*μₚ)
        # Schur-complement conditional covariance, again projected back to SPD.
        C = nearestSPD(Σₐₐ - Σₐₚ * iΣₚₚ * ((Σₐₚ)'))
        vec_G[i] = MvNormal(Array(Float64, n_targets), C) # p(action|obs), mean and weighting must be updated with each observation, cov is pre-computed
        vec_H[i] = MvNormal(μₚ, Σₚₚ) # p(obs), all pre-computed, should never be edited
    end
    mixture_Act_given_Obs = MixtureModel(vec_G) # p(action|obs), mean and weighting must be updated with each observation, cov is pre-computed
    mixture_Obs = MixtureModel(vec_H, weights) # p(obs), all pre-computed, should never be edited
    GMR(vec_A, vec_b, mixture_Obs, mixture_Act_given_Obs)
end
# Pretty-print the per-component affine regressions μₐ = A⋅f + b of a GMR,
# one bracketed row per action dimension, followed by the observation-mixture
# prior. Generalized from a hard-coded two rows to any n_targets (output is
# unchanged for the default n_targets == 2).
function Base.print(model::GMR)
    println("GMR:")
    for (i, mat) in enumerate(model.vec_A)
        println(i)
        for r in 1:size(mat,1)
            print("\t[")
            for j in 1:size(mat,2)
                @printf(" %10.6f", mat[r,j])
            end
            @printf("] + [ %10.6f]\n", model.vec_b[i][r])
        end
    end
    println("\tmixture_Obs: ")
    println("\t\tprior: ", model.mixture_Obs.prior)
end
n_targets(gmr::GMR) = size(gmr.vec_A[1], 1)   # rows of A = action dimension
n_features(gmr::GMR) = size(gmr.vec_A[1], 2)  # cols of A = observation dimension
n_components(gmr::GMR) = length(gmr.vec_A)    # number of mixture components
# Approximate count of free parameters (sufficient statistics) in the model.
# NOTE(review): the per-component breakdown below (bias / covariance terms)
# appears tailored to n_targets == 2 — confirm before using for other sizes.
function nsuffstats(gmr::GMR)
    dimA = length(gmr.vec_A[1])
    n_components(gmr) * (2*dimA + 2 # bias
                           + 3 # covariance in mixture_Act_given_Obs
                           + div(dimA*dimA,2)) # covariance for mixture_Obs
end
# Return the nearest symmetric positive-definite matrix to A (Higham-style
# projection), so downstream `inv`/`MvNormal` calls are safe.
# NOTE(review): `eye`/`chol`/tuple-returning `svd` are Julia ≤0.6 APIs,
# consistent with the rest of this file.
function nearestSPD(A::Matrix{Float64})
    # see http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
    # output:
    #  α, β ≥ 0.0 such that
    #      α ≤ δ₂(A) ≤ β ≤ α + 2 max(fα, tol)
    #  and a PSD matrix X such that |A - X|₂ = β
    n = size(A, 1)
    @assert(n == size(A, 2)) # ensure it is square
    I = eye(n)
    # symmetrize A into B
    B = (A+A')./2
    # Compute the symmetric polar factor of B. Call it H.
    # Clearly H is itself SPD.
    U, σ, V = svd(B)
    H = V*diagm(σ)*V'
    # get Ahat in the above formula
    Ahat = (B+H)/2
    # ensure symmetry
    Ahat = (Ahat + Ahat')/2;
    # test that Ahat is in fact PD. if it is not so, then tweak it just a bit.
    # Cholesky succeeds iff the matrix is PD; bounded to 100 attempts.
    worked = false
    iteration_count = 0
    while !worked && iteration_count < 100
        iteration_count += 1
        try
            chol(Ahat)
            worked = true
        catch
            # do nothing
        end
        if !worked
            # Ahat failed the chol test. It must have been just a hair off,
            # due to floating point trash, so it is simplest now just to
            # tweak by adding a tiny multiple of an identity matrix.
            min_eig = minimum(eigvals(Ahat))
            Ahat = Ahat + (-min_eig*iteration_count.^2 + eps(Float32))*I
        end
    end
    Ahat
end
# Condition the mixture on an observation vector: updates (in place) each
# component's weight βⱼ ∝ wⱼ·pdf(Nⱼ, features) and conditional mean
# μₐ|ₚ = A⋅f + b, returning the conditioned action mixture.
@compat function (gmr::GMR)(features::Vector{Float64})
    mixture_Act_given_Obs = gmr.mixture_Act_given_Obs
    mixture_Obs = gmr.mixture_Obs
    nc = n_components(gmr)
    for j in 1 : nc
        # compute the β value, unweighted
        # βⱼ(f) ∝ wⱼ Nⱼ(μₚ, Σₚ)
        wⱼ = mixture_Obs.prior.p[j]
        Nⱼ = mixture_Obs.components[j]
        mixture_Act_given_Obs.prior.p[j] = wⱼ * pdf(Nⱼ, features)
        # compute the conditional mean
        #  μₐ_ₚ = A⋅f + b
        A = gmr.vec_A[j]
        b = gmr.vec_b[j]
        copy!(mixture_Act_given_Obs.components[j].μ, A*features + b)
    end
    # normalize the β values
    sum_β = sum(mixture_Act_given_Obs.prior.p)
    if sum_β > 0.0 && !isnan(sum_β) && !isinf(sum_β)
        for i in 1 : nc
            mixture_Act_given_Obs.prior.p[i] /= sum_β
        end
    else
        # Degenerate case (all component likelihoods underflowed or blew up):
        # fall back to a uniform-weight, zero-mean mixture.
        fill!(mixture_Act_given_Obs.prior.p, 1/nc) # set all to equal weight
        for i in 1 : nc
            fill!(mixture_Act_given_Obs.components[i].μ, 0.0) # set mean to zero
        end
    end
    mixture_Act_given_Obs
end
| [
27,
7856,
261,
480,
29,
83,
707,
258,
417,
263,
14,
27722,
20564,
690,
13,
20362,
198,
4906,
6951,
49,
90,
44,
27,
25,
44,
85,
26447,
92,
628,
220,
220,
220,
1303,
18919,
158,
224,
223,
158,
224,
233,
158,
224,
224,
796,
18919,
... | 1.930055 | 2,888 |
import Base: position
using FFTW
"""
PositionBasis(xmin, xmax, Npoints)
PositionBasis(b::MomentumBasis)
Basis for a particle in real space.
For simplicity periodic boundaries are assumed which means that
the rightmost point defined by `xmax` is not included in the basis
but is defined to be the same as `xmin`.
When a [`MomentumBasis`](@ref) is given as argument the exact values
of ``x_{min}`` and ``x_{max}`` are due to the periodic boundary conditions
more or less arbitrary and are chosen to be
``-\\pi/dp`` and ``\\pi/dp`` with ``dp=(p_{max}-p_{min})/N``.
"""
struct PositionBasis{T,X1,X2} <: Basis
shape::Vector{T}
xmin::Float64
xmax::Float64
N::T
function PositionBasis{X1,X2}(xmin::Real, xmax::Real, N::T) where {X1,X2,T<:Int}
@assert isa(X1, Real) && isa(X2, Real)
new{T,X1,X2}([N], xmin, xmax, N)
end
end
PositionBasis(xmin::Real, xmax::Real, N::Int) = PositionBasis{xmin,xmax}(xmin,xmax,N)
"""
MomentumBasis(pmin, pmax, Npoints)
MomentumBasis(b::PositionBasis)
Basis for a particle in momentum space.
For simplicity periodic boundaries are assumed which means that
`pmax` is not included in the basis but is defined to be the same as `pmin`.
When a [`PositionBasis`](@ref) is given as argument the exact values
of ``p_{min}`` and ``p_{max}`` are due to the periodic boundary conditions
more or less arbitrary and are chosen to be
``-\\pi/dx`` and ``\\pi/dx`` with ``dx=(x_{max}-x_{min})/N``.
"""
struct MomentumBasis{P1,P2} <: Basis
shape::Vector{Int}
pmin::Float64
pmax::Float64
N::Int
function MomentumBasis{P1,P2}(pmin::Real, pmax::Real, N::Int) where {P1,P2}
@assert isa(P1, Real) && isa(P2, Real)
new([N], pmin, pmax, N)
end
end
MomentumBasis(pmin::Real, pmax::Real, N::Int) = MomentumBasis{pmin,pmax}(pmin, pmax, N)
# Construct the Fourier-conjugate basis; the interval is fixed by the
# periodic-boundary relation between grid spacing and box length.
function PositionBasis(b::MomentumBasis)
    dp = (b.pmax - b.pmin)/b.N
    return PositionBasis(-pi/dp, pi/dp, b.N)
end
function MomentumBasis(b::PositionBasis)
    dx = (b.xmax - b.xmin)/b.N
    return MomentumBasis(-pi/dx, pi/dx, b.N)
end

# Two bases are equal iff they span the same interval with the same point count.
function ==(b1::PositionBasis, b2::PositionBasis)
    return b1.xmin==b2.xmin && b1.xmax==b2.xmax && b1.N==b2.N
end
function ==(b1::MomentumBasis, b2::MomentumBasis)
    return b1.pmin==b2.pmin && b1.pmax==b2.pmax && b1.N==b2.N
end
"""
gaussianstate(b::PositionBasis, x0, p0, sigma)
gaussianstate(b::MomentumBasis, x0, p0, sigma)
Create a Gaussian state around `x0` and` p0` with width `sigma`.
In real space the gaussian state is defined as
```math
\\Psi(x) = \\frac{1}{\\pi^{1/4}\\sqrt{\\sigma}}
e^{i p_0 (x-\\frac{x_0}{2}) - \\frac{(x-x_0)^2}{2 \\sigma^2}}
```
and is connected to the momentum space definition
```math
\\Psi(p) = \\frac{\\sqrt{\\sigma}}{\\pi^{1/4}}
e^{-i x_0 (p-\\frac{p_0}{2}) - \\frac{1}{2}(p-p_0)^2 \\sigma^2}
```
via a Fourier-transformation
```math
\\Psi(p) = \\frac{1}{\\sqrt{2\\pi}}
\\int_{-\\infty}^{\\infty} e^{-ipx}\\Psi(x) \\mathrm{d}x
```
The state has the properties
* ``⟨p⟩ = p_0``
* ``⟨x⟩ = x_0``
* ``\\mathrm{Var}(x) = \\frac{σ^2}{2}``
* ``\\mathrm{Var}(p) = \\frac{1}{2 σ^2}``
Due to the numerically necessary discretization additional scaling
factors ``\\sqrt{Δx}`` and ``\\sqrt{Δp}`` are used so that
``\\langle x_i|Ψ\\rangle = \\sqrt{Δ x} Ψ(x_i)`` and ``\\langle p_i|Ψ\\rangle = \\sqrt{Δ p} Ψ(p_i)`` so
that the resulting Ket state is normalized.
"""
# Fill a Ket with the discretized real-space Gaussian wave packet; the grid
# point is accumulated (x += dx) exactly as before to keep results bit-identical.
function gaussianstate(b::PositionBasis, x0::Real, p0::Real, sigma::Real)
    state = Ket(b)
    step = spacing(b)
    amp = 1.0/(pi^(1/4)*sqrt(sigma))*sqrt(step)
    pos = b.xmin
    idx = 1
    while idx <= b.N
        state.data[idx] = amp*exp(1im*p0*(pos-x0/2) - (pos-x0)^2/(2*sigma^2))
        pos += step
        idx += 1
    end
    return state
end
# Momentum-space counterpart of the real-space Gaussian packet above; grid
# accumulation (p += dp) is preserved so results stay bit-identical.
function gaussianstate(b::MomentumBasis, x0::Real, p0::Real, sigma::Real)
    state = Ket(b)
    step = spacing(b)
    amp = sqrt(sigma)/pi^(1/4)*sqrt(step)
    mom = b.pmin
    idx = 1
    while idx <= b.N
        state.data[idx] = amp*exp(-1im*x0*(mom-p0/2) - (mom-p0)^2/2*sigma^2)
        mom += step
        idx += 1
    end
    return state
end
"""
spacing(b::PositionBasis)
Difference between two adjacent points of the real space basis.
"""
spacing(b::PositionBasis) = (b.xmax - b.xmin)/b.N
"""
spacing(b::MomentumBasis)
Momentum difference between two adjacent points of the momentum basis.
"""
spacing(b::MomentumBasis) = (b.pmax - b.pmin)/b.N
"""
samplepoints(b::PositionBasis)
x values of the real space basis.
"""
samplepoints(b::PositionBasis) = (dx = spacing(b); Float64[b.xmin + i*dx for i=0:b.N-1])
"""
samplepoints(b::MomentumBasis)
p values of the momentum basis.
"""
samplepoints(b::MomentumBasis) = (dp = spacing(b); Float64[b.pmin + i*dp for i=0:b.N-1])
"""
position(b::PositionBasis)
Position operator in real space.
"""
position(b::PositionBasis) = SparseOperator(b, sparse(Diagonal(complex(samplepoints(b)))))
"""
position(b:MomentumBasis)
Position operator in momentum space.
"""
function position(b::MomentumBasis)
b_pos = PositionBasis(b)
transform(b, b_pos)*dense(position(b_pos))*transform(b_pos, b)
end
"""
momentum(b:MomentumBasis)
Momentum operator in momentum space.
"""
momentum(b::MomentumBasis) = SparseOperator(b, sparse(Diagonal(complex(samplepoints(b)))))
"""
momentum(b::PositionBasis)
Momentum operator in real space.
"""
function momentum(b::PositionBasis)
b_mom = MomentumBasis(b)
transform(b, b_mom)*dense(momentum(b_mom))*transform(b_mom, b)
end
"""
potentialoperator(b::PositionBasis, V(x))
Operator representing a potential ``V(x)`` in real space.
"""
function potentialoperator(b::PositionBasis, V::Function)
x = samplepoints(b)
diagonaloperator(b, V.(x))
end
"""
potentialoperator(b::MomentumBasis, V(x))
Operator representing a potential ``V(x)`` in momentum space.
"""
function potentialoperator(b::MomentumBasis, V::Function)
b_pos = PositionBasis(b)
transform(b, b_pos)*dense(potentialoperator(b_pos, V))*transform(b_pos, b)
end
"""
potentialoperator(b::CompositeBasis, V(x, y, z, ...))
Operator representing a potential ``V`` in more than one dimension.
# Arguments
* `b`: Composite basis consisting purely either of `PositionBasis` or
`MomentumBasis`. Note, that calling this with a composite basis in
momentum space might consume a large amount of memory.
* `V`: Function describing the potential. ATTENTION: The number of arguments
accepted by `V` must match the spatial dimension. Furthermore, the order
of the arguments has to match that of the order of the tensor product of
bases (e.g. if `b=bx⊗by⊗bz`, then `V(x,y,z)`).
"""
function potentialoperator(b::CompositeBasis, V::Function)
if isa(b.bases[1], PositionBasis)
potentialoperator_position(b, V)
elseif isa(b.bases[1], MomentumBasis)
potentialoperator_momentum(b, V)
else
throw(IncompatibleBases())
end
end
# Diagonal potential operator on a purely position-space composite basis:
# evaluates V on the full tensor grid and flattens column-major to match the
# composite-basis ordering.
function potentialoperator_position(b::CompositeBasis, V::Function)
    for base=b.bases
        # Explicit error instead of `@assert` (asserts may be compiled out, and
        # the sibling transform paths signal mismatches with IncompatibleBases).
        isa(base, PositionBasis) || throw(IncompatibleBases())
    end
    points = [samplepoints(b1) for b1=b.bases]
    dims = length.(points)
    n = length(b.bases)
    data = Array{ComplexF64}(undef, dims...)
    @inbounds for i=1:length(data)
        # Per-dimension coordinates of linear index i, fed to V in basis order.
        index = Tuple(CartesianIndices(data)[i])
        args = (points[j][index[j]] for j=1:n)
        data[i] = V(args...)
    end
    diagonaloperator(b, data[:])
end
# Potential operator on a purely momentum-space composite basis: build it in
# the conjugate position bases, then rotate into momentum space (memory-heavy
# for large composite bases, see the public docstring).
function potentialoperator_momentum(b::CompositeBasis, V::Function)
    for base=b.bases
        # Explicit error instead of `@assert` (asserts may be compiled out, and
        # the sibling transform paths signal mismatches with IncompatibleBases).
        isa(base, MomentumBasis) || throw(IncompatibleBases())
    end
    # Typed comprehension instead of pushing into an untyped `Any[]`.
    bases_pos = [PositionBasis(base) for base=b.bases]
    b_pos = tensor(bases_pos...)
    transform(b, b_pos)*dense(potentialoperator_position(b_pos, V))*transform(b_pos, b)
end
"""
FFTOperator
Abstract type for all implementations of FFT operators.
"""
abstract type FFTOperator{BL<:Basis, BR<:Basis, T} <: AbstractOperator{BL,BR} end
Base.eltype(x::FFTOperator) = promote_type(eltype(x.mul_before), eltype(x.mul_after))
"""
FFTOperators
Operator performing a fast fourier transformation when multiplied with a state
that is a Ket or an Operator.
"""
mutable struct FFTOperators{BL<:Basis,BR<:Basis,T<:Array,P1,P2,P3,P4} <: FFTOperator{BL, BR, T}
basis_l::BL
basis_r::BR
fft_l!::P1
fft_r!::P2
fft_l2!::P3
fft_r2!::P4
mul_before::T
mul_after::T
function FFTOperators(b1::BL, b2::BR,
fft_l!::P1,
fft_r!::P2,
fft_l2!::P3,
fft_r2!::P4,
mul_before::T,
mul_after::T) where {BL<:Basis,BR<:Basis,T,P1,P2,P3,P4}
new{BL,BR,T,P1,P2,P3,P4}(b1, b2, fft_l!, fft_r!, fft_l2!, fft_r2!, mul_before, mul_after)
end
end
"""
FFTKets
Operator that can only perform fast fourier transformations on Kets.
This is much more memory efficient when only working with Kets.
"""
mutable struct FFTKets{BL<:Basis,BR<:Basis,T<:Array,P1,P2} <: FFTOperator{BL, BR, T}
basis_l::BL
basis_r::BR
fft_l!::P1
fft_r!::P2
mul_before::T
mul_after::T
function FFTKets(b1::BL, b2::BR,
fft_l!::P1,
fft_r!::P2,
mul_before::T,
mul_after::T) where {BL<:Basis,BR<:Basis, T, P1, P2}
new{BL, BR, T, P1, P2}(b1, b2, fft_l!, fft_r!, mul_before, mul_after)
end
end
"""
transform(b1::MomentumBasis, b2::PositionBasis)
transform(b1::PositionBasis, b2::MomentumBasis)
Transformation operator between position basis and momentum basis.
"""
function transform(basis_l::MomentumBasis, basis_r::PositionBasis; ket_only::Bool=false)
Lx = (basis_r.xmax - basis_r.xmin)
dp = spacing(basis_l)
dx = spacing(basis_r)
if basis_l.N != basis_r.N || abs(2*pi/dp - Lx)/Lx > 1e-12
throw(IncompatibleBases())
end
mul_before = exp.(-1im*basis_l.pmin*(samplepoints(basis_r) .- basis_r.xmin))
mul_after = exp.(-1im*basis_r.xmin*samplepoints(basis_l))/sqrt(basis_r.N)
x = Vector{ComplexF64}(undef, length(basis_r))
if ket_only
FFTKets(basis_l, basis_r, plan_bfft!(x), plan_fft!(x), mul_before, mul_after)
else
A = Matrix{ComplexF64}(undef, length(basis_r), length(basis_r))
FFTOperators(basis_l, basis_r, plan_bfft!(x), plan_fft!(x), plan_bfft!(A, 2), plan_fft!(A, 1), mul_before, mul_after)
end
end
"""
transform(b1::CompositeBasis, b2::CompositeBasis)
Transformation operator between two composite bases. Each of the bases
has to contain bases of type PositionBasis and the other one a corresponding
MomentumBasis.
"""
function transform(basis_l::PositionBasis, basis_r::MomentumBasis; ket_only::Bool=false)
Lx = (basis_l.xmax - basis_l.xmin)
dp = spacing(basis_r)
dx = spacing(basis_l)
if basis_l.N != basis_r.N || abs(2*pi/dp - Lx)/Lx > 1e-12
throw(IncompatibleBases())
end
mul_before = exp.(1im*basis_l.xmin*(samplepoints(basis_r) .- basis_r.pmin))
mul_after = exp.(1im*basis_r.pmin*samplepoints(basis_l))/sqrt(basis_r.N)
x = Vector{ComplexF64}(undef, length(basis_r))
if ket_only
FFTKets(basis_l, basis_r, plan_fft!(x), plan_bfft!(x), mul_before, mul_after)
else
A = Matrix{ComplexF64}(undef, length(basis_r), length(basis_r))
FFTOperators(basis_l, basis_r, plan_fft!(x), plan_bfft!(x), plan_fft!(A, 2), plan_bfft!(A, 1), mul_before, mul_after)
end
end
"""
    transform(b1::CompositeBasis, b2::CompositeBasis; ket_only=false, index=Int[])

Transformation operator between two composite bases. Each of the bases
has to contain bases of type PositionBasis and the other one a corresponding
MomentumBasis. `index` selects which tensor factors are transformed; when
empty it is inferred (all position factors or all momentum factors of `b1`).
"""
function transform(basis_l::CompositeBasis, basis_r::CompositeBasis; ket_only::Bool=false, index::Vector{Int}=Int[])
    @assert length(basis_l.bases) == length(basis_r.bases)
    if length(index) == 0
        # Infer the factors to transform; a mix of position and momentum
        # factors cannot be inferred automatically.
        check_pos = [isa.(basis_l.bases, PositionBasis)...]
        check_mom = [isa.(basis_l.bases, MomentumBasis)...]
        if any(check_pos) && !any(check_mom)
            index = [1:length(basis_l.bases);][check_pos]
        elseif any(check_mom) && !any(check_pos)
            index = [1:length(basis_l.bases);][check_mom]
        else
            throw(IncompatibleBases())
        end
    end
    # The selected factors of basis_r must be the conjugate kind of basis_l's.
    if all(isa.(basis_l.bases[index], PositionBasis))
        @assert all(isa.(basis_r.bases[index], MomentumBasis))
        transform_xp(basis_l, basis_r, index; ket_only=ket_only)
    elseif all(isa.(basis_l.bases[index], MomentumBasis))
        @assert all(isa.(basis_r.bases[index], PositionBasis))
        transform_px(basis_l, basis_r, index; ket_only=ket_only)
    else
        throw(IncompatibleBases())
    end
end
# Composite-basis position→momentum transform: builds tensor-product phase
# factors and multi-dimensional FFT plans restricted to the factors in `index`.
function transform_xp(basis_l::CompositeBasis, basis_r::CompositeBasis, index::Vector{Int}; ket_only::Bool=false)
    n = length(basis_l.bases)
    Lx = [(b.xmax - b.xmin) for b=basis_l.bases[index]]
    dp = [spacing(b) for b=basis_r.bases[index]]
    dx = [spacing(b) for b=basis_l.bases[index]]
    N = [length(b) for b=basis_l.bases]
    for i=1:n
        if N[i] != length(basis_r.bases[i])
            throw(IncompatibleBases())
        end
    end
    # Each transformed factor pair must be Fourier-conjugate: dp*Lx == 2π.
    for i=1:length(index)
        if abs(2*pi/dp[i] - Lx[i])/Lx[i] > 1e-12
            throw(IncompatibleBases())
        end
    end
    # Kronecker-build the phase factors factor by factor; untransformed
    # factors contribute ones.
    if index[1] == 1
        mul_before = exp.(1im*basis_l.bases[1].xmin*(samplepoints(basis_r.bases[1]) .- basis_r.bases[1].pmin))
        mul_after = exp.(1im*basis_r.bases[1].pmin*samplepoints(basis_l.bases[1]))/sqrt(basis_r.bases[1].N)
    else
        mul_before = ones(N[1])
        mul_after = ones(N[1])
    end
    for i=2:n
        # Consistency: same membership test as transform_px (was `any(i .== index)`).
        if i in index
            mul_before = kron(exp.(1im*basis_l.bases[i].xmin*(samplepoints(basis_r.bases[i]) .- basis_r.bases[i].pmin)), mul_before)
            mul_after = kron(exp.(1im*basis_r.bases[i].pmin*samplepoints(basis_l.bases[i]))/sqrt(basis_r.bases[i].N), mul_after)
        else
            mul_before = kron(ones(N[i]), mul_before)
            mul_after = kron(ones(N[i]), mul_after)
        end
    end
    mul_before = reshape(mul_before, (N...,))
    mul_after = reshape(mul_after, (N...,))
    x = Array{ComplexF64}(undef, N...)
    if ket_only
        FFTKets(basis_l, basis_r, plan_fft!(x, index), plan_bfft!(x, index), mul_before, mul_after)
    else
        # Operator plans act on the right-index dims [n+1:2n] and left dims [1:n].
        A = Array{ComplexF64}(undef, [N; N]...)
        FFTOperators(basis_l, basis_r, plan_fft!(x, index), plan_bfft!(x, index), plan_fft!(A, [n + 1:2n;][index]), plan_bfft!(A, [1:n;][index]), mul_before, mul_after)
    end
end
# Composite-basis momentum→position transform; mirror image of transform_xp
# with conjugated phases and swapped fft/bfft plans.
function transform_px(basis_l::CompositeBasis, basis_r::CompositeBasis, index::Vector{Int}; ket_only::Bool=false)
    n = length(basis_l.bases)
    Lx = [(b.xmax - b.xmin) for b=basis_r.bases[index]]
    dp = [spacing(b) for b=basis_l.bases[index]]
    dx = [spacing(b) for b=basis_r.bases[index]]
    N = [length(b) for b=basis_l.bases]
    for i=1:n
        if N[i] != length(basis_r.bases[i])
            throw(IncompatibleBases())
        end
    end
    # Each transformed factor pair must be Fourier-conjugate: dp*Lx == 2π.
    for i=1:length(index)
        if abs(2*pi/dp[i] - Lx[i])/Lx[i] > 1e-12
            throw(IncompatibleBases())
        end
    end
    # Kronecker-build the phase factors factor by factor; untransformed
    # factors contribute ones.
    if index[1] == 1
        mul_before = exp.(-1im*basis_l.bases[1].pmin*(samplepoints(basis_r.bases[1]) .- basis_r.bases[1].xmin))
        mul_after = exp.(-1im*basis_r.bases[1].xmin*samplepoints(basis_l.bases[1]))/sqrt(N[1])
    else
        mul_before = ones(N[1])
        mul_after = ones(N[1])
    end
    for i=2:n
        if i in index
            mul_before = kron(exp.(-1im*basis_l.bases[i].pmin*(samplepoints(basis_r.bases[i]) .- basis_r.bases[i].xmin)), mul_before)
            mul_after = kron(exp.(-1im*basis_r.bases[i].xmin*samplepoints(basis_l.bases[i]))/sqrt(N[i]), mul_after)
        else
            mul_before = kron(ones(N[i]), mul_before)
            mul_after = kron(ones(N[i]), mul_after)
        end
    end
    mul_before = reshape(mul_before, (N...,))
    mul_after = reshape(mul_after, (N...,))
    x = Array{ComplexF64}(undef, N...)
    if ket_only
        FFTKets(basis_l, basis_r, plan_bfft!(x, index), plan_fft!(x, index), mul_before, mul_after)
    else
        # Operator plans act on the right-index dims [n+1:2n] and left dims [1:n].
        A = Array{ComplexF64}(undef, [N; N]...)
        FFTOperators(basis_l, basis_r, plan_bfft!(x, index), plan_fft!(x, index), plan_bfft!(A, [n + 1:2n;][index]), plan_fft!(A, [1:n;][index]), mul_before, mul_after)
    end
end
# Materialize the FFT operator as a dense matrix by applying it to the identity.
DenseOperator(op::FFTOperator) = op*identityoperator(DenseOpType, op.basis_r)
# The adjoint of the transform is the transform in the opposite direction.
dagger(op::FFTOperators) = transform(op.basis_r, op.basis_l)
dagger(op::FFTKets) = transform(op.basis_r, op.basis_l; ket_only=true)
# Tensor products are built by transforming between the composite bases.
tensor(A::FFTOperators, B::FFTOperators) = transform(tensor(A.basis_l, B.basis_l), tensor(A.basis_r, B.basis_r))
tensor(A::FFTKets, B::FFTKets) = transform(tensor(A.basis_l, B.basis_l), tensor(A.basis_r, B.basis_r); ket_only=true)
# In-place action of an FFT operator on a Ket:
# result = beta*result + alpha*(M*b), computed as phase → FFT → phase → scale.
function mul!(result::Ket{B1},M::FFTOperator{B1,B2},b::Ket{B2},alpha_,beta_) where {B1<:Basis,B2<:Basis}
    alpha = convert(ComplexF64, alpha_)
    beta = convert(ComplexF64, beta_)
    N::Int = length(M.basis_r)
    if beta==Complex(0.)
        # Fast path: transform directly inside `result`, no temporary.
        @inbounds for i=1:N
            result.data[i] = M.mul_before[i] * b.data[i]
        end
        M.fft_r! * reshape(result.data, size(M.mul_before))
        @inbounds for i=1:N
            result.data[i] *= M.mul_after[i] * alpha
        end
    else
        # General path: transform a copy, then accumulate into `result`.
        psi_ = Ket(M.basis_l, copy(b.data))
        @inbounds for i=1:N
            psi_.data[i] *= M.mul_before[i]
        end
        M.fft_r! * reshape(psi_.data, size(M.mul_before))
        @inbounds for i=1:N
            result.data[i] = beta*result.data[i] + alpha * psi_.data[i] * M.mul_after[i]
        end
    end
    result
end
# In-place action of an FFT operator on a Bra:
# result = beta*result + alpha*(b*M). Implemented via conjugation so the same
# forward FFT plan (fft_l!) can be reused for the adjoint action.
function mul!(result::Bra{B2},b::Bra{B1},M::FFTOperator{B1,B2},alpha_,beta_) where {B1<:Basis,B2<:Basis}
    alpha = convert(ComplexF64, alpha_)
    beta = convert(ComplexF64, beta_)
    N::Int = length(M.basis_l)
    if beta==Complex(0.)
        # Fast path: transform directly inside `result`, no temporary.
        @inbounds for i=1:N
            result.data[i] = conj(M.mul_after[i]) * conj(b.data[i])
        end
        M.fft_l! * reshape(result.data, size(M.mul_after))
        @inbounds for i=1:N
            result.data[i] = conj(result.data[i]) * M.mul_before[i] * alpha
        end
    else
        # General path: transform a conjugated copy, then accumulate.
        psi_ = Bra(M.basis_r, conj(b.data))
        @inbounds for i=1:N
            psi_.data[i] *= conj(M.mul_after[i])
        end
        M.fft_l! * reshape(psi_.data, size(M.mul_after))
        @inbounds for i=1:N
            result.data[i] = beta*result.data[i] + alpha * conj(psi_.data[i]) * M.mul_before[i]
        end
    end
    result
end
# In-place right multiplication of an Operator by an FFT operator:
# result = beta*result + alpha*(A*B). Acting on A's column (right) index is
# implemented by conjugating, running the "left" plan, and conjugating back.
function mul!(result::Operator{B1,B3,T},A::Operator{B1,B2},B::FFTOperators{B2,B3},alpha_,beta_) where {B1<:Basis,B2<:Basis,B3<:Basis,T}
    alpha = convert(ComplexF64, alpha_)
    beta = convert(ComplexF64, beta_)
    # When accumulating (beta != 0) work in a scratch buffer; otherwise
    # transform directly inside result.data.
    if beta != Complex(0.)
        data = similar(result.data, size(result.data, 1), size(result.data, 2))
    else
        data = result.data
    end
    copyto!(data, A.data)
    @inbounds for j=1:length(B.mul_after), i=1:length(B.mul_after)
        data[i, j] *= B.mul_after[j]
    end
    conj!(data)
    n = size(B.mul_after)
    B.fft_l2! * reshape(data, n..., n...)
    conj!(data)
    N = prod(n)
    @inbounds for j=1:N, i=1:N
        data[i, j] *= B.mul_before[j]
    end
    if alpha != Complex(1.)
        lmul!(alpha, data)
    end
    if beta != Complex(0.)
        rmul!(result.data, beta)
        result.data += data
    end
    result
end
# In-place left multiplication of an Operator by an FFT operator:
# result = beta*result + alpha*(A*B), transforming along B's row (left) index.
function mul!(result::Operator{B1,B3,T},A::FFTOperators{B1,B2},B::Operator{B2,B3},alpha_,beta_) where {B1<:Basis,B2<:Basis,B3<:Basis,T}
    alpha = convert(ComplexF64, alpha_)
    beta = convert(ComplexF64, beta_)
    # When accumulating (beta != 0) work in a scratch buffer; otherwise
    # transform directly inside result.data.
    if beta != Complex(0.)
        data = similar(result.data, size(result.data, 1), size(result.data, 2))
    else
        data = result.data
    end
    copyto!(data, B.data)
    @inbounds for j=1:length(A.mul_before), i=1:length(A.mul_before)
        data[i, j] *= A.mul_before[i]
    end
    n = size(A.mul_before)
    A.fft_r2! * reshape(data, n...,n...)
    N = prod(n)
    @inbounds for j=1:N, i=1:N
        data[i, j] *= A.mul_after[i]
    end
    if alpha != Complex(1.)
        lmul!(alpha, data)
    end
    if beta != Complex(0.)
        rmul!(result.data, beta)
        result.data += data
    end
    result
end
| [
11748,
7308,
25,
2292,
198,
3500,
376,
9792,
54,
198,
198,
37811,
198,
220,
220,
220,
23158,
15522,
271,
7,
87,
1084,
11,
2124,
9806,
11,
399,
13033,
8,
198,
220,
220,
220,
23158,
15522,
271,
7,
65,
3712,
29252,
298,
388,
15522,
2... | 2.07227 | 9,506 |
#Constant mean function
"""
    MeanConst <: Mean

Constant mean function
```math
m(x) = β
```
with constant ``β``.
"""
mutable struct MeanConst <: Mean
    "Constant"
    β::Float64
    "Priors for mean parameters"
    # NOTE(review): untyped `Array` field — presumably a vector of prior
    # distributions; confirm and tighten the element type if possible.
    priors::Array
    """
        MeanConst(β::Float64)

    Create `MeanConst` with constant `β` (and no priors).
    """
    MeanConst(β::Float64) = new(β, [])
end
# Evaluate the constant mean at a single input or at each column of a design
# matrix (observations are stored column-wise, hence size(X,2)).
mean(mConst::MeanConst, x::AbstractVector) = mConst.β
mean(mConst::MeanConst, X::AbstractMatrix) = fill(mConst.β, size(X,2))
# Hyperparameter plumbing: a constant mean has the single parameter β.
get_params(mConst::MeanConst) = Float64[mConst.β]
get_param_names(::MeanConst) = [:β]
num_params(mConst::MeanConst) = 1
# Overwrite the single hyperparameter β from a length-1 parameter vector.
function set_params!(mConst::MeanConst, hyp::AbstractVector)
    if length(hyp) != 1
        throw(ArgumentError("Constant mean function only has 1 parameter"))
    end
    mConst.β = hyp[1]
end
# Gradient of the constant mean with respect to its single parameter β,
# which is identically one regardless of the input location `x`.
grad_mean(mConst::MeanConst, x::AbstractVector) = ones(1)
| [
2,
3103,
18797,
1612,
2163,
198,
198,
37811,
198,
220,
220,
220,
22728,
34184,
1279,
25,
22728,
198,
198,
3103,
18797,
1612,
2163,
198,
15506,
63,
11018,
198,
76,
7,
87,
8,
796,
27169,
198,
15506,
63,
198,
4480,
6937,
7559,
26638,
1... | 2.494444 | 360 |
# __BEGIN_LICENSE__
#
# ThreeDeconv.jl
#
# Copyright (c) 2018, Stanford University
#
# All rights reserved.
#
# Redistribution and use in source and binary forms for academic and other
# non-commercial purposes with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code, including modified source code, must retain
# the above copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form or a modified form of the source code must
# reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of The Leland Stanford Junior University, any of its
# trademarks, the names of its employees, nor contributors to the source code
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# * Where a modified version of the source code is redistributed publicly in
# source or binary forms, the modified source code must be published in a freely
# accessible manner, or otherwise redistributed at no charge to anyone
# requesting a copy of the modified source code, subject to the same terms as
# this agreement.
#
# THIS SOFTWARE IS PROVIDED BY THE TRUSTEES OF THE LELAND STANFORD JUNIOR
# UNIVERSITY "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE LELAND STANFORD JUNIOR
# UNIVERSITY OR ITS TRUSTEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# __END_LICENSE__
using QuadGK
import Optim
# Regularization floor keeping variances and log arguments strictly positive.
const ϵ_reg = 1e-6
# Container for a fitted parametric noise model.
struct ParametricNoiseModel
    m_hat::Vector{Float64} # local mean estimates
    σ_hat::Vector{Float64} # local standard-deviation estimates
    model::Function # fitted variance model
    model_init::Function # initial (least-squares) variance model
    # NOTE(review): untyped fields — presumably the model coefficient vectors
    # matching `model`/`model_init`; confirm and parametrize if possible.
    params
    params_init
end
# One local (mean, std) estimation pair collected from a histogram bin
# (see `localnoiseestimation`).
struct localEstimation{T<:AbstractFloat}
    y::T # local sample mean (from the wavelet approximation subband)
    σ::T # MAD-based standard-deviation estimate (from the detail subband)
    κ::T # MAD bias-correction factor used for σ
    n::Int # number of samples in the bin
end
"""
    foi_noiseestimation(z; τ=0.2, maxnum_pairs, verbose=false)

Estimate the coefficients `ab = [a, b]` of the affine noise-variance model
σ²(y) = a·y + b from an image stack `z` normalized to [0, 1]: local
mean/σ pairs are collected per frame, a least-squares fit initializes the
parameters, and Nelder-Mead likelihood maximization refines them.

Throws `ArgumentError` if `z` is not normalized to [0, 1] and errors if the
optimization fails to converge.
"""
function foi_noiseestimation(z::AbstractArray{Float32}; τ=0.2, maxnum_pairs::Int, verbose::Bool=false)
    # τ = 0.2 is good to reject a lot of outliers.
    # Validate inputs with explicit errors instead of `@assert`, which may be
    # disabled at higher optimization levels.
    maximum(z) <= 1.0 || throw(ArgumentError("z must be normalized to [0, 1] (maximum exceeds 1)"))
    minimum(z) >= 0.0 || throw(ArgumentError("z must be normalized to [0, 1] (minimum is negative)"))
    # Compute the local sample mean and standard deviation over a smooth region
    println("Computing local noise variance.")
    num_imgs = size(z,3)
    z_stack = (z[:,:,i] for i in 1:num_imgs)
    est_pairs_array = map(x->localnoiseestimation(x, τ)[1], z_stack)
    est_pairs = vcat(est_pairs_array...)
    println("Initializing parameters by least-squares.")
    ab_init, y_hat, σ_hat = initialize_parameters(est_pairs)
    # Cap the number of pairs used in the (expensive) likelihood maximization
    # by random subsampling.
    num_pairs = length(est_pairs)
    if num_pairs > maxnum_pairs
        idx = rand(1:num_pairs, maxnum_pairs)
        est_pairs = est_pairs[idx]
    end
    println("Initialization done.")
    println("Starting likelihood maximization.")
    likelihood0(x::Vector{Float64}) = nonclipped_negloglikelihood(x, est_pairs)
    result = Optim.optimize(likelihood0, ab_init, Optim.NelderMead(), Optim.Options(show_trace = verbose))
    println("Finished the maximization.")
    ab_hat = Optim.minimizer(result)
    Optim.converged(result) || error("likelihood maximization did not converge")
    return ab_hat
end
# Collect local (mean, σ) estimation pairs from one image: split it into
# wavelet approximation/detail subbands, mask smooth regions, histogram the
# smoothed approximation into intensity bins, and estimate per-bin mean and
# MAD-based σ. Returns `(est_pairs, x_smo)` where `x_smo` is the smooth mask.
function localnoiseestimation(z::AbstractArray{Float32,2}, τ)
    # Wavelet and scaling functions used in the original paper are the followings:
    # ψ = Array{Float32}([0.035, 0.085, -0.135, -0.460, 0.807, -0.333])
    # ϕ = Array{Float32}([0.025, -0.060, -0.095, 0.325, 0.571, 0.235])
    ϕ = [0.035226291882100656f0, -0.08544127388224149f0, -0.13501102001039084f0, 0.4598775021193313f0, 0.8068915093133388f0, 0.3326705529509569f0]
    ϕ ./= sum(ϕ)
    ψ = [-0.3326705529509569f0, 0.8068915093133388f0, -0.4598775021193313f0, -0.13501102001039084f0, 0.08544127388224149f0, 0.035226291882100656f0]
    ψ ./= norm(ψ)
    σ_gauss = 1.2f0
    gauss = [exp(-x^2 / (2.0f0 * σ_gauss^2)) for x in -10.f0:10.f0]
    gauss ./= sum(gauss)
    # Detail / approximation subbands, decimated by 2.
    z_wdet = circconv(z, ψ)[1:2:end, 1:2:end]
    z_wapp = circconv(z, ϕ)[1:2:end, 1:2:end]
    ω = ones(Float32, 7) ./ 7.0f0
    z_smo = circconv(z_wapp, ω, ω)
    s = sqrt(0.5 * π) .* circconv(abs.(z_wdet), ω, ω)
    g = [-0.5f0, 0.0f0, 0.5f0]
    smoothed_zwapp = circconv(z_wapp, gauss, gauss)
    dx_wapp = circconv(smoothed_zwapp, [1.0f0], g)
    dy_wapp = circconv(smoothed_zwapp, g, [1.0f0])
    # Smooth-region mask: gradient magnitude below τ times the local noise level.
    x_smo = sqrt.(dx_wapp.^2 .+ dy_wapp.^2) .< τ .* s
    N = length(z_smo)
    num_bins = 300
    histogram_zwapp = [Vector{Float32}() for _ in 1:num_bins]
    histogram_zwdet = [Vector{Float32}() for _ in 1:num_bins]
    min_wapp = sum(ϕ[ϕ .< 0.f0])
    max_wapp = sum(ϕ[ϕ .>= 0.f0])
    Δ = (max_wapp - min_wapp) / num_bins
    for i in 1:N
        if x_smo[i]
            # BUGFIX: the unclamped `floor` yields index 0 for values at (or,
            # after smoothing, below) `min_wapp` and `num_bins` overflow at
            # `max_wapp`, causing a BoundsError on the 1-based histograms.
            idx = clamp(floor(Int, (z_smo[i] - min_wapp) / Δ), 1, num_bins)
            push!(histogram_zwapp[idx], z_wapp[i])
            push!(histogram_zwdet[idx], z_wdet[i])
        end
    end
    est_pairs = Vector{localEstimation{Float64}}()
    for i in 1:num_bins
        if histogram_zwdet[i] != []
            n = length(histogram_zwdet[i])
            κ = madBiasFactor(n)
            # max(…, .0) guards against tiny negative values from rounding.
            σ = sqrt(max(Float64(mad(histogram_zwdet[i], κ))^2, .0))
            y = Float64(mean(histogram_zwapp[i]))
            push!(est_pairs, localEstimation(y, σ, κ, n))
        end
    end
    return est_pairs, x_smo
end
"""
Least-squares initialization of the affine variance model σ²(y) ≈ a·y + b.

Returns `(ab0, y_hat, σ_hat)` where `ab0 = [a, b]` solves the linear
least-squares problem `[y_hat 1] * ab ≈ σ_hat.^2` and `y_hat`/`σ_hat` are the
local estimates extracted from `est_pairs`.
"""
function initialize_parameters(est_pairs::Vector{localEstimation{T}}) where T
    num_pairs = length(est_pairs)
    y_hat = [Float64(p.y) for p in est_pairs]
    σ_hat = [Float64(p.σ) for p in est_pairs]
    # Design matrix [y 1]; the original's duplicate accumulation vectors
    # (Φ_tmp == y_hat, v == σ_hat.^2) were redundant and are removed.
    Φ = hcat(y_hat, ones(num_pairs))
    ab0 = Φ \ (σ_hat.^2)
    return ab0, y_hat, σ_hat
end
"""
    nonclipped_negloglikelihood(ab, est_pairs)

Negative log-likelihood of the affine noise model `σ²(y) = ab[1]*y + ab[2]`
(see `σsq_reg`) given the local estimation pairs, marginalizing the latent
clean value `y` of each pair by numerical integration over [0, 1].
`ϵ_reg` (file-level constant) keeps the argument of `log` strictly positive.
"""
function nonclipped_negloglikelihood(ab::Vector{Float64}, est_pairs::Vector{localEstimation{T}}) where T
    Δ = .1
    total_val = .0
    for l in est_pairs
        # Per-pair weights derived from the sample count n: 1/n for the mean;
        # the 1.35/(n + 1.5) factor presumably comes from the MAD scale
        # estimator's sampling distribution — TODO confirm against derivation.
        c_i = 1.0 / l.n
        d_i = 1.35 / (l.n + 1.5)
        y_i = l.y
        σ_i = l.σ
        # This integrand is sometimes a very sharp peak-like function, like a
        # Dirac delta function. Direct numerical integration over [0.0, 1.0]
        # is therefore relatively difficult, because the quadrature may never
        # evaluate the integrand near the peak.
        # To avoid this issue, the integration interval is separated into
        # multiple sub-intervals. Since the peak is known to be close to y_i,
        # the function value at y_i should be sufficiently larger than zero.
        integrand(y::Float64)::Float64 = 1.0 / σsq_reg(y, ab) *
            exp(-1.0 / (2.0 * σsq_reg(y, ab)) * ( (y_i - y)^2 / c_i + (σ_i - sqrt(σsq_reg(y, ab)) )^2 / d_i))
        val = .0
        # Integrate piecewise: [0, y_i - Δ], [y_i - Δ, y_i], [y_i, y_i + Δ],
        # [y_i + Δ, 1], so y_i is always an interval endpoint.
        if y_i - Δ > .0
            val += quadgk(integrand, .0, y_i - Δ)[1]
        end
        val += quadgk(integrand, max(.0, y_i - Δ), y_i)[1]
        val += quadgk(integrand, y_i, min(1.0, y_i + Δ))[1]
        if y_i + Δ < 1.0
            val += quadgk(integrand, y_i + Δ, 1.0)[1]
        end
        # Accumulate the negative log of the normalized marginal likelihood.
        total_val -= log(1 / (2π * sqrt(c_i * d_i) ) * abs(val) + ϵ_reg)
    end
    return total_val
end
# Regularized variance model: never drops below ϵ_reg² so divisions and
# square roots in the likelihood stay well-defined.
σsq_reg(t::Float64, coeffs::Vector{Float64}) = max(σsq(t, coeffs), ϵ_reg^2)
# Affine variance model σ²(t) = a*t + b with coeffs = [a, b].
σsq(t::Float64, coeffs::Vector{Float64}) = coeffs[1] * t + coeffs[2]
| [
2,
11593,
33,
43312,
62,
43,
2149,
24290,
834,
198,
2,
198,
2,
7683,
10707,
261,
85,
13,
20362,
198,
2,
198,
2,
15069,
357,
66,
8,
2864,
11,
13863,
2059,
198,
2,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
... | 2.277254 | 3,394 |
<filename>backend/anime_data/snapshots_8676.jl
{"score": 7.47, "score_count": 105041, "timestamp": 1567156691.0}
{"score": 7.47, "score_count": 104512, "timestamp": 1565255920.0}
{"score": 7.48, "score_count": 103497, "timestamp": 1560521897.0}
{"score": 7.48, "score_count": 103335, "timestamp": 1559873532.0}
{"score": 7.48, "score_count": 103198, "timestamp": 1559003157.0}
{"score": 7.48, "score_count": 103052, "timestamp": 1558463581.0}
{"score": 7.48, "score_count": 102789, "timestamp": 1557516079.0}
{"score": 7.48, "score_count": 101524, "timestamp": 1552987254.0}
{"score": 7.49, "score_count": 100705, "timestamp": 1550375712.0}
{"score": 7.49, "score_count": 99947, "timestamp": 1547695191.0}
{"score": 7.49, "score_count": 99947, "timestamp": 1547687718.0}
{"score": 7.51, "score_count": 95020, "timestamp": 1529254514.0}
{"score": 7.51, "score_count": 92899, "timestamp": 1522339580.0}
{"score": 7.52, "score_count": 91002, "timestamp": 1516872218.0}
{"score": 7.53, "score_count": 89448, "timestamp": 1513059306.0}
{"score": 7.48, "score_count": 103721, "timestamp": 1561629302.0}
{"score": 7.52, "score_count": 91002, "timestamp": 1516872202.0}
{"score": 7.53, "score_count": 89161, "timestamp": 1512000427.0}
{"score": 7.61, "score_count": 68355, "timestamp": 1460091010.0}
{"score": 7.63, "score_count": 63191, "timestamp": 1448940393.0}
{"score": 7.64, "score_count": 60580, "timestamp": 1441778723.0}
{"score": 7.53, "score_count": 89161, "timestamp": 1512000504.0}
{"score": 7.57, "score_count": 78551, "timestamp": 1484614279.0}
{"score": 7.57, "score_count": 78551, "timestamp": 1484614083.0}
{"score": 7.57, "score_count": 77762, "timestamp": 1483740846.0}
{"score": 7.57, "score_count": 77762, "timestamp": 1483740565.0}
{"score": 7.57, "score_count": 76955, "timestamp": 1482315429.0}
{"score": 7.59, "score_count": 73055, "timestamp": 1471569583.0}
{"score": 7.59, "score_count": 71544, "timestamp": 1467977324.0}
{"score": 7.6, "score_count": 69360, "timestamp": 1462469333.0}
{"score": 7.6, "score_count": 69086, "timestamp": 1461802266.0}
{"score": 7.6, "score_count": 68917, "timestamp": 1461442258.0}
{"score": 7.61, "score_count": 67692, "timestamp": 1458728550.0}
{"score": 7.61, "score_count": 67553, "timestamp": 1458389046.0}
{"score": 7.61, "score_count": 67425, "timestamp": 1458105758.0}
{"score": 7.61, "score_count": 67038, "timestamp": 1457206805.0}
{"score": 7.62, "score_count": 66676, "timestamp": 1456247903.0}
{"score": 7.62, "score_count": 66396, "timestamp": 1455586820.0}
{"score": 7.62, "score_count": 65922, "timestamp": 1454420322.0}
{"score": 7.62, "score_count": 64558, "timestamp": 1451761564.0}
{"score": 7.62, "score_count": 64111, "timestamp": 1451038037.0}
{"score": 7.63, "score_count": 63703, "timestamp": 1450143738.0}
{"score": 7.63, "score_count": 63545, "timestamp": 1449791766.0}
{"score": 7.63, "score_count": 62257, "timestamp": 1446153782.0}
| [
27,
34345,
29,
1891,
437,
14,
272,
524,
62,
7890,
14,
45380,
20910,
62,
23,
42548,
13,
20362,
198,
4895,
26675,
1298,
767,
13,
2857,
11,
366,
26675,
62,
9127,
1298,
47235,
3901,
11,
366,
16514,
27823,
1298,
1315,
3134,
1314,
2791,
6... | 2.346216 | 1,242 |
# Tests for MultilinearOpt.isconvex on JuMP expressions and constraints.
@testset "isconvex" begin
    m = JuMP.Model()
    JuMP.@variables m begin
        x
        y
        z
    end

    # AffExpr: affine expressions are always convex.
    @test MultilinearOpt.isconvex(x + y)
    @test MultilinearOpt.isconvex(x - z - 3)

    # QuadExpr: convex iff the quadratic part is positive semidefinite.
    @test MultilinearOpt.isconvex(x^2)
    @test MultilinearOpt.isconvex(x^2 + 0 * z^2) # test positive semidefinite gramian
    @test MultilinearOpt.isconvex(x^2 + 3 * y - z + 5)
    @test !MultilinearOpt.isconvex(-x^2)
    @test !MultilinearOpt.isconvex(x + y - z ^2 + x^2 + y^2)

    # Round-trip: a random PSD gramian should be recovered by `gramian`
    # and the corresponding quadratic recognized as convex.
    G = rand(3, 3)
    G = G * G'   # G*G' is symmetric positive semidefinite
    vars = [x, y, z]
    expr = dot(vars, G * vars)
    G_back, vars_back = MultilinearOpt.gramian(expr)
    @test vars_back == vars
    @test G_back ≈ G atol=1e-10
    @test MultilinearOpt.isconvex(expr)

    # LinearConstraint: linear constraints are always convex.
    @test MultilinearOpt.isconvex(JuMP.constraint_object(JuMP.@constraint(m, x + 3 * y == 0)))
    @test MultilinearOpt.isconvex(JuMP.constraint_object(JuMP.@constraint(m, 2 * x - y >= z)))
    @test MultilinearOpt.isconvex(JuMP.constraint_object(JuMP.@constraint(m, 2 * x - y <= z)))

    # QuadConstr: convexity depends on direction and curvature of both sides.
    @test !MultilinearOpt.isconvex(JuMP.constraint_object(JuMP.@constraint(m, x == y * z)))
    @test MultilinearOpt.isconvex(JuMP.constraint_object(JuMP.@constraint(m, x^2 + y^2 <= z)))
    @test !MultilinearOpt.isconvex(JuMP.constraint_object(JuMP.@constraint(m, x^2 + y^2 <= z^2)))
    @test MultilinearOpt.isconvex(JuMP.constraint_object(JuMP.@constraint(m, -x^2 - y^2 >= -z)))
end
| [
31,
9288,
2617,
366,
271,
1102,
303,
87,
1,
2221,
198,
220,
220,
220,
285,
796,
12585,
7378,
13,
17633,
3419,
628,
220,
220,
220,
12585,
7378,
13,
31,
25641,
2977,
285,
2221,
198,
220,
220,
220,
220,
220,
220,
2124,
198,
220,
220,... | 2.048433 | 702 |
module FullRegisterGate

export expandGateToFullRegister

# expandGateToFullRegister expands the given gate, with optional control
# qubits, to the entire quantum register of the given size. The gate acts on
# the contiguous qubits starting at `gate_lowest_index` (0-based); columns
# whose control bits are not all set pass through unchanged (identity).
function expandGateToFullRegister(register_size::Integer,
    gate::AbstractMatrix{Complex{Float64}},
    gate_lowest_index::Integer,
    control_bit_indexes::AbstractArray{Int64,1} = Array{Int64,1}([])
    )::AbstractMatrix{Complex{Float64}}
    gate_dim = size(gate, 1)
    gate_qubits = Int(log2(gate_dim))
    gate_mask = gate_full_register_bitmask(gate_qubits, gate_lowest_index)
    if gate_mask == 0
        error("gate_bitmask cannot be 0")
    end
    full_dim = 2 ^ register_size
    expanded = zeros(Complex{Float64}, full_dim, full_dim)
    ctrl_mask = control_full_register_bitmask(control_bit_indexes)
    for col in 0:full_dim-1
        if !all_control_bits_set(col, ctrl_mask)
            # Control condition not met: this basis state is left untouched.
            expanded[col+1, col+1] = Complex(1)
        else
            # Bits of this basis state that the small gate acts on.
            in_bits = (col & gate_mask) >>> gate_lowest_index
            # Selecting a column of the gate is the matrix-vector product
            # with the corresponding basis vector.
            out_state = gate[:, in_bits+1]
            for s in 0:gate_dim-1
                row = (col & ~gate_mask) | (s << gate_lowest_index)
                expanded[row+1, col+1] = Complex(out_state[s+1])
            end
        end
    end
    return expanded
end

# Bitmask covering the gate's qubits, shifted to its position in the register.
function gate_full_register_bitmask(gate_qubit_count::Integer, gate_lowest_index::Integer)::UInt64
    mask = zero(UInt64)
    for _ in 1:gate_qubit_count
        mask = (mask << 1) | 1
    end
    return mask << gate_lowest_index
end

# Bitmask with one bit set per control qubit index.
function control_full_register_bitmask(control_bit_indexes::AbstractArray{Int64,1})::UInt64
    mask = zero(UInt64)
    for idx in control_bit_indexes
        mask |= one(UInt64) << idx
    end
    return mask
end

# True when every control bit in `control_bitmask` is set in state index `i`.
@inline all_control_bits_set(i::Integer, control_bitmask::Integer) =
    i & control_bitmask == control_bitmask

end # module
| [
21412,
6462,
38804,
22628,
198,
198,
39344,
4292,
22628,
2514,
13295,
38804,
198,
198,
2,
4292,
22628,
2514,
13295,
38804,
27513,
262,
1813,
8946,
351,
11902,
1630,
627,
9895,
284,
2104,
14821,
7881,
351,
262,
1813,
2546,
13,
198,
8818,
... | 2.738128 | 737 |
<gh_stars>0
# ---
# layout: post
# title: "π day"
# date: 2019-03-13 00:00:00 +0000
# categories: blog
# mathjax: true
# ---
# >In the UK we have started to celebrate π day (the 3rd month's 14th day) every year, even though we don't use the USA's date formatting convention of `monthnumber` followed by `daynumber`. But we can't really celebrate the 31st of April (31/4) or the 3rd of Quatember (?) (3/14), so we'll happily celebrate π day on 14/3 along with everyone else!
# >I set myself a challenge at the beginning of March: make a π-related image using Julia and the Luxor.jl package every day until π day. Some days it worked out well, others didn't, but I've gathered them all here anyway. This post has a fair few images, but not very much code or mathematical content.
# The images here are in low-resolution: they should be available on my [Flickr page](https://www.flickr.com/photos/153311384@N03/) at their full resolution if you want to download or re-use them.
# ### Day 1: Circle packing
# Circle packing may be a well-trodden path, but it always looks neat, and it's a nice easy start. You maintain a list of circles (center point and radius). Then you create a random circle, check it against all the other ones, draw it if it doesn't overlap, or reduce the radius and try again. It's not very efficient but you can set it going and go and make some coffee.
# To make the π shape appear, the code creates a path:
#md fontsize(480)
#md textoutlines("π", O, :path, halign=:center, valign=:middle)
#md πoutline = first(pathtopoly())
# then checks whether each circle's centerpoint is inside or outside the outline of the π shape:
#md isinside(pt, πoutline)
# and colors it accordingly.
# 
# ### Day 2: Dry and wet
# I repeated myself today, thinking I could develop the circles a bit more, and ended up with this glossier wet-look version. The apparently random-looking shapes in the background are Bézier splodges that are supposed to be splashes...
# 
# ### Day 3: π packing
# This is π packing rather than circle packing, although the code is again quite similar in outline: choose a point at random, find the largest font size at which the π character fits without overlapping others in the list, and then place it and add it to the list. The colors are a bit murky though.
# 
# ### Day 4: Rainbow
# Combining concentric circles and rainbow colors, this image shows about 350 digits of π.
# 
# To generate the digits of π, I use this function:
# Spigot algorithm: return the first `n` decimal digits of π as a BigInt
# vector. Two rational approximations (a/b and a1/b1) are advanced until
# their integer parts agree; each agreed digit is emitted and stripped off.
function pidigits(n)
    out = BigInt[]
    k = big(2); a = big(4); b = big(1); a1 = big(12); b1 = big(4)
    while n > 0
        # Advance the linear fractional transformation (uses the old k).
        p = k * k
        q = 2k + 1
        k += 1
        na, nb = a1, b1
        na1 = p * a + q * a1
        nb1 = p * b + q * b1
        a, b, a1, b1 = na, nb, na1, nb1
        d = a ÷ b
        d1 = a1 ÷ b1
        # Emit digits while both approximations agree on the leading digit.
        while d == d1
            push!(out, d)
            n -= 1
            a, a1 = 10(a % b), 10(a1 % b1)
            d = a ÷ b
            d1 = a1 ÷ b1
        end
    end
    return out
end
# It looks like witchcraft to me, but I understand that it's a "spigot" algorithm. I was hoping for a while that it was named after a Professor Spigot, but in fact it's describing the way the digits trickle out one by one like drops of water. It's quick enough for a thousand digits or so, but slows down a lot when you ask for 100_000 or more, probably due to the hard work that the big integer library has to do: even when you're just calculating the first 15 digits of π, the values of `a1` and `b1` are way over the limits of Int64s.
#md julia-1.1> @time pidigits(1000);
#md 0.014522 seconds (44.90 k allocations: 9.425 MiB, 28.97% gc time)
# The image might work better on white:
# 
# Sometimes I wanted to check where certain sequences of digits appeared. I couldn't find a built-in function that looked for a sequence of digits in an array, but this worked well enough for my purposes:
"""
    findsubsequence(needle, haystack)

Return the (1-based) start indices of every occurrence of `needle` as a
contiguous subsequence of `haystack`. The string method converts `str` to a
vector of its digits first, so `findsubsequence("999999", pidigits(2000))`
locates digit runs within π.
"""
function findsubsequence(needle, haystack)
    result = Int64[]
    # Bug fix: the last valid alignment starts at
    # length(haystack) - length(needle) + 1; the previous upper bound of
    # length(haystack) - length(needle) missed a match ending at the final
    # element (including needle == haystack).
    for k in 1:length(haystack) - length(needle) + 1
        # view avoids copying the candidate window.
        if needle == view(haystack, k:k + length(needle) - 1)
            push!(result, k)
        end
    end
    return result
end

findsubsequence(str::String, digits) =
    findsubsequence(map(x -> parse(Int, x), split(str, "")), digits)
findsubsequence("999999", pidigits(2000)) # => [763]
# ### Day 5: Low-fat
# A chunky typeface like the Avenir Heavy I used yesterday is good for masking and clipping. But I wondered how the narrowest typeface would look. I found <NAME>, designed by <NAME> at the Royal Academy of Fine Arts in Copenhagen. Adobe's description says:
# >The most compressed version works best where legibility is less important than dramatic visual effect.
# and I like the abstract look even though it's almost illegible... Would this make nice bathroom tiles?
# 
# ### Day 6 Breakfast and Tiffany
# I'm still thinking about using typefaces. I'm a fan of <NAME>'s ITC Tiffany font, his nostalgic look back from the 1970s to the age of Edwardian elegance.
# 
# It's easy to do this with tables. Like the circle packing, the code checks whether the coordinates of each table cell fall within a given perimeter, and changes the font accordingly.
# ### Day 7 Distinguished
# The excellent Colors.jl package has a function called `distinguishable_colors()` (which fortunately tab-completes). The help text says:
# > This uses a greedy brute-force approach to choose `n` colors that are maximally distinguishable. Given `seed` color(s), and a set of possible hue, chroma, and lightness values (in LCHab space), it repeatedly chooses the next color as the one that maximizes the minimum pairwise distance to any of the colors already in the palette.
# Much to do with color depends on the viewer's perception, but I think it works well here. It starts at the top left, and works from left to right. (That pesky decimal point defaults to using the previous color...) You can spot the Feynman point (`999999`) halfway down on the left (look for the six consecutive sandy brown squares), or the four purple sevens on the bottom row.
# 
# I remembered to try to choose the color for the small labels (probably unreadable in the low-resolution PNG you see here) so that they're either light on dark, or dark on light.
#md ... r, g, b = color of square
#md gamma = 2.2
#md luminance = 0.2126 * r^gamma + 0.7152 * g^gamma + 0.0722 * b^gamma
#md (luminance > 0.5^gamma) ? sethue("black") : sethue("white")
# ### Day 8 Candy crush edition
# 
# I must have seen an advert for CandyCrush yesterday, or perhaps all that talk of gamma and LCHab spaces caused a reaction, but this sugar rush of an image was the result. The SVG version looks tasty but is too big for this web page.
# ### Day 9 Like a circle in a spiral, a wheel within a wheel
# Arranging the sweets in a spiral looks tidy.
# 
# ### Day 10 π into circumference
# Luxor's `polysample()` function takes a polygon and samples it at regular intervals. This allows the following idea, where each point on a shape (here, the outline of the π character) is slowly moved to a matching location on the circular shape around the outside.
# 
# For a point on the π border `p1`, and a matching point on the circumference polygon `p2`, the intermediate point is given by `between(p1, p2, n)`, where `n` is between 0 and 1.
# I like the almost 3D effect you get from this.
# ### Day 11 Charcoal
# Time for a charcoal sketch:
# 
# The crinkly edges of the paper are made by the `polysample()` function on a rectangle then applying simplex-`noise()`-y nudges to the vertices. The paper is textured with `rule()`d lines, and there's some very low values for `setopacity()` smudges. Shifting the Bézier curve handles slightly for each iteration gives a brushy/sketchy feel. (It's fortunate I can copy and paste some of this code from drawings I've made before: I've learnt the hard way that it's better keep things than throw them away...)
# ### Day 12
# I ran out of time on this one, and there are still some problems with the text spacing. The idea is to have the infinite digits of π spiral into some fiery star with some space-y stuff. Probably not the sort of image I should be attempting at all with simple vector-based 2D graphics tools, but it feels like a challenge. Those wispy trails are the same as yesterday's brush strokes, but using custom `setdash()` dashing patterns.
# 
# ### Day 13
# The idea here is to show which digit of π is the current leader, in terms of how many times that digit has appeared already. (Yes, a stupid idea, I know!) Then I couldn't decide on how many digits to show, so it's going to be an animated GIF showing the first 1000 digits. At the 200 digit mark poor old "7" is struggling at the back of the field, but the glory days are ahead - after 1000 digits, it's overtaken 0, 4, and 6.
# 
# The animation turned into a video rather than a GIF, because I don't like the low resolution of GIFs today.
# And now of course I have to add a suitable audio soundtrack. Luckily I've recently been playing with <NAME>' [MIDI interface for Julia](https://github.com/JuliaMusic), so it was easy enough to make a musical version of the first 1000 digits of π, where the digits from 0 to 9 choose the appropriate note from a reasonably harmonious scale.
using MIDI
# Assemble a single-track MIDI file from `notes` and write it to /tmp.
function savetrack(track, notes)
    midifile = MIDIFile()
    # Attach the notes and a name to the track, then register the track.
    addnotes!(track, notes)
    addtrackname!(track, "a track")
    push!(midifile.tracks, track)
    writeMIDIFile("/tmp/sound-of-pi.mid", midifile)
end
# MIDI note numbers of a reasonably harmonious scale; digit d → scales[d + 1].
scales = [46, 48, 51, 53, 55, 57, 58, 60, 62, 65, 67]

# Append one note per digit of the first 1000 digits of π to `notes`.
# Every note is 960 ticks long and starts where the previous one ended.
function generatetune!(notes)
    tick = 1
    step = 1
    for digit in pidigits(1000)
        len = step * 960
        tick += step * 960
        pitch = scales[digit + 1]
        push!(notes, Note(pitch, 76, tick, len))
    end
end
notes = Notes()      # empty note container (MIDI.jl)
track = MIDITrack()  # fresh track that will hold the tune
generatetune!(notes) # fill `notes` with the π melody
savetrack(track, notes)  # write /tmp/sound-of-pi.mid
# 
# This "sonification" (or "audification") is just for fun. For a more convincing critique of these sonifications than I can provide, watch the always entertaining [Tantacrul](https://www.youtube.com/watch?v=Ocq3NeudsVk)'s presentation on YouTube.
# And while you're on YouTube, the π video is on [my YouTube channel](https://www.youtube.com/channel/UCfd52kTA5JpzOEItSqXLQxg), and it's my entry for YouTube's Most Boring Video of 2019 competition, but I suspect it won't do very well—competition in this category is fierce, even if sometimes the contestants are unwilling participants.
# Happy π day!
# [2019-03-13]
# {: .center-image}
using Literate #src
# preprocess for notebooks #src
function setimagefolder(content) #src
content = replace(content, "IMAGEFOLDER" => "$IMAGEFOLDER") #src
return content #src
end #src
# for Jupyter notebook, put images in subfolder #src
#IMAGEFOLDER = "images/piday" #src
#Literate.notebook("source/piday.jl", "notebooks", preprocess = setimagefolder) #src
# for Markdown/Jekyll notebook, put images in "/images" #src
IMAGEFOLDER = "/images/piday" #src
Literate.markdown("source/piday.jl", ".", name="_posts/2019-03-13-piday", #src
preprocess = setimagefolder, #src
codefence = "{% highlight julia %}" => "{% endhighlight julia %}", #src
documenter=false) #src
#src
| [
27,
456,
62,
30783,
29,
15,
198,
2,
11420,
198,
2,
12461,
25,
1281,
198,
2,
3670,
25,
366,
46582,
1110,
1,
198,
2,
3128,
25,
13130,
12,
3070,
12,
1485,
3571,
25,
405,
25,
405,
1343,
2388,
198,
2,
9376,
25,
4130,
198,
2,
10688,... | 2.88238 | 4,370 |
# Union of the scalar kinds a Dual may wrap: real or complex numbers.
typealias ReComp Union{Real,Complex}

# Dual number a + b*ɛ with ɛ² == 0: `value` is the primal part,
# `epsilon` the perturbation (derivative) part.
immutable Dual{T<:ReComp} <: Number
    value::T
    epsilon::T
end
# Mixed-type construction promotes both parts to a common type.
Dual{S<:ReComp,T<:ReComp}(x::S, y::T) = Dual(promote(x,y)...)
Dual(x::ReComp) = Dual(x, zero(x))

# Unit perturbations: ɛ for the real direction, imɛ for the imaginary one
# (Bool parts so they promote to any numeric type without widening).
const ɛ = Dual(false, true)
const imɛ = Dual(Complex(false, false), Complex(false, true))

# Aliases named after the total bit width of the stored pair.
typealias Dual128 Dual{Float64}
typealias Dual64 Dual{Float32}
typealias Dual32 Dual{Float16}
typealias DualComplex256 Dual{Complex128}
typealias DualComplex128 Dual{Complex64}
typealias DualComplex64 Dual{Complex32}
# Conversions between Dual types and plain numbers.
convert{T<:ReComp}(::Type{Dual{T}}, z::Dual{T}) = z
convert{T<:ReComp}(::Type{Dual{T}}, z::Dual) = Dual{T}(convert(T, value(z)), convert(T, epsilon(z)))
convert{T<:ReComp}(::Type{Dual{T}}, x::Number) = Dual{T}(convert(T, x), convert(T, 0))
# Converting a Dual down to a plain number is only exact when the
# perturbation part is zero; otherwise information would be lost.
convert{T<:ReComp}(::Type{T}, z::Dual) = (epsilon(z)==0 ? convert(T, value(z)) : throw(InexactError()))

# Promotion: mixing Duals (or a Dual with a plain number) promotes the
# wrapped scalar types.
promote_rule{T<:ReComp, S<:ReComp}(::Type{Dual{T}}, ::Type{Dual{S}}) = Dual{promote_type(T, S)}
promote_rule{T<:ReComp, S<:ReComp}(::Type{Dual{T}}, ::Type{S}) = Dual{promote_type(T, S)}
promote_rule{T<:ReComp}(::Type{Dual{T}}, ::Type{T}) = Dual{T}

widen{T}(::Type{Dual{T}}) = Dual{widen(T)}
# Accessors for the two components of a Dual.
value(z::Dual) = z.value
epsilon(z::Dual) = z.epsilon

# `dual` constructors, mirroring `complex(...)`.
dual(x::ReComp, y::ReComp) = Dual(x, y)
dual(x::ReComp) = Dual(x)
dual(z::Dual) = z

# Deprecated elementwise (vectorized) forms via Compat.
Compat.@dep_vectorize_1arg ReComp dual
Compat.@dep_vectorize_2arg ReComp dual
Compat.@dep_vectorize_1arg Dual dual
Compat.@dep_vectorize_1arg Dual value
Compat.@dep_vectorize_1arg Dual epsilon

# Aliases for the two parts.
const realpart = value
const dualpart = epsilon

# Predicates forward to the primal part only: the perturbation does not
# determine whether the number counts as NaN/Inf/finite.
isnan(z::Dual) = isnan(value(z))
isinf(z::Dual) = isinf(value(z))
isfinite(z::Dual) = isfinite(value(z))
isdual(x::Dual) = true
isdual(x::Number) = false
eps(z::Dual) = eps(value(z))
eps{T}(::Type{Dual{T}}) = eps(T)
# Pretty-print a real Dual as "a + bɛ" / "a - bɛ" (compact: "a+bɛ").
# Falls back to the explicit "Dual{T}(a,b)" form when the value is finite
# but the epsilon part is not (NaN/Inf), where "a + Infɛ" would mislead.
function dual_show{T<:Real}(io::IO, z::Dual{T}, compact::Bool)
    x, y = value(z), epsilon(z)
    if isnan(x) || isfinite(y)
        compact ? showcompact(io,x) : show(io,x)
        # Fold the sign of the epsilon part into the +/- separator.
        if signbit(y)==1 && !isnan(y)
            y = -y
            print(io, compact ? "-" : " - ")
        else
            print(io, compact ? "+" : " + ")
        end
        compact ? showcompact(io, y) : show(io, y)
        # Insert "*" when the coefficient would not read unambiguously.
        printtimes(io, y)
        print(io, "ɛ")
    else
        print(io, "Dual{",T,"}(", x, ",", y, ")")
    end
end
# Pretty-print a complex Dual as "x ± yrɛ ± yi*imɛ", splitting the epsilon
# part into its real (ɛ) and imaginary (imɛ) components. Compact mode omits
# the spaces around the sign separators.
function dual_show{T<:Complex}(io::IO, z::Dual{T}, compact::Bool)
    x, y = value(z), epsilon(z)
    xr, xi = reim(x)
    yr, yi = reim(y)
    if isnan(x) || isfinite(y)
        compact ? showcompact(io,x) : show(io,x)
        # Sign of the real epsilon component becomes the first separator.
        if signbit(yr)==1 && !isnan(y)
            yr = -yr
            print(io, " - ")
        else
            print(io, " + ")
        end
        if compact
            if signbit(yi)==1 && !isnan(y)
                yi = -yi
                showcompact(io, yr)
                printtimes(io, yr)
                print(io, "ɛ-")
                showcompact(io, yi)
            else
                showcompact(io, yr)
                print(io, "ɛ+")
                showcompact(io, yi)
            end
        else
            if signbit(yi)==1 && !isnan(y)
                yi = -yi
                show(io, yr)
                printtimes(io, yr)
                print(io, "ɛ - ")
                show(io, yi)
            else
                show(io, yr)
                print(io, "ɛ + ")
                show(io, yi)
            end
        end
        printtimes(io, yi)
        print(io, "imɛ")
    else
        # Ambiguous case (finite value, non-finite epsilon): explicit form.
        print(io, "Dual{",T,"}(", x, ",", y, ")")
    end
end
# Boolean Dual: the unit perturbation Dual(false, true) prints as bare "ɛ";
# every other combination uses the explicit constructor form.
function dual_show{T<:Bool}(io::IO, z::Dual{T}, compact::Bool)
    x, y = value(z), epsilon(z)
    if !value(z) && epsilon(z)
        print(io, "ɛ")
    else
        print(io, "Dual{",T,"}(", x, ",", y, ")")
    end
end
# Complex-Bool Dual: print the three unit elements ("im", "ɛ", "imɛ") bare
# when exactly one of the non-real components is set; otherwise fall back to
# the explicit constructor form.
function dual_show{T<:Bool}(io::IO, z::Dual{Complex{T}}, compact::Bool)
    x, y = value(z), epsilon(z)
    xr, xi = reim(x)
    yr, yi = reim(y)
    if !xr
        # Bool multiplication acts as logical AND over the three flags.
        if xi*!yr*!yi
            print(io, "im")
        elseif !xi*yr*!yi
            print(io, "ɛ")
        elseif !xi*!yr*yi
            print(io, "imɛ")
        end
    else
        print(io, "Dual{",T,"}(", x, ",", y, ")")
    end
end
# Emit a literal "*" separator before "ɛ"/"imɛ" when the printed coefficient
# would not read as a plain literal (anything but an Integer, a Rational, or
# a finite AbstractFloat — e.g. Inf or NaN), so output like "Inf*ɛ" stays
# unambiguous.
function printtimes(io::IO, x::Real)
    plain = x isa Integer || x isa Rational || (x isa AbstractFloat && isfinite(x))
    if !plain
        print(io, "*")
    end
end
# Standard display entry points dispatch to dual_show.
show(io::IO, z::Dual) = dual_show(io, z, false)
showcompact(io::IO, z::Dual) = dual_show(io, z, true)

# Binary (de)serialization: a Dual is stored as its two components in order.
function read{T<:ReComp}(s::IO, ::Type{Dual{T}})
    x = read(s, T)
    y = read(s, T)
    Dual{T}(x, y)
end
function write(s::IO, z::Dual)
    write(s, value(z))
    write(s, epsilon(z))
end
## Generic functions of dual numbers ##

convert(::Type{Dual}, z::Dual) = z
convert(::Type{Dual}, x::Number) = Dual(x)

# `==` compares primal parts only; `isequal` also requires matching epsilon.
==(z::Dual, w::Dual) = value(z) == value(w)
==(z::Dual, x::Number) = value(z) == x
==(x::Number, z::Dual) = value(z) == x

isequal(z::Dual, w::Dual) = isequal(value(z),value(w)) && isequal(epsilon(z), epsilon(w))
isequal(z::Dual, x::Number) = isequal(value(z), x) && isequal(epsilon(z), zero(x))
isequal(x::Number, z::Dual) = isequal(z, x)

# Ordering is by the primal part alone.
isless{T<:Real}(z::Dual{T},w::Dual{T}) = value(z) < value(w)
isless{T<:Real}(z::Real,w::Dual{T}) = z < value(w)
isless{T<:Real}(z::Dual{T},w::Real) = value(z) < w

# hash must be consistent with ==: mix in epsilon only when it is non-zero.
hash(z::Dual) = (x = hash(value(z)); epsilon(z)==0 ? x : bitmix(x,hash(epsilon(z))))

float{T<:AbstractFloat}(z::Union{Dual{T},Dual{Complex{T}}})=z
complex{T<:Real}(z::Dual{Complex{T}})=z

# Rounding family operates on the primal part; the epsilon part is dropped.
floor{T<:Real}(::Type{T}, z::Dual) = floor(T, value(z))
ceil{ T<:Real}(::Type{T}, z::Dual) = ceil( T, value(z))
trunc{T<:Real}(::Type{T}, z::Dual) = trunc(T, value(z))
round{T<:Real}(::Type{T}, z::Dual) = round(T, value(z))

# Componentwise lifts: apply the operation to both parts independently.
for op in (:real,:imag,:conj,:float,:complex)
    @eval begin
        $op(z::Dual) = Dual($op(value(z)),$op(epsilon(z)))
    end
end

abs(z::Dual) = sqrt(abs2(z))
abs2(z::Dual) = real(conj(z)*z)
real{T<:Real}(z::Dual{T}) = z
abs{T<:Real}(z::Dual{T}) = z ≥ 0 ? z : -z

# angle of a real dual: 0 for non-negative, π for negative.
angle{T<:Real}(z::Dual{T}) = z ≥ 0 ? zero(z) : one(z)*π
angle{T<:Real}(z::Dual{Complex{T}}) = z == 0 ? (imag(epsilon(z)) == 0 ? Dual(zero(T),zero(T)) : Dual(zero(T),convert(T, Inf))) : real(log(sign(z))/im)

# algebraic definitions: conjugation/abs of the dual (ɛ) structure itself,
# as opposed to complex conjugation lifted componentwise above.
conjdual(z::Dual) = Dual(value(z),-epsilon(z))
absdual(z::Dual) = abs(value(z))
abs2dual(z::Dual) = abs2(value(z))
# algebra: arithmetic on duals implements the sum/product/quotient rules.
+(z::Dual, w::Dual) = Dual(value(z)+value(w), epsilon(z)+epsilon(w))
+(z::Number, w::Dual) = Dual(z+value(w), epsilon(w))
+(z::Dual, w::Number) = Dual(value(z)+w, epsilon(z))
-(z::Dual) = Dual(-value(z), -epsilon(z))
-(z::Dual, w::Dual) = Dual(value(z)-value(w), epsilon(z)-epsilon(w))
-(z::Number, w::Dual) = Dual(z-value(w), -epsilon(w))
-(z::Dual, w::Number) = Dual(value(z)-w, epsilon(z))

# avoid ambiguous definition with Bool*Number
*(x::Bool, z::Dual) = ifelse(x, z, ifelse(signbit(real(value(z)))==0, zero(z), -zero(z)))
*(x::Dual, z::Bool) = z*x

# Product rule: (a + bɛ)(c + dɛ) = ac + (bc + ad)ɛ.
*(z::Dual, w::Dual) = Dual(value(z)*value(w), epsilon(z)*value(w)+value(z)*epsilon(w))
*(x::Number, z::Dual) = Dual(x*value(z), x*epsilon(z))
*(z::Dual, x::Number) = Dual(x*value(z), x*epsilon(z))

# Quotient rule.
/(z::Dual, w::Dual) = Dual(value(z)/value(w), (epsilon(z)*value(w)-value(z)*epsilon(w))/(value(w)*value(w)))
/(z::Number, w::Dual) = Dual(z/value(w), -z*epsilon(w)/value(w)^2)
/(z::Dual, x::Number) = Dual(value(z)/x, epsilon(z)/x)
# Power with a dual exponent, generated for both ^ and NaNMath.pow:
# d/dx x^w contributes w*x^(w-1); d/dw x^w contributes x^w*log(x).
for f in [:^, :(NaNMath.pow)]
    @eval function ($f)(z::Dual, w::Dual)
        # Constant exponent: defer to the simpler Dual^Number methods below.
        if epsilon(w) == 0.0
            return $f(z,value(w))
        end
        val = $f(value(z),value(w))

        du =
            epsilon(z)*value(w)*(($f)(value(z),value(w)-1))+epsilon(w)*($f)(value(z),value(w))*log(value(z))

        Dual(val, du)
    end
end

# mod differentiates through the first argument only.
mod(z::Dual, n::Number) = Dual(mod(value(z), n), epsilon(z))

# these two definitions are needed to fix ambiguity warnings
^(z::Dual, n::Integer) = Dual(value(z)^n, epsilon(z)*n*value(z)^(n-1))
^(z::Dual, n::Rational) = Dual(value(z)^n, epsilon(z)*n*value(z)^(n-1))
^(z::Dual, n::Number) = Dual(value(z)^n, epsilon(z)*n*value(z)^(n-1))

NaNMath.pow(z::Dual, n::Number) = Dual(NaNMath.pow(value(z),n), epsilon(z)*n*NaNMath.pow(value(z),n-1))
NaNMath.pow(z::Number, w::Dual) = Dual(NaNMath.pow(z,value(w)), epsilon(w)*NaNMath.pow(z,value(w))*log(z))
# force use of NaNMath functions in derivative calculations:
# rewrite every call in an expression tree so the callee is qualified as
# NaNMath.f, recursing into the arguments. Non-Expr leaves pass through.
function to_nanmath(x::Expr)
    if x.head == :call
        qualified = Expr(:., :NaNMath, Base.Meta.quot(x.args[1]))
        return Expr(:call, qualified, map(to_nanmath, x.args[2:end])...)
    end
    return Expr(:call, map(to_nanmath, x.args)...)
end
to_nanmath(x) = x
# Generate a Dual method for every unary function whose symbolic derivative
# Calculus.jl provides, via the chain rule: f(x + x'ɛ) = f(x) + x'*f'(x)ɛ.
for (funsym, exp) in Calculus.symbolic_derivatives_1arg()
    funsym == :exp && continue   # special-cased below to reuse exp(x)
    funsym == :abs2 && continue  # defined above in terms of conj
    @eval function $(funsym)(z::Dual)
        x = value(z)
        xp = epsilon(z)
        Dual($(funsym)(x),xp*$exp)
    end
    # extend corresponding NaNMath methods
    if funsym in (:sin, :cos, :tan, :asin, :acos, :acosh, :atanh, :log, :log2, :log10,
          :lgamma, :log1p)
        # Re-target the definition at NaNMath.f and rewrite the derivative
        # expression so it also calls NaNMath functions.
        funsym = Expr(:.,:NaNMath,Base.Meta.quot(funsym))
        @eval function $(funsym)(z::Dual)
            x = value(z)
            xp = epsilon(z)
            Dual($(funsym)(x),xp*$(to_nanmath(exp)))
        end
    end
end

# only need to compute exp/cis once
exp(z::Dual) = (expval = exp(value(z)); Dual(expval, epsilon(z)*expval))
cis(z::Dual) = (cisval = cis(value(z)); Dual(cisval, im*epsilon(z)*cisval))

## TODO: should be generated in Calculus
sinpi(z::Dual) = Dual(sinpi(value(z)),epsilon(z)*cospi(value(z))*π)
cospi(z::Dual) = Dual(cospi(value(z)),-epsilon(z)*sinpi(value(z))*π)
| [
4906,
26011,
797,
7293,
4479,
90,
15633,
11,
5377,
11141,
92,
198,
198,
8608,
18187,
20446,
90,
51,
27,
25,
3041,
7293,
92,
1279,
25,
7913,
198,
220,
220,
220,
1988,
3712,
51,
198,
220,
220,
220,
304,
862,
33576,
3712,
51,
198,
43... | 1.935233 | 4,879 |
<reponame>hessammehr/Chain.jl<gh_stars>0
using Plots
using MCMCChain

# Synthetic sampler output: n_iter draws of n_name parameters per chain.
n_iter = 500
n_name = 3
n_chain = 2

# Three roughly-normal parameters shifted to means 1, 2, 3, plus a fourth
# integer-valued parameter drawn uniformly from 1:2.
val = randn(n_iter, n_name, n_chain) .+ [1, 2, 3]'
val = hcat(val, rand(1:2, n_iter, 1, n_chain))

chn = Chains(val)

# plotting single plotting types
ps_trace = plot(chn, :trace)
ps_mean = plot(chn, :mean)
ps_density = plot(chn, :density)
ps_autocor = plot(chn, :autocor)
#ps_contour = plot(chn, :contour)
ps_hist = plot(chn, :histogram)
ps_mixed = plot(chn, :mixeddensity)

# plotting combinations
ps_trace_mean = plot(chn, [:trace, :mean])
ps_mixed_auto = plot(chn, [:mixeddensity, :autocor])
| [
27,
7856,
261,
480,
29,
33979,
321,
1326,
11840,
14,
35491,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
1345,
1747,
198,
3500,
13122,
9655,
35491,
198,
198,
77,
62,
2676,
796,
5323,
198,
77,
62,
3672,
796,
513,
198,
77,
62,
... | 2.342412 | 257 |
<reponame>kfgarrity/TightlyBound.jl
using Test
using TightlyBound
using Suppressor
#include("../includes_laguerre.jl")
#include("../Ewald.jl")
TESTDIR=TightlyBound.TESTDIR
"""
    loaddata(dirs; scf=true)

For each test directory in `dirs` (relative to `TESTDIR`), load the DFT
reference from `qe.save` and a cached tight-binding model. When no cached
model exists, regenerate it from the DFT data (and, with `scf=true`, strip
self-consistency and cache the result). Returns `(tbc_list, dft_list)`.
"""
function loaddata(dirs; scf=true)
    tbcs = []
    dfts = []
    for entry in dirs
        path = "$TESTDIR/" * entry
        dft = TightlyBound.QE.loadXML(path * "/qe.save")
        raw = []
        loaded = []
        # Pick the cached file matching the requested scf mode.
        cachename = scf ? "projham_scf.xml.gz" : "projham.xml.gz"
        try
            loaded = TightlyBound.TB.read_tb_crys(cachename, directory=path)
        catch
            # Cache miss: rebuild the projected Hamiltonian from DFT.
            raw = TightlyBound.AtomicProj.projwfx_workf(dft, directory=path, writefile="projham.xml", skip_og=true, skip_proj=true, freeze=true, localized_factor = 0.15)
            if scf
                loaded = TightlyBound.SCF.remove_scf_from_tbc(raw)
                TightlyBound.TB.write_tb_crys(entry*"/projham_scf.xml.gz", loaded)
            else
                loaded = raw
            end
        end
        push!(dfts, dft)
        push!(tbcs, loaded)
    end
    return tbcs, dfts
end
# Regression test: the analytic (autodiff) force on an MgS dimer must agree
# with a finite difference of the total energy, both with and without
# self-consistency in the loaded tight-binding models.
function test_force()
    @testset "testing force dimer" begin
        if true
            # @suppress begin
            # Read the list of dimer test-structure directories.
            ft = open("$TESTDIR/data_forces/fil_MgS_dimer", "r");
            dirst = readlines(ft);
            close(ft);
            # println(dirst)
            # for scf = [false true]
            # Declared before the loop so assignments inside @suppress are
            # visible to the @test afterwards.
            f_cart = zeros(2,3)
            f_cartFD = 0.0
            for scf = [false, true]
                @suppress begin
                    tbc_list, dft_list = loaddata(dirst, scf=scf);
                    database_rec = TightlyBound.FitTB.do_fitting_recursive(tbc_list,dft_list = dft_list, fit_threebody=false, fit_threebody_onsite=false);
                    # Structure 4 of the set; Fermi smearing of 0.01.
                    x = 4;
                    smearing = 0.01;
                    #en, tbc_x, flag = scf_energy(tbc_list[x].crys, database = database_rec)
                    #en, f_cart,stress = TightlyBound.Force_Stress.get_energy_force_stress(tbc_x, database_rec, smearing = smearing);
                    en, f_cart,stress = TightlyBound.Force_Stress.get_energy_force_stress(tbc_list[x].crys, database_rec, smearing = smearing);
                    # Finite-difference force on atom 1, Cartesian direction 3 (z).
                    enFD, f_cartFD = TightlyBound.Force_Stress.finite_diff(tbc_list[x].crys, database_rec,1, 3, smearing = smearing);
                end
                # Analytic vs finite-difference force, atom 1 / z component.
                @test abs(f_cartFD - f_cart[1,3]) < 1e-3
                # @test abs(f_cartFD - f_cart_fft[1,3]) < 1e-3
            end
            # (A commented-out variant checking structure x = 3 previously
            # lived here; kept out of the active test.)
        end
    end
end
# Regression test on the MgS/ZnSe set: analytic forces and two stress-tensor
# components must match finite differences of the total energy.
function test_stress()
    @testset "testing force znse" begin
        @suppress begin
            # Read the list of znse test-structure directories.
            ft = open("$TESTDIR/data_forces/fil_MgS_znse", "r");
            dirst = readlines(ft);
            close(ft);
            # println(dirst)
            tbc_list, dft_list = loaddata(dirst, scf=false);
            database = TightlyBound.FitTB.do_fitting(tbc_list, fit_threebody=false, fit_threebody_onsite=false, do_plot=false);
            # database = FitTB.do_fitting_recursive(tbc_list,dft_list = dft_list, fit_threebody=true, fit_threebody_onsite=false);
            # Structure 1 of the set; Fermi smearing of 0.01.
            x = 1;
            smearing = 0.01;
            en, f_cart, stress = TightlyBound.Force_Stress.get_energy_force_stress(tbc_list[x].crys, database, smearing = smearing);
            # Force check: atom 1, Cartesian direction 3 (z).
            enFD, f_cartFD = TightlyBound.Force_Stress.finite_diff(tbc_list[x].crys, database,1, 3, smearing = smearing);
            @test abs(f_cartFD - f_cart[1,3]) < 1e-4

            # Stress component (1,1) via finite-difference strain.
            x=1
            enFD, stressFD = TightlyBound.Force_Stress.finite_diff(tbc_list[x].crys, database,1, 1, stress_mode=true, smearing = smearing);
            @test abs(stressFD - stress[1,1]) < 1e-5

            # Stress component (1,2).
            x=1
            enFD, stressFD = TightlyBound.Force_Stress.finite_diff(tbc_list[x].crys, database,1, 2, stress_mode=true, smearing = smearing);
            @test abs(stressFD - stress[1,2]) < 1e-5
        end
    end
end
# Script entry: run both test groups when this file is executed.
test_force()
#println("sleep ")
#sleep(3)
test_stress()
| [
27,
7856,
261,
480,
29,
74,
40616,
3258,
414,
14,
51,
432,
306,
49646,
13,
20362,
198,
3500,
6208,
198,
3500,
44643,
306,
49646,
198,
3500,
8105,
44292,
198,
198,
2,
17256,
7203,
40720,
42813,
62,
75,
11433,
263,
260,
13,
20362,
494... | 1.920926 | 3,111 |
# One successful application of a rule's left-hand side to an e-class.
struct Match
    rule::AbstractRule
    # the rhs pattern to instantiate
    pat_to_inst::Union{Nothing,Pattern}
    # the substitution
    sub::Sub
    # the id of the e-class that matched the lhs
    id::EClassId
end

# Buffer type used to accumulate matches during a search pass.
const MatchesBuf = Vector{Match}
"""
Return the ids of the e-classes of `g` that should be searched for pattern `p`.
For a generic `Pattern` this is every e-class id in the graph.
"""
function cached_ids(g::EGraph, p::Pattern)# ::Vector{Int64}
    all_ids = keys(g.classes)
    return collect(all_ids)
end
# FIXME: the symbol cache (g.symcache) is currently unreliable — it can miss
# e-classes where `p.head` appears — so this method conservatively searches
# every e-class instead of restricting to the cached candidates.
function cached_ids(g::EGraph, p::PatTerm)
    return collect(keys(g.classes))
end
# function cached_ids(g::EGraph, p::PatLiteral)
# get(g.symcache, p.val, [])
# end
# Search with a symbolic rule: every substitution produced by the ematch VM
# becomes a `Match` carrying the rule's right-hand side for instantiation.
function (r::SymbolicRule)(g::EGraph, id::EClassId)
    substitutions = ematch(g, r.ematch_program, id)
    return map(sub -> Match(r, r.right, sub, id), substitutions)
end
# Search with a dynamic rule: the rhs is computed by arbitrary code later, so
# the `Match` carries `nothing` instead of a pattern to instantiate.
function (r::DynamicRule)(g::EGraph, id::EClassId)
    substitutions = ematch(g, r.ematch_program, id)
    return map(sub -> Match(r, nothing, sub, id), substitutions)
end
# A bidirectional rule searches both directions: lhs→rhs and rhs→lhs.
function (r::BidirRule)(g::EGraph, id::EClassId)
    ltr = map(sub -> Match(r, r.right, sub, id), ematch(g, r.ematch_program_l, id))
    rtl = map(sub -> Match(r, r.left, sub, id), ematch(g, r.ematch_program_r, id))
    return vcat(ltr, rtl)
end
"""
    @maybethreaded cond body

Run `body` under `Threads.@threads` when `cond` evaluates to `true` at
runtime, otherwise run it serially. Both arguments are escaped into the
caller's scope.
"""
macro maybethreaded(x, body)
    esc(quote
        if $x
            Threads.@threads $body
        else
            $body
        end
    end)
end
"""
Search phase of equality saturation: run every rule of `theory` against the
e-graph and collect the matches. Returns an iterator of `Match`es.

Inequality (contradiction) rules are always searched; for all other rules the
`scheduler` may skip banned rules or discard their matches. Timing is recorded
into `report.to` under the "Search" node.
"""
function eqsat_search!(egraph::EGraph, theory::Vector{<:AbstractRule},
        scheduler::AbstractScheduler, report; threaded=false)
    match_groups = Vector{Match}[]
    # Map `f` over `xs`, in parallel via ThreadsX when `threaded` is set.
    function pmap(f, xs)
        # const propagation should be able to optimze one of the branch away
        if threaded
            # # try to divide the work evenly between threads without adding much overhead
            # chunks = Threads.nthreads() * 10
            # basesize = max(length(xs) ÷ chunks, 1)
            # ThreadsX.mapi(f, xs; basesize=basesize)
            ThreadsX.map(f, xs)
        else
            map(f, xs)
        end
    end

    inequalities = filter(Base.Fix2(isa, UnequalRule), theory)
    # never skip contradiction checks
    append_time = TimerOutput()
    for rule ∈ inequalities
        @timeit report.to repr(rule) begin
            ids = cached_ids(egraph, rule.left)
            rule_matches = pmap(i -> rule(egraph, i), ids)
            @timeit append_time "appending matches" begin
                append!(match_groups, rule_matches)
            end
        end
    end

    other_rules = filter(theory) do rule
        !(rule isa UnequalRule)
    end
    for rule ∈ other_rules
        @timeit report.to repr(rule) begin
            # don't apply banned rules
            if !cansearch(scheduler, rule)
                # println("skipping banned rule $rule")
                continue
            end
            ids = cached_ids(egraph, rule.left)
            rule_matches = pmap(i -> rule(egraph, i), ids)
            n_matches = sum(length, rule_matches)
            # Let the scheduler decide whether these matches may be applied.
            can_yield = inform!(scheduler, rule, n_matches)
            if can_yield
                @timeit append_time "appending matches" begin
                    append!(match_groups, rule_matches)
                end
            end
        end
    end
    # @timeit append_time "appending matches" begin
    # result = reduce(vcat, match_groups) # this should be more efficient than multiple appends
    # end
    merge!(report.to, append_time, tree_point=["Search"])
    return Iterators.flatten(match_groups)
    # return result
end
| [
7249,
13225,
198,
220,
220,
220,
3896,
3712,
23839,
31929,
220,
198,
220,
220,
220,
1303,
262,
9529,
82,
3912,
284,
9113,
9386,
220,
198,
220,
220,
220,
1458,
62,
1462,
62,
8625,
3712,
38176,
90,
18465,
11,
47546,
92,
198,
220,
220,... | 2.135194 | 1,827 |
# Source: Knet.jl (AndrewSerra fork), file src/ops20_gpu/rnn.jl
import Knet.Ops20: rnnforw
using Knet.Ops20: RNN
using Knet.KnetArrays: DevArray, KnetArray, Cptr
using CUDA: CuArray, CUDNN, CU_NULL
using AutoGrad: AutoGrad, @primitive1, value, recording, Param, Value
"RNN descriptor"
mutable struct RD; ptr; end

"Dropout descriptor"
mutable struct DD; ptr; states; end  # states: GPU buffer holding dropout RNG state

"Keeps an array of 3D tensor descriptors"
mutable struct TDs; pvec::Vector{Cptr}; xDesc::Vector{TD}; end # Keep xDesc in TDs so it does not get gc'ed

# Allow these wrappers to be passed directly to cudnn ccalls.
Base.unsafe_convert(::Type{Cptr}, dd::DD)=dd.ptr
Base.unsafe_convert(::Type{Cptr}, rd::RD)=rd.ptr
Base.unsafe_convert(::Type{Ptr{Cptr}}, tds::TDs)=pointer(tds.pvec)
# Construct a cudnn dropout descriptor. `atype` selects the GPU array type for
# the RNG state buffer; a zero `seed` is replaced by the current time. The
# descriptor is destroyed by a finalizer.
function DD(; atype, handle=CUDNN.handle(), dropout=0.0, seed=0, o...)
    if seed==0; seed=floor(Culonglong,time()); end
    d = Cptr[0]; s = Csize_t[0] # TODO: Can multiple RNNs share dropout descriptors? Can dropout probability be changed?
    CUDNN.cudnnCreateDropoutDescriptor(d)
    CUDNN.cudnnDropoutGetStatesSize(handle,s)
    states = rnnworkspace(s[1], atype)
    @cudnn_retry CUDNN.unsafe_cudnnSetDropoutDescriptor(d[1],handle,dropout,states,bytes(states),seed)
    dd = DD(d[1],states)
    finalizer(x->CUDNN.cudnnDestroyDropoutDescriptor(x.ptr),dd)
    return dd
end
# Construct an empty cudnn RNN descriptor, destroyed by a finalizer.
function RD()
    d = Cptr[0]
    @cudnn_retry CUDNN.unsafe_cudnnCreateRNNDescriptor(d)
    rd = RD(d[1])
    finalizer(x->CUDNN.cudnnDestroyRNNDescriptor(x.ptr),rd)
    return rd
end
# Build a fully configured RNN descriptor. The descriptor setter entry point
# differs across cudnn versions, hence the version branch below.
function RD(hiddenSize,numLayers,dropoutDesc,inputMode,direction,mode,algo,dataType; handle=CUDNN.handle())
    rnnDesc = RD()
    # Convert plain integer options to the cudnn enum types.
    inputMode = CUDNN.cudnnRNNInputMode_t(inputMode)
    direction = CUDNN.cudnnDirectionMode_t(direction)
    mode = CUDNN.cudnnRNNMode_t(mode)
    algo = CUDNN.cudnnRNNAlgo_t(algo)
    dt = CUDNN.cudnnDataType_t(DT(dataType))
    if CUDNN.version() < v"8"
        CUDNN.cudnnSetRNNDescriptor(handle,rnnDesc,hiddenSize,numLayers,dropoutDesc,inputMode,direction,mode,algo,dt)
    else
        CUDNN.cudnnSetRNNDescriptor_v6(handle,rnnDesc,hiddenSize,numLayers,dropoutDesc,inputMode,direction,mode,algo,dt)
    end
    return rnnDesc
end
Base.length(tds::TDs)=length(tds.pvec)

function TDs(x::DevArray{A},::Nothing) where {A} # Treat x: (X,B?,T?) as a 4D array: (1,X,B,T)
    xDesc = TD(A,1,size(x,1),size(x,2)) # we can use a single xDesc
    pvec = Vector{Cptr}(undef, size(x,3))
    pvec[:] .= xDesc.ptr  # all time steps share one descriptor (uniform batch)
    return TDs(pvec, [xDesc])
end
# Variable-batch case: x is packed as (X,B*) and batchSizes[t] gives the batch
# size at time step t, so each step needs its own descriptor.
function TDs(x::DevArray{A},batchSizes) where {A} # x: (X,B*), batchSizes gives us Bt sizes
    @assert sum(batchSizes) == div(length(x),size(x,1))
    X = size(x,1)
    xs = [ TD(A,1,X,B) for B in batchSizes ]
    ps = [ xd.ptr for xd in xs ]
    return TDs(ps,xs)
end
# Build a 3-D tensor descriptor for `a`, padding missing trailing dimensions
# with 1 (i.e. pad from the right). Throws DimensionMismatch for ndims > 3.
function TD3(a::DevArray)
    nd = ndims(a)
    nd == 3 && return TD(a)
    nd == 2 && return TD(reshape(a, size(a,1), size(a,2), 1))
    nd == 1 && return TD(reshape(a, size(a,1), 1, 1))
    throw(DimensionMismatch())
end
# Build a 3-D filter descriptor for `a`, padding missing leading dimensions
# with 1 (i.e. pad from the left). Throws DimensionMismatch for ndims > 3.
function FD3(a::DevArray)
    nd = ndims(a)
    nd == 3 && return FD(a)
    nd == 2 && return FD(reshape(a, 1, size(a,1), size(a,2)))
    nd == 1 && return FD(reshape(a, 1, 1, size(a,1)))
    throw(DimensionMismatch())
end
# GPU forward pass entry point. Lazily creates the cudnn RNN and dropout
# descriptors on first use (so an RNN can be constructed before any GPU input
# is seen), then delegates the actual work to _rnnforw.
function rnnforw(r::RNN, w, x::Union{DevArray{T},Value{<:DevArray{T}}}, hx=nothing, cx=nothing;
                 handle=CUDNN.handle(), batchSizes=nothing, hy = (hx != nothing), cy = (cx != nothing && r.mode == 2)) where T
    @assert value(w) === value(r.w)
    @assert size(x,1) == r.inputSize
    x3 = reshape(value(x), size(x,1), size(x,2), size(x,3))
    @assert typeof(x3) == typeof(value(w)) "$(typeof(value(w))) weights do not match $(typeof(x)) input. Please use RNN(;atype) option."
    if r.rnnDesc === nothing # initialize rnn for gpu with first input
        r.dataType = eltype(x3)
        r.dropoutDesc = DD(handle=CUDNN.handle(),dropout=r.dropout,seed=r.seed,atype=typeof(x3))
        r.rnnDesc = RD(r.hiddenSize,r.numLayers,r.dropoutDesc,r.inputMode,r.direction,r.mode,r.algo,r.dataType)
    end
    _rnnforw(w,x,hx,cx; rnn=r,handle=handle,batchSizes=batchSizes,hy=hy,cy=cy)
end
# Low-level forward: sets up cudnn descriptors and output buffers, then calls
# cudnnRNNForwardTraining (when recording gradients) or ...Inference.
# Returns (y, hyout, cyout, reserveSpace, workSpace).
function _rnnforw(w, x, hx, cx; rnn, handle, batchSizes, hy, cy)
    # Input descriptors
    seqLength = batchSizes==nothing ? size(x,3) : length(batchSizes) # (X,B,T) or (X,B+) with batchSizes
    wDesc = FD3(w)            # (1,1,W)
    xtds = TDs(x,batchSizes)  # (1,X,Bt) x T
    # Local helper treating both Julia `nothing` and C null pointers as absent.
    isnothing(a) = a === nothing || a === C_NULL || a === CU_NULL
    if hx==nothing; hx=CU_NULL; hxDesc=C_NULL; else; hxDesc=TD3(hx); end # (H,B,L/2L)
    if cx==nothing || rnn.mode != 2; cx=CU_NULL; cxDesc=C_NULL; else; cxDesc=TD3(cx); end
    # Output arrays and descriptors
    ysize = collect(size(x))
    ysize[1] = rnn.hiddenSize * (rnn.direction == 1 ? 2 : 1)
    y = similar(x, ysize...) # (H/2H,B,T) or (H/2H,B+) -- y mirrors x except for the first dimension
    ytds = TDs(y,batchSizes) # (1,H/2H,Bt) x T
    # Optionally output hidden and cell of last step
    hyout = cyout = CU_NULL
    hyDesc = cyDesc = C_NULL
    if hy || cy
        firstBatchSize = batchSizes==nothing ? size(x,2) : batchSizes[1]
        hsize = (Int(rnn.hiddenSize), Int(firstBatchSize), Int(rnn.numLayers * (rnn.direction == 1 ? 2 : 1))) # (H,B,L/2L)
        if hy; hyout=similar(y,hsize); hyDesc=TD3(hyout); end
        if cy && rnn.mode==2; cyout=similar(y,hsize); cyDesc=TD3(cyout); end
        if !isnothing(hx) && any(size(hx,i)!=hsize[i] for i=1:3) # compare one by one in case hx is 1-D or 2-D
            throw(DimensionMismatch("size(hx)=$(size(hx)) does not match hsize=$(hsize)"))
        end
        if !isnothing(cx) && rnn.mode == 2 && any(size(cx,i)!=hsize[i] for i=1:3)
            throw(DimensionMismatch("size(cx)=$(size(cx)) does not match hsize=$(hsize)"))
        end
    end
    # workSpace and reserveSpace
    wss = cudnnGetRNNWorkspaceSize(rnn.rnnDesc, xtds; handle=handle)
    ws = rnnworkspace(wss, typeof(value(w)))

    if AutoGrad.recording()
        # Training mode needs a reserve space kept alive until the backward pass.
        rss = cudnnGetRNNTrainingReserveSize(rnn.rnnDesc, xtds; handle=handle)
        rs = rnnworkspace(rss, typeof(value(w)))
        @cudnn_retry CUDNN.unsafe_cudnnRNNForwardTraining(handle, rnn.rnnDesc, seqLength, xtds, x, hxDesc, hx, cxDesc, cx, wDesc, w, ytds, y, hyDesc, hyout, cyDesc, cyout, ws, wss, rs, rss)
    else
        rs = nothing
        @cudnn_retry CUDNN.unsafe_cudnnRNNForwardInference(handle, rnn.rnnDesc, seqLength, xtds, x, hxDesc, hx, cxDesc, cx, wDesc, w, ytds, y, hyDesc, hyout, cyDesc, cyout, ws, wss)
    end
    # Map C null sentinels back to Julia `nothing` for callers.
    if hyout === CU_NULL; hyout = nothing; end
    if cyout === CU_NULL; cyout = nothing; end
    return y, hyout, cyout, rs, ws
end
# Backward-pass shim registered with AutoGrad: unwraps the forward results and
# incoming gradients, then calls _rnnback2 to do the cudnn work.
function _rnnback(dt, t, w, x, hx, cx; rnn, o...)
    @assert value(rnn.w) === value(w)
    y,hy,cy,rs,ws = value(t)
    dy,dhy,dcy,drs,dws = value(dt)
    rnn=value(rnn); w=value(w); x=value(x); hx=value(hx); cx=value(cx)
    # To prevent dependencies to next iteration we need to clear the Result type from rnn.h,rnn.c
    # We can't do this during forward, because another forward may be run within the same iteration.
    # Doing it here is safe, means the iteration is done and we are taking gradients.
    # Note that this does not work on the cpu and these have to be cleaned by hand.
    # The cpu version is not a primitive and has no back function. (TODO: find better solution)
    rnn.h = value(rnn.h); rnn.c = value(rnn.c)
    _rnnback2(rnn, w, x, y, dy, hx, cx, dhy, dcy, rs, ws; o...)
end
# Core backward pass: cudnnRNNBackwardData computes dx/dhx/dcx, then
# cudnnRNNBackwardWeights computes dw. dw is returned; dx/dhx/dcx are stashed
# on the RNN struct (consumed by the @primitive1 registration below).
function _rnnback2(r, w, x, y, dy, hx, cx, dhy, dcy, rs, ws;
                   handle=CUDNN.handle(), batchSizes=nothing, o...)
    @assert value(r.w) === value(w)
    # Input descriptors:
    seqLength = batchSizes==nothing ? size(x,3) : length(batchSizes) # (X,B,T) or (X,B+) with batchSizes
    wDesc = FD3(w)           # (1,1,W)
    xtds = TDs(x,batchSizes) # (X,B,T) -> (1,X,B) x T
    ytds = TDs(y,batchSizes) # (H/2H,B,T) -> (1,H/2H,B) x T
    # dytds = TDs(dy,batchSizes) # we use ytds for dytds
    if dy == nothing; dy=zero(y); end
    if hx == nothing; hx=CU_NULL; hxDesc=C_NULL; else; hxDesc=TD3(hx); end
    if cx == nothing || r.mode != 2; cx=CU_NULL; cxDesc=C_NULL; else; cxDesc=TD3(cx); end
    if dhy == nothing; dhy=CU_NULL; dhyDesc=C_NULL; else; dhyDesc=TD3(dhy); end
    if dcy == nothing || r.mode != 2; dcy=CU_NULL; dcyDesc=C_NULL; else; dcyDesc=TD3(dcy); end
    # Output arrays and descriptors:
    dx = similar(x) # (X,B,T) or (X,B+) with batchSizes
    # dxtds = TDs(dx,batchSizes) # we use xtds here
    dw = zero(w) # dw is used additively, so we need zero
    dwDesc = FD3(dw)
    if hx === CU_NULL; dhx=CU_NULL; dhxDesc=C_NULL; else; dhx=similar(hx); dhxDesc=TD3(dhx); end
    if cx === CU_NULL; dcx=CU_NULL; dcxDesc=C_NULL; else; dcx=similar(cx); dcxDesc=TD3(dcx); end
    # workSpace and reserveSpace
    # ws = cudnnWorkSpace()
    wss = bytes(ws)
    rss = bytes(rs)

    @cudnn_retry CUDNN.unsafe_cudnnRNNBackwardData(handle, r.rnnDesc, seqLength, ytds, y, ytds, dy, dhyDesc, dhy, dcyDesc, dcy, wDesc, w, hxDesc, hx, cxDesc, cx, xtds, dx, dhxDesc, dhx, dcxDesc, dcx, ws, wss, rs, rss)
    @cudnn_retry CUDNN.unsafe_cudnnRNNBackwardWeights(handle, r.rnnDesc, seqLength, xtds, x, hxDesc, hx, ytds, y, ws, wss, dwDesc, dw, rs, rss)
    # Update the cache
    if dhx===CU_NULL; dhx=nothing; end
    if dcx===CU_NULL; dcx=nothing; end
    r.dx, r.dhx, r.dcx = dx, dhx, dcx
    return dw
end
# Register _rnnforw's gradient with AutoGrad: dw comes from _rnnback, while
# gradients w.r.t. x/hx/cx are picked up from the fields stashed by _rnnback2.
@primitive1 _rnnforw(w,x,hx,cx; rnn, o...),dy,y _rnnback(dy,y,w,x,hx,cx; rnn=rnn, o...) value(rnn).dx value(rnn).dhx value(rnn).dcx
#506: Because r.dx,dhx,dcx may be freed by gcnode, their C_NULL pointers cause trouble in deepcopy.
import Base: deepcopy_internal
# Custom deepcopy for RNN (see #506 above): the gradient caches dx/dhx/dcx may
# hold freed C_NULL pointers, so they are reset to `nothing` in the copy.
function deepcopy_internal(x::RNN, s::IdDict)
    if !haskey(s,x)
        s[x] = RNN(deepcopy_internal(x.w,s), deepcopy_internal(x.h,s), deepcopy_internal(x.c,s), x.inputSize, x.hiddenSize, x.numLayers, x.dropout, x.seed, x.inputMode, x.direction, x.mode, x.algo, x.dataType, deepcopy_internal(x.rnnDesc,s), deepcopy_internal(x.dropoutDesc,s), nothing, nothing, nothing)
    end
    return s[x]
end
# Allocate a GPU scratch buffer of at least `n` bytes as an Int-element array
# of the requested array `type` (KnetArray or CuArray).
function rnnworkspace(n, type)
    # Round the byte count up to a whole number of Ints.
    nelts = (n-1)÷sizeof(Int)+1
    if type <: KnetArray
        return KnetArray{Int}(undef, nelts)
    elseif type <: CuArray
        return CuArray{Int}(undef, nelts)
    else
        error("$type not a known GPU array type.")
    end
end
# Return the number of elements (not bytes) of the RNN parameter vector.
function cudnnGetRNNParamsSize(r::RNN)
    res = Csize_t[0]
    xDesc = TD(r.dataType, 1, r.inputSize, 1) # xDesc: (1,X,B) where X = inputSize, B is ignored, so assume 1
    dt = CUDNN.cudnnDataType_t(DT(r.dataType))
    CUDNN.cudnnGetRNNParamsSize(CUDNN.handle(), r.rnnDesc, xDesc, res, dt)
    # cudnn reports bytes; convert to element count.
    div(res[1], sizeof(r.dataType))
end
# This is buggy, why?
# X,H,L,I = r.inputSize, r.hiddenSize, r.numLayers, rnnids(r)
# biases = L*I
# inputMatrices = (r.inputMode == 1 ? 0 : r.direction == 1 ? I : div(I,2))
# hiddenMatrices = (r.direction == 1 ? (L-1)*I : (L-1)*I + div(I,2))
# biases * H + inputMatrices * X * H + hiddenMatrices * H * H
# Query the workspace size (bytes) needed for a forward/backward pass over the
# sequence described by `tds`.
function cudnnGetRNNWorkspaceSize(rd::RD, tds::TDs; handle=CUDNN.handle())
    res = Csize_t[1]
    CUDNN.cudnnGetRNNWorkspaceSize(handle, rd, length(tds), tds, res)
    return Int(res[1])
end
# Query the reserve-space size (bytes) that training must keep alive between
# the forward and backward passes.
function cudnnGetRNNTrainingReserveSize(rd::RD, tds::TDs; handle=CUDNN.handle())
    res = Csize_t[1]
    CUDNN.cudnnGetRNNTrainingReserveSize(handle, rd, length(tds), tds, res)
    return Int(res[1])
end
# Return eltype,size
# Query a filter descriptor. If the descriptor has more dimensions than
# requested, retry with the reported count. The dimension list is reversed to
# convert cudnn's row-major order to Julia's column-major sizes.
function cudnnGetFilterNdDescriptor(wDesc::FD; nbDimsRequested = 8)
    dataType = Cint[0]
    format = Cint[0]
    nbDims = Cint[0]
    filterDimA = Vector{Cint}(undef,nbDimsRequested)
    CUDNN.cudnnGetFilterNdDescriptor(wDesc, nbDimsRequested, dataType, format, nbDims, filterDimA)
    if nbDims[1] > nbDimsRequested
        cudnnGetFilterNdDescriptor(wDesc::FD; nbDimsRequested = nbDims[1])
    else
        # dataType enum 0/1/2 maps to Float32/Float64/Float16.
        (Float32,Float64,Float16)[1+dataType[1]],
        (filterDimA[nbDims[1]:-1:1]...,)
    end
end
"""
    cudnnGetRNNParam(r::RNN, layer, id, par; useview=false, handle=CUDNN.handle())

Return a view into `r.w` for one weight matrix (`par == 1`, reshaped to
`(:, hiddenSize)` since cudnn stores matrices transposed) or bias vector
(`par == 2`) of the given `layer` and gate `id`. Returns `nothing` for
parameters that do not exist (skip-input first-layer matrices). Throws
`ArgumentError` for out-of-range arguments.
"""
function cudnnGetRNNParam(r::RNN, layer::Integer, id::Integer, par::Integer; useview=false, handle=CUDNN.handle())
    # Validate ranges: par in 1:2; layer count doubles when bidirectional;
    # the valid id range depends on the cell type (mode 0/1: 2, LSTM: 8, GRU: 6).
    params_are_good = 
        ((1 <= par <= 2) &&
         ((r.direction == 0 && 1 <= layer <= r.numLayers) ||
          (r.direction == 1 && 1 <= layer <= 2*r.numLayers)) &&
         ((r.mode == 0 && 1 <= id <= 2) ||
          (r.mode == 1 && 1 <= id <= 2) ||
          (r.mode == 2 && 1 <= id <= 8) ||
          (r.mode == 3 && 1 <= id <= 6)))
    params_are_good || throw(ArgumentError("Bad arguments for rnnparam, please see doc."))
    # With inputMode==1 (skip input) the first-layer input matrices do not exist.
    should_return_nothing =
        ((r.inputMode == 1) &&
         (par == 1) &&
         ((r.mode == 0 && id == 1) ||
          (r.mode == 1 && id == 1) ||
          (r.mode == 2 && 1 <= id <= 4) ||
          (r.mode == 3 && 1 <= id <= 3)) &&
         ((layer == 1) ||
          (r.direction == 1 && layer == 2)))

    i1 = i2 = len = 0
    w = value(r.w)
    @assert isa(w, DevArray)
    T = eltype(w)
    xDesc = TD(T,1,r.inputSize,1)
    wDesc = FD(T,1,1,length(w))
    paramDesc = FD(T,1,1,1,1)
    param = Cptr[0]
    # BUGFIX: `handle` was previously referenced without being defined in this
    # function; it is now a keyword argument defaulting to CUDNN.handle(),
    # matching the other cudnn wrappers in this file.
    if par == 1 # matrix
        CUDNN.cudnnGetRNNLinLayerMatrixParams(handle, r.rnnDesc, layer-1, xDesc, wDesc, w, id-1, paramDesc, param)
    else # bias
        CUDNN.cudnnGetRNNLinLayerBiasParams(handle, r.rnnDesc, layer-1, xDesc, wDesc, w, id-1, paramDesc, param)
    end
    dt,sz = cudnnGetFilterNdDescriptor(paramDesc)
    if should_return_nothing
        @assert param[1] === C_NULL
        @assert sz == ()
        return nothing
    end
    # Translate the returned device pointer into an index range within r.w.
    len = prod(sz)
    i1 = 1 + div(Int(param[1] - pointer(w)), sizeof(T))
    i2 = i1 + len - 1
    if i1 > i2
        @assert should_return_nothing
        nothing
    elseif par == 1 # matrix; weights are transposed
        h = Int(r.hiddenSize)
        reshape(view(r.w, i1:i2),:,h)
    else # bias
        view(r.w, i1:i2)
    end
end
# CuArray specific support: should move this to cuarrays
TD(a::CuArray{T}) where {T} = TD(T, size(a))        # tensor descriptor from array dims
FD(a::CuArray{T}) where {T} = FD(T, size(a))        # filter descriptor from array dims
bytes(x::CuArray{T}) where T = length(x)*sizeof(T)  # buffer size in bytes
# KnetArray getindex contiguous indices already returns a view.
# We need the following for rnnparam/rnntest to work:
Base.view(A::KnetArray, I::AbstractUnitRange{Int}) = getindex(A, I)
# This supports cpucopy/gpucopy:
import Knet.KnetArrays: _ser
# Serialization support for cpucopy/gpucopy/jld modes: copy the plain fields,
# drop GPU descriptors and gradient caches (they are rebuilt lazily).
function _ser(x::RNN, s::IdDict, m::Val)
    if !haskey(s,x)
        # we need rd,dd only if there is a gpu, we are not in cpumode,
        # and if we are in jldmode we are loading, not saving
        # if (CUDA.functional() && m != CPUMODE && !(m == JLDMODE && x.rnnDesc != nothing))
        #     dd = DD(dropout=x.dropout,seed=x.seed)
        #     rd = RD(x.hiddenSize,x.numLayers,dd,x.inputMode,x.direction,x.mode,x.algo,x.dataType)
        # else
        #     rd = dd = nothing
        # end
        # 20200806: We no longer need to load/save rd/dd: rnnforw will construct as needed.
        rd = dd = nothing
        # dx, dhx, dcx are temporary fields used by rnnback, they do not need to be copied
        # gcnode sets dx.ptr to C_NULL which breaks serialize, best not to try
        s[x] = RNN(_ser(x.w,s,m), _ser(x.h,s,m), _ser(x.c,s,m), x.inputSize, x.hiddenSize, x.numLayers, x.dropout, x.seed, x.inputMode, x.direction, x.mode, x.algo, x.dataType, rd, dd, nothing, nothing, nothing)
    end
    return s[x]
end
import JLD2
# JLD2 persistence: store only the plain fields of an RNN. GPU descriptors and
# gradient caches are dropped on save and recreated lazily by rnnforw on load.
struct JLD2RNN; w; h; c; inputSize; hiddenSize; numLayers; dropout; seed; inputMode; direction; mode; algo; dataType; end
JLD2RNN(x::RNN) = JLD2RNN(x.w, x.h, x.c, x.inputSize, x.hiddenSize, x.numLayers, x.dropout, x.seed, x.inputMode, x.direction, x.mode, x.algo, x.dataType)
RNN(x::JLD2RNN) = RNN(x.w, x.h, x.c, x.inputSize, x.hiddenSize, x.numLayers, x.dropout, x.seed, x.inputMode, x.direction, x.mode, x.algo, x.dataType, nothing, nothing, nothing, nothing, nothing)
JLD2.writeas(::Type{RNN}) = JLD2RNN
JLD2.wconvert(::Type{JLD2RNN}, x::RNN) = JLD2RNN(x)
JLD2.rconvert(::Type{RNN}, x::JLD2RNN) = RNN(x)
| [
27,
7856,
261,
480,
29,
20508,
7089,
430,
14,
42,
3262,
13,
20362,
27,
34345,
29,
10677,
14,
2840,
1238,
62,
46999,
14,
81,
20471,
13,
20362,
198,
11748,
509,
3262,
13,
41472,
1238,
25,
374,
20471,
1640,
86,
198,
3500,
509,
3262,
... | 2.143205 | 7,395 |
"""
        critic(decisionMat, fns)

Apply CRITIC (CRiteria Importance Through Intercriteria Correlation) method for
a given matrix and criteria types.

# Arguments:
 - `decisionMat::DataFrame`: n × m matrix of objective values for n alternatives and m criteria
 - `fns::Array{Function, 1}`: m-vector of functions to be applied on the columns.

# Description
critic() applies the CRITIC method to derive objective weights for m criteria,
which are supposed to be either maximized or minimized, and ranks the criteria
by those weights.

# Output
- `::CRITICResult`: CRITICResult object that holds multiple outputs including weighting and best index.

# Examples
```julia-repl
julia> decmat
3×4 Array{Float64,2}:
 12.9918  0.7264  -1.1009  1.59814
  4.1201  5.8824   3.4483  1.02156
  4.1039  0.0     -0.5076  0.984469

julia> df = makeDecisionMatrix(decmat)
3×4 DataFrame
 Row │ Crt1     Crt2     Crt3     Crt4
     │ Float64  Float64  Float64  Float64
─────┼─────────────────────────────────────
   1 │ 12.9918   0.7264  -1.1009  1.59814
   2 │  4.1201   5.8824   3.4483  1.02156
   3 │  4.1039   0.0     -0.5076  0.984469

julia> fns = [maximum, maximum, minimum, maximum];

julia> result = critic(df, fns);

julia> result.w
4-element Array{Float64,1}:
 0.16883905506169491
 0.41844653698732126
 0.24912338769165807
 0.16359102025932576

julia> result.bestIndex
2
```

# References
<NAME>., <NAME>., & <NAME>. (1995). Determining objective weights in multiple criteria problems: The critic method. Computers & Operations Research, 22(7), 763–770. doi:10.1016/0305-0548(94)00059-h

<NAME>., <NAME>., <NAME>. (2018). CRITIC ve MDL Temelli EDAS Yöntemi ile TR-61 Bölgesi Bankalarının Performans Değerlendirmesi. Süleyman Demirel Üniversitesi Sosyal Bilimler Enstitüsü Dergisi, 1 (32), 1-24.
"""
function critic(decisionMat::DataFrame, fns::Array{Function,1})::CRITICResult

    row, col = size(decisionMat)

    colMax = colmaxs(decisionMat)
    colMin = colmins(decisionMat)

    # Min-max normalize each column so that "better" is always larger,
    # flipping the direction for minimized criteria.
    A = similar(decisionMat)
    for i in 1:row
        for j in 1:col
            if fns[j] == maximum
                @inbounds A[i, j] = (decisionMat[i, j] - colMin[j]) / (colMax[j] - colMin[j])
            elseif fns[j] == minimum
                @inbounds A[i, j] = (colMax[j] - decisionMat[i, j]) / (colMax[j] - colMin[j])
            end
        end
    end

    normalizedMat = Matrix(A)

    # Information content of each criterion: its standard deviation scaled by
    # its total conflict (1 - correlation) with the other criteria.
    corMat = 1 .- cor(normalizedMat)
    scores = [sum(corMat[:, i]) * std(normalizedMat[:, i]) for i in 1:col]

    # Objective weights are the normalized scores.
    w = scores ./ sum(scores)

    rankings = sortperm(w)
    bestIndex = last(rankings)

    result = CRITICResult(
        decisionMat,
        w,
        rankings,
        bestIndex
    )

    return result
end
"""
        critic(setting)

Apply CRITIC (CRiteria Importance Through Intercriteria Correlation) method for
a given matrix and criteria types.

# Arguments:
 - `setting::MCDMSetting`: MCDMSetting object.

# Description
critic() applies the CRITIC method to derive objective weights for m criteria
which are supposed to be either maximized or minimized.

# Output
- `::CRITICResult`: CRITICResult object that holds multiple outputs including weighting and best index.
"""
function critic(setting::MCDMSetting)::CRITICResult
    # Convenience wrapper: unpack the setting and delegate.
    critic(
        setting.df,
        setting.fns
    )
end
end | [
37811,
198,
220,
220,
220,
220,
220,
220,
220,
4014,
7,
12501,
1166,
19044,
11,
277,
5907,
8,
198,
198,
44836,
8740,
2043,
2149,
357,
20575,
1389,
3082,
398,
786,
28186,
8,
2446,
329,
257,
1813,
17593,
290,
9987,
3858,
13,
198,
198,... | 2.341997 | 1,462 |
using InfrastructureSystems
using PowerSystems
using InteractiveUtils
const IS = InfrastructureSystems
const PSY = PowerSystems
IS.strip_module_name
# Return `true` when type `T` is a subtype of any entry of `exceptions`.
function _check_exception(T, exceptions::Vector)
    return any(candidate -> T <: candidate, exceptions)
end
# Write a markdown stub page that autodocs every public symbol defined in
# generated/$(c).jl; return the file path relative to docs/src.
function _write_first_level_markdown(c::String)
    file_name = "model_library/generated_$(c).md"
    open(joinpath("docs/src", file_name), "w") do io
        print(
            io,
            """
            # $(c)
            ```@autodocs
            Modules = [PowerSystems]
            Pages = ["generated/$(c).jl"]
            Order = [:type, :function]
            Public = true
            ```
            """,
        )
    end
    return file_name
end
# Write one markdown page for abstract type `input`, with an autodocs section
# per (non-excluded) subtype; return the file path relative to docs/src.
function _write_second_level_markdown(input::DataType, subtypes::Vector{DataType}, exceptions)
    c = IS.strip_module_name(input)
    file_name = "model_library/generated_$(c).md"
    open(joinpath("docs/src", file_name), "w") do io
        print(io, "# $input\n\n")
        for T_ in subtypes
            # Skip types listed in `exceptions` (documented manually elsewhere).
            _check_exception(T_, exceptions) && continue
            T = IS.strip_module_name(T_)
            print(
                io,
                """
                ## $(T)
                ```@autodocs
                Modules = [PowerSystems]
                Pages = ["/$(T).jl"]
                Order = [:type, :function]
                Public = true
                ```
                """,
            )
        end
    end
    return file_name
end
# Populate `model_library` with documentation index entries for the dynamic
# component categories: one markdown page per direct subtype (covering its
# concrete subtypes), plus any manually maintained page additions.
function make_dynamics_library!(model_library;
    dyn_categories =[
        PSY.DynamicGeneratorComponent,
        PSY.DynamicInverterComponent,
    ],
    exceptions = [PSY.OuterControl,
        PSY.ActivePowerControl,
        PSY.ReactivePowerControl,],
    manual_additions = Dict{String, Any}("DynamicInverterComponent" => Any["OuterControl" => "model_library/outer_control.md"])
)
    for abstract_type in dyn_categories
        @info "Making entries for subtypes of $abstract_type"
        abstract_type_string = IS.strip_module_name(abstract_type)
        addition = Dict{String, Any}()
        internal_index = Any[]
        for c_ in subtypes(abstract_type)
            c_string = IS.strip_module_name(c_)
            # Excluded types are documented via manual pages instead.
            _check_exception(c_, exceptions) && continue
            concretes = IS.get_all_concrete_subtypes(c_)
            file_name = _write_second_level_markdown(c_,
                concretes, exceptions)
            push!(internal_index, c_string => file_name)
        end
        push!(model_library, abstract_type_string => internal_index)
        # Append hand-written pages (e.g. OuterControl) after the generated ones.
        if haskey(manual_additions, abstract_type_string)
            addition = get(manual_additions, abstract_type_string, nothing)
            push!(model_library[abstract_type_string], addition...)
        end
    end
end
# Build the complete model-library index: one first-level page per concrete
# subtype of each static category, plus the dynamics entries and any manual
# additions. Returns the index as an array of `name => entries` pairs.
function make_model_library(;
    categories = [],
    exceptions = [],
    manual_additions = Dict{String, Any}()
)
    model_library = Dict{String, Any}()
    for abstract_type in categories
        @info "Making entries for subtypes of $abstract_type"
        internal_index = Any[]
        concrete = IS.get_all_concrete_subtypes(abstract_type)
        for c_ in concrete
            _check_exception(c_, exceptions) && continue
            c = IS.strip_module_name(c_)
            file_name = _write_first_level_markdown(c)
            push!(internal_index, c => file_name)
        end
        # Categories with no documented subtypes are omitted entirely.
        isempty(internal_index) && continue
        model_library[IS.strip_module_name(abstract_type)] = internal_index
    end
    make_dynamics_library!(model_library)
    # Merge manual additions, appending when the category already exists.
    for (k, v) in manual_additions
        if haskey(model_library, k)
            push!(model_library[k], v...)
        else
            model_library[k] = v
        end
    end
    return Any[p for p in model_library]
end
| [
3500,
33709,
11964,
82,
198,
3500,
4333,
11964,
82,
198,
3500,
21365,
18274,
4487,
198,
9979,
3180,
796,
33709,
11964,
82,
198,
9979,
6599,
56,
796,
4333,
11964,
82,
198,
198,
1797,
13,
36311,
62,
21412,
62,
3672,
198,
198,
8818,
4808... | 1.971566 | 2,075 |
# create Vec
@testset "Vec{$ST}" begin
vtype = PETSc.C.VECMPI
vec = PETSc.Vec(ST, vtype)
resize!(vec, 4)
@test_throws ArgumentError resize!(vec)
len_ret = length(vec)
@test length(vec) == 4
@test size(vec) == (4,)
@test lengthlocal(vec) == 4
@test sizelocal(vec) == (4,)
@test PETSc.gettype(vec) == PETSc.C.VECMPI
vt = complex(2.,2) # use vt to hold temporary values
vec[1] = RC(vt)
val_ret = vec[1]
@test vec[1] == RC(vt)
vec2 = similar(vec,ST)
PETSc.AssemblyBegin(vec2)
PETSc.AssemblyEnd(vec2)
@test isassembled(vec2)
val2_ret = vec2[1]
@test val2_ret != val_ret
if gettype(vec2) == PETSc.C.VECSEQ
lv2 = localpart(vec2)
@test lv2 == vec2
end
vec_tmp = Vec([1., 2, 3])
@test PETSc.isfinalized(vec_tmp) == false
PETSc.PetscDestroy(vec_tmp)
@test PETSc.isfinalized(vec_tmp) == true
vec3 = similar(vec, ST, 5)
@test length(vec3) == 5
vec4 = copy(vec)
@test vec4 ≈ vec
idx = [1,3, 4]
vt = RC(complex(2.,2))
vec4[idx] = vt
vals_ret = vec4[idx]
@test vals_ret == fill(vt,length(idx))
vt = RC(complex(3.,3))
fill!(vec4, vt)
@test vec4 ≈ fill(vt,length(vec4))
vt = RC(complex( 4.,4))
vec4[1:2] = vt
@test vec4[1:2] == [vt, vt]
vals = [RC(complex(1,1.)), RC(complex(3.,3)), RC(complex(4., 3))]
vec4[idx] = vals
@test vec4[idx] == vals
vec5 = Vec(Float64, 4)
varr = LocalVector(vec5)
@test length(vec5) == 4
@test length(varr) == length(vec5)
@test stride(varr, 1) == 1
vec5j = [1., 2, 3, 4]
for i=1:length(vec5) varr[i] = vec5j[i] end
@test varr[1] == vec5j[1]
@test varr == vec5j
varr2 = similar(varr)
T2 = eltype(varr)
@test typeof(varr2) == Array{eltype(T2), 1}
ptr = Base.unsafe_convert(Ptr{T2}, varr)
@test ptr == varr.ref[]
restore(varr)
@test vec5 == vec5j
varr = LocalVector_readonly(vec5)
for i=1:length(vec5) @test varr[i] == vec5[i] end
restore(varr)
# test mlocal constructor
vec5 = Vec(ST, mlocal=3)
@test length(vec5) == 3
@testset "testing logical indexing" begin
logicals = Array(Bool, length(vec4))
for i=eachindex(logicals)
logicals[i] = false
end
logicals[2] = true
vt = RC(complex(5,5.))
vec4[logicals] = vt
@test vec4[2] ≈ vt
@test vec4[1] != vt
vt = RC(complex(rand(), rand()))
vals = [vt]
vec4[logicals] = vals
@test vec4[2] ≈ vals[1]
@test vec4[1] != vals[1]
# reset vec4
vec4_j = zeros(ST, length(vec4))
for i=1:length(vec4)
vec4[i] = RC(complex(Float64(-i), Float64(-i)))
vec4_j[i] = RC(complex(Float64(-i), Float64(-i)))
end
end
@testset "testing math functions" begin
@testset "testin chop" begin
jvec = RC([complex(1.0, 1.0), complex(2.0, 2.0), complex(3.0, 3.0)])
pvec = Vec(jvec)
chop!(pvec, RT(1.5))
jvec[1] = 0.0
@test pvec ≈ jvec
end
vec4_j = zeros(ST, length(vec4))
for i=1:length(vec4)
vec4[i] = RC(complex(Float64(-i), Float64(-i)))
vec4_j[i] = RC(complex(Float64(-i), Float64(-i)))
end
@testset "testing abs" begin
vec4_j = abs(vec4_j)
absv4 = abs(vec4)
abs!(vec4)
if VERSION >= v"0.5.0-dev+0"
@test real(vec4) ≈ vec4_j
@test real(absv4) ≈ vec4_j
@test imag(vec4) ≈ zeros(vec4_j)
@test imag(absv4) ≈ zeros(vec4_j)
else
@test vec4 == vec4_j
@test absv4 == vec4_j
end
end
@testset "testing exp" begin
vec4_j = exp(vec4_j)
exp!(vec4)
@test vec4 ≈ vec4_j
end
@testset "testing log" begin
vec4_j = log(vec4_j)
log!(vec4)
@test vec4 ≈ vec4_j
end
onevec = PETSc.Vec(ST, vtype)
resize!(onevec, 4)
PETSc.AssemblyBegin(onevec)
PETSc.AssemblyEnd(onevec)
for i=1:length(onevec)
onevec[i] = one(ST)
end
@testset "testing norm" begin
@test_throws ArgumentError norm(onevec,3)
@test norm(onevec,Inf) == 1
normvec = copy(onevec)
PETSc.normalize!(normvec)
@test norm(normvec,2) == one(ST)
end
if ST <: Real
@testset "testing max and min" begin
maxvec = copy(onevec)
maxvec[1] = ST(2)
@test maximum(maxvec) == 2
@test findmax(maxvec) == (2.0,1)
minvec = copy(onevec)
minvec[1] = ST(0)
@test minimum(minvec) == 0
@test findmin(minvec) == (0.0,1)
end
end
@testset "testing pointwise max, min, /" begin
div1vec = 2*copy(onevec)
div2vec = 4*copy(onevec)
@test max(div1vec,div2vec) == div2vec
@test min(div1vec,div2vec) == div1vec
@test div1vec .* div2vec == 8*onevec
@test div2vec ./ div1vec == div1vec
end
@testset "testing scale! and negation" begin
scalevec = scale!(copy(onevec),2)
@test scalevec == fill(2,length(onevec))
minusvec = -onevec
@test minusvec == -onevec
end
@testset "testing sum, +, -, *, and /" begin
@test sum(onevec) == length(onevec)
multvec = copy(onevec)
multvec = multvec * 2 * 3 * 4
@test multvec == 24*onevec
multvec = copy(onevec)
multvec = 2 .* multvec
@test multvec == 2*onevec
divvec = copy(onevec)
divvec = divvec * 2 * 3
divvec = divvec ./ 2
@test divvec == 3*onevec
divvec = 3 .\ divvec
@test divvec == onevec
divvec = 2*copy(onevec)
divvec = 2 ./ divvec
@test divvec == onevec
addvec = copy(onevec)
addvec = addvec + 2
addvec = addvec - 2
@test addvec == onevec
addvec = copy(onevec)
addvec = 2 - addvec
addvec = 2 + addvec
@test addvec == 3*onevec
end
end
@testset "testing dot product" begin
val = dot(vec4, vec)
val_j = dot(vec4, vec)
@test val == val_j
end
# make copies of vecs 1 2 4
@testset "testing level 1 Blas" begin
vecj = zeros(ST, length(vec))
vec2j = zeros(ST, length(vec))
vec4j = zeros(ST, length(vec))
for i=1:length(vec)
vecj[i] = vec[i]
vec2j[i] = vec2[i]
vec4j[i] = vec4[i]
end
@testset "testing axpy" begin
vt = RC(complex(2.,2))
axpy!(vt, vec, vec2)
vec2j = vt*vecj + vec2j
@test vec2j == vec2
@testset "testing 4 argument axpy" begin
axpy!(vt, vec, vec2, vec4)
vec4j = vt*vecj + vec2j
@test vec2j == vec2
end
@testset "testing aypx" begin
aypx!(vec, vt, vec2)
vec2j = vt*vec2j + vec
@test vec2j == vec2
end
vt2 = RC(complex(3.,3))
vt3 = RC(complex(4.,4))
@testset "testing axpby" begin
axpby!(vt, vec, vt2, vec2)
vec2j = vt*vecj + vt2*vec2j
@test vec2j == vec2
axpbypcz!(vt, vec, vt2, vec2, vt3, vec4)
vec4j = vt*vecj + vt2*vec2j + vt3*vec4j
@test vec4j == vec4
end
vecs = Array(typeof(vec), 2)
vecs[1] = vec
vecs[2] = vec2
alphas = [vt2, vt3]
axpy!(vec4, alphas, vecs)
vec4j = vec4j + vt2*vecj + vt3*vec2j
@test vec4j == vec4
end
@testset "testing .*, ./, .^" begin
vec5 = Vec(ST, 3, vtype=PETSc.C.VECMPI)
vec6 = similar(vec5)
vec5j = zeros(ST, 3)
vec6j = zeros(ST, 3)
for i=1:3
i_float = Float64(i)
vec5[i] = RC(complex(i_float, i_float))
vec6[i] = RC(complex(i_float+3, i_float+3))
vec5j[i] = RC(complex(i_float, i_float))
vec6j[i] = RC(complex(i_float +3, i_float+3))
end
vec7 = vec5.*vec6
vec7j = vec5j.*vec6j
@test vec7 ≈ vec7j
vec8 = vec5./vec6
vec8j = vec5j./vec6j
@test vec8 ≈ vec8j
vec9 = vec5.^3
vec9j = vec5j.^3
@test vec9 ≈ vec9j
vec10 = vec5 + vec6
vec10j = vec5j + vec6j
@test vec10 ≈ vec10j
vec11 = vec5 - vec6
vec11j = vec5j - vec6j
@test vec11 ≈ vec11j
end
@testset "test unconjugated dot product" begin
x = Vec(ST, 2)
y = Vec(ST, 2)
copy!(y, [1, 1])
if ST <: Complex
copy!(x, [1, im])
@test (x'*y)[1] == 1-im
@test (x.'*y)[1] == 1+im
else
copy!(x, [2, 3])
@test (x'*y)[1] == 5
@test (x.'*y)[1] == 5
end
end
end
let x = rand(ST, 7)
@test Vec(x) == x
end
@testset "map" begin
x = rand(3)
y = Vec(x)
map!(sin, x)
map!(sin, y)
@test x ≈ y
x2 = map(sin, x)
y2 = map(sin, y)
@test x2 ≈ y2
function myfunc(a, b)
return a + b
end
x3 = copy(x2)
y3 = copy(y2)
map!(myfunc, x3, x2, x)
map!(myfunc, y3, y2, y)
@test x3 ≈ y3
end
@testset "advanced indexing" begin
x = zeros(ST, 5)
y = Vec(ST, 5)
idxs = Int32[0, 1]
vals = ST[1, 2]
set_values!(x, idxs, vals)
set_values!(y, idxs, vals)
assemble(x)
assemble(y)
for i=1:length(idxs)
@test x[idxs[i]+1] ≈ vals[i]
@test y[idxs[i]+1] ≈ vals[i]
end
vals = ST[2,3]
ltog = local_to_global_mapping(y)
set_local_to_global_mapping(y, ltog)
set_values_local!(x, idxs, vals)
set_values_local!(y, idxs, vals)
assemble(x)
assemble(y)
for i=1:length(idxs)
@test x[idxs[i]+1] ≈ vals[i]
@test y[idxs[i]+1] ≈ vals[i]
end
y2 = Vec(ST, 4, bs=2)
@test get_blocksize(y2) == 2
idxs = Int32[0]
vals = ST[1, 2]
set_values_blocked!(y2, idxs, vals)
@test y2[idxs[1]+1] ≈ vals[1]
@test y2[idxs[1]+2] ≈ vals[2]
rng = localpart(y2)
ltog = local_to_global_mapping(y2)
set_local_to_global_mapping(y2, ltog)
idx = Int32[1]
vals = ST[2,3]
set_values_blocked_local!(y2, idxs, vals)
@test y2[idxs[1]+1] ≈ vals[1]
@test y2[idxs[1]+2] ≈ vals[2]
end
end
| [
2,
2251,
38692,
198,
31,
9288,
2617,
366,
53,
721,
90,
3,
2257,
36786,
2221,
198,
220,
410,
4906,
796,
32043,
3351,
13,
34,
13,
53,
2943,
7378,
40,
198,
220,
43030,
796,
32043,
3351,
13,
53,
721,
7,
2257,
11,
410,
4906,
8,
198,
... | 1.88146 | 5,205 |
export SymbolContext, ContextualSymbol, show
import Base.show, Base.show_unquoted
import Crayons: CrayonStack, Crayon
"""
SymbolContext(syms, function [,display_expression])
A symbol context is a special function, evaluating symbols within the body of
the function within the context of a single argument. Generally, these are
constructed with the `@syms` macro, which will modify an expression, replacing
any unescaped symbols in the expression with calls to `sym(<obj>, :symbol)`
This allows contexts to be described for arbitrary objects and for better
extension of symbols as a domain-specific abstraction to arbitrary data.
### Arguments
* `syms` : An `Array` of `Symbol`s, itemizing which symbols are represented
contextually.
* `function` : A unary function which is to be called with the contextual data,
or alternatively with keyworded arguments for each of the symbols.
* `display_expression` : An optional argument used to store a cleaned expression
for printing the symbolic expression that was used to generate the contextual
function.
### Examples
Creating a symbol context from a hand-crafted function, representing symbols
as calls to `sym`. Generally, creating a context in this way is only done
by developers.
```
julia> SymbolContext([:x, :y], x -> sym(x, :x) + sym(x, :y))
```
More commonly a symbol context is created using the `@syms` macro
```
julia> @syms begin
:x + :y
end
```
"""
struct SymbolContext
    syms::AbstractArray   # symbols this context evaluates (see docstring above)
    f::Function           # unary function called with the contextual data
    display_expr          # optional cleaned Expr kept only for pretty-printing
end

# Two-argument constructor: no display expression recorded.
SymbolContext(syms, f) = SymbolContext(syms, f, nothing)

# Calling with keyword arguments packages them up as the single context argument.
(x::SymbolContext)(; kwargs...) = x(kwargs)

# Calling with a context object: check that every required symbol (the special
# `:.` placeholder excluded) can be represented in `arg`, then apply `f`.
function (x::SymbolContext)(arg)
    # NOTE(review): `match_syms` and `type_pretty` are defined elsewhere in this
    # package — presumably returning (found, missing) symbol lists; confirm.
    found, miss = match_syms(arg, setdiff(x.syms, [:.]))
    # Build a readable error listing the missing symbols, e.g. ":a, :b and :c".
    @assert(length(miss) == 0,
        "Context of type `$(type_pretty(typeof(arg)))` cannot find " *
        "representation for symbol(s) " *
        join(":" .* string.(miss[1:(end-1)]), ", ") *
        (length(miss) > 1 ? " and " : "") *
        ":" * string(miss[end]))
    x.f(arg)
end
# Print a SymbolContext as `SymbolContext[:a,:b] <display_expr>`, coloring the
# symbol list blue via Crayons.
#
# Fix: every piece of output now goes to `io`. The original called
# `print("SymbolContext[")`, `print(":")`, `print("] ")` and `println()`
# without the `io` argument, so parts of the output leaked to stdout and
# `sprint`/IOBuffer captures of this method were incomplete.
function show(io::IO, x::SymbolContext)
    stack = CrayonStack(incremental = true)
    print(io, "SymbolContext[")
    for (i, sym)=enumerate(x.syms)
        print(io, i == 1 ? "" : ",")
        print(io, push!(stack, Crayon(foreground = :blue)))
        print(io, ":")
        show_unquoted(io, sym)
        print(io, pop!(stack))
    end
    print(io, "] ")
    # Blocks already render multi-line; anything else gets its own line.
    if !(x.display_expr isa Expr && x.display_expr.head == :block)
        println(io)
    end
    Base.show_unquoted(io, x.display_expr)
end
# Lightweight wrapper marking a value for highlighted (colored) display; the
# `show` methods below render the wrapped value in blue.
struct Highlighted{T}
    x::T
end
# Render a highlighted number as `:<value>` in blue.
# Fix: the leading `print(":")` now targets `io`; it previously wrote to
# stdout, corrupting `sprint`/IOBuffer captures.
function show(io::IO, x::Highlighted{<:Number})
    stack = CrayonStack(incremental = true)
    print(io, push!(stack, Crayon(foreground = :blue)))
    print(io, ":")
    show(io, x.x)
    print(io, pop!(stack))
end

# Render a highlighted symbol in blue; the special symbol `:.` is spelled out
# explicitly because plain `show` would not round-trip it.
# Fix: `print(":.")` now targets `io` (same stdout leak as above).
function show(io::IO, x::Highlighted{<:Symbol})
    stack = CrayonStack(incremental = true)
    print(io, push!(stack, Crayon(foreground = :blue)))
    if x.x == :.; print(io, ":.")
    else; show(io, x.x)
    end
    print(io, pop!(stack))
end
| [
39344,
38357,
21947,
11,
30532,
723,
13940,
23650,
11,
905,
198,
198,
11748,
7308,
13,
12860,
11,
7308,
13,
12860,
62,
403,
421,
5191,
198,
11748,
327,
2433,
684,
25,
327,
2433,
261,
25896,
11,
327,
2433,
261,
628,
198,
198,
37811,
... | 2.682948 | 1,126 |
<reponame>aviks/Logging.jl
using Logging
# Emit one message at each severity level supported by Logging.jl, from the
# least to the most severe, so the caller can see which ones pass the
# currently configured threshold.
function log_test()
    for (emit, message) in (
            (debug, "debug message"),
            (info, "info message"),
            (warn, "warning message"),
            (err, "error message"),
            (critical, "critical message"),
        )
        emit(message)
    end
end
# Demo driver: lower the configured log level step by step and call log_test()
# at each setting to show which messages get through.
println("Setting level=DEBUG")
Logging.configure(level=DEBUG)
log_test()
println()

println("Setting level=INFO")
Logging.configure(level=INFO)
log_test()
println()

println("Setting level=WARNING")
Logging.configure(level=WARNING)
log_test()
println()

println("Setting level=ERROR")
Logging.configure(level=ERROR)
log_test()
println()

println("Setting level=CRITICAL")
Logging.configure(level=CRITICAL)
log_test()
| [
27,
7856,
261,
480,
29,
15820,
591,
14,
11187,
2667,
13,
20362,
198,
3500,
5972,
2667,
198,
198,
8818,
2604,
62,
9288,
3419,
198,
220,
220,
220,
14257,
7203,
24442,
3275,
4943,
198,
220,
220,
220,
7508,
7203,
10951,
3275,
4943,
198,
... | 2.938679 | 212 |
#######################################################################
#
# An example of creating an Excel charts with a date axis using
# XlsxWriter.
#
# Original Python Copyright 2013-2016, <NAME>, <EMAIL>
# https://github.com/jmcnamara/XlsxWriter
using Dates
using XlsxWriter
# Build "chart_date_axis.xlsx": a line chart of `values` over `dates` with the
# X axis configured as a date axis. Returns true when the file was written.
function test()
    wb = Workbook("chart_date_axis.xlsx")
    ws = add_worksheet!(wb)

    # Create the line chart and a dd/mm/yyyy number format for the date column.
    chart = add_chart!(wb, Dict("type"=> "line"))
    date_format = add_format!(wb, Dict("num_format"=> "dd/mm/yyyy"))

    # Widen the first column to display the dates.
    set_column!(ws, "A:A", 12)

    # Some data to be plotted in the worksheet.
    dates = [Date(2013, 1, 1),
             Date(2013, 1, 2),
             Date(2013, 1, 3),
             Date(2013, 1, 4),
             Date(2013, 1, 5),
             Date(2013, 1, 6),
             Date(2013, 1, 7),
             Date(2013, 1, 8),
             Date(2013, 1, 9),
             Date(2013, 1, 10)]
    values = [10, 30, 20, 40, 20, 60, 50, 40, 30, 30]

    # Write the date to the worksheet.
    write_column!(ws, "A1", dates, fmt=date_format)
    write_column!(ws, "B1", values)

    # Add a series to the chart.
    add_series!(chart, Dict(
        "categories"=> "=Sheet1!\$A\$1:\$A\$10",
        "values"=> "=Sheet1!\$B\$1:\$B\$10",
    ))

    # Configure the X axis as a Date axis and set the max and min limits.
    set_x_axis!(chart, Dict(
        "date_axis"=> true,
        "min"=> Date(2013, 1, 2),
        "max"=> Date(2013, 1, 9),
    ))

    # Turn off the legend.
    set_legend!(chart, Dict("none"=> true))

    # Insert the chart into the worksheet.
    insert_chart!(ws, "D2", chart)
    close(wb)
    isfile("chart_date_axis.xlsx")
end

test()
| [
29113,
29113,
4242,
21017,
198,
2,
198,
2,
1052,
1672,
286,
4441,
281,
24134,
15907,
351,
257,
3128,
16488,
1262,
198,
2,
1395,
7278,
87,
34379,
13,
198,
2,
198,
2,
13745,
11361,
15069,
2211,
12,
5304,
11,
1279,
20608,
22330,
1279,
... | 2.434851 | 637 |
<filename>src/julia_sets.jl<gh_stars>0
module julia_sets
using PyPlot
export gen_jset,show_jset
"""
    gen_jset(R, x, y, n_iter, escape_tol)

Compute an escape-time map for the map `R` over the complex grid `x × y`:
entry `(i, j)` is the first iteration at which `|z|` exceeds `escape_tol`
starting from `z = x[i] + y[j]*im`, or `escape_tol + 1` when the point does
not escape within `n_iter` iterations.

Fix: the original used the Julia 0.6 parametric-method syntax
`gen_jset{T<:Real,U<:Real}(...)`, which is a syntax error on Julia ≥ 1.0;
it is replaced by the equivalent `where` clause.
"""
function gen_jset(R::Function, x::Array{T,1}, y::Array{T,1}, n_iter::Int64, escape_tol::U) where {T<:Real, U<:Real}
    A = zeros(length(x), length(y))
    for i in eachindex(x)
        for j in eachindex(y)
            z = Complex(x[i], y[j])
            for k = 1:n_iter
                z = R(z)
                if abs(z) > escape_tol
                    A[i, j] = k
                    break
                end
            end
            # Non-escaping points get a sentinel one past the tolerance so
            # they remain distinguishable when plotted.
            if A[i, j] == 0
                A[i, j] = escape_tol + 1
            end
        end
    end
    return A
end
"""
    show_jset(A)

Display the escape-time matrix `A` as an image via PyPlot's `matshow`.

Fix: replaces the Julia 0.6 parametric syntax `show_jset{T<:Real}(...)`
(a syntax error on Julia ≥ 1.0) with the equivalent `where` clause.
"""
function show_jset(A::Array{T,2}) where {T<:Real}
    matshow(A)
end
end
| [
27,
34345,
29,
10677,
14,
73,
43640,
62,
28709,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
21412,
474,
43640,
62,
28709,
198,
3500,
9485,
43328,
198,
39344,
2429,
62,
73,
2617,
11,
12860,
62,
73,
2617,
198,
198,
8818,
2429,
62,
7... | 1.762463 | 341 |
<reponame>zsunberg/ContinuousPOMDPTreeSearchExperiments.jl
using DataFrames
using CSV
# Load the multilane experiment results and report, per solver, the mean
# reward and its standard error (std / sqrt(n)).
# NOTE(review): `by(data, :solver) do ... end` and `df[:reward]` are pre-1.0
# DataFrames/CSV APIs — confirm against the pinned package versions.
data = CSV.read("data/multilane_Saturday_28_Apr_16_17.csv")

means = by(data, :solver) do df
    n = size(df, 1)
    return DataFrame(reward=mean(df[:reward]),
                     reward_sem=std(df[:reward])/sqrt(n)
                    )
end

println(means)
| [
27,
7856,
261,
480,
29,
89,
19155,
3900,
14,
17875,
5623,
47,
2662,
6322,
27660,
18243,
20468,
6800,
13,
20362,
198,
3500,
6060,
35439,
198,
3500,
44189,
198,
198,
7890,
796,
44189,
13,
961,
7203,
7890,
14,
16680,
346,
1531,
62,
19844... | 2.047337 | 169 |
# Routines related to fragmenting CAD entities in gmsh, while preserving physical groups
# Apply the material hierarchy to the physical groups so that every entity
# belongs to at most one "MATERIAL" group: when an entity appears in several
# material groups, it is kept only in the highest-priority one (the earliest
# in `material_hierarchy`) and removed from the rest. Mutates
# `new_physical_groups` in place.
function process_material_hierarchy!(
    new_physical_groups::Dict{String, Vector{Tuple{Int32,Int32}}},
    material_hierarchy::Vector{String})
    # Pick out the material groups among all physical-group names.
    group_names = collect(keys(new_physical_groups))
    material_groups = filter(name -> occursin("MATERIAL", name), group_names)
    # Fail fast with a readable message if a material is missing from the
    # hierarchy; the error that would occur later is much harder to decipher.
    for material in material_groups
        @assert material ∈ material_hierarchy "material_hierarchy does not contain: '$material'"
    end
    # material_dict aliases the vectors inside new_physical_groups, so
    # mutating a vector here is reflected in the caller's dictionary.
    material_dict = Dict{String, Vector{Tuple{Int32,Int32}}}()
    all_material_entities = Tuple{Int32,Int32}[]
    for material in material_groups
        material_dict[material] = new_physical_groups[material]
        append!(all_material_entities, new_physical_groups[material])
    end
    # Deduplicate the entity list.
    all_material_entities = collect(Set(all_material_entities))
    # Rank each material by its position in the hierarchy (1 = highest).
    priority_of = Dict{String,Int32}(
        material => rank for (rank, material) in enumerate(material_hierarchy))
    for entity in all_material_entities
        owners = [m for m in material_groups if entity ∈ material_dict[m]]
        if length(owners) > 1
            # Keep the entity only in its highest-priority material group.
            winner = material_hierarchy[minimum(priority_of[m] for m in owners)]
            for m in owners
                m == winner && continue
                deleteat!(material_dict[m], findfirst(==(entity), material_dict[m]))
            end
        end
    end
end
"""
    gmsh_group_preserving_fragment(object_dim_tags, tool_dim_tags; material_hierarchy)

Fragment the given CAD entities with `gmsh.model.occ.fragment` while carrying
each existing physical group over to the child entities the fragment produces.
When `material_hierarchy` is non-empty it is applied afterwards so every entity
ends up in at most one material group. Returns the fragment's `out_dim_tags`.

NOTE(review): `Tuple{Signed,Int32}` has an abstract first element — confirm
callers really construct such tuples, otherwise this method never matches.
"""
function gmsh_group_preserving_fragment(object_dim_tags::Vector{Tuple{Signed,Int32}},
                                        tool_dim_tags::Vector{Tuple{Signed,Int32}};
                                        material_hierarchy::Vector{String} = String[])
    # Get all the physical groups
    old_physical_groups = Dict{String,Array{Tuple{Int32,Int32},1}}()
    groups = gmsh.model.get_physical_groups()
    names = [gmsh.model.get_physical_name(grp[1], grp[2]) for grp in groups]
    for (i, name) in enumerate(names)
        ents = gmsh.model.get_entities_for_physical_group(groups[i][1], groups[i][2])
        dim = groups[i][1]
        old_physical_groups[name] = [(dim, ent) for ent in ents]
    end
    # Fragment
    nents = length(object_dim_tags) + length(tool_dim_tags)
    @info "Fragmenting $nents entities"
    out_dim_tags, out_dim_tags_map = gmsh.model.occ.fragment(object_dim_tags, tool_dim_tags)
    # Create a dictionary of new physical groups using the parent child relationship
    # between input_dim_tags and out_dim_tags_map. The parent at index i of input_dim_tags
    # has children out_dim_tags_map[i]
    new_physical_groups = Dict{String, Vector{Tuple{Int32,Int32}}}()
    input_dim_tags = vcat(object_dim_tags, tool_dim_tags)
    # For each physical group
    for name in names
        new_physical_groups[name] = Tuple{Int32, Int32}[]
        # For each of the dim tags in the physical group
        for dim_tag in old_physical_groups[name]
            # If the dim_tag was one of the entities in the fragment
            if dim_tag ∈ input_dim_tags
                # Get its children
                index = findfirst(x->x == dim_tag, input_dim_tags)
                children = out_dim_tags_map[index]
                # Add the children to the new physical group
                for child in children
                    if child ∉ new_physical_groups[name]
                        push!(new_physical_groups[name], child)
                    end
                end
            else
                # If it wasn't in the fragment, no changes necessary.
                push!(new_physical_groups[name], dim_tag)
            end
        end
    end
    # Remove old groups and synchronize
    for name in names
        gmsh.model.remove_physical_name(name)
    end
    @debug "Synchronizing model"
    gmsh.model.occ.synchronize()
    # Process the material hierarchy if it exists so that each entity has one
    # or less material physical groups
    if 0 < length(material_hierarchy)
        process_material_hierarchy!(new_physical_groups, material_hierarchy)
    end
    # Create new physical groups
    for (i, name) in enumerate(names)
        dim = groups[i][1]
        tags = [dim_tag[2] for dim_tag in new_physical_groups[name]]
        ptag = gmsh.model.add_physical_group(dim, tags)
        gmsh.model.set_physical_name(dim, ptag, name)
    end
    return out_dim_tags
end
"""
    gmsh_group_preserving_fragment(object_dim_tags, tool_dim_tags; material_hierarchy)

Same as the `Tuple{Signed,Int32}` method: fragment the entities while
preserving physical groups, optionally applying the material hierarchy, and
return the fragment's `out_dim_tags`.

NOTE(review): this method duplicates the method above line-for-line except for
the tuple element type — consider consolidating the two into one method with a
type parameter.
"""
function gmsh_group_preserving_fragment(object_dim_tags::Vector{Tuple{Int32,Int32}},
                                        tool_dim_tags::Vector{Tuple{Int32,Int32}};
                                        material_hierarchy::Vector{String} = String[])
    # Get all the physical groups
    old_physical_groups = Dict{String, Vector{Tuple{Int32,Int32}}}()
    groups = gmsh.model.get_physical_groups()
    names = [gmsh.model.get_physical_name(grp[1], grp[2]) for grp in groups]
    for (i, name) in enumerate(names)
        ents = gmsh.model.get_entities_for_physical_group(groups[i][1], groups[i][2])
        dim = groups[i][1]
        old_physical_groups[name] = [(dim, ent) for ent in ents]
    end
    # Fragment
    nents = length(object_dim_tags) + length(tool_dim_tags)
    @info "Fragmenting $nents entities"
    out_dim_tags, out_dim_tags_map = gmsh.model.occ.fragment(object_dim_tags, tool_dim_tags)
    # Create a dictionary of new physical groups using the parent child relationship
    # between input_dim_tags and out_dim_tags_map. The parent at index i of input_dim_tags
    # has children out_dim_tags_map[i]
    new_physical_groups = Dict{String, Vector{Tuple{Int32,Int32}}}()
    input_dim_tags = vcat(object_dim_tags, tool_dim_tags)
    # For each physical group
    for name in names
        new_physical_groups[name] = Tuple{Int32,Int32}[]
        # For each of the dim tags in the physical group
        for dim_tag in old_physical_groups[name]
            # If the dim_tag was one of the entities in the fragment
            if dim_tag ∈ input_dim_tags
                # Get its children
                index = findfirst(x->x == dim_tag, input_dim_tags)
                children = out_dim_tags_map[index]
                # Add the children to the new physical group
                for child in children
                    if child ∉ new_physical_groups[name]
                        push!(new_physical_groups[name], child)
                    end
                end
            else
                # If it wasn't in the fragment, no changes necessary.
                push!(new_physical_groups[name], dim_tag)
            end
        end
    end
    # Remove old groups and synchronize
    for name in names
        gmsh.model.remove_physical_name(name)
    end
    @debug "Synchronizing model"
    gmsh.model.occ.synchronize()
    # Process the material hierarchy if it exists so that each entity has one
    # or less material physical groups
    if 0 < length(material_hierarchy)
        process_material_hierarchy!(new_physical_groups, material_hierarchy)
    end
    # Create new physical groups
    for (i, name) in enumerate(names)
        dim = groups[i][1]
        tags = [dim_tag[2] for dim_tag in new_physical_groups[name]]
        ptag = gmsh.model.add_physical_group(dim, tags)
        gmsh.model.set_physical_name(dim, ptag, name)
    end
    return out_dim_tags
end
| [
2,
39602,
1127,
3519,
284,
24225,
278,
37292,
12066,
287,
308,
907,
71,
11,
981,
23934,
3518,
2628,
198,
198,
8818,
1429,
62,
33665,
62,
71,
959,
9282,
0,
7,
198,
220,
220,
220,
220,
220,
220,
220,
649,
62,
42854,
62,
24432,
3712,... | 2.352891 | 3,511 |
using Test;
using AdalmPluto;
# Integration tests for the libIIO scan API. They require the USB backend (and
# a connected device to find any contexts); otherwise they bail out early.
@testset "libIIO/scan.jl" begin
    # disable the assertions
    toggleNoAssertions(true);

    C_iio_has_backend("usb") || (@error "Library doesn't have the USB backend available. Skipping tests."; return;)

    @testset "Scan context" begin
        # C_iio_create_scan_context
        @test (global scan_context = C_iio_create_scan_context("usb")) != C_NULL;
        if scan_context == C_NULL
            @error "C_iio_create_scan_context failed, skipping the remaining tests";
        else
            info = Ref{Ptr{Ptr{iio_context_info}}}(0);

            # C_iio_scan_context_get_info_list
            @test (global nb_contexts = C_iio_scan_context_get_info_list(scan_context, info)) > 0;
            if nb_contexts < 0
                @error "C_iio_scan_context_get_info_list failed, skipping the remaining tests";
                C_iio_scan_context_destroy(scan_context);
            elseif nb_contexts == 0
                @warn "0 contexts found, skipping the remaining tests";
                C_iio_context_info_list_free(info[]);
                C_iio_scan_context_destroy(scan_context);
            else
                loaded_info = unsafe_load(info[], 1);

                # C_iio_context_info_get_description
                @test C_iio_context_info_get_description(loaded_info) != "";
                # C_iio_context_info_get_uri
                @test C_iio_context_info_get_uri(loaded_info) != "";
                # C_iio_context_info_list_free
                @test C_iio_context_info_list_free(info[]) === nothing;
                # C_iio_scan_context_destroy
                @test C_iio_scan_context_destroy(scan_context) === nothing;
            end
        end
    end

    # NOTE(review): the scan-block testset below is disabled — re-enable or
    # delete it once C_iio_create_scan_block is supported.
    # @testset "Scan block" begin
    #     # C_iio_create_scan_block
    #     @test (global scan_block = C_iio_create_scan_block("usb")) != C_NULL;
    #     if scan_block == C_NULL
    #         @error "C_iio_create_scan_block failed, skipping the remaining tests.";
    #     else
    #         # C_iio_scan_block_scan
    #         @test (global nb_contexts = C_iio_scan_block_scan(scan_block)) > 0;
    #         if nb_contexts < 0
    #             @error "C_iio_scan_block_scan failed, skipping the remaining tests";
    #             C_iio_scan_block_destroy(scan_block);
    #         elseif nb_contexts == 0
    #             @warn "0 contexts found, skipping the remaining tests";
    #             C_iio_scan_block_destroy(scan_block);
    #         else
    #             # C_iio_scan_block_get_info
    #             @test C_iio_scan_block_get_info(scan_block, UInt32(0)) != C_NULL;
    #             # C_iio_scan_block_destroy
    #             @test C_iio_scan_block_destroy(scan_block) === nothing;
    #         end
    #     end
    # end
end
| [
3500,
6208,
26,
198,
198,
3500,
1215,
38182,
3646,
9390,
26,
198,
198,
31,
9288,
2617,
366,
8019,
3978,
46,
14,
35836,
13,
20362,
1,
2221,
198,
220,
220,
220,
1303,
15560,
262,
29965,
198,
220,
220,
220,
19846,
2949,
8021,
861,
507,... | 1.997143 | 1,400 |
<reponame>mjirik/LarSurf.jl<gh_stars>1-10
# check speed of new and old sparse filter
using Revise
using LarSurf
using LinearAlgebraicRepresentation
Lar = LinearAlgebraicRepresentation
using Plasm, SparseArrays
using Pandas
using Seaborn
using Dates
using Logging
using Profile
using ProfileView
# Benchmark script: compare the new sparse_filter! against sparse_filter_old!
# on a [30,30,30] boundary operator. Each variant is called once untimed to
# trigger compilation, then timed five times.
b3, something = LarSurf.get_boundary3([30,30,30])
LarSurf.sparse_filter!(b3, 1, 1, 0)
LarSurf.sparse_filter_old!(b3, 1, 1, 0)
println("start")
@time LarSurf.sparse_filter!(b3, 1, 1, 0)
@time LarSurf.sparse_filter!(b3, 1, 1, 0)
@time LarSurf.sparse_filter!(b3, 1, 1, 0)
@time LarSurf.sparse_filter!(b3, 1, 1, 0)
@time LarSurf.sparse_filter!(b3, 1, 1, 0)
println("asdf")
# b3 = LarSurf.get_boundary3([30,30,30])
@time LarSurf.sparse_filter_old!(b3, 1, 1, 0)
# b3 = LarSurf.get_boundary3([30,30,30])
@time LarSurf.sparse_filter_old!(b3, 1, 1, 0)
@time LarSurf.sparse_filter_old!(b3, 1, 1, 0)
@time LarSurf.sparse_filter_old!(b3, 1, 1, 0)
@time LarSurf.sparse_filter_old!(b3, 1, 1, 0)
| [
27,
7856,
261,
480,
29,
76,
73,
343,
1134,
14,
43,
283,
14214,
69,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
2198,
2866,
286,
649,
290,
1468,
29877,
8106,
198,
198,
3500,
5416,
786,
198,
3500,
25577,
14214,
69,
198... | 2.194631 | 447 |
<reponame>zyth0s/PySCF.jl
module PySCF
using PyCall: pyimport
pyscf = pyimport("pyscf")
mp = pyimport("pyscf.mp") # Had to import mp alone ??!
cc = pyimport("pyscf.cc") # Had to import mp alone ??!
# Utilities
"""
    pyscf_atom_from_xyz(fpath::String)

Read an XYZ file and return the atom block — everything after the first two
header lines (atom count and comment) — as a single newline-joined string,
the format PySCF's `atom` keyword accepts.

Fix: the original `read(open(fpath), String)` leaked the file handle;
`read(fpath, String)` opens and closes the file itself.
"""
function pyscf_atom_from_xyz(fpath::String)
    join(split(read(fpath, String), "\n")[3:end], "\n")
end
# Map the unordered pair (i, j) of 1-based indices to its 1-based position in
# a packed lower-triangular (symmetric) layout: index(i, j) == index(j, i).
function index(i::Int, j::Int)
    lo, hi = minmax(i - 1, j - 1)
    div(hi * (hi + 1), 2) + lo + 1
end

# Compound index for the two-electron integral (ij|kl), exploiting the 8-fold
# permutational symmetry: pack (i,j) and (k,l), then pack the resulting pair.
function get_4idx(i::Int, j::Int, k::Int, l::Int)
    index(index(i, j), index(k, l))
end
# Calculation of the electronic structure of a molecule
# with the help of PySCF to calculate AO integrals and hold molecular data
# * Restricted Hartree-Fock
# * Møller-Plesset order 2
# * Coupled Cluster Singles and Doubles
# Calculates the energy of any molecule.
# Instructions at https://github.com/CrawfordGroup/ProgrammingProjects
# may be of interest.
using Formatting: printfmt
using LinearAlgebra: Diagonal, Hermitian, eigen, norm, tr, diag, dot
include("diis.jl")
include("scf.jl")
include("mp2.jl")
include("ccsd.jl")
include("properties.jl")
end
| [
27,
7856,
261,
480,
29,
89,
5272,
15,
82,
14,
20519,
6173,
37,
13,
20362,
198,
198,
21412,
9485,
6173,
37,
198,
198,
3500,
9485,
14134,
25,
12972,
11748,
198,
198,
79,
893,
12993,
796,
12972,
11748,
7203,
79,
893,
12993,
4943,
198,
... | 2.554779 | 429 |
<filename>src/profiling.jl
# Code used for latency profiling
using StatsBase, Statistics, Dates
# One latency sample recorded for a single worker and iteration.
struct ProfilerInput
    worker::Int          # worker index the sample belongs to
    θ::Float64           # fraction of the dataset stored by this worker
    q::Float64           # fraction of local data processed this iteration
    timestamp::Time      # when the sample was recorded
    comp_delay::Float64  # computation latency of this iteration
    comm_delay::Float64  # communication latency of this iteration
end

# Windowed latency statistics for a single worker, produced by the profiler.
struct ProfilerOutput
    worker::Int # worker index
    θ::Float64 # fraction of the dataset stored by this worker, averaged over all input samples that make up this output
    q::Float64 # fraction of local data processed per iteration, averaged over all input samples that make up this output
    comp_mc::Float64 # = (mean comp. delay) / (θ*q)
    comp_vc::Float64 # = (var of comp. delay) / (θ*q)
    comm_mc::Float64 # = mean comm. delay
    comm_vc::Float64 # = var of comm. delay
end

# Order (timestamp => sample) pairs by their timestamp so they can be kept in
# sorted collections/heaps.
Base.isless(p::Pair{Time, CodedComputing.ProfilerInput}, q::Pair{Time, CodedComputing.ProfilerInput}) = isless(first(p), first(q))
# Create the profiler's channel pair: `chin` receives ProfilerInput samples,
# `chout` carries the windowed ProfilerOutput statistics.
function setup_profiler_channels(;chin_size=200, chout_size=200)
    chin = ConcurrentCircularBuffer{ProfilerInput}(chin_size)
    chout = ConcurrentCircularBuffer{ProfilerOutput}(chout_size)
    chin, chout
end
# Variance of `f` over `itr`, computed as E[f²] − E[f]².
# NOTE(review): this is the uncorrected (population) variance, unlike the
# Bessel-corrected `Statistics.var` default, and the E[x²]−E[x]² form can lose
# precision to cancellation. Extending `StatsBase.var` on types not owned here
# is also type piracy — consider a package-local name instead.
function StatsBase.var(f::Function, itr)
    g = (x) -> f(x)^2
    mean(g, itr) - mean(f, itr)^2
end
"""
Remove all values at the end of the window older than windowsize, and return the number of
elements removed.
"""
function Base.filter!(w::CircularBuffer{ProfilerInput}; windowsize)
rv = 0
while length(w) > 0 && (w[1].timestamp - w[end].timestamp) > windowsize
pop!(w)
rv += 1
end
rv
end
"""
Return a view into the elements of w in beween the qlower and qupper quantiles.
"""
function comp_quantile_view(w::CircularBuffer{ProfilerInput}, buffer::Vector{ProfilerInput}, qlower::Real, qupper::Real)
0 <= qlower <= qupper <= 1.0 || throw(ArgumentError("qlower is $qlower and qupper is $qupper"))
n = length(w)
for i in 1:n
buffer[i] = w[i]
end
@views sort!(buffer[1:n], by=(x)->getfield(x, :comp_delay), alg=QuickSort)
il = max(1, ceil(Int, n*qlower))
iu = min(length(buffer), floor(Int, n*qupper))
view(buffer, il:iu)
end
"""
Return a view into the elements of w in beween the qlower and qupper quantiles.
"""
function comm_quantile_view(w::CircularBuffer{ProfilerInput}, buffer::Vector{ProfilerInput}, qlower::Real, qupper::Real)
0 <= qlower <= qupper <= 1.0 || throw(ArgumentError("qlower is $qlower and qupper is $qupper"))
n = length(w)
for i in 1:n
buffer[i] = w[i]
end
@views sort!(buffer[1:n], by=(x)->getfield(x, :comm_delay), alg=QuickSort)
il = max(1, ceil(Int, n*qlower))
iu = min(length(buffer), floor(Int, n*qupper))
view(buffer, il:iu)
end
# Mean and variance of the normalized computation delay, delay / (θ·q), over
# the quantile-trimmed window; returns (NaN, NaN) when fewer than `minsamples`
# samples survive the trim.
function comp_mean_var(w::CircularBuffer{ProfilerInput}; buffer::Vector{ProfilerInput}, qlower::Real, qupper::Real, minsamples::Integer)
    vs = comp_quantile_view(w, buffer, qlower, qupper)
    if length(vs) < minsamples
        return NaN, NaN
    end
    m = mean((x)->getfield(x, :comp_delay) / (getfield(x, :θ) * getfield(x, :q)), vs)
    v = var((x)->getfield(x, :comp_delay) / (getfield(x, :θ) * getfield(x, :q)), vs)
    m, v
end
# Mean and variance of the communication delay over the quantile-trimmed
# window; returns (NaN, NaN) when fewer than `minsamples` samples survive.
function comm_mean_var(w::CircularBuffer{ProfilerInput}; buffer::Vector{ProfilerInput}, qlower::Real, qupper::Real, minsamples::Integer)
    vs = comm_quantile_view(w, buffer, qlower, qupper)
    if length(vs) < minsamples
        return NaN, NaN
    end
    m = mean((x)->getfield(x, :comm_delay), vs)
    v = var((x)->getfield(x, :comm_delay), vs)
    m, v
end
# Summarize worker `i`'s sample window into a ProfilerOutput: θ and q averaged
# over all samples, plus quantile-trimmed mean/variance of the computation and
# communication delays.
function process_window(w::CircularBuffer{ProfilerInput}, i::Integer; buffer::Vector{ProfilerInput}, qlower::Real, qupper::Real, minsamples::Integer)::ProfilerOutput
    length(w) > 0 || throw(ArgumentError("window must not be empty"))
    θ = mean((x)->getfield(x, :θ), w)
    q = mean((x)->getfield(x, :q), w)
    comp_mc, comp_vc = comp_mean_var(w; buffer, qlower, qupper, minsamples)
    comm_mc, comm_vc = comm_mean_var(w; buffer, qlower, qupper, minsamples)
    ProfilerOutput(i, θ, q, comp_mc, comp_vc, comm_mc, comm_vc)
end
"""
Latency profiling sub-system. Receives latency observations on `chin`, computes the mean and
variance over a moving time window of length `windowsize`, and sends the results on `chout`.
"""
function latency_profiler(chin::ConcurrentCircularBuffer{ProfilerInput}, chout::ConcurrentCircularBuffer{ProfilerOutput}; nworkers::Integer, qlower::Real=0.1, qupper::Real=0.9, buffersize::Integer=1000, minsamples::Integer=10, windowsize::Dates.AbstractTime=Second(60))
0 < nworkers || throw(ArgumentError("nworkers is $nworkers"))
0 <= qlower <= qupper <= 1.0 || throw(ArgumentError("qlower is $qlower and qupper is $qupper"))
@info "latency_profiler task started with windowsize $windowsize and minsamples $minsamples on thread $(Threads.threadid())"
# maintain a window of latency samples for each worker
ws = [CircularBuffer{CodedComputing.ProfilerInput}(buffersize) for _ in 1:nworkers]
buffer = Vector{ProfilerInput}(undef, buffersize)
# process incoming latency samples
while isopen(chin)
# consume all values currently in the channel
try
vin::ProfilerInput = take!(chin)
if !isnan(vin.comp_delay) && !isnan(vin.comm_delay)
pushfirst!(ws[vin.worker], vin)
end
catch e
if e isa InvalidStateException
@info "error taking value from input channel" e
break
else
rethrow()
end
end
while isready(chin)
try
vin::ProfilerInput = take!(chin)
if !isnan(vin.comp_delay) && !isnan(vin.comm_delay)
pushfirst!(ws[vin.worker], vin)
end
catch e
if e isa InvalidStateException
@info "error taking value from input channel" e
break
else
rethrow()
end
end
end
# filter out values older than windowsize
for i in 1:nworkers
filter!(ws[i]; windowsize)
end
# compute updated statistics for all workers
for i in 1:nworkers
if length(ws[i]) == 0
continue
end
vout = process_window(ws[i], i; buffer, qlower, qupper, minsamples)
if isnan(vout.θ) || isnan(vout.q) || isnan(vout.comp_mc) || isnan(vout.comp_vc) || isnan(vout.comm_mc) || isnan(vout.comm_vc)
continue
end
if vout.comp_mc < 0 || vout.comp_vc < 0 || vout.comm_mc < 0 || vout.comm_vc < 0
continue
end
if isapprox(vout.comp_mc, 0) || isapprox(vout.comp_vc, 0) || isapprox(vout.comm_mc, 0) || isapprox(vout.comm_vc, 0)
continue
end
try
push!(chout, vout)
catch e
if e isa InvalidStateException
@info "error pushing value into output channel" e
break
else
rethrow()
end
end
end
end
@info "latency_profiler task finished"
end | [
27,
34345,
29,
10677,
14,
5577,
4386,
13,
20362,
198,
2,
6127,
973,
329,
24812,
31582,
198,
3500,
20595,
14881,
11,
14370,
11,
44712,
198,
198,
7249,
4415,
5329,
20560,
198,
220,
220,
220,
8383,
3712,
5317,
198,
220,
220,
220,
7377,
... | 2.24776 | 3,237 |
<filename>src/dracula.jl<gh_stars>100-1000
# Names follow:
# https://draculatheme.com/contribute#color-palette
dracula_palette = [
    colorant"#8be9fd" # Cyan
    colorant"#ff79c6" # Pink
    colorant"#50fa7b" # Green
    colorant"#bd93f9" # Purple
    colorant"#ffb86c" # Orange
    colorant"#ff5555" # Red
    colorant"#f1fa8c" # Yellow
    colorant"#6272a4" # Comment
]
dracula_bg = colorant"#282a36"  # background color
dracula_fg = colorant"#f8f8f2"  # foreground (text) color

# Register the theme: dark background, light foreground for all text roles,
# and the Dracula palette expanded against the background color.
_themes[:dracula] = PlotTheme(
    bg = dracula_bg,
    bginside = colorant"#30343B",
    fg = dracula_fg,
    fgtext = dracula_fg,
    fgguide = dracula_fg,
    fglegend = dracula_fg,
    legendfontcolor = dracula_fg,
    legendtitlefontcolor = dracula_fg,
    titlefontcolor = dracula_fg,
    palette = expand_palette(dracula_bg, dracula_palette),
    colorgradient = :viridis
)
| [
27,
34345,
29,
10677,
14,
7109,
330,
4712,
13,
20362,
27,
456,
62,
30783,
29,
3064,
12,
12825,
198,
2,
28531,
1061,
25,
198,
2,
3740,
1378,
7109,
330,
377,
26221,
1326,
13,
785,
14,
3642,
4163,
2,
8043,
12,
18596,
5857,
198,
7109,... | 2.113695 | 387 |
<reponame>briochemc/AIBECS.jl<gh_stars>10-100
# Reexport SinkingParticles as they are useful outside too
#@reexport module SinkingParticles
using Unitful
using LinearAlgebra, SparseArrays
using OceanGrids
"""
PFDO(grd; w_top)
Builds the particle-flux-divergence operator `PFDO` for a given particle sinking speed
(`w_top`).
Schematic of a grid cell:
```
top ┌─────────────────────────────────┐ ┬
│ ↓ w_top ↓ Φ_top │ │
│ (settling velovity) (flux) │ │
│ │ │
│ x │ δz
│ (particle conc.) │ │
│ │ │
│ │ │
bottom └─────────────────────────────────┘ ┴
```
- `δz` is the height of grid cell [m]
- `w_top` is the particle sinking speed at the top of the grid cell [m s⁻¹]
- `Φ_top` is the flux at the top of the grid cell [mol m⁻² s⁻¹]
- `x` is the particle concentration of the cell [mol m⁻³]
The PFDO is defined by
PFDO * x = δΦ/δz ≈ dΦ/dz,
i.e., so that applied to `x` it approximates the flux divergence of `x`, dΦ/δz.
It is calculated as the matrix product of DIVO and FATO:
PFDO = DIVO * FATO.
where the divergence operator `DIVO`, is defined by (`z` increasing downward)
DIVO * ϕ_top = 1/δz * (ϕ_top[below] - ϕ_top) = δϕ/δz ≈ dΦ/δz.
and FATO, the flux-at-top operator, is defined by
FATO * x = w_top * x[above] ≈ ϕ_top.
# Example
```julia-repl
julia> PFDO(grd, w_top=1.0) # 1.0 m/s (SI units assumed)
```
"""
function PFDO(grd; w_top)
    iwet = findall(vec(grd.wet3D))
    # Bind the divergence operator to `divo`, NOT `DIVO`: the original wrote
    # `DIVO = DIVO(grd)`, and in Julia that assignment makes `DIVO` local to
    # the whole function body, so the right-hand side referenced an undefined
    # local and threw UndefVarError instead of calling the global `DIVO`.
    divo = DIVO(grd)
    Iabove = buildIabove(grd.wet3D, iwet)
    # Strip units after converting the sinking speed to SI base units.
    w_top = ustrip.(upreferred.(w_top))
    return PFDO(w_top, divo, Iabove)
end
"""
PFDO(w, DIVO, Iabove)
Returns `DIVO * FATO(w, Iabove)`
This function is useful to avoid reconstructing `DIVO` and `Iabove` every time.
It should allow for faster runs.
"""
PFDO(w_top, DIVO, Iabove) = DIVO * FATO(w_top, Iabove)
"""
    PFDO(grd, δz, w_top, w_bot, frac_seafloor, cumfrac_seafloor, fsedremin, Iabove)

Return the particle-flux-divergence operator built from the settling velocities
at the top (`w_top`) and bottom (`w_bot`) of each grid cell, accounting for the
fraction of each cell's floor that is seafloor.

For each cell, the flux entering at the top is reduced by the cumulated
seafloor fraction of the cell above (`Iabove * cumfrac_seafloor`), since
particles cannot come through land. The flux leaving at the bottom is reduced
by this cell's cumulated seafloor fraction `cumfrac_seafloor`, except that a
fraction `fsedremin` of the particles hitting the local seafloor area
`frac_seafloor` is forcibly kept in the water column (with `fsedremin = 0`
they are buried and removed from the system).

Concretely, the effective velocities are

- `fw_top = (1 - Iabove * cumfrac_seafloor) .* w_top`
- `fw_bot = (1 - cumfrac_seafloor + (1 - fsedremin) * frac_seafloor) .* w_bot`

and the operator is `Diagonal(fw_bot ./ δz) - Diagonal(fw_top ./ δz) * Iabove`.
"""
function PFDO(grd, δz, w_top, w_bot, frac_seafloor, cumfrac_seafloor, fsedremin, Iabove)
    # Effective top velocity: only the part coming through the water of the cell above.
    w_top_eff = (1.0 .- Iabove * cumfrac_seafloor) .* w_top
    # Effective bottom velocity: open-water part plus the non-buried part of
    # what hits the seafloor within this cell.
    w_bot_eff = @. (1.0 - cumfrac_seafloor + (1.0 - fsedremin) * frac_seafloor) * w_bot
    return sparse(Diagonal(w_bot_eff ./ δz)) - sparse(Diagonal(w_top_eff ./ δz)) * Iabove
end
"""
    DIVO(grd)

Build the `DIVO` operator such that

    DIVO * ϕ_top = 1/δz * (ϕ_top - ϕ_top[below]) ≈ dΦ/δz.
"""
function DIVO(grd)
    Ibelow = buildIbelow(grd)
    # Restrict the layer heights to wet (ocean) boxes and strip any units.
    iwet = indices_of_wet_boxes(grd)
    δz = ustrip.(grd.δz_3D[iwet])
    return sparse(Diagonal(1 ./ δz)) * (Ibelow - I) # divergence with positive downwards
end
"""
    FATO(w_top, Iabove)

Build the flux-at-top operator for a particle sinking speed `w_top`
(the sinking speed at the top of each grid cell), defined by

    FATO * x = w_top * x(above) ≈ ϕ_top.

`w_top` may be a per-cell `Vector` or a single `Number` applied uniformly.
"""
function FATO(w_top::Vector, Iabove)
    # Put the per-cell speed on the diagonal, then pull the value from above.
    W = sparse(Diagonal(w_top))
    return W * Iabove
end
FATO(w_top::Number, Iabove) = w_top * Iabove
export DIVO, PFDO, FATO
#end # module
"""
    transportoperator(grd, w)

Returns the transportoperator for the given settling velocity `w`.

The settling velocity can be provided as either a scalar
(e.g., `w = 100.0` in units of meters per second)
or as a function of depth
(e.g., `w(z) = 2z + 1`).

# Examples

Create the particle flux divergence with settling velocity of 100m/s

```julia-repl
julia> T = transportoperator(grd, 100.0)
```

Or with settling velocity function w(z) = 2z + 1

```julia-repl
julia> T = transportoperator(grd, z -> 2z + 1)
```

By default, the seafloor flux is set to zero, so that all the particles
that reach it are remineralized there. You can let particles go through
by setting `fsedremin=0.0`, via, e.g.,

```julia-repl
julia> T = transportoperator(grd, z -> 2z + 1; fsedremin=0.0)
```

For finer control and advanced use, see the particle-flux divergence
operator function, `PFDO`.
"""
transportoperator(grd, w_top; DIVop=DIVO(grd), Iabove=buildIabove(grd)) = PFDO(w_top, DIVop, Iabove)

# Function-of-depth method: evaluate `w` at the top and bottom depth of each
# cell and build the operator that accounts for partial seafloor area.
function transportoperator(grd, w::Function;
        δz = ustrip.(grd.δz_3D[iswet(grd)]),
        Iabove = buildIabove(grd),
        fsedremin = 1.0,
        z_top = topdepthvec(grd),
        z_bot = bottomdepthvec(grd),
        frac_seafloor = float.(isseafloorvec(grd)),
        cumfrac_seafloor = zcumsum(frac_seafloor, grd))
    # `upreferred`/`ustrip` reduce any unitful velocities to plain numbers.
    return PFDO(grd, δz, ustrip.(upreferred.(w.(z_top))), ustrip.(upreferred.(w.(z_bot))), frac_seafloor, cumfrac_seafloor, fsedremin, Iabove)
end
export transportoperator
| [
27,
7856,
261,
480,
29,
65,
380,
18958,
66,
14,
32,
9865,
2943,
50,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
2,
797,
39344,
311,
8040,
7841,
2983,
355,
484,
389,
4465,
2354,
1165,
198,
2,
31,
631,
87,
634,
8265,
... | 2.196544 | 2,951 |
<reponame>BlancaCC/TFG-Estudio-de-las-redes-neuronales
@testset "Nodes initialization algorithm n=3 entry = 3 output = 2" begin
    M = 1 # Constant for the ramp function
    # Well defined for size n = 2 and output of dimension 1
    f_regression(x,y,z)=[x*y-z,x]
    data_set_size = 6
    entry_dimension = 3
    output_dimension = 2
    # Number of neurons
    n = data_set_size # Must be greater than 1 so that no error occurs
    X_train= rand(Float32, data_set_size, entry_dimension)
    # Build the regression targets row by row from the random inputs.
    Y_train::Matrix = mapreduce(permutedims, vcat, map(x->f_regression(x...), eachrow(X_train)))
    h = nn_from_data(X_train, Y_train, n, M)
    # Check that the sizes of the weight matrices are as expected.
    @test size(h.W1) == (n,entry_dimension+1)
    @test size(h.W2) == (output_dimension,n)
    # If it was built correctly:
    # evaluating the neural network on the data used to build it
    # should reproduce the corresponding value of Y_train.
    evaluar(x)=forward_propagation(h,
        RampFunction,x)
    for i in 1:n
        @test evaluar(X_train[i,:]) ≈ Y_train[i,:]
    end
end
| [
27,
7856,
261,
480,
29,
3629,
42124,
4093,
14,
10234,
38,
12,
36,
19149,
952,
12,
2934,
12,
21921,
12,
445,
274,
12,
710,
44372,
2040,
198,
2488,
9288,
2617,
366,
45,
4147,
37588,
11862,
299,
28,
18,
5726,
796,
513,
5072,
796,
362... | 2.215415 | 506 |
<reponame>szarnyasg/SuiteSparseGraphBLAS.jl<gh_stars>0
@testset "operations.jl" begin
@testset "ewise" begin
    # m is a dense 3x2 matrix; n is sparse with entries at (1,1), (2,2), (3,2), (2,1).
    m = GBMatrix([[1,2,3] [4,5,6]])
    n = GBMatrix([1,2,3,2], [1,2,2,1], [1,2,3,4])
    #eadd correctness
    @test eadd(m, n) == GBMatrix([1,1,2,2,3,3], [1,2,1,2,1,2], [2,4,6,7,3,9])
    @test eadd(m, n, BinaryOps.GT)[1, 1] == 0
    #check that the (+) op is being picked up from the semiring.
    @test eadd(m, n, Semirings.PLUS_MAX) == eadd(m, n, BinaryOps.PLUS)
    #emul correctness
    @test emul(m, n, BinaryOps.POW)[3, 2] == m[3,2] ^ n[3,2]
    #check that the (*) op is being picked up from the semiring
    @test emul(m, n, Semirings.MAX_PLUS) == emul(m, n, BinaryOps.PLUS)
    # Broadcast comparison should produce a Bool-eltype result.
    @test eltype(m .== n) == Bool
end
@testset "kron" begin
    m1 = GBMatrix(UInt64[1, 2, 3, 5], UInt64[1, 3, 1, 2], Int8[1, 2, 3, 5])
    n1 = GBMatrix(ones(UInt32, 4, 4))
    # SparseArrays/Base reference pair for comparison.
    m2 = sparse([1, 2, 3, 5], [1, 3, 1, 2], Int8[1, 2, 3, 5])
    n2 = ones(Int32, 4, 4)
    o1 = kron(m1, n1)
    @test o1 == GBMatrix(kron(m2, n2)) #basic kron is equivalent
    mask = GBMatrix{Bool}(20, 12)
    mask[17:20, 5:8] = false #don't care value, using structural
    #mask out bottom chunk using structural complement
    o2 = kron(m1, n1; mask, desc=SC)
    @test o2[20, 5] === nothing #We don't want values in masked out area
    @test o2[1:2:15, :] == o1[1:2:15, :] #The rest should match, test indexing too.
end
@testset "map" begin
    # NOTE(review): sprand is unseeded, so specific entries such as [1,1]
    # may be absent on a given run — consider seeding for determinism.
    m = sprand(5, 5, 0.25)
    n = GBMatrix(m)
    # Unary map should agree with Base's elementwise log.
    @test map(UnaryOps.LOG, n)[1,1] == map(log, m)[1,1]
    o = map!(BinaryOps.GT, GBMatrix{Bool}(5, 5), 0.1, n)
    @test o[1,4] == (0.1 > m[1,4])
    # Binary map with a scalar second operand.
    @test map(BinaryOps.SECOND, n, 1.5)[1,1] == 1.5
    @test (n .* 10)[1,1] == n[1,1] * 10
end
@testset "mul" begin
    m = rand(10, 10)
    n = rand(10, 100)
    # Compare GraphBLAS matmul against Base matmul; the atol allows for a
    # different floating-point summation order between the two.
    @test isapprox(Matrix(mul(GBMatrix(m), GBMatrix(n))), m * n, atol=8e-15)
    m = GBMatrix([1,3,5,7], [7,5,3,1], [1,2,3,4])
    n = GBMatrix{Int8}(7, 1)
    n[1:2:7, 1] = [1, 10, 20, 30]
    o = mul(m, n)
    @test size(o) == (7,1)
    @test eltype(o) == Int64
    @test o[7, 1] == 4 && o[5, 1] == 30
    o = GBMatrix(ones(Int64, 7, 1))
    mask = GBMatrix(ones(Bool, 7, 1))
    mask[3,1] = false
    # Masked multiply accumulates into `o` everywhere except the masked entry.
    @test mul!(o, m, n; mask, accum=BinaryOps.PLUS) ==
        GBMatrix([31,1,1,1,31,1,5])
    m = GBMatrix([[1,2,3] [4,5,6]])
    n = GBVector([10,20,30])
    @test_throws DimensionMismatch m * n
    @test m' * n == GBVector([140, 320]) == n * m
end
@testset "reduce" begin
    m = GBMatrix([[1,2,3] [4,5,6] [7,8,9]])
    # NOTE(review): the two comparisons below compute a Bool and discard it —
    # they look like they were meant to be wrapped in @test. Confirm the
    # expected equalities before enabling them, since per the comment they
    # only hold for dense matrices.
    reduce(max, m, dims=2) == reduce(Monoids.MAX_MONOID, m) #this only works for dense
    reduce(Monoids.MAX_MONOID, m, dims=(1,2)) == 9
    @test_throws ArgumentError reduce(BinaryOps.TIMES, m)
end
@testset "select" begin
    m = GBMatrix([[1,2,3] [4,5,6] [7,8,9]])
    # tril keeps the lower triangle; entries above the diagonal are dropped.
    s = select(tril, m)
    @test s[1,2] === nothing && s[3,1] == 3
    # select with a predicate and threshold keeps entries where m[i,j] < 6.
    s = select(<, m, 6)
    @test s[2,2] == 5 && s[3,3] === nothing
end
@testset "transpose" begin
    m = GBMatrix(sprand(3, 3, 0.5))
    # Transposing a lazy adjoint should give back the original matrix.
    @test gbtranspose(m') == m
    @test m[1,2] == m'[2,1]
    @test m[1,2] == gbtranspose(m)[2,1]
end
end
| [
27,
7856,
261,
480,
29,
82,
89,
1501,
88,
292,
70,
14,
5606,
578,
50,
29572,
37065,
9148,
1921,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
31,
9288,
2617,
366,
3575,
602,
13,
20362,
1,
2221,
198,
220,
220,
220,
2488,
9288,
2617... | 1.780639 | 1,942 |
using SimpleTest
using YAML

# Read two numbers from stdin and print the result of `simple_operation`.
println("Enter two numbers")
num1 = parse(Float64, readline())
num2 = parse(Float64, readline())
result = simple_operation(num1, num2)
println("The sum is ", result)

# Locate the settings file relative to the current working directory.
sep = "/"
working_path = pwd()
settings_path = joinpath(working_path, "settings.yml")
# NOTE(review): LOAD_PATH is extended *after* `using SimpleTest` above;
# confirm that `src` is not required for that import to resolve.
testpath = joinpath(pwd(), "src")
push!(LOAD_PATH, testpath)
# `load_file` opens and closes the file itself; the previous
# `YAML.load(open(settings_path))` leaked the file handle.
settings = YAML.load_file(settings_path)
output_path = joinpath(working_path, "output")
start_of_opt(settings, sep, output_path)
3500,
17427,
14402,
198,
3500,
575,
2390,
43,
198,
35235,
7203,
17469,
734,
3146,
4943,
198,
22510,
16,
796,
21136,
7,
43879,
2414,
11,
1100,
1370,
28955,
198,
22510,
17,
796,
21136,
7,
43879,
2414,
11,
1100,
1370,
28955,
198,
20274,
... | 2.901235 | 162 |
@testset "fsm_active_close.jl" begin
    # Removed unused locals `base_seq`, `DEFAULT_CAPACITY`, and `TIMEOUT_DFLT`:
    # nothing in this testset referenced them.
    # NOTE(review): the inner testset name mentions TIME_WAIT, but the body
    # only drives the passive-open handshake LISTEN -> SYN_RCVD -> ESTABLISHED;
    # confirm the name against the intended scenario.
    @testset "start in TIME_WAIT, timeout" begin
        conn = TCPConnection()
        # A connection in LISTEN emits nothing and stays in LISTEN on ticks.
        expect_state(conn, JLSponge.LISTEN)
        tick!(conn, 1)
        expect_state(conn, JLSponge.LISTEN)
        # Peer SYN moves us to SYN_RCVD; we reply with SYN+ACK (ackno = 1).
        send_syn!(conn, WrappingInt32(0))
        tick!(conn, 1)
        seg = expect_one_seg(conn; ack=true, syn=true, ackno=WrappingInt32(1))
        expect_state(conn, JLSponge.SYN_RCVD)
        # Peer ACK of our SYN completes the handshake.
        send_ack!(conn, WrappingInt32(1), seg.header.seqno + 1)
        tick!(conn, 1)
        expect_no_seg(conn)
        expect_state(conn, JLSponge.ESTABLISHED)
    end
end
31,
9288,
2617,
366,
69,
5796,
62,
5275,
62,
19836,
13,
20362,
1,
2221,
198,
220,
220,
220,
2779,
62,
41068,
796,
27323,
2105,
5317,
2624,
7,
16,
9959,
3261,
8,
198,
220,
220,
220,
5550,
38865,
62,
33177,
2246,
9050,
796,
5598,
83... | 1.97035 | 371 |
using Test

# Run each sub-suite in its own testset so failures are reported per file.
for suite in ("config", "fileio", "json")
    @testset "$suite" begin
        include("$suite.jl")
    end
end
3500,
6208,
198,
198,
31,
9288,
2617,
366,
11250,
1,
2221,
198,
220,
220,
220,
2291,
7203,
11250,
13,
20362,
4943,
198,
437,
198,
198,
31,
9288,
2617,
366,
7753,
952,
1,
2221,
198,
220,
220,
220,
2291,
7203,
7753,
952,
13,
20362,
... | 2.507463 | 67 |
<gh_stars>1-10
module LifeContingencies
using ActuaryUtilities
using MortalityTables
using Transducers
using Dates
using Yields
const mt = MortalityTables
export LifeContingency,
Insurance, AnnuityDue, AnnuityImmediate,
APV,
SingleLife, Frasier, JointLife,
LastSurvivor,
survival,
reserve_premium_net,
insurance,
annuity_due,
annuity_immediate,
premium_net,
omega,
survival,
discount,
benefit,
probability,
cashflows,
cashflows,
timepoints,
present_value
# 'actuarial objects' that combine multiple forms of decrements (lapse, interest, death, etc)
abstract type Life end

"""
    struct SingleLife
        mort
        issue_age::Int
        alive::Bool
        fractional_assump::MortalityTables.DeathDistribution
    end

A `Life` object containing the necessary assumptions for contingent maths related to a single life. Use with a `LifeContingency` to do many actuarial present value calculations.

Keyword arguments:
- `mort` pass a mortality vector, which is an array of applicable mortality rates indexed by attained age
- `issue_age` is the assumed issue age for the `SingleLife` and is the basis of many contingency calculations.
- `alive` Default value is `true`. Useful for joint insurances with different status on the lives insured.
- `fractional_assump`. Default value is `Uniform()`. This is a `DeathDistribution` from the `MortalityTables.jl` package and is the assumption to use for non-integer ages/times.

# Examples
    using MortalityTables
    mort = MortalityTables.table("2001 VBT Residual Standard Select and Ultimate - Male Nonsmoker, ANB")

    SingleLife(
        mort = mort.select[30],
        issue_age = 30
    )
"""
struct SingleLife <: Life
    mort
    issue_age
    alive
    fractional_assump
end

# Keyword-argument convenience constructor; forwards to the positional method below.
function SingleLife(;mort,issue_age=nothing,alive=true,fractional_assump = mt.Uniform())
    return SingleLife(mort;issue_age,alive,fractional_assump)
end

function SingleLife(mort;issue_age=nothing,alive=true,fractional_assump = mt.Uniform())
    # Default the issue age to the first age of the mortality table.
    if isnothing(issue_age)
        issue_age = firstindex(mort)
    end
    if !(eltype(mort) <: Real)
        # most likely case is that mort is an array of vectors
        # use issue age to select the right one (assuming indexed with issue age)
        return SingleLife(mort[issue_age],issue_age,alive,fractional_assump)
    else
        return SingleLife(mort,issue_age,alive,fractional_assump)
    end
end
"""
    JointAssumption()

An abstract type representing the different assumed relationship between the survival of the lives on a JointLife. Available options to use include:
- `Frasier()`
"""
abstract type JointAssumption end

"""
    Frasier()

The assumption of independent lives in a joint life calculation.
Is a subtype of `JointAssumption`.
"""
struct Frasier <: JointAssumption end

"""
    Contingency()

An abstract type representing the different triggers for contingent benefits. Available options to use include:
- `LastSurvivor()`
"""
abstract type Contingency end

"""
    LastSurvivor()

The contingency whereupon benefits are payable upon both lives passing.
Is a subtype of `Contingency`
"""
struct LastSurvivor <: Contingency end
# TODO: Not Implemented
# """
# FirstToDie()
# The contingency whereupon benefits are payable upon the first life passing.
# Is a subtype of `Contingency`
# """
# struct FirstToDie <: Contingency end
"""
struct JointLife
lives
contingency
joint_assumption
end
A `Life` object containing the necessary assumptions for contingent maths related to a joint life insurance. Use with a `LifeContingency` to do many actuarial present value calculations.
Keyword arguments:
- `lives` is a tuple of two `SingleLife`s
- `contingency` default is `LastSurvivor()`. It is the trigger for contingent benefits. See `?Contingency`.
- `joint_assumption` Default value is `Frasier()`. It is the assumed relationship between the mortality of the two lives. See `?JointAssumption`.
# Examples
using MortalityTables
mort = MortalityTables.table("2001 VBT Residual Standard Select and Ultimate - <NAME>, ANB")
l1 = SingleLife(
mort = mort.select[30],
issue_age = 30
)
l2 = SingleLife(
mort = mort.select[30],
issue_age = 30
)
jl = JointLife(
lives = (l1,l2),
contingency = LastSurvivor(),
joint_assumption = Frasier()
)
"""
Base.@kwdef struct JointLife <: Life
lives::Tuple{SingleLife,SingleLife}
contingency::Contingency = LastSurvivor()
joint_assumption::JointAssumption = Frasier()
end
"""
    struct LifeContingency
        life::Life
        int
    end

A pairing of a `Life` (the mortality assumptions) with an interest/yield
assumption `int`, used as the basis for the commutation and actuarial
present value functions.
"""
struct LifeContingency
    life::Life
    int
end

# Treat a `LifeContingency` as a scalar when broadcasting.
Base.broadcastable(lc::LifeContingency) = Ref(lc)
"""
omega(lc::LifeContingency)
omega(l::Life)
omega(i::InterestRate)
# `Life`s and `LifeContingency`s
Returns the last defined time_period for both the interest rate and mortality table.
Note that this is *different* than calling `omega` on a `MortalityTable`, which will give you the last `attained_age`.
Example: if the `LifeContingency` has issue age 60, and the last defined attained age for the `MortalityTable` is 100, then `omega` of the `MortalityTable` will be `100` and `omega` of the
`LifeContingency` will be `40`.
# `InterestRate`s
The last period that the interest rate is defined for. Assumed to be infinite (`Inf`) for
functional and constant interest rate types. Returns the `lastindex` of the vector if
a vector type.
"""
function mt.omega(lc::LifeContingency)
# if one of the omegas is infinity, that's a Float so we need
# to narrow the type with Int
return Int(omega(lc.life))
end
function mt.omega(l::SingleLife)
return mt.omega(l.mort) - l.issue_age + 1
end
function mt.omega(l::JointLife)
return minimum( omega.(l.lives) )
end
###################
## COMMUTATIONS ###
###################
"""
D(lc::LifeContingency, to_time)
``D_x`` is a retrospective actuarial commutation function which is the product of the survival and discount factor.
"""
function D(lc::LifeContingency, to_time)
return discount(lc.int, to_time) * survival(lc,to_time)
end
"""
l(lc::LifeContingency, to_time)
``l_x`` is a retrospective actuarial commutation function which is the survival up to a certain point in time. By default, will have a unitary basis (ie `1.0`), but you can specify `basis` keyword argument to use something different (e.g. `1000` is common in the literature.)
"""
function l(lc::LifeContingency, to_time; basis=1.0)
return survival(lc.life,to_time) * basis
end
"""
C(lc::LifeContingency, to_time)
``C_x`` is a retrospective actuarial commutation function which is the product of the discount factor and the difference in `l` (``l_x``).
"""
function C(lc::LifeContingency, to_time)
discount(lc.int, to_time+1) * (l(lc,to_time) - l(lc, to_time+1))
end
"""
N(lc::LifeContingency, from_time)
``N_x`` is a prospective actuarial commutation function which is the sum of the `D` (``D_x``) values from the given time to the end of the mortality table.
"""
function N(lc::LifeContingency, from_time)
range = from_time:(omega(lc)-1)
return foldxt(+,Map(from_time->D(lc, from_time)), range)
end
"""
M(lc::LifeContingency, from_time)
The ``M_x`` actuarial commutation function where the `from_time` argument is `x`.
Issue age is based on the issue_age in the LifeContingency `lc`.
"""
function M(lc::LifeContingency, from_time)
range = from_time:omega(lc)-1
return foldxt(+,Map(from_time->C(lc, from_time)), range)
end
E(lc::LifeContingency, t, x) = D(lc,x + t) / D(lc,x)
##################
### Insurances ###
##################
abstract type Insurance end
LifeContingency(ins::Insurance) = LifeContingency(ins.life,ins.int)
struct WholeLife <: Insurance
life
int
end
struct Term <: Insurance
life
int
n
end
"""
Insurance(lc::LifeContingency; n=nothing)
Insurance(life,interest; n=nothing)
Life insurance with a term period of `n`. If `n` is `nothing`, then whole life insurance.
Issue age is based on the `issue_age` in the LifeContingency `lc`.
# Examples
```
ins = Insurance(
SingleLife(mort = UltimateMortality([0.5,0.5]),issue_age = 0),
Yields.Constant(0.05),
n = 1
)
```
"""
Insurance(lc::LifeContingency; n=nothing) = Insurance(lc.life,lc.int;n)
function Insurance(lc,int;n=nothing)
if isnothing(n)
return WholeLife(lc,int)
elseif n < 1
return ZeroBenefit(lc,int)
else
Term(lc,int,n)
end
end
struct Due end
struct Immediate end
struct Annuity <: Insurance
life
int
payable
n
start_time
certain
frequency
end
struct ZeroBenefit <: Insurance
life
int
end
function ZeroBenefit(lc::LifeContingency)
return ZeroBenefit(lc.life,lc.int)
end
"""
    AnnuityDue(lc::LifeContingency; n=nothing, start_time=0, certain=nothing, frequency=1)
    AnnuityDue(life, interest; n=nothing, start_time=0, certain=nothing, frequency=1)

Annuity due with the benefit period starting at `start_time` and ending after `n` periods
with `frequency` payments per year of `1/frequency` amount and a `certain` period with
non-contingent payments.

If `n` is given and `n < 1`, the annuity has no payments and a `ZeroBenefit` is returned.

# Examples
```
ins = AnnuityDue(
    SingleLife(mort = UltimateMortality([0.5,0.5]),issue_age = 0),
    Yields.Constant(0.05),
    n = 1
)
```

"""
function AnnuityDue(life, int; n=nothing, start_time=0, certain=nothing, frequency=1)
    # `!` (logical not) instead of `~` (bitwise not): clearer intent on a Bool.
    if !isnothing(n) && n < 1
        # A term of less than one period pays nothing.
        return ZeroBenefit(life, int)
    else
        return Annuity(life, int, Due(), n, start_time, certain, frequency)
    end
end

function AnnuityDue(lc::LifeContingency; n=nothing, start_time=0, certain=nothing, frequency=1)
    # Convenience method: unpack the life and interest assumption from `lc`.
    return AnnuityDue(lc.life, lc.int; n, start_time, certain, frequency)
end
"""
    AnnuityImmediate(lc::LifeContingency; n=nothing, start_time=0, certain=nothing, frequency=1)
    AnnuityImmediate(life, interest; n=nothing, start_time=0, certain=nothing, frequency=1)

Annuity immediate with the benefit period starting at `start_time` and ending after `n` periods
with `frequency` payments per year of `1/frequency` amount and a `certain` period with
non-contingent payments.

If `n` is given and `n < 1`, the annuity has no payments and a `ZeroBenefit` is returned.

# Examples
```
ins = AnnuityImmediate(
    SingleLife(mort = UltimateMortality([0.5,0.5]),issue_age = 0),
    Yields.Constant(0.05),
    n = 1
)
```

"""
function AnnuityImmediate(life, int; n=nothing, start_time=0, certain=nothing, frequency=1)
    # `!` (logical not) instead of `~` (bitwise not): clearer intent on a Bool.
    if !isnothing(n) && n < 1
        # A term of less than one period pays nothing.
        return ZeroBenefit(life, int)
    else
        return Annuity(life, int, Immediate(), n, start_time, certain, frequency)
    end
end

function AnnuityImmediate(lc::LifeContingency; n=nothing, start_time=0, certain=nothing, frequency=1)
    # Convenience method: unpack the life and interest assumption from `lc`.
    return AnnuityImmediate(lc.life, lc.int; n, start_time, certain, frequency)
end
"""
    survival(Insurance)

The survivorship vector for the given insurance.
"""
function MortalityTables.survival(ins::Insurance)
    # Death benefits are contingent on being alive at the *start* of each period (t - 1).
    return [survival(ins.life,t-1) for t in timepoints(ins)]
end

function MortalityTables.survival(ins::Annuity)
    # Annuity payments are contingent on surviving *to* each timepoint t.
    return [survival(ins.life,t) for t in timepoints(ins)]
end
"""
discount(Insurance)
The discount vector for the given insurance.
"""
function Yields.discount(ins::Insurance)
return Yields.discount.(ins.int,timepoints(ins))
end
"""
benefit(Insurance)
The unit benefit vector for the given insurance.
"""
function benefit(ins::Insurance)
return ones(length(timepoints(ins)))
end
function benefit(ins::ZeroBenefit)
return zeros(length(timepoints(ins)))
end
function benefit(ins::Annuity)
return ones(length(timepoints(ins))) ./ ins.frequency
end
"""
probability(Insurance)
The vector of contingent benefit probabilities for the given insurance.
"""
function probability(ins::Insurance)
return [survival(ins.life,t-1) * decrement(ins.life,t-1,t) for t in timepoints(ins)]
end
function probability(ins::ZeroBenefit)
return ones(length(timepoints(ins)))
end
function probability(ins::Annuity)
if isnothing(ins.certain)
return [survival(ins.life,t) for t in timepoints(ins)]
else
return [t <= ins.certain + ins.start_time ? 1.0 : survival(ins.life,t) for t in timepoints(ins)]
end
end
"""
cashflows(Insurance)
The vector of decremented benefit cashflows for the given insurance.
"""
function cashflows(ins::Insurance)
return probability(ins) .* benefit(ins)
end
"""
timepoints(Insurance)
The vector of times corresponding to the cashflow vector for the given insurance.
"""
function timepoints(ins::Insurance)
return collect(1:omega(ins.life))
end
function timepoints(ins::Term)
return collect(1:min(omega(ins.life),ins.n))
end
function timepoints(ins::ZeroBenefit)
return [0.]
end
function timepoints(ins::Annuity)
return timepoints(ins,ins.payable)
end
function timepoints(ins::Annuity,payable::Due)
if isnothing(ins.n)
end_time = omega(ins.life)
else
end_time = ins.n + ins.start_time - 1 / ins.frequency
end
timestep = 1 / ins.frequency
collect(ins.start_time:timestep:end_time)
end
function timepoints(ins::Annuity,payable::Immediate)
if isnothing(ins.n)
end_time = omega(ins.life)
else
end_time = ins.n + ins.start_time
end
timestep = 1 / ins.frequency
end_time = max(ins.start_time + timestep,end_time) # return at least one timepoint to avoid returning empty array
collect((ins.start_time + timestep):timestep:end_time)
end
"""
present_value(Insurance)
The actuarial present value of the given insurance.
"""
function ActuaryUtilities.present_value(ins)
return present_value(ins.int,cashflows(ins),timepoints(ins))
end
"""
    premium_net(lc::LifeContingency)
    premium_net(lc::LifeContingency,to_time)

The net premium for a whole life insurance (without second argument) or a term life insurance through `to_time`.

The net premium is based on 1 unit of insurance with the death benefit payable at the end of the year and assuming annual net premiums.
"""
premium_net(lc::LifeContingency) = A(lc) / ä(lc)
# The term must be passed as the keyword `n` (as in `reserve_premium_net`):
# a positional second argument would dispatch to `Insurance(life, int)` /
# `AnnuityDue(life, int)` and be interpreted as an interest rate, not a term.
premium_net(lc::LifeContingency,to_time) = A(lc,n=to_time) / ä(lc,n=to_time)
"""
reserve_premium_net(lc::LifeContingency,time)
The net premium reserve at the end of year `time`.
"""
function reserve_premium_net(lc::LifeContingency, time)
PVFB = A(lc) - A(lc,n=time)
PVFP = premium_net(lc) * (ä(lc) - ä(lc,n=time))
return (PVFB - PVFP) / APV(lc,time)
end
"""
APV(lc::LifeContingency,to_time)
The **actuarial present value** which is the survival times the discount factor for the life contingency.
"""
function APV(lc::LifeContingency,to_time)
return survival(lc,to_time) * discount(lc.int,to_time)
end
"""
decrement(lc::LifeContingency,to_time)
decrement(lc::LifeContingency,from_time,to_time)
Return the probablity of death for the given LifeContingency.
"""
mt.decrement(lc::LifeContingency,from_time,to_time) = 1 - survival(lc.life,from_time,to_time)
"""
survival(lc::LifeContingency,from_time,to_time)
survival(lc::LifeContingency,to_time)
Return the probablity of survival for the given LifeContingency.
"""
mt.survival(lc::LifeContingency,to_time) = survival(lc.life, 0, to_time)
mt.survival(lc::LifeContingency,from_time,to_time) = survival(lc.life, from_time, to_time)
mt.survival(l::SingleLife,to_time) = survival(l,0,to_time)
mt.survival(l::SingleLife,from_time,to_time) =survival(l.mort,l.issue_age + from_time,l.issue_age + to_time, l.fractional_assump)
"""
    survival(life)

Return a survival vector for the given life, with one entry per time
`t = 0, 1, …, omega(life)`.
"""
function mt.survival(l::Life)
    ω = omega(l)
    return [survival(l,t) for t in 0:ω]
end
mt.survival(l::JointLife,to_time) = survival(l::JointLife,0,to_time)
function mt.survival(l::JointLife,from_time,to_time)
return survival(l.contingency,l.joint_assumption,l::JointLife,from_time,to_time)
end
function mt.survival(ins::LastSurvivor,assump::JointAssumption,l::JointLife,from_time,to_time)
to_time == 0 && return 1.0
l1,l2 = l.lives
ₜpₓ = survival(l1.mort,l1.issue_age + from_time,l1.issue_age + to_time,l1.fractional_assump)
ₜpᵧ = survival(l2.mort,l2.issue_age + from_time,l2.issue_age + to_time,l2.fractional_assump)
return ₜpₓ + ₜpᵧ - ₜpₓ * ₜpᵧ
end
Yields.discount(lc::LifeContingency,t) = discount(lc.int,t)
Yields.discount(lc::LifeContingency,t1,t2) = discount(lc.int,t1,t2)
# Function composition that forwards keyword arguments to the inner function
# (unlike `∘`, which drops them). See:
# https://stackoverflow.com/questions/64740010/how-to-alias-composite-function-with-keyword-arguments
function ⋄(f, g)
    return (args...; kwargs...) -> f(g(args...; kwargs...))
end
# unexported aliases (traditional actuarial notation)
const V = reserve_premium_net # net premium reserve
const v = Yields.discount # one-period discount factor
const A = present_value ⋄ Insurance # APV of a (term/whole) life insurance
const a = present_value ⋄ AnnuityImmediate # APV of an annuity-immediate
const ä = present_value ⋄ AnnuityDue # APV of an annuity-due
const P = premium_net
const ω = omega
end # module
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
5155,
4264,
278,
3976,
198,
198,
3500,
2191,
2838,
18274,
2410,
198,
3500,
10788,
1483,
51,
2977,
198,
3500,
3602,
41213,
198,
3500,
44712,
198,
3500,
575,
1164,
82,
198,
220,
220,
22... | 2.723 | 6,213 |
<filename>test/all.jl
include("orderbook.jl")
include("blotter.jl")
include("trades.jl")
include("portfolio.jl")
include("account.jl")
include("utilities.jl")
| [
27,
34345,
29,
9288,
14,
439,
13,
20362,
198,
17256,
7203,
2875,
2070,
13,
20362,
4943,
198,
17256,
7203,
2436,
313,
353,
13,
20362,
4943,
198,
17256,
7203,
2213,
2367,
13,
20362,
4943,
198,
17256,
7203,
634,
13652,
13,
20362,
4943,
1... | 2.789474 | 57 |
using StanSample, DataFrames

# Stan program: Bernoulli observations with a flat Beta(1,1) prior on theta.
model = "
data {
  int<lower=0> N;
  int<lower=0,upper=1> y[N];
}
parameters {
  real<lower=0,upper=1> theta;
}
model {
  theta ~ beta(1,1);
  y ~ bernoulli(theta);
}
";

sm = SampleModel("bernoulli", model);

# Ten Bernoulli observations (three successes).
data = Dict("N" => 10, "y" => [0, 1, 0, 1, 0, 0, 0, 0, 0, 1]);

rc = stan_sample(sm; num_chains=4, data);

# Only read and display the draws if sampling succeeded.
if success(rc)
    df = read_samples(sm, :dataframe);
    df |> display
end
| [
3500,
7299,
36674,
11,
6060,
35439,
198,
198,
19849,
796,
366,
198,
7890,
1391,
220,
198,
220,
493,
27,
21037,
28,
15,
29,
399,
26,
220,
198,
220,
493,
27,
21037,
28,
15,
11,
45828,
28,
16,
29,
331,
58,
45,
11208,
198,
92,
220,
... | 2.135678 | 199 |
###Define functions to be used in testing below
###Test functions for TargetModel
"""
    targetsin!(r, t, paras)

Fill `r` in place with `sin(paras[1] * t[i])` for every index of `t`, and
return `r`. Used as an in-place target function for `TargetModel` tests.
"""
function targetsin!(r::Vector,t::AbstractVector,paras::Vector)
    # Hoist the loop-invariant frequency out of the loop.
    ω = paras[1]
    # eachindex(t, r) is idiomatic and also checks that the axes agree,
    # unlike the previous `1:length(t)` which could silently under-fill `r`.
    for i in eachindex(t, r)
        r[i] = sin(ω*t[i])
    end
    r
end
# Out-of-place variant of `targetsin!`: allocate a zeroed result vector of the
# same element type as `t`, then fill it in place.
function targetsin(t::AbstractVector, paras::Vector)
    buffer = zeros(eltype(t), length(t))
    return targetsin!(buffer, t, paras)
end
###Test function for ODEModel
# ODE right-hand side for y(t) = sin(ω t): y'(t) = ω cos(ω t), with ω = paras[1].
# Writes the derivative into `ydot[1]` (in-place convention of ODE solvers).
function odesin(t, y, ydot, paras)
    ω = paras[1]
    ydot[1] = ω * cos(ω * t)
end
#########################################################################################################
### Test model creation and evaluation functions

# Shared fixtures: a sine target sampled on [0,10]; the measurement data is
# generated with parameter value 3.0 (matching the evaluations below).
timepoints1 = linspace(0.0,10.0,100)
parameters1 = parameters([:a],[1.0],[5.0],[3.0])
measurements1 = data(:function,timepoints1,targetsin,timepoints1,[3.0])
measurements2 = data(:function,timepoints1,targetsin,timepoints1,[2.5])
measurements3 = data(:function,timepoints1,targetsin,timepoints1,[2.0])
noisemodel1 = noise(:gaussian,[0.01])
initial1 = [0.0]

# One model per flavour: out-of-place target, in-place target, and ODE.
model1 = model(:target,parameters1,measurements1,noisemodel1,targetsin;name="Test")
model2 = model(:target!,parameters1,measurements1,noisemodel1,targetsin!;name="Test!")
model3 = model(:ode,parameters1,measurements1,noisemodel1,odesin,initial1,1,[1];name="Test")

# Evaluating each model at 3.0 should reproduce the measurements of measurements1.
@test_approx_eq_eps evaluate!(model1,[3.0]) measurements(model1) 1e-4
@test_approx_eq_eps evaluate!(model2,[3.0]) measurements(model2) 1e-4
@test_approx_eq_eps evaluate!(model3,[3.0]) measurements(model3) 1e-4
#########################################################################################################
### Test the common initialize! function
s0 = samples(:base,1,1,Float64,Float64)
# :default initialization is expected to yield [3.0] (the last entry passed
# to `parameters` above — presumably its default value; confirm upstream).
@test initialize!(trait(:initialize,:default),model1,s0).values == [3.0]
# :prior initialization draws from the prior; the RNG is re-seeded on both
# sides so the draws can be compared for equality.
@test (srand(345) ; initialize!(trait(:initialize,:prior),model1,s0).values) == (srand(345) ; [rand(parameters1[1].prior)])

### Test the common interface functions
# Minimal stub model: holds parameters but implements no other interface
# methods, so the generic fallbacks should raise MethodError.
type TestModel <: AbstractModel
    parameters::Vector
end

@test numparas(TestModel(parameters1)) == 1
@test parameters(TestModel(parameters1)) == parameters1
@test_throws MethodError evaluate!(TestModel(parameters1),[1.0])
@test_throws MethodError dataindex(TestModel(parameters1))
@test_throws MethodError measurements(TestModel(parameters1))
@test_throws MethodError noisemodel(TestModel(parameters1))
########################################################################################################
### Test the geometry calculations
r1 = copy(evaluate!(model1,[3.0]))
r2 = copy(evaluate!(model2,[2.5]))
r3 = copy(evaluate!(model3,[2.0]))
@test_approx_eq loglikelihood(model1,r1) loglikelihood(noisemodel1,datavalues(measurements1),r1)
@test_approx_eq loglikelihood(model2,r2) loglikelihood(noisemodel1,datavalues(measurements1),r2)
@test_approx_eq loglikelihood(model3,r3) loglikelihood(noisemodel1,datavalues(measurements1),r3)
### Test the geometry function for zero-order samples
for m in [model1,model2,model3]
s = samples(:base,1,3,Float64,Float64)
copy!(s.values,[3.0,2.0,0.0])
geometry!(m,s)
ll = [loglikelihood(m,evaluate!(m,[3.0])) loglikelihood(m,evaluate!(m,[2.0])) -Inf]
@test_approx_eq s.logprior logprior(parameters1,s.values,Float64)
@test_approx_eq s.loglikelihood ll
end
###test the show functions
println()
println("====================")
println("Test show() function")
println("====================")
show(model1)
show(model2)
show(model3)
println("====================")
println("End show() function")
println("====================")
println()
| [
21017,
7469,
500,
5499,
284,
307,
973,
287,
4856,
2174,
198,
198,
21017,
14402,
5499,
329,
12744,
17633,
198,
8818,
6670,
259,
0,
7,
81,
3712,
38469,
11,
83,
3712,
23839,
38469,
11,
1845,
292,
3712,
38469,
8,
198,
220,
220,
220,
329... | 2.779294 | 1,246 |
<reponame>JakeGrainger/WhittleLikelihoodInference.jl
using Plots
@testset "plotting" begin
    # Each plotting recipe should reject malformed arguments with ArgumentError
    # (wrong argument types, arities, or negative/complex values).
    @testset "plotsdf" begin
        @test_throws ArgumentError plotsdf(1.0)
        @test_throws ArgumentError plotsdf(OU,1:2)
        @test_throws ArgumentError plotsdf(1.0,1:2)
        @test_throws ArgumentError plotsdf(OU(1.0,1.0),1)
        @test_throws ArgumentError plotsdf(OU(1.0,1.0),1:2,1)
    end
    @testset "plotasdf" begin
        @test_throws ArgumentError plotasdf(1.0)
        @test_throws ArgumentError plotasdf(OU,1:2,1)
        @test_throws ArgumentError plotasdf(OU(1.0,1.0),1:2)
        @test_throws ArgumentError plotasdf(OU(1.0,1.0),1:2,-1)
        @test_throws ArgumentError plotasdf(OU(1.0,1.0),1:2,-1,1)
    end
    @testset "plotacv" begin
        @test_throws ArgumentError plotacv(1.0)
        @test_throws ArgumentError plotacv(OU,1:2)
        @test_throws ArgumentError plotacv(OUUnknown{1}(1.0,1.0),1:2)
        @test_throws ArgumentError plotacv(OU,10,1)
        @test_throws ArgumentError plotacv(OU(1.0,1.0),1:2,1)
        @test_throws ArgumentError plotacv(OU(1.0,1.0),10,-1)
        @test_throws ArgumentError plotacv(OU(1.0,1.0),-10,1)
        @test_throws ArgumentError plotacv(OU(1.0,1.0),10,1im)
        @test_throws ArgumentError plotacv(OU(1.0,1.0),10,1,1)
    end
    @testset "plotei" begin
        @test_throws ArgumentError plotei(1.0)
        @test_throws ArgumentError plotei(OU,10,1)
        @test_throws ArgumentError plotei(OU(1.0,1.0),10)
        @test_throws ArgumentError plotei(OU(1.0,1.0),1:2,1)
        @test_throws ArgumentError plotei(OU(1.0,1.0),10,-1)
        @test_throws ArgumentError plotei(OU(1.0,1.0),-10,1)
        @test_throws ArgumentError plotei(OU(1.0,1.0),10,1im)
        @test_throws ArgumentError plotei(OU(1.0,1.0),10,1,1)
    end
end
27,
7856,
261,
480,
29,
43930,
46971,
3889,
14,
1199,
1206,
7594,
11935,
818,
4288,
13,
20362,
198,
3500,
1345,
1747,
198,
31,
9288,
2617,
366,
29487,
889,
1,
2221,
198,
220,
220,
220,
2488,
9288,
2617,
366,
489,
1747,
7568,
1,
2221... | 1.947084 | 926 |
# Visualization

"""
    plot_state_city(state)

Render `state` via Qiskit's `visualization.plot_state_city`.
"""
function plot_state_city(state)
    qiskit.visualization.plot_state_city(state)
end

"""
    plot_histogram(data)

Plot `data` via Qiskit's `visualization.plot_histogram`.
"""
function plot_histogram(data)
    qiskit.visualization.plot_histogram(data)
end
| [
2,
15612,
1634,
198,
198,
8818,
7110,
62,
5219,
62,
19205,
7,
5219,
8,
198,
220,
220,
220,
10662,
1984,
270,
13,
41464,
1634,
13,
29487,
62,
5219,
62,
19205,
7,
5219,
8,
198,
437,
198,
198,
8818,
7110,
62,
10034,
21857,
7,
7890,
... | 2.757576 | 66 |
# AbstractBandedMatrix must implement

# A BlockBandedMatrix is a BlockMatrix, but is not a BandedMatrix
abstract type AbstractBlockBandedMatrix{T} <: AbstractBlockMatrix{T} end

"""
    blockbandwidths(A)

Returns a tuple containing the lower and upper blockbandwidth of `A`.

The default (for a generic `AbstractMatrix`) treats every block as
potentially nonzero: `(nblocks(A,1)-1, nblocks(A,2)-1)`.
"""
blockbandwidths(A::AbstractMatrix) = (nblocks(A,1)-1 , nblocks(A,2)-1)

"""
    blockbandwidth(A,i)

Returns the lower blockbandwidth (`i==1`) or the upper blockbandwidth (`i==2`).
"""
blockbandwidth(A::AbstractMatrix, k::Integer) = blockbandwidths(A)[k]

"""
    blockbandrange(A)

Returns the range `-blockbandwidth(A,1):blockbandwidth(A,2)`.
"""
blockbandrange(A::AbstractMatrix) = -blockbandwidth(A,1):blockbandwidth(A,2)
# start/stop indices of the i-th column/row, bounded by actual matrix size
@inline blockcolstart(A::AbstractVecOrMat, i::Integer) = Block(max(i-colblockbandwidth(A,2)[i], 1))
@inline blockcolstop(A::AbstractVecOrMat, i::Integer) = Block(max(min(i+colblockbandwidth(A,1)[i], nblocks(A, 1)), 0))
@inline blockrowstart(A::AbstractVecOrMat, i::Integer) = Block(max(i-rowblockbandwidth(A,1)[i], 1))
@inline blockrowstop(A::AbstractVecOrMat, i::Integer) = Block(max(min(i+rowblockbandwidth(A,2)[i], nblocks(A, 2)), 0))

# Accept a `Block` index by unwrapping it to its integer value.
for Func in (:blockcolstart, :blockcolstop, :blockrowstart, :blockrowstop)
    @eval $Func(A, i::Block{1}) = $Func(A, Int(i))
end

# in-band block range of the i-th block column/row
@inline blockcolrange(A::AbstractVecOrMat, i) = blockcolstart(A,i):blockcolstop(A,i)
@inline blockrowrange(A::AbstractVecOrMat, i) = blockrowstart(A,i):blockrowstop(A,i)

# length of i-th column/row (clamped at 0 for empty bands)
@inline blockcollength(A::AbstractVecOrMat, i) = max(Int(blockcolstop(A, i)) - Int(blockcolstart(A, i)) + 1, 0)
@inline blockrowlength(A::AbstractVecOrMat, i) = max(Int(blockrowstop(A, i)) - Int(blockrowstart(A, i)) + 1, 0)

# this gives the block bandwidth in each block column/row
@inline colblockbandwidths(A::AbstractMatrix) = Fill.(blockbandwidths(A), nblocks(A,2))
@inline rowblockbandwidths(A::AbstractMatrix) = Fill.(blockbandwidths(A), nblocks(A,1))

@inline colblockbandwidth(bs, i::Int) = colblockbandwidths(bs)[i]
@inline rowblockbandwidth(bs, i::Int) = rowblockbandwidths(bs)[i]
"""
isblockbanded(A)
returns true if a matrix implements the block banded interface.
"""
isblockbanded(::AbstractBlockBandedMatrix) = true
isblockbanded(_) = false
# override bandwidth(A,k) for each AbstractBandedMatrix
# override inbands_getindex(A,k,j)
# return id of first empty diagonal intersected along row k
function _firstblockdiagrow(A::AbstractMatrix, k::Int)
a, b = blockrowstart(A, k), blockrowstop(A, k)
c = a == 1 ? b+1 : a-1
c-k
end
# return id of first empty diagonal intersected along column j
function _firstblockdiagcol(A::AbstractMatrix, j::Int)
a, b = blockcolstart(A, j), blockcolstop(A, j)
r = a == 1 ? b+1 : a-1
j-r
end
## BlockSlice1 is a conveneience for views
const BlockSlice1 = BlockSlice{Block{1,Int}}
######################################
# RaggedMatrix interface
######################################
@inline function colstart(A::AbstractBlockBandedMatrix, i::Integer)
bs = A.block_sizes.block_sizes
bs.cumul_sizes[1][Int(blockcolstart(A, _find_block(bs, 2, i)[1]))]
end
@inline function colstop(A::AbstractBlockBandedMatrix, i::Integer)
bs = A.block_sizes.block_sizes
bs.cumul_sizes[1][Int(blockcolstop(A, _find_block(bs, 2, i)[1]))+1]-1
end
@inline function rowstart(A::AbstractBlockBandedMatrix, i::Integer)
bs = A.block_sizes.block_sizes
bs.cumul_sizes[2][Int(blockrowstart(A, _find_block(bs, 1, i)[1]))]
end
@inline function rowstop(A::AbstractBlockBandedMatrix, i::Integer)
bs = A.block_sizes.block_sizes
bs.cumul_sizes[2][Int(blockrowstop(A, _find_block(bs, 1, i)[1]))+1]-1
end
# default implementation loops over all indices, including zeros
function fill!(A::AbstractBlockBandedMatrix, val::Any)
iszero(val) || throw(BandError(A))
fill!(A.data, val)
A
end
| [
2,
27741,
33,
12249,
46912,
1276,
3494,
198,
198,
2,
317,
9726,
33,
12249,
46912,
318,
257,
9726,
46912,
11,
475,
318,
407,
257,
10243,
276,
46912,
198,
397,
8709,
2099,
27741,
12235,
33,
12249,
46912,
90,
51,
92,
1279,
25,
27741,
1... | 2.656969 | 1,478 |
<gh_stars>1-10
function [D,J,JInv,X]=JacobiSphere(ksi,F,Grid)
Rad=Grid.Rad;
X1=Grid.Nodes(F.N(1)).P(1)...
+(Grid.Nodes(F.N(2)).P(1)-Grid.Nodes(F.N(1)).P(1))*ksi(1)...
+(Grid.Nodes(F.N(4)).P(1)-Grid.Nodes(F.N(1)).P(1))*ksi(2)...
+(Grid.Nodes(F.N(3)).P(1)-Grid.Nodes(F.N(4)).P(1)-Grid.Nodes(F.N(2)).P(1)+Grid.Nodes(F.N(1)).P(1))*ksi(1)*ksi(2);
X2=Grid.Nodes(F.N(1)).P(2)...
+(Grid.Nodes(F.N(2)).P(2)-Grid.Nodes(F.N(1)).P(2))*ksi(1)...
+(Grid.Nodes(F.N(4)).P(2)-Grid.Nodes(F.N(1)).P(2))*ksi(2)...
+(Grid.Nodes(F.N(3)).P(2)-Grid.Nodes(F.N(4)).P(2)-Grid.Nodes(F.N(2)).P(2)+Grid.Nodes(F.N(1)).P(2))*ksi(1)*ksi(2);
X3=Grid.Nodes(F.N(1)).P(3)...
+(Grid.Nodes(F.N(2)).P(3)-Grid.Nodes(F.N(1)).P(3))*ksi(1)...
+(Grid.Nodes(F.N(4)).P(3)-Grid.Nodes(F.N(1)).P(3))*ksi(2)...
+(Grid.Nodes(F.N(3)).P(3)-Grid.Nodes(F.N(4)).P(3)-Grid.Nodes(F.N(2)).P(3)+Grid.Nodes(F.N(1)).P(3))*ksi(1)*ksi(2);
f=Rad*(X1^2+X2^2+X3^2)^(-3/2);
dx1dX1=f*(X2^2+X3^2);
dx1dX2=-f*X1*X2;
dx1dX3=-f*X1*X3;
dx2dX1=dx1dX2;
dx2dX2=f*(X1^2+X3^2);
dx2dX3=-f*X2*X3;
dx3dX1=dx1dX3;
dx3dX2=dx2dX3;
dx3dX3=f*(X1^2+X2^2);
% dx1dX1=1;
% dx1dX2=-0;
% dx1dX3=0;
% dx2dX1=0;
% dx2dX2=1;
% dx2dX3=0;
% dx3dX1=0;
% dx3dX2=0;
% dx3dX3=1;
dX1dksi1=(Grid.Nodes(F.N(2)).P(1)-Grid.Nodes(F.N(1)).P(1))...
+(Grid.Nodes(F.N(3)).P(1)-Grid.Nodes(F.N(4)).P(1)...
-Grid.Nodes(F.N(2)).P(1)+Grid.Nodes(F.N(1)).P(1))*ksi(2);
dX1dksi2=(Grid.Nodes(F.N(4)).P(1)-Grid.Nodes(F.N(1)).P(1))...
+(Grid.Nodes(F.N(3)).P(1)-Grid.Nodes(F.N(4)).P(1)...
-Grid.Nodes(F.N(2)).P(1)+Grid.Nodes(F.N(1)).P(1))*ksi(1);
dX2dksi1=(Grid.Nodes(F.N(2)).P(2)-Grid.Nodes(F.N(1)).P(2))...
+(Grid.Nodes(F.N(3)).P(2)-Grid.Nodes(F.N(4)).P(2)...
-Grid.Nodes(F.N(2)).P(2)+Grid.Nodes(F.N(1)).P(2))*ksi(2);
dX2dksi2=(Grid.Nodes(F.N(4)).P(2)-Grid.Nodes(F.N(1)).P(2))...
+(Grid.Nodes(F.N(3)).P(2)-Grid.Nodes(F.N(4)).P(2)...
-Grid.Nodes(F.N(2)).P(2)+Grid.Nodes(F.N(1)).P(2))*ksi(1);
dX3dksi1=(Grid.Nodes(F.N(2)).P(3)-Grid.Nodes(F.N(1)).P(3))...
+(Grid.Nodes(F.N(3)).P(3)-Grid.Nodes(F.N(4)).P(3)...
-Grid.Nodes(F.N(2)).P(3)+Grid.Nodes(F.N(1)).P(3))*ksi(2);
dX3dksi2=(Grid.Nodes(F.N(4)).P(3)-Grid.Nodes(F.N(1)).P(3))...
+(Grid.Nodes(F.N(3)).P(3)-Grid.Nodes(F.N(4)).P(3)...
-Grid.Nodes(F.N(2)).P(3)+Grid.Nodes(F.N(1)).P(3))*ksi(1);
J=zeros(3,2);
J(1,1)=dx1dX1*dX1dksi1+dx1dX2*dX2dksi1+dx1dX3*dX3dksi1;
J(2,1)=dx2dX1*dX1dksi1+dx2dX2*dX2dksi1+dx2dX3*dX3dksi1;
J(3,1)=dx3dX1*dX1dksi1+dx3dX2*dX2dksi1+dx3dX3*dX3dksi1;
J(1,2)=dx1dX1*dX1dksi2+dx1dX2*dX2dksi2+dx1dX3*dX3dksi2;
J(2,2)=dx2dX1*dX1dksi2+dx2dX2*dX2dksi2+dx2dX3*dX3dksi2;
J(3,2)=dx3dX1*dX1dksi2+dx3dX2*dX2dksi2+dx3dX3*dX3dksi2;
D=norm(cross(J(:,1),J(:,2)),2);
if nargout > 2
X=[X1 X2 X3]*(Rad/sqrt((X1^2+X2^2+X3^2))); X=[X1 X2 X3];
end
JInv=inv(J'*J)*J'*sqrt(det(J'*J));
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
8818,
685,
35,
11,
41,
11,
41,
19904,
11,
55,
22241,
28821,
13411,
38882,
7,
591,
72,
11,
37,
11,
41339,
8,
198,
15546,
28,
41339,
13,
15546,
26,
198,
55,
16,
28,
41339,
13,
45,
4147,
... | 1.445545 | 1,919 |
<gh_stars>1-10
module JungleHelperSpiderBoss
using ..Ahorn, Maple
@mapdef Entity "JungleHelper/SpiderBoss" SpiderBoss(x::Integer, y::Integer, color::String="Blue", sprite::String="", webSprite::String="", flag::String="")
const bossColors = String["Blue", "Purple", "Red"]
const bossSprites = Dict{String, String}(
"Blue" => "JungleHelper/SpiderBoss/spider_b_00",
"Purple" => "JungleHelper/SpiderBoss/spider_p_00",
"Red" => "JungleHelper/SpiderBoss/spider_r_00"
)
const placements = Ahorn.PlacementDict(
"Spider Boss ($(color)) (Jungle Helper)" => Ahorn.EntityPlacement(
SpiderBoss,
"point",
Dict{String, Any}(
"color" => color
)
) for color in bossColors
)
Ahorn.editingOptions(entity::SpiderBoss) = Dict{String, Any}(
"color" => bossColors
)
function Ahorn.selection(entity::SpiderBoss)
x, y = Ahorn.position(entity)
return Ahorn.Rectangle(x - 9, y - 9, 17, 17)
end
function Ahorn.render(ctx::Ahorn.Cairo.CairoContext, entity::SpiderBoss, room::Maple.Room)
color = get(entity.data, "color", "Blue")
Ahorn.drawSprite(ctx, bossSprites[color], 0, 0)
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
26411,
47429,
41294,
37310,
198,
198,
3500,
11485,
10910,
1211,
11,
21249,
198,
198,
31,
8899,
4299,
20885,
366,
41,
13687,
47429,
14,
41294,
37310,
1,
12648,
37310,
7,
87,
3712,
46541,... | 2.633735 | 415 |
<gh_stars>1-10
import ONNXRunTime
function onnxruntime_infer(f, inputs...)
reversedims(a::AbstractArray{T,N}) where {T, N} = permutedims(a, N:-1:1)
mktempdir() do dir
modelfile = joinpath(dir, "model.onnx")
save(modelfile, f, size.(inputs)...)
model = ONNXRunTime.load_inference(modelfile)
return model(Dict(ONNXRunTime.input_names(model) .=> reversedims.(inputs))) |> values .|> reversedims |> Tuple
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
198,
11748,
440,
6144,
55,
10987,
7575,
198,
8818,
319,
77,
87,
43282,
62,
259,
2232,
7,
69,
11,
17311,
23029,
628,
197,
260,
690,
276,
12078,
7,
64,
3712,
23839,
19182,
90,
51,
11,
45,
... | 2.379888 | 179 |
#Load the Distributions package. Use `Pkg.install("Distributions")` to install first time.
using Distributions: TDist, ccdf
type regress_results
coefs
yhat
res
vcv
tstat
pval
end
# Keyword arguments are placed after semicolon.
# Symbols start with colon, e.g. `:symbol`.
function ols(y, X; corr=:none, lags::Int=Int(floor(size(X,1)^(1/4))))
# β̂ = X \ y is more stable than β̂ = inv(X'*X) * X' \ y
# see notes at bottom of case 1 notebook
β̂ = X \ y
ŷ = X * β̂
μ̂ = y - ŷ
T, K = size(X)
σ̂² = dot(μ̂, μ̂) / (T - K)
#use correction for variance covariance
if corr == :none
vcv = σ̂² * inv(X'*X)
elseif corr == :white
vcv = newey_west(X, μ̂, 0)
elseif corr == :newey_west
vcv = newey_west(X, μ̂, lags)
else
error("wrong argument for correction keyword")
end
# T statistics for H₀: βᵢ = 0
tstat = β̂ ./ sqrt(diag(vcv))
# absolute value and times two for double sided test
pval = 2 * ccdf(TDist(T-K), abs(tstat))
regress_results(β̂, ŷ, μ̂, vcv, tstat, pval)
end
function newey_west(X, μ̂, lags::Integer)
XtXInv = inv(X'*X)
T, K = size(X)
if lags==0 # White estimator
return XtXInv * X' * diagm(μ̂.^2) * X * XtXInv
end
vcv = zeros(K, K)
for t = 1:T
vcv += μ̂[t]^2 * (X[t,:] * X[t,:]')
end
for lag in 1:lags
w = 1 - lag / (lags + 1)
for t in (lag + 1):T # Calculates the off-diagonal terms
vcv += w * μ̂[t] * μ̂[t-lag] * (X[t-lag,:]*X[t,:]' + X[t,:]*X[t-lag,:]')
end
end
vcv = XtXInv * vcv * XtXInv
end
function gls(y, X, Ω)
P = chol(inv(Ω))
return ols(P*y, P*X)
end
function gmm(y, X, Z; corr=:none, lags=nothing)
T, Kx = size(X)
T, Kz = size(Z)
if corr==:none
# Generalized 1-step IV estimator
W = inv(Z'*Z)
elseif corr==:white | corr==:newey_west
if corr==:white
lags=0
end
gmm1_res = gmm(y,X,Z;corr=:none)
μ̂ = gmm1_res.res
W = zeros(Kz, Kz)
for lag in 0:lags
w = 1 - lag / (lags + 1)
for t in (lag + 1):T
# Calculates the off-diagonal terms
update = w * μ̂[t] * μ̂[t-lag] * (Z[t-lag,:]*Z[t,:]' + Z[t,:]*Z[t-lag,:]')
W = W + update
end
end
else
error("wrong argument for correction keyword")
end
ZtX = Z'*X
XtZ = X'*Z
XtZ_W_ZtXInv = inv(XtZ*W*ZtX)
β̂ = XtZ_W_ZtXInv*(XtZ*W*Z'*y)
ŷ = X * β̂
μ̂ = y - ŷ
σ̂² = dot(μ̂, μ̂) / (T - Kz)
vcv = σ̂² * XtZ_W_ZtXInv
# T statistics for H₀: β₀ = 0
tstat = β̂ ./ sqrt(diag(vcv))
# absolute value and times two for double sided test
pval = 2 * ccdf(TDist(T-K), abs(tstat))
return regress_results(β̂, ŷ, μ̂, vcv, tstat, pval)
end
| [
2,
8912,
262,
46567,
507,
5301,
13,
5765,
4600,
47,
10025,
13,
17350,
7203,
20344,
2455,
507,
4943,
63,
284,
2721,
717,
640,
13,
198,
3500,
46567,
507,
25,
13320,
396,
11,
36624,
7568,
198,
198,
4906,
50252,
62,
43420,
198,
220,
220... | 1.852048 | 1,514 |
<reponame>stevengj/GMT.jl<filename>src/grd2kml.jl
"""
grd2kml(cmd0::String="", arg1=nothing, kwargs...)
Reads a 2-D grid file and makes a quadtree of PNG images and KML wrappers for Google Earth
using the selected tile size [256x256 pixels].
Full option list at [`grd2kml`]($(GMTdoc)grd2kml.html)
Parameters
----------
- $(GMT.opt_C)
- **E** | **url** :: [Type => Str] `Arg = url`
Instead of hosting the files locally, prepend a site URL. The top-level prefix.kml file
will then use this URL to find the other files it references.``
($(GMTdoc)grd2kml.html#e)
- **F** | **filter** :: [Type => Str]
Specifies the filter to use for the downsampling of the grid for more distant viewing.
Choose among boxcar, cosine arch, gaussian, or median [Gaussian].
($(GMTdoc)grd2kml.html#e)
- **H** | **sub_pixel** :: [Type => Int] `Arg = factor`
Improve the quality of rasterization by passing the sub-pixel smoothing factor to psconvert.
($(GMTdoc)grd2kml.html#h)
- **I** | **shade** | **shading** | **intensity** :: [Type => Str | GMTgrid]
Gives the name of a grid file or GMTgrid with intensities in the (-1,+1) range,
or a grdgradient shading flags.
($(GMTdoc)grd2kml.html#i)
- **L** | **tile_size** :: [Type => Number] `Arg = tilesize`
Sets the fixed size of the image building blocks. Must be an integer that is radix 2.
Typical values are 256 or 512 [256].
($(GMTdoc)grd2kml.html#l)
- **N** | **prefix** [Type => Str] `Arg = prefix`
Sets a unique name prefixed used for the top-level KML filename and the directory where all
referenced KML files and PNG images will be written [GMT_Quadtree].
($(GMTdoc)grd2kml.html#n)
- **Q** | **nan_t** | **nan_alpha** :: [Type => Bool]
Make grid nodes with z = NaN transparent, using the color-masking feature in PostScript Level 3.
($(GMTdoc)grd2kml.html#q)
- **T** | **title** :: [Type => Str] `Arg = title`
Sets the title of the top-level document (i.e., its description).
($(GMTdoc)grd2kml.html#t)
- $(GMT.opt_V)
- $(GMT.opt_write)
- $(GMT.opt_append)
- $(GMT.opt_f)
"""
function grd2kml(cmd0::String="", arg1=nothing; kwargs...)
arg2 = nothing; arg3 = nothing; # for CPT and/or illum
length(kwargs) == 0 && occursin(" -", cmd0) && return monolitic("grd2kml", cmd0, arg1, arg2)
d = init_module(false, kwargs...)[1] # Also checks if the user wants ONLY the HELP mode
cmd, = parse_common_opts(d, "", [:V_params :f])
cmd = parse_these_opts(cmd, d, [[:E :url], [:F :filter], [:H :sub_pixel], [:L :tile_size],
[:N :prefix], [:Q :nan_t :nan_alpha], [:T :title]])
cmd, got_fname, arg1 = find_data(d, cmd0, cmd, arg1) # Find how data was transmitted
cmd, N_used, arg1, arg2, = get_cpt_set_R(d, cmd0, cmd, opt_R, got_fname, arg1, arg2)
cmd, arg1, arg2, arg3 = common_shade(d, cmd, arg1, arg2, arg3, nothing, "grd2kml")
common_grd(d, "grd2kml " * cmd, arg1, arg2, arg3) # Finish build cmd and run it
end
# ---------------------------------------------------------------------------------------------------
grd2kml(arg1, cmd0::String=""; kw...) = grd2kml(cmd0, arg1; kw...) | [
27,
7856,
261,
480,
29,
4169,
574,
70,
73,
14,
49424,
13,
20362,
27,
34345,
29,
10677,
14,
2164,
67,
17,
74,
4029,
13,
20362,
198,
37811,
198,
197,
2164,
67,
17,
74,
4029,
7,
28758,
15,
3712,
10100,
2625,
1600,
1822,
16,
28,
223... | 2.532591 | 1,258 |
using SimLynx
using Test
@testset "SimLynx.jl" begin
@test greet() == "Hello World!"
end
| [
3500,
3184,
37207,
87,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
8890,
37207,
87,
13,
20362,
1,
2221,
198,
220,
220,
220,
2488,
9288,
12589,
3419,
6624,
366,
15496,
2159,
2474,
198,
437,
198
] | 2.611111 | 36 |
# Note that this script can accept some limited command-line arguments, run
# `julia build_tarballs.jl --help` to see a usage message.
using BinaryBuilder
# Collection of sources required to build LCIOWrapBuilder
sources = [
"LCIOWrapBuilder"
]
# Bash recipe for building across all platforms
function getscript(version)
shortversion = version[1:3]
return """
cd \$WORKSPACE/srcdir
mkdir build && cd build
cmake -DCMAKE_INSTALL_PREFIX=\$prefix -DCMAKE_TOOLCHAIN_FILE=/opt/\$target/\$target.toolchain -DCMAKE_FIND_ROOT_PATH=\$prefix -DJulia_PREFIX=\$prefix ..
VERBOSE=ON cmake --build . --config Release --target install
"""
end
# These are the platforms we will build for by default, unless further
# platforms are passed in on the command line
platforms = Platform[
Linux(:x86_64, libc=:glibc, compiler_abi=CompilerABI(:gcc7, :cxx11)),
Linux(:x86_64, libc=:glibc, compiler_abi=CompilerABI(:gcc8, :cxx11)),
MacOS(:x86_64, compiler_abi=CompilerABI(:gcc7)),
MacOS(:x86_64, compiler_abi=CompilerABI(:gcc8)),
]
# The products that we will ensure are always built
products(prefix) = [
LibraryProduct(prefix, "liblciowrap", :lciowrap)
]
# Dependencies that must be installed before this package can be built
dependencies = [
"https://github.com/JuliaInterop/libcxxwrap-julia/releases/download/v0.6.2/build_libcxxwrap-julia-1.0.v0.6.2.jl"
"https://github.com/JuliaPackaging/JuliaBuilder/releases/download/v1.0.0-2/build_Julia.v1.0.0.jl"
"https://github.com/jstrube/LCIOBuilder/releases/download/v2.12.1-4/build_LCIOBuilder.v2.12.1-4.jl"
]
# Build the tarballs, and possibly a `build.jl` as well.
version_number = get(ENV, "TRAVIS_TAG", "")
if version_number == ""
version_number = "v0.99"
end
build_tarballs(ARGS, "LCIOWrapBuilder-1.0", VersionNumber(version_number), sources, getscript("1.0.0"), platforms, products, dependencies)
| [
2,
5740,
326,
428,
4226,
460,
2453,
617,
3614,
3141,
12,
1370,
7159,
11,
1057,
198,
2,
4600,
73,
43640,
1382,
62,
18870,
21591,
13,
20362,
1377,
16794,
63,
284,
766,
257,
8748,
3275,
13,
198,
3500,
45755,
32875,
198,
198,
2,
12251,
... | 2.652482 | 705 |
const CuDense{ElT,VecT} = Dense{ElT,VecT} where {VecT<:CuVector}
const CuDenseTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:CuDense}
Dense{T, SA}(x::Dense{T, SB}) where {T<:Number, SA<:CuArray, SB<:Array} = Dense{T, SA}(CuArray(x))
Dense{T, SA}(x::Dense{T, SB}) where {T<:Number, SA<:Array, SB<:CuArray} = Dense{T, SA}(collect(x.data))
Dense{T, S}(size::Integer) where {T, S<:CuArray{<:T}} = Dense{T, S}(CuArrays.zeros(T, size))
function Dense{T, S}(x::T, size::Integer) where {T, S<:CuArray{<:T}}
arr = CuArray{T}(undef, size)
fill!(arr, x)
Dense{T, S}(arr)
end
Base.collect(x::CuDense{T}) where {T<:Number} = Dense(collect(x.data))
Base.complex(::Type{Dense{ElT, VT}}) where {ElT, VT<:CuArray} = Dense{complex(ElT),CuVector{complex(ElT), Nothing}}
CuArrays.CuArray(x::CuDense{ElT}) where {ElT} = CuVector{ElT}(data(x))
CuArrays.CuArray{ElT, N}(x::CuDenseTensor{ElT, N}) where {ElT, N} = CuArray{ElT, N}(reshape(data(store(x)), dims(inds(x))))
CuArrays.CuArray(x::CuDenseTensor{ElT, N}) where {ElT, N} = CuArray{ElT, N}(x)
*(D::Dense{T, AT},x::S) where {T,AT<:CuArray,S<:Number} = Dense(x .* data(D))
Base.:(==)(::Type{<:CuDense{ElT1,CVec1}}, ::Type{<:CuDense{ElT2,CVec2}}) where {ElT1,ElT2,CVec1,CVec2} = (ElT1 == ElT2)
Base.getindex(D::CuDense{<:Number}) = collect(data(D))[]
Base.getindex(D::CuDenseTensor{<:Number, 0}) = store(D)[]
LinearAlgebra.norm(T::CuDenseTensor) = norm(data(store(T)))
# This is for type promotion for Scalar*Dense
function Base.promote_rule(::Type{<:Dense{ElT1,CuVector{ElT1}}},
::Type{ElT2}) where {ElT1,
ElT2<:Number}
ElR = promote_type(ElT1,ElT2)
VecR = CuVector{ElR}
return Dense{ElR,VecR}
end
function Base.permutedims(T::CuDenseTensor{<:Number,N},
perm::NTuple{N,Int}) where {N}
Tp = similar(T,permute(inds(T),perm))
permute!(Tp,T)
return Tp
end
function Base.permutedims!(R::CuDenseTensor{<:Number,N},
T::CuDenseTensor{<:Number,N},
perm::NTuple{N,Int}) where {N}
return permutedims!!(R, T, perm)
end
function permutedims!!(B::Tensor{ElT,N,StoreT,IndsB},
A::Tensor{ElT,N,StoreT,IndsA},
perm::NTuple{N,Int},
f::Function=(r,t)->permute!(r,t)) where {N,ElT,IndsB,IndsA,StoreT<:CuDense{ElT}}
Ais = inds(A)
Bis = permute(inds(A), perm)
B = f(B, A)
return B
end
function Base.similar(::Type{<:CuDenseTensor{ElT}},
inds) where {ElT}
storage_arr = CuVector{ElT}(undef,dim(inds))
return Tensor(Dense(storage_arr),inds)
end
function outer!(R::CuDenseTensor,
T1::CuDenseTensor,
T2::CuDenseTensor)
R_dat = vec(array(T1))*transpose(vec(array(T2)))
copyto!(data(store(R)), vec(R_dat))
inds_outer = unioninds(inds(T1),inds(T2))
return R
end
function contract!!(R::CuDenseTensor{<:Number,NR},
labelsR::NTuple{NR},
T1::CuDenseTensor{<:Number,N1},
labelsT1::NTuple{N1},
T2::CuDenseTensor{<:Number,N2},
labelsT2::NTuple{N2}) where {NR,N1,N2}
if N1==0
# TODO: replace with an add! function?
# What about doing `R .= T1[] .* PermutedDimsArray(T2,perm)`?
perm = getperm(labelsR,labelsT2)
newT2 = Tensor(Dense(data(store(T1)).*data(store(T2))), inds(T2))
permute!(R,newT2)
elseif N2==0
perm = getperm(labelsR,labelsT1)
newT1 = Tensor(Dense(data(store(T2)).*data(store(T1))), inds(T1))
permute!(R,newT1)
elseif N1+N2==NR
# TODO: permute T1 and T2 appropriately first (can be more efficient
# then permuting the result of T1⊗T2)
# TODO: implement the in-place version directly
R = outer!!(R,T1,T2)
inds_outer = unioninds(inds(T1),inds(T2))
R = Tensor(store(R), inds_outer)
else
R = _contract!!(R,labelsR,T1,labelsT1,T2,labelsT2)
end
return R
end
function permutedims!!(B::CuDenseTensor{ElT,0},
A::CuDenseTensor{ElT,0},
perm::NTuple{0,Int},
f=(r,t)->permute!(r,t)) where {ElT<:Number}
Cs = f(B, A)
return Tensor(Dense(vec(Cs)), IndexSet{0}())
end
function permutedims!!(B::CuDenseTensor{ElT,N},
A::CuDenseTensor{ElT,0},
perm::NTuple{N,Int},
f=(r,t)->permute!(r,t)) where {N, ElT<:Number}
Cis = permute(inds(B), perm)
Cs = f(B, A)
return Tensor(Dense(vec(Cs)), Cis)
end
function _contract!(CT::CuDenseTensor{El,NC},
AT::CuDenseTensor{El,NA},
BT::CuDenseTensor{El,NB},
props::ContractionProperties,
α::Number=one(El),β::Number=zero(El)) where {El,NC,NA,NB}
Ainds = inds(AT)
Adims = dims(Ainds)
Binds = inds(BT)
Bdims = dims(Binds)
Cinds = inds(CT)
Cdims = dims(Cinds)
Adata = reshape(data(store(AT)),Adims)
Bdata = reshape(data(store(BT)),Bdims)
Cdata = reshape(data(store(CT)),Cdims)
contracted = commoninds(Ainds, Binds)
A_only = uniqueinds(Ainds, Binds)
B_only = uniqueinds(Binds, Ainds)
ind_dict = Vector{Index}()
for (idx, i) in enumerate(contracted)
push!(ind_dict, i)
end
if length(A_only) > 0
for (idx, i) in enumerate(A_only)
push!(ind_dict, i)
end
end
if length(B_only) > 0
for (idx, i) in enumerate(B_only)
push!(ind_dict, i)
end
end
ctainds = zeros(Int, length(Ainds))
ctbinds = zeros(Int, length(Binds))
ctcinds = zeros(Int, length(Cinds))
for (ii, ia) in enumerate(Ainds)
ctainds[ii] = findfirst(x->x==ia, ind_dict)
end
for (ii, ib) in enumerate(Binds)
ctbinds[ii] = findfirst(x->x==ib, ind_dict)
end
for (ii, ic) in enumerate(Cinds)
ctcinds[ii] = findfirst(x->x==ic, ind_dict)
end
id_op = CuArrays.CUTENSOR.CUTENSOR_OP_IDENTITY
dict_key = ""
for cc in zip(ctcinds, Cdims)
dict_key *= string(cc[1]) * "," * string(cc[2]) * ","
end
for aa in zip(ctainds, Adims)
dict_key *= string(aa[1]) * "," * string(aa[2]) * ","
end
for bb in zip(ctbinds, Bdims)
dict_key *= string(bb[1]) * "," * string(bb[2]) * ","
end
if haskey(ENV, "CUTENSOR_AUTOTUNE") && tryparse(Int, ENV["CUTENSOR_AUTOTUNE"]) == 1
if haskey(ContractionPlans, dict_key)
dict_val = ContractionPlans[dict_key]
algo = dict_val
#plan = dict_val[2]
Cdata = CuArrays.CUTENSOR.contraction!(α, Adata, Vector{Char}(ctainds), id_op, Bdata, Vector{Char}(ctbinds), id_op, β, Cdata, Vector{Char}(ctcinds), id_op, id_op; algo=algo)
else
# loop through all algos
# pick the fastest one
# store that plan!
best_time = 1e6
best_plan = nothing
best_algo = nothing
max_algos = Ref{Int32}(C_NULL)
CuArrays.CUTENSOR.cutensorContractionMaxAlgos(max_algos)
# fix once the other options are documented
#algos = collect(Cint(CuArrays.CUTENSOR.CUTENSOR_ALGO_GETT):Cint(max_algos[] - 1))
algos = collect(Cint(CuArrays.CUTENSOR.CUTENSOR_ALGO_GETT):Cint(-1))
for algo in reverse(algos)
try
#this_plan = CuArrays.CUTENSOR.contraction_plan(Adata, Vector{Char}(ctainds), id_op, Bdata, Vector{Char}(ctbinds), id_op, Cdata, Vector{Char}(ctcinds), id_op, id_op; algo=CuArrays.CUTENSOR.cutensorAlgo_t(algo), pref=CuArrays.CUTENSOR.CUTENSOR_WORKSPACE_MAX)
Cdata, this_time, bytes, gctime, memallocs = @timed CuArrays.CUTENSOR.contraction!(α, Adata, Vector{Char}(ctainds), id_op, Bdata, Vector{Char}(ctbinds), id_op, β, Cdata, Vector{Char}(ctcinds), id_op, id_op; algo=CuArrays.CUTENSOR.cutensorAlgo_t(algo))
if this_time < best_time
best_time = this_time
#best_plan = this_plan
best_algo = CuArrays.CUTENSOR.cutensorAlgo_t(algo)
end
catch err
@warn "Algorithm $algo not supported"
end
end
ContractionPlans[dict_key] = best_algo
end
else
Cdata = CuArrays.CUTENSOR.contraction!(α, Adata, Vector{Char}(ctainds), id_op, Bdata, Vector{Char}(ctbinds), id_op, β, Cdata, Vector{Char}(ctcinds), id_op, id_op)
end
return parent(Cdata)
end
function Base.:+(B::CuDenseTensor, A::CuDenseTensor)
opC = CUTENSOR.CUTENSOR_OP_IDENTITY
opA = CUTENSOR.CUTENSOR_OP_IDENTITY
opAC = CUTENSOR.CUTENSOR_OP_ADD
Ais = inds(A)
Bis = inds(B)
ind_dict = Vector{Index}()
for (idx, i) in enumerate(inds(A))
push!(ind_dict, i)
end
Adata = data(store(A))
Bdata = data(store(B))
reshapeBdata = reshape(Bdata,dims(Bis))
reshapeAdata = reshape(Adata,dims(Ais))
ctainds = zeros(Int, length(Ais))
ctbinds = zeros(Int, length(Bis))
for (ii, ia) in enumerate(Ais)
ctainds[ii] = findfirst(x->x==ia, ind_dict)
end
for (ii, ib) in enumerate(Bis)
ctbinds[ii] = findfirst(x->x==ib, ind_dict)
end
ctcinds = copy(ctbinds)
C = CuArrays.zeros(eltype(Bdata), dims(Bis))
CUTENSOR.elementwiseBinary!(one(eltype(Adata)), reshapeAdata, ctainds, opA, one(eltype(Bdata)), reshapeBdata, ctbinds, opC, C, ctcinds, opAC)
copyto!(data(store(B)), vec(C))
return B
end
function Base.:+(B::CuDense, Bis::IndexSet, A::CuDense, Ais::IndexSet)
opA = CUTENSOR.CUTENSOR_OP_IDENTITY
opC = CUTENSOR.CUTENSOR_OP_IDENTITY
opAC = CUTENSOR.CUTENSOR_OP_ADD
ind_dict = Vector{Index}()
for (idx, i) in enumerate(Ais)
push!(ind_dict, i)
end
Adata = data(A)
Bdata = data(B)
reshapeBdata = reshape(Bdata,dims(Bis))
reshapeAdata = reshape(Adata,dims(Ais))
ctainds = zeros(Int, length(Ais))
ctbinds = zeros(Int, length(Bis))
for (ii, ia) in enumerate(Ais)
ctainds[ii] = findfirst(x->x==ia, ind_dict)
end
for (ii, ib) in enumerate(Bis)
ctbinds[ii] = findfirst(x->x==ib, ind_dict)
end
ctcinds = copy(ctbinds)
C = CuArrays.zeros(eltype(Bdata), dims(Bis))
Cis = Bis
C = CUTENSOR.elementwiseBinary!(1, reshapeAdata, ctainds, opA, 1, reshapeBdata, ctbinds, opC, C, ctcinds, opAC)
copyto!(data(B), vec(C))
return C
end
function Base.:-(B::CuDenseTensor, A::CuDenseTensor)
opC = CUTENSOR.CUTENSOR_OP_IDENTITY
opA = CUTENSOR.CUTENSOR_OP_IDENTITY
opAC = CUTENSOR.CUTENSOR_OP_ADD
Ais = inds(A)
Bis = inds(B)
ind_dict = Vector{Index}()
for (idx, i) in enumerate(inds(A))
push!(ind_dict, i)
end
Adata = data(store(A))
Bdata = data(store(B))
reshapeBdata = reshape(Bdata,dims(Bis))
reshapeAdata = reshape(Adata,dims(Ais))
ctainds = zeros(Int, length(Ais))
ctbinds = zeros(Int, length(Bis))
for (ii, ia) in enumerate(Ais)
ctainds[ii] = findfirst(x->x==ia, ind_dict)
end
for (ii, ib) in enumerate(Bis)
ctbinds[ii] = findfirst(x->x==ib, ind_dict)
end
ctcinds = copy(ctbinds)
C = CuArrays.zeros(eltype(Bdata), dims(Bis))
CUTENSOR.elementwiseBinary!(one(eltype(Adata)), reshapeAdata, ctainds, opA, -one(eltype(Bdata)), reshapeBdata, ctbinds, opC, C, ctcinds, opAC)
copyto!(data(store(B)), vec(C))
return B
end
function Base.:-(A::CuDense, Ais::IndexSet, B::CuDense, Bis::IndexSet)
opA = CUTENSOR.CUTENSOR_OP_IDENTITY
opC = CUTENSOR.CUTENSOR_OP_IDENTITY
opAC = CUTENSOR.CUTENSOR_OP_ADD
ind_dict = Vector{Index}()
for (idx, i) in enumerate(Ais)
push!(ind_dict, i)
end
Adata = data(A)
Bdata = data(B)
reshapeBdata = reshape(Bdata,dims(Bis))
reshapeAdata = reshape(Adata,dims(Ais))
ctainds = zeros(Int, length(Ais))
ctbinds = zeros(Int, length(Bis))
for (ii, ia) in enumerate(Ais)
ctainds[ii] = findfirst(x->x==ia, ind_dict)
end
for (ii, ib) in enumerate(Bis)
ctbinds[ii] = findfirst(x->x==ib, ind_dict)
end
ctcinds = copy(ctbinds)
C = CuArrays.zeros(eltype(Bdata), dims(Bis))
Cis = Bis
C = CUTENSOR.elementwiseBinary!(one(eltype(Adata)), reshapeAdata, ctainds, opA, -one(eltype(Bdata)), reshapeBdata, ctbinds, opC, C, ctcinds, opAC)
copyto!(data(B), vec(C))
return C
end
function Base.permute!(B::CuDenseTensor, A::CuDenseTensor)
Ais = inds(A)
Bis = inds(B)
ind_dict = Vector{Index}()
for (idx, i) in enumerate(Ais)
push!(ind_dict, i)
end
Adata = data(store(A))
Bdata = data(store(B))
reshapeBdata = reshape(Bdata,dims(Bis))
reshapeAdata = reshape(Adata,dims(Ais))
ctainds = zeros(Int, length(Ais))
ctbinds = zeros(Int, length(Bis))
for (ii, ia) in enumerate(Ais)
ctainds[ii] = findfirst(x->x==ia, ind_dict)
end
for (ii, ib) in enumerate(Bis)
ctbinds[ii] = findfirst(x->x==ib, ind_dict)
end
CuArrays.CUTENSOR.permutation!(one(eltype(Adata)), reshapeAdata, Vector{Char}(ctainds), reshapeBdata, Vector{Char}(ctbinds))
return vec(reshapeBdata)
end
function Base.permute!(B::CuDense, Bis::IndexSet, A::CuDense, Ais::IndexSet)
ind_dict = Vector{Index}()
for (idx, i) in enumerate(Ais)
push!(ind_dict, i)
end
Adata = data(A)
Bdata = data(B)
reshapeBdata = reshape(Bdata,dims(Bis))
reshapeAdata = reshape(Adata,dims(Ais))
ctainds = zeros(Int, length(Ais))
ctbinds = zeros(Int, length(Bis))
for (ii, ia) in enumerate(Ais)
ctainds[ii] = findfirst(x->x==ia, ind_dict)
end
for (ii, ib) in enumerate(Bis)
ctbinds[ii] = findfirst(x->x==ib, ind_dict)
end
CuArrays.CUTENSOR.permutation!(one(eltype(Adata)), reshapeAdata, Vector{Char}(ctainds), reshapeBdata, Vector{Char}(ctbinds))
return vec(reshapeBdata)
end
| [
9979,
14496,
35,
1072,
90,
9527,
51,
11,
53,
721,
51,
92,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
796,
360,
1072,
90,
9527,
51,
11,
53,
721,
51,
92,
810,
1391,
53,
721,
51,
27,
25,
461... | 1.967434 | 6,909 |
<reponame>JuliaFEM/FEMBase.jl<filename>test/test_fields.jl
using FEMBase, Test
# From the beginning of a project we had a clear concept in our mind: "everything
# is a field". That is, everything can vary temporally and spatially. We think
# that constant is just a special case of field which does not vary in temporal
# nor spatial direction. Fields can vary in spatial direction, i.e. can be either
# constant or variable, and in temporal direction, i.e. can be time variant or
# time invariant. From this pondering we can think that there exists four kind of
# (discrete) fields:
# - discrete, constant, time invariant (DCTI)
# - discrete, variable, time invariant (DVTI)
# - discrete, constant, time variant (DCTV)
# - discrete, variable, time variant (DVTV)
# Discrete, in this context, means that field is defined in point-wise in
# $1 \ldots n$ locations, from where it is then interpolated to whole domain
# using some interpolation polynomials, i.e.
# ```math
# u(\xi, t) = \sum_{i} u_i[t] N_{i}(\xi,t),
# ```math
# where
# $N_{i}(\xi, t)$
# is the basis function or interpolation polymial corresponding to $i$^{th}
# discrete value and
# $u_{i}$
# is the discrete value.
# Then we have continuous fields, which are defined in whole domain, or at least
# not point-wise. By following the already used abbreviations, we have four more
# fields:
# - continuous, constant, time invariant (CCTI)
# - continuous, variable, time invariant (CVTI)
# - continuous, constant, time variant (DCTV)
# - continuous, variable, time variant (CVTV)
# Continuous, again in this context, does not mean that field has to be defined
# everywhere. It's enough that it's defined in function of spatial and/or temporal
# coordinates, i.e. we have $u \equiv u(\xi, t)$, without a some spesific basis
# needed to interpolate from discrete values.
# Field itself can be in principle anything. However, usually either scalar,
# vector or tensor (matrix). Time does not to have be real, it can be for example
# angle of some rotating machine or even complex value.
# From these starting points, we assume that the mentioned field system can
# describe all imaginable situations.
# ## Creating new fields
# For discrete fields that are varying in spatial direction, value for each
# discrete point is defined using NTuple. The order of points is implicitly
# assumed to be same than node ordering in ABAQUS. That is, first corner nodes
# in anti-clockwise direction and after that middle nodes.
# For example, `(1, 2, 3, 4)` is a scalar field having length of 4 and
# `([1,2],[2,3],[3,4],[4,5])` is a vector field having length of 4.
# For fields that are varying in temporal direction, `time => value` syntax is
# used. The first item in pair is time (or similar) and second item is value
# assigned to that time. For example, `0.0 => 1.0` is a time-dependent scalar
# field having value 1.0 at time 0.0.
# ## Dicrete, constant, time invariant field (DCTI)
# The most simple field is a field that is constant in both time and spatial
# direction. Discrete, constant, time invariant field. For example, youngs
# modulus could be this kind of field.
a = DCTI(1)
# Accessing data is done using `interpolate`. In FEM codes, we try to hide the
# actual type of the field, so for example interpolating a constant field works,
# but the result is quite unsurprising.
@test interpolate(a, 0.0) == 1
# The field value can be updated with the `update!` function:
update!(a, 2)
@test a == 2
# A constant field of course doesn't have to be a scalar field. It can be e.g.
# a vector field. I use here the package Tensors.jl because of its excellent
# performance and other features, but a normal `Vector` would work just fine
# also:
using Tensors
b = DCTI(Vec(1, 2))
# Interpolation, again, returns just the original data:
@test interpolate(b, 0.0) == [1, 2]
# Updating the field is done using the `update!` function:
update!(b, Vec(2, 3))
@test interpolate(b, 0.0) == [2, 3]
# Constant tensor field:
c = DCTI(Tensor{2,2}((1.0, 2.0, 3.0, 4.0)))
# Data can be accessed also using `getindex`. Also things like `length` and
# `size` are defined.
@test interpolate(c, 0.0) == [1 3; 2 4]
@test c[1] == [1 3; 2 4]
@test length(c) == 4
@test size(c) == (2, 2)
# For now everything might look like extra complexity, but later on we see how
# to combine a field with some basis functions in order to interpolate in the
# element domain. Another nice feature is that we can interpolate fields in
# time. In this particular case of time invariant fields it of course doesn't
# give anything extra.
# ## Discrete, variable, time invariant fields (DVTI)
@testset "DVTI field" begin
    # scalar field: one value per discrete point, constant in time
    a = DVTI((1, 2))
    @test a[1] == 1
    @test a[2] == 2
    @test interpolate(a, 0.0) == (1, 2)
    update!(a, (2, 3))
    @test a == (2, 3)
    @test (2, 3) == a
    # vector field
    b = DVTI(([1, 2], [2, 3]))
    @test b[1] == [1, 2]
    @test b[2] == [2, 3]
    @test interpolate(b, 0.0) == ([1, 2], [2, 3])
    update!(b, ([2, 3], [4, 5]))
    @test b == ([2, 3], [4, 5])
    # tensor field
    c = DVTI(([1 2; 3 4], [2 3; 4 5]))
    @test c[1] == [1 2; 3 4]
    @test c[2] == [2 3; 4 5]
    @test interpolate(c, 0.0) == ([1 2; 3 4], [2 3; 4 5])
    update!(c, ([2 3; 4 5], [5 6; 7 8]))
    @test c == ([2 3; 4 5], [5 6; 7 8])
    # varargs constructor is equivalent to the tuple constructor
    d = DVTI(2, 3)
    @test a == d
end
@testset "DCTV field" begin
    # scalar field: constant in space, linearly interpolated in time and
    # clamped outside the stored time range
    a = DCTV(0.0 => 0.0, 1.0 => 1.0)
    @test isapprox(interpolate(a, -1.0), 0.0)
    @test isapprox(interpolate(a, 0.0), 0.0)
    @test isapprox(interpolate(a, 0.5), 0.5)
    @test isapprox(interpolate(a, 1.0), 1.0)
    # updating an existing time replaces its value
    update!(a, 1.0 => 2.0)
    @test isapprox(interpolate(a, 0.5), 1.0)
    # updating with a new time appends a new sample
    update!(a, 2.0 => 1.0)
    @test isapprox(interpolate(a, 1.5), 1.5)
    # vector field
    b = DCTV(0.0 => [1.0, 2.0], 1.0 => [2.0, 3.0])
    @test isapprox(interpolate(b, 0.5), [1.5, 2.5])
    # tensor field
    c = DCTV(0.0 => [1.0 2.0; 3.0 4.0], 1.0 => [2.0 3.0; 4.0 5.0])
    @test isapprox(interpolate(c, 0.5), [1.5 2.5; 3.5 4.5])
end
@testset "DVTV field" begin
    # scalar field: per-point values, varying in time
    a = DVTV(0.0 => (0.0, 1.0), 1.0 => (1.0, 0.0))
    update!(a, 2.0 => (2.0, 0.0))
    r = interpolate(a, 0.5)
    @test isapprox(r[1], 0.5)
    @test isapprox(r[2], 0.5)
    update!(a, 2.0 => (4.0, 0.0))
end
@testset "CVTV field" begin
    # continuous field: an arbitrary function of (spatial coords, time)
    f = CVTV((xi, t) -> xi[1] * xi[2] * t)
    @test isapprox(f([1.0, 2.0], 3.0), 6.0)
end
@testset "Dictionary fields" begin
    # dictionary-backed fields allow sparse, id-based indexing
    X = Dict(1 => [0.0, 0.0], 1000 => [1.0, 0.0], 100000 => [1.0, 1.0])
    G = DVTId(X)
    @test isapprox(G[1], X[1])
    @test isapprox(G[1000], X[1000])
    @test isapprox(G[100000], X[100000])
    Y = Dict(1 => [2.0, 2.0], 1000 => [3.0, 2.0], 100000 => [3.0, 3.0])
    F = DVTVd(0.0 => X, 1.0 => Y)
    @test isapprox(interpolate(F, 0.5)[100000], [2.0, 2.0])
end
@testset "update dictionary field" begin
    f1 = Dict(1 => 1.0, 2 => 2.0, 3 => 3.0)
    f2 = Dict(1 => 2.0, 2 => 3.0, 3 => 4.0)
    fld = DVTVd(0.0 => f1)
    update!(fld, 1.0 => f2)
    @test isapprox(interpolate(fld, 0.5)[1], 1.5)
    # updating an existing time overwrites it
    update!(fld, 1.0 => f1)
    @test isapprox(interpolate(fld, 0.5)[1], 1.0)
end
@testset "use of common constructor field" begin
    # `field` dispatches to the right concrete field type based on arguments
    @test isa(field(1.0), DCTI)
    @test isa(field(1.0 => 1.0), DCTV)
    @test isa(field((1.0, 2.0)), DVTI)
    @test isa(field(1, 2), DVTI)
    @test isa(field(1.0 => (1.0, 2.0)), DVTV)
    @test isa(field((xi, t) -> xi[1] * t), CVTV)
    @test isa(field(1 => [1.0, 2.0], 10 => [2.0, 3.0]), DVTId)
    @test isa(field(0.0 => (1 => 1.0, 10 => 2.0), 1.0 => (1 => 2.0, 10 => 3.0)), DVTVd)
    X = Dict(1 => [0.0, 0.0], 2 => [1.0, 0.0])
    X1 = field(X)
    X2 = field(0.0 => X)
    @test isa(X1, DVTId)
    @test isa(X2, DVTVd)
end
@testset "general interpolation" begin
    # interpolation of two iterables is a dot-product-like sum over pairs;
    # the first argument may be shorter than the second
    a = [1, 2, 3]
    b = (2, 3, 4)
    @test interpolate(a, b) == 2 + 6 + 12
    a = (1, 2)
    b = (2, 3, 4)
    @test interpolate(a, b) == 2 + 6
    @test_throws AssertionError interpolate(b, a)
end
| [
27,
7856,
261,
480,
29,
16980,
544,
37,
3620,
14,
37,
3620,
14881,
13,
20362,
27,
34345,
29,
9288,
14,
9288,
62,
25747,
13,
20362,
198,
3500,
376,
3620,
14881,
11,
6208,
198,
198,
2,
3574,
262,
3726,
286,
257,
1628,
356,
550,
257,... | 2.4506 | 3,249 |
<reponame>paveloom-j/Scats.jl
# This file contains a function
# to write input data to a file
"""
    write(input::InputStruct, file::AbstractString)

Write input data from an instance of [`InputStruct`](@ref) to a file.

# Usage
```jldoctest; output = false
using Scats
s = Scats.API()
file, _ = mktemp()
s.Input.write(file)
# output
```
"""
function write(input::InputStruct, file::AbstractString)
    # Label/value pairs in the exact order and format the reader expects;
    # every label after the first carries a leading blank line.
    sections = (
        "Sample size" => input.N,
        "\nSample step" => input.Δt,
        "\nSignificance level" => input.q,
        "\nTime array" => input.t,
        "\nValues array" => input.x,
    )
    open(file, "w") do io
        for (label, value) in sections
            println(io, label)
            println(io, value)
        end
    end
end
| [
27,
7856,
261,
480,
29,
8957,
626,
4207,
12,
73,
14,
3351,
1381,
13,
20362,
198,
2,
770,
2393,
4909,
257,
2163,
198,
2,
284,
3551,
5128,
1366,
284,
257,
2393,
198,
198,
37811,
198,
220,
220,
220,
3551,
7,
15414,
3712,
20560,
44909... | 2.304985 | 341 |
<gh_stars>1-10
module TreeTools
# Phylogenetic tree utilities; all functionality lives in the included files.

using FastaIO
using JSON
using Dates

## Includes
include("objects.jl")        # core types (e.g. TreeNode)
include("objectsmethods.jl")
include("mutations.jl")
include("prunegraft.jl")
include("datamethods.jl")
include("reading.jl")
include("writing.jl")
include("misc.jl")
include("lbi.jl")
end
## Todo
# The child field of `TreeNode` could be a set rather than an array, since ordering is not relevant. However, that makes accessing nodes more difficult. For now, this idea is abandoned since we would not benefit from the `Set`-specific functions implemented in Julia: they ultimately fall back to `===`, which would consider equal nodes to be different.
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
12200,
33637,
628,
198,
3500,
12549,
64,
9399,
198,
3500,
19449,
198,
3500,
44712,
198,
2235,
29581,
198,
17256,
7203,
48205,
13,
20362,
4943,
198,
17256,
7203,
48205,
24396,
82,
13,
20... | 3.456522 | 184 |
<filename>src/maxpool.jl<gh_stars>0
# Fused 2x2 max-pool + ReLU: B[i₁,i₂,...] = max(0, max of the 2x2 patch of A).
# `B` must have half the extent of `A` in the first two dimensions.
# The `@avx` macro (LoopVectorization) vectorizes the loop nest; the loop body
# must stay branch-free for it to apply.
function maxpool2x2relu!(B, A)
    @avx for i₁ ∈ axes(B,1), i₂ ∈ axes(B,2), i₃ ∈ axes(B,3), i₄ ∈ axes(B,4)
        # the four elements of the 2x2 patch
        A₁ = A[2i₁-1,2i₂-1,i₃,i₄]
        A₂ = A[2i₁-1,2i₂ ,i₃,i₄]
        A₃ = A[2i₁ ,2i₂-1,i₃,i₄]
        A₄ = A[2i₁ ,2i₂ ,i₃,i₄]
        # max of patch, clamped at zero (ReLU)
        B[i₁,i₂,i₃,i₄] = max(max(max(A₁, A₂), max(A₃, A₄)), zero(eltype(A)))
    end
    B
end
# Backward pass of maxpool2x2relu!: overwrite A with the pooled gradient B̄,
# routed to the element(s) of each 2x2 patch equal to the pooled value B.
# NOTE(review): if several patch elements tie for the max, each receives the
# full gradient (not a split) — confirm this is intended.
function maxpool2x2relureverse!(A, B̄, B)
    @avx unroll=(1,1) for i₁ ∈ axes(B,1), i₂ ∈ axes(B,2), i₃ ∈ axes(B,3), i₄ ∈ axes(B,4)
        Bₘ = B[i₁,i₂,i₃,i₄]
        B̄ᵢ = B̄[i₁,i₂,i₃,i₄]
        # (comparison == Bₘ) is 0 or 1, masking the incoming gradient
        A[2i₁-1,2i₂-1,i₃,i₄] = (A[2i₁-1,2i₂-1,i₃,i₄] == Bₘ) * B̄ᵢ
        A[2i₁-1,2i₂ ,i₃,i₄] = (A[2i₁-1,2i₂ ,i₃,i₄] == Bₘ) * B̄ᵢ
        A[2i₁ ,2i₂-1,i₃,i₄] = (A[2i₁ ,2i₂-1,i₃,i₄] == Bₘ) * B̄ᵢ
        A[2i₁ ,2i₂ ,i₃,i₄] = (A[2i₁ ,2i₂ ,i₃,i₄] == Bₘ) * B̄ᵢ
    end
end
# Variant of the backward pass that does not need the forward output B:
# it recomputes the pooled value from A before masking the gradient.
function maxpool2x2relureversev2!(A, B̄)
    @avx unroll=(1,1) for i₁ ∈ axes(B̄,1), i₂ ∈ axes(B̄,2), i₃ ∈ axes(B̄,3), i₄ ∈ axes(B̄,4)
        A₁ = A[2i₁-1,2i₂-1,i₃,i₄]
        A₂ = A[2i₁-1,2i₂ ,i₃,i₄]
        A₃ = A[2i₁ ,2i₂-1,i₃,i₄]
        A₄ = A[2i₁ ,2i₂ ,i₃,i₄]
        # recompute the forward pooled value (max of patch, ReLU-clamped)
        Bₘ = max(max(max(A₁, A₂), max(A₃, A₄)), zero(eltype(A)))
        B̄ᵢ = B̄[i₁,i₂,i₃,i₄]
        A[2i₁-1,2i₂-1,i₃,i₄] = (A₁ == Bₘ) * B̄ᵢ
        A[2i₁-1,2i₂ ,i₃,i₄] = (A₂ == Bₘ) * B̄ᵢ
        A[2i₁ ,2i₂-1,i₃,i₄] = (A₃ == Bₘ) * B̄ᵢ
        A[2i₁ ,2i₂ ,i₃,i₄] = (A₄ == Bₘ) * B̄ᵢ
    end
end
# Build, at compile time, the tuple expression that halves (>>> Static{1}())
# the first two entries of `x` and passes the rest through unchanged.
@generated function halve12(x::Tuple{Vararg{<:Any,N}}) where {N}
    elems = map(1:N) do n
        e = Expr(:ref, :x, n)
        # only the first two dimensions are halved
        n ≤ 2 ? Expr(:call, :>>>, e, Expr(:call, Expr(:curly, :Static, 1))) : e
    end
    Expr(:tuple, elems...)
end
# Allocate the output array for a 2x2 max-pool: halved in the first two
# dimensions, with the same size/permutation layout as `img`.
function default_alloc_maxpool(mp::MaxPool2x2Layer, img::AbstractArray{T}) where {T}
    s = halve12(maybestaticsize(img))
    _, p = PaddedMatrices.size_permute_tuples(img)
    B = allocarray(T, s)
    PermutedDimsArray(B, p)
end
# Stack-pointer variant of `default_alloc_maxpool`: carves the output out of
# the preallocated stack instead of heap-allocating.
# Fix: the functor type was misspelled `StatckPointer`; the type used elsewhere
# in this file (see `forward`) is `StackPointer`, so this method could never load.
function (sp::StackPointer)(::typeof(default_alloc_maxpool), mp::MaxPool2x2Layer, img::AbstractArray{T}) where {T}
    s = halve12(maybestaticsize(img))
    _, p = PaddedMatrices.size_permute_tuples(img)
    sp, B = PtrArray{T}(sp, s)
    sp, PermutedDimsArray(B, p)
end
# Like `default_alloc_maxpool`, but pads the first two (halved) dimensions by 2.
function alloc_maxpool_pad12(mp::MaxPool2x2Layer, img::AbstractArray{T,4}) where {T}
    strunc = halve12(maybestaticsize(img))
    # add a static padding of 2 to each of the first two dimensions
    s = (vadd(strunc[1], Static{2}()), vadd(strunc[2], Static{2}()), strunc[3], strunc[4])
    _, p = PaddedMatrices.size_permute_tuples(img)
    B = allocarray(T, s)
    PermutedDimsArray(B, p)
end
# 4-D specialization of the stack-pointer allocation path.
# Fix: the functor type was misspelled `StatckPointer`; the type used elsewhere
# in this file (see `forward`) is `StackPointer`, so this method could never load.
# NOTE(review): this dispatches on `default_alloc_maxpool` even though it sits
# next to `alloc_maxpool_pad12` — confirm which allocator was intended.
function (sp::StackPointer)(::typeof(default_alloc_maxpool), mp::MaxPool2x2Layer, img::AbstractArray{T,4}) where {T}
    s = halve12(maybestaticsize(img))
    _, p = PaddedMatrices.size_permute_tuples(img)
    sp, B = PtrArray{T}(sp, s)
    sp, PermutedDimsArray(B, p)
end
# Layer performing a fused 2x2 max-pool + ReLU. `f` is the output-allocation
# strategy (a function such as `default_alloc_maxpool`).
struct MaxPool2x2Layer{F} <: AbstractLayer
    f::F  # allocator called to obtain the pooled-output array
end
MaxPool2x2Layer() = MaxPool2x2Layer(default_alloc_maxpool)
# A pooling layer has no trainable parameters and hence no gradient storage.
parameters(::MaxPool2x2Layer) = nothing
grad(::MaxPool2x2Layer) = nothing
# NOTE(review): this reads field `o`, but the struct above only declares `f`.
# Looks like a leftover from a version that cached its output — confirm.
returns(mp::MaxPool2x2Layer) = mp.o
# Stack-pointer forward pass: allocate the output via the layer's allocator,
# then run the fused pool+ReLU kernel.
# NOTE(review): `pb` in the return tuple is never defined in this function —
# presumably a pullback was intended; this will raise UndefVarError if reached.
function forward(sp::StackPointer, mp::MaxPool2x2Layer, img)
    sp, out = stack_pointer_call(mp.f, sp, img)
    maxpool2x2relu!(out, img)
    out, pb, out
end
# In-place forward pass writing into the layer's cached output (see `returns`).
forward!(mp::MaxPool2x2Layer, img) = (maxpool2x2relu!(returns(mp), img), img)
# No parameters, so nothing to do for the parameter-gradient step.
reverse_grad!(::MaxPool2x2Layer) = nothing
# Backward pass: route the pooled gradient ōūt̄ back into `img` in place.
function reverse_chain!(mp::MaxPool2x2Layer, img, ōūt̄)
    maxpool2x2relureversev2!(img, ōūt̄)
end
# function maxpool2x2relureversev3!(A, B̄)
# for i₁ ∈ axes(B̄,1), i₂ ∈ axes(B̄,2), i₄ ∈ axes(B̄,4)
# @inbounds @simd ivdep for i₃ ∈ axes(B̄,3)
# A₁ = A[2i₁-1,2i₂-1,i₃,i₄]
# A₂ = A[2i₁-1,2i₂ ,i₃,i₄]
# A₃ = A[2i₁ ,2i₂-1,i₃,i₄]
# A₄ = A[2i₁ ,2i₂ ,i₃,i₄]
# Bₘ = max(max(max(A₁, A₂), max(A₃, A₄)), zero(eltype(A)))
# B̄ᵢ = B̄[i₁,i₂,i₃,i₄]
# A[2i₁-1,2i₂-1,i₃,i₄] = (A₁ == Bₘ) * B̄ᵢ
# A[2i₁-1,2i₂ ,i₃,i₄] = (A₂ == Bₘ) * B̄ᵢ
# A[2i₁ ,2i₂-1,i₃,i₄] = (A₃ == Bₘ) * B̄ᵢ
# A[2i₁ ,2i₂ ,i₃,i₄] = (A₄ == Bₘ) * B̄ᵢ
# end
# end
# end
| [
27,
34345,
29,
10677,
14,
9806,
7742,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
8818,
3509,
7742,
17,
87,
17,
260,
2290,
0,
7,
33,
11,
317,
8,
198,
220,
220,
220,
2488,
615,
87,
329,
1312,
158,
224,
223,
18872,
230,
34197,
7... | 1.399521 | 2,921 |
<gh_stars>100-1000
# functions related to negative binomial distribution
# R implementations
using .RFunctions:
nbinompdf,
nbinomlogpdf,
nbinomcdf,
nbinomccdf,
nbinomlogcdf,
nbinomlogccdf,
nbinominvcdf,
nbinominvccdf,
nbinominvlogcdf,
nbinominvlogccdf
| [
27,
456,
62,
30783,
29,
3064,
12,
12825,
198,
2,
5499,
3519,
284,
4633,
9874,
49070,
6082,
198,
198,
2,
371,
25504,
198,
3500,
764,
49,
24629,
2733,
25,
198,
220,
220,
220,
299,
8800,
3361,
7568,
11,
198,
220,
220,
220,
299,
8800,... | 2.17037 | 135 |
<reponame>ven-k/RobustAndOptimalControl.jl
# On CI, put Plots into headless test mode; GKSwstype=100 avoids GR crashing
# when no display is available.
if haskey(ENV, "CI")
    ENV["PLOTS_TEST"] = "true"
    ENV["GKSwstype"] = "100" # gr segfault workaround
end
using Plots
using RobustAndOptimalControl
using LinearAlgebra
using Test
# Top-level suite: each sub-testset just includes the corresponding test file,
# so failures are reported per feature area.
@testset "RobustAndOptimalControl.jl" begin
    @testset "extendedstatespace" begin
        @info "Testing extendedstatespace"
        include("test_extendedstatespace.jl")
    end
    @testset "utils" begin
        @info "Testing utils"
        include("test_utils.jl")
    end
    @testset "descriptor" begin
        @info "Testing descriptor"
        include("test_descriptor.jl")
    end
    @testset "uncertainty" begin
        @info "Testing uncertainty"
        include("test_uncertainty.jl")
    end
    @testset "diskmargin" begin
        @info "Testing diskmargin"
        include("test_diskmargin.jl")
    end
    @testset "weights" begin
        @info "Testing weights"
        include("test_weights.jl")
    end
    @testset "hinfpartition" begin
        @info "Testing hinfpartition"
        include("test_hinfpartition.jl")
    end
    @testset "H∞ design" begin
        @info "Testing hinf_design"
        include("test_hinf_design.jl")
    end
    @testset "H2 design" begin
        @info "Testing H2 design"
        include("test_h2_design.jl")
    end
    @testset "LQG" begin
        @info "Testing LQG"
        include("test_lqg.jl")
    end
    @testset "Named systems" begin
        @info "Testing Named systems"
        include("test_named_systems2.jl")
    end
    @testset "find_lft" begin
        @info "Testing find_lft"
        include("test_find_lft.jl")
    end
    @testset "reduction" begin
        @info "Testing test_reduction"
        include("test_reduction.jl")
    end
    @testset "augmentation" begin
        @info "Testing augmentation"
        include("test_augmentation.jl")
    end
    @testset "glover_mcfarlane" begin
        @info "Testing glover_mcfarlane"
        include("test_glover_mcfarlane.jl")
    end
    @testset "hinfgrad" begin
        @info "Testing hinfgrad"
        include("test_hinfgrad.jl")
    end
end
| [
27,
7856,
261,
480,
29,
574,
12,
74,
14,
14350,
436,
1870,
27871,
4402,
15988,
13,
20362,
198,
361,
468,
2539,
7,
1677,
53,
11,
366,
25690,
4943,
198,
220,
220,
220,
12964,
53,
14692,
6489,
33472,
62,
51,
6465,
8973,
796,
366,
794... | 2.237037 | 945 |
################################################################################
# Augmented Gradient Search ARS
################################################################################
using LinearAlgebra
using Statistics
import LinearAlgebra.normalize
import GeometryBasics.update
# Train `policy` by analytic gradient ascent on episode reward, using the
# environment's dynamics Jacobians to propagate ∂state/∂θ through the rollout.
# Mutates `policy` (via gradient_update) and `normalizer` (via observe).
function train(env::Environment, policy::Policy{T}, normalizer::Normalizer{T}, hp::HyperParameters{T}) where T
    # NOTE(review): `envs` is built for multi-threaded rollouts but never used below.
    envs = [deepcopy(env) for i = 1:Threads.nthreads()]
    output_size, input_size = size(policy.θ)
    nx = input_size
    nu = output_size
    nθ = output_size * input_size
    # Rollout data: states, actions, and dynamics Jacobians per step
    X = [zeros(nx) for k = 1:hp.horizon+1]
    A = [zeros(nu) for k = 1:hp.horizon]
    fx = [zeros(nx,nx) for k = 1:hp.horizon]
    fu = [zeros(nx,nu) for k = 1:hp.horizon]
    # Gradient data: total reward gradient and per-step state sensitivities
    ∇θ = zeros(1,nθ)
    ∂x∂θ = [zeros(nx, nθ) for k = 1:hp.horizon]
    for episode = 1:hp.main_loop_size
        # Stem: roll out one episode, recording states/actions/Jacobians
        state = reset(env)
        X[1] .= copy(state)
        done = false
        sum_reward = 0
        num_plays = 0
        for k = 1:hp.horizon
            done && break
            observe(normalizer, state)
            state = normalize(normalizer, state)
            action = evaluate(policy, state)
            A[k] .= copy(action)
            state, reward, done, _ = step(env, action, diff = true)
            X[k+1] .= copy(state)
            fx[k] .= copy(env.dynamics_jacobian_state)
            fu[k] .= copy(env.dynamics_jacobian_input)
            # clip reward to [-1, 1] to bound the gradient contribution
            reward = max(min(reward, 1), -1)
            sum_reward += reward
            num_plays += 1
        end
        ∇θ .= 0.0
        # ∂action/∂θ for a linear policy: block-diagonal copies of the state
        ∂u∂θ = [cat([X[k]' for i = 1:output_size]..., dims = (1,2)) for k = 1:num_plays]
        # reward (= -cost) gradients w.r.t. state and action, by finite differences
        ∂r∂x = [FiniteDiff.finite_difference_jacobian(x -> [-cost(env, x, A[k])], X[k]) for k = 1:num_plays]
        ∂r∂u = [FiniteDiff.finite_difference_jacobian(u -> [-cost(env, X[k], u)], A[k]) for k = 1:num_plays]
        # forward sensitivity recursion; ∂x∂θ[1] stays zero (x₁ independent of θ)
        for k = 2:num_plays
            ∂x∂θ[k] .= fx[k-1] * ∂x∂θ[k-1] + fu[k-1] * ∂u∂θ[k-1] #TODO
        end
        # accumulate the reward gradient, bailing out if it blows up
        for k = 1:num_plays
            if norm(∇θ, Inf) > 8e2
                println("grad up to step $k")
                break
            end
            ∇θ += ∂r∂x[k] * ∂x∂θ[k] + ∂r∂u[k] * ∂u∂θ[k]
        end
        ∇ = transpose(reshape(∇θ, (input_size, output_size)))
        # skip the update entirely when the gradient is too large
        (norm(∇, Inf) < 1e3) && gradient_update(policy, ∇)
        # finish, print:
        println("episode $episode ∇∞ $(scn(norm(∇, Inf))) r $sum_reward")
    end
    return nothing
end
# Ascend along ∇, normalized to unit ∞-norm and scaled by the configured step size.
function gradient_update(policy::Policy, ∇)
    scaled = policy.hp.step_size * ∇
    policy.θ += scaled ./ norm(∇, Inf)
    return nothing
end
| [
29113,
29113,
14468,
198,
2,
2447,
12061,
17701,
1153,
11140,
5923,
50,
198,
29113,
29113,
14468,
198,
3500,
44800,
2348,
29230,
198,
3500,
14370,
198,
198,
11748,
44800,
2348,
29230,
13,
11265,
1096,
198,
11748,
2269,
15748,
15522,
873,
... | 1.96544 | 1,331 |
<reponame>LudwigBoess/SpectralCRsUtility.jl<filename>src/io/io.jl
using GadgetUnits
using GadgetIO
"""
    readSingleCRShockDataFromOutputFile(file::String)

Parse all "CR DATA" lines of a simulation output `file` into a `CRShockData`
container (one entry per matching line).
"""
function readSingleCRShockDataFromOutputFile(file::String)
    # readlines(file) opens and closes the file itself, so the handle cannot
    # leak if reading throws (the original open/readlines/close could).
    lines = readlines(file)
    # keep only relevant lines; each starts with "CR DATA"
    lines = lines[occursin.("CR DATA", lines)]
    N = length(lines)
    cr = CRShockData(N)
    # columns 3..10 hold the numeric fields, in this fixed order
    for i ∈ 1:N
        line_content = split(lines[i])
        cr.dt[i] = parse(Float64,line_content[3])
        cr.Mach[i] = parse(Float64,line_content[4])
        cr.Shock_Speed[i] = parse(Float64,line_content[5])
        cr.Shock_Compress[i] = parse(Float64,line_content[6])
        cr.Shock_Energy_In[i] = parse(Float64,line_content[7])
        cr.Shock_Energy_Real[i] = parse(Float64,line_content[8])
        cr.Energy_P[i] = parse(Float64,line_content[9])
        cr.Energy_e[i] = parse(Float64,line_content[10])
    end
    return cr
end
"""
    getCRMomentumDistributionFromPartID( snap_file::String, ID::Integer;
                                         pmin::Real=1.0, pmax::Real=1.0e6,
                                         Nbins::Integer=0, mode::Int64=3)

Reads the spectra from a single SPH particle via the particle ID.
"""
function getCRMomentumDistributionFromPartID(snap_file::String, ID::Integer;
                                             pmin::Real=1.0, pmax::Real=1.0e6,
                                             Nbins::Integer=0, mode::Int64=3,
                                             protons::Bool=true, electrons::Bool=true)
    # NOTE(review): `h` is read but never used below — confirm it can be dropped.
    h = head_to_obj(snap_file)
    info = read_info(snap_file)
    # read_info returns 1 when the snapshot has no INFO block; in that case we
    # must construct the block descriptions by hand, which needs Nbins.
    if info == 1
        if Nbins == 0
            error("Can't read spectrum! No info block present!\nSupply number of momentum bins to proceed!")
        else
            info = Array{InfoLine,1}(undef,7)
            info[1] = InfoLine("ID", UInt32, Int32(1), [1, 0, 0, 0, 0, 0])
            info[2] = InfoLine("CRpN", Float32, Int32(Nbins), [1, 0, 0, 0, 0, 0])
            info[3] = InfoLine("CRpS", Float32, Int32(Nbins), [1, 0, 0, 0, 0, 0])
            info[4] = InfoLine("CRpC", Float32, Int32(1), [1, 0, 0, 0, 0, 0])
            info[5] = InfoLine("CReN", Float32, Int32(Nbins), [1, 0, 0, 0, 0, 0])
            info[6] = InfoLine("CReS", Float32, Int32(Nbins), [1, 0, 0, 0, 0, 0])
            info[7] = InfoLine("CReC", Float32, Int32(1), [1, 0, 0, 0, 0, 0])
        end
    end
    # read block positions to speed up IO
    block_positions = GadgetIO.get_block_positions(snap_file)
    id = read_block(snap_file, "ID",
                    info=info[getfield.(info, :block_name) .== "ID"][1],
                    parttype=0, block_position=block_positions["ID"])
    # select the position of the requested ID
    part = findfirst( id .== UInt32(ID) )[1]
    # protons: norms (CRpN), slopes (CRpS) and cutoff (CRpC) of the spectrum
    if protons
        CRpN = Float64.(
            read_block(snap_file, "CRpN",
                       info=info[getfield.(info, :block_name) .== "CRpN"][1],
                       parttype=0, block_position=block_positions["CRpN"])[:,part]
        )
        CRpS = read_block(snap_file, "CRpS",
                          info=info[getfield.(info, :block_name) .== "CRpS"][1],
                          parttype=0, block_position=block_positions["CRpS"])[:,part]
        CRpC = read_block(snap_file, "CRpC",
                          info=info[getfield.(info, :block_name) .== "CRpC"][1],
                          parttype=0, block_position=block_positions["CRpC"])[part]
        Nbins = size(CRpS,1)
    end
    # electrons: same three blocks with the CRe prefix
    if electrons
        CReN = Float64.(
            read_block(snap_file, "CReN",
                       info=info[getfield.(info, :block_name) .== "CReN"][1],
                       parttype=0, block_position=block_positions["CReN"])[:,part]
        )
        CReS = read_block(snap_file, "CReS",
                          info=info[getfield.(info, :block_name) .== "CReS"][1],
                          parttype=0, block_position=block_positions["CReS"])[:,part]
        CReC = read_block(snap_file, "CReC",
                          info=info[getfield.(info, :block_name) .== "CReC"][1],
                          parttype=0, block_position=block_positions["CReC"])[part]
        Nbins = size(CReS,1)
    end
    par = CRMomentumDistributionConfig(pmin, pmax, Nbins, mode)
    # NOTE(review): protons are paired with `par.mc_e` and electrons with
    # `par.mc_p` in every branch below — looks swapped; confirm against the
    # definition of CRMomentumDistributionConfig.
    if protons && electrons
        return CRMomentumDistribution( CRpN, CRpS, CRpC, par.pmin, par.pmax, par.mc_e ),
               CRMomentumDistribution( CReN, CReS, CReC, par.pmin, par.pmax, par.mc_p )
    elseif protons
        return CRMomentumDistribution( CRpN, CRpS, CRpC, par.pmin, par.pmax, par.mc_e )
    elseif electrons
        return CRMomentumDistribution( CReN, CReS, CReC, par.pmin, par.pmax, par.mc_p )
    end
end
"""
    write_crp_cre_to_txt( t::Vector{<:Real}, CRp::Vector{CRMomentumDistribution},
                          CRe::Vector{CRMomentumDistribution}, output_file::String )

Write CR Proton and Electron spectra for a series of time steps `t` to a txt file.
"""
function write_crp_cre_to_txt(t::Vector{<:Real}, CRp::Vector{CRMomentumDistribution},
                              CRe::Vector{CRMomentumDistribution}, output_file::String)
    # Fix: the annotations previously declared scalar `CRMomentumDistribution`s,
    # but the body indexes `CRp[i]`/`CRe[i]` per time step, and the matching
    # reader `read_crp_cre_from_txt` returns vectors (as does `write_cr_to_txt`).
    # One row per time step: [t, p-bounds, p-norms, e-bounds, e-norms].
    data = Matrix{Float64}(undef, length(t), 1+4*length(CRp[1].norm))
    for i = 1:length(t)
        data[i,:] = [t[i] CRp[i].bound[1:end-1]' CRp[i].norm' CRe[i].bound[1:end-1]' CRe[i].norm' ]
    end
    writedlm(output_file, data)
end
"""
    read_crp_cre_from_txt(filename::String)

Read CR Proton and Electron spectra from a txt file.
"""
function read_crp_cre_from_txt(filename::String)
    data = readdlm(filename)
    N = size(data,1)
    # each row is [t, 2Nbins p-bounds, 2Nbins p-norms, 2Nbins e-bounds, 2Nbins e-norms]
    Nbins = Int64((size(data,2) - 1) / 8)
    CRp = Array{CRMomentumDistribution,1}(undef,N)
    CRe = Array{CRMomentumDistribution,1}(undef,N)
    t = data[:,1]
    for i = 1:N
        # the last stored bound is repeated to close the final bin interval
        CRp[i] = CRMomentumDistribution([data[i,2:2Nbins+1]; data[i,2Nbins+1]], data[i,2Nbins+2:4Nbins+1])
        CRe[i] = CRMomentumDistribution([data[i,4Nbins+2:6Nbins+1]; data[i,6Nbins+1]], data[i,6Nbins+2:8Nbins+1])
    end
    return t, CRp, CRe
end
# Write a single CR spectrum series (one CRMomentumDistribution per time step)
# to a delimited text file: each row is [t, bounds (last dropped), norms].
function write_cr_to_txt(t::Vector{<:Real}, CR::Vector{CRMomentumDistribution}, output_file::String)
    nrows = length(t)
    ncols = 1 + 2 * length(CR[1].norm)
    data = Matrix{Float64}(undef, nrows, ncols)
    for (i, ti) in enumerate(t)
        data[i, :] = [ti CR[i].bound[1:end-1]' CR[i].norm']
    end
    writedlm(output_file, data)
end
# Read a single CR spectrum series written by `write_cr_to_txt`.
# Returns `(t, CR)` with one CRMomentumDistribution per row of the file.
function read_cr_from_txt(fi)
    data = readdlm(fi)
    N = size(data,1)
    # each row is [t, 2Nbins bounds, 2Nbins norms]
    Nbins = Int64((size(data,2) - 1) / 4)
    CR = Array{CRMomentumDistribution,1}(undef,N)
    t = data[:,1]
    for i = 1:N
        # repeat the last stored bound to close the final bin interval
        CR[i] = CRMomentumDistribution([data[i,2:2Nbins+1]; data[i,2Nbins+1]], data[i,2Nbins+2:4Nbins+1])
    end
    return t, CR
end
27,
7856,
261,
480,
29,
43,
463,
28033,
16635,
408,
14,
49738,
1373,
9419,
82,
18274,
879,
13,
20362,
27,
34345,
29,
10677,
14,
952,
14,
952,
13,
20362,
198,
3500,
39266,
3118,
896,
198,
3500,
39266,
9399,
198,
198,
8818,
1100,
2800... | 1.88838 | 3,709 |
import Base.CoreLogging:
AbstractLogger,
LogLevel,
handle_message,
min_enabled_level,
shouldlog,
global_logger
# Shim logger that forwards Base logging macros (@info etc.) into Memento.
struct BaseLogger <: AbstractLogger
    min_level::LogLevel  # messages below this level are dropped
end

min_enabled_level(logger::BaseLogger) = logger.min_level
# Accept everything at or above min_level; filtering happens in Memento.
shouldlog(logger::BaseLogger, args...) = true

# Route a Base log record to the Memento logger registered for `mod`.
function handle_message(::BaseLogger, cl_level, msg, mod, group, id, filepath, line; kwargs...)
    logger = getlogger(mod)
    # Base levels (e.g. Info) are matched to Memento levels by lowercase name
    level = lowercase(string(cl_level))
    log(logger, logger.record(logger.name, level, getlevels(logger)[level], msg))
end

# Replace the global Base logger with a BaseLogger so all Base logging flows
# through Memento; defaults to the current global logger's minimum level.
function substitute!(level::LogLevel=min_enabled_level(global_logger()))
    global_logger(BaseLogger(level))
    notice(getlogger(@__MODULE__), "Substituting global logging with Memento")
end
| [
11748,
7308,
13,
14055,
11187,
2667,
25,
198,
220,
220,
220,
27741,
11187,
1362,
11,
198,
220,
220,
220,
5972,
4971,
11,
198,
220,
220,
220,
5412,
62,
20500,
11,
198,
220,
220,
220,
949,
62,
25616,
62,
5715,
11,
198,
220,
220,
220... | 2.723636 | 275 |
using CUDA
using MOCNeutronTransport
using BenchmarkTools
using Test
# Number of points to use in vectors
N = 2^20
println("Using Point arrays of length $N")
# Check num threads and give warning; the CPU multithreaded benchmark is
# meaningless with a single thread.
nthreads = Threads.nthreads()
if nthreads === 1
    @warn "Only using single-thread for cpu. Try restarting julia with 'julia --threads n'"
else
    println("Using $nthreads threads for CPU multi-threading")
end
# Single threads CPU add
# Single-threaded elementwise addition: z[i] = x[i] + y[i].
function sequential_add!(x, y, z)
    for idx in eachindex(x, y)
        @inbounds z[idx] = x[idx] + y[idx]
    end
    return nothing
end
# Multithreaded CPU add
# Multithreaded elementwise addition across the available Julia threads.
function parallel_add!(x, y, z)
    Threads.@threads for idx in eachindex(x, y)
        @inbounds z[idx] = x[idx] + y[idx]
    end
    return nothing
end
# Single threaded GPU add
# Kernel body for a single GPU thread (also runs as plain Julia on the CPU):
# one thread walks the whole array sequentially.
function gpu_1_thread_add!(x, y, z)
    @inbounds for k = 1:length(x)
        z[k] = x[k] + y[k]
    end
    return nothing
end
function bench_gpu_1_thread_add!(x, y, z)
CUDA.@sync begin
@cuda gpu_1_thread_add!(x, y, z)
end
end
# Single block GPU add
# Kernel for a single block: each thread strides by the block size.
function gpu_1_block_add!(x, y, z)
    index = threadIdx().x # this example only requires linear indexing, so just use `x`
    stride = blockDim().x
    for i = index:stride:length(x)
        @inbounds z[i] = x[i] + y[i]
    end
    return nothing
end
# Launch one block of 512 threads and block until the kernel finishes.
function bench_gpu_1_block_add!(x, y, z)
    CUDA.@sync begin
        @cuda threads=512 gpu_1_block_add!(x, y, z)
    end
end
# Multiple blocks GPU add
# Grid-stride kernel: each thread starts at its global index and strides by the
# total number of threads in the grid, so any grid size covers the whole array.
function gpu_multiblock_add!(x, y, z)
    index = (blockIdx().x - 1) * blockDim().x + threadIdx().x
    stride = gridDim().x * blockDim().x
    for i = index:stride:length(y)
        # Fix: the loop previously wrote z[index] instead of z[i], so each
        # thread rewrote only its starting element and any element beyond one
        # grid span (gridDim*blockDim) was never computed.
        @inbounds z[i] = x[i] + y[i]
    end
    return nothing
end
# Launch enough 512-thread blocks to cover the array, and wait for completion.
function bench_gpu_multiblock_add!(x, y, z)
    numblocks = ceil(Int, length(x)/512)
    CUDA.@sync begin
        @cuda threads=512 blocks=numblocks gpu_multiblock_add!(x, y, z)
    end
end
# Use the occupancy API to determine threads and blocks to saturate the GPU
# Use the occupancy API to determine threads and blocks to saturate the GPU:
# compile the kernel without launching, ask CUDA for the best configuration,
# then launch with it.
function bench_gpu_multiblock_autooccupancy!(x, y, z)
    kernel = @cuda launch=false gpu_multiblock_add!(x, y, z)
    config = launch_configuration(kernel.fun)
    threads = min(length(y), config.threads)
    blocks = cld(length(y), threads)
    CUDA.@sync begin
        kernel(x, y, z; threads, blocks)
    end
end
# Benchmark every variant for Float64 and Float32 points; speedups are reported
# relative to the single-threaded Float64 CPU run (captured in cpu_time).
cpu_time = 0.0
for T = [Float64, Float32]
    println("Using Point of type $T")
    x = fill(Point_2D{T}(1, 1), N)
    y = fill(Point_2D{T}(2, 2), N)
    z = fill(Point_2D{T}(0, 0), N)
    # CPU, single thread (baseline)
    time = @belapsed sequential_add!($x, $y, $z)
    μs = 1e6*time
    if T == Float64
        global cpu_time = μs
    end
    speedup = cpu_time/μs
    println(" CPU: single-thread = $μs μs")
    println(" Speed up compared to single-thread CPU & Float64 Points = $speedup")
    @test all(z .== Point_2D{T}(3, 3))
    fill!(z, Point_2D{T}(0, 0))
    # CPU, multithreaded
    time = @belapsed parallel_add!($x, $y, $z)
    μs = 1e6*time
    speedup = cpu_time/μs
    println(" CPU: $nthreads threads = $μs μs.")
    println(" Speed up compared to single-thread CPU & Float64 Points = $speedup")
    @test all(z .== Point_2D{T}(3, 3))
    # device-side copies of the inputs/output
    x_d = CUDA.fill(Point_2D{T}(1, 1), N)
    y_d = CUDA.fill(Point_2D{T}(2, 2), N)
    z_d = CUDA.fill(Point_2D{T}(0, 0), N)
    # GPU, one thread
    time = @belapsed bench_gpu_1_thread_add!($x_d, $y_d, $z_d)
    μs = 1e6*time
    speedup = cpu_time/μs
    println(" GPU: single-thread/block, 1 blocks = $μs μs.")
    println(" Speed up compared to single-thread CPU & Float64 Points = $speedup")
    @test all(Array(z_d) .== Point_2D{T}(3, 3))
    fill!(z_d, Point_2D{T}(0, 0))
    # GPU, one block of 512 threads
    time = @belapsed bench_gpu_1_block_add!($x_d, $y_d, $z_d)
    μs = 1e6*time
    speedup = cpu_time/μs
    println(" GPU: 512 threads/block, 1 blocks = $μs μs.")
    println(" Speed up compared to single-thread CPU & Float64 Points = $speedup")
    @test all(Array(z_d) .== Point_2D{T}(3, 3))
    fill!(z_d, Point_2D{T}(0, 0))
    # GPU, enough 512-thread blocks to cover N
    time = @belapsed bench_gpu_multiblock_add!($x_d, $y_d, $z_d)
    μs = 1e6*time
    speedup = cpu_time/μs
    numblocks = ceil(Int64, N/512)
    println(" GPU: 512 threads/block, $numblocks blocks = $μs μs.")
    println(" Speed up compared to single-thread CPU & Float64 Points = $speedup")
    @test all(Array(z_d) .== Point_2D{T}(3, 3))
    fill!(z_d, Point_2D{T}(0, 0))
    # GPU, occupancy-API-chosen launch configuration
    time = @belapsed bench_gpu_multiblock_autooccupancy!($x_d, $y_d, $z_d)
    μs = 1e6*time
    speedup = cpu_time/μs
    kernel = @cuda launch=false gpu_multiblock_add!(x_d, y_d, z_d)
    config = launch_configuration(kernel.fun)
    threads = min(N, config.threads)
    blocks = cld(N, threads)
    println(" GPU: $threads threads/block, $blocks blocks = $μs μs.")
    println(" Speed up compared to single-thread CPU & Float64 Points = $speedup")
    @test all(Array(z_d) .== Point_2D{T}(3, 3))
end
| [
3500,
29369,
5631,
198,
3500,
337,
4503,
8199,
315,
1313,
8291,
634,
198,
3500,
25187,
4102,
33637,
198,
3500,
6208,
198,
198,
2,
7913,
286,
2173,
284,
779,
287,
30104,
198,
45,
796,
362,
61,
1238,
198,
35235,
7203,
12814,
6252,
26515... | 2.181368 | 2,222 |
<reponame>JuliaTagBot/play
# Character tables for suits, card ranks, hex digits and player seats.
const SUITCHAR = collect("CDHSN")
const CARDCHAR = collect("23456789TJQKA")
const HEXCHAR = collect("0123456789ABCDEF")
const PLAYERCHAR = collect("WNES")

# Map a hex digit character ('0'-'9', 'A'-'F') to its integer value.
hex2int(c::Char) = c <= '9' ? c - '0' : c - '7'
# Map an integer 0-15 to its uppercase hex digit.
int2hex(i::Integer) = HEXCHAR[i+1]
# Trump suits (strains), in bidding order.
const CLUBS = 1
const DIAMONDS = 2
const HEARTS = 3
const SPADES = 4
const NOTRUMP = 5

# Regular bids are represented by integers 1:35 (7 levels x 5 strains).
const TRUMPBIDS = 35

# Encode a (level, strain) pair as a bid number in 1:35.
makebid(level, suit) = 5 * (level - 1) + suit
# Decode the level (1-7) of a bid.
bidlevel(bid) = 1 + div(bid - 1, 5)
# Decode the strain (1-5) of a bid.
bidtrump(bid) = mod1(bid, 5)
# Character for a bid's strain, e.g. 'H' for hearts.
bidtrumpchar(bid) = SUITCHAR[bidtrump(bid)]
# Character for a bid's level, e.g. '3'.
bidlevelchar(bid) = '0' + bidlevel(bid)
# Cards are encoded as integers 1:52 — 13 ranks per suit, suits in SUITCHAR order.
makecard(suit, value) = value + 13 * (suit - 1)
# Rank (1-13) of a card.
cardvalue(card) = mod1(card, 13)
# Suit (1-4) of a card.
cardsuit(card) = 1 + div(card - 1, 13)
# Rank character, e.g. 'A' for the highest rank.
cardvaluechar(card) = CARDCHAR[cardvalue(card)]
# Suit character, e.g. 'C' for clubs.
cardsuitchar(card) = SUITCHAR[cardsuit(card)]
# Three extra bids beyond the 35 regular bids.
const PASS=36
const DOUBLE=37
const REDOUBLE=38
const NUMBIDS=38

# Players, indexed clockwise starting from West.
const WEST=1
const NORTH=2
const EAST=3
const SOUTH=4

# Double dummy data
# using StaticArrays
# struct DoubleDummy2
#     hands::SVector{4,UInt64} # Using 52 bits of UInt64 to encode a hand
#     results::SMatrix{4,5,UInt8} # (4,5) array of NS tricks
# end
| [
27,
7856,
261,
480,
29,
16980,
544,
24835,
20630,
14,
1759,
198,
2,
26813,
198,
9979,
13558,
31949,
1503,
28,
33327,
7203,
8610,
7998,
45,
4943,
198,
9979,
48731,
38019,
28,
33327,
7203,
1954,
2231,
3134,
4531,
51,
41,
48,
25123,
4943... | 2.436975 | 476 |
<filename>src/providers/data.jl<gh_stars>1-10
using Dates
include("../structures.jl")
include("../util/util.jl")
include("../util/logger.jl")
# Fallback for unsupported data providers; concrete providers add methods in
# the included provider files (sysstat.jl, aws.jl).
function fetchData!(store::DataStore, attribute::DataAttribute)
    # `throw` on a raw String is not an Exception; `error` raises an
    # ErrorException that callers can catch like any other failure.
    error("Unknown data provider $(typeof(attribute))")
end
include("sysstat.jl")
include("aws.jl")
| [
27,
34345,
29,
10677,
14,
15234,
4157,
14,
7890,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
44712,
198,
198,
17256,
7203,
40720,
7249,
942,
13,
20362,
4943,
198,
17256,
7203,
40720,
22602,
14,
22602,
13,
20362,
4943,
... | 2.952381 | 105 |
<reponame>MageekDM/Enigma
# Convert a permutation string (e.g. rotor wiring) into a vector of indices.
function get_perm(str::String)
    perm = Array(Int, length(str))
    for (idx, ch) in enumerate(str)
        perm[idx] = char2ind(ch)
    end
    perm
end
# Historical Enigma rotor wirings I-VIII with their turnover notch positions.
# NOTE(review): rotor I's notch is `Int[2]` while its own comment says
# `char2ind('Q')` (and rotor II's `Int[5]` does match char2ind('E')) — the
# self-test below passes with these values, so confirm before "fixing".
const ENIGMA_ROTORS = [
    Rotor(get_perm("EKMFLGDQVZNTOWYHXUSPAIBRCJ"), Int[2]), #char2ind('Q')]), # I
    Rotor(get_perm("AJDKSIRUXBLHWTMCQGZNPYFVOE"), Int[5]), # char2ind('E')]), # II
    Rotor(get_perm("BDFHJLCPRTXVZNYEIWGAKMUSQO"), Int[char2ind('V')]), # III
    Rotor(get_perm("ESOVPZJAYQUIRHXLNFTGKDCMWB"), Int[char2ind('J')]), # IV
    Rotor(get_perm("VZBRGITYUPSDNHLXAWMJQOFECK"), Int[char2ind('Z')]), # V
    Rotor(get_perm("JPGVOUMFYQBENHZRDKASXLICTW"), Int[char2ind('Z'), char2ind('M')]), # VI
    Rotor(get_perm("NZJHGRCXMYSWBOUFAIVLPEKQDT"), Int[char2ind('Z'), char2ind('M')]), # VII
    Rotor(get_perm("FKQHTLXOCBJSPDZRAMEWNIUYGV"), Int[char2ind('Z'), char2ind('M')]), # VIII
]

# Reflector wirings: standard B/C and the "thin" M4 variants.
const ENIGMA_REFLECTORS = [
    Reflector(get_perm("YRUHQSLDPXNGOKMIEBFZCWVJAT")), # b
    Reflector(get_perm("FVPJIAOYEDRZXWGCTKUQSBNMHL")), # c
    Reflector(get_perm("ENKQAUYWJICOPBLMDXZVFTHRGS")), # b - thin
    Reflector(get_perm("RDOBJNTKVEHMLFCWZAXGYIPSUQ")), # c - thin
]
# Construct an M3 Enigma machine from reflector choice, wheel selection,
# starting rotor positions and an optional plugboard wiring string.
# NOTE(review): this file uses pre-1.0 APIs (`replace(s, " ", "")`) — it
# targets an old Julia version.
function get_M3_enigma(
    ref::Char, # 'B' or 'C'
    wheels::Tuple{Int, Int, Int},
    rotor_pos::String, # the starting rotor positions, ex: "DVM"
    plugboard_str::String=""; # ex: "BI GP HK LD MO RT VS WZ XN YJ"
    emulate_double_stepping::Bool=true,
    )

    if ref == 'B'
        reflector = ENIGMA_REFLECTORS[1]
    elseif ref == 'C'
        reflector = ENIGMA_REFLECTORS[2]
    else
        error("unknown reflector $ref")
    end

    # rotors are stored right-to-left internally, hence the reversed order
    rotors = ENIGMA_ROTORS[[wheels[3], wheels[2], wheels[1]]]
    rotors[1].rotor_position = rotor_pos[3]-'A'
    rotors[2].rotor_position = rotor_pos[2]-'A'
    rotors[3].rotor_position = rotor_pos[1]-'A'

    # plugboard: identity permutation, then swap each given letter pair
    plugboard = Plugboard(collect(1:26))
    plugboard_str = replace(plugboard_str, " ", "")
    i = 1
    while i < length(plugboard_str)
        a = char2ind(plugboard_str[i])
        b = char2ind(plugboard_str[i+1])
        plugboard.perm[a] = b
        plugboard.perm[b] = a
        i += 2
    end

    alphabet = collect('A':'Z')
    Enigma(plugboard, rotors, reflector, alphabet, emulate_double_stepping)
end
# Encode `input` through `enigma` character by character, advancing the rotor
# state as each character is enciphered; returns the encoded string.
function encode!(enigma::Enigma, input::String)
    # Build the result in an IOBuffer instead of repeated string concatenation,
    # which is O(n^2) in the input length.
    io = IOBuffer()
    for c in input
        print(io, ind2char(encode!(enigma, char2ind(c))))
    end
    return String(take!(io))
end
let
enigma = get_M3_enigma('B', (1,2,3), "AAA")
input = "THEQ"*"UICK"*"BROW"*"NFOX"*"JUMP"*"EDOV"*"ERTH"*"ELAZ"*"YDOG"
output = "OPCI"*"LLAZ"*"FXLQ"*"TDNL"*"GGLE"*"YOHH"*"CJGA"*"XWTW"*"AMBH"
@assert encode!(enigma, input) == output
enigma = get_M3_enigma('B', (1,2,3), "AAA")
@assert encode!(enigma, output) == input
enigma = get_M3_enigma('B', (1,2,3), "AAA")
input2 = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
output = "<KEY>QQCUUQTPPHBIEHTUVGCEGPEYMWICGKWJCUFKLUIDMJDIVPJDM"
pred = replace(encode!(enigma, input2), " ", "")
@assert pred == output
# Different rotor choice (III, V, VIII) and start positions "MCJ".
# NOTE(review): the expected string for the long all-'A' test above contains a
# "<KEY>" placeholder (redacted), so that assertion cannot pass as written.
enigma = get_M3_enigma('B', (3,5,8), "MCJ")
output2 = "JFHJ"*"BXOD"*"IIZK"*"CHJU"*"TGOL"*"FIYX"*"RSLF"*"KKPO"*"JVRO"
@assert encode!(enigma, input) == output2
enigma = get_M3_enigma('B', (3,5,8), "MCJ")
@assert encode!(enigma, output2) == input
# Reflector C plus a plugboard wiring; fourth argument is the plug pairs.
enigma = get_M3_enigma('C', (8,2,4), "PXM", "AB CY ET FQ HZ LG MJ OK US WX")
output3 = "ULQC"*"XYXN"*"JMKV"*"FNAO"*"IVZS"*"OFTW"*"GQBB"*"XNWW"*"BIYS"
@assert encode!(enigma, input) == output3
enigma = get_M3_enigma('C', (8,2,4), "PXM", "AB CY ET FQ HZ LG MJ OK US WX")
@assert encode!(enigma, output3) == input
end | [
27,
7856,
261,
480,
29,
44,
496,
988,
23127,
14,
4834,
13495,
198,
8818,
651,
62,
16321,
7,
2536,
3712,
10100,
8,
198,
220,
220,
220,
279,
796,
15690,
7,
5317,
11,
4129,
7,
2536,
4008,
198,
220,
220,
220,
329,
357,
72,
11,
66,
... | 2.259754 | 1,871 |
<filename>src/optimizers/adam.jl
export Adam
"""
Adam
Adam Optimizer
# References
* <NAME> Ba, ["Adam: A Method for Stochastic Optimization"](http://arxiv.org/abs/1412.6980v8), ICLR 2015.
"""
mutable struct Adam
alpha::Float64
beta1::Float64
beta2::Float64
eps::Float64
states::IdDict
end
Adam() = Adam(0.001, 0.9, 0.999, 1e-8, IdDict())
Adam(alpha)= Adam(alpha, 0.9, 0.999, 1e-8, IdDict())
function (opt::Adam)(param::Array{T}, grad::Array{T}) where T
@assert length(param) == length(grad)
state = get!(opt.states, param, nothing)
if state == nothing
m, v, t = zeros(param), zeros(grad), 1
else
m::Array{T}, v::Array{T}, t::Int = state
end
BLAS.scale!(T(opt.beta1), m)
BLAS.axpy!(T(1.0 - opt.beta1), grad, m)
BLAS.scale!(T(opt.beta2), v)
@inbounds for i = 1:length(grad)
grad[i] = grad[i] * grad[i]
end
BLAS.axpy!(T(1.0 - opt.beta2), grad, v)
fix1 = 1.0 - opt.beta1 ^ t
fix2 = 1.0 - opt.beta2 ^ t
rate = opt.alpha * sqrt(fix2) / fix1
@inbounds for i = 1:length(param)
param[i] -= rate * m[i] / (sqrt(v[i]) + T(opt.eps))
end
opt.states[param] = (m, v, t + 1)
fill!(grad, T(0.0))
end
# Convenience overload for autograd variables: update the stored data array
# using the stored gradient array.
# NOTE(review): `Var` is defined elsewhere in this package — assumed to carry
# `.data` and `.grad` arrays of matching shape; confirm against its definition.
(opt::Adam)(x::Var) = opt(x.data, x.grad)
"""
    update_slow!(opt::Adam, param::Array{T}, grad::Array{T}) where T

Reference (allocating) implementation of the Adam update, kept for
readability and for checking the optimized functor. Applies one step to
`param` in place and zeroes `grad`.
"""
function update_slow!(opt::Adam, param::Array{T}, grad::Array{T}) where T
    state = get(opt.states, param, nothing)
    if state === nothing
        # First update for this parameter: fresh moments, step counter t = 1.
        m, v, t = zeros(T, size(param)), zeros(T, size(grad)), 1
    else
        m, v, t = state
    end
    # Exponential moving averages of the gradient and its elementwise square
    # (allocating form: m_new = beta1*m + (1-beta1)*grad, likewise for v).
    m += (1.0 - opt.beta1) * (grad - m)
    v += (1.0 - opt.beta2) * (grad .* grad - v)
    # Bias-corrected effective step size.
    fix1 = 1.0 - opt.beta1 ^ t
    fix2 = 1.0 - opt.beta2 ^ t
    rate = opt.alpha * sqrt(fix2) / fix1
    for i in eachindex(param)
        param[i] -= rate * m[i] / (sqrt(v[i]) + T(opt.eps))
    end
    opt.states[param] = (m, v, t + 1)
    fill!(grad, T(0.0))
end
| [
27,
34345,
29,
10677,
14,
40085,
11341,
14,
324,
321,
13,
20362,
198,
39344,
7244,
198,
198,
37811,
198,
220,
220,
220,
7244,
198,
198,
23159,
30011,
7509,
198,
198,
2,
31458,
198,
9,
1279,
20608,
29,
8999,
11,
14631,
23159,
25,
317... | 2.057269 | 908 |
<reponame>UnofficialJuliaMirror/AWSSDK.jl-0d499d91-6ae5-5d63-9313-12987b87d5ad
#==============================================================================#
# Athena.jl
#
# This file is generated from:
# https://github.com/aws/aws-sdk-js/blob/master/apis/athena-2017-05-18.normal.json
#==============================================================================#
__precompile__()
module Athena
using AWSCore
"""
using AWSSDK.Athena.batch_get_named_query
batch_get_named_query([::AWSConfig], arguments::Dict)
batch_get_named_query([::AWSConfig]; NamedQueryIds=)
using AWSCore.Services.athena
athena([::AWSConfig], "BatchGetNamedQuery", arguments::Dict)
athena([::AWSConfig], "BatchGetNamedQuery", NamedQueryIds=)
# BatchGetNamedQuery Operation
Returns the details of a single named query or a list of up to 50 queries, which you provide as an array of query ID strings. Use [ListNamedQueries](@ref) to get the list of named query IDs. If information could not be retrieved for a submitted query ID, information about the query ID submitted is listed under [UnprocessedNamedQueryId](@ref). Named queries are different from executed queries. Use [BatchGetQueryExecution](@ref) to get details about each unique query execution, and [ListQueryExecutions](@ref) to get a list of query execution IDs.
# Arguments
## `NamedQueryIds = [::String, ...]` -- *Required*
An array of query IDs.
# Returns
`BatchGetNamedQueryOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetNamedQuery)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file):
# keyword form with default config -> explicit-config form -> AWSCore call.
@inline batch_get_named_query(aws::AWSConfig=default_aws_config(); args...) = batch_get_named_query(aws, args)
@inline batch_get_named_query(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "BatchGetNamedQuery", args)
@inline batch_get_named_query(args) = batch_get_named_query(default_aws_config(), args)
"""
using AWSSDK.Athena.batch_get_query_execution
batch_get_query_execution([::AWSConfig], arguments::Dict)
batch_get_query_execution([::AWSConfig]; QueryExecutionIds=)
using AWSCore.Services.athena
athena([::AWSConfig], "BatchGetQueryExecution", arguments::Dict)
athena([::AWSConfig], "BatchGetQueryExecution", QueryExecutionIds=)
# BatchGetQueryExecution Operation
Returns the details of a single query execution or a list of up to 50 query executions, which you provide as an array of query execution ID strings. To get a list of query execution IDs, use [ListQueryExecutions](@ref). Query executions are different from named (saved) queries. Use [BatchGetNamedQuery](@ref) to get details about named queries.
# Arguments
## `QueryExecutionIds = [::String, ...]` -- *Required*
An array of query execution IDs.
# Returns
`BatchGetQueryExecutionOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetQueryExecution)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline batch_get_query_execution(aws::AWSConfig=default_aws_config(); args...) = batch_get_query_execution(aws, args)
@inline batch_get_query_execution(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "BatchGetQueryExecution", args)
@inline batch_get_query_execution(args) = batch_get_query_execution(default_aws_config(), args)
"""
using AWSSDK.Athena.create_named_query
create_named_query([::AWSConfig], arguments::Dict)
create_named_query([::AWSConfig]; Name=, Database=, QueryString=, <keyword arguments>)
using AWSCore.Services.athena
athena([::AWSConfig], "CreateNamedQuery", arguments::Dict)
athena([::AWSConfig], "CreateNamedQuery", Name=, Database=, QueryString=, <keyword arguments>)
# CreateNamedQuery Operation
Creates a named query.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the *Amazon Athena User Guide*.
# Arguments
## `Name = ::String` -- *Required*
The plain language name for the query.
## `Description = ::String`
A brief explanation of the query.
## `Database = ::String` -- *Required*
The database to which the query belongs.
## `QueryString = ::String` -- *Required*
The text of the query itself. In other words, all query statements.
## `ClientRequestToken = ::String`
A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another `CreateNamedQuery` request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the `QueryString`, an error is returned.
**Important**
> This token is listed as not required because AWS SDKs (for example the AWS SDK for Java) auto-generate the token for users. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the action will fail.
# Returns
`CreateNamedQueryOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNamedQuery)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline create_named_query(aws::AWSConfig=default_aws_config(); args...) = create_named_query(aws, args)
@inline create_named_query(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "CreateNamedQuery", args)
@inline create_named_query(args) = create_named_query(default_aws_config(), args)
"""
using AWSSDK.Athena.delete_named_query
delete_named_query([::AWSConfig], arguments::Dict)
delete_named_query([::AWSConfig]; NamedQueryId=)
using AWSCore.Services.athena
athena([::AWSConfig], "DeleteNamedQuery", arguments::Dict)
athena([::AWSConfig], "DeleteNamedQuery", NamedQueryId=)
# DeleteNamedQuery Operation
Deletes a named query.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the *Amazon Athena User Guide*.
# Arguments
## `NamedQueryId = ::String` -- *Required*
The unique ID of the query to delete.
# Returns
`DeleteNamedQueryOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNamedQuery)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline delete_named_query(aws::AWSConfig=default_aws_config(); args...) = delete_named_query(aws, args)
@inline delete_named_query(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "DeleteNamedQuery", args)
@inline delete_named_query(args) = delete_named_query(default_aws_config(), args)
"""
using AWSSDK.Athena.get_named_query
get_named_query([::AWSConfig], arguments::Dict)
get_named_query([::AWSConfig]; NamedQueryId=)
using AWSCore.Services.athena
athena([::AWSConfig], "GetNamedQuery", arguments::Dict)
athena([::AWSConfig], "GetNamedQuery", NamedQueryId=)
# GetNamedQuery Operation
Returns information about a single query.
# Arguments
## `NamedQueryId = ::String` -- *Required*
The unique ID of the query. Use [ListNamedQueries](@ref) to get query IDs.
# Returns
`GetNamedQueryOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline get_named_query(aws::AWSConfig=default_aws_config(); args...) = get_named_query(aws, args)
@inline get_named_query(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "GetNamedQuery", args)
@inline get_named_query(args) = get_named_query(default_aws_config(), args)
"""
using AWSSDK.Athena.get_query_execution
get_query_execution([::AWSConfig], arguments::Dict)
get_query_execution([::AWSConfig]; QueryExecutionId=)
using AWSCore.Services.athena
athena([::AWSConfig], "GetQueryExecution", arguments::Dict)
athena([::AWSConfig], "GetQueryExecution", QueryExecutionId=)
# GetQueryExecution Operation
Returns information about a single execution of a query. Each time a query executes, information about the query execution is saved with a unique ID.
# Arguments
## `QueryExecutionId = ::String` -- *Required*
The unique ID of the query execution.
# Returns
`GetQueryExecutionOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline get_query_execution(aws::AWSConfig=default_aws_config(); args...) = get_query_execution(aws, args)
@inline get_query_execution(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "GetQueryExecution", args)
@inline get_query_execution(args) = get_query_execution(default_aws_config(), args)
"""
using AWSSDK.Athena.get_query_results
get_query_results([::AWSConfig], arguments::Dict)
get_query_results([::AWSConfig]; QueryExecutionId=, <keyword arguments>)
using AWSCore.Services.athena
athena([::AWSConfig], "GetQueryResults", arguments::Dict)
athena([::AWSConfig], "GetQueryResults", QueryExecutionId=, <keyword arguments>)
# GetQueryResults Operation
Returns the results of a single query execution specified by `QueryExecutionId`. This request does not execute the query but returns results. Use [StartQueryExecution](@ref) to run a query.
# Arguments
## `QueryExecutionId = ::String` -- *Required*
The unique ID of the query execution.
## `NextToken = ::String`
The token that specifies where to start pagination if a previous request was truncated.
## `MaxResults = ::Int`
The maximum number of results (rows) to return in this request.
# Returns
`GetQueryResultsOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline get_query_results(aws::AWSConfig=default_aws_config(); args...) = get_query_results(aws, args)
@inline get_query_results(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "GetQueryResults", args)
@inline get_query_results(args) = get_query_results(default_aws_config(), args)
"""
using AWSSDK.Athena.list_named_queries
list_named_queries([::AWSConfig], arguments::Dict)
list_named_queries([::AWSConfig]; <keyword arguments>)
using AWSCore.Services.athena
athena([::AWSConfig], "ListNamedQueries", arguments::Dict)
athena([::AWSConfig], "ListNamedQueries", <keyword arguments>)
# ListNamedQueries Operation
Provides a list of all available query IDs.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the *Amazon Athena User Guide*.
# Arguments
## `NextToken = ::String`
The token that specifies where to start pagination if a previous request was truncated.
## `MaxResults = ::Int`
The maximum number of queries to return in this request.
# Returns
`ListNamedQueriesOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline list_named_queries(aws::AWSConfig=default_aws_config(); args...) = list_named_queries(aws, args)
@inline list_named_queries(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "ListNamedQueries", args)
@inline list_named_queries(args) = list_named_queries(default_aws_config(), args)
"""
using AWSSDK.Athena.list_query_executions
list_query_executions([::AWSConfig], arguments::Dict)
list_query_executions([::AWSConfig]; <keyword arguments>)
using AWSCore.Services.athena
athena([::AWSConfig], "ListQueryExecutions", arguments::Dict)
athena([::AWSConfig], "ListQueryExecutions", <keyword arguments>)
# ListQueryExecutions Operation
Provides a list of all available query execution IDs.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the *Amazon Athena User Guide*.
# Arguments
## `NextToken = ::String`
The token that specifies where to start pagination if a previous request was truncated.
## `MaxResults = ::Int`
The maximum number of query executions to return in this request.
# Returns
`ListQueryExecutionsOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline list_query_executions(aws::AWSConfig=default_aws_config(); args...) = list_query_executions(aws, args)
@inline list_query_executions(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "ListQueryExecutions", args)
@inline list_query_executions(args) = list_query_executions(default_aws_config(), args)
"""
using AWSSDK.Athena.start_query_execution
start_query_execution([::AWSConfig], arguments::Dict)
start_query_execution([::AWSConfig]; QueryString=, ResultConfiguration=, <keyword arguments>)
using AWSCore.Services.athena
athena([::AWSConfig], "StartQueryExecution", arguments::Dict)
athena([::AWSConfig], "StartQueryExecution", QueryString=, ResultConfiguration=, <keyword arguments>)
# StartQueryExecution Operation
Runs (executes) the SQL query statements contained in the `Query` string.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the *Amazon Athena User Guide*.
# Arguments
## `QueryString = ::String` -- *Required*
The SQL query statements to be executed.
## `ClientRequestToken = ::String`
A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another `StartQueryExecution` request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the `QueryString`, an error is returned.
**Important**
> This token is listed as not required because AWS SDKs (for example the AWS SDK for Java) auto-generate the token for users. If you are not using the AWS SDK or the AWS CLI, you must provide this token or the action will fail.
## `QueryExecutionContext = ["Database" => ::String]`
The database within which the query executes.
## `ResultConfiguration = [ ... ]` -- *Required*
Specifies information about where and how to save the results of the query execution.
```
ResultConfiguration = [
"OutputLocation" => <required> ::String,
"EncryptionConfiguration" => [
"EncryptionOption" => <required> "SSE_S3", "SSE_KMS" or "CSE_KMS",
"KmsKey" => ::String
]
]
```
# Returns
`StartQueryExecutionOutput`
# Exceptions
`InternalServerException`, `InvalidRequestException` or `TooManyRequestsException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartQueryExecution)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline start_query_execution(aws::AWSConfig=default_aws_config(); args...) = start_query_execution(aws, args)
@inline start_query_execution(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "StartQueryExecution", args)
@inline start_query_execution(args) = start_query_execution(default_aws_config(), args)
"""
using AWSSDK.Athena.stop_query_execution
stop_query_execution([::AWSConfig], arguments::Dict)
stop_query_execution([::AWSConfig]; QueryExecutionId=)
using AWSCore.Services.athena
athena([::AWSConfig], "StopQueryExecution", arguments::Dict)
athena([::AWSConfig], "StopQueryExecution", QueryExecutionId=)
# StopQueryExecution Operation
Stops a query execution.
For code samples using the AWS SDK for Java, see [Examples and Code Samples](http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the *Amazon Athena User Guide*.
# Arguments
## `QueryExecutionId = ::String` -- *Required*
The unique ID of the query execution to stop.
# Returns
`StopQueryExecutionOutput`
# Exceptions
`InternalServerException` or `InvalidRequestException`.
See also: [AWS API Documentation](https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopQueryExecution)
"""
# Generated wrappers (do not hand-edit; see generation note at top of file).
@inline stop_query_execution(aws::AWSConfig=default_aws_config(); args...) = stop_query_execution(aws, args)
@inline stop_query_execution(aws::AWSConfig, args) = AWSCore.Services.athena(aws, "StopQueryExecution", args)
@inline stop_query_execution(args) = stop_query_execution(default_aws_config(), args)
end # module Athena
#==============================================================================#
# End of file
#==============================================================================#
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
14,
12298,
5432,
48510,
13,
20362,
12,
15,
67,
28324,
67,
6420,
12,
21,
3609,
20,
12,
20,
67,
5066,
12,
6052,
1485,
12,
1065,
44183,
65,
5774,
67,
20,
324,
198,
2,
23... | 3.112891 | 5,368 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.