content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
<reponame>JamesKat94/HoJBot.jl
const QUOTE_CACHE = Cache{String,Float64}(Minute(1))
finnhub_token() = get(ENV, "FINNHUB_TOKEN", "")
function commander(c::Client, m::Message, ::Val{:ig})
@debug "ig_commander called"
command = extract_command("ig", m.content)
args = split(command)
@debug "parse result" command args
if length(args) == 0 ||
args[1] ∉ ["start-game", "abandon-game", "quote", "chart",
"buy", "sell", "rank", "view",
"abandon-game-really"]
help_commander(c, m, :ig)
return
end
user = @discord retrieve(c, User, m.author.id)
try
ig_execute(c, m, user, Val(Symbol(args[1])), args[2:end])
catch ex
if ex isa IgUserError
discord_reply(c, m, ig_hey(user.username, ex.message))
else
discord_reply(c, m, ig_hey(user.username,
"sorry, looks like you've hit a bug. Please report the problem."))
@error "Internal error" ex
Base.showerror(stdout, ex, catch_backtrace())
end
end
end
function help_commander(c::Client, m::Message, ::Val{:ig})
discord_reply(c, m, """
Play the investment game (ig). US market only for now.
```
ig start-game
ig abandon-game
```
Research stocks:
```
ig quote <symbol> - get current price quote
ig chart <symbol> [period] - historical price chart
Period is optional. Examples are: 200d, 36m, or 10y
for 200 days, 36 months, or 10 years respectively.
```
Manage portfolio:
```
ig view - view holdings and current market values
ig buy <n> <symbol> - buy <n> shares of a stock
ig sell <n> <symbol> - sell <n> shares of a stock
```
How are you doing?
```
ig rank [n] - display top <n> portfolios, defaults to 5.
```
""")
end
"""
ig_execute
This function executes any of the following commands.
# start-game
Start a new game by giving the player \$1MM USD.
# abandon-game
Abandon current game by wiping out the player's record.
# quote <symbol>
Fetch current market price of a stock.
# chart <symbol> <lookback>
The `lookback` argument is optional. If it is not specified, then it is defaulted
to 1 year. Otherwise, it takes the format of a number followed by `y`, `m`, or `d`
for number of years, months, or days respectively.
# buy <n> <symbol>
Buy `n` shares of a stock at the current market price. Player must have enough
cash to settle the trade.
# sell <n> <symbol>
Sell `n` shares of a stock at the current market price. Player must have
that many shares in the portfolio.
# view
Display detailed information about the portfolio, its holdings, and total market value.
# rank
Display top portfolios with highest market value.
"""
function ig_execute end
function ig_execute(c::Client, m::Message, user::User, ::Val{Symbol("start-game")}, args)
ig_affirm_non_player(user.id)
pf = ig_start_new_game(user.id)
discord_reply(c, m, ig_hey(user.username, "you have \$" * format_amount(pf.cash) *
" in your shiny new portfolio now! Good luck!"))
end
function ig_execute(c::Client, m::Message, user::User, ::Val{Symbol("abandon-game")}, args)
ig_affirm_player(user.id)
discord_reply(c, m, ig_hey(user.username,
"do you REALLY want to abandon the game and wipe out all of your data? " *
"If so, type `ig abandon-game-really`."))
end
function ig_execute(c::Client, m::Message, user::User, ::Val{Symbol("abandon-game-really")}, args)
ig_affirm_player(user.id)
ig_remove_game(user.id)
discord_reply(c, m, ig_hey(user.username,
"your investment game is now over. Play again soon!"))
end
function ig_execute(c::Client, m::Message, user::User, ::Val{:buy}, args)
ig_affirm_player(user.id)
length(args) == 2 ||
throw(IgUserError("Invalid command. Try `ig buy 100 aapl` to buy 100 shares of Apple Inc."))
symbol = strip(uppercase(args[2]))
shares = tryparse(Int, args[1])
shares !== nothing || throw(IgUserError("please enter number of shares as a number: `$shares`"))
purchase_price = ig_buy(user.id, symbol, shares)
discord_reply(c, m, ig_hey(user.username, "you have bought $shares shares of $symbol at \$" *
format_amount(purchase_price)))
end
function ig_execute(c::Client, m::Message, user::User, ::Val{:sell}, args)
ig_affirm_player(user.id)
length(args) == 2 ||
throw(IgUserError("Invalid command. Try `ig sell 100 aapl` to sell 100 shares of Apple Inc."))
symbol = strip(uppercase(args[2]))
shares = tryparse(Int, args[1])
shares !== nothing || throw(IgUserError("please enter number of shares as a number: `$shares`"))
current_price = ig_sell(user.id, symbol, shares)
discord_reply(c, m, ig_hey(user.username, "you have sold $shares shares of $symbol at \$" *
format_amount(current_price)))
end
function ig_execute(c::Client, m::Message, user::User, ::Val{:view}, args)
ig_affirm_player(user.id)
df = ig_mark_to_market_portfolio(user.id)
ig_reformat_view!(df)
view = length(args) == 1 && args[1] == "simple" ? SimpleView() : PrettyView()
table = ig_view_table(view, df)
total_str = format_amount(round(Int, sum(df.amount)))
discord_reply(c, m, ig_hey(user.username,
"""
here is your portfolio:
```
$table
```
Total portfolio Value: $total_str
"""))
end
# Shorten colummn headings for better display in Discord
function ig_reformat_view!(df::AbstractDataFrame)
select!(df, Not(:purchase_price))
rename!(df, "current_price" => "price", "market_value" => "amount")
end
function ig_execute(c::Client, m::Message, user::User, ::Val{:quote}, args)
length(args) == 1 ||
throw(IgUserError("Invalid command. Try `ig quote aapl` to fetch the current price of Apple Inc."))
symbol = strip(uppercase(args[1]))
price = ig_get_real_time_quote(symbol)
discord_reply(c, m, ig_hey(user.username, "the current price of $symbol is " * format_amount(price)))
end
function ig_execute(c::Client, m::Message, user::User, ::Val{:chart}, args)
1 <= length(args) <= 2 ||
throw(IgUserError("Invalid command. Try `ig chart aapl` to see a chart for Apple Inc."))
ch = discord_channel(c, m.channel_id)
symbol = strip(uppercase(args[1]))
lookback = length(args) == 1 ? Year(1) : date_period(lowercase(args[2]))
from_date, to_date = recent_date_range(lookback)
df = ig_historical_prices(symbol, from_date, to_date)
filename = ig_chart(symbol, df.Date, df."Adj Close")
discord_upload_file(c, ch, filename;
content = ig_hey(user.username, "here is the chart for $symbol for the past $lookback. " *
"To plot a chart with different time horizon, " *
"try something like `ig chart $symbol 90d` or `ig chart $symbol 10y`."))
end
function ig_execute(c::Client, m::Message, user::User, ::Val{:rank}, args)
length(args) <= 1 ||
throw(IgUserError("Invalid command. Try `ig rank` or `ig rank 10`"))
n = length(args) == 0 ? 5 : tryparse(Int, args[1])
n !== nothing || throw(IgUserError("invalid rank argument `$(args[1])`. " *
"Try `ig rank` or `ig rank 10`"))
rt = ig_ranking_table(c)
rt = rt[1:min(n, nrow(rt)), :] # get top N results
rt_str = ig_view_table(PrettyView(), rt)
discord_reply(c, m, ig_hey(user.username, """here's the current ranking:
```
$rt_str
```
"""))
end
function ig_ranking_table(c::Client)
valuations = ig_value_all_portfolios()
if length(valuations) > 0
users_dict = retrieve_users(c, [v.id for v in valuations])
@debug "ig_ranking_table" valuations
@debug "ig_ranking_table" users_dict
df = DataFrame(
rank = 1:length(valuations),
player = [users_dict[v.id].username for v in valuations],
portfolio_value = [v.total for v in valuations],
)
else
return DataFrame(player = String[], portfolio_value = Float64[])
end
end
@mockable function retrieve_users(c::Client, ids::Vector{UInt64})
futures = retrieve.(Ref(c), User, ids)
responses = fetch.(futures)
unknown = User(; id = UInt64(0), username = "Unknown")
return Dict(k => res.val === nothing ? unknown : res.val for (k, res) in zip(ids, responses))
end
function ig_value_all_portfolios()
pfs = ig_load_all_portfolios()
valuations = []
for (id, pf) in pfs
@debug "Evaluating portfolio" id pf
df = ig_mark_to_market!(ig_holdings_data_frame(pf))
mv = nrow(df) > 0 ? sum(df.market_value) : 0.0
cash = pf.cash
total = mv + cash
push!(valuations, (; id, mv, cash, total))
end
sort!(valuations; lt = (x,y) -> x.total < y.total, rev = true)
# @info "ig_value_all_portfolios result" valuations
valuations
end
"Format money amount"
format_amount(x::Real) = format(x, commas = true, precision = 2)
format_amount(x::Integer) = format(x, commas = true)
"Pretty table formatters"
decimal_formatter(v, i, j) = v isa Real ? format_amount(v) : v
integer_formatter(v, i, j) = v isa Real ? format_amount(round(Int, v)) : v
"Hey someone"
ig_hey(username::AbstractString, message::AbstractString) = "Hey, " * username * ", " * message
"File location of the game file for a user"
ig_file_path(user_id::UInt64) = joinpath(ig_data_directory(), "$user_id.json")
"Directory of the investment game data files"
ig_data_directory() = joinpath("data", "ig")
"Returns true if user already has a game in progress."
ig_is_player(user_id::UInt64) = isfile(ig_file_path(user_id))
"Start a new game file"
function ig_start_new_game(user_id::UInt64)
pf = IgPortfolio(1_000_000.00, IgHolding[])
ig_save_portfolio(user_id, pf)
return pf
end
"Destroy the existing game file for a user."
ig_remove_game(user_id::UInt64) = rm(ig_file_path(user_id))
"Affirms the user has a game or throw an exception."
function ig_affirm_player(user_id::UInt64)
ig_is_player(user_id) || throw(IgUserError("you don't have a game yet."))
return nothing
end
"Affirms the user does not have game or throw an exception."
function ig_affirm_non_player(user_id::UInt64)
!ig_is_player(user_id) || throw(IgUserError(
"you already have a game running. Use `ig view` to see your current portfolio."
))
return nothing
end
"Save a user portfolio in the data directory."
function ig_save_portfolio(user_id::UInt64, pf::IgPortfolio)
@debug "Saving portfolio" user_id
path = ig_file_path(user_id)
mkpath(dirname(path)) # ensure directory is there
write(path, JSON3.write(pf))
return nothing
end
"Load the portfolio for a single user"
function ig_load_portfolio(user_id::UInt64)
@debug "Loading portfolio" user_id
path = ig_file_path(user_id)
return ig_load_portfolio(path)
end
"Load a single portfolio from game file"
function ig_load_portfolio(path::AbstractString)
mkpath(dirname(path)) # ensure directory is there
bytes = read(path)
return JSON3.read(bytes, IgPortfolio)
end
"Extract user id from the portfolio data file"
function ig_user_id_from_path(path::AbstractString)
filename = basename(path)
filename_without_extension = replace(filename, r"\.json$" => "")
return parse(UInt64, filename_without_extension)
end
"Load all game files"
function ig_load_all_portfolios()
dir = ig_data_directory()
files = readdir(dir)
user_ids = ig_user_id_from_path.(files)
return Dict(user_id => ig_load_portfolio(joinpath(dir, file))
for (user_id, file) in zip(user_ids, files))
end
"Fetch the current quote of a stock"
@mockable function ig_get_real_time_quote(symbol::AbstractString)
@info "$(now()) real time quote: $symbol"
token = finnhub_token()
length(token) > 0 || throw(IgSystemError("No market price provider. Please report to admin."))
symbol = HTTP.escapeuri(symbol)
response = HTTP.get("https://finnhub.io/api/v1/quote?symbol=$symbol&token=$token")
data = JSON3.read(response.body)
current_price = data.c
current_price > 0 || throw(IgUserError("there is no price for $symbol. Is it a valid stock symbol?"))
current_price
end
"Fetch quote of a stock, but possibly with a time delay."
function ig_get_quote(symbol::AbstractString)
return get!(QUOTE_CACHE, symbol) do
ig_get_real_time_quote(symbol)
end
end
"Buy stock for a specific user at a specific price."
function ig_buy(
user_id::UInt64,
symbol::AbstractString,
shares::Real,
current_price::Real = ig_get_real_time_quote(symbol)
)
@debug "Buying stock" user_id symbol shares
pf = ig_load_portfolio(user_id)
cost = shares * current_price
if pf.cash >= cost
pf.cash -= cost
push!(pf.holdings, IgHolding(symbol, shares, current_date(), current_price))
ig_save_portfolio(user_id, pf)
return current_price
end
throw(IgUserError("you don't have enough cash. " *
"Buying $shares shares of $symbol will cost you $(format_amount(cost)) " *
"but you only have $(format_amount(pf.cash))"))
end
"Sell stock for a specific user. Returns executed price."
function ig_sell(
user_id::UInt64,
symbol::AbstractString,
shares::Real,
current_price::Real = ig_get_real_time_quote(symbol)
)
@debug "Selling stock" user_id symbol shares
pf = ig_load_portfolio(user_id)
pf_new = ig_sell_fifo(pf, symbol, shares, current_price)
ig_save_portfolio(user_id, pf_new)
return current_price
end
"Sell stock based upon FIFO accounting scheme. Returns the resulting `IgPortfolio` object."
function ig_sell_fifo(pf::IgPortfolio, symbol::AbstractString, shares::Real, current_price::Real)
existing_shares = ig_count_shares(pf, symbol)
if existing_shares == 0
throw(IgUserError("you do not have $symbol in your portfolio"))
elseif shares > existing_shares
existing_shares_str = format_amount(round(Int, existing_shares))
throw(IgUserError("you cannot sell more than what you own ($existing_shares_str shares)"))
end
proceeds = shares * current_price
# Construct a new IgPortfolio object that contains the resulting portfolio after
# selling the stock. The following logic does it incrementally but just for documentation
# purpose an alternative algorithm would be to make a copy and then relief the sold lots.
holdings = IgHolding[]
pf_new = IgPortfolio(pf.cash + proceeds, holdings)
remaining = shares # keep track of how much to sell
for h in pf.holdings
if h.symbol != symbol || remaining == 0
push!(holdings, h)
else
if h.shares > remaining # relief lot partially
revised_lot = IgHolding(symbol, h.shares - remaining, h.date, h.purchase_price)
push!(holdings, revised_lot)
remaining = 0
else # relief this lot completely and continue
remaining -= h.shares
end
end
end
return pf_new
end
"""
Returns a data frame for the portfolio holdings.
Note that:
1. It does not include cash portion of the portfolio
2. Multiple lots of the same stock will be in different rows
See also: `ig_grouped_holdings`(@ref)
"""
function ig_holdings_data_frame(pf::IgPortfolio)
return DataFrame(
symbol = [h.symbol for h in pf.holdings],
shares = [h.shares for h in pf.holdings],
purchase_price = [h.purchase_price for h in pf.holdings],
purchase_date = [h.date for h in pf.holdings],
)
end
"Returns grouped holdings by symbol with average purchase price"
function ig_grouped_holdings(df::AbstractDataFrame)
df = combine(groupby(df, :symbol)) do sdf
shares = sum(sdf.shares)
weights = sdf.shares / shares
purchase_price = sum(weights .* sdf.purchase_price)
(; shares, purchase_price)
end
return sort!(df, :symbol)
end
"Return a data frame with the user's portfolio marked to market."
function ig_mark_to_market_portfolio(user_id::UInt64)
pf = ig_load_portfolio(user_id)
df = ig_grouped_holdings(ig_holdings_data_frame(pf))
cash_entry = ig_cash_entry(pf)
if nrow(df) > 0
ig_mark_to_market!(df)
push!(df, cash_entry)
else
df = DataFrame([cash_entry])
end
return df
end
"Return the portoflio cash as named tuple that can be appended to the portfolio data frame."
function ig_cash_entry(pf::IgPortfolio)
return (symbol = "CASH:USD", shares = pf.cash,
purchase_price = 1.0, current_price = 1.0, market_value = pf.cash)
end
"Add columns with current price and market value"
function ig_mark_to_market!(df::AbstractDataFrame)
df.current_price = [ig_get_quote(s) for s in df.symbol]
df.market_value = df.shares .* df.current_price
return df
end
"Format data frame using pretty table"
function ig_view_table(::PrettyView, df::AbstractDataFrame)
return pretty_table(String, df;
formatters = integer_formatter, header = names(df))
end
"Return portfolio view as string in a simple format"
function ig_view_table(::SimpleView, df::AbstractDataFrame)
io = IOBuffer()
# @show "ig_view_table" df
for (i, r) in enumerate(eachrow(df))
if !startswith(r.symbol, "CASH:")
println(io,
i, ". ", r.symbol, ": ",
round(Int, r.shares), " x \$", format_amount(r.price),
" = \$", format_amount(round(Int, r.amount)))
else
println(io,
i, ". ", r.symbol, " = ", format_amount(round(Int, r.amount)))
end
end
return String(take!(io))
end
"Returns a tuple of two dates by looking back from today's date"
function recent_date_range(lookback::DatePeriod)
T = current_date()
to_date = T
from_date = T - lookback
return from_date, to_date
end
@mockable current_date() = today()
"""
Return a data frame with historical prices data. Columns include:
- `Date`
- `Open`
- `High`
- `Low`
- `Close`
- `Adj Close`
- `Volume`
"""
function ig_historical_prices(symbol::AbstractString, from_date::Date, to_date::Date)
from_sec = seconds_since_1970(from_date)
to_sec = seconds_since_1970(to_date + Day(1)) # apparently, Yahoo is exclusive on this end
symbol = HTTP.escapeuri(symbol)
url = "https://query1.finance.yahoo.com/v7/finance/download/$symbol?" *
"period1=$from_sec&period2=$to_sec&interval=1d&events=history&includeAdjustedClose=true"
try
return DataFrame(CSV.File(Downloads.download(url)))
catch ex
if ex isa Downloads.RequestError && ex.response.status == 404
throw(IgUserError("there is no historical prices for $symbol. Is it a valid stock symbol?"))
else
rethrow()
end
end
end
"Plot a simple price chart"
function ig_chart(symbol::AbstractString, dates::Vector{Date}, values::Vector{<:Real})
theme(:dark)
height = 720
width = height * 16 ÷ 9
from_date, to_date = extrema(dates)
last_price_str = format_amount(last(values))
p = plot(dates, values,
title = "$symbol Historical Prices ($from_date to $to_date)\nLast price: $last_price_str",
linewidth = 2,
size = (width, height),
legend = nothing,
tickfontsize = 12,
)
filename = tempname() * ".png"
savefig(p, filename)
return filename
end
"Find lots for a specific stock in the portfolio. Sort by purchase date."
function ig_find_lots(pf::IgPortfolio, symbol::AbstractString)
lots = IgHolding[x for x in pf.holdings if x.symbol == symbol]
return sort(lots, lt = (x,y) -> x.date < y.date)
end
"Return total number of shares for a specific stock in the portfolio."
function ig_count_shares(pf::IgPortfolio, symbol::AbstractString)
lots = ig_find_lots(pf, symbol)
return length(lots) > 0 ? round(sum(lot.shares for lot in lots); digits = 0) : 0
# note that we store shares as Float64 but uses it as Int (for now)
end
seconds_since_1970(d::Date) = (d - Day(719163)).instant.periods.value * 24 * 60 * 60
function date_period(s::AbstractString)
m = match(r"^(\d+)([ymd])$", s)
m !== nothing || throw(IgUserError("invalid date period: $s. Try `5y` or `30m`."))
num = parse(Int, m.captures[1])
dct = Dict("y" => Year, "m" => Month, "d" => Day)
return dct[m.captures[2]](num)
end
| [
27,
7856,
261,
480,
29,
14731,
25881,
5824,
14,
28900,
41,
20630,
13,
20362,
198,
9979,
19604,
23051,
62,
34,
2246,
13909,
796,
34088,
90,
10100,
11,
43879,
2414,
92,
7,
9452,
1133,
7,
16,
4008,
198,
198,
69,
3732,
40140,
62,
30001,... | 2.521862 | 8,142 |
<gh_stars>10-100
function p53(data::Dict)
# Setup basic dimensions of arrays
# Parse & check FEdict data
if :struc_el in keys(data)
struc_el = data[:struc_el]
else
println("No fin_el type specified.")
return
end
ndim = struc_el.ndim
nst = struc_el.nst
fin_el = struc_el.fin_el
@assert typeof(fin_el) <: FiniteElement
if typeof(fin_el) == Line
(nels, nn) = mesh_size(fin_el, struc_el.nxe)
elseif typeof(fin_el) == Triangle || typeof(fin_el) == Quadrilateral
(nels, nn) = mesh_size(fin_el, struc_el.nxe, struc_el.nye)
elseif typeof(fin_el) == Hexahedron
(nels, nn) = mesh_size(fin_el, struc_el.nxe, struc_el.nye, struc_el.nze)
else
println("$(typeof(fin_el)) is not a known finite element.")
return
end
nodof = fin_el.nodof # Degrees of freedom per node
ndof = fin_el.nod * nodof # Degrees of freedom per fin_el
# Update penalty if specified in FEdict
penalty = 1e20
if :penalty in keys(data)
penalty = data[:penalty]
end
# Allocate all arrays
# Start with arrays to be initialized from FEdict
if :properties in keys(data)
prop = zeros(size(data[:properties], 1), size(data[:properties], 2))
for i in 1:size(data[:properties], 1)
prop[i, :] = data[:properties][i, :]
end
else
println("No :properties key found in FEdict")
end
nf = ones(Int, nodof, nn)
if :support in keys(data)
for i in 1:size(data[:support], 1)
nf[:, data[:support][i][1]] = data[:support][i][2]
end
end
x_coords = zeros(nn)
if :x_coords in keys(data)
x_coords = data[:x_coords]
end
y_coords = zeros(nn)
if :y_coords in keys(data)
y_coords = data[:y_coords]
end
z_coords = zeros(nn)
if :z_coords in keys(data)
z_coords = data[:z_coords]
end
etype = ones(Int, nels)
if :etype in keys(data)
etype = data[:etype]
end
# All other arrays
points = zeros(struc_el.nip, ndim)
g = zeros(Int, ndof)
g_coord = zeros(ndim,nn)
fun = zeros(fin_el.nod)
coord = zeros(fin_el.nod, ndim)
gamma = zeros(nels)
jac = zeros(ndim, ndim)
g_num = zeros(Int, fin_el.nod, nels)
der = zeros(ndim, fin_el.nod)
deriv = zeros(ndim, fin_el.nod)
bee = zeros(nst,ndof)
km = zeros(ndof, ndof)
mm = zeros(ndof, ndof)
gm = zeros(ndof, ndof)
kg = zeros(ndof, ndof)
eld = zeros(ndof)
weights = zeros(struc_el.nip)
g_g = zeros(Int, ndof, nels)
num = zeros(Int, fin_el.nod)
actions = zeros(ndof, nels)
displacements = zeros(size(nf, 1), ndim)
gc = ones(ndim)
dee = zeros(nst,nst)
sigma = zeros(nst)
axial = zeros(nels)
formnf!(nodof, nn, nf)
neq = maximum(nf)
println("There are $(neq) equations.\n")
# Find global array sizes
for iel in 1:nels
hexahedron_xz!(iel, x_coords, y_coords, z_coords, coord, num)
num_to_g!(num, nf, g)
g_num[:, iel] = num
g_coord[:, num] = coord'
g_g[:, iel] = g
end
sample!(fin_el, points, weights)
gsm = spzeros(neq, neq)
for iel in 1:nels
deemat!(dee, prop[etype[iel], 1], prop[etype[iel], 2])
num = g_num[:, iel]
coord = g_coord[:, num]' # Transpose
g = g_g[:, iel]
km = zeros(ndof, ndof)
for i in 1:struc_el.nip
shape_der!(der, points, i)
jac = der*coord
detm = det(jac)
jac = inv(jac)
deriv = jac*der
beemat!(bee, deriv)
km += (bee')*dee*bee*detm*weights[i]
end
fsparm!(gsm, g, km)
end
loads = OffsetArray(zeros(neq + 1), 0:neq)
if :loaded_nodes in keys(data)
for i in 1:size(data[:loaded_nodes], 1)
loads[nf[:, data[:loaded_nodes][i][1]]] = data[:loaded_nodes][i][2]
end
end
fixed_freedoms = 0
if :fixed_freedoms in keys(data)
fixed_freedoms = size(data[:fixed_freedoms], 1)
end
no = zeros(Int, fixed_freedoms)
node = zeros(Int, fixed_freedoms)
sense = zeros(Int, fixed_freedoms)
value = zeros(Float64, fixed_freedoms)
if :fixed_freedoms in keys(data) && fixed_freedoms > 0
for i in 1:fixed_freedoms
no[i] = nf[data[:fixed_freedoms][i][2], data[:fixed_freedoms][i][1]]
value[i] = data[:fixed_freedoms][i][3]
gsm[no[i], no[i]] += penalty
loads[no[i]] = gsm[no[i], no[i]] * value[i]
end
end
cfgsm = cholesky(gsm)
loads[1:neq] = cfgsm \ loads[1:neq]
displacements = zeros(size(nf))
for i in 1:size(displacements, 1)
for j in 1:size(displacements, 2)
if nf[i, j] > 0
displacements[i,j] = loads[nf[i, j]]
end
end
end
displacements = displacements'
dis_df = DataFrame(
r_disp = displacements[:, 1],
y_disp = displacements[:, 2],
z_disp = displacements[:, 3]
)
struc_el.nip = 1
points = zeros(struc_el.nip, ndim)
weights = zeros(struc_el.nip)
sample!(fin_el, points, weights)
gc1 = Vector{Float64}()
gc2 = Vector{Float64}()
s1 = Vector{Float64}()
s2 = Vector{Float64}()
s3 = Vector{Float64}()
t1 = Vector{Float64}()
t2 = Vector{Float64}()
t3 = Vector{Float64}()
for iel in 1:nels
deemat!(dee, prop[etype[iel], 1], prop[etype[iel], 2])
num = g_num[:, iel]
coord = g_coord[:, num]'
g = g_g[:, iel]
eld = loads[g]
for i in 1:struc_el.nip
shape_fun!(fun, points, i)
shape_der!(der, points, i)
gc = fun'*coord
jac = inv(der*coord)
deriv = jac*der
beemat!(bee, deriv)
sigma = dee*(bee*eld)
gc1 = append!(gc1, gc[1])
gc2 = append!(gc2, gc[2])
s1 = append!(s1, sigma[1])
s2 = append!(s2, sigma[2])
s3 = append!(s3, sigma[3])
t1 = append!(t1, sigma[4])
t2 = append!(t2, sigma[5])
t3 = append!(t3, sigma[6])
end
end
sigma_df = DataFrame(
r_coord = gc1,
y_coord = gc2,
sig_x = s1,
sig_y = s2,
sig_z = s3,
tau_xy = t1,
tau_yz = t2,
tau_zx = t3
)
fem = PtFEM.jFEM(struc_el, fin_el, ndim, nels, nst, ndof, nn, nodof,
neq, penalty, etype, g, g_g, g_num, nf, no,
node, num, sense, actions, bee, coord, gamma, dee,
der, deriv, displacements, eld, fun, gc, g_coord, jac,
km, mm, kg, cfgsm, loads, points, prop, sigma, value,
weights, x_coords, y_coords, z_coords, axial)
(fem, dis_df, sigma_df)
end
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
8818,
279,
4310,
7,
7890,
3712,
35,
713,
8,
198,
220,
220,
198,
220,
1303,
31122,
4096,
15225,
286,
26515,
198,
220,
220,
198,
220,
1303,
2547,
325,
1222,
2198,
376,
7407,
713,
1366,
198... | 2.008041 | 3,109 |
include("../src/read/read_datasets.jl")
include("../src/utils.jl")
include("../src/pq/PQ.jl");
include("../src/lsq_sparse/LSQ_SPGL1.jl");
include("../src/linscan/Linscan.jl");
function demo_lsq_sparse(
dataset_name="SIFT1M",
nread::Integer=Int(1e4))
# === Hyperparams ===
m = 7 # In LSQ we use m-1 codebooks
h = 256
verbose = true
nquery = Int(1e4)
knn = Int(1e3) # Compute recall up to
b = Int( log2(h) * m )
niter = 10
# === PQ initialization ===
x_train = read_dataset(dataset_name, nread )
d, _ = size( x_train )
C, B, train_error = train_pq(x_train, m, h, verbose)
@printf("Error after PQ is %e\n", train_error)
# === LSQ sparse train ===
ilsiter = 8
icmiter = 4
randord = true
npert = 4
randord = true
S = d*h # SLSQ1 in the paper. Use S = d*h + (d.^2) for SLSQ2
tau = 0.7 # 0.7 for SLSQ1. Use 0.9 for SLSQ2
# Multiply tau times the l1 norm of the PQ solution
taus = zeros( d )
subdims = splitarray( 1:d, m )
for i = 1:m
taus[ subdims[i] ] += sum( abs(C[i]), 2 ) .* tau
end
tau = sum( taus )
spgl1_path = joinpath( pwd(), "matlab/spgl1")
C, B, R, train_error, cbnorms, objs =
train_lsq_sparse(x_train, m, h, niter, ilsiter, icmiter, randord, npert, S, tau,
B, C, eye(Float32, d), spgl1_path)
cbnorms = vec( cbnorms[:] )
# === Encode the base set ===
nread_base = Int(1e6)
x_base = read_dataset(dataset_name * "_base", nread_base )
B_base = randinit(nread_base, m, h) # initialize B at random
ilsiter_base = 16 # LSQ-16 in the paper
for i = 1:ilsiter_base
@printf("Iteration %02d / %02d\n", i, ilsiter_base)
@time B_base = encoding_icm( x_base, B_base, C, icmiter, randord, npert, verbose )
end
base_error = qerror( x_base, B_base, C )
@printf("Error in base is %e\n", base_error)
# Compute and quantize the database norms
B_base_norms = quantize_norms( B_base, C, cbnorms )
db_norms = vec( cbnorms[ B_base_norms ] )
# === Compute recall ===
x_query = read_dataset( dataset_name * "_query", nquery, verbose )
gt = read_dataset( dataset_name * "_groundtruth", nquery, verbose )
if dataset_name == "SIFT1M" || dataset_name == "GIST1M"
gt = gt + 1;
end
gt = convert( Vector{UInt32}, gt[1,1:nquery] )
B_base = convert( Matrix{UInt8}, B_base-1 )
B_base_norms = convert( Vector{UInt8}, B_base_norms-1 )
print("Querying m=$m ... ")
@time dists, idx = linscan_lsq( B_base, x_query, C, db_norms, eye(Float32, d), knn )
println("done")
idx = convert( Matrix{UInt32}, idx );
rec = eval_recall( gt, idx, knn )
end
# train
demo_lsq_sparse()
| [
17256,
7203,
40720,
10677,
14,
961,
14,
961,
62,
19608,
292,
1039,
13,
20362,
4943,
198,
17256,
7203,
40720,
10677,
14,
26791,
13,
20362,
4943,
198,
17256,
7203,
40720,
10677,
14,
79,
80,
14,
47,
48,
13,
20362,
15341,
198,
17256,
7203... | 2.166935 | 1,240 |
"""
Lasso(; alpha = 0.1, tol = 1e-4, mi = 1e+8)
Lasso Regression structure. eEach parameters are as follows:
- `alpha` : leaarning rate.
- `tol` : Allowable error.
- `mi` : Maximum number of learning.
# Example
```jldoctest regression
julia> model = Lasso()
Lasso(Float64[], 0.1, 0.0001, 100000000)
julia> fit!(model, x, t)
3-element Vector{Float64}:
0.0
0.5022766549841176
154.43624186616267
julia> model(x)
20-element Vector{Float64}:
188.64541774549468
187.30088075704523
181.5191726873298
184.15748544986084
185.72158909964372
176.33798923891868
185.80014722707335
184.71565381947883
189.20524796663838
189.67502263476888
189.50409373058318
188.39535519538825
188.481083670683
188.88872347085172
182.8477136378307
188.64156231429416
181.43996475587224
188.9400571253936
179.39836073711297
185.6065850765288
```
"""
mutable struct Lasso
w::Array{Float64, 1}
α::Float64
tol::Float64
mi::Int64
Lasso(; alpha = 0.1, tol = 1e-4, mi = 1e+8) = new(Array{Float64}(undef, 0), alpha, tol, mi)
end
sfvf(x, y) = sign(x) * max(abs(x) - y, 0)
function fit!(model::Lasso, x, t)
function update!(x, t, w, α)
n, d = size(x)
w[1] = mean(t - x' * w[2:end])
wvec = fill!(Array{Float64}(undef, d), w[1])
for k in 1 : n
ww = w[2:end]
ww[k] = 0
q = (t - wvec - x' * ww) ⋅ x[k, :]
r = x[k, :] ⋅ x[k, :]
w[k+1] = sfvf(q / r, α)
end
end
α, tol, mi = model.α, model.tol, model.mi
check_size(x, t)
if ndims(x) == 1 x = x[:, :] end
w = zeros(size(x, 1) + 1)
e = 0.0
for _ in 1 : mi
eb = e
update!(x, t, w, α)
e = sum(abs.(w)) / length(w)
abs(e - eb) <= tol && break
end
model.w = w[end:-1:1]
end
(model::Lasso)(x) = expand(x)' * model.w | [
37811,
198,
220,
220,
220,
406,
28372,
7,
26,
17130,
796,
657,
13,
16,
11,
284,
75,
796,
352,
68,
12,
19,
11,
21504,
796,
352,
68,
10,
23,
8,
198,
43,
28372,
3310,
2234,
4645,
13,
304,
10871,
10007,
389,
355,
5679,
25,
198,
12... | 1.918665 | 959 |
<reponame>SamuelWiqvist/efficient_SDEMEM<gh_stars>0
# script to run inference for the OU SDEMEM model
using Pkg
using LinearAlgebra
using DataFrames
# TODO
# what to do w the adaptive tuning?
# updated resources for sbatch scripts
println("start run script")
# load functions
include(pwd()*"/src/SDEMEM OU process/ou_sdemem.jl")
include(pwd()*"/src/SDEMEM OU process/mcmc.jl")
N_time = 200
M_subjects = 40
nbr_particles = parse(Int,ARGS[1])
ρ = parse(Float64,ARGS[2])
seed = parse(Int,ARGS[3])
y,x,t_vec,dt,η,σ_ϵ,ϕ,prior_parameters_η,prior_parameters_σ_ϵ = set_up(M=M_subjects,N=N_time,seed=seed)
# run MH-Gibbs
# These are now adjusted
R = 60000
burn_in = 10000
job = string(M_subjects)*"_"*string(N_time)*"_"*string(nbr_particles)*"_"*string(ρ)
# hard coded start values
#startval_ϕ = ϕ
#startval_σ_ϵ = σ_ϵ
startval_ϕ = ones(M_subjects,3)
for j = 1:3
μ_0_j, M_0_j, α_j, β_j = prior_parameters_η[j,:]
startval_ϕ[:,j,1] = μ_0_j*ones(M_subjects)
end
startval_σ_ϵ = 0.2
# Set correct start values
startval_ϕ = ϕ
startval_σ_ϵ = σ_ϵ
startval_η = η
Σ_i_σ_ϵ = 0.02^2
Σ_i_ϕ = Matrix{Float64}[]
for i in 1:M_subjects; push!(Σ_i_ϕ, [0.06 0 0;0 0.1 0;0 0 0.06]); end
γ_ϕ_0 = 1.
γ_σ_ϵ_0 = 1.
μ_i_ϕ = startval_ϕ
μ_i_σ_ϵ = startval_σ_ϵ
α_star_ϕ = 0.25
α_star_σ_ϵ = 0.25
log_λ_i_ϕ = log.(2.4/sqrt(3)*ones(M_subjects))
log_λ_i_σ_ϵ = log(2.4)
update_interval = 1
α_power = 0.7
start_update = 100
# estimate parameters using exact Gibbs sampling
# set random numbers
Random.seed!(seed)
run_time_cpmmh = @elapsed chain_ϕ_cpmmh, chain_σ_ϵ_cpmmh, chain_η_cpmmh, accept_vec_cpmmh = gibbs_cpmmh(R,
y,
dt,
Σ_i_σ_ϵ,
Σ_i_ϕ,
γ_ϕ_0,
γ_σ_ϵ_0,
μ_i_ϕ,
μ_i_σ_ϵ,
α_star_ϕ,
α_star_σ_ϵ,
log_λ_i_ϕ,
log_λ_i_σ_ϵ,
update_interval,
start_update,
α_power,
startval_ϕ,
startval_σ_ϵ,
startval_η,
prior_parameters_η,
prior_parameters_σ_ϵ,
nbr_particles,
ρ);
println(run_time_cpmmh)
println(sum(accept_vec_cpmmh[1,:])/(M_subjects*R))
println(sum(accept_vec_cpmmh[2,:])/R)
println(sum(accept_vec_cpmmh[3,:])/(3*R))
sim_data = zeros(4,1)
sim_data[1] = run_time_cpmmh
sim_data[2] = sum(accept_vec_cpmmh[1,:])/(M_subjects*R)
sim_data[3] = sum(accept_vec_cpmmh[2,:])/R
sim_data[4] = sum(accept_vec_cpmmh[3,:])/(3*R)
# Save results
chain_ϕ_export = zeros(M_subjects*3, R)
idx = 0
for m = 1:M_subjects
for j = 1:3
global idx = idx + 1
chain_ϕ_export[idx,:] = chain_ϕ_cpmmh[m,j,:]
end
end
CSV.write("data/SDEMEM OU/cpmmh for plot mess vs N/sim_data_"*string(seed)*"_"*job*".csv", DataFrame(sim_data))
CSV.write("data/SDEMEM OU/cpmmh for plot mess vs N/chain_sigma_epsilon_"*string(seed)*"_"*job*".csv", DataFrame(chain_σ_ϵ_cpmmh'))
CSV.write("data/SDEMEM OU/cpmmh for plot mess vs N/chain_eta_"*string(seed)*"_"*job*".csv", DataFrame(chain_η_cpmmh'))
CSV.write("data/SDEMEM OU/cpmmh for plot mess vs N/chain_phi_"*string(seed)*"_"*job*".csv", DataFrame(chain_ϕ_export'))
| [
27,
7856,
261,
480,
29,
16305,
2731,
54,
25011,
85,
396,
14,
16814,
62,
10305,
3620,
3620,
27,
456,
62,
30783,
29,
15,
198,
2,
4226,
284,
1057,
32278,
329,
262,
47070,
9834,
3620,
3620,
2746,
198,
3500,
350,
10025,
198,
3500,
44800,... | 1.377896 | 3,755 |
<filename>test/deabigdata.jl<gh_stars>1-10
# Tests for Big Data Radial DEA Models
@testset "BigData RadialDEAModel" begin
## Test Radial DEA Models with FLS Book data
X = [5 13; 16 12; 16 26; 17 15; 18 14; 23 6; 25 10; 27 22; 37 14; 42 25; 5 17]
Y = [12; 14; 25; 26; 8; 9; 27; 30; 31; 26; 12]
# Input oriented CRS
deaio = deabigdata(X, Y, orient = :Input, rts = :CRS)
@test typeof(deaio) == RadialDEAModel
@test nobs(deaio) == 11
@test ninputs(deaio) == 2
@test noutputs(deaio) == 1
@test efficiency(deaio) ≈ [1.0000000000;
0.6222896791;
0.8198562444;
1.0000000000;
0.3103709311;
0.5555555556;
1.0000000000;
0.7576690896;
0.8201058201;
0.4905660377;
1.0000000000]
@test convert(Matrix, peers(deaio)) ≈
[1.000000000 0 0 0.0000000000 0 0 0.00000000000 0 0 0 0;
0.000000000 0 0 0.4249783174 0 0 0.10928013877 0 0 0 0;
1.134321653 0 0 0.4380053908 0 0 0.00000000000 0 0 0 0;
0.000000000 0 0 1.0000000000 0 0 0.00000000000 0 0 0 0;
0.000000000 0 0 0.2573807721 0 0 0.04844814534 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 0.33333333333 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 1.00000000000 0 0 0 0;
0.000000000 0 0 1.0348650979 0 0 0.11457435013 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 1.14814814815 0 0 0 0;
0.000000000 0 0 0.4905660377 0 0 0.49056603774 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 0.00000000000 0 0 0 1.000000000] atol = 1e-8
@test slacks(deaio, :X) ≈ [0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
4.444444444 0;
0.000000000 0;
0.000000000 0;
1.640211640 0;
0.000000000 0;
0.000000000 4]
@test slacks(deaio, :Y) ≈ zeros(11)
@test efficiency(deabigdata(targets(deaio, :X), targets(deaio, :Y), orient = :Input, rts = :CRS, slack = false)) ≈ ones(11)
@test efficiency(deaadd(targets(deaio, :X), targets(deaio, :Y))) ≈ zeros(11) atol=1e-14
# Otuput oriented CRS
deaoo = deabigdata(X, Y, orient = :Output, rts = :CRS)
@test nobs(deaoo) == 11
@test ninputs(deaoo) == 2
@test noutputs(deaoo) == 1
@test efficiency(deaoo) ≈ [1.0000000000;
1.606968641;
1.219726027;
1.0000000000;
3.221951220;
1.800000000;
1.0000000000;
1.319837398;
1.219354839;
2.038461538;
1.0000000000]
@test convert(Matrix, peers(deaoo)) ≈
[1.000000000 0 0 0.0000000000 0 0 0.00000000000 0 0 0 0;
0.000000000 0 0 0.6829268293 0 0 0.1756097561 0 0 0 0;
1.383561644 0 0 0.5342465753 0 0 0.00000000000 0 0 0 0;
0.000000000 0 0 1.0000000000 0 0 0.00000000000 0 0 0 0;
0.000000000 0 0 0.8292682927 0 0 0.1560975610 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 0.6000000000 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 1.00000000000 0 0 0 0;
0.000000000 0 0 1.3658536585 0 0 0.1512195122 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 1.4000000000 0 0 0 0;
0.000000000 0 0 1.0000000000 0 0 1.0000000000 0 0 0 0;
1.000000000 0 0 0.0000000000 0 0 0.00000000000 0 0 0 0]
@test slacks(deaoo, :X) ≈ [0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
8 0;
0.000000000 0;
0.000000000 0;
2 0;
0.000000000 0;
0.000000000 4]
@test slacks(deaoo, :Y) ≈ zeros(11)
@test efficiency(deabigdata(targets(deaoo, :X), targets(deaoo, :Y), orient = :Output, rts = :CRS, slack = false)) ≈ ones(11)
@test efficiency(deaadd(targets(deaoo, :X), targets(deaoo, :Y))) ≈ zeros(11) atol=1e-10
# Input oriented VRS
deaiovrs = deabigdata(X, Y, orient = :Input, rts = :VRS)
@test nobs(deaiovrs) == 11
@test ninputs(deaiovrs) == 2
@test noutputs(deaiovrs) == 1
@test efficiency(deaiovrs) ≈ [1.0000000000;
0.8699861687;
1.0000000000;
1.0000000000;
0.7116402116;
1.0000000000;
1.0000000000;
1.0000000000;
1.0000000000;
0.4931209269;
1.0000000000]
@test convert(Matrix, peers(deaiovrs)) ≈
[1.000000000 0 0 0.0000000000 0 0.00000000000 0.00000000000 0 0 0 0;
0.52558782849 0 0 0.0000000000 0 0.2842323651 0.1901798064 0 0 0 0;
0.000000000 0 1 0.0000000000 0 0.00000000000 0.00000000000 0 0 0 0;
0.000000000 0 0 1.0000000000 0 0.00000000000 0.00000000000 0 0 0 0;
0.56613756614 0 0 0.0000000000 0 0.4338624339 0.00000000000 0 0 0 0;
0.000000000 0 0 0.0000000000 0 1.00000000000 0.00000000000 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0.00000000000 1.00000000000 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0.00000000000 0.00000000000 1 0 0 0;
0.000000000 0 0 0.0000000000 0 0.00000000000 0.00000000000 0 1 0 0;
0.03711078928 0 0 0.4433381608 0 0.00000000000 0.5195510500 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0.00000000000 0.00000000000 0 0 0 1.000000000]
@test slacks(deaiovrs, :X) ≈ [0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0 0;
0.000000000 0;
0.000000000 0;
0 0;
0.000000000 0;
0.000000000 4]
@test slacks(deaiovrs, :Y) ≈ [0.000000000;
0.000000000;
0.000000000;
0.000000000;
2.698412698;
0.000000000;
0.000000000;
0.000000000;
0.000000000;
0.000000000;
0.000000000]
@test efficiency(deabigdata(targets(deaiovrs, :X), targets(deaiovrs, :Y), orient = :Input, rts = :VRS, slack = false)) ≈ ones(11)
@test efficiency(deaadd(targets(deaiovrs, :X), targets(deaiovrs, :Y))) ≈ zeros(11) atol=1e-12
# Output oriented VRS
deaoovrs = deabigdata(X, Y, orient = :Output, rts = :VRS)
@test nobs(deaoovrs) == 11
@test ninputs(deaoovrs) == 2
@test noutputs(deaoovrs) == 1
@test efficiency(deaoovrs) ≈ [1.0000000000;
1.507518797;
1.0000000000;
1.0000000000;
3.203947368;
1.000000000;
1.0000000000;
1.000000000;
1.000000000;
1.192307692;
1.0000000000]
@test convert(Matrix, peers(deaoovrs)) ≈
[1.000000000 0 0 0.0000000000 0 0 0.00000000000 0 0 0 0;
0.38157894737 0 0 0.1710526316 0 0 0.4473684211 0 0 0 0;
0.000000000 0 1 0.0000000000 0 0 0.00000000000 0 0 0 0;
0.000000000 0 0 1.0000000000 0 0 0.00000000000 0 0 0 0;
0.03947368421 0 0 0.7763157895 0 0 0.1842105263 0 0 0 0;
0.000000000 0 0 0.0000000000 0 1 0.00000000000 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 1.00000000000 0 0 0 0;
0.000000000 0 0 0.0000000000 0 0 0.00000000000 1 0 0 0;
0.000000000 0 0 0.0000000000 0 0 0.00000000000 0 1 0 0;
0.000000000 0 0 0.0000000000 0 0 0.00000000000 0 1 0 0;
1.000000000 0 0 0.0000000000 0 0 0.00000000000 0 0 0 0]
@test slacks(deaoovrs, :X) ≈ [0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
0.000000000 0;
5 11;
0.000000000 4]
@test slacks(deaoovrs, :Y) ≈ zeros(11) atol=1e-10
@test efficiency(deabigdata(targets(deaoovrs, :X), targets(deaoovrs, :Y), orient = :Output, rts = :VRS, slack = false)) ≈ ones(11)
@test efficiency(deaadd(targets(deaoovrs, :X), targets(deaoovrs, :Y))) ≈ zeros(11) atol=1e-12
# Test no slacks
deaionoslack = deabigdata(X, Y, slack = false)
@test efficiency(deaionoslack) == efficiency(deaio)
@test isempty(slacks(deaionoslack, :X)) == 1
@test isempty(slacks(deaionoslack, :Y)) == 1
@test efficiency(deabigdata(targets(deaionoslack, :X), targets(deaionoslack, :Y), slack = false)) ≈ ones(11)
@test efficiency(deaadd(targets(deaionoslack, :X), targets(deaionoslack, :Y))) != zeros(11) # Different as there is no slacks in first model
# Print
show(IOBuffer(), deaio)
show(IOBuffer(), deaionoslack)
# Test errors
@test_throws DimensionMismatch deabigdata([1; 2 ; 3], [4 ; 5]) # Different number of observations
@test_throws ArgumentError deabigdata([1; 2; 3], [4; 5; 6], orient = :Error) # Invalid orientation
@test_throws ArgumentError deabigdata([1; 2; 3], [4; 5; 6], rts = :Error) # Invalid returns to scale
@test_throws ErrorException deabigdata(X, Y, atol = 0.0, optimizer = DEAOptimizer(:NLP))
# ------------------
# Test if no exteriors
# ------------------
Xnoext = [1 1; 1.5 1; 2 1]
Ynoext = [2 2; 1.5 1.5; 1 0.5]
deanoext = dea(Xnoext, Ynoext, orient = :Input)
deabignoext = deabigdata(Xnoext, Ynoext, orient = :Input)
@test efficiency(deanoext) ≈ efficiency(deabignoext)
@test slacks(deanoext, :X) ≈ slacks(deabignoext, :X)
@test slacks(deanoext, :X) ≈ slacks(deabignoext, :X)
@test targets(deanoext, :X) ≈ targets(deabignoext, :X)
@test targets(deanoext, :Y) ≈ targets(deabignoext, :Y)
@test peersmatrix(deanoext) ≈ peersmatrix(deabignoext)
# ------------------
# Test with random data
# ------------------
rng = StableRNG(1234567)
X = rand(Uniform(10, 20), 500, 6)
Y = rand(Uniform(10, 20), 500, 4)
rdea = dea(X, Y, progress = false)
rdeabig = deabigdata(X, Y, progress = false)
@test efficiency(rdeabig) ≈ efficiency(rdea)
@test slacks(rdeabig, :X) ≈ slacks(rdea, :X)
@test slacks(rdeabig, :Y) ≈ slacks(rdea, :Y)
@test targets(rdeabig, :X) ≈ targets(rdea, :X)
@test targets(rdeabig, :Y) ≈ targets(rdea, :Y)
@test peersmatrix(rdeabig) ≈ peersmatrix(rdea)
# ------------------
# Test Vector and Matrix inputs and outputs
# ------------------
# Tests against results in R
# Inputs is Matrix, Outputs is Vector
X = [2 2; 1 4; 4 1; 4 3; 5 5; 6 1; 2 5; 1.6 8]
Y = [1; 1; 1; 1; 1; 1; 1; 1]
@test efficiency(deabigdata(X, Y, orient = :Input)) ≈ [1; 1; 1; 0.6; 0.4; 1; 0.6666666667; 0.625]
# Inputs is Vector, Output is Matrix
X = [1; 1; 1; 1; 1; 1; 1; 1]
Y = [7 7; 4 8; 8 4; 3 5; 3 3; 8 2; 6 4; 1.5 5]
@test efficiency(deabigdata(X, Y, orient = :Output)) ≈ [1; 1; 1; 1.555555556; 2.333333333; 1; 1.272727273; 1.6]
# Inputs is Vector, Output is Vector
X = [2; 4; 8; 12; 6; 14; 14; 9.412]
Y = [1; 5; 8; 9; 3; 7; 9; 2.353]
@test efficiency(deabigdata(X, Y, orient = :Input)) ≈ [0.4; 1; 0.8; 0.6; 0.4; 0.4; 0.5142857143; 0.2]
end
| [
27,
34345,
29,
9288,
14,
2934,
397,
328,
7890,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
30307,
329,
4403,
6060,
5325,
498,
28647,
32329,
198,
31,
9288,
2617,
366,
12804,
6601,
5325,
498,
7206,
2390,
375,
417,
1,
222... | 1.669217 | 7,972 |
using IterativeSolvers
using FactCheck
using Base.Test
# Type used in SOL test
# Matrix-free m×n operator used by the SOL test below; only stores the
# dimensions, the action is defined by A_mul_B!/Ac_mul_B!.
# NOTE(review): legacy Julia 0.x `type` keyword (modern Julia uses `mutable struct`).
type Wrapper
    m::Int
    n::Int
end
# Minimal AbstractMatrix-like interface for Wrapper: size per dimension
# (trailing dimensions report 1), full size tuple, and element type.
Base.size(op::Wrapper, dim::Integer) = (dim == 1) ? op.m :
                                       (dim == 2) ? op.n : 1
Base.size(op::Wrapper) = (op.m, op.n)
Base.eltype(op::Wrapper) = Int
# In-place y = α*A*x + β*y for the bidiagonal test operator
# A = [1; 1 2; 2 3; 3 4; ...; n], padded with zero rows up to m.
function Base.A_mul_B!(α, A::Wrapper, x, β, y)
    m, n = size(A)
    scale!(y, β)                # y ← β*y (legacy Julia 0.x API)
    y[1] = y[1] + α * x[1]
    for i = 2:n
        # row i of A has entries (i-1) and i in columns i-1 and i
        y[i] = y[i] + i * α * x[i] + (i-1) * α * x[i-1]
    end
    for i = n+1:m
        # zero rows of the padding: intentional no-op, y[i] keeps β*y[i]
        y[i] = y[i]
    end
    return y
end
# In-place y = α*A'*x + β*y, the adjoint action of the operator above.
function Base.Ac_mul_B!(α, A::Wrapper, x, β, y)
    m, n = size(A)
    mn = min(m, n)
    scale!(y, β)                # y ← β*y (legacy Julia 0.x API)
    for i = 1:mn-1
        # column i of A has entry i in rows i and i+1
        y[i] = y[i] + α * i* (x[i]+x[i+1])
    end
    y[mn] = y[mn] + α * mn * x[mn]
    for i = m+1:n
        # padding columns: intentional no-op
        y[i] = y[i]
    end
    return y
end
# Type used in Dampenedtest
# solve (A'A + diag(v).^2 ) x = b
# using LSMR in the augmented space A' = [A ; diag(v)], b' = [b; zeros(size(A, 2))]
# Stacked vector [y; x] of the augmented least-squares system.
type DampenedVector{Ty, Tx}
    y::Ty
    x::Tx
end
# Element type is the promotion of the two component eltypes.
Base.eltype(a::DampenedVector) = promote_type(eltype(a.y), eltype(a.x))
# Euclidean norm of the stacked vector [y; x].
function Base.norm(a::DampenedVector)
    return sqrt(norm(a.y)^2 + norm(a.x)^2)
end
# Component-wise copy of b into a; returns a.
function Base.copy!{Ty, Tx}(a::DampenedVector{Ty, Tx}, b::DampenedVector{Ty, Tx})
    copy!(a.y, b.y)
    copy!(a.x, b.x)
    return a
end
# Set every entry of both components to the same scalar; returns the vector.
function Base.fill!(v::DampenedVector, val::Number)
    fill!(v.x, val)
    fill!(v.y, val)
    return v
end
# In-place component-wise scaling by α; returns a.
function Base.scale!(a::DampenedVector, α::Number)
    scale!(a.y, α)
    scale!(a.x, α)
    return a
end
# Allocate an uninitialized DampenedVector with element type T and the
# same component shapes as a.
function Base.similar(a::DampenedVector, T)
    return DampenedVector(similar(a.y, T), similar(a.x, T))
end
# Total length of the stacked vector [y; x].
function Base.length(a::DampenedVector)
    length(a.y) + length(a.x)
end
# Augmented operator [A; diagm(diagonal)] acting on x, producing a
# DampenedVector [A*x; diagonal .* x].
type DampenedMatrix{TA, Tx}
    A::TA
    diagonal::Tx
end
# Element type promotion across the inner matrix and the damping diagonal.
Base.eltype(A::DampenedMatrix) = promote_type(eltype(A.A), eltype(A.diagonal))
# size(A, 1) counts the stacked rows (m rows of A.A plus the diagonal block);
# trailing dimensions report 1.
function Base.size(A::DampenedMatrix, dim::Integer)
    m, n = size(A.A)
    l = length(A.diagonal)
    dim == 1 ? (m + l) :
    dim == 2 ? n : 1
end
# In-place b = α*[A; diagm(d)]*a + β*b where b is a DampenedVector.
function Base.A_mul_B!{TA, Tx, Ty}(α::Number, mw::DampenedMatrix{TA, Tx}, a::Tx,
                β::Number, b::DampenedVector{Ty, Tx})
    # pre-scale b by β (special-casing 0 avoids NaN propagation from 0*Inf)
    if β != 1.
        if β == 0.
            fill!(b, 0.)
        else
            scale!(b, β)
        end
    end
    A_mul_B!(α, mw.A, a, 1.0, b.y)       # b.y += α * A * a
    # b.x += α * diagonal .* a (element-wise)
    map!((z, x, y)-> z + α * x * y, b.x, b.x, a, mw.diagonal)
    return b
end
# In-place b = α*[A; diagm(d)]'*a + β*b = α*(A'*a.y + d .* a.x) + β*b.
function Base.Ac_mul_B!{TA, Tx, Ty}(α::Number, mw::DampenedMatrix{TA, Tx}, a::DampenedVector{Ty, Tx},
                β::Number, b::Tx)
    # pre-scale b by β (special-casing 0 avoids NaN propagation from 0*Inf)
    if β != 1.
        if β == 0.
            fill!(b, 0.)
        else
            scale!(b, β)
        end
    end
    Ac_mul_B!(α, mw.A, a.y, 1.0, b)      # b += α * A' * a.y
    # b += α * diagonal .* a.x (element-wise)
    map!((z, x, y)-> z + α * x * y, b, b, a.x, mw.diagonal)
    return b
end
facts(string("lsmr")) do
context("Small dense matrix") do
A = rand(10, 5)
b = rand(10)
x, = lsmr(A, b)
@fact norm(x - A\b) --> less_than(√eps())
end
context("SOL test") do
# Test adapted from the BSD-licensed Matlab implementation at
# http://www.stanford.edu/group/SOL/software/lsqr.html
# <NAME>ers, Systems Optimization Laboratory,
# Dept of MS&E, Stanford University.
#-----------------------------------------------------------------------
# This is a simple example for testing LSMR.
# It uses the leading m*n submatrix from
# A = [ 1
# 1 2
# 2 3
# 3 4
# ...
# n ]
# suitably padded by zeros.
#
# 11 Apr 1996: First version for distribution with lsqr.m.
# <NAME>ers, Dept of EESOR, Stanford University.
function SOLtest(m, n, damp)
A = Wrapper(m, n)
xtrue = n:-1:1
b = Array(Float64, m)
b = float(A_mul_B!(1.0, A, xtrue, 0.0, b))
x, = lsmr(A, b, atol = 1e-7, btol = 1e-7, conlim = 1e10, maxiter = 10n)
r = A_mul_B!(-1, A, x, 1, b)
@fact norm(r) --> less_than_or_equal(1e-4)
end
SOLtest(10, 10, 0)
SOLtest(20, 10, 0)
SOLtest(20, 10, 0.1)
end
context("dampened test") do
# Test used to make sure A, b can be generic matrix / vector
srand(1234)
function DampenedTest(m, n)
b = rand(m)
A = rand(m, n)
v = rand(n)
Adampened = DampenedMatrix(A, v)
bdampened = DampenedVector(b, zeros(n))
x, ch = lsmr(Adampened, bdampened)
@fact norm((A'A + diagm(v).^2)x - A'b) --> less_than(1e-3)
end
DampenedTest(10, 10)
DampenedTest(20, 10)
end
end
| [
3500,
40806,
876,
36949,
690,
198,
3500,
19020,
9787,
198,
3500,
7308,
13,
14402,
628,
198,
198,
2,
5994,
973,
287,
36817,
1332,
198,
4906,
27323,
2848,
198,
220,
220,
220,
285,
3712,
5317,
198,
220,
220,
220,
299,
3712,
5317,
198,
... | 1.811904 | 2,621 |
include("module.jl")
using Logging
Logging.global_logger(Logging.ConsoleLogger(Logging.Info))
F5_DETECTED_USELESS_NF = 0
F5_USELESS_NF = 0
F5_NF = 0
F5_BASIS_SIZE = 0
"""
    zerof5()

Reset the global F5 instrumentation counters (`F5_DETECTED_USELESS_NF`,
`F5_USELESS_NF`, `F5_NF`, `F5_BASIS_SIZE`) to zero.
"""
function zerof5()
    global F5_DETECTED_USELESS_NF
    global F5_USELESS_NF
    global F5_NF
    global F5_BASIS_SIZE
    F5_DETECTED_USELESS_NF = 0
    F5_USELESS_NF = 0
    F5_NF = 0
    F5_BASIS_SIZE = 0
end
"""
    katsuran(n; ground=QQ)

Build the Katsura-`n` polynomial system in variables `x0..xn` over `ground`.
Returns `n+1` polynomials: the `n` quadratic Katsura equations followed by
the linear normalization equation `x0 + 2*(x1 + ... + xn) - 1`.
"""
function katsuran(n; ground=QQ)
    R, x = PolynomialRing(ground, ["x$i" for i in 0:n])
    return [
        (sum(x[abs(l)+1]*x[abs(m-l)+1] for l=-n:n if abs(m-l)<=n) -
            x[m+1] for m=0:n-1)...,
        x[1] + 2sum(x[i+1] for i=1:n) - 1
    ]
end
#-----------------------------------------------------------------------------
"""
    homogenize(F)

Homogenize every polynomial in `F` with a fresh variable `h` appended to the
ring. Each term `t` of `f` is multiplied by `h^(d - deg(t))` where `d` is the
total degree of the leading monomial of `f`.
NOTE(review): `d` is taken from the leading monomial, not `total_degree(f)` —
for non-degree-compatible orderings these can differ; confirm intended.
"""
function homogenize(F)
    base = parent(F[1])
    s = copy(symbols(base))
    push!(s, :h)
    # new ring: same coefficient ring and ordering, one extra variable h
    R, xsu = PolynomialRing(base_ring(base), s, ordering=ordering(base))
    xs = xsu[1:end-1]
    u = xsu[end]
    homF = Vector{eltype(F)}(undef, 0)
    for f in F
        tdeg = total_degree(leading_monomial(f))
        f = evaluate(f, xs) # map f into the extended ring
        homf = zero(R)
        for t in terms(f)
            homf += t * u^(tdeg - total_degree(t))
        end
        push!(homF, homf)
    end
    homF
end
"""
    dehomogenize(homF)

Substitute 1 for the last ring variable (the homogenization variable) in
every polynomial of `homF` and return the resulting vector.
"""
function dehomogenize(homF)
    ring = parent(homF[1])
    substitution = collect(gens(ring))
    substitution[end] = one(ring)
    return [evaluate(homf, substitution) for homf in homF]
end
#-----------------------------------------------------------------------------
# Regular reductions
# try to reduce f with g in a regular way and return result
"""
    regular_reduction_step(f::ModuleElement, g::ModuleElement)

Attempt a single regular reduction of `f` by `g`. Scans the terms of `f.ev`
for the first term divisible by the leading term of `g.ev` whose reduction
does not change the signature of `f`; returns `(true, f - u*g)` on success,
`(false, f)` otherwise.
"""
function regular_reduction_step(f::ModuleElement, g::ModuleElement)
    evalf = f.ev
    evalg = g.ev
    for t in terms(evalf)
        # `divides` already returns the quotient together with the success
        # flag, so the original extra `divexact` recomputation was redundant.
        success, u = divides(t, leading_term(evalg))
        if success
            @info "$(leading_term(evalg)) | $t"
            # reduction must be regular, i.e. must not change the signature.
            # NOTE(review): the original author flagged this comparison as
            # "not really correct!" — `f > u*g` may not implement regularity
            # exactly; confirm against the F5 signature ordering.
            if f > u*g
                newf = f - u*g
                return true, newf
            end
        end
    end
    return false, f
end
# Regular reduction by a set: try each element of G in turn and return the
# first successful single-step regular reduction of f, or (false, f) when no
# reducer applies.
function regular_reduction_step(f::ModuleElement, G)
    for reducer in G
        reduced, result = regular_reduction_step(f, reducer)
        if reduced
            return true, result
        end
    end
    return false, f
end
# Regular normal form of f w.r.t. G: repeat single regular reduction steps
# until no reducer in G applies any more, then return the fully reduced copy.
function regular_normal_form(f::ModuleElement, G)
    current = copy(f)
    reducible = true
    while reducible
        reducible, current = regular_reduction_step(current, G)
    end
    current
end
#-----------------------------------------------------------------------------
# Singular reductions
# Return true if f is singularly top reducible by G: some g ∈ G whose leading
# monomial divides lm(f.ev) produces a multiple u*g with the SAME signature
# as f (a regular reduction would require a smaller signature).
function issingularlytopreducible(f::ModuleElement, G)
    leadevalf = leading_monomial(f.ev)
    for g in G
        evalg = g.ev
        success, u = divides(leadevalf, leading_monomial(evalg))
        if success
            # equal signatures ⇒ the top reduction would be singular
            if (g*u).sgn == f.sgn
                return true
            end
        end
    end
    return false
end
# Syzygy criterion: f can be discarded when its signature is a multiple of a
# known syzygy signature (same module index and divisible monomial part).
# Each detected discard is counted in the global F5_DETECTED_USELESS_NF.
# (The original leading comment wrongly described singular top reduction.)
function syzygy_criterion(f::ModuleElement, syzygies)
    for syz in syzygies
        flag, _ = divides(f.sgn.monom, syz.sgn.monom)
        if flag && f.sgn.index == syz.sgn.index
            global F5_DETECTED_USELESS_NF
            F5_DETECTED_USELESS_NF += 1
            return true
        end
    end
    return false
end
#-----------------------------------------------------------------------------
# S-polynomial
# returns u, v, such that
# u*a - v*b is spoly(a, b)
# Multipliers (u, v) such that u*a - v*b is spoly(a, b).
function mults(a::ModuleElement, b::ModuleElement)
    # the lcm of the leading monomials is needed for both multipliers;
    # compute it once (the original computed the identical lcm twice)
    l = lcm(leading_monomial(a.ev), leading_monomial(b.ev))
    u = divexact(l, leading_term(a.ev))
    v = divexact(l, leading_term(b.ev))
    u, v
end
# S-polynomial of the module elements a and b.
function spoly(a::ModuleElement, b::ModuleElement)
    ua, vb = mults(a, b)
    return ua * a - vb * b
end
#-----------------------------------------------------------------------------
# Groebner basis things
# Wrap each generator f_i as a module element carrying the trivial signature
# (i, 1) of the i-th unit vector of the free module.
function construct_module(F)
    map(enumerate(F)) do (idx, poly)
        ModuleElement(poly, Signature(idx, one(poly)))
    end
end
"""
    signature_groebner_basis(F)

Compute a signature Gröbner basis of the ideal generated by the polynomials
in `F` using an F5-style algorithm: S-pairs are processed in signature order,
the syzygy criterion discards predictably-zero reductions, and singular top
reducibility prevents redundant basis elements. Mutates the global counters
`F5_NF`, `F5_USELESS_NF`, `F5_BASIS_SIZE`. Returns the vector of
`ModuleElement`s forming the basis (polynomials in the `.ev` fields).
NOTE(review): `F` is sorted and re-normalized in place.
"""
function signature_groebner_basis(F)
    sort!(F, by=leading_monomial, lt=f5_total_degree_lead_cmp)
    # make every generator monic
    F = map(f -> map_coefficients(c -> c // leading_coefficient(f), f), F)
    G = construct_module(F)
    # seed the syzygy list with the Koszul syzygies f_i*e_j - f_j*e_i
    syzygies = []
    for i in 1:length(G)
        for j in i+1:length(G)
            s1 = (G[i]*leading_monomial(G[j].ev)).sgn
            s2 = (G[j]*leading_monomial(G[i].ev)).sgn
            push!(syzygies, ModuleElement( zero(F[1]), max(s1, s2) ) )
        end
    end
    # initial pair set, pre-filtered by the syzygy criterion
    P = []
    for i in 1:length(G)
        for j in i+1:length(G)
            if syzygy_criterion(spoly(G[i], G[j]), syzygies)
                continue
            end
            push!(P, spoly(G[i], G[j]))
        end
    end
    # @info "generated initial G and P:"
    # @info "F = $F"
    # @info "G = $G"
    # @info "P = $P"
    # @info "syz = $syzygies"
    while !isempty(P)
        # process pairs in increasing signature order
        sort!(P, rev=false)
        f = popfirst!(P)
        @info "selected $f"
        if syzygy_criterion(f, syzygies)
            continue
        end
        # global F5_NF
        # F5_NF += 1
        fNF = regular_normal_form(f, G)
        global F5_NF
        F5_NF += 1
        if issyzygy(fNF)
            # reduction to zero: record the syzygy signature for the criterion
            # @warn "Reduction to zero!"
            # global F5_REDUCED
            # F5_REDUCED += 1
            push!(syzygies, fNF)
            global F5_USELESS_NF
            F5_USELESS_NF += 1
        elseif !issingularlytopreducible(fNF, G)
            # update P
            for fj in G
                if syzygy_criterion(spoly(fNF, fj), syzygies)
                    continue
                end
                push!(P, spoly(fNF, fj))
                # @info "$fNF $fj"
                # @info "SPOLY $(last(P))"
            end
            # update G
            fNFnormalzed = fNF * inv(leading_coefficient(fNF.ev))
            push!(G, fNFnormalzed)
        else
            # singularly top reducible: basis already covers this signature
            global F5_USELESS_NF
            F5_USELESS_NF += 1
        end
        # @info "updated G and P"
        # @info "G = $G"
        # @info "P = $P"
        # @info "syz = $syzygies"
    end
    # global F5_SIZE
    # F5_SIZE = length(G) - length(F)
    # global F5_REDUCED
    # global F5_DISCARDED
    global F5_BASIS_SIZE
    F5_BASIS_SIZE = length(G)
    G
end
#-----------------------------------------------------------------------------
# Driver: run the algorithm on a small 3-variable example and print both the
# module elements and their polynomial parts.
using Logging
Logging.global_logger(ConsoleLogger(Logging.Warn)) # silence the per-pair @info logs
R, (x,y,z) = PolynomialRing(QQ, ["x","y", "z"], ordering=:degrevlex)
F = [x*z + 1, y*z + 1]
# evaluation
G = signature_groebner_basis(F)
Gev = [g.ev for g in G] # extract the plain polynomials from the module elements
println("############")
println(G)
println(Gev)
| [
198,
17256,
7203,
21412,
13,
20362,
4943,
198,
198,
3500,
5972,
2667,
198,
11187,
2667,
13,
20541,
62,
6404,
1362,
7,
11187,
2667,
13,
47581,
11187,
1362,
7,
11187,
2667,
13,
12360,
4008,
198,
198,
37,
20,
62,
35,
2767,
9782,
1961,
... | 2.017011 | 3,586 |
@testset "rand" begin
# Some arbitrary alignment
x = iterdates()
expected_times = _eval_fast(x).times
@testset "scalar" begin
n = rand(x)
# calling multiple times should give different nodes, since we use a different rng.
@test n != rand(x)
# If we specify an explicit rng, then we should get identical nodes back.
rng = MersenneTwister()
n = rand(rng, x)
@test n === rand(rng, x)
@test value_type(n) == Float64
block = _eval(n)
@test block.times == expected_times
@test all(0 .<= block.values .< 1)
n = rand(rng, x, Int32)
@test n === rand(rng, x, Int32)
@test value_type(n) == Int32
block = _eval(n)
@test block.times == expected_times
@test all(typemin(Int32) .<= block.values .<= typemax(Int32))
end
@testset "array" begin
n = rand(x, ())
@test value_type(n) == Array{Float64,0}
# If we specify an explicit rng, then we should get identical nodes back.
rng = MersenneTwister()
n = rand(rng, x, (2,))
@test n === rand(rng, x, (2,))
@test n != rand(rng, x, (3,))
@test n != rand(rng, x, (2, 3))
@test rand(rng, x, (2,)) === rand(rng, x, 2)
@test rand(rng, x, Float64, (2,)) === rand(rng, x, 2)
@test rand(rng, x, (2,)) === rand(rng, x, Float64, 2)
@test rand(rng, x, (2, 3)) === rand(rng, x, 2, 3)
@test rand(rng, x, Float64, (2, 3)) === rand(rng, x, 2, 3)
@test rand(rng, x, (2, 3)) === rand(rng, x, Float64, 2, 3)
@test value_type(n) == Vector{Float64}
block = _eval(n)
@test block.times == expected_times
@test all(map(value -> all(0 .<= value .< 1), block.values))
end
end
| [
31,
9288,
2617,
366,
25192,
1,
2221,
198,
220,
220,
220,
1303,
2773,
14977,
19114,
198,
220,
220,
220,
2124,
796,
11629,
19581,
3419,
198,
220,
220,
220,
2938,
62,
22355,
796,
4808,
18206,
62,
7217,
7,
87,
737,
22355,
628,
220,
220,... | 2.051429 | 875 |
### Run Simulation ###
# Build a Simulation object, enqueue the initial agent/wagon/stats events,
# and run the discrete-event loop for `sim_run_time_in_secs` simulated
# seconds (stopping early once 95% of agents are infected).
# NOTE(review): defaults are read from the global `sim_params` object, which
# is defined elsewhere in the project — confirm it is loaded before calling.
function initialize_sim_and_run(;m::MapData
				, routes_path::OrderedDict{Symbol, OrderedDict{Int64,Int64}}
				, routes_distances::OrderedDict{Symbol, OrderedDict{Int64,Float64}}
				, routes_types_both_dir::Dict{Symbol,Symbol}
				, orig_map_nodes_num::Int
				, when_to_run_people::Float64 = sim_params.when_to_run_people
				, when_to_run_wagons::Float64 = sim_params.when_to_run_wagons
				, TTC_freq::Int = sim_params.TTC_car_freq
				, max_load_subway::Int = sim_params.max_load_subway
				, max_load_streetcar::Int = sim_params.max_load_streetcar
				, max_load_gov_restr::Float64 = sim_params.max_load_gov_restr
				, N_agents::Int = sim_params.N_agents
				, sim_run_time_in_secs::Int = sim_params.sim_run_time_in_secs
				, serialize_finished_sim_object::Bool = false
				, p0::Float64 = sim_params.p0
				, parset_id::Int64 = 1)
	# NOTE(review): the triple-quoted string below sits inside the function
	# body, so it is a discarded expression, not an attached docstring;
	# attaching it would require moving it above the `function` line.
	"""
	Description:
		Initialization of the simulation with a given parameters. Next the simulation will be running for the specified number of seconds.
	Returns:
		Simulation objects with the results.
	"""
	println("By default parameters from `sim_params` object will be used")
	println("--------------------")
	simul = Simulation(m
					, orig_map_nodes_num
					, routes_path
					, routes_distances
					, routes_types_both_dir
					, N_agents
					, sim_run_time_in_secs
					, TTC_freq
					, max_load_subway
					, max_load_streetcar
					, max_load_gov_restr
					, p0)
	println("when_to_run_people: ",when_to_run_people)
	println("when_to_run_wagons: ",when_to_run_wagons)
	# Seed the priority queue: (time, random tiebreak) tuples per agent/wagon
	enqueue!.(Ref(simul.events), simul.agents, tuple.(when_to_run_people,rand(length(simul.agents))))
	enqueue!.(Ref(simul.events), simul.wagons, tuple.(when_to_run_wagons,rand(length(simul.wagons))))
	enqueue!(simul.events, simul.artificial_agent_stats, tuple(when_to_run_people, rand()))
	println("--------------------")
	println("Final simul object before run: \n", simul)
	println("--------------------")
	current_time = 0
	# Main event loop: pop the earliest event and advance the simulation
	while sim_run_time_in_secs > current_time && length(simul.events) > 0
		event, t = dequeue_pair!(simul.events)
		step!(simul, t, event, orig_map_nodes_num)
		current_time = deepcopy(t[1])
		# early stop once the epidemic has effectively saturated
		if simul.infected_agents_count[end] >= 0.95*N_agents
			println("Simulation has ended ealier, because 95% of agents are already infected. Current time: ",current_time)
			break
		end
	end
	if serialize_finished_sim_object == true
		path_simul = "./data_output/simulation_finished_parset_id_$parset_id.bin"
		serialize(path_simul, simul)
		println("Simulation object with results was saved in: ", path_simul)
	end
	println("Simulation has already ended. You can check the results.")
	return simul
end
### Run Simulation N times###
# Run the simulation `N_times_to_run` times with identical parameters and
# aggregate infection counts, TTC usage, and per-wagon load statistics into
# a Dict of (mean, std/max) tuples.
function initialize_sim_and_run_N_times_and_gather_results(;m::MapData
				, routes_path::OrderedDict{Symbol, OrderedDict{Int64,Int64}}
				, routes_distances::OrderedDict{Symbol, OrderedDict{Int64,Float64}}
				, routes_types_both_dir::Dict{Symbol,Symbol}
				, orig_map_nodes_num::Int
				, when_to_run_people::Float64 = sim_params.when_to_run_people
				, when_to_run_wagons::Float64 = sim_params.when_to_run_wagons
				, TTC_freq::Int = sim_params.TTC_car_freq
				, max_load_subway::Int = sim_params.max_load_subway
				, max_load_streetcar::Int = sim_params.max_load_streetcar
				, max_load_gov_restr::Float64 = sim_params.max_load_gov_restr
				, N_agents::Int = sim_params.N_agents
				, sim_run_time_in_secs::Int = sim_params.sim_run_time_in_secs
				, serialize_finished_sim_object::Bool = false
				, parset_id::Int64 = 1
				, p0::Float64 = sim_params.p0
				, N_times_to_run::Int64 = 30)
	# NOTE(review): the triple-quoted string below is a discarded in-body
	# expression, not an attached docstring (see initialize_sim_and_run).
	"""
	Description:
		Run the simulation with specified parameters N times and then gather related results into dict.
	Returns:
		Dict object with mean results of N runs.
	"""
	res_gathering = Dict{String,Any}()
	# per-run accumulators
	total_cnt = Vector{Int}[]
	ttc_cnt = Vector{Int}[]
	street_cnt = Vector{Int}[]
	prct_ag_used_TTC = Float64[]
	mean_time_spent_in_TTC = Float64[]
	max_load_achieved_streetcars = Float64[]
	max_load_achieved_subway = Float64[]
	all_TTC_trips_count_streetcars = Float64[]
	all_TTC_trips_count_subway = Float64[]
	max_pass_per_wagon_subway = Vector{Float64}[]
	max_pass_per_wagon_streetcars = Vector{Float64}[]
	println("Your simulation will be run ", N_times_to_run, " times")
	for i in 1:N_times_to_run
		println("Is is a run number $i for parameters set number $parset_id.")
		@time s = initialize_sim_and_run(m = m
						, routes_path = routes_path
						, routes_distances = routes_distances
						, routes_types_both_dir = routes_types_both_dir
						, orig_map_nodes_num = orig_map_nodes_num
						, when_to_run_people = when_to_run_people
						, when_to_run_wagons = when_to_run_wagons
						, TTC_freq = TTC_freq
						, max_load_subway = max_load_subway
						, max_load_streetcar = max_load_streetcar
						, max_load_gov_restr = max_load_gov_restr
						, N_agents = N_agents
						, sim_run_time_in_secs = sim_run_time_in_secs
						, serialize_finished_sim_object =serialize_finished_sim_object
						, p0 = p0
						, parset_id = parset_id);
		# infection trajectories: total, caught in TTC, caught on the street
		push!(total_cnt, s.infected_agents_count)
		push!(ttc_cnt, s.infected_agents_wagon)
		push!(street_cnt, s.infected_agents_street)
		time_spend_in_TTC, TTC_types_used, time_spend_in_TTC_comp_to_total_path_time = TTC_usage_by_agents(s,orig_map_nodes_num);
		push!(prct_ag_used_TTC, round((sum([1 for i in time_spend_in_TTC if i > 0]) / N_agents)*100))
		push!(mean_time_spent_in_TTC, mean([i for i in time_spend_in_TTC if i > 0]))
		# how many TTC car trips hit the load cap, split by vehicle type
		loop_max_load_achieved_streetcar = [1 for (k,v) in s.max_passengers_per_TTC_car if (routes_types_both_dir[k[2]] == :streetcar) & (v >= max_load_streetcar)]
		loop_max_load_achieved_subway = [1 for (k,v) in s.max_passengers_per_TTC_car if (routes_types_both_dir[k[2]] == :subway) & (v >= max_load_subway)]
		push!(max_load_achieved_streetcars,sum(loop_max_load_achieved_streetcar))
		push!(max_load_achieved_subway,sum(loop_max_load_achieved_subway))
		loop_all_TTC_trips_count_streetcars = [1 for (k,v) in s.max_passengers_per_TTC_car if (routes_types_both_dir[k[2]] == :streetcar)]
		loop_all_TTC_trips_count_subway = [1 for (k,v) in s.max_passengers_per_TTC_car if (routes_types_both_dir[k[2]] == :subway)]
		push!(all_TTC_trips_count_streetcars,sum(loop_all_TTC_trips_count_streetcars))
		push!(all_TTC_trips_count_subway,sum(loop_all_TTC_trips_count_subway))
		# per-run [mean, std, max] of the peak passengers per wagon
		loop_max_pass_per_wagon_streetcars = [v for (k,v) in s.max_passengers_per_TTC_car if (routes_types_both_dir[k[2]] == :streetcar)]
		loop_max_pass_per_wagon_subway = [v for (k,v) in s.max_passengers_per_TTC_car if (routes_types_both_dir[k[2]] == :subway)]
		push!(max_pass_per_wagon_streetcars, [mean(loop_max_pass_per_wagon_streetcars),std(loop_max_pass_per_wagon_streetcars),maximum(loop_max_pass_per_wagon_streetcars)])
		push!(max_pass_per_wagon_subway, [mean(loop_max_pass_per_wagon_subway),std(loop_max_pass_per_wagon_subway),maximum(loop_max_pass_per_wagon_subway)])
	end
	# aggregate across runs: (mean, std) pairs, floored element-wise
	res_gathering["total_infected"] = (floor.(mean(total_cnt)), floor.(std(total_cnt)))
	res_gathering["TTC_infected"] = (floor.(mean(ttc_cnt)), floor.(std(ttc_cnt)))
	res_gathering["street_infected"] = (floor.(mean(street_cnt)), floor.(std(street_cnt)))
	res_gathering["prct_of_agents_used_TTC"] = (floor.(mean(prct_ag_used_TTC)), floor.(std(prct_ag_used_TTC)))
	res_gathering["mean_sec_spent_in_TTC_by_those_who_used"] = (floor.(mean(mean_time_spent_in_TTC)), floor.(std(mean_time_spent_in_TTC)))
	res_gathering["times_max_load_achieved_streetcars"] = (floor.(mean(max_load_achieved_streetcars)), floor.(std(max_load_achieved_streetcars)))
	res_gathering["times_max_load_achieved_subway"] = (floor.(mean(max_load_achieved_subway)), floor.(std(max_load_achieved_subway)))
	res_gathering["all_TTC_trips_count_streetcars"] = (floor.(mean(all_TTC_trips_count_streetcars)),floor.(std(all_TTC_trips_count_streetcars)))
	res_gathering["all_TTC_trips_count_subway"] = (floor.(mean(all_TTC_trips_count_subway)),floor.(std(all_TTC_trips_count_subway)))
	# NOTE(review): the streetcar entry below takes its maximum from the
	# subway vector — looks like a copy-paste slip; confirm intended.
	res_gathering["max_pass_per_wagon_subway"] = (floor.(mean(max_pass_per_wagon_subway)),floor.(maximum(max_pass_per_wagon_subway)))
	res_gathering["max_pass_per_wagon_streetcars"] = (floor.(mean(max_pass_per_wagon_streetcars)),floor.(maximum(max_pass_per_wagon_subway)))
	println("All runs completed!")
	return res_gathering
end
### Create parameters grid ###
# Build the Cartesian product of all supplied parameter values and return it
# as a DataFrame (one row = one parameter set), with a `parset_id` column.
function create_df_with_sim_params(;TTC_car_freq::Union{Vector{Int64},Int64}
				, max_load_gov_restr::Union{Vector{Float64},Float64}
				, when_to_run_people::Union{Vector{Float64},Float64}
				, when_to_run_wagons::Union{Vector{Float64},Float64}
				, sim_run_time_in_secs::Union{Vector{Int64},Int64}
				, N_agents::Union{Vector{Int64},Int64}
				, agents_speed_in_sec_per_m::Union{Vector{Float64},Float64}
				, max_load_subway::Union{Vector{Int64},Int64}
				, max_load_streetcar::Union{Vector{Int64},Int64}
				, p0::Union{Vector{Float64},Float64})
	# NOTE(review): the triple-quoted string below is a discarded in-body
	# expression, not an attached docstring (see initialize_sim_and_run).
	"""
	Description:
		Make all possible combinations of given values.
	Returns:
		DataFrame with all possible combinations of given parameters where one row means one set of parameters for Simulation.
	"""
	df = DataFrame(Iterators.product(TTC_car_freq
				, max_load_gov_restr
				, when_to_run_people
				, when_to_run_wagons
				, sim_run_time_in_secs
				, N_agents
				, agents_speed_in_sec_per_m
				, max_load_subway
				, max_load_streetcar
				, p0))
	colnames = ["TTC_car_freq", "max_load_gov_restr", "when_to_run_people", "when_to_run_wagons", "sim_run_time_in_secs", "N_agents", "agents_speed_in_sec_per_m", "max_load_subway", "max_load_streetcar", "p0"]
	rename!(df,Symbol.(colnames))
	df[!,"parset_id"] = [i for i in 1:nrow(df)]
	# reorder so parset_id is the first column
	df = df[:,["parset_id","TTC_car_freq", "max_load_gov_restr", "when_to_run_people", "when_to_run_wagons", "sim_run_time_in_secs", "N_agents", "agents_speed_in_sec_per_m", "max_load_subway", "max_load_streetcar", "p0"]]
	return df
end
println("Run sim functions were read")
| [
21017,
5660,
41798,
44386,
198,
8818,
41216,
62,
14323,
62,
392,
62,
5143,
7,
26,
76,
3712,
13912,
6601,
198,
197,
197,
197,
197,
11,
11926,
62,
6978,
3712,
35422,
1068,
35,
713,
90,
13940,
23650,
11,
14230,
1068,
35,
713,
90,
5317,... | 2.252166 | 4,616 |
<gh_stars>0
# Shared fixtures for the testsets below: device, branch and (empty) service
# model mappings used to build ModelReferences.
devices = Dict{Symbol, DeviceModel}(:Generators => DeviceModel(PSY.ThermalStandard, ThermalDispatch),
                                    :Loads =>  DeviceModel(PSY.PowerLoad, StaticPowerLoad))
branches = Dict{Symbol, DeviceModel}(:L => DeviceModel(PSY.Line, StaticLineUnbounded))
services = Dict{Symbol, ServiceModel}()
@testset "Operation set ref models" begin
model_ref = ModelReference(CopperPlatePowerModel, devices, branches, services);
op_model = OperationModel(TestOptModel, model_ref, c_sys5)
set_transmission_ref!(op_model, DCPLLPowerModel)
@test op_model.model_ref.transmission == DCPLLPowerModel
model_ref = ModelReference(CopperPlatePowerModel, devices, branches, services);
op_model = OperationModel(TestOptModel, model_ref, c_sys5)
new_devices = Dict{Symbol, DeviceModel}(:Generators => DeviceModel(PSY.ThermalStandard, ThermalBasicUnitCommitment),
:Loads => DeviceModel(PSY.PowerLoad, StaticPowerLoad))
set_devices_ref!(op_model, new_devices)
@test op_model.model_ref.devices[:Generators].formulation == ThermalBasicUnitCommitment
jump_model = op_model.canonical.JuMPmodel
@test ((JuMP.VariableRef, MOI.ZeroOne) in JuMP.list_of_constraint_types(jump_model)) == true
model_ref = ModelReference(DCPPowerModel, devices, branches, services);
op_model = OperationModel(TestOptModel, model_ref, c_sys5)
new_branches = Dict{Symbol, DeviceModel}(:L => DeviceModel(PSY.Line, StaticLine))
set_branches_ref!(op_model, new_branches)
@test op_model.model_ref.branches[:L].formulation == StaticLine
end
@testset "Operation set models" begin
model_ref = ModelReference(CopperPlatePowerModel, devices, branches, services);
op_model = OperationModel(TestOptModel, model_ref, c_sys5)
set_device_model!(op_model, :Generators, DeviceModel(PSY.ThermalStandard, ThermalBasicUnitCommitment))
@test op_model.model_ref.devices[:Generators].formulation == ThermalBasicUnitCommitment
jump_model = op_model.canonical.JuMPmodel
@test ((JuMP.VariableRef, MOI.ZeroOne) in JuMP.list_of_constraint_types(jump_model)) == true
model_ref = ModelReference(DCPPowerModel, devices, branches, services);
op_model = OperationModel(TestOptModel, model_ref, c_sys5)
set_branch_model!(op_model, :L, DeviceModel(PSY.Line, StaticLine))
@test op_model.model_ref.branches[:L].formulation == StaticLine
end
| [
27,
456,
62,
30783,
29,
15,
198,
42034,
796,
360,
713,
90,
13940,
23650,
11,
16232,
17633,
92,
7,
25,
8645,
2024,
5218,
16232,
17633,
7,
3705,
56,
13,
35048,
7617,
23615,
11,
41590,
49354,
828,
198,
220,
220,
220,
220,
220,
220,
2... | 2.700221 | 904 |
"""
ZeroKernel()
Create a kernel that always returning zero
```
κ(x,y) = 0.0
```
The output type depends of `x` and `y`
"""
struct ZeroKernel <: SimpleKernel end
kappa(κ::ZeroKernel, d::T) where {T<:Real} = zero(T)
metric(::ZeroKernel) = Delta()
Base.show(io::IO, ::ZeroKernel) = print(io, "Zero Kernel")
"""
WhiteKernel()
```
κ(x,y) = δ(x,y)
```
Kernel function working as an equivalent to add white noise. Can also be called via `EyeKernel()`
"""
struct WhiteKernel <: SimpleKernel end
"""
EyeKernel()
See [WhiteKernel](@ref)
"""
const EyeKernel = WhiteKernel
kappa(κ::WhiteKernel, δₓₓ::Real) = δₓₓ
metric(::WhiteKernel) = Delta()
Base.show(io::IO, ::WhiteKernel) = print(io, "White Kernel")
"""
ConstantKernel(; c=1.0)
Kernel function always returning a constant value `c`
```
κ(x,y) = c
```
"""
struct ConstantKernel{Tc<:Real} <: SimpleKernel
c::Vector{Tc}
function ConstantKernel(;c::T=1.0) where {T<:Real}
new{T}([c])
end
end
kappa(κ::ConstantKernel,x::Real) = first(κ.c)*one(x)
metric(::ConstantKernel) = Delta()
Base.show(io::IO, κ::ConstantKernel) = print(io, "Constant Kernel (c = ", first(κ.c), ")")
| [
37811,
198,
220,
220,
220,
12169,
42,
7948,
3419,
198,
198,
16447,
257,
9720,
326,
1464,
8024,
6632,
198,
15506,
63,
198,
220,
220,
220,
7377,
118,
7,
87,
11,
88,
8,
796,
657,
13,
15,
198,
15506,
63,
198,
464,
5072,
2099,
8338,
... | 2.294347 | 513 |
<gh_stars>0
module SpikeTrains
export SpikeTrain, draw_uncorrelated_spikes, draw_correlated_spikes, draw_coincident_spikes, length, iterate, convert, vcat, merge, make_exponentialShift, correlation_code, coincidence_code, plot_spike_trains
using Distributions, Plots#, PlotRecipes
struct SpikeTrain
times::Vector{Float64}
SpikeTrain(times=Float64[]) = new(sort(times))
end
Base.length(s::SpikeTrain) = length(s.times)
Base.iterate(s::SpikeTrain, args...) = iterate(s.times, args...)
Base.convert(SpikeTrain, v) = SpikeTrain(v)
Base.vcat(s::SpikeTrain...) = SpikeTrain(vcat(getfield.(s,:times)...))
Base.merge(s::Array{SpikeTrain}; dims=Base.OneTo(ndims(s))) = dropdims(mapslices(x->vcat(x...), s, dims=dims), dims=dims)
function has_min_distance(times, min_distance)
keep = ones(Bool, length(times))
last_time = -Inf
for (i,t) ∈ enumerate(times)
if t < last_time
keep[i] = false
else
last_time = t+min_distance
end
end
return keep
end
ensure_min_distance(times, min_distance) = times[has_min_distance(times, min_distance)]
"""
draw_uncorrelated_spikes(trange, rates; sorted=true)
Draw uncorrelated spike trains in the interval `trange` with the respective `rates`.
"""
function draw_uncorrelated_spikes(trange, rates)
duration = trange[2]-trange[1]
source = Vector{SpikeTrain}(undef, length(rates))
# draw uncorrelated spike trains with high rate
for (i,r) ∈ enumerate(rates)
num_spikes = rand(Poisson(duration*r))
spikes = rand(num_spikes).*duration.+trange[1]
sort!(spikes)
source[i] = SpikeTrain(spikes)
end
return source
end
make_exponentialShift(τ) = t -> t+rand(Exponential(τ))
"""
draw_correlated_spikes(trange, rates, c, [shift]; min_master_distance=0)
Draws spike trains with given `rates` within the given time interval `trange` and
pairwise correlation coefficients c. A function to `shift` the drawn spikes can be
included. A minimum distance between master spikes can be required. The algorithm implemented
is the offline version of the mixture process (4.6.1) by Brette 2008 [1].
[1](http://romainbrette.fr/WordPress3/wp-content/uploads/2014/06/Brette2008NC.pdf)
E[wt] = E[n_good]*min_master_distance
P(n_good|E[wt]) = Poisson(n_good|rate*(1-E[n_good]*min_master_distance))
E[n_good] = rate*1-rate*E[n_good]*min_master_distance
E[n_good] = rate/(1+rate*min_master_distance) = 1/(1/rate+min_master_distance)
rate_desired =!= E[n_good] = 1/(1/rate+min_master_distance)
=> rate := rate_desired/(1-rate_desired*min_master_distance)
"""
function draw_correlated_spikes(trange, rates, c, shift = make_exponentialShift(1.0); min_master_distance=-0)
# correct the rates to account for the expected loss due to pruning overlapping spikes
frac_overlap = sum(rates)
# draw the uncorrelated source spiketrains
source = if c≈0.0
@assert all(0 .<= rates*min_master_distance .< 1) "Cannot draw $(maximum(rates)) master spikes per unit-time with $(min_master_distance) minimum distance (rates must sum to < $(1/min_master_distance))."
# correct sampling rate for expected loss of master spikes due to enforced minimum distance
rates .= rates./(1 .- rates.*min_master_distance)
draw_uncorrelated_spikes(trange, rates)
else
# determine master spike sampling rate
v = 1/c * mean(rates.^2)/sum(rates)
@assert 0<=length(rates)*v*min_master_distance<1 "Cannot draw ~$(length(rates)*v) master spikes per unit-time with $(min_master_distance) minimum distance (rates must sum to < $(1/min_master_distance))."
# correct sampling rate for expected loss of master spikes due to enforced minimum distance
v /= (1-length(rates)*v*min_master_distance)
# determine subsampling probability
p = (c/mean(rates.^2)) .* (rates * rates')
@assert all(0 .<= p .<= 1) "P not all valid probabilities."
draw_uncorrelated_spikes(trange, fill(v, length(rates)))
end
# ensure minimum distance between all source spike trains
if min_master_distance > 0
n = sum(length, source)
times = Vector{Float64}(undef, n)
sources = Vector{Int}(undef, n)
i = 1
for (s,spikes) ∈ enumerate(source)
inew = i+length(spikes)
times[i:inew-1] = spikes.times
sources[i:inew-1] .= s
i=inew
end
# sort times and corresponding sources
idx=sortperm(times)
times=times[idx]
sources=sources[idx]
# determine which spikes to keep ...
keep=has_min_distance(times, min_master_distance)
# ... and keep only those spikes for the respective source
for (s,spikes) ∈ enumerate(source)
source[s] = SpikeTrain(times[(sources.==s) .& keep])
end
end
if c≈0.0
return source
else
target = Vector{SpikeTrain}(undef, length(rates))
# draw correlated spike trains
for i ∈ eachindex(target)
t = target[i] = SpikeTrain()
for (k,s) ∈ enumerate(source)
num_spikes = rand(Binomial(length(s.times), p[i,k]))
append!(t.times, sample(s.times, num_spikes; replace=false))
end
# shift each value randomly
t.times .= shift.(t.times)
sort!(t.times)
end
return target
end
end
"""
draw_coincident_spikes(trange, num_spikes, p=ones(Float64,1), shift = make_exponentialShift(1.0); sorted=true)
Draws `num_spikes` Poisson spikes in the interval `trange`, each of which appears
in each spike-train `i` with the respective probability `p[i]`.
"""
function draw_coincident_spikes(trange, num_spikes, p=ones(Float64,1), shift = make_exponentialShift(1.0); sorted=true)
times = rand(num_spikes).*(trange[2]-trange[1]).+trange[1]
if sorted
sort!(times)
end
target = Vector{SpikeTrain}(undef, length(p))
for i ∈ eachindex(target)
n = rand(Binomial(num_spikes, p[i]))
target[i] = SpikeTrain(sample(times, n; replace=false))
end
return target
end
"""
correlation_code(trange, stimulus, rates, correlations; kwargs...)
Each slice `stimulus[:,...,:,i]` contains an array indicating the class that the
spike-train with corresponding index represents in the time interval `trange[i:i+1]`.
Each class `c` results in spike-trains with rate `rates[c]` and mutual pairwise
correlation `correlations[c]`.
"""
function correlation_code(trange, stimulus, rates, correlations; kwargs...)
@assert length(trange)-1 == size(stimulus)[end] "trange must include n+1 steps for stimuli with last dimension n"
spiketrains = Array{SpikeTrain}(undef, size(stimulus))
classes = unique(stimulus)
for c ∈ classes
for i ∈ Base.OneTo(length(trange)-1)
idx = findall(s->c==s, view(stimulus, fill(Colon(), ndims(stimulus)-1)...,i))
spiketrains[idx,i] = draw_correlated_spikes(trange[i:i+1], fill(rates[c], length(idx)), correlations[c]; kwargs...)
end
end
return spiketrains
end
"""
coincidence_code(trange, stimulus, background_rate, p; kwargs...)
Each slice `stimulus[:,...,:,i]` contains a boolean array corresponding to
whether or not a spike train with corresponding index participates or not
in the coincident spiking during the time interval `trange[i:i+1]`.
A single spike is drawn within each interval, that appears with probability `p`
in each of the spike trains for which the corresponding entry in `stimulus` is `true`.
In addition to the signal spike for each interval, uncorrelated poisson spike-trains,
drawn with a given `background_rate`, are added as background noise.
"""
function coincidence_code(trange, stimulus, background_rate, p; kwargs...)
@assert length(trange)-1 == size(stimulus)[end] "trange must include n+1 steps for stimuli with last dimension n"
spiketrains = reshape(draw_uncorrelated_spikes((trange[1],trange[end]), fill(background_rate, length(stimulus))), size(stimulus))
for i ∈ Base.OneTo(length(trange)-1)
idx = findall(view(stimulus, fill(Colon(), ndims(stimulus)-1)...,i))
nd = ndims(idx)+1
spiketrains[idx,i] = merge(cat(spiketrains[idx,i], draw_coincident_spikes(trange[i:i+1], 1, fill(p, length(idx)); kwargs...), dims=nd),dims=nd)
end
return spiketrains
end
"""
plot_spike_trains(spiketrains::Array{SpikeTrain}, colors=fill(:auto, length(spiketrains)), plt=plot(); kwargs...)
Draw an array of `spiketrains` with given `colors` within a single given or new plot `plt`.
"""
function plot_spike_trains(spiketrains::Array{SpikeTrain}, colors=fill(:auto, length(spiketrains)), plt=plot(); kwargs...)
for (i,(spiketrain, color)) ∈ enumerate(zip(spiketrains, colors))
plot!(plt, ([spiketrain.times spiketrain.times fill(NaN, length(spiketrain.times))])'[:], i .- 0.5 .+ ([zeros(length(spiketrain.times)) ones(length(spiketrain.times)) fill(NaN, length(spiketrain.times))])'[:], lc=color; kwargs...)
end
return plt
end
end
| [
27,
456,
62,
30783,
29,
15,
198,
21412,
26309,
2898,
1299,
198,
198,
39344,
26309,
44077,
11,
3197,
62,
403,
10215,
5363,
62,
2777,
7938,
11,
3197,
62,
10215,
5363,
62,
2777,
7938,
11,
3197,
62,
1073,
1939,
738,
62,
2777,
7938,
11,
... | 2.480526 | 3,723 |
# DBL_EPSILON - jest to dokładność dla liczb zmiennoprzecinkowych
# dla Float64 jest to 2.2204460492503131e-16
DBL_EPSILON = 2.2204460492503131e-16
function DIFFSIGN(x, y)
if (x <=0 && y >= 0) || (x >= 0 && y <= 0)
return true
else
return false
end
end
function fun(x)
return 1 / (x - 3) - 6
end
function between(x, a, b)
if b > a
return (x >= a && x <= b)
else
return (x >= b && x <= a)
end
end
function lfun(b, a, fb, fa)
if fb != fa
return b - fb*(b - a) / (fb - fa) # metoda siecznych
elseif fa != 0
return Inf
else
return b
end
end
function hfun(b, c)
if c > b
return b + abs(b*DBL_EPSILON)
else
return b - abs(b*DBL_EPSILON)
end
end
function mfun(b, c)
return 0.5*(b + c) # metoda bisekcji
end
function wfun(l, b, c)
h = hfun(b, c)
m = mfun(b, c)
if between(l, h, m) == true
return l
elseif (fun(abs(l - b)) <= abs(b*DBL_EPSILON)) && (between(l, b, m) == false)
return h
else
return m
end
end
function ffun(a, b, fa, fb)
return (fa - fb) / (a - b)
end
function rfun(b, a, d, fb, fa, fd)
alpha = ffun(b, d, fb, fd)*fa
beta = ffun(a, d, fa, fd)*fb
if beta != alpha
return b - beta*(b - a) / (beta - alpha)
elseif alpha != 0
return Inf
else
return 0 # beta == alpha == 0
end
end
function DekkerM(x0, x1, Eps)
"""
b - ostatnie przyblizenie wyniku
c - kontrapunkt b, punkt w którym funkcja f ma przeciwny znak niż w punkcie b
a – poprzednia wartość punktu a, używana do wyliczania następnego punktu metodą siecznych
Metoda bisekcji z punktów b i c tworzy punkt m pomiędzy nimi na środku przedziału.
Wyliczany jest ciąg xi, którego ostatni element oznaczany jest przez x, a poprzedni przez xp.
xk - ostatni punkt w ciągu, który ma różny znak niż x.
Punkt x wyliczany jest dwoma metodami:
- siecznych
- bisekcji
i wybierany jest ten obliczony z metody siecznych jeśli leży pomiędzy punktem b
(ze względów dokładnościowych z pewną poprawką) a punktem m wyliczonym z bisekcji.
Jeżeli f(x) czy f(xk) leży bliżej zera i jeśli f(x) leży bliżej zera
wtedy b ma wartość x, c ma wartość xk, w przeciwnym razie zamiana.
"""
d = NaN
fd = NaN
fxp = fun(x0)
fx = fun(x1)
if x0 == x1
return fx
end
if DIFFSIGN(fx, fxp) == false
return 0
end
if abs(fx) <= abs(fxp)
b = x1
a = c = x0
fa = fxp
fb = fx
else
b = x0
a = c = x1
fa = fx
fb = fxp
end
xk = xp = x0
fxk = fxp
x = x1
iter = 0
age = 0
bp = b
cp = c
ap = a
while abs(b - c) > 2*Eps
iter = iter + 1
age = age + 1
if abs(b - c) <= (0.5 + 2 * DBL_EPSILON)*(abs(bp - cp) + abs(b*DBL_EPSILON))
age = 1
end
xp = x
if iter == 2
lambda = lfun(b, a, fb, fa)
if abs(lambda - b) < abs(b*DBL_EPSILON)
break
end
x = wfun(lambda, b, c)
elseif iter >= 3 && age <= 3
rho = rfun(b, a, d, fb, fa, fd)
if abs(rho - b) < abs(b*DBL_EPSILON)
break
end
x = wfun(rho, b, c)
elseif iter >=3 && age == 4
rho = rfun(b, a, d, fb, fa, fd)
if abs(rho - b) < abs(b*DBL_EPSILON)
break
end
x = wfun(2 * rho - b, b, c)
else
x = mfun(b, c)
end
fxp = fx
fx = fun(x)
if DIFFSIGN(fxp, fx) == true
xk = xp
fxk = fxp
end
bp = b
fbp = fb
ap = a
fap = fa
cp = c
if abs(fx) <= abs(fxk)
a = b
fa = fb
b = x
fb = fx
c = xk
else
b = xk
fb = fxk
a = c = x
fa = fx
end
if b == x || b == bp
d = ap
fd = fap
else
d = ap
fd = fdp
end
end
println("Number of iterations: ", iter)
return b
end
println("f(x) = 1/(x-3)-6\n")
result = DekkerM(3.01, 4, 1e-12)
println("x0 = ", result)
| [
198,
2,
360,
9148,
62,
36,
3705,
4146,
1340,
532,
474,
395,
284,
466,
74,
41615,
324,
3919,
129,
249,
38325,
288,
5031,
3476,
14969,
1976,
11632,
1697,
404,
81,
89,
721,
676,
322,
88,
354,
198,
2,
288,
5031,
48436,
2414,
474,
395,... | 1.624325 | 2,779 |
<reponame>ChevronETC/WaveFD
using BenchmarkTools, Random, Statistics, WaveFD
_nthreads = [2^i for i in 0:floor(Int,log2(Sys.CPU_THREADS))]
if Sys.CPU_THREADS ∉ _nthreads
push!(_nthreads, Sys.CPU_THREADS)
end
const SUITE = BenchmarkGroup()
z0,y0,x0,dz,dy,dx,nt,dt = 0.0,0.0,0.0,10.0,10.0,10.0,3000,0.001
n_2D = (z=parse(Int,get(ENV,"2D_NZ","501")), x=parse(Int,get(ENV,"2D_NX","1001")))
n_3D = (z=parse(Int,get(ENV,"3D_NZ","101")), y=parse(Int,get(ENV,"3D_NY","201")), x=parse(Int,get(ENV,"3D_NX","301")))
nb_2D = (z=parse(Int,get(ENV,"2D_NBZ","$(n_2D.z)")), x=parse(Int,get(ENV,"2D_NBX","8")))
nb_3D = (z=parse(Int,get(ENV,"3D_NBZ","$(n_3D.z)")), y=parse(Int,get(ENV,"3D_NBY","8")), x=parse(Int,get(ENV,"3D_NBX","8")))
@info "size 2D: $n_2D, use ENV[\"2D_NZ\"], ENV[\"2D_NX\"] to customize"
@info "size 3D: $n_3D, use ENV[\"3D_NZ\"], ENV[\"3D_NY\"], ENV[\"3D_NX\"] to customize"
@info "cache block size 2D: $nb_2D, use ENV[\"2D_NBZ\"], ENV[\"2D_NBX\"] to customize"
@info "cache block size 3D: $nb_3D, use ENV[\"3D_NBZ\"], ENV[\"3D_NBY\"], ENV[\"3D_NBX\"] to customize"
rz,rx = dz*ones(n_2D.x),dx*[0:n_2D.x-1;]
iz,ix,c = WaveFD.hickscoeffs(dz, dx, z0, x0, n_2D.z, n_2D.x, rz, rx)
blocks = WaveFD.source_blocking(n_2D.z, n_2D.x, 4, 4, iz, ix, c)
field = zeros(n_2D.z, n_2D.x)
data = rand(nt, n_2D.x)
it = 1
SUITE["2D Utils"] = BenchmarkGroup()
SUITE["2D Utils"]["source_blocking"] = @benchmarkable WaveFD.source_blocking($(n_2D.z), $(n_2D.x), $(nb_2D.z), $(nb_2D).x, $iz, $ix, $c);
SUITE["2D Utils"]["injectdata!"] = @benchmarkable WaveFD.injectdata!($field, $blocks, $data, $it)
dtmod = 0.0002
ntmod = nt * round(Int,dt / dtmod)
h = WaveFD.interpfilters(dtmod, dt, 0, WaveFD.LangC(), Sys.CPU_THREADS)
data_mod = rand(ntmod, n_2D.x)
SUITE["2D Utils"]["interpadjoint!"] = @benchmarkable WaveFD.interpadjoint!($h, $data_mod, $data)
SUITE["2D Utils"]["interpforward!"] = @benchmarkable WaveFD.interpforward!($h, $data, $data_mod)
compressor = WaveFD.Compressor(Float32, Float32, UInt32, (n_2D.z,n_2D.x), (32,32), 1e-2, 1024, false)
field2 = rand(Float32,n_2D.z,n_2D.x)
SUITE["2D compression"] = BenchmarkGroup()
SUITE["2D compression"]["write"] = @benchmarkable WaveFD.compressedwrite(io, $compressor, 1, $field2) setup=(open($compressor); io=open(tempname(),"w")) teardown=(close($compressor); close(io))
SUITE["2D compression"]["read"] = @benchmarkable WaveFD.compressedread!(io, $compressor, 1, $field2) setup=(open($compressor); tfile=tempname(); _io=open(tfile,"w"); WaveFD.compressedwrite(_io, $compressor, 1, $field2); close(_io); io=open(tfile)) teardown=(close($compressor); close(io))
rng2 = (10:n_2D.z-10,10:n_2D.x)
_field2 = view(field2, rng2...)
_compressor = WaveFD.Compressor(Float32, Float32, UInt32, size(_field2), (32,32), 1e-2, 1024, true)
SUITE["2D compression, interior"] = BenchmarkGroup()
SUITE["2D compression, interior"]["write"] = @benchmarkable WaveFD.compressedwrite(io, $_compressor, 1, $field2, $rng2) setup=(open($_compressor); io=open(tempname(),"w")) teardown=(close($_compressor); close(io))
SUITE["2D compression, interior"]["read"] = @benchmarkable WaveFD.compressedread!(io, $_compressor, 1, $field2, $rng2) setup=(open($_compressor); tfile=tempname(); _io=open(tfile,"w"); WaveFD.compressedwrite(_io, $_compressor, 1, $field2, rng2); close(_io); io=open(tfile)) teardown=(close($_compressor); close(io))
SUITE["2DAcoIsoDenQ_DEO2_FDTD"] = BenchmarkGroup([Dict("ncells"=>n_2D.z*n_2D.x,"nbz"=>nb_2D.z,"nbx"=>nb_2D.x,"nthreads"=>_nthreads)])
function p2diso(nthreads,nz,nx,nbz,nbx)
p = WaveFD.Prop2DAcoIsoDenQ_DEO2_FDTD(freesurface=false, nz=nz, nx=nx, nbz=nbz, nbx=nbx, dz=10.0, dx=10.0, dt=0.001, nthreads=nthreads)
v,b,pcur,pold = WaveFD.V(p),WaveFD.B(p),WaveFD.PCur(p),WaveFD.POld(p)
v .= 1500
b .= 1
rand!(pcur)
rand!(pold)
p
end
for nthreads in _nthreads
SUITE["2DAcoIsoDenQ_DEO2_FDTD"]["$nthreads threads"] = @benchmarkable WaveFD.propagateforward!(p) setup=(p=p2diso($nthreads,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x))) teardown=(free(p))
end
function fields2diso(p::WaveFD.Prop2DAcoIsoDenQ_DEO2_FDTD)
nz,nx=size(p)
δm = Dict("v"=>rand(Float32,nz,nx))
fields = Dict("pspace"=>rand(Float32,nz,nx))
δm,fields
end
SUITE["2DAcoIsoDenQ_DEO2_FDTD"]["imaging condition, standard"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoIsoDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2diso(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionStandard(); (δm,fields)=fields2diso(p)) teardown=(free(p))
SUITE["2DAcoIsoDenQ_DEO2_FDTD"]["imaging condition, wave field separation FWI"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoIsoDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2diso(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationFWI(); (δm,fields)=fields2diso(p)) teardown=(free(p))
SUITE["2DAcoIsoDenQ_DEO2_FDTD"]["imaging condition, wave field separation RTM"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoIsoDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2diso(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationRTM(); (δm,fields)=fields2diso(p)) teardown=(free(p))
SUITE["2DAcoVTIDenQ_DEO2_FDTD"] = BenchmarkGroup([Dict("ncells"=>n_2D.z*n_2D.x,"nbz"=>nb_2D.z,"nbx"=>nb_2D.x,"nthreads"=>_nthreads)])
function p2dvti(nthreads,nz,nx,nbz,nbx)
p = WaveFD.Prop2DAcoVTIDenQ_DEO2_FDTD(freesurface=false, nz=nz, nx=nx, nbz=nbz, nbx=nbx, dz=10.0, dx=10.0, dt=0.001, nthreads=nthreads)
v,b,ϵ,η,f,pcur,pold = WaveFD.V(p),WaveFD.B(p),WaveFD.Eps(p),WaveFD.Eta(p),WaveFD.F(p),WaveFD.PCur(p),WaveFD.POld(p)
rand!(pcur)
rand!(pold)
v .= 1500
b .= 1
ϵ .= 0.2
η .= 0.0
f .= 0.85
p
end
for nthreads in _nthreads
SUITE["2DAcoVTIDenQ_DEO2_FDTD"]["$nthreads threads"] = @benchmarkable WaveFD.propagateforward!(p) setup=(p=p2dvti($nthreads,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x))) teardown=(free(p))
end
function fields2dvti(p::WaveFD.Prop2DAcoVTIDenQ_DEO2_FDTD)
nz,nx=size(p)
δm = Dict("v"=>rand(Float32,nz,nx))
fields = Dict("pspace"=>rand(Float32,nz,nx),"mspace"=>rand(Float32,nz,nx))
δm,fields
end
SUITE["2DAcoVTIDenQ_DEO2_FDTD"]["imaging condition, standard"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoVTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2dvti(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionStandard(); (δm,fields)=fields2dvti(p)) teardown=(free(p))
SUITE["2DAcoVTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation FWI"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoVTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2dvti(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationFWI(); (δm,fields)=fields2dvti(p)) teardown=(free(p))
SUITE["2DAcoVTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation RTM"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoVTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2dvti(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationRTM(); (δm,fields)=fields2dvti(p)) teardown=(free(p))
SUITE["2DAcoTTIDenQ_DEO2_FDTD"] = BenchmarkGroup([Dict("ncells"=>n_2D.z*n_2D.x,"nbz"=>nb_2D.z,"nbx"=>nb_2D.x,"nthreads"=>_nthreads)])
function p2dtti(nthreads,nz,nx,nbz,nbx)
p = WaveFD.Prop2DAcoTTIDenQ_DEO2_FDTD(freesurface=false, nz=nz, nx=nx, nbz=nbz, nbx=nbx, dz=10.0, dx=10.0, dt=0.001, nthreads=nthreads)
v,b,ϵ,η,f,sinθ,cosθ,pcur,pold = WaveFD.V(p),WaveFD.B(p),WaveFD.Eps(p),WaveFD.Eta(p),WaveFD.F(p),WaveFD.SinTheta(p),WaveFD.CosTheta(p),WaveFD.PCur(p),WaveFD.POld(p)
v .= 1500
b .= 1
ϵ .= 0.2
η .= 0.0
f .= 0.85
cosθ .= cos(pi/4)
sinθ .= sin(pi/4)
rand!(pcur)
rand!(pold)
p
end
for nthreads in _nthreads
SUITE["2DAcoTTIDenQ_DEO2_FDTD"]["$nthreads threads"] = @benchmarkable WaveFD.propagateforward!(p) setup=(p=p2dtti($nthreads,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x))) teardown=(free(p))
end
function fields2dtti(p::WaveFD.Prop2DAcoTTIDenQ_DEO2_FDTD)
nz,nx = size(p)
δm = Dict("v"=>rand(Float32,nz,nx))
fields = Dict("pspace"=>rand(Float32,nz,nx),"mspace"=>rand(Float32,nz,nx))
δm,fields
end
SUITE["2DAcoTTIDenQ_DEO2_FDTD"]["imaging condition, standard"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoTTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2dtti(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionStandard(); (δm,fields)=fields2dtti(p)) teardown=(free(p))
SUITE["2DAcoTTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation FWI"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoTTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2dtti(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationFWI(); (δm,fields)=fields2dtti(p)) teardown=(free(p))
SUITE["2DAcoTTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation RTM"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop2DAcoTTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p2dtti(Sys.CPU_THREADS,$(n_2D.z),$(n_2D.x),$(nb_2D.z),$(nb_2D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationRTM(); (δm,fields)=fields2dtti(p)) teardown=(free(p))
nz,ny,nx = 501,301,1001
rz = [dz for iy = 1:ny, ix = 1:nx][:]
ry = [(iy-1)*dy for iy = 1:ny, ix = 1:nx][:]
rx = [(ix-1)*dx for iy = 1:ny, ix = 1:nx][:]
iz,iy,ix,c = WaveFD.hickscoeffs(dz, dy, dx, z0, y0, x0, nz, ny, nx, rz, ry, rx)
blocks = WaveFD.source_blocking(nz, ny, nx, 4, 4, 4, iz, iy, ix, c)
field = zeros(nz, ny, nx)
data = rand(nt, nx*ny)
it = 1
SUITE["3D Utils"] = BenchmarkGroup()
SUITE["3D Utils"]["source_blocking"] = @benchmarkable WaveFD.source_blocking($nz, $ny, $nx, 256, 8, 8, $iz, $iy, $ix, $c);
SUITE["3D Utils"]["injectdata!"] = @benchmarkable WaveFD.injectdata!($field, $blocks, $data, $it)
compressor = WaveFD.Compressor(Float32, Float32, UInt32, (n_3D.z,n_3D.y,n_3D.x), (32,32,32), 1e-2, 1024, false)
field3 = rand(Float32,n_3D.z,n_3D.y,n_3D.x)
SUITE["3D compression"] = BenchmarkGroup()
SUITE["3D compression"]["write"] = @benchmarkable WaveFD.compressedwrite(io, $compressor, 1, $field3) setup=(open($compressor); io=open(tempname(), "w")) teardown=(close($compressor); close(io))
SUITE["3D compression"]["read"] = @benchmarkable WaveFD.compressedread!(io, $compressor, 1, $field3) setup=(open($compressor); tfile=tempname(); _io=open(tfile,"w"); WaveFD.compressedwrite(_io, $compressor, 1, $field3); close(_io); io=open(tfile)) teardown=(close($compressor); close(io))
rng3 = (10:n_3D.z-10,10:n_3D.y,10:n_3D.x)
_field3 = view(field3, rng3...)
_compressor = WaveFD.Compressor(Float32, Float32, UInt32, size(_field3), (32,32,32), 1e-2, 1024, true)
SUITE["3D compression, interior"] = BenchmarkGroup()
SUITE["3D compression, interior"]["write"] = @benchmarkable WaveFD.compressedwrite(io, $_compressor, 1, $field3, $rng3) setup=(open($_compressor); io=open(tempname(),"w")) teardown=(close($_compressor); close(io))
SUITE["3D compression, interior"]["read"] = @benchmarkable WaveFD.compressedread!(io, $_compressor, 1, $field3, $rng3) setup=(open($_compressor); tfile=tempname(); _io=open(tfile,"w"); WaveFD.compressedwrite(_io, $_compressor, 1, $field3, rng3); close(_io); io=open(tfile)) teardown=(close($_compressor); close(io))
SUITE["3DAcoIsoDenQ_DEO2_FDTD"] = BenchmarkGroup([Dict("ncells"=>n_3D.z*n_3D.y*n_3D.x,"nbz"=>nb_3D.z,"nby"=>nb_3D.y,"nbx"=>nb_3D.x,"nthreads"=>_nthreads)])
function p3diso(nthreads,nz,ny,nx,nbz,nby,nbx)
p = WaveFD.Prop3DAcoIsoDenQ_DEO2_FDTD(freesurface=false, nz=nz, ny=ny, nx=nx, nbz=nbz, nby=nby, nbx=nbx, dz=10.0, dy=10.0, dx=10.0, dt=0.001, nthreads=nthreads)
v,b,pcur,pold = WaveFD.V(p), WaveFD.B(p),WaveFD.PCur(p),WaveFD.POld(p)
v .= 1500
b .= 1
rand!(pcur)
rand!(pold)
p
end
for nthreads in _nthreads
SUITE["3DAcoIsoDenQ_DEO2_FDTD"]["$nthreads threads"] = @benchmarkable WaveFD.propagateforward!(p) setup=(p=p3diso($nthreads,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x))) teardown=(free(p)) seconds=15
end
function fields3diso(p::WaveFD.Prop3DAcoIsoDenQ_DEO2_FDTD)
nz,ny,nx=size(p)
δm = Dict("v"=>rand(Float32,nz,ny,nx))
fields = Dict("pspace"=>rand(Float32,nz,ny,nx))
δm,fields
end
SUITE["3DAcoIsoDenQ_DEO2_FDTD"]["imaging condition, standard"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoIsoDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3diso(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionStandard(); (δm,fields)=fields3diso(p)) teardown=(free(p))
SUITE["3DAcoIsoDenQ_DEO2_FDTD"]["imaging condition, wave field separation FWI"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoIsoDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3diso(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationFWI(); x=zeros(Float32,$(n_3D.z),$(n_3D.y),$(n_3D.x)); (δm,fields)=fields3diso(p)) teardown=(free(p))
SUITE["3DAcoIsoDenQ_DEO2_FDTD"]["imaging condition, wave field separation RTM"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoIsoDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3diso(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationRTM(); x=zeros(Float32,$(n_3D.z),$(n_3D.y),$(n_3D.x)); (δm,fields)=fields3diso(p)) teardown=(free(p))
SUITE["3DAcoVTIDenQ_DEO2_FDTD"] = BenchmarkGroup([Dict("ncells"=>n_3D.z*n_3D.y*n_3D.x,"nbz"=>nb_3D.z,"nby"=>nb_3D.y,"nbx"=>nb_3D.x,"nthreads"=>_nthreads)])
function p3dvti(nthreads,nz,ny,nx,nbz,nby,nbx)
p = WaveFD.Prop3DAcoVTIDenQ_DEO2_FDTD(freesurface=false, nz=nz, ny=ny, nx=nx, nbz=nbz, nby=nby, nbx=nbx, dz=10.0, dy=10.0, dx=10.0, dt=0.001, nthreads=nthreads)
v,b,ϵ,η,f,pcur,pold = WaveFD.V(p),WaveFD.B(p),WaveFD.Eps(p),WaveFD.Eta(p),WaveFD.F(p),WaveFD.PCur(p),WaveFD.POld(p)
rand!(pcur)
rand!(pold)
v .= 1500
b .= 1
ϵ .= 0.2
η .= 0.0
f .= 0.85
p
end
for nthreads in _nthreads
SUITE["3DAcoVTIDenQ_DEO2_FDTD"]["$nthreads threads"] = @benchmarkable WaveFD.propagateforward!(p) setup=(p=p3dvti($nthreads,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x))) teardown=(free(p)) seconds=15
end
function fields3dvti(p::WaveFD.Prop3DAcoVTIDenQ_DEO2_FDTD)
nz,ny,nx=size(p)
δm = Dict("v"=>rand(Float32,nz,ny,nx))
fields = Dict("pspace"=>rand(Float32,nz,ny,nx),"mspace"=>rand(Float32,nz,ny,nx))
δm,fields
end
SUITE["3DAcoVTIDenQ_DEO2_FDTD"]["imaging condition, standard"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoVTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3dvti(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionStandard(); (δm,fields)=fields3dvti(p)) teardown=(free(p))
SUITE["3DAcoVTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation FWI"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoVTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3dvti(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationFWI(); (δm,fields)=fields3dvti(p)) teardown=(free(p))
SUITE["3DAcoVTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation RTM"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoVTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3dvti(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationRTM(); (δm,fields)=fields3dvti(p)) teardown=(free(p))
SUITE["3DAcoTTIDenQ_DEO2_FDTD"] = BenchmarkGroup([Dict("ncells"=>n_3D.z*n_3D.y*n_3D.x,"nbz"=>nb_3D.z,"nby"=>nb_3D.y,"nbx"=>nb_3D.x,"nthreads"=>_nthreads)])
function p3dtti(nthreads,nz,ny,nx,nbz,nby,nbx)
p = WaveFD.Prop3DAcoTTIDenQ_DEO2_FDTD(freesurface=false, nz=nz, ny=ny, nx=nx, nbz=nbz, nby=nby, nbx=nbx, dz=10.0, dx=10.0, dt=0.001, nthreads=nthreads)
v,b,ϵ,η,f,sinθ,cosθ,sinϕ,cosϕ,pcur,pold = WaveFD.V(p),WaveFD.B(p),WaveFD.Eps(p),WaveFD.Eta(p),WaveFD.F(p),WaveFD.SinTheta(p),WaveFD.CosTheta(p),WaveFD.SinPhi(p),WaveFD.CosPhi(p),WaveFD.PCur(p),WaveFD.POld(p)
v .= 1500
b .= 1
ϵ .= 0.2
η .= 0.0
f .= 0.85
cosθ .= cos(pi/4)
sinθ .= sin(pi/4)
cosϕ .= cos(pi/8)
sinϕ .= sin(pi/8)
rand!(pcur)
rand!(pold)
p
end
for nthreads in _nthreads
SUITE["3DAcoTTIDenQ_DEO2_FDTD"]["$nthreads threads"] = @benchmarkable WaveFD.propagateforward!(p) setup=(p=p3dtti($nthreads,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x))) teardown=(free(p)) seconds=15
end
function fields3dtti(p::WaveFD.Prop3DAcoTTIDenQ_DEO2_FDTD)
nz,ny,nx = size(p)
δm = Dict("v"=>rand(Float32,nz,ny,nx))
fields = Dict("pspace"=>rand(Float32,nz,ny,nx),"mspace"=>rand(Float32,nz,ny,nx))
δm,fields
end
SUITE["3DAcoTTIDenQ_DEO2_FDTD"]["imaging condition, standard"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoTTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3dtti(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionStandard(); (δm,fields)=fields3dtti(p)) teardown=(free(p))
SUITE["3DAcoTTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation FWI"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoTTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3dtti(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationFWI(); (δm,fields)=fields3dtti(p)) teardown=(free(p))
SUITE["3DAcoTTIDenQ_DEO2_FDTD"]["imaging condition, wave field separation RTM"] = @benchmarkable WaveFD.adjointBornAccumulation!(p,WaveFD.Prop3DAcoTTIDenQ_DEO2_FDTD_Model_V(),ic,δm,fields) setup=(p=p3dtti(Sys.CPU_THREADS,$(n_3D.z),$(n_3D.y),$(n_3D.x),$(nb_3D.z),$(nb_3D.y),$(nb_3D.x)); ic=WaveFD.ImagingConditionWaveFieldSeparationRTM(); (δm,fields)=fields3dtti(p)) teardown=(free(p))
include(joinpath(pkgdir(WaveFD), "benchmark", "mcells_per_second.jl"))
SUITE | [
27,
7856,
261,
480,
29,
7376,
85,
1313,
2767,
34,
14,
39709,
26009,
198,
3500,
25187,
4102,
33637,
11,
14534,
11,
14370,
11,
17084,
26009,
198,
198,
62,
77,
16663,
82,
796,
685,
17,
61,
72,
329,
1312,
287,
657,
25,
28300,
7,
5317,... | 1.962761 | 9,345 |
<filename>MakieCore/src/attributes.jl
const Theme = Attributes
Base.broadcastable(x::AbstractScene) = Ref(x)
Base.broadcastable(x::AbstractPlot) = Ref(x)
Base.broadcastable(x::Attributes) = Ref(x)
# The rules that we use to convert values to a Observable in Attributes
value_convert(x::Observables.AbstractObservable) = Observables.observe(x)
value_convert(@nospecialize(x)) = x
# We transform a tuple of observables into a Observable(tuple(values...))
function value_convert(x::NTuple{N, Union{Any, Observables.AbstractObservable}}) where N
result = Observable(to_value.(x))
onany((args...)-> args, x...)
return result
end
value_convert(x::NamedTuple) = Attributes(x)
# Version of `convert(Observable{Any}, obj)` that doesn't require runtime dispatch
node_any(@nospecialize(obj)) = isa(obj, Observable{Any}) ? obj :
isa(obj, Observable) ? convert(Observable{Any}, obj) : Observable{Any}(obj)
node_pairs(pair::Union{Pair, Tuple{Any, Any}}) = (pair[1] => node_any(value_convert(pair[2])))
node_pairs(pairs) = (node_pairs(pair) for pair in pairs)
Attributes(; kw_args...) = Attributes(Dict{Symbol, Observable}(node_pairs(kw_args)))
Attributes(pairs::Pair...) = Attributes(Dict{Symbol, Observable}(node_pairs(pairs)))
Attributes(pairs::AbstractVector) = Attributes(Dict{Symbol, Observable}(node_pairs.(pairs)))
Attributes(pairs::Iterators.Pairs) = Attributes(collect(pairs))
Attributes(nt::NamedTuple) = Attributes(; nt...)
attributes(x::Attributes) = getfield(x, :attributes)
Base.keys(x::Attributes) = keys(x.attributes)
Base.values(x::Attributes) = values(x.attributes)
function Base.iterate(x::Attributes, state...)
s = iterate(keys(x), state...)
s === nothing && return nothing
return (s[1] => x[s[1]], s[2])
end
function Base.copy(attributes::Attributes)
result = Attributes()
for (k, v) in attributes
# We need to create a new Signal to have a real copy
result[k] = copy(v)
end
return result
end
function Base.deepcopy(obs::Observable)
return Observable{Any}(to_value(obs))
end
function Base.deepcopy(attributes::Attributes)
result = Attributes()
for (k, v) in attributes
# We need to create a new Signal to have a real copy
result[k] = deepcopy(v)
end
return result
end
Base.filter(f, x::Attributes) = Attributes(filter(f, attributes(x)))
Base.empty!(x::Attributes) = (empty!(attributes(x)); x)
Base.length(x::Attributes) = length(attributes(x))
function Base.merge!(target::Attributes, args::Attributes...)
for elem in args
merge_attributes!(target, elem)
end
return target
end
Base.merge(target::Attributes, args::Attributes...) = merge!(copy(target), args...)
@generated hasfield(x::T, ::Val{key}) where {T, key} = :($(key in fieldnames(T)))
@inline function Base.getproperty(x::T, key::Symbol) where T <: Union{Attributes, Transformable}
if hasfield(x, Val(key))
getfield(x, key)
else
getindex(x, key)
end
end
@inline function Base.setproperty!(x::T, key::Symbol, value) where T <: Union{Attributes, Transformable}
if hasfield(x, Val(key))
setfield!(x, key, value)
else
setindex!(x, value, key)
end
end
function Base.getindex(x::Attributes, key::Symbol)
x = attributes(x)[key]
# We unpack Attributes, even though, for consistency, we store them as nodes
# this makes it easier to create nested attributes
return x[] isa Attributes ? x[] : x
end
function Base.setindex!(x::Attributes, value, key::Symbol)
if haskey(x, key)
x.attributes[key][] = value
else
x.attributes[key] = node_any(value)
end
end
function Base.setindex!(x::Attributes, value::Observable, key::Symbol)
if haskey(x, key)
# error("You're trying to update an attribute Observable with a new Observable. This is not supported right now.
# You can do this manually like this:
# lift(val-> attributes[$key] = val, Observable::$(typeof(value)))
# ")
return x.attributes[key] = node_any(value)
else
#TODO make this error. Attributes should be sort of immutable
return x.attributes[key] = node_any(value)
end
return x
end
_indent_attrs(s, n) = join(split(s, '\n'), "\n" * " "^n)
function Base.show(io::IO,::MIME"text/plain", attr::Attributes)
io = IOContext(io, :compact => true)
d = Dict()
print(io, """Attributes with $(length(attr)) $(length(attr) != 1 ? "entries" : "entry")""")
if length(attr) < 1
return
end
print(io, ":")
ks = sort(collect(keys(attr)), by = lowercase ∘ String)
maxlength = maximum(length ∘ String, ks)
for k in ks
print(io, "\n ")
print(io, k)
print(io, " => ")
v = to_value(attr[k])
if v isa Attributes
print(io, _indent_attrs(repr(v), 2))
else
print(io, to_value(attr[k]))
end
end
end
Base.show(io::IO, attr::Attributes) = show(io, MIME"text/plain"(), attr)
theme(x::AbstractPlot) = x.attributes
isvisible(x) = haskey(x, :visible) && to_value(x[:visible])
#dict interface
const AttributeOrPlot = Union{AbstractPlot, Attributes}
Base.pop!(x::AttributeOrPlot, args...) = pop!(x.attributes, args...)
Base.haskey(x::AttributeOrPlot, key) = haskey(x.attributes, key)
Base.delete!(x::AttributeOrPlot, key) = delete!(x.attributes, key)
function Base.get!(f::Function, x::AttributeOrPlot, key::Symbol)
if haskey(x, key)
return x[key]
else
val = f()
x[key] = val
return x[key]
end
end
Base.get!(x::AttributeOrPlot, key::Symbol, default) = get!(()-> default, x, key)
Base.get(f::Function, x::AttributeOrPlot, key::Symbol) = haskey(x, key) ? x[key] : f()
Base.get(x::AttributeOrPlot, key::Symbol, default) = get(()-> default, x, key)
# This is a bit confusing, since for a plot it returns the attribute from the arguments
# and not a plot for integer indexing. But, we want to treat plots as "atomic"
# so from an interface point of view, one should assume that a plot doesn't contain subplots
# Combined plots break this assumption in some way, but the way to look at it is,
# that the plots contained in a Combined plot are not subplots, but _are_ actually
# the plot itself.
Base.getindex(plot::AbstractPlot, idx::Integer) = plot.converted[idx]
Base.getindex(plot::AbstractPlot, idx::UnitRange{<:Integer}) = plot.converted[idx]
Base.setindex!(plot::AbstractPlot, value, idx::Integer) = (plot.input_args[idx][] = value)
Base.length(plot::AbstractPlot) = length(plot.converted)
function Base.getindex(x::AbstractPlot, key::Symbol)
argnames = argument_names(typeof(x), length(x.converted))
idx = findfirst(isequal(key), argnames)
if idx === nothing
return x.attributes[key]
else
x.converted[idx]
end
end
function Base.getindex(x::AttributeOrPlot, key::Symbol, key2::Symbol, rest::Symbol...)
dict = to_value(x[key])
dict isa Attributes || error("Trying to access $(typeof(dict)) with multiple keys: $key, $key2, $(rest)")
dict[key2, rest...]
end
function Base.setindex!(x::AttributeOrPlot, value, key::Symbol, key2::Symbol, rest::Symbol...)
dict = to_value(x[key])
dict isa Attributes || error("Trying to access $(typeof(dict)) with multiple keys: $key, $key2, $(rest)")
dict[key2, rest...] = value
end
function Base.setindex!(x::AbstractPlot, value, key::Symbol)
argnames = argument_names(typeof(x), length(x.converted))
idx = findfirst(isequal(key), argnames)
if idx === nothing && haskey(x.attributes, key)
return x.attributes[key][] = value
elseif !haskey(x.attributes, key)
x.attributes[key] = convert(Observable, value)
else
return setindex!(x.converted[idx], value)
end
end
function Base.setindex!(x::AbstractPlot, value::Observable, key::Symbol)
argnames = argument_names(typeof(x), length(x.converted))
idx = findfirst(isequal(key), argnames)
if idx === nothing
if haskey(x, key)
# error("You're trying to update an attribute Observable with a new Observable. This is not supported right now.
# You can do this manually like this:
# lift(val-> attributes[$key] = val, Observable::$(typeof(value)))
# ")
return x.attributes[key] = value
else
return x.attributes[key] = value
end
else
return setindex!(x.converted[idx], value)
end
end
# a few shortcut functions to make attribute conversion easier
function get_attribute(dict, key)
convert_attribute(to_value(dict[key]), Key{key}())
end
function merge_attributes!(input::Attributes, theme::Attributes)
for (key, value) in theme
if !haskey(input, key)
input[key] = copy(value)
else
current_value = input[key]
if value isa Attributes && current_value isa Attributes
# if nested attribute, we merge recursively
merge_attributes!(current_value, value)
elseif value isa Attributes || current_value isa Attributes
error("""
Type missmatch while merging plot attributes with theme for key: $(key).
Found $(value) in theme, while attributes contains: $(current_value)
""")
else
# we're good! input already has a value, can ignore theme
end
end
end
return input
end
| [
27,
34345,
29,
44,
461,
494,
14055,
14,
10677,
14,
1078,
7657,
13,
20362,
198,
198,
9979,
26729,
796,
49213,
198,
198,
14881,
13,
36654,
2701,
540,
7,
87,
3712,
23839,
36542,
8,
796,
6524,
7,
87,
8,
198,
14881,
13,
36654,
2701,
54... | 2.542382 | 3,728 |
<filename>test/runtests.jl
using NNLS
using Test
# import NonNegLeastSquares
using PyCall
using ECOS
using JuMP
using Random
using LinearAlgebra
import Libdl
const pyopt = pyimport_conda("scipy.optimize", "scipy")
macro wrappedallocs(expr)
argnames = [gensym() for a in expr.args]
quote
function g($(argnames...))
@allocated $(Expr(expr.head, argnames...))
end
$(Expr(:call, :g, [esc(a) for a in expr.args]...))
end
end
include("nnls.jl")
include("qp.jl") | [
27,
34345,
29,
9288,
14,
81,
2797,
3558,
13,
20362,
198,
3500,
399,
45,
6561,
198,
3500,
6208,
198,
2,
1330,
8504,
32863,
3123,
459,
22266,
3565,
198,
3500,
9485,
14134,
198,
3500,
13182,
2640,
198,
3500,
12585,
7378,
198,
3500,
14534... | 2.330275 | 218 |
<filename>src/Resource/ResourceManagementClient/model_DeploymentExtendedFilter.jl
# This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
mutable struct DeploymentExtendedFilter <: SwaggerModel
provisioningState::Any # spec type: Union{ Nothing, String } # spec name: provisioningState
function DeploymentExtendedFilter(;provisioningState=nothing)
o = new()
validate_property(DeploymentExtendedFilter, Symbol("provisioningState"), provisioningState)
setfield!(o, Symbol("provisioningState"), provisioningState)
o
end
end # type DeploymentExtendedFilter
const _property_map_DeploymentExtendedFilter = Dict{Symbol,Symbol}(Symbol("provisioningState")=>Symbol("provisioningState"))
const _property_types_DeploymentExtendedFilter = Dict{Symbol,String}(Symbol("provisioningState")=>"String")
Base.propertynames(::Type{ DeploymentExtendedFilter }) = collect(keys(_property_map_DeploymentExtendedFilter))
Swagger.property_type(::Type{ DeploymentExtendedFilter }, name::Symbol) = Union{Nothing,eval(Meta.parse(_property_types_DeploymentExtendedFilter[name]))}
Swagger.field_name(::Type{ DeploymentExtendedFilter }, property_name::Symbol) = _property_map_DeploymentExtendedFilter[property_name]
function check_required(o::DeploymentExtendedFilter)
true
end
function validate_property(::Type{ DeploymentExtendedFilter }, name::Symbol, val)
end
| [
27,
34345,
29,
10677,
14,
26198,
14,
26198,
48032,
11792,
14,
19849,
62,
49322,
434,
11627,
1631,
22417,
13,
20362,
198,
2,
770,
2393,
373,
7560,
416,
262,
22300,
2451,
7928,
6127,
35986,
198,
2,
2141,
407,
13096,
428,
2393,
3264,
13,... | 3.340136 | 441 |
<reponame>ianshmean/LibSerialPort.jl
import Base: readline, readuntil
export readline, readuntil
#==
Timeout versions of tbe base functions
==#
"""
readline(s::IO, timeout::T; keep::Bool=false) where {T<:Real}
Like Base.readline, except times-out after `timeout` seconds.
"""
function readline(s::IO, timeout::T; keep::Bool=false) where {T<:Real}
line = readuntil(s, 0x0a, timeout, keep=true)
i = length(line)
if keep || i == 0 || line[i] != 0x0a
return String(line)
elseif i < 2 || line[i-1] != 0x0d
return String(resize!(line,i-1))
else
return String(resize!(line,i-2))
end
end
"""
readuntil(s::IO, delim::AbstractChar, timeout::T; keep::Bool=false) where {T<:Real}
readuntil(s::IO, delim::T, timeout::U; keep::Bool=false) where {T, U<:Real}
Like Base.readuntil, except times-out after `timeout` seconds.
"""
function readuntil(s::IO, delim::AbstractChar, timeout::T; keep::Bool=false) where {T<:Real}
if delim ≤ '\x7f'
return Base.readuntil_string(s, delim % UInt8, keep)
end
out = IOBuffer()
t = Timer(timeout)
while !eof(s) && isopen(t)
bytesavailable(s) == 0 && continue
c = read(s, Char)
if c == delim
keep && write(out, c)
break
elseif c != 0x00
write(out, c)
end
yield()
end
return String(take!(out))
end
function readuntil(s::IO, delim::T, timeout::U; keep::Bool=false) where {T, U<:Real}
out = (T === UInt8 ? Base.StringVector(0) : Vector{T}())
t = Timer(timeout)
while !eof(s) && isopen(t)
bytesavailable(s) == 0 && continue
c = read(s, T)
if c == delim
keep && push!(out, c)
break
elseif c != 0x00
push!(out, c)
end
yield()
end
return out
end
| [
27,
7856,
261,
480,
29,
1547,
71,
32604,
14,
25835,
32634,
13924,
13,
20362,
198,
11748,
7308,
25,
1100,
1370,
11,
1100,
28446,
198,
39344,
1100,
1370,
11,
1100,
28446,
198,
198,
2,
855,
198,
48031,
6300,
286,
256,
1350,
2779,
5499,
... | 2.165493 | 852 |
<filename>src/constraints/chp_constraints.jl
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
function add_chp_fuel_burn_constraints(m, p; _n="")
# Fuel burn slope and intercept
fuel_burn_full_load = 1.0 / p.s.chp.elec_effic_full_load # [kWt/kWe]
fuel_burn_half_load = 0.5 / p.s.chp.elec_effic_half_load # [kWt/kWe]
fuel_burn_slope = (fuel_burn_full_load - fuel_burn_half_load) / (1.0 - 0.5) # [kWt/kWe]
fuel_burn_intercept = fuel_burn_full_load - fuel_burn_slope * 1.0 # [kWt/kWe_rated]
# Fuel cost
fuel_cost_per_kwh = p.s.chp.fuel_cost_per_mmbtu / MMBTU_TO_KWH
fuel_cost_series = per_hour_value_to_time_series(fuel_cost_per_kwh, p.s.settings.time_steps_per_hour,
"CHP.fuel_cost_per_mmbtu")
m[:TotalCHPFuelCosts] = @expression(m, p.pwf_fuel["CHP"] *
sum(m[:dvFuelUsage]["CHP", ts] * fuel_cost_series[ts] for ts in p.time_steps)
)
# Conditionally add dvFuelBurnYIntercept if coefficient p.FuelBurnYIntRate is greater than ~zero
if fuel_burn_intercept > 1.0E-7
dv = "dvFuelBurnYIntercept"*_n
m[Symbol(dv)] = @variable(m, [p.techs.chp, p.time_steps], base_name=dv, lower_bound=0)
#Constraint (1c1): Total Fuel burn for CHP **with** y-intercept fuel burn and supplementary firing
@constraint(m, CHPFuelBurnCon[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvFuelUsage"*_n)][t,ts] == p.hours_per_timestep * (
m[Symbol("dvFuelBurnYIntercept"*_n)][t,ts] +
p.production_factor[t,ts] * fuel_burn_slope * m[Symbol("dvRatedProduction"*_n)][t,ts] +
m[Symbol("dvSupplementaryThermalProduction"*_n)][t,ts] / p.s.chp.supplementary_firing_efficiency
)
)
#Constraint (1d): Y-intercept fuel burn for CHP
@constraint(m, CHPFuelBurnYIntCon[t in p.techs.chp, ts in p.time_steps],
fuel_burn_intercept * m[Symbol("dvSize"*_n)][t] - p.s.chp.max_kw *
(1-m[Symbol("binCHPIsOnInTS"*_n)][t,ts]) <= m[Symbol("dvFuelBurnYIntercept"*_n)][t,ts]
)
else
#Constraint (1c2): Total Fuel burn for CHP **without** y-intercept fuel burn
@constraint(m, CHPFuelBurnConLinear[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvFuelUsage"*_n)][t,ts] == p.hours_per_timestep * (
p.production_factor[t,ts] * fuel_burn_slope * m[Symbol("dvRatedProduction"*_n)][t,ts] +
m[Symbol("dvSupplementaryThermalProduction"*_n)][t,ts] / p.s.chp.supplementary_firing_efficiency
)
)
end
end
function add_chp_thermal_production_constraints(m, p; _n="")
# Thermal production slope and intercept
thermal_prod_full_load = 1.0 / p.s.chp.elec_effic_full_load * p.s.chp.thermal_effic_full_load # [kWt/kWe]
thermal_prod_half_load = 0.5 / p.s.chp.elec_effic_half_load * p.s.chp.thermal_effic_half_load # [kWt/kWe]
thermal_prod_slope = (thermal_prod_full_load - thermal_prod_half_load) / (1.0 - 0.5) # [kWt/kWe]
thermal_prod_intercept = thermal_prod_full_load - thermal_prod_slope * 1.0 # [kWt/kWe_rated
dv = "dvProductionToWaste"*_n
m[Symbol(dv)] = @variable(m, [p.techs.chp, p.time_steps], base_name=dv, lower_bound=0)
# Conditionally add dvThermalProductionYIntercept if coefficient p.s.chpThermalProdIntercept is greater than ~zero
if thermal_prod_intercept > 1.0E-7
dv = "dvThermalProductionYIntercept"*_n
m[Symbol(dv)] = @variable(m, [p.techs.chp, p.time_steps], base_name=dv, lower_bound=0)
#Constraint (2a-1): Upper Bounds on Thermal Production Y-Intercept
@constraint(m, CHPYInt2a1Con[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvThermalProductionYIntercept"*_n)][t,ts] <= thermal_prod_intercept * m[Symbol("dvSize"*_n)][t]
)
# Constraint (2a-2): Upper Bounds on Thermal Production Y-Intercept
@constraint(m, CHPYInt2a2Con[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvThermalProductionYIntercept"*_n)][t,ts] <= thermal_prod_intercept * p.s.chp.max_kw
* m[Symbol("binCHPIsOnInTS"*_n)][t,ts]
)
#Constraint (2b): Lower Bounds on Thermal Production Y-Intercept
@constraint(m, CHPYInt2bCon[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvThermalProductionYIntercept"*_n)][t,ts] >= thermal_prod_intercept * m[Symbol("dvSize"*_n)][t]
- thermal_prod_intercept * p.s.chp.max_kw * (1 - m[Symbol("binCHPIsOnInTS"*_n)][t,ts])
)
# Constraint (2c): Thermal Production of CHP
# Note: p.HotWaterAmbientFactor[t,ts] * p.HotWaterThermalFactor[t,ts] removed from this but present in math
@constraint(m, CHPThermalProductionCon[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvThermalProduction"*_n)][t,ts] ==
thermal_prod_slope * p.production_factor[t,ts] * m[Symbol("dvRatedProduction"*_n)][t,ts]
+ m[Symbol("dvThermalProductionYIntercept"*_n)][t,ts] +
m[Symbol("dvSupplementaryThermalProduction"*_n)][t,ts]
)
else
@constraint(m, CHPThermalProductionConLinear[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvThermalProduction"*_n)][t,ts] ==
thermal_prod_slope * p.production_factor[t,ts] * m[Symbol("dvRatedProduction"*_n)][t,ts] +
m[Symbol("dvSupplementaryThermalProduction"*_n)][t,ts]
)
end
end
"""
add_chp_supplementary_firing_constraints(m, p; _n="")
Used by add_chp_constraints to add supplementary firing constraints if
p.s.chp.supplementary_firing_max_steam_ratio > 1.0 to add CHP supplementary firing operating constraints.
Else, the supplementary firing dispatch and size decision variables are set to zero.
"""
function add_chp_supplementary_firing_constraints(m, p; _n="")
thermal_prod_full_load = 1.0 / p.s.chp.elec_effic_full_load * p.s.chp.thermal_effic_full_load # [kWt/kWe]
thermal_prod_half_load = 0.5 / p.s.chp.elec_effic_half_load * p.s.chp.thermal_effic_half_load # [kWt/kWe]
thermal_prod_slope = (thermal_prod_full_load - thermal_prod_half_load) / (1.0 - 0.5) # [kWt/kWe]
# Constrain upper limit of dvSupplementaryThermalProduction, using auxiliary variable for (size * useSupplementaryFiring)
@constraint(m, CHPSupplementaryFireCon[t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvSupplementaryThermalProduction"*_n)][t,ts] <=
(p.s.chp.supplementary_firing_max_steam_ratio - 1.0) * p.production_factor[t,ts] * (thermal_prod_slope * m[Symbol("dvSupplementaryFiringSize"*_n)][t] + m[Symbol("dvThermalProductionYIntercept"*_n)][t,ts])
)
# Constrain lower limit of 0 if CHP tech is off
@constraint(m, NoCHPSupplementaryFireOffCon[t in p.techs.chp, ts in p.time_steps],
!m[Symbol("binCHPIsOnInTS"*_n)][t,ts] => {m[Symbol("dvSupplementaryThermalProduction"*_n)][t,ts] <= 0.0}
)
end
function add_binCHPIsOnInTS_constraints(m, p; _n="")
# Note, min_turn_down_pct for CHP is only enforced in p.time_steps_with_grid
@constraint(m, [t in p.techs.chp, ts in p.time_steps_with_grid],
m[Symbol("dvRatedProduction"*_n)][t, ts] <= p.s.chp.max_kw * m[Symbol("binCHPIsOnInTS"*_n)][t, ts]
)
@constraint(m, [t in p.techs.chp, ts in p.time_steps_with_grid],
p.s.chp.min_turn_down_pct * m[Symbol("dvSize"*_n)][t] - m[Symbol("dvRatedProduction"*_n)][t, ts] <=
p.s.chp.max_kw * (1 - m[Symbol("binCHPIsOnInTS"*_n)][t, ts])
)
end
function add_chp_rated_prod_constraint(m, p; _n="")
@constraint(m, [t in p.techs.chp, ts in p.time_steps],
m[Symbol("dvSize"*_n)][t] >= m[Symbol("dvRatedProduction"*_n)][t, ts]
)
end
"""
add_chp_hourly_om_charges(m, p; _n="")
- add decision variable "dvOMByHourBySizeCHP"*_n for the hourly CHP operations and maintenance costs
- add the cost to TotalPerUnitHourOMCosts
"""
function add_chp_hourly_om_charges(m, p; _n="")
dv = "dvOMByHourBySizeCHP"*_n
m[Symbol(dv)] = @variable(m, [p.techs.chp, p.time_steps], base_name=dv, lower_bound=0)
#Constraint CHP-hourly-om-a: om per hour, per time step >= per_unit_size_cost * size for when on, >= zero when off
@constraint(m, CHPHourlyOMBySizeA[t in p.techs.chp, ts in p.time_steps],
p.s.chp.om_cost_per_hr_per_kw_rated * m[Symbol("dvSize"*_n)][t] -
p.max_size[t] * p.s.chp.om_cost_per_hr_per_kw_rated * (1-m[Symbol("binCHPIsOnInTS"*_n)][t,ts])
<= m[Symbol("dvOMByHourBySizeCHP"*_n)][t, ts]
)
#Constraint CHP-hourly-om-b: om per hour, per time step <= per_unit_size_cost * size for each hour
@constraint(m, CHPHourlyOMBySizeB[t in p.techs.chp, ts in p.time_steps],
p.s.chp.om_cost_per_hr_per_kw_rated * m[Symbol("dvSize"*_n)][t]
>= m[Symbol("dvOMByHourBySizeCHP"*_n)][t, ts]
)
#Constraint CHP-hourly-om-c: om per hour, per time step <= zero when off, <= per_unit_size_cost*max_size
@constraint(m, CHPHourlyOMBySizeC[t in p.techs.chp, ts in p.time_steps],
p.max_size[t] * p.s.chp.om_cost_per_hr_per_kw_rated * m[Symbol("binCHPIsOnInTS"*_n)][t,ts]
>= m[Symbol("dvOMByHourBySizeCHP"*_n)][t, ts]
)
m[:TotalHourlyCHPOMCosts] = @expression(m, p.third_party_factor * p.pwf_om *
sum(m[Symbol(dv)][t, ts] * p.hours_per_timestep for t in p.techs.chp, ts in p.time_steps))
nothing
end
"""
add_chp_constraints(m, p; _n="")
Used in src/reopt.jl to add_chp_constraints if !isempty(p.techs.chp) to add CHP operating constraints and
cost expressions.
"""
function add_chp_constraints(m, p; _n="")
# TODO if chp.min_turn_down_pct is 0.0, and there is no fuel burn or thermal y-intercept, we don't need the binary below
@warn """Adding binary variable to model CHP.
Some solvers are very slow with integer variables"""
@variables m begin
binCHPIsOnInTS[p.techs.chp, p.time_steps], Bin # 1 If technology t is operating in time step; 0 otherwise
end
m[:TotalHourlyCHPOMCosts] = 0
m[:TotalCHPFuelCosts] = 0
m[:TotalCHPPerUnitProdOMCosts] = @expression(m, p.third_party_factor * p.pwf_om *
sum(p.s.chp.om_cost_per_kwh * p.hours_per_timestep *
m[:dvRatedProduction][t, ts] for t in p.techs.chp, ts in p.time_steps)
)
if p.s.chp.om_cost_per_hr_per_kw_rated > 1.0E-7
add_chp_hourly_om_charges(m, p)
end
add_chp_fuel_burn_constraints(m, p; _n=_n)
add_chp_thermal_production_constraints(m, p; _n=_n)
add_binCHPIsOnInTS_constraints(m, p; _n=_n)
add_chp_rated_prod_constraint(m, p; _n=_n)
if p.s.chp.supplementary_firing_max_steam_ratio > 1.0
add_chp_supplementary_firing_constraints(m,p; _n=_n)
else
for t in p.techs.chp
fix(m[Symbol("dvSupplementaryFiringSize"*_n)][t], 0.0, force=true)
for ts in p.time_steps
fix(m[Symbol("dvSupplementaryThermalProduction"*_n)][t,ts], 0.0, force=true)
end
end
end
end
| [
27,
34345,
29,
10677,
14,
1102,
2536,
6003,
14,
354,
79,
62,
1102,
2536,
6003,
13,
20362,
198,
2,
41906,
17174,
8412,
9,
198,
2,
4526,
8738,
11,
15069,
357,
66,
8,
13130,
12,
42334,
11,
10302,
329,
45276,
6682,
11,
11419,
13,
198,... | 2.163868 | 5,895 |
module ENN
export TimedAutomata, TimePetriNets, Neurons
include(joinpath("TimedAutomata/","TimedAutomata.jl"))
include(joinpath("TimePetriNets/","TimePetriNets.jl"))
include(joinpath("Neurons/","Neurons.jl"))
end | [
21412,
412,
6144,
198,
198,
39344,
5045,
276,
38062,
1045,
11,
3862,
25803,
380,
45,
1039,
11,
3169,
333,
684,
198,
198,
17256,
7,
22179,
6978,
7203,
14967,
276,
38062,
1045,
14,
2430,
14967,
276,
38062,
1045,
13,
20362,
48774,
198,
1... | 2.654321 | 81 |
<gh_stars>0
using RecipesBase, Measures
export InvMcmcSampler, McmcOutput
struct InvMcmcSampler{T<:InvMcmcMove}
move::T
desired_samples::Int
burn_in::Int
lag::Int
K::Int
init::McmcInitialiser
pointers::InteractionSequence{Int}
function InvMcmcSampler(
move::T;
desired_samples::Int=1000, burn_in::Int=0, lag::Int=1,
K::Int=100, init=InitMode()
) where {T<:InvMcmcMove}
pointers = [Int[] for i in 1:(2K)]
new{T}(move, desired_samples, burn_in, lag, K, init, pointers)
end
end
Base.show(io::IO, x::InvMcmcSampler{T}) where {T} = print(io, typeof(x))
struct McmcOutput{T<:Union{SIS,SIM}}
sample::Vector{Vector{Path{Int}}} # The sample
model::T
end
Base.show(io::IO, x::McmcOutput) = print(io, typeof(x))
@recipe function f(output::McmcOutput)
model = output.model
sample = output.sample
x = map(x -> model.dist(model.mode, x), sample)
xguide --> "Sample"
yguide --> "Distance from Mode"
legend --> false
size --> (800, 300)
margin --> 5mm
x
end
acceptance_prob(mcmc::InvMcmcSampler) = acceptance_prob(mcmc.move)
function eval_accept_prob(
S_curr::InteractionSequence{Int},
S_prop::InteractionSequence{Int},
model::T,
log_ratio::Float64
) where {T<:Union{SIS,SIM}}
mode, γ, dist = (model.mode, model.γ, model.dist)
# @show S_curr, S_prop
log_lik_ratio = -γ * (
dist(mode, S_prop)-dist(mode, S_curr)
)
if typeof(model)==SIM
log_multinom_term = log_multinomial_ratio(S_curr, S_prop)
return log_lik_ratio + log_ratio + log_multinom_term
else
return log_lik_ratio + log_ratio
end
end
function accept_reject!(
S_curr::InteractionSequence{Int},
S_prop::InteractionSequence{Int},
pointers::InteractionSequence{Int},
move::InvMcmcMove,
model::T
) where {T<:Union{SIS,SIM}}
move.counts[2] += 1
log_ratio = prop_sample!(S_curr, S_prop, move, pointers, model.V)
# Catch out of bounds proposals (reject them, i.e. 0 acc prob)
if any(!(1 ≤ length(x) ≤ model.K_inner.u) for x in S_prop)
log_α = -Inf
elseif !(1 ≤ length(S_prop) ≤ model.K_outer.u)
log_α = -Inf
else
log_α = eval_accept_prob(S_curr, S_prop, model, log_ratio)
end
# @show log_α
if log(rand()) < log_α
# We accept!
move.counts[1] += 1
enact_accept!(S_curr, S_prop, pointers, move)
else
# We reject!
enact_reject!(S_curr, S_prop, pointers, move)
end
end
function intialise_states!(
pointers::InteractionSequence{Int},
init::InteractionSequence{Int}
)
S_curr = InteractionSequence{Int}()
S_prop = InteractionSequence{Int}()
for init_path in init
tmp = pop!(pointers)
copy!(tmp, init_path)
push!(S_curr, tmp)
tmp = pop!(pointers)
copy!(tmp, init_path)
push!(S_prop, tmp)
end
return S_curr, S_prop
end
function return_states!(
S_curr::InteractionSequence{Int},
S_prop::InteractionSequence{Int},
pointers::InteractionSequence{Int}
)
for i in eachindex(S_curr)
tmp = pop!(S_curr)
pushfirst!(pointers, tmp)
tmp = pop!(S_prop)
pushfirst!(pointers, tmp)
end
end
function draw_sample!(
sample_out::Union{InteractionSequenceSample{Int}, SubArray},
mcmc::InvMcmcSampler,
model::T;
burn_in::Int=mcmc.burn_in,
lag::Int=mcmc.lag,
init::InteractionSequence{Int}=get_init(mcmc.init, model)
) where {T<:Union{SIS,SIM}}
pointers = mcmc.pointers
S_curr, S_prop = intialise_states!(pointers, init)
sample_count = 1 # Keeps which sample to be stored we are working to get
i = 0 # Keeps track all samples (included lags and burn_ins)
reset_counts!(mcmc.move) # Reset counts for mcmc move (tracks acceptances)
while sample_count ≤ length(sample_out)
i += 1
# Store value
if (i > burn_in) & (((i-1) % lag)==0)
@inbounds sample_out[sample_count] = deepcopy(S_curr)
sample_count += 1
end
accept_reject!(
S_curr, S_prop,
pointers,
mcmc.move,
model
)
# println(S_prop)
end
return_states!(S_curr, S_prop, pointers)
end
function draw_sample(
mcmc::InvMcmcSampler,
model::T;
desired_samples::Int=mcmc.desired_samples,
burn_in::Int=mcmc.burn_in,
lag::Int=mcmc.lag,
init::InteractionSequence{Int}=get_init(mcmc.init, model)
) where {T<:Union{SIS,SIM}}
sample_out = InteractionSequenceSample{Int}(undef, desired_samples)
draw_sample!(sample_out, mcmc, model, burn_in=burn_in, lag=lag, init=init)
return sample_out
end
function (mcmc::InvMcmcSampler)(
model::T;
desired_samples::Int=mcmc.desired_samples,
burn_in::Int=mcmc.burn_in,
lag::Int=mcmc.lag,
init::InteractionSequence{Int}=get_init(mcmc.init, model)
) where {T<:Union{SIS,SIM}}
sample_out = draw_sample(mcmc, model, desired_samples=desired_samples, burn_in=burn_in, lag=lag, init=init)
return McmcOutput(sample_out, model)
end | [
27,
456,
62,
30783,
29,
15,
198,
3500,
44229,
14881,
11,
45040,
198,
39344,
10001,
9742,
23209,
16305,
20053,
11,
1982,
23209,
26410,
198,
198,
7249,
10001,
9742,
23209,
16305,
20053,
90,
51,
27,
25,
19904,
9742,
23209,
21774,
92,
198,
... | 2.127139 | 2,454 |
using PFKernels
using PFKernels.PowerSystem
using PFKernels.ForwardDiff
using LinearAlgebra
AT = PFKernels.AT
"""
    mynorm(x)

Apply `PFKernels.kernel!` to a forward-mode dual seeding of `x` and sum the
resulting dual outputs, returning `(value, partials)` of that sum.

NOTE(review): despite the name, this accumulates the kernel outputs with `+`,
not `abs2`; whether this equals a squared norm depends entirely on what
`PFKernels.kernel!` computes (defined in the injected "kernels.jl") — TODO
confirm against that kernel. The caller below compares the result to
`norm(x)^2` and its gradient.
"""
function mynorm(x)
    # One-chunk dual type carrying n partials (tag `Nothing`), n = length(x).
    t1s{N} = ForwardDiff.Dual{Nothing,Float64, N} where N
    n = length(x)
    FT = t1s{n}
    V = Vector
    F = zeros(n)
    adF = V{FT}(undef, n)
    adx = V{FT}(undef, n)
    # Seed adx with the identity partials so each entry of x gets its own
    # independent derivative direction.
    # ForwardDiff.seed!(adx, x, one(ForwardDiff.Partials{n,Float64}))
    ForwardDiff.seed!(adx, x, ForwardDiff.construct_seeds(ForwardDiff.Partials{n,Float64}))
    # AT converts to the backend's array type (host or device — see PFKernels.AT).
    dF = AT(adF)
    dx = AT(adx)
    PFKernels.kernel!(PFKernels.backend, dF, dx)
    # Reduce the kernel outputs; derivatives propagate through the duals.
    nrm2 = t1s{n}(0.0)
    for v in dF
        nrm2 += v
    end
    @show typeof(nrm2)
    @show nrm2
    return ForwardDiff.value(nrm2), ForwardDiff.partials(nrm2)
end
# Inject GPU kernels
PFKernels.include(joinpath(dirname(@__FILE__), "kernels.jl"))
x = rand(10)
nrm2 = x -> norm(x)^2
res = nrm2(x)
dres = ForwardDiff.gradient(nrm2, x)
@show x
F, dF = mynorm(x)
@assert(F ≈ res)
@assert(dF ≈ dres)
| [
3500,
350,
26236,
44930,
198,
3500,
350,
26236,
44930,
13,
13434,
11964,
198,
3500,
350,
26236,
44930,
13,
39746,
28813,
198,
3500,
44800,
2348,
29230,
198,
198,
1404,
796,
350,
26236,
44930,
13,
1404,
198,
198,
8818,
616,
27237,
7,
87,... | 2.085153 | 458 |
<reponame>Giarcr0b/MVO_Tool
# Copyright 2016, <NAME>, <NAME>, <NAME>, and contributors
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#############################################################################
# JuMP
# An algebraic modeling language for Julia
# See http://github.com/JuliaOpt/JuMP.jl
#############################################################################
# print.jl
# All "pretty printers" for JuMP types.
# - Delegates to appropriate handler methods for REPL or IJulia.
# - These handler methods then pass the correct symbols to use into a
# generic string builder. The IJulia handlers will also wrap in MathJax
# start/close tags.
# - To find printing code for a type in this file, search for `## TypeName`
# - Code here does not need to be fast, in fact simplicity trumps speed
# within reason as this code is thorny enough as it is.
# - Corresponding tests are in test/print.jl, although test/operator.jl
# is also testing the constraint/expression code extensively as well.
# - Base.print and Base.string both delegate to Base.show, if they are not
# separately defined.
#############################################################################
# Used for dispatching
abstract PrintMode
abstract REPLMode <: PrintMode
abstract IJuliaMode <: PrintMode
# Whether something is zero or not for the purposes of printing it
const PRINT_ZERO_TOL = 1e-10
# List of indices available for variable printing
const DIMS = ["i","j","k","l","m","n"]
# Helper function that rounds carefully for the purposes of printing
# e.g. 5.3 => 5.3
# 1.0 => 1
"""
    str_round(f::Float64)

Render `f` for pretty-printing: `0.0` and `-0.0` both become `"0"`, and a
trailing `".0"` is dropped so whole numbers print like integers
(e.g. `1.0` -> `"1"`, `5.3` -> `"5.3"`).
"""
function str_round(f::Float64)
    if abs(f) == 0.0
        # Normalize both signed zeros to a bare "0".
        return "0"
    end
    s = string(f)
    if length(s) >= 2 && s[end-1:end] == ".0"
        return s[1:end-2]
    end
    return s
end
# TODO: get rid of this! This is only a helper, and should be Base.values
# (and probably live there, as well)
# Uniform value iteration: a plain `Array` is iterated directly, anything else
# (e.g. a dict-like JuMP container) goes through `Base.values`.
_values(x::Array) = x
_values(x) = Base.values(x)
# REPL-specific symbols
# Anything here: https://en.wikipedia.org/wiki/Windows-1252
# should probably work fine on Windows
const repl = Dict{Symbol,UTF8String}(
:leq => (is_windows() ? "<=" : "≤"),
:geq => (is_windows() ? ">=" : "≥"),
:eq => (is_windows() ? "==" : "="),
:times => "*",
:sq => "²",
:ind_open => "[",
:ind_close => "]",
:for_all => (is_windows() ? "for all" : "∀"),
:in => (is_windows() ? "in" : "∈"),
:open_set => "{",
:dots => (is_windows() ? ".." : "…"),
:close_set => "}",
:union => (is_windows() ? "or" : "∪"),
:infty => (is_windows() ? "Inf" : "∞"),
:open_rng => "[",
:close_rng => "]",
:integer => "integer",
:succeq0 => " is semidefinite",
:Vert => (is_windows() ? "||" : "‖"),
:sub2 => (is_windows() ? "_2" : "₂"))
# IJulia-specific symbols
const ijulia = Dict{Symbol,UTF8String}(
:leq => "\\leq",
:geq => "\\geq",
:eq => "=",
:times => "\\times ",
:sq => "^2",
:ind_open => "_{",
:ind_close => "}",
:for_all => "\\quad\\forall",
:in => "\\in",
:open_set => "\\{",
:dots => "\\dots",
:close_set => "\\}",
:union => "\\cup",
:infty => "\\infty",
:open_rng => "\\[",
:close_rng => "\\]",
:integer => "\\in \\mathbb{Z}",
:succeq0 => "\\succeq 0",
:Vert => "\\Vert",
:sub2 => "_2")
typealias PrintSymbols Dict{Symbol,UTF8String}
# If not already mathmode, then wrap in MathJax start/close tags
math(s,mathmode) = mathmode ? s : "\$\$ $s \$\$"
# helper to look up corresponding JuMPContainerData
printdata(v::JuMPContainer) = getmeta(v, :model).varData[v]
function printdata(v::Array{Variable})
if isempty(v)
error("Cannot locate printing data for an empty array")
end
m = first(v).m
m.varData[v]
end
#------------------------------------------------------------------------
## Model
#------------------------------------------------------------------------
function Base.print(io::IO, m::Model; ignore_print_hook=(m.printhook==nothing))
ignore_print_hook || return m.printhook(io, m)
print(io, model_str(REPLMode,m))
end
function Base.show(io::IO, m::Model)
plural(n) = (n==1 ? "" : "s")
print(io, m.objSense == :Max ? "Maximization" : ((m.objSense == :Min && (!isempty(m.obj) || (m.nlpdata !== nothing && isa(m.nlpdata.nlobj, NonlinearExprData)))) ? "Minimization" : "Feasibility"))
println(io, " problem with:")
nlin = length(m.linconstr)
println(io, " * $(nlin) linear constraint$(plural(nlin))")
nquad = length(m.quadconstr)
if nquad > 0
println(io, " * $(nquad) quadratic constraint$(plural(nquad))")
end
nsos = length(m.sosconstr)
if nsos > 0
println(io, " * $(nsos) SOS constraint$(plural(nsos))")
end
nsoc = length(m.socconstr)
if nsoc > 0
println(io, " * $(nsoc) SOC constraint$(plural(nsoc))")
end
nsdp = length(m.sdpconstr)
if nsdp > 0
println(io, " * $(nsdp) semidefinite constraint$(plural(nsdp))")
end
nlp = m.nlpdata
if nlp !== nothing && length(nlp.nlconstr) > 0
println(io, " * $(length(nlp.nlconstr)) nonlinear constraint$(plural(length(nlp.nlconstr)))")
end
print(io, " * $(m.numCols) variable$(plural(m.numCols))")
nbin = sum(m.colCat .== :Bin)
nint = sum(m.colCat .== :Int)
nsc = sum(m.colCat .== :SemiCont)
nsi = sum(m.colCat .== :SemiInt)
varstr = Any[]
nbin == 0 || push!(varstr, "$nbin binary")
nint == 0 || push!(varstr, "$nint integer")
nsc == 0 || push!(varstr, "$nsc semicontinuous")
nsi == 0 || push!(varstr, "$nsi semi-integer")
if isempty(varstr)
println(io,)
else
println(io, ": $(join(varstr, ", "))")
end
print(io, "Solver is ")
if isa(m.solver, UnsetSolver)
print(io, "default solver")
else
print(io, split(split(string(m.solver), "Solver")[1], ".")[2])
end
end
@compat Base.show(io::IO, ::MIME"text/latex", m::Model) =
print(io, model_str(IJuliaMode,m))
function model_str(mode, m::Model, sym::PrintSymbols)
ijl = mode == IJuliaMode
sep = ijl ? " & " : " "
eol = ijl ? "\\\\\n" : "\n"
nlp = m.nlpdata
# Objective
qobj_str = quad_str(mode, m.obj)
obj_sense = ijl ? (m.objSense == :Max ? "\\max" : "\\min")*"\\quad" :
(m.objSense == :Max ? "Max" : "Min")
str = obj_sense * sep
if nlp !== nothing && nlp.nlobj !== nothing
str *= (qobj_str=="0"?"":"$qobj_str + ") * "(nonlinear expression)"
else
str *= qobj_str
end
str *= eol
# Constraints
str *= ijl ? "\\text{Subject to} \\quad" : "Subject to" * eol
for c in m.linconstr
str *= sep * con_str(mode,c,mathmode=true) * eol
end
for c in m.quadconstr
str *= sep * con_str(mode,c,mathmode=true) * eol
end
for c in m.sosconstr
str *= sep * con_str(mode,c,mathmode=true) * eol
end
for c in m.socconstr
str *= sep * con_str(mode,c,mathmode=true) * eol
end
if nlp !== nothing && length(nlp.nlconstr) > 0
num = length(nlp.nlconstr)
str *= sep * string("$num nonlinear constraint", num>1?"s":"") * eol
end
# Display indexed variables
in_dictlist = falses(m.numCols)
for d in m.dictList
# make sure that you haven't changed a variable type in the collection
firstval = first(_values(d))
cat = getcategory(firstval)
lb, ub = getlowerbound(firstval), getupperbound(firstval)
allsame = true
for v in _values(d)
if !(getcategory(v) == cat && getlowerbound(v) == lb && getupperbound(v) == ub)
allsame = false
break
elseif v in m.customNames
allsame = false
break
end
end
if allsame
for it in _values(d) # Mark variables in JuMPContainer as printed
in_dictlist[it.col] = true
end
str *= sep * cont_str(mode,d,mathmode=true) * eol
end
end
# Display non-indexed variables
for i in 1:m.numCols
in_dictlist[i] && continue
var_name = var_str(mode,m,i)
var_lb, var_ub = m.colLower[i], m.colUpper[i]
str_lb = var_lb == -Inf ? "-"*sym[:infty] : str_round(var_lb)
str_ub = var_ub == +Inf ? sym[:infty] : str_round(var_ub)
var_cat = m.colCat[i]
if var_cat == :Bin # x binary
str *= string(sep, var_name,
" ", sym[:in],
" ", sym[:open_set],
"0,1", sym[:close_set])
elseif var_cat == :SemiInt # x in union of 0 and {lb,...,ub}
str *= string(sep, var_name,
" ", sym[:in],
" ", sym[:open_set],
str_lb, ",", sym[:dots], ",", str_ub,
sym[:close_set],
" ", sym[:union], " ",
sym[:open_set], "0", sym[:close_set])
elseif var_cat == :SemiCont # x in union of 0 and [lb,ub]
str *= string(sep, var_name,
" ", sym[:in],
" ", sym[:open_rng],
str_lb, ",", str_ub,
sym[:close_rng],
" ", sym[:union], " ",
sym[:open_set], "0", sym[:close_set])
elseif var_cat == :Fixed
str *= string(sep, var_name, " = ", str_lb)
elseif var_lb == -Inf && var_ub == +Inf # Free variable
str *= string(sep, var_name, " free")
elseif var_lb == -Inf # No lower bound
str *= string(sep, var_name, " ", sym[:leq], " ", str_ub)
elseif var_ub == +Inf # No upper bound
str *= string(sep, var_name, " ", sym[:geq], " ", str_lb)
else
str *= string(sep, str_lb, " ", sym[:leq],
" ", var_name, " ",
sym[:leq], " ", str_ub)
end
if var_cat == :Int
str *= string(", ", sym[:integer])
end
str *= eol
end
ijl ? "\$\$ \\begin{alignat*}{1}"*str*"\\end{alignat*}\n \$\$" :
str
end
# Handlers to use correct symbols
model_str(::Type{REPLMode}, m::Model) =
model_str(REPLMode, m, repl)
model_str(::Type{IJuliaMode}, m::Model; mathmode=true) =
math(model_str(IJuliaMode, m, ijulia), mathmode)
#------------------------------------------------------------------------
## Variable
#------------------------------------------------------------------------
Base.show(io::IO, v::Variable) = print(io, var_str(REPLMode,v))
@compat Base.show(io::IO, ::MIME"text/latex", v::Variable) =
print(io, var_str(IJuliaMode,v,mathmode=false))
function var_str(mode, m::Model, col::Int; mathmode=true)
colNames = mode == REPLMode ? m.colNames : m.colNamesIJulia
if colNames[col] === EMPTYSTRING
for cont in m.dictList
fill_var_names(mode, colNames, cont)
end
end
return math(colNames[col] == "" ? "col_$col" : colNames[col], mathmode)
end
function fill_var_names{N}(mode, colNames, v::JuMPArray{Variable,N})
data = printdata(v)
idxsets = data.indexsets
lengths = map(length, idxsets)
name = data.name
cprod = cumprod([lengths...])
for (ind,var) in enumerate(v.innerArray)
idx_strs = [string( idxsets[1][mod1(ind,lengths[1])] )]
for i = 2:N
push!(idx_strs, string(idxsets[i][Int(ceil(mod1(ind,cprod[i]) / cprod[i-1]))]))
end
if mode == IJuliaMode
colNames[var.col] = string(name, "_{", join(idx_strs,",") , "}")
else
colNames[var.col] = string(name, "[", join(idx_strs,",") , "]")
end
end
end
function fill_var_names(mode, colNames, v::JuMPDict{Variable})
name = printdata(v).name
for (ind,var) in zip(keys(v),values(v))
if mode == IJuliaMode
colNames[var.col] = string(name, "_{", join([string(i) for i in ind],","), "}")
else
colNames[var.col] = string(name, "[", join([string(i) for i in ind],","), "]")
end
end
end
function fill_var_names(mode, colNames, v::Array{Variable})
isempty(v) && return
sizes = size(v)
m = first(v).m
if !haskey(m.varData, v)
return
end
name = m.varData[v].name
for (ii,var) in enumerate(v)
@assert var.m === m
ind = ind2sub(sizes, ii)
colNames[var.col] = if mode === IJuliaMode
string(name, "_{", join(ind, ","), "}")
else
string(name, "[", join(ind, ","), "]")
end
end
return
end
# Handlers to use correct symbols
var_str(::Type{REPLMode}, v::Variable) =
var_str(REPLMode, v.m, v.col)
var_str(::Type{IJuliaMode}, v::Variable; mathmode=true) =
var_str(IJuliaMode, v.m, v.col, mathmode=mathmode)
#------------------------------------------------------------------------
## Norm
#------------------------------------------------------------------------
Base.show(io::IO, j::Norm) = print(io, norm_str(REPLMode,j))
@compat Base.show(io::IO, ::MIME"text/latex", j::Norm) =
print(io, norm_str(IJuliaMode,j))
function norm_str(mode, n::Norm, sym::PrintSymbols)
string(sym[:Vert], "[",
join(map(t->aff_str(mode,t),n.terms),","),
"]", sym[:Vert], sym[:sub2])
end
# Handlers to use correct symbols
norm_str(::Type{REPLMode}, n::Norm) =
norm_str(REPLMode, n, repl)
norm_str(::Type{IJuliaMode}, n::Norm; mathmode=true) =
math(norm_str(IJuliaMode, n, ijulia), mathmode)
exprstr(n::Norm) = norm_str(REPLMode, n)
#------------------------------------------------------------------------
## JuMPContainer{Variable}
#------------------------------------------------------------------------
Base.show(io::IO, j::Union{JuMPContainer{Variable}, Array{Variable}}) = print(io, cont_str(REPLMode,j))
@compat Base.show(io::IO, ::MIME"text/latex", j::Union{JuMPContainer{Variable},Array{Variable}}) =
print(io, cont_str(IJuliaMode,j,mathmode=false))
# Generic string converter, called by mode-specific handlers
# Assumes that !isempty(j)
_getmodel(j::Array{Variable}) = first(j).m
_getmodel(j::JuMPContainer) = getmeta(j, :model)
function cont_str(mode, j, sym::PrintSymbols)
# Check if anything in the container
if isempty(j)
name = isa(j, JuMPContainer) ? printdata(j).name : "Empty Array{Variable}"
return "$name (no indices)"
end
m = _getmodel(j)
# If this looks like a user-created Array, then defer to base printing
if !haskey(m.varData, j)
@assert isa(j, Array{Variable})
if ndims(j) == 1
return sprint((io,v) -> Base.show_vector(io, v, "[", "]"), j)
else
return sprint((io,X) -> Base.showarray(io, X), j)
end
end
data = printdata(j)
# 1. construct the part with variable name and indexing
locvars = map(data.indexexprs) do tmp
var = tmp.idxvar
if var == nothing
return ""
else
return string(var)
end
end
num_dims = length(data.indexsets)
idxvars = Array(UTF8String, num_dims)
dimidx = 1
for i in 1:num_dims
if data.indexexprs[i].idxvar == nothing
while DIMS[dimidx] in locvars
dimidx += 1
end
if dimidx > length(DIMS)
error("Unexpectedly ran out of indices")
end
idxvars[i] = DIMS[dimidx]
dimidx += 1
else
idxvars[i] = locvars[i]
end
end
name_idx = string(data.name, sym[:ind_open], join(idxvars,","), sym[:ind_close])
# 2. construct part with what we index over
idx_sets = sym[:for_all]*" "*join(map(dim->string(idxvars[dim], " ", sym[:in],
" ", sym[:open_set],
cont_str_set(data.indexsets[dim],sym[:dots]),
sym[:close_set]), 1:num_dims), ", ")
# 3. Handle any conditions
if isa(j, JuMPDict) && data.condition != :()
idx_sets *= string(" s.t. ",join(parse_conditions(data.condition), " and "))
end
# 4. Bounds and category, if possible, and return final string
a_var = first(_values(j))
model = a_var.m
var_cat = model.colCat[a_var.col]
var_lb = model.colLower[a_var.col]
var_ub = model.colUpper[a_var.col]
# Variables may have different bounds, so we can't really print nicely
# at this time (possibly ever, as they could have been changed post
# creation, which we'd never be able to handle.
all_same_lb = true
all_same_ub = true
for var in _values(j)
all_same_lb &= model.colLower[var.col] == var_lb
all_same_ub &= model.colUpper[var.col] == var_ub
end
str_lb = var_lb == -Inf ? "-"*sym[:infty] : str_round(var_lb)
str_ub = var_ub == +Inf ? sym[:infty] : str_round(var_ub)
# Special case bounds printing based on the category
if var_cat == :Bin # x in {0,1}
return "$name_idx $(sym[:in]) $(sym[:open_set])0,1$(sym[:close_set]) $idx_sets"
elseif var_cat == :SemiInt # x in union of 0 and {lb,...,ub}
si_lb = all_same_lb ? str_lb : sym[:dots]
si_ub = all_same_ub ? str_ub : sym[:dots]
return "$name_idx $(sym[:in]) $(sym[:open_set])$si_lb,$(sym[:dots]),$si_ub$(sym[:close_set]) $(sym[:union]) $(sym[:open_set])0$(sym[:close_set]) $idx_sets"
elseif var_cat == :SemiCont # x in union of 0 and [lb,ub]
si_lb = all_same_lb ? str_lb : sym[:dots]
si_ub = all_same_ub ? str_ub : sym[:dots]
return "$name_idx $(sym[:in]) $(sym[:open_rng])$si_lb,$si_ub$(sym[:close_rng]) $(sym[:union]) $(sym[:open_set])0$(sym[:close_set]) $idx_sets"
elseif var_cat == :Fixed
si_bnd = all_same_lb ? str_lb : sym[:dots]
return "$name_idx = $si_bnd $idx_sets"
end
# Continuous and Integer
idx_sets = var_cat == :Int ? ", $(sym[:integer]), $idx_sets" : " $idx_sets"
if all_same_lb && all_same_ub
# Free variable
var_lb == -Inf && var_ub == +Inf && return "$name_idx free$idx_sets"
# No lower bound
var_lb == -Inf && return "$name_idx $(sym[:leq]) $str_ub$idx_sets"
# No upper bound
var_ub == +Inf && return "$name_idx $(sym[:geq]) $str_lb$idx_sets"
# Range
return "$str_lb $(sym[:leq]) $name_idx $(sym[:leq]) $str_ub$idx_sets"
end
if all_same_lb && !all_same_ub
var_lb == -Inf && return "$name_idx $(sym[:leq]) $(sym[:dots])$idx_sets"
return "$str_lb $(sym[:leq]) $name_idx $(sym[:leq]) $(sym[:dots])$idx_sets"
end
if !all_same_lb && all_same_ub
var_ub == +Inf && return "$name_idx $(sym[:geq]) $(sym[:dots])$idx_sets"
return "$(sym[:dots]) $(sym[:leq]) $name_idx $(sym[:leq]) $str_ub$idx_sets"
end
return "$(sym[:dots]) $(sym[:leq]) $name_idx $(sym[:leq]) $(sym[:dots])$idx_sets"
end
# UTILITY FUNCTIONS FOR cont_str
# Compactly render an index set for printing, e.g. 2:2:20 -> "2,4,…,18,20".
# Sets of up to four elements are written out in full; longer ones show the
# first two and the last two entries separated by `dots`.
function cont_str_set(idxset::Union{Range,Array}, dots)
    k = length(idxset)
    k == 0 && return dots
    k <= 4 && return join(idxset, ",")
    string(idxset[1], ",", idxset[2], ",", dots, ",", idxset[end-1], ",", idxset[end])
end
cont_str_set(idxset, dots) = dots # fallback for anything non-indexable
# parse_conditions
# Not exported. Traverses an expression and constructs an array with entries
# corresponding to each condition. More specifically, if the condition is
# a && (b || c) && (d && e), it returns [a, b || c, d, e].
# Flatten a chain of `&&` expressions into the list of its conjuncts, e.g.
# `a && (b || c) && (d && e)` yields `[a, b || c, d, e]`. Non-Expr inputs
# are returned unchanged.
parse_conditions(not_an_expr) = not_an_expr
function parse_conditions(expr::Expr)
    conds = Any[]
    # Anything that is not a conjunction counts as a single condition.
    expr.head == Symbol("&&") || return push!(conds, expr)
    # Recursively flatten each operand and splice the results together.
    flattened = map(parse_conditions, expr.args)
    vcat(conds, flattened...)
end
# Handlers to use correct symbols
cont_str(::Type{REPLMode}, j; mathmode=false) =
cont_str(REPLMode, j, repl)
cont_str(::Type{IJuliaMode}, j; mathmode=true) =
math(cont_str(IJuliaMode, j, ijulia), mathmode)
#------------------------------------------------------------------------
## JuMPContainer{Float64}
#------------------------------------------------------------------------
Base.show(io::IO, j::JuMPContainer{Float64}) = print(io, val_str(REPLMode,j))
function val_str{N}(mode, j::JuMPArray{Float64,N})
m = _getmodel(j)
data = printdata(j)
out_str = "$(data.name): $N dimensions:\n"
if isempty(j)
return out_str * " (no entries)"
end
function val_str_rec(depth, parent_index::Vector{Any}, parent_str::AbstractString)
# Turn index set into strings
indexset = data.indexsets[depth]
index_strs = map(string, indexset)
# Determine longest index so we can align columns
max_index_len = 0
for index_str in index_strs
max_index_len = max(max_index_len, strwidth(index_str))
end
# If have recursed, we need to prepend the parent's index strings
# accumulated, as well as white space so the alignment works.
for i = 1:length(index_strs)
index_strs[i] = parent_str * lpad(index_strs[i],max_index_len," ")
end
# Create a string for the number of spaces we need to indent
indent = " "^(2*(depth-1))
# Determine the need to recurse
if depth == N
# Deepest level
for i = 1:length(indexset)
value = length(parent_index) == 0 ?
j[indexset[i]] :
j[parent_index...,indexset[i]]
out_str *= indent * "[" * index_strs[i] * "] = $value\n"
end
else
# At least one more layer to go
for i = 1:length(indexset)
index = indexset[i]
# Print the ":" version of indices we will recurse over
out_str *= indent * "[" * index_strs[i] * ",:"^(N-depth) * "]\n"
val_str_rec(depth+1,
length(parent_index) == 0 ? Any[index] : Any[parent_index...,index],
index_strs[i] * ",")
end
end
end
val_str_rec(1,Any[],"")
return out_str
end
# support types that don't have built-in comparison
# Lexicographic tuple comparison that also works for element types without a
# built-in ordering: such elements are ordered by their hashes instead.
function _isless(t1::Tuple, t2::Tuple)
    len1, len2 = length(t1), length(t2)
    for k in 1:min(len1, len2)
        x, y = t1[k], t2[k]
        isequal(x, y) && continue
        # First differing position decides; fall back to hash order when the
        # elements have no applicable `isless`.
        return applicable(isless, x, y) ? isless(x, y) : isless(hash(x), hash(y))
    end
    # All shared positions equal: the shorter tuple sorts first.
    return len1 < len2
end
function val_str(mode, dict::JuMPDict{Float64})
nelem = length(dict.tupledict)
isempty(dict) && return ""
m = _getmodel(dict)
data = printdata(dict)
out_str = "$(data.name): $(length(data.indexsets)) dimensions, $nelem "
out_str *= nelem == 1 ? "entry" : "entries"
out_str *= ":"
sortedkeys = sort(collect(keys(dict.tupledict)), lt = _isless)
ndim = length(first(keys(dict.tupledict)))
key_strs = Array(AbstractString, length(dict), ndim)
for (i, key) in enumerate(sortedkeys)
for j in 1:ndim
key_strs[i,j] = string(key[j])
end
end
max_dim_lens = map(1:ndim) do i
maximum(map(length,key_strs[:,i]))
end
key_str = map(1:length(dict)) do i
join(map(1:ndim) do j
lpad(key_strs[i,j], max_dim_lens[j])
end, ",")
end
max_key_len = maximum(map(length,key_str))
for (i,key) in enumerate(sortedkeys)
val = dict[key...]
out_str *= "\n" * lpad("[$(key_str[i])]", max_key_len+3)
out_str *= " = $val"
end
return out_str
end
#------------------------------------------------------------------------
## AffExpr (not GenericAffExpr)
#------------------------------------------------------------------------
Base.show(io::IO, a::AffExpr) = print(io, aff_str(REPLMode,a))
@compat Base.show(io::IO, ::MIME"text/latex", a::AffExpr) =
print(io, math(aff_str(IJuliaMode,a),false))
# Generic string converter, called by mode-specific handlers
function aff_str(mode, a::AffExpr, show_constant=true)
# If the expression is empty, return the constant (or 0)
if length(a.vars) == 0
return show_constant ? str_round(a.constant) : "0"
end
# Get reference to models included in this expression
moddict = Dict{Model,IndexedVector{Float64}}()
for var in a.vars
if !haskey(moddict, var.m)
moddict[var.m] = IndexedVector(Float64,var.m.numCols)
end
end
# Collect like terms
for ind in 1:length(a.vars)
addelt!(moddict[a.vars[ind].m], a.vars[ind].col, a.coeffs[ind])
end
elm = 1
term_str = Array(UTF8String, 2*length(a.vars))
# For each model
for m in keys(moddict)
indvec = moddict[m]
# For each non-zero for this model
for i in 1:indvec.nnz
idx = indvec.nzidx[i]
elt = indvec.elts[idx]
abs(elt) < PRINT_ZERO_TOL && continue # e.g. x - x
pre = abs(abs(elt)-1) < PRINT_ZERO_TOL ? "" : str_round(abs(elt)) * " "
var = var_str(mode,m,idx)
term_str[2*elm-1] = elt < 0 ? " - " : " + "
term_str[2*elm ] = "$pre$var"
elm += 1
end
end
if elm == 1
# Will happen with cancellation of all terms
# We should just return the constant, if its desired
return show_constant ? str_round(a.constant) : "0"
else
# Correction for very first term - don't want a " + "/" - "
term_str[1] = (term_str[1] == " - ") ? "-" : ""
ret = join(term_str[1:2*(elm-1)])
if abs(a.constant) >= PRINT_ZERO_TOL && show_constant
ret = string(ret, a.constant < 0 ? " - " : " + ", str_round(abs(a.constant)))
end
return ret
end
end
# Precompile for faster boot times
Base.precompile(aff_str, (Type{JuMP.REPLMode}, AffExpr, Bool))
Base.precompile(aff_str, (Type{JuMP.IJuliaMode}, AffExpr, Bool))
Base.precompile(aff_str, (Type{JuMP.REPLMode}, AffExpr))
Base.precompile(aff_str, (Type{JuMP.IJuliaMode}, AffExpr))
#------------------------------------------------------------------------
## GenericQuadExpr
#------------------------------------------------------------------------
Base.show(io::IO, q::GenericQuadExpr) = print(io, quad_str(REPLMode,q))
@compat Base.show(io::IO, ::MIME"text/latex", q::GenericQuadExpr) =
print(io, quad_str(IJuliaMode,q,mathmode=false))
# Generic string converter, called by mode-specific handlers
function quad_str(mode, q::GenericQuadExpr, sym)
length(q.qvars1) == 0 && return aff_str(mode,q.aff)
# Canonicalize x_i * x_j so i <= j
for ind in 1:length(q.qvars1)
if q.qvars2[ind].col < q.qvars1[ind].col
q.qvars1[ind],q.qvars2[ind] = q.qvars2[ind],q.qvars1[ind]
end
end
# Merge duplicates
Q = sparse([v.col for v in q.qvars1], [v.col for v in q.qvars2], q.qcoeffs)
I,J,V = findnz(Q)
Qnnz = length(V)
# Odd terms are +/i, even terms are the variables/coeffs
term_str = Array(UTF8String, 2*Qnnz)
if Qnnz > 0
for ind in 1:Qnnz
val = abs(V[ind])
pre = (val == 1.0 ? "" : str_round(val)*" ")
x = var_str(mode,q.qvars1[ind].m,I[ind])
y = var_str(mode,q.qvars1[ind].m,J[ind])
term_str[2*ind-1] = V[ind] < 0 ? " - " : " + "
term_str[2*ind ] = "$pre$x" * (x == y ? sym[:sq] : "$(sym[:times])$y")
end
# Correction for first term as there is no space
# between - and variable coefficient/name
term_str[1] = V[1] < 0 ? "-" : ""
end
ret = join(term_str)
if q.aff.constant == 0 && length(q.aff.vars) == 0
return ret
else
aff = aff_str(mode,q.aff)
if aff[1] == '-'
return string(ret, " - ", aff[2:end])
else
return string(ret, " + ", aff)
end
end
end
# Handlers to use correct symbols
quad_str(::Type{REPLMode}, q::GenericQuadExpr) =
quad_str(REPLMode, q, repl)
quad_str(::Type{IJuliaMode}, q::GenericQuadExpr; mathmode=true) =
math(quad_str(IJuliaMode, q, ijulia), mathmode)
#------------------------------------------------------------------------
## SOCExpr
#------------------------------------------------------------------------
Base.show(io::IO, c::SOCExpr) = print(io, expr_str(REPLMode, c))
@compat Base.show(io::IO, ::MIME"text/latex", c::SOCExpr) =
print(io, expr_str(IJuliaMode, c))
function expr_str(mode, c::SOCExpr)
coeff = (c.coeff == 1) ? "" : string(c.coeff, " ")
aff = aff_str(mode, c.aff)
if aff[1] == '-'
chain = " - "
aff = aff[2:end]
elseif aff == "0"
aff = ""
chain = ""
else # positive
chain = " + "
end
string(coeff, norm_str(mode, c.norm), chain, aff)
end
#------------------------------------------------------------------------
## GenericRangeConstraint
#------------------------------------------------------------------------
Base.show(io::IO, c::GenericRangeConstraint) = print(io, con_str(REPLMode,c))
@compat Base.show(io::IO, ::MIME"text/latex", c::GenericRangeConstraint) =
print(io, con_str(IJuliaMode,c,mathmode=false))
# Generic string converter, called by mode-specific handlers
function con_str(mode, c::GenericRangeConstraint, sym)
s = sense(c)
a = aff_str(mode,c.terms,false)
if s == :range
out_str = "$(str_round(c.lb)) $(sym[:leq]) $a $(sym[:leq]) $(str_round(c.ub))"
else
rel = s == :<= ? sym[:leq] : (s == :>= ? sym[:geq] : sym[:eq])
out_str = string(a," ",rel," ",str_round(rhs(c)))
end
out_str
end
# Handlers to use correct symbols
con_str(::Type{REPLMode}, c::GenericRangeConstraint; args...) =
con_str(REPLMode, c, repl)
con_str(::Type{IJuliaMode}, c::GenericRangeConstraint; mathmode=true) =
math(con_str(IJuliaMode, c, ijulia), mathmode)
#------------------------------------------------------------------------
## QuadConstraint
#------------------------------------------------------------------------
Base.show(io::IO, c::QuadConstraint) = print(io, con_str(REPLMode,c))
@compat Base.show(io::IO, ::MIME"text/latex", c::QuadConstraint) =
print(io, con_str(IJuliaMode,c,mathmode=false))
# Generic string converter, called by mode-specific handlers
function con_str(mode, c::QuadConstraint, sym)
s = c.sense
r = (s == :<=) ? sym[:leq] : (s == :>= ? sym[:geq] : sym[:eq])
"$(quad_str(mode,c.terms)) $r 0"
end
# Handlers to use correct symbols
con_str(::Type{REPLMode}, c::QuadConstraint; args...) =
con_str(REPLMode, c, repl)
con_str(::Type{IJuliaMode}, c::QuadConstraint; mathmode=true) =
math(con_str(IJuliaMode, c, ijulia), mathmode)
#------------------------------------------------------------------------
## SOCConstraint
#------------------------------------------------------------------------
Base.show(io::IO, c::SOCConstraint) = print(io, con_str(REPLMode,c))
@compat Base.show(io::IO, ::MIME"text/latex", c::SOCConstraint) =
print(io, con_str(IJuliaMode,c))
function con_str(mode, c::SOCConstraint, sym::PrintSymbols)
ne = c.normexpr
coeff = ne.coeff == 1 ? "" : string(ne.coeff, " ")
nrm = norm_str(mode, ne.norm)
aff = aff_str(mode, -ne.aff)
string(coeff, nrm, " $(repl[:leq]) ", aff)
end
# Handlers to use correct symbols
con_str(::Type{REPLMode}, c::SOCConstraint; args...) =
con_str(REPLMode, c, repl)
con_str(::Type{IJuliaMode}, c::SOCConstraint; mathmode=true) =
math(con_str(IJuliaMode, c, ijulia), mathmode)
#------------------------------------------------------------------------
## SOSConstraint
#------------------------------------------------------------------------
Base.show(io::IO, c::SOSConstraint) = print(io, con_str(REPLMode,c))
@compat Base.show(io::IO, ::MIME"text/latex", c::SOSConstraint) =
print(io, con_str(IJuliaMode,c,mathmode=false))
# Generic string converter, called by mode-specific handlers
function con_str(mode, c::SOSConstraint, sym::PrintSymbols)
term_str = [string(str_round(c.weights[i]), " ", c.terms[i])
for i in 1:length(c.terms)]
"$(c.sostype): $(sym[:open_set])$(join(term_str,", "))$(sym[:close_set])"
end
# Handlers to use correct symbols
con_str(::Type{REPLMode}, c::SOSConstraint; args...) =
con_str(REPLMode, c, repl)
con_str(::Type{IJuliaMode}, c::SOSConstraint; mathmode=true) =
math(con_str(IJuliaMode, c, ijulia), mathmode)
#------------------------------------------------------------------------
## SDConstraint
#------------------------------------------------------------------------
Base.show(io::IO, c::SDConstraint) = print(io, con_str(REPLMode,c))
@compat Base.show(io::IO, ::MIME"text/latex", c::SDConstraint) =
print(io, con_str(IJuliaMode,c,mathmode=false))
# Generic string converter, called by mode-specific handlers
function con_str(mode, c::SDConstraint, succeq0)
t = c.terms
str = sprint(print, t)
splitted = split(str, "\n")[2:end]
center = ceil(Int, length(splitted)/2)
splitted[center] *= succeq0
join(splitted, "\n")
end
# Handlers to use correct symbols
con_str(::Type{REPLMode}, c::SDConstraint; args...) =
con_str(REPLMode, c, repl[:succeq0])
con_str(::Type{IJuliaMode}, c::SDConstraint; mathmode=true) =
math(con_str(IJuliaMode, c, ijulia[:succeq0], mathmode))
#------------------------------------------------------------------------
## ConstraintRef
#------------------------------------------------------------------------
Base.show(io::IO, c::ConstraintRef{Model,LinearConstraint}) = print(io, con_str(REPLMode,c.m.linconstr[c.idx]))
Base.show(io::IO, c::ConstraintRef{Model,QuadConstraint}) = print(io, con_str(REPLMode,c.m.quadconstr[c.idx]))
Base.show(io::IO, c::ConstraintRef{Model,SOSConstraint}) = print(io, con_str(REPLMode,c.m.sosconstr[c.idx]))
Base.show(io::IO, c::ConstraintRef{Model,SOCConstraint}) = print(io, con_str(REPLMode,c.m.socconstr[c.idx]))
Base.show(io::IO, c::ConstraintRef{Model,SDConstraint}) = print(io, con_str(REPLMode,c.m.sdpconstr[c.idx]))
function Base.show(io::IO, c::ConstraintRef{Model,NonlinearConstraint})
print(io, "Reference to nonlinear constraint #$(linearindex(c))")
end
@compat Base.show(io::IO, ::MIME"text/latex", c::ConstraintRef{Model,LinearConstraint}) =
print(io, con_str(IJuliaMode,c.m.linconstr[c.idx],mathmode=false))
@compat Base.show(io::IO, ::MIME"text/latex", c::ConstraintRef{Model,QuadConstraint}) =
print(io, con_str(IJuliaMode,c.m.quadconstr[c.idx],mathmode=false))
@compat Base.show(io::IO, ::MIME"text/latex", c::ConstraintRef{Model,SOSConstraint}) =
print(io, con_str(IJuliaMode,c.m.sosconstr[c.idx],mathmode=false))
| [
27,
7856,
261,
480,
29,
33704,
5605,
81,
15,
65,
14,
44,
29516,
62,
25391,
198,
2,
220,
15069,
1584,
11,
1279,
20608,
22330,
1279,
20608,
22330,
1279,
20608,
22330,
290,
20420,
198,
2,
220,
770,
8090,
6127,
5178,
318,
2426,
284,
262... | 2.23331 | 15,803 |
<gh_stars>10-100
############################################################
## joMatrix - extra functions
# elements(jo)
#elements(A::joAbstractDAparallelToggleOperator) = throw(joAbstractDAparallelToggleOperatorException("elements: pointless operation for joDAdistribute/joDAgather operations"))
# hasinverse(jo)
# issquare(jo)
# istall(jo)
# iswide(jo)
# iscomplex(jo)
# islinear(jo)
# isadjoint(jo)
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
29113,
14468,
7804,
4242,
198,
2235,
2525,
46912,
532,
3131,
5499,
198,
198,
2,
4847,
7,
7639,
8,
198,
2,
68,
3639,
7,
32,
3712,
7639,
23839,
5631,
1845,
29363,
51,
20258,
18843,
1352,
8... | 3.036765 | 136 |
# Normalize a size spec to an (M, K, N) triple: a single integer `s` means a
# square s×s×s multiply; an explicit 3-tuple is passed through unchanged.
matmul_sizes(s::Integer) = (s,s,s)
matmul_sizes(m_k_n::Tuple{Vararg{<:Integer, 3}}) = m_k_n
"""
runbench(backend, TA, TB, TC; kwargs...)
"""
function runbench(backend::Symbol, ::Type{TA}, ::Type{TB}, ::Type{TC};
kwargs...) where {TA, TB, TC}
return runbench(Val(backend), TA, TB, TC; kwargs...)
end
"""
runbench(::Val{:CUDA}, TA, TB, TC; kwargs...)
"""
function runbench(::Val{:CUDA}, ::Type{TA}, ::Type{TB}, ::Type{TC};
estimator::F = Base.minimum,
libs = all_libs(),
sizes = [2^n for n in 7:14]) where {TA, TB, TC, F}
sizes_vec = collect(sizes)
_sizes_column = Vector{Int}(undef, 0)
_libraries_column = Vector{Symbol}(undef, 0)
_tflops_column = Vector{Float64}(undef, 0)
_times_column = Vector{Float64}(undef, 0)
p = ProgressMeter.Progress(length(sizes_vec))
warmed_up = falses(length(sizes_vec))
for (j, sz) ∈ enumerate(sizes_vec)
showvalues = []
push!(showvalues, (:Size, sz))
GC.gc()
CUDA.reclaim()
M = sz
K = sz
N = sz
for lib in libs
benchfunc! = getbenchfunc(lib)
if !(warmed_up[j])
A_h = rand(TA, (M, K)) / sqrt(TA(K));
B_h = rand(TB, (K, N)) / sqrt(TB(K));
C_h = rand(TC, (M, N));
A = CUDA.CuArray(A_h);
B = CUDA.CuArray(B_h);
C = CUDA.CuArray(C_h);
benchfunc!(C, A, B) # warmup
warmed_up[j] = true
end
A_h = rand(TA, (M, K)) / sqrt(TA(K));
B_h = rand(TB, (K, N)) / sqrt(TB(K));
C_h = rand(TC, (M, N));
A = CUDA.CuArray(A_h);
B = CUDA.CuArray(B_h);
C = CUDA.CuArray(C_h);
trial = benchfunc!(C, A, B)
point_estimate = estimator(trial)
t_ns = Base.time(point_estimate) # time in nanoseconds
t_ps = 1_000 * t_ns # time in picoseconds
flops_factor = 2
tflops = flops_factor * M * K * N / t_ps # teraflops
push!(showvalues, (lib, "$(round(tflops; digits = 2)) TFLOPS"))
push!(_sizes_column, sz)
push!(_libraries_column, lib)
push!(_tflops_column, tflops)
push!(_times_column, t_ns)
end
ProgressMeter.next!(p; showvalues = showvalues)
GC.gc()
CUDA.reclaim()
end
df = DataFrames.DataFrame(
Size = _sizes_column,
Library = _libraries_column,
TFLOPS = _tflops_column,
Time = _times_column,
)
benchmark_result = BenchmarkResult{TA, TB, TC}(df)
return benchmark_result
end
| [
6759,
76,
377,
62,
82,
4340,
7,
82,
3712,
46541,
8,
796,
357,
82,
11,
82,
11,
82,
8,
198,
6759,
76,
377,
62,
82,
4340,
7,
76,
62,
74,
62,
77,
3712,
51,
29291,
90,
19852,
853,
90,
27,
25,
46541,
11,
513,
11709,
8,
796,
285,... | 1.806538 | 1,499 |
# SPDX-License-Identifier: X11
# 2020-08-29
# Five Variables (100pt)
println(findfirst(x -> x == 0, parse.(Int, split(readline(), " "))))
| [
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
1395,
1157,
198,
2,
12131,
12,
2919,
12,
1959,
198,
2,
10579,
15965,
2977,
357,
3064,
457,
8,
198,
198,
35235,
7,
19796,
11085,
7,
87,
4613,
2124,
6624,
657,
11,
21136,
12195,
5317,
11... | 2.622642 | 53 |
using Revise
using ReversibleSeismic
nx = 1000
ny = 1000
param = AcousticPropagatorParams(nx=nx, ny=ny,
Rcoef=0.2, dx=20.0, dy=20.0, dt=0.05, nstep=100)
c = 1000*ones(param.NX+2, param.NY+2)
src = (param.NX÷2, param.NY÷2)
srcv = Ricker(param, 100.0, 500.0)
tu = solve(param, src, srcv, c)
loss = sum(tu .^ 2)
@assert loss ≈ 10.931466822080788
using PyPlot
function viz(param, tu)
close("all")
plot(srcv)
savefig("_srcv.png")
for i = 1:200:param.NSTEP+1
close("all")
pcolormesh(tu[:,:,i])
colorbar()
savefig("_f$i.png")
end
end
viz(param, tu)
| [
3500,
5416,
786,
198,
3500,
797,
37393,
4653,
1042,
291,
198,
198,
77,
87,
796,
8576,
198,
3281,
796,
8576,
198,
17143,
796,
4013,
21618,
24331,
363,
1352,
10044,
4105,
7,
77,
87,
28,
77,
87,
11,
299,
88,
28,
3281,
11,
198,
220,
... | 1.892966 | 327 |
<gh_stars>0
#=
JoiceTypeConstructorFunctions:
- Julia version: 1.6
- Author: connorforsythe
- Date: 2021-11-05
=#
export ConstructModelInfo
function checkModelInfo(Data::DataFrame, ChoiceColumn::Symbol, Parameters::Vector{Symbol}, ChoiceSetIDColumn::Symbol, RandomParameters::RandColsType=nothing, PanelIDColumn::ColsType=nothing, WeightColumn::ColsType=nothing)
dataCols = Symbol.(names(Data))
missingCols = []
if !(ChoiceColumn in dataCols)
push!(missingCols, ChoiceColumn)
end
if !(ChoiceSetIDColumn in dataCols)
push!(missingCols, ChoiceSetIDColumn)
end
if !(PanelIDColumn in dataCols) && (PanelIDColumn!=nothing)
push!(missingCols, PanelIDColumn)
end
for tempSymbol in Parameters
if !(tempSymbol in dataCols)
push!(missingCols, tempSymbol)
end
end
if RandomParameters!=nothing
for tempSymbol in keys(RandomParameters)
if !(tempSymbol in dataCols)
push!(missingCols, tempSymbol)
end
end
end
if length(missingCols)>0
error("You have specified the following cols that aren't in the provided dataset: $missingCols")
end
end
function convertParameterColumns(x::Vector{String}) return Symbol.(x) end
function convertParameterColumns(x::Vector{Symbol}) return x end
function convertParameterColumns(x::Symbol) return [x] end
function convertParameterColumns(x::String) return convertParameterColumns(Symbol(x)) end
function convertParameterColumns(x::Nothing) return x end
function convertParameterColumn(x::String) return Symbol(x) end
function convertParameterColumn(x::Symbol) return x end
function convertParameterColumn(x::Nothing) return x end
function convertRandomParameterColumns(x::Dict{Symbol, String}) return x end
function convertRandomParameterColumns(x::Dict{String, String}) return Dict(zip(convertParameterColumns([keys(x)...]), values(x))) end
function convertRandomParameterColumns(x::Nothing) return x end
function ConstructModelInfo(Data::DataFrame, ChoiceColumn::ColType, Parameters::ColsType, ChoiceSetIDColumn::ColType; RandomParameters::RandColsType=nothing, PanelIDColumn::ColType=nothing,
Space::String="preference", Name::NameType=nothing, InterDrawCount::Int=2048, WeightColumn::ColType=nothing)
TimeCreated = Dates.now()
ChoiceColumn = convertParameterColumn(ChoiceColumn)
Parameters = convertParameterColumns(Parameters)
ChoiceSetIDColumn = convertParameterColumn(ChoiceSetIDColumn)
WeightColumn = convertParameterColumn(WeightColumn)
PanelIDColumn = convertParameterColumn(PanelIDColumn)
RandomParameters = convertRandomParameterColumns(RandomParameters)
RandomDraws = getDraws(RandomParameters, InterDrawCount)
typedSpace::JoiceModelSpace = Preference()
colsToKeep = copy(Parameters)
appendParamNames!(colsToKeep, RandomParameters)
push!(colsToKeep, ChoiceColumn)
push!(colsToKeep, ChoiceSetIDColumn)
if(lowercase(Space)=="wtp")
typedSpace = WTP()
end
typedDataStructure::JoiceDataStructure = Panel()
ObservationColumn::Symbol = ChoiceSetIDColumn
if(PanelIDColumn==nothing)
typedDataStructure = Cross()
else
push!(colsToKeep, PanelIDColumn)
ObservationColumn = PanelIDColumn
end
if(WeightColumn!=nothing)
push!(colsToKeep, WeightColumn)
end
checkModelInfo(Data, ChoiceColumn, Parameters, ChoiceSetIDColumn, RandomParameters, PanelIDColumn, WeightColumn)
PreferenceSearchBounds = [-1, 1]
println("Model being constructed with the following properties:")
if(PanelIDColumn!=nothing)
println("Panel Variable: $PanelIDColumn")
end
println("Choice Set Variable: $ChoiceSetIDColumn")
println("Choice Indicator Variable: $ChoiceColumn")
println("Fixed Parameters: $Parameters")
if(RandomParameters!=nothing)
println("Random Parameters: $RandomParameters")
end
print("Data has ")
print(size(Data)[1])
println(" rows")
cleanDataFrame = cleanData(Data, colsToKeep, ChoiceSetIDColumn, PanelIDColumn)
r = ModelInfo(cleanDataFrame, ChoiceColumn, Parameters, :choiceSetID, RandomParameters, RandomDraws,
PanelIDColumn, ObservationColumn, typedDataStructure, typedSpace, Name, InterDrawCount,
WeightColumn, TimeCreated, PreferenceSearchBounds)
return r
end
function cleanData(data::DataFrame, colsToKeep::Vector{Symbol}, choiceSetIDColumn::Symbol, panelIDColumn::Symbol)
data = data[:, colsToKeep]
#Construct new id to allow for repeated choice set ids given
maxChoiceSetID = max(data[:, choiceSetIDColumn]...)
panelIDMultiplier = 10^(ceil(log10(maxChoiceSetID)))
data[:, :choiceSetID] .= (data[:, panelIDColumn].*panelIDMultiplier).+data[:, choiceSetIDColumn]
return data
end
function cleanData(data::DataFrame, colsToKeep::Vector{Symbol}, choiceSetIDColumn::Symbol, panelIDColumn::Nothing)
data = data[:, colsToKeep]
data[:, :choiceSetID] .= data[:, choiceSetIDColumn]
return data
end
function appendParamNames!(currentVect::Vector{Symbol}, randomParameters::Nothing) end
function appendParamNames!(currentVect::Vector{Symbol}, randomParameters::Dict{Symbol, String})
for tempCol in keys(randomParameters)
push!(currentVect, tempCol)
end
end | [
27,
456,
62,
30783,
29,
15,
198,
2,
28,
198,
41,
2942,
6030,
42316,
273,
24629,
2733,
25,
198,
12,
22300,
2196,
25,
352,
13,
21,
198,
12,
6434,
25,
369,
13099,
69,
669,
88,
1169,
198,
12,
7536,
25,
33448,
12,
1157,
12,
2713,
1... | 2.941434 | 1,827 |
function BigFraction(arg0::BigInteger)
return BigFraction((BigInteger,), arg0)
end
function BigFraction(arg0::BigInteger, arg1::BigInteger)
return BigFraction((BigInteger, BigInteger), arg0, arg1)
end
function BigFraction(arg0::jdouble)
return BigFraction((jdouble,), arg0)
end
function BigFraction(arg0::jdouble, arg1::jdouble, arg2::jint)
return BigFraction((jdouble, jdouble, jint), arg0, arg1, arg2)
end
function BigFraction(arg0::jdouble, arg1::jint)
return BigFraction((jdouble, jint), arg0, arg1)
end
function BigFraction(arg0::jint)
return BigFraction((jint,), arg0)
end
function BigFraction(arg0::jint, arg1::jint)
return BigFraction((jint, jint), arg0, arg1)
end
function BigFraction(arg0::jlong)
return BigFraction((jlong,), arg0)
end
function BigFraction(arg0::jlong, arg1::jlong)
return BigFraction((jlong, jlong), arg0, arg1)
end
function abs(obj::BigFraction)
return jcall(obj, "abs", BigFraction, ())
end
function add(obj::BigFraction, arg0::BigFraction)
return jcall(obj, "add", BigFraction, (BigFraction,), arg0)
end
function add(obj::BigFraction, arg0::BigInteger)
return jcall(obj, "add", BigFraction, (BigInteger,), arg0)
end
function add(obj::BigFraction, arg0::jint)
return jcall(obj, "add", BigFraction, (jint,), arg0)
end
function add(obj::BigFraction, arg0::jlong)
return jcall(obj, "add", BigFraction, (jlong,), arg0)
end
function big_decimal_value(obj::BigFraction)
return jcall(obj, "bigDecimalValue", BigDecimal, ())
end
function big_decimal_value(obj::BigFraction, arg0::jint)
return jcall(obj, "bigDecimalValue", BigDecimal, (jint,), arg0)
end
function big_decimal_value(obj::BigFraction, arg0::jint, arg1::jint)
return jcall(obj, "bigDecimalValue", BigDecimal, (jint, jint), arg0, arg1)
end
function byte_value(obj::Number)
return jcall(obj, "byteValue", jbyte, ())
end
function compare_to(obj::BigFraction, arg0::BigFraction)
return jcall(obj, "compareTo", jint, (BigFraction,), arg0)
end
function divide(obj::BigFraction, arg0::BigFraction)
return jcall(obj, "divide", BigFraction, (BigFraction,), arg0)
end
function divide(obj::BigFraction, arg0::BigInteger)
return jcall(obj, "divide", BigFraction, (BigInteger,), arg0)
end
function divide(obj::BigFraction, arg0::jint)
return jcall(obj, "divide", BigFraction, (jint,), arg0)
end
function divide(obj::BigFraction, arg0::jlong)
return jcall(obj, "divide", BigFraction, (jlong,), arg0)
end
function double_value(obj::BigFraction)
return jcall(obj, "doubleValue", jdouble, ())
end
function equals(obj::BigFraction, arg0::Object)
return jcall(obj, "equals", jboolean, (Object,), arg0)
end
function float_value(obj::BigFraction)
return jcall(obj, "floatValue", jfloat, ())
end
function get_denominator(obj::BigFraction)
return jcall(obj, "getDenominator", BigInteger, ())
end
function get_denominator_as_int(obj::BigFraction)
return jcall(obj, "getDenominatorAsInt", jint, ())
end
function get_denominator_as_long(obj::BigFraction)
return jcall(obj, "getDenominatorAsLong", jlong, ())
end
function get_field(obj::BigFraction)
return jcall(obj, "getField", BigFractionField, ())
end
function get_numerator(obj::BigFraction)
return jcall(obj, "getNumerator", BigInteger, ())
end
function get_numerator_as_int(obj::BigFraction)
return jcall(obj, "getNumeratorAsInt", jint, ())
end
function get_numerator_as_long(obj::BigFraction)
return jcall(obj, "getNumeratorAsLong", jlong, ())
end
function get_reduced_fraction(::Type{BigFraction}, arg0::jint, arg1::jint)
return jcall(BigFraction, "getReducedFraction", BigFraction, (jint, jint), arg0, arg1)
end
function hash_code(obj::BigFraction)
return jcall(obj, "hashCode", jint, ())
end
function int_value(obj::BigFraction)
return jcall(obj, "intValue", jint, ())
end
function long_value(obj::BigFraction)
return jcall(obj, "longValue", jlong, ())
end
function multiply(obj::BigFraction, arg0::BigFraction)
return jcall(obj, "multiply", BigFraction, (BigFraction,), arg0)
end
function multiply(obj::BigFraction, arg0::BigInteger)
return jcall(obj, "multiply", BigFraction, (BigInteger,), arg0)
end
function multiply(obj::BigFraction, arg0::jint)
return jcall(obj, "multiply", BigFraction, (jint,), arg0)
end
function multiply(obj::BigFraction, arg0::jlong)
return jcall(obj, "multiply", BigFraction, (jlong,), arg0)
end
function negate(obj::BigFraction)
return jcall(obj, "negate", BigFraction, ())
end
function percentage_value(obj::BigFraction)
return jcall(obj, "percentageValue", jdouble, ())
end
function pow(obj::BigFraction, arg0::BigInteger)
return jcall(obj, "pow", BigFraction, (BigInteger,), arg0)
end
function pow(obj::BigFraction, arg0::jdouble)
return jcall(obj, "pow", jdouble, (jdouble,), arg0)
end
function pow(obj::BigFraction, arg0::jint)
return jcall(obj, "pow", BigFraction, (jint,), arg0)
end
function pow(obj::BigFraction, arg0::jlong)
return jcall(obj, "pow", BigFraction, (jlong,), arg0)
end
function reciprocal(obj::BigFraction)
return jcall(obj, "reciprocal", BigFraction, ())
end
function reduce(obj::BigFraction)
return jcall(obj, "reduce", BigFraction, ())
end
function short_value(obj::Number)
return jcall(obj, "shortValue", jshort, ())
end
function signum(obj::BigFraction)
return jcall(obj, "signum", jint, ())
end
function subtract(obj::BigFraction, arg0::BigFraction)
return jcall(obj, "subtract", BigFraction, (BigFraction,), arg0)
end
function subtract(obj::BigFraction, arg0::BigInteger)
return jcall(obj, "subtract", BigFraction, (BigInteger,), arg0)
end
function subtract(obj::BigFraction, arg0::jint)
return jcall(obj, "subtract", BigFraction, (jint,), arg0)
end
function subtract(obj::BigFraction, arg0::jlong)
return jcall(obj, "subtract", BigFraction, (jlong,), arg0)
end
function to_string(obj::BigFraction)
return jcall(obj, "toString", JString, ())
end
| [
8818,
4403,
37,
7861,
7,
853,
15,
3712,
12804,
46541,
8,
198,
220,
220,
220,
1441,
4403,
37,
7861,
19510,
12804,
46541,
11,
828,
1822,
15,
8,
198,
437,
198,
198,
8818,
4403,
37,
7861,
7,
853,
15,
3712,
12804,
46541,
11,
1822,
16,
... | 2.605206 | 2,305 |
module QJuliaRegisters
# Plain baseline registers
const half2 = NTuple{2, VecElement{Float16}}
const half4 = NTuple{4, VecElement{Float16}}
#
const float2 = NTuple{2, VecElement{Float32}}
const float4 = NTuple{4, VecElement{Float32}}
const float8 = NTuple{8, VecElement{Float32}}
const float16 = NTuple{16, VecElement{Float32}}
#
const double2 = NTuple{2, VecElement{Float64}}
const double4 = NTuple{4, VecElement{Float64}}
const double8 = NTuple{8, VecElement{Float64}}
# Helper functions for plain registers
function register_type(T)
if T == Float16 || T == ComplexF16 || T == half2 || T == half4
return Float16
elseif T == Float32 || T == ComplexF32 || T == float2 || T == float4 || T == float8 || T == float16
return Float32
elseif T == Float64 || T == ComplexF64 || T == double2 || T == double4 || T == double8
return Float64
elseif T == BigFloat || T == Complex{BigFloat}
return BigFloat
else
error("Cannot deduce a type.")
end
return nothing
end
function register_size(T)
if T == Float16 || T == Float32 || T == Float64 || T == BigFloat
return 1
elseif T == ComplexF16 || T == ComplexF32 || T == ComplexF64 || T == Complex{BigFloat} || T == half2 || T == float2 || T == double2
return 2
elseif T == double4 || T == float4 || T == half4
return 4
elseif T == double8 || T == float8
return 8
elseif T == float16
return 16
else
error("Cannot deduce the register size (type is not supported).")
end
return nothing
end
# More generic registers
abstract type GenericRegister{T<:Real,N} end
mutable struct FloatN{T<:AbstractFloat,N} <: GenericRegister{T,N}
val::NTuple{N, VecElement{T}}
FloatN{T,N}() where {T} where {N} = new( NTuple{N, VecElement{T}}(ntuple(i->0, N)) )
FloatN{T,N}(src::FloatN) where {T} where {N} = new( src.val )
FloatN{T,N}(reg::NTuple{N, VecElement{T}}) where {T} where {N} = new( reg )
end
# re-implement plain complex type for compatibility
mutable struct Complex2{T<:AbstractFloat, N} <: GenericRegister{T,N}
val::Complex{T}
Complex2{T,N}() where {T} where {N} = new( Complex{T}(0.0, 0.0) )
Complex2{T,N}(src::Complex2) where {T} where {N} = new( src.val )
Complex2{T,N}(reg::Complex{T}) where {T} where {N} = new( reg )
Complex2{T,N}(rea::T, img::T) where {T} where {N} = new( Complex{T}(rea, img) )
Complex2{T,N}(tpl::NTuple{2, VecElement{T}}) where {T} where {N} = new( Complex{T}(tpl[1], tpl[2]) )
end
mutable struct IntN{T<:Integer,N} <: GenericRegister{T,N}
val::NTuple{N, VecElement{T}}
IntN{T,N}() where {T} where {N} = new( NTuple{N, VecElement{T}}(ntuple(i->0, N)) )
IntN{T,N}(src::IntN) where {T} where {N} = new( src.val )
IntN{T,N}(reg::NTuple{N, VecElement{T}}) where {T} where {N} = new( reg )
end
# Floating point registers
Half2 = FloatN{Float16, 2}
Half4 = FloatN{Float16, 4}
Half8 = FloatN{Float16, 8}
Half16 = FloatN{Float16, 16}
Single2 = FloatN{Float32, 2}
Single4 = FloatN{Float32, 4}
Single8 = FloatN{Float32, 8}
Single16 = FloatN{Float32, 16}
Double2 = FloatN{Float64, 2}
Double4 = FloatN{Float64, 4}
Double8 = FloatN{Float64, 8}
# Big float type
BigDouble2 = FloatN{BigFloat, 2}
# Additional complex types (are they compatible with Half2, Single2, Double2?)
ComplexH = Complex2{Float16,2}
ComplexS = Complex2{Float32,2}
ComplexD = Complex2{Float64,2}
ComplexB = Complex2{BigFloat,2}
# Signed: BigInt Int128 Int16 Int32 Int64 Int8
# Unsigned: UInt128 UInt16 UInt32 UInt64 UInt8
Int2 = IntN{Int32, 2}
Int4 = IntN{Int32, 4}
UInt2 = IntN{UInt32, 2}
UInt4 = IntN{UInt32, 4}
LongInt2 = IntN{Int64, 2}
LongInt4 = IntN{Int64, 4}
# Helper methods
function register_type(reg::GenericRegister{T,N}) where T where N; return T; end
function register_size(reg::GenericRegister{T,N}) where T where N; return N; end
end #QJuliaRegisters
| [
21412,
1195,
16980,
544,
8081,
6223,
198,
198,
2,
28847,
14805,
28441,
198,
9979,
2063,
17,
796,
24563,
29291,
90,
17,
11,
38692,
20180,
90,
43879,
1433,
11709,
198,
9979,
2063,
19,
796,
24563,
29291,
90,
19,
11,
38692,
20180,
90,
438... | 2.518229 | 1,536 |
<reponame>arnavgautam/Mimi.jl
module TestParameterTypes
using Mimi
using Test
import Mimi:
external_params, external_param, TimestepMatrix, TimestepVector,
ArrayModelParameter, ScalarModelParameter, FixedTimestep, build, import_params!
#
# Test that parameter type mismatches are caught
#
expr = :(
@defcomp BadComp1 begin
a = Parameter(index=[time, regions], default=[10, 11, 12]) # should be 2D default
function run_timestep(p, v, d, t)
end
end
)
@test_throws LoadError eval(expr)
expr = :(
@defcomp BadComp2 begin
a = Parameter(default=[10, 11, 12]) # should be scalar default
function run_timestep(p, v, d, t)
end
end
)
@test_throws LoadError eval(expr)
#
# Test that the old type parameterization syntax errors
#
expr = :(
@defcomp BadComp3 begin
a::Int = Parameter()
function run_timestep(p, v, d, t)
end
end
)
eval(expr) # Just a deprecation warning for v0.10, then will change to error in v1.0
# @test_throws LoadError eval(expr)
@defcomp MyComp begin
a = Parameter(index=[time, regions], default=ones(101,3))
b = Parameter(index=[time], default=1:101)
c = Parameter(index=[regions])
d = Parameter()
e = Parameter(index=[four])
f = Parameter{Array{Float64, 2}}()
g = Parameter{Int}(default=10.0) # value should be Int despite Float64 default
h = Parameter(default=10) # should be "numtype", despite Int default
j = Parameter{Int}(index = [regions])
function run_timestep(p, v, d, t)
end
end
# Check that explicit number type for model works as expected
numtype = Float32
arrtype = Union{Missing, numtype}
m = Model(numtype)
set_dimension!(m, :time, 2000:2100)
set_dimension!(m, :regions, 3)
set_dimension!(m, :four, 4)
add_comp!(m, MyComp)
set_param!(m, :MyComp, :c, [4,5,6])
set_param!(m, :MyComp, :d, 0.5) # 32-bit float constant
set_param!(m, :MyComp, :e, [1,2,3,4])
set_param!(m, :MyComp, :f, reshape(1:16, 4, 4))
set_param!(m, :MyComp, :j, [1,2,3])
build(m) # applies defaults, creating external params
extpars = external_params(m)
# TBD: These are not (yet) external params. Defaults are applied at build time.
@test isa(extpars[:a], ArrayModelParameter)
@test isa(extpars[:b], ArrayModelParameter)
@test isa(extpars[:c], ArrayModelParameter)
@test isa(extpars[:d], ScalarModelParameter)
@test isa(extpars[:e], ArrayModelParameter)
@test isa(extpars[:f], ScalarModelParameter) # note that :f is stored as a scalar parameter even though its values are an array
@test typeof(extpars[:a].values) == TimestepMatrix{FixedTimestep{2000, 1}, arrtype, 1}
@test typeof(extpars[:b].values) == TimestepVector{FixedTimestep{2000, 1}, arrtype}
@test typeof(extpars[:c].values) == Array{arrtype, 1}
@test typeof(extpars[:d].value) == numtype
@test typeof(extpars[:e].values) == Array{arrtype, 1}
@test typeof(extpars[:f].value) == Array{Float64, 2}
@test typeof(extpars[:g].value) <: Int
@test typeof(extpars[:h].value) == numtype
# test updating parameters
@test_throws ErrorException update_param!(m, :a, 5) # expects an array
@test_throws ErrorException update_param!(m, :a, ones(101)) # wrong size
@test_throws ErrorException update_param!(m, :a, fill("hi", 101, 3)) # wrong type
update_param!(m, :a, Array{Int,2}(zeros(101, 3))) # should be able to convert from Int to Float
@test_throws ErrorException update_param!(m, :d, ones(5)) # wrong type; should be scalar
update_param!(m, :d, 5) # should work, will convert to float
@test extpars[:d].value == 5
@test_throws ErrorException update_param!(m, :e, 5) # wrong type; should be array
@test_throws ErrorException update_param!(m, :e, ones(10)) # wrong size
update_param!(m, :e, [4,5,6,7])
@test length(extpars) == 9
@test typeof(extpars[:a].values) == TimestepMatrix{FixedTimestep{2000, 1}, arrtype, 1}
@test typeof(extpars[:d].value) == numtype
@test typeof(extpars[:e].values) == Array{arrtype, 1}
#------------------------------------------------------------------------------
# Test updating TimestepArrays with update_param!
#------------------------------------------------------------------------------
@defcomp MyComp2 begin
x=Parameter(index=[time])
y=Variable(index=[time])
function run_timestep(p,v,d,t)
v.y[t]=p.x[t]
end
end
# 1. Test with Fixed Timesteps
m = Model()
set_dimension!(m, :time, 2000:2002)
add_comp!(m, MyComp2) # ; first=2000, last=2002)
set_param!(m, :MyComp2, :x, [1, 2, 3])
# N.B. `first` and `last` are now disabled.
# Can't move last beyond last for a component
# @test_throws ErrorException set_dimension!(m, :time, 2001:2003)
set_dimension!(m, :time, 2001:2002)
update_param!(m, :x, [4, 5, 6], update_timesteps = false)
x = external_param(m.md, :x)
@test x.values isa Mimi.TimestepArray{Mimi.FixedTimestep{2000, 1, LAST} where LAST, Union{Missing,Float64}, 1}
@test x.values.data == [4., 5., 6.]
# TBD: this fails, but I'm not sure how it's supposed to behave. It says:
# (ERROR: BoundsError: attempt to access 3-element Array{Float64,1} at index [4])
# run(m)
# @test m[:MyComp2, :y][1] == 5 # 2001
# @test m[:MyComp2, :y][2] == 6 # 2002
update_param!(m, :x, [2, 3], update_timesteps = true)
x = external_param(m.md, :x)
@test x.values isa Mimi.TimestepArray{Mimi.FixedTimestep{2001, 1, LAST} where LAST, Union{Missing,Float64}, 1}
@test x.values.data == [2., 3.]
run(m)
@test m[:MyComp2, :y][1] == 2 # 2001
@test m[:MyComp2, :y][2] == 3 # 2002
# 2. Test with Variable Timesteps
m = Model()
set_dimension!(m, :time, [2000, 2005, 2020])
add_comp!(m, MyComp2)
set_param!(m, :MyComp2, :x, [1, 2, 3])
set_dimension!(m, :time, [2005, 2020, 2050])
update_param!(m, :x, [4, 5, 6], update_timesteps = false)
x = external_param(m.md, :x)
@test x.values isa Mimi.TimestepArray{Mimi.VariableTimestep{(2000, 2005, 2020)}, Union{Missing,Float64}, 1}
@test x.values.data == [4., 5., 6.]
#run(m)
#@test m[:MyComp2, :y][1] == 5 # 2005
#@test m[:MyComp2, :y][2] == 6 # 2020
update_param!(m, :x, [2, 3, 4], update_timesteps = true)
x = external_param(m.md, :x)
@test x.values isa Mimi.TimestepArray{Mimi.VariableTimestep{(2005, 2020, 2050)}, Union{Missing,Float64}, 1}
@test x.values.data == [2., 3., 4.]
run(m)
@test m[:MyComp2, :y][1] == 2 # 2005
@test m[:MyComp2, :y][2] == 3 # 2020
# 3. Test updating from a dictionary
m = Model()
set_dimension!(m, :time, [2000, 2005, 2020])
add_comp!(m, MyComp2)
set_param!(m, :MyComp2, :x, [1, 2, 3])
set_dimension!(m, :time, [2005, 2020, 2050])
update_params!(m, Dict(:x=>[2, 3, 4]), update_timesteps = true)
x = external_param(m.md, :x)
@test x.values isa Mimi.TimestepArray{Mimi.VariableTimestep{(2005, 2020, 2050)}, Union{Missing,Float64}, 1}
@test x.values.data == [2., 3., 4.]
run(m)
@test m[:MyComp2, :y][1] == 2 # 2005
@test m[:MyComp2, :y][2] == 3 # 2020
@test m[:MyComp2, :y][3] == 4 # 2050
# 4. Test updating the time index to a different length
m = Model()
set_dimension!(m, :time, 2000:2002) # length 3
add_comp!(m, MyComp2)
set_param!(m, :MyComp2, :x, [1, 2, 3])
set_dimension!(m, :time, 1999:2003) # length 5
@test_throws ErrorException update_param!(m, :x, [2, 3, 4, 5, 6], update_timesteps = false)
update_param!(m, :x, [2, 3, 4, 5, 6], update_timesteps = true)
x = external_param(m.md, :x)
@test x.values isa Mimi.TimestepArray{Mimi.FixedTimestep{1999, 1, LAST} where LAST, Union{Missing,Float64}, 1}
@test x.values.data == [2., 3., 4., 5., 6.]
run(m)
@test m[:MyComp2, :y] == [2., 3., 4., 5., 6.]
# 5. Test all the warning and error cases
@defcomp MyComp3 begin
regions=Index()
x=Parameter(index=[time]) # One timestep array parameter
y=Parameter(index=[regions]) # One non-timestep array parameter
z=Parameter() # One scalar parameter
end
m = Model() # Build the model
set_dimension!(m, :time, 2000:2002) # Set the time dimension
set_dimension!(m, :regions, [:A, :B])
add_comp!(m, MyComp3)
set_param!(m, :MyComp3, :x, [1, 2, 3])
set_param!(m, :MyComp3, :y, [10, 20])
set_param!(m, :MyComp3, :z, 0)
@test_throws ErrorException update_param!(m, :x, [1, 2, 3, 4]) # Will throw an error because size
@test_throws ErrorException update_param!(m, :y, [10, 15], update_timesteps=true) # Not a timestep array
update_param!(m, :y, [10, 15])
@test external_param(m.md, :y).values == [10., 15.]
@test_throws ErrorException update_param!(m, :z, 1, update_timesteps=true) # Scalar parameter
update_param!(m, :z, 1)
@test external_param(m.md, :z).value == 1
# Reset the time dimensions
set_dimension!(m, :time, 2005:2007)
update_params!(m, Dict(:x=>[3,4,5], :y=>[10,20], :z=>0), update_timesteps=true) # Won't error when updating from a dictionary
@test external_param(m.md, :x).values isa Mimi.TimestepArray{Mimi.FixedTimestep{2005,1},Union{Missing,Float64},1}
@test external_param(m.md, :x).values.data == [3.,4.,5.]
@test external_param(m.md, :y).values == [10.,20.]
@test external_param(m.md, :z).value == 0
#------------------------------------------------------------------------------
# Test the three different set_param! methods for a Symbol type parameter
#------------------------------------------------------------------------------
@defcomp A begin
p1 = Parameter{Symbol}()
end
function _get_model()
m = Model()
set_dimension!(m, :time, 10)
add_comp!(m, A)
return m
end
# Test the 3-argument version of set_param!
m = _get_model()
@test_throws MethodError set_param!(m, :p1, 3) # Can't set it with an Int
set_param!(m, :p1, :foo) # Set it with a Symbol
run(m)
@test m[:A, :p1] == :foo
# Test the 4-argument version of set_param!
m = _get_model()
@test_throws MethodError set_param!(m, :A, :p1, 3)
set_param!(m, :A, :p1, :foo)
run(m)
@test m[:A, :p1] == :foo
# Test the 5-argument version of set_param!
m = _get_model()
@test_throws MethodError set_param!(m, :A, :p1, :A_p1, 3)
set_param!(m, :A, :p1, :A_p1, :foo)
run(m)
@test m[:A, :p1] == :foo
#------------------------------------------------------------------------------
# Test that if set_param! errors in the connection step,
# the created param doesn't remain in the model's list of params
#------------------------------------------------------------------------------
@defcomp A begin
p1 = Parameter(index = [time])
end
@defcomp B begin
p1 = Parameter(index = [time])
end
m = Model()
set_dimension!(m, :time, 10)
add_comp!(m, A)
add_comp!(m, B)
@test_throws ErrorException set_param!(m, :p1, 1:5) # this will error because the provided data is the wrong size
@test isempty(m.md.external_params) # But it should not be added to the model's dictionary
end #module
| [
27,
7856,
261,
480,
29,
1501,
615,
70,
2306,
321,
14,
44,
25236,
13,
20362,
198,
21412,
6208,
36301,
31431,
198,
198,
3500,
337,
25236,
198,
3500,
6208,
198,
198,
11748,
337,
25236,
25,
198,
220,
220,
220,
7097,
62,
37266,
11,
7097,... | 2.494631 | 4,284 |
# ThomasFermi.jl
# - Thomas Fermi Density Functional Theory
# Marder is: "Condensed Mattery Physics", <NAME>, 2000.
# Also very useful is <NAME>'s first DFT lecture, on the Thomas-Fermi method
# http://www.home.uni-osnabrueck.de/apostnik/Lectures/DFT-1.pdf
"""
ThomasFermi_T(n)
Thomas Fermi kinetic energy (T).
Following eqn. 9.69 Marder p/ 217
"""
function ThomasFermi_T(n)
T= (hbar^2)/(2*me) * 3/5 * (3*pi^2)^(2/3) * n^(5/3)
T
end
"
ThomasFermi_T_fnderiv(n)
Thomas Fermi kinetic energy (T) as a functional derivative.
Following eqn. 9.76 Marder p/ 217
"
function ThomasFermi_T_fnderiv(n)
# @printf("ThomasFermi_T_fnderiv(%g,%g)\n",n,V) # to locate NaN error
T= (hbar^2)/(2*me) * 3/5 * (3*pi^2)^(2/3) * n^(2/3)
T
end
"
ThomasFermi_Exc(n)
Thomas Fermi exchange and correlation energy; takes electron density, returns energy.
Following eqn. 9.73 Marder p/ 217
"
function ThomasFermi_Exc(n)
Exc= - 3/4 * (3/pi)^(1/3) * q^2 * n^(5/3)
Exc
end
"
UAtomic(Z,r)
Potential due to Atomic charge; simple bare Coulomb form.
"
function UAtomic(Z,r)
U = -k_e * Z * q^2/r
end
function ThomasFermi_CoulombPotential(density,i,gridspacing,N)
# Coulomb integral.
# Nb: as spherical atom, probably need some horrible weighting parameters for the area of the spherical shell in the range [i] or [j]
# i.e. currently it's some 1D pipe full of electrons with a nuclear charge at the end
Potential=0.0
for j in 1:N
if j==i
continue
end
Potential+= k_e * q^2*density[j]/(gridspacing*(i-j))
end
Potential
end
"
AtomicTest(Z,N=100)
Evaluate Thomas Fermi energy for a spherical atom, atomic charge Z.
N: number of grid points in 1D density repr.
c.f. Marder, p.219, Eqn (9.76):
'The energy of an atomic of nuclear charge Z is approximately -1.5375 Z^(7/3) Ry'
"
function AtomicTest(Z; N=10,verbose::Bool=false)
println("AtomicTest, atomic charge: ",Z)
const radius=25Å
density=zeros(N).+Z/N # I know, a bit dirty... Fills density with flat electron density as initial guess.
gridspacing=N/radius
# Do some DFT here...
V = 4/3*pi*radius^3 # Volume of total sphere of charge density considered.
# Nb: Not sure if corect; Kinetic energy T is proport. to V. Defo volume and not potential?
sumE=0.0
for n in 1:10
if verbose; @printf("SCF Loop: %d\n",n); end;
sumE=0.0
# Central equation as (9.76) in Marder, dropping Dirac term
for i in 1:N
# mu being the chemical potential; this pulls together (9.76)
# Mu is the Lagrange multiplier to enforce the constraint that the density is conserved;
# helpfully this is also identical to the chemical potential
mu=ThomasFermi_T_fnderiv(density[i]) + UAtomic(Z,i*gridspacing) + ThomasFermi_CoulombPotential(density,i,gridspacing,N)
# So I gues we could use the fact that the chem potential is constant, to start moving electron density around?
# From Postnikov 1.2; mu drops to zero at r=infinity, therefore mu is zero everywhere
# TODO: Insert self-consistency here...
if verbose
@printf("\t i: %d density: %g T: %g \n\t\tmu %g = T_fnderiv %g + UAtomic: %g + Coulomb %g\n",
i,density[i],
ThomasFermi_T(density[i]),
mu,ThomasFermi_T_fnderiv(density[i]),UAtomic(Z,i*gridspacing),ThomasFermi_CoulombPotential(density,i,gridspacing,N))
end
# OK; calculate total energy
E=ThomasFermi_T(density[i]) + density[i]*UAtomic(Z,i*gridspacing) + density[i]*ThomasFermi_CoulombPotential(density,i,gridspacing,N)
if verbose
@printf("\t\tE %g = T %g + U %g + Coulomb %g\nTotal E: %g J = %g eV\n",
E,ThomasFermi_T(density[i]), density[i]*UAtomic(Z,i*gridspacing), density[i]*ThomasFermi_CoulombPotential(density,i,gridspacing,N),
sumE,sumE/q)
end
sumE+=E
# Nb: horrid hack :^)
density[i]-=mu*10E35 # vary density based on how much chemical potential mu exceeds 0
if density[i]<0.0; density[i]=0.0; end
end
# Impose constraint sum. density = Z
if verbose
@printf("Sum of density pre normalisation: %f\n",sum(density))
end
density=density.*Z/sum(density)
end
# TODO: calculate total Thomas-Fermi energy here, from density...
println("Density: ",density)
@printf("Total E: %g J = %g Ry = %g eV\n",sumE,sumE/Ry,sumE/q)
println("Marder analytic reference: E ~= -1.5375.Z^(7/3) = ",-1.5375*Z^(7/3), " Ry") # Nb: do some unit conversions
end
| [
2,
5658,
37,
7780,
72,
13,
20362,
198,
2,
220,
532,
5658,
376,
7780,
72,
360,
6377,
44224,
17003,
198,
198,
2,
337,
446,
263,
318,
25,
366,
25559,
15385,
337,
16296,
23123,
1600,
1279,
20608,
22330,
4751,
13,
220,
198,
198,
2,
441... | 2.234405 | 2,116 |
using LowRankIntegrators, SparseArrays
@testset "Burgers equation" begin
n = 1000 # spatial discretization
l = π # length of spatial domain
Δx = l/n # step size
x_range = Δx/2:Δx:l-Δx/2 # uniform grid
# boundary conditions
left(i) = i > 1 ? i - 1 : n
right(i) = i < n ? i + 1 : 1
# discretized diff operators
# laplacian (times viscosity)
Δ = spzeros(n, n)
ν = 0.005
for i in 1:n
Δ[i,left(i)] = ν/Δx^2
Δ[i,i] = -2ν/Δx^2
Δ[i,right(i)] = ν/Δx^2
end
# gradient
∇ = spzeros(n, n)
for i in 1:n
∇[i,left(i)] = -1/2/Δx
∇[i,right(i)] = 1/2/Δx
end
function burgers(ρ, (∇,Δ), t)
return Δ*ρ - (∇*ρ) .* ρ
end
# uncertainty range
m = 20 # parameter realizations scale as m^2
σ = [0.5,0.5]
ξ_range = [(ξ_1,ξ_2) for ξ_1 in range(-1,1,length=m), ξ_2 in range(-1,1,length=m)];
#initial condition
ub(x) = 0.5*(exp.(cos.(x)) .- 1.5).*sin.(x .+ 2π*0.37) # deterministic initial condition
uprime(x,ξ,σ) = σ[1]*ξ[1]*sin.(2π*x) .+ σ[2]*ξ[2]*sin.(3π*x) # stochastic fluctuation
ρ0_mat = hcat([ub(x_range) + uprime(x_range, ξ, σ) for ξ in ξ_range]...) # full rank initial condition
r = 5 # approximation rank
lr_ρ0 = truncated_svd(ρ0_mat, r); # intial condition
dt = 1e-2 # time step
lr_prob = MatrixDEProblem((ρ,t) -> burgers(ρ, (∇,Δ), t), lr_ρ0, (0.0, 1.0)) # defines the matrix differential equation problem
solvers = [UnconventionalAlgorithm(),
ProjectorSplitting(PrimalLieTrotter()),
ProjectorSplitting(DualLieTrotter()),
ProjectorSplitting(Strang()),
RankAdaptiveUnconventionalAlgorithm(1e-4, rmax=10)]
for solver in solvers
lr_sol = LowRankIntegrators.solve(lr_prob, solver, dt) # solves the low rank approximation
end
end | [
3500,
7754,
27520,
34500,
18942,
11,
1338,
17208,
3163,
20477,
198,
198,
31,
9288,
2617,
366,
33,
3686,
364,
16022,
1,
2221,
198,
220,
220,
220,
299,
796,
8576,
1303,
21739,
1221,
1186,
1634,
628,
220,
220,
220,
300,
796,
18074,
222,
... | 1.975713 | 947 |
# Mimi component: maps total radiative forcing to global mean temperature
# through a first-order lag (relaxation) model.
@defcomp climatedynamics begin
    # Total radiative forcing
    radforc = Parameter(index=[time])
    # Average global temperature
    temp = Variable(index=[time])
    # Constant term of the temperature-response lifetime polynomial
    lifetempconst = Parameter()
    # Term linear in climate sensitivity of the response lifetime
    lifetemplin = Parameter()
    # Term quadratic in climate sensitivity of the response lifetime
    lifetempqd = Parameter()
    # Climate sensitivity (default chosen by the upstream calibration)
    climatesensitivity = Parameter(default = 2.999999803762826)
    function run_timestep(p, v, d, t)
        if is_first(t)
            # First-period temperature is a fixed initial anomaly
            v.temp[t] = 0.20
        else
            # Response lifetime: quadratic in climate sensitivity, floored at 1.0
            LifeTemp = max(p.lifetempconst + p.lifetemplin * p.climatesensitivity + p.lifetempqd * p.climatesensitivity^2.0, 1.0)
            delaytemp = 1.0 / LifeTemp
            # Equilibrium response per unit forcing; 5.35*log(2) is
            # presumably the forcing of a CO2 doubling — TODO confirm
            temps = p.climatesensitivity / 5.35 / log(2.0)
            # Calculate temperature: relax toward equilibrium at rate delaytemp
            dtemp = delaytemp * temps * p.radforc[t] - delaytemp * v.temp[t - 1]
            v.temp[t] = v.temp[t - 1] + dtemp
        end
    end
end
| [
171,
119,
123,
31,
4299,
5589,
5424,
515,
4989,
873,
2221,
198,
220,
220,
220,
1303,
7472,
19772,
876,
10833,
198,
220,
220,
220,
2511,
1640,
66,
796,
25139,
2357,
7,
9630,
41888,
2435,
12962,
628,
220,
220,
220,
1303,
13475,
3298,
... | 2.15103 | 437 |
<filename>src/entities/seekerbarrier.jl
module SeekerBarrier

using ..Ahorn, Maple

# Palette entry: places a rectangle-shaped Seeker Barrier entity.
const placements = Ahorn.PlacementDict(
    "Seeker Barrier" => Ahorn.EntityPlacement(
        Maple.SeekerBarrier,
        "rectangle"
    ),
)

Ahorn.minimumSize(entity::Maple.SeekerBarrier) = 8, 8
Ahorn.resizable(entity::Maple.SeekerBarrier) = true, true

# Editor selection box: the entity's stored footprint (8x8 when unset).
function Ahorn.selection(entity::Maple.SeekerBarrier)
    x, y = Ahorn.position(entity)
    w = Int(get(entity.data, "width", 8))
    h = Int(get(entity.data, "height", 8))

    return Ahorn.Rectangle(x, y, w, h)
end

# Draw the barrier as a translucent grey box with no outline.
function Ahorn.render(ctx::Ahorn.Cairo.CairoContext, entity::Maple.SeekerBarrier, room::Maple.Room)
    w = Int(get(entity.data, "width", 32))
    h = Int(get(entity.data, "height", 32))

    Ahorn.drawRectangle(ctx, 0, 0, w, h, (0.25, 0.25, 0.25, 0.8), (0.0, 0.0, 0.0, 0.0))
end

end
27,
34345,
29,
10677,
14,
298,
871,
14,
325,
28233,
5657,
5277,
13,
20362,
198,
21412,
1001,
28233,
10374,
5277,
198,
198,
3500,
11485,
10910,
1211,
11,
21249,
198,
198,
9979,
21957,
3196,
796,
7900,
1211,
13,
3646,
5592,
35,
713,
7,
... | 2.429752 | 363 |
<reponame>richardreeve/Diversity.jl<gh_stars>10-100
using DataFrames
using LinearAlgebra
"""
UniqueTypes
A subtype of AbstractTypes where all individuals are completely
distinct. This type is the simplest AbstractTypes subtype, which
identifies all individuals as unique and completely distinct from each
other.
"""
struct UniqueTypes <: Diversity.API.AbstractTypes
num::Int64
names::Vector{String}
function UniqueTypes(num::Integer)
num > 0 || error("Too few species")
new(num, map(x -> "$x", 1:num))
end
function UniqueTypes(names::Vector{String})
num = length(names)
num > 0 || error("Too few species")
new(num, names)
end
end
# Diversity.API implementation for UniqueTypes.

# No similarity structure: every type is fully distinct.
import Diversity.API._hassimilarity
_hassimilarity(::UniqueTypes) = false

import Diversity.API._counttypes
# Number of types; the Bool flag is part of the API signature and unused here.
function _counttypes(ut::UniqueTypes, ::Bool)
    return ut.num
end

import Diversity.API._gettypenames
# Names of the types, in construction order.
function _gettypenames(ut::UniqueTypes, ::Bool)
    return ut.names
end

import Diversity.API._calcsimilarity
# The similarity matrix of fully distinct types is the identity.
function _calcsimilarity(ut::UniqueTypes, ::Real)
    return I
end

import Diversity.API._calcordinariness
# With identity similarity, ordinariness equals raw abundance.
function _calcordinariness(::UniqueTypes, abundances::AbstractArray, ::Real)
    return abundances
end

import Diversity.API._getdiversityname
_getdiversityname(::UniqueTypes) = "Unique"
"""
Species
A subtype of AbstractTypes where all species are completely distinct.
This type is the simplest AbstractTypes subtype, which identifies all
species as unique and completely distinct from each other.
"""
const Species = UniqueTypes
"""
Taxonomy
A subtype of AbstractTypes with similarity between related taxa,
creating taxonomic similarity matrices.
"""
struct Taxonomy{FP <: AbstractFloat} <: Diversity.API.AbstractTypes
speciesinfo::DataFrame
taxa::Dict{Symbol, FP}
typelabel::Symbol
function Taxonomy{FP}(speciesinfo::DataFrame,
taxa::Dict{Symbol, FP},
typelabel::Symbol) where FP <: AbstractFloat
sort(describe(speciesinfo)[!,:variable]) == sort([keys(taxa)...]) ||
error("Taxon labels do not match similarity values")
typelabel ∈ describe(speciesinfo)[!,:variable] ||
error("$typelabel not found in DataFrame column names")
new{FP}(speciesinfo, taxa, typelabel)
end
end
function Taxonomy(speciesinfo::DataFrame, taxa::Dict,
typelabel::Symbol = :Species)
Taxonomy{valtype(taxa)}(speciesinfo, taxa, typelabel)
end
import Diversity.API.floattypes
# Taxonomy only supports the precision its similarity weights use.
function floattypes(::Taxonomy{FP}) where FP <: AbstractFloat
    return Set([FP])
end

# One type per row of the species table.
function _counttypes(tax::Taxonomy, ::Bool)
    return nrow(tax.speciesinfo)
end

# Type names come from the designated label column.
function _gettypenames(tax::Taxonomy, ::Bool)
    return tax.speciesinfo[!,tax.typelabel]
end

# Building the taxonomic similarity matrix is not implemented yet.
function _calcsimilarity(::Taxonomy, ::Real)
    error("Can't generate a taxonomic similarity matrix yet")
end

_getdiversityname(::Taxonomy) = "Taxonomy"
"""
GeneralTypes{FP, M}
An AbstractTypes subtype with a general similarity matrix. This
subtype simply holds a matrix with similarities between individuals.
# Members:
- `z` A two-dimensional matrix representing similarity between
individuals.
"""
struct GeneralTypes{FP <: AbstractFloat,
M <: AbstractMatrix{FP}} <: Diversity.API.AbstractTypes
"""
z
A two-dimensional matrix representing similarity between
individuals.
"""
z::M
"""
names
Optional vector of type names.
"""
names::Vector{String}
"""
# Constructor for GeneralTypes
Creates an instance of the GeneralTypes class, with an arbitrary
similarity matrix.
"""
function GeneralTypes(zmatrix::M) where {FP <: AbstractFloat,
M <: AbstractMatrix{FP}}
size(zmatrix, 1) == size(zmatrix, 2) ||
throw(DimensionMismatch("Similarity matrix is not square"))
minimum(zmatrix) ≥ 0 || throw(DomainError(minimum(zmatrix),
"Similarities must be ≥ 0"))
maximum(zmatrix) ≤ 1 || @warn "Similarity matrix has values above 1"
new{FP, M}(zmatrix, map(x -> "$x", 1:size(zmatrix, 1)))
end
function GeneralTypes(zmatrix::M, names::Vector{String}) where
{FP <: AbstractFloat, M <: AbstractMatrix{FP}}
size(zmatrix, 1) == size(zmatrix, 2) ||
throw(DimensionMismatch("Similarity matrix is not square"))
minimum(zmatrix) ≥ 0 || throw(DomainError(minimum(zmatrix),
"Similarities must be ≥ 0"))
maximum(zmatrix) ≤ 1 || @warn "Similarity matrix has values above 1"
length(names) == size(zmatrix, 1) ||
error("Species name vector does not match similarity matrix")
new{FP, M}(zmatrix, names)
end
end
# Only the precision of the stored similarity matrix is supported.
function floattypes(::GeneralTypes{FP, M}) where {FP <: AbstractFloat,
                                                  M <: AbstractMatrix{FP}}
    return Set([FP])
end

# One type per row/column of the similarity matrix.
function _counttypes(gt::GeneralTypes, ::Bool)
    return size(gt.z, 1)
end

function _gettypenames(gt::GeneralTypes, ::Bool)
    return gt.names
end

# The user-supplied matrix is used directly, independent of the qZ argument.
function _calcsimilarity(gt::GeneralTypes, ::Real)
    return gt.z
end

_getdiversityname(::GeneralTypes) = "Arbitrary Z"
| [
27,
7856,
261,
480,
29,
7527,
446,
631,
303,
14,
35,
1608,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
6060,
35439,
198,
3500,
44800,
2348,
29230,
198,
198,
37811,
198,
220,
220,
220,
30015,
31431,
198,
198,
32,
8... | 2.617894 | 2,023 |
<filename>docs/make.jl
using BRIKHEAD
using Documenter
# Build the package documentation site with Documenter.
makedocs(;
    modules=[BRIKHEAD],
    authors="<NAME> <<EMAIL>> and contributors",
    repo="https://github.com/notZaki/BRIKHEAD.jl/blob/{commit}{path}#L{line}",
    sitename="BRIKHEAD.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI so local builds stay browsable from disk.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://notZaki.github.io/BRIKHEAD.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)

# Publish the built site (gh-pages deployment handled by Documenter).
deploydocs(;
    repo="github.com/notZaki/BRIKHEAD.jl",
)
| [
27,
34345,
29,
31628,
14,
15883,
13,
20362,
198,
3500,
347,
7112,
42,
37682,
198,
3500,
16854,
263,
198,
198,
76,
4335,
420,
82,
7,
26,
198,
220,
220,
220,
13103,
41888,
33,
7112,
42,
37682,
4357,
198,
220,
220,
220,
7035,
2625,
2... | 2.13253 | 249 |
<gh_stars>0
using IterativeSolvers
using Base.Test
@testset "SVD Lanczos" begin
srand(1234567)
#Thick restart methods
@testset "Thick restart with method=$method" for method in (:ritz, :harmonic)
for T in (Float32, Float64)
@testset "Diagonal Matrix{$T}" begin
n = 30
ns = 5
tol = 1e-5
A = full(Diagonal(T[1.0 : n;]))
q = ones(T, n) / √n
σ, L, history = svdl(A, nsv=ns, v0=q, tol=tol, reltol=tol, maxiter=n, method=method, vecs=:none, log=true)
@test isa(history, ConvergenceHistory)
@test norm(σ - [n : -1.0 : n - 4;]) < 5^2 * 1e-5
@test_throws ArgumentError svdl(A, nsv=ns, v0=q, tol=tol, reltol=tol, maxiter=n, method=:fakemethod, vecs=:none)
#Check the singular vectors also
Σ, L = svdl(A, nsv=ns, v0=q, tol=tol, reltol=tol, maxiter=n, method=method, vecs=:both)
#The vectors should have the structure
# [ 0 0 ... 0 ]
# [ ... ]
# [ 0 0 ... ±1 ]
# [ 0 ... ]
# [ 0 ±1 ... 0 ]
# [±1 0 ... 0 ]
# and furthermore the signs should be aligned across Σ[:U] and Σ[:V]
for i = 1 : 5
Σ[:U][end + 1 - i, i] -= sign(Σ[:U][end + 1 - i, i])
end
@test vecnorm(Σ[:U]) < σ[1] * √tol
for i = 1 : 5
Σ[:Vt][i, end + 1 - i] -= sign(Σ[:Vt][i, end + 1 - i])
end
@test vecnorm(Σ[:U]) < σ[1] * √tol
@test norm(σ - Σ[:S]) < 2max(tol * ns * σ[1], tol)
#Issue #55
let
σ1, _ = svdl(A, nsv=1, tol=tol, reltol=tol)
@test abs(σ[1] - σ1[1]) < 2max(tol * σ[1], tol)
end
end
@testset "Rectangular Matrix{$T}" begin
srand(1)
m = 300
n = 200
k = 5
l = 10
A = randn(T, m, n)
q = randn(T, n) |> x -> x / norm(x)
σ, L = svdl(A, nsv=k, k=l, v0=q, tol=1e-5, maxiter=30, method=method)
@test norm(σ - svdvals(A)[1 : k]) < k^2 * 1e-5
end
end
end
end #svdl
@testset "BrokenArrowBidiagonal" begin
B = IterativeSolvers.BrokenArrowBidiagonal([1, 2, 3], [1, 2], Int[])
@test full(B) == [1 0 1; 0 2 2; 0 0 3]
@test B[3,3] == 3
@test B[2,3] == 2
@test B[3,2] == 0
@test B[1,3] == 1
@test size(B) == (3,3)
@test_throws ArgumentError size(B,3)
@test_throws BoundsError B[1,5]
end
| [
27,
456,
62,
30783,
29,
15,
198,
3500,
40806,
876,
36949,
690,
198,
3500,
7308,
13,
14402,
198,
198,
31,
9288,
2617,
366,
50,
8898,
21534,
37925,
1,
2221,
198,
198,
82,
25192,
7,
10163,
2231,
3134,
8,
198,
198,
2,
817,
624,
15765,... | 1.681579 | 1,520 |
<filename>src/problems/127.word-ladder.jl
# ---
# title: 127. Word Ladder
# id: problem127
# author: AquaIndigo
# date: 2020-11-07
# difficulty: Medium
# categories: Breadth-first Search
# link: <https://leetcode.com/problems/word-ladder/description/>
# hidden: true
# ---
#
# Given two words ( _beginWord_ and _endWord_ ), and a dictionary's word list,
# find the length of shortest transformation sequence from _beginWord_ to
# _endWord_ , such that:
#
# 1. Only one letter can be changed at a time.
# 2. Each transformed word must exist in the word list.
#
# **Note:**
#
# * Return 0 if there is no such transformation sequence.
# * All words have the same length.
# * All words contain only lowercase alphabetic characters.
# * You may assume no duplicates in the word list.
# * You may assume _beginWord_ and _endWord_ are non-empty and are not the same.
#
# **Example 1:**
#
#
#
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# Output: 5
#
# Explanation: As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
# return its length 5.
#
#
# **Example 2:**
#
#
#
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
#
# Output: 0
#
# Explanation: The endWord "cog" is not in wordList, therefore no possible ** ** transformation.
#
#
#
## @lc code=start
using LeetCode
using DataStructures
"""
    ladder_length(begin_word, end_word, word_list)

Length of the shortest transformation sequence from `begin_word` to
`end_word`, changing one letter at a time with every intermediate word in
`word_list`; returns 0 when no such sequence exists. Uses bidirectional BFS
over the one-letter-difference adjacency graph.
"""
function ladder_length(begin_word::String, end_word::String, word_list::Vector{String})::Int
    # True when s1 and s2 differ in exactly one position
    # (assumes equal lengths — guaranteed by the problem statement).
    function isadj(s1, s2)
        flg = false
        for i in 1:length(s1)
            if s1[i] != s2[i]
                if flg
                    return false
                else
                    flg = true
                end
            end
        end
        return true
    end
    if !(end_word in word_list)
        return 0
    end
    # Make sure the start word is a graph node too
    !(begin_word in word_list) && push!(word_list, begin_word)
    # s, t: indices of the start and target words
    s, t = findall(x -> x == begin_word, word_list)[1],
        findall(x -> x == end_word, word_list)[1]
    ## println(s, ", ", t)
    ### wl = collect.(word_list)
    len = length(word_list)
    edges = [Set{Int}() for i in 1:len]
    ### construct adj list
    for i in 1:len
        for j in (i + 1):len
            if isadj(word_list[i], word_list[j])
                push!(edges[i], j)
                push!(edges[j], i)
            end
        end
    end
    ## BFS
    # Two frontiers: qs grows from the start, qt from the target.
    # visited: 0 = unseen, 1 = reached from start side, 2 = from target side.
    # dists[v]: number of words on the path up to and including v on its side.
    qs, qt = Queue{Int}(), Queue{Int}()
    dists = [0 for i in 1:len]
    visited = [0 for i in 1:len]
    enqueue!(qs, s)
    enqueue!(qt, t)
    visited[s] = 1
    visited[t] = 2
    dists[s] = dists[t] = 1
    while !isempty(qs) && !isempty(qt)
        # Expand one node from each side per iteration
        rt1, rt2 = dequeue!(qs), dequeue!(qt)
        dis1, dis2 = dists[rt1] + 1, dists[rt2] + 1
        ## println(rt1, " ", dis1)
        for neib in edges[rt1]
            if visited[neib] == 2
                # Frontiers meet: total = start side + target side, minus the
                # doubly counted meeting word
                return dis1 + dists[neib] - 1
            elseif visited[neib] == 0
                visited[neib] = 1
                enqueue!(qs, neib)
                dists[neib] = dis1
            end
        end
        for neib in edges[rt2]
            if visited[neib] == 1
                return dis2 + dists[neib] - 1
            elseif visited[neib] == 0
                visited[neib] = 2
                enqueue!(qt, neib)
                dists[neib] = dis2
            end
        end
    end
    # Frontier exhausted without meeting: no ladder exists
    return 0
end
## @lc code=end
| [
27,
34345,
29,
10677,
14,
1676,
22143,
14,
16799,
13,
4775,
12,
9435,
1082,
13,
20362,
198,
2,
11420,
198,
2,
3670,
25,
18112,
13,
9678,
12862,
1082,
198,
2,
4686,
25,
1917,
16799,
198,
2,
1772,
25,
24838,
5497,
14031,
198,
2,
312... | 2.013825 | 1,736 |
# When run from the repository root ("aoc"), move into this puzzle's folder
# so the relative input-file paths below resolve.
if basename(pwd()) == "aoc"
    cd("2019/6")
end
# A node of the orbit tree: its name, the body it orbits (`parent`,
# `nothing` for the root), and the bodies orbiting it (`children`).
struct Planet
    name
    parent
    children
end

# Convenience constructor for a free-standing planet with no parent.
Planet(name) = Planet(name, nothing, Planet[])

# Depth-first search of the subtree by name; returns `nothing` when absent.
function Base.getindex(planet::Planet, name::AbstractString)
    planet.name == name && return planet
    for child in planet.children
        found = child[name]
        isnothing(found) || return found
    end
    return nothing
end

# A name is present exactly when the lookup finds a node.
Base.haskey(planet::Planet, name::AbstractString) = !isnothing(planet[name])
# Parse "PARENT)CHILD" orbit pairs from `filename` into a tree rooted at
# "COM". Lines whose parent has not been placed yet are requeued and
# retried once the parent appears.
function loadsystem(filename::AbstractString)
    system = Planet("COM")
    pending = readlines(filename)
    while !isempty(pending)
        entry = popfirst!(pending)
        parent, child = split(entry, ')')
        if haskey(system, parent)
            push!(system[parent].children, Planet(child, system[parent], Planet[]))
        else
            push!(pending, entry)  # parent not placed yet; retry later
        end
    end
    system
end
# Total direct + indirect orbits contributed by this planet's subtree.
function orbits(planet::Planet)
    direct = depth(planet) * length(planet.children)
    return direct + sum(orbits.(planet.children), init = 0)
end

# Chain length up to the root; the root's absent parent contributes 0.
depth(::Nothing) = 0
depth(planet::Planet) = 1 + depth(planet.parent)

# Ancestor chain ordered [planet, planet.parent, ..., root].
function parents(planet::Planet)
    chain = Planet[planet]
    node = planet.parent
    while !isnothing(node)
        push!(chain, node)
        node = node.parent
    end
    return chain
end
# Part 1: total number of direct and indirect orbits in the map.
part1(filename::AbstractString) = orbits(loadsystem(filename))

@assert part1("example.txt") == 42

# Part 2: minimum orbital transfers between YOU's and SAN's hosts.
function part2(filename::AbstractString)
    system = loadsystem(filename)
    you = system["YOU"].parent
    santa = system["SAN"].parent
    # First shared node on the (leaf -> root ordered) ancestor chains,
    # i.e. the deepest common ancestor.
    common = first(intersect(parents(you), parents(santa)))
    # Transfers = hops from each start down to the common ancestor.
    depth(you) + depth(santa) - 2 * depth(common)
end

@assert part2("example2.txt") == 4

part2("input.txt")
| [
361,
1615,
12453,
7,
79,
16993,
28955,
6624,
366,
64,
420,
1,
198,
220,
220,
220,
22927,
7203,
23344,
14,
21,
4943,
198,
437,
198,
198,
7249,
11397,
198,
220,
220,
220,
1438,
198,
220,
220,
220,
2560,
198,
220,
220,
220,
1751,
198... | 2.491779 | 669 |
# Core types and definitions
# Base.@pure only exists from Julia 0.5; on older versions define a no-op
# stand-in so the `@pure` annotations below parse everywhere.
if VERSION < v"0.5.0-dev"
    macro pure(ex)
        esc(ex)
    end
else
    using Base: @pure
end
# A non-empty tuple of Symbols, used for lists of axis names.
const Symbols = Tuple{Symbol,Vararg{Symbol}}
@doc """
Type-stable axis-specific indexing and identification with a
parametric type.
### Type parameters
```julia
immutable Axis{name,T}
```
* `name` : the name of the axis, a Symbol
* `T` : the type of the axis
### Constructors
```julia
Axis{name}(I)
```
### Arguments
* `name` : the axis name Symbol or integer dimension
* `I` : the indexer, any indexing type that the axis supports
### Examples
Here is an example with a Dimensional axis representing a time
sequence along rows and a Categorical axis of Symbols for column
headers.
```julia
A = AxisArray(reshape(1:60, 12, 5), .1:.1:1.2, [:a, :b, :c, :d, :e])
A[Axis{:col}(2)] # grabs the second column
A[Axis{:col}(:b)] # Same as above, grabs column :b (the second column)
A[Axis{:row}(2)] # grabs the second row
A[Axis{2}(2:5)] # grabs the second through 5th columns
```
""" ->
immutable Axis{name,T}
val::T
end
# Constructed exclusively through Axis{:symbol}(...) or Axis{1}(...)
(::Type{Axis{name}}){name,T}(I::T=()) = Axis{name,T}(I)
Base.:(==){name}(A::Axis{name}, B::Axis{name}) = A.val == B.val
Base.hash{name}(A::Axis{name}, hx::UInt) = hash(A.val, hash(name, hx))
axistype{name,T}(::Axis{name,T}) = T
axistype{name,T}(::Type{Axis{name,T}}) = T
# Pass indexing and related functions straight through to the wrapped value
# TODO: should Axis be an AbstractArray? AbstractArray{T,0} for scalar T?
Base.getindex(A::Axis, i...) = A.val[i...]
Base.eltype{_,T}(::Type{Axis{_,T}}) = eltype(T)
Base.size(A::Axis) = size(A.val)
Base.endof(A::Axis) = length(A)
Base.indices(A::Axis) = indices(A.val)
Base.indices(A::Axis, d) = indices(A.val, d)
Base.length(A::Axis) = length(A.val)
(A::Axis{name}){name}(i) = Axis{name}(i)
Base.convert{name,T}(::Type{Axis{name,T}}, ax::Axis{name,T}) = ax
Base.convert{name,T}(::Type{Axis{name,T}}, ax::Axis{name}) = Axis{name}(convert(T, ax.val))
@doc """
An AxisArray is an AbstractArray that wraps another AbstractArray and
adds axis names and values to each array dimension. AxisArrays can be indexed
by using the named axes as an alternative to positional indexing by
dimension. Other advanced indexing along axis values are also provided.
### Type parameters
The AxisArray contains several type parameters:
```julia
immutable AxisArray{T,N,D,Ax} <: AbstractArray{T,N}
```
* `T` : the elemental type of the AbstractArray
* `N` : the number of dimensions
* `D` : the type of the wrapped AbstractArray
* `Ax` : the names and types of the axes, as a (specialized) NTuple{N, Axis}
### Constructors
```julia
AxisArray(A::AbstractArray, axes::Axis...)
AxisArray(A::AbstractArray, names::Symbol...)
AxisArray(A::AbstractArray, vectors::AbstractVector...)
AxisArray(A::AbstractArray, (names...,), (steps...,), [(offsets...,)])
```
### Arguments
* `A::AbstractArray` : the wrapped array data
* `axes` or `names` or `vectors` : dimensional information for the wrapped array
The dimensional information may be passed in one of three ways and is
entirely optional. When the axis name or value is missing for a
dimension, a default is substituted. The default axis names for
dimensions `(1, 2, 3, 4, 5, ...)` are `(:row, :col, :page, :dim_4,
:dim_5, ...)`. The default axis values are `indices(A, d)` for each
missing dimension `d`.
### Indexing
Indexing returns a view into the original data. The returned view is a
new AxisArray that wraps a SubArray. Indexing should be type
stable. Use `Axis{axisname}(idx)` to index based on a specific
axis. `axisname` is a Symbol specifying the axis to index/slice, and
`idx` is a normal indexing object (`Int`, `Array{Int,1}`, etc.) or a
custom indexing type for that particular type of axis.
Two main types of axes supported by default include:
* Categorical axis -- These are vectors of labels, normally Symbols or
strings. Elements or slices can be indexed by elements or vectors
of elements.
* Dimensional axis -- These are sorted vectors or iterators that can
be indexed by `ClosedInterval()`. These are commonly used for sequences of
times or date-times. For regular sample rates, ranges can be used.
User-defined axis types can be added along with custom indexing
behaviors. To add add a custom type as a Categorical or Dimensional
axis, add a trait using `AxisArrays.axistrait`. Here is the example of
adding a custom Dimensional axis:
```julia
AxisArrays.axistrait(v::MyCustomAxis) = AxisArrays.Dimensional
```
For more advanced indexing, you can define custom methods for
`AxisArrays.axisindexes`.
### Examples
Here is an example with a Dimensional axis representing a time
sequence along rows (it's a FloatRange) and a Categorical axis of
Symbols for column headers.
```julia
A = AxisArray(reshape(1:15, 5, 3), Axis{:time}(.1:.1:0.5), Axis{:col}([:a, :b, :c]))
A[Axis{:time}(1:3)] # equivalent to A[1:3,:]
A[Axis{:time}(ClosedInterval(.2,.4))] # restrict the AxisArray along the time axis
A[ClosedInterval(0.,.3), [:a, :c]] # select an interval and two columns
```
""" ->
immutable AxisArray{T,N,D,Ax} <: AbstractArray{T,N}
data::D # D <:AbstractArray, enforced in constructor to avoid dispatch bugs (https://github.com/JuliaLang/julia/issues/6383)
axes::Ax # Ax<:NTuple{N, Axis}, but with specialized Axis{...} types
(::Type{AxisArray{T,N,D,Ax}}){T,N,D,Ax}(data::AbstractArray{T,N}, axs::Tuple{Vararg{Axis,N}}) = new{T,N,D,Ax}(data, axs)
end
# Helper functions: Default axis names (if not provided)
_defaultdimname(i) = i == 1 ? (:row) : i == 2 ? (:col) : i == 3 ? (:page) : Symbol(:dim_, i)
# Why doesn't @pure work here?
@generated function _nextaxistype{M}(axs::NTuple{M,Axis})
name = _defaultdimname(M+1)
:(Axis{$(Expr(:quote, name))})
end
"""
default_axes(A::AbstractArray)
default_axes(A::AbstractArray, axs)
Return a tuple of Axis objects that appropriately index into the array A.
The optional second argument can take a tuple of vectors or axes, which will be
wrapped with the appropriate axis name, and it will ensure no axis goes beyond
the dimensionality of the array A.
"""
@inline default_axes(A::AbstractArray, args=indices(A)) = _default_axes(A, args, ())
_default_axes{T,N}(A::AbstractArray{T,N}, args::Tuple{}, axs::NTuple{N,Axis}) = axs
_default_axes{T,N}(A::AbstractArray{T,N}, args::Tuple{Any, Vararg{Any}}, axs::NTuple{N,Axis}) = throw(ArgumentError("too many axes provided"))
_default_axes{T,N}(A::AbstractArray{T,N}, args::Tuple{Axis, Vararg{Any}}, axs::NTuple{N,Axis}) = throw(ArgumentError("too many axes provided"))
@inline _default_axes{T,N}(A::AbstractArray{T,N}, args::Tuple{}, axs::Tuple) =
_default_axes(A, args, (axs..., _nextaxistype(axs)(indices(A, length(axs)+1))))
@inline _default_axes{T,N}(A::AbstractArray{T,N}, args::Tuple{Any, Vararg{Any}}, axs::Tuple) =
_default_axes(A, Base.tail(args), (axs..., _nextaxistype(axs)(args[1])))
@inline _default_axes{T,N}(A::AbstractArray{T,N}, args::Tuple{Axis, Vararg{Any}}, axs::Tuple) =
_default_axes(A, Base.tail(args), (axs..., args[1]))
# Axis consistency checks — ensure sizes match and the names are unique
@inline checksizes(axs, sz) =
(length(axs[1]) == sz[1]) & checksizes(tail(axs), tail(sz))
checksizes(::Tuple{}, sz) = true
@inline function checknames(name::Symbol, names...)
matches = false
for n in names
matches |= name == n
end
matches && throw(ArgumentError("axis name :$name is used more than once"))
checknames(names...)
end
checknames(name, names...) = throw(ArgumentError("the Axis names must be Symbols"))
checknames() = ()
# The primary AxisArray constructors — specify an array to wrap and the axes
AxisArray(A::AbstractArray, vects::Union{AbstractVector, Axis}...) = AxisArray(A, vects)
AxisArray(A::AbstractArray, vects::Tuple{Vararg{Union{AbstractVector, Axis}}}) = AxisArray(A, default_axes(A, vects))
# Inner-most constructor: validates axis lengths and name uniqueness.
function AxisArray{T,N}(A::AbstractArray{T,N}, axs::NTuple{N,Axis})
    checksizes(axs, _size(A)) || throw(ArgumentError("the length of each axis must match the corresponding size of data"))
    checknames(axisnames(axs...)...)
    AxisArray{T,N,typeof(A),typeof(axs)}(A, axs)
end
# Simple non-type-stable constructors to specify names as symbols
AxisArray(A::AbstractArray) = AxisArray(A, ()) # Disambiguation
AxisArray(A::AbstractArray, names::Symbol...) = (inds = indices(A); AxisArray(A, ntuple(i->Axis{names[i]}(inds[i]), length(names))))
# Ranged axes from per-dimension step/offset values.
function AxisArray{T,N}(A::AbstractArray{T,N}, names::NTuple{N,Symbol}, steps::NTuple{N,Number}, offsets::NTuple{N,Number}=map(zero, steps))
    axs = ntuple(i->Axis{names[i]}(range(offsets[i], steps[i], size(A,i))), N)
    AxisArray(A, axs...)
end
# Traits
# HasAxes{true/false} marks at the type level whether an array carries axes.
immutable HasAxes{B} end
HasAxes{A<:AxisArray}(::Type{A}) = HasAxes{true}()
HasAxes{A<:AbstractArray}(::Type{A}) = HasAxes{false}()
HasAxes(A::AbstractArray) = HasAxes(typeof(A))
# Axis definitions
@doc """
    axisdim(::AxisArray, ::Axis) -> Int
    axisdim(::AxisArray, ::Type{Axis}) -> Int
Given an AxisArray and an Axis, return the integer dimension of
the Axis within the array.
""" ->
axisdim(A::AxisArray, ax::Axis) = axisdim(A, typeof(ax))
# The lookup result only depends on the types, so it is generated once.
@generated function axisdim{T<:Axis}(A::AxisArray, ax::Type{T})
    dim = axisdim(A, T)
    :($dim)
end
# The actual computation is done in the type domain, which is a little tricky
# due to type invariance.
axisdim{T,N,D,Ax,name,S}(A::Type{AxisArray{T,N,D,Ax}}, ::Type{Axis{name,S}}) = axisdim(A, Axis{name})
function axisdim{T,N,D,Ax,name}(::Type{AxisArray{T,N,D,Ax}}, ::Type{Axis{name}})
    # An integer "name" is taken to be the dimension number itself.
    isa(name, Int) && return name <= N ? name : error("axis $name greater than array dimensionality $N")
    names = axisnames(Ax)
    idx = findfirst(names, name)
    idx == 0 && error("axis $name not found in array axes $names")
    idx
end
# Base definitions that aren't provided by AbstractArray
@inline Base.size(A::AxisArray) = size(A.data)
@inline Base.size(A::AxisArray, Ax::Axis) = size(A.data, axisdim(A, Ax))
@inline Base.size{Ax<:Axis}(A::AxisArray, ::Type{Ax}) = size(A.data, axisdim(A, Ax))
@inline Base.indices(A::AxisArray) = indices(A.data)
@inline Base.indices(A::AxisArray, Ax::Axis) = indices(A.data, axisdim(A, Ax))
@inline Base.indices{Ax<:Axis}(A::AxisArray, ::Type{Ax}) = indices(A.data, axisdim(A, Ax))
# Conversion and parent unwrap drop the axis metadata.
Base.convert{T,N}(::Type{Array{T,N}}, A::AxisArray{T,N}) = convert(Array{T,N}, A.data)
Base.parent(A::AxisArray) = A.data
# Similar is tricky. If we're just changing the element type, it can stay as an
# AxisArray. But if we're changing dimensions, there's no way it can know how
# to keep track of the axes, so just punt and return a regular old Array.
# TODO: would it feel more consistent to return an AxisArray without any axes?
Base.similar{S}(A::AxisArray, ::Type{S}) = (d = similar(A.data, S); AxisArray(d, A.axes))
Base.similar{S,N}(A::AxisArray, ::Type{S}, dims::Dims{N}) = similar(A.data, S, dims)
# If, however, we pass Axis objects containing the new axis for that dimension,
# we can return a similar AxisArray with an appropriately modified size
Base.similar{T}(A::AxisArray{T}, ax1::Axis, axs::Axis...) = similar(A, T, (ax1, axs...))
Base.similar{S}(A::AxisArray, ::Type{S}, ax1::Axis, axs::Axis...) = similar(A, S, (ax1, axs...))
@generated function Base.similar{T,S,N}(A::AxisArray{T,N}, ::Type{S}, axs::Tuple{Axis,Vararg{Axis}})
    # Start from A's own indices/axes for every dimension.
    inds = Expr(:tuple)
    ax = Expr(:tuple)
    for d=1:N
        push!(inds.args, :(indices(A, Axis{$d})))
        push!(ax.args, :(axes(A, Axis{$d})))
    end
    # Substitute the explicitly supplied axes; an Axis wrapping the empty
    # tuple () marks a dimension for removal.
    to_delete = Int[]
    for i=1:length(axs.parameters)
        a = axs.parameters[i]
        d = axisdim(A, a)
        axistype(a) <: Tuple{} && push!(to_delete, d)
        inds.args[d] = :(indices(axs[$i].val, 1))
        ax.args[d] = :(axs[$i])
    end
    sort!(to_delete)
    deleteat!(inds.args, to_delete)
    deleteat!(ax.args, to_delete)
    quote
        d = similar(A.data, S, $inds)
        AxisArray(d, $ax)
    end
end
# These methods allow us to preserve the AxisArray under reductions
# Note that we only extend the following two methods, and then have it
# dispatch to package-local `reduced_indices` and `reduced_indices0`
# methods. This avoids a whole slew of ambiguities.
# (Base renamed reduced_dims -> reduced_indices after 0.5.0.)
if VERSION == v"0.5.0"
    Base.reduced_dims(A::AxisArray, region) = reduced_indices(axes(A), region)
    Base.reduced_dims0(A::AxisArray, region) = reduced_indices0(axes(A), region)
else
    Base.reduced_indices(A::AxisArray, region) = reduced_indices(axes(A), region)
    Base.reduced_indices0(A::AxisArray, region) = reduced_indices0(axes(A), region)
end
# Empty region: nothing to reduce.
reduced_indices{N}(axs::Tuple{Vararg{Axis,N}}, ::Tuple{}) = axs
reduced_indices0{N}(axs::Tuple{Vararg{Axis,N}}, ::Tuple{}) = axs
# A single integer dimension is normalized to a one-element tuple.
reduced_indices{N}(axs::Tuple{Vararg{Axis,N}}, region::Integer) =
    reduced_indices(axs, (region,))
reduced_indices0{N}(axs::Tuple{Vararg{Axis,N}}, region::Integer) =
    reduced_indices0(axs, (region,))
# Dims tuple: collapse every axis whose dimension number is in the region.
reduced_indices{N}(axs::Tuple{Vararg{Axis,N}}, region::Dims) =
    map((ax,d)->d∈region ? reduced_axis(ax) : ax, axs, ntuple(identity, Val{N}))
reduced_indices0{N}(axs::Tuple{Vararg{Axis,N}}, region::Dims) =
    map((ax,d)->d∈region ? reduced_axis0(ax) : ax, axs, ntuple(identity, Val{N}))
# Region named by an Axis (instance or type): match by axis name.
@inline reduced_indices{Ax<:Axis}(axs::Tuple{Vararg{Axis}}, region::Type{Ax}) =
    _reduced_indices(reduced_axis, (), region, axs...)
@inline reduced_indices0{Ax<:Axis}(axs::Tuple{Vararg{Axis}}, region::Type{Ax}) =
    _reduced_indices(reduced_axis0, (), region, axs...)
@inline reduced_indices(axs::Tuple{Vararg{Axis}}, region::Axis) =
    _reduced_indices(reduced_axis, (), region, axs...)
@inline reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Axis) =
    _reduced_indices(reduced_axis0, (), region, axs...)
# Heterogeneous region tuples are processed one element at a time.
reduced_indices(axs::Tuple{Vararg{Axis}}, region::Tuple) =
    reduced_indices(reduced_indices(axs, region[1]), tail(region))
reduced_indices(axs::Tuple{Vararg{Axis}}, region::Tuple{Vararg{Axis}}) =
    reduced_indices(reduced_indices(axs, region[1]), tail(region))
reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Tuple) =
    reduced_indices0(reduced_indices0(axs, region[1]), tail(region))
reduced_indices0(axs::Tuple{Vararg{Axis}}, region::Tuple{Vararg{Axis}}) =
    reduced_indices0(reduced_indices0(axs, region[1]), tail(region))
# Compile-time comparison of two axis names, returned as Val{Bool}.
@pure samesym{n1,n2}(::Type{Axis{n1}}, ::Type{Axis{n2}}) = Val{n1==n2}()
samesym{n1,n2,T1,T2}(::Type{Axis{n1,T1}}, ::Type{Axis{n2,T2}}) = samesym(Axis{n1},Axis{n2})
samesym{n1,n2}(::Type{Axis{n1}}, ::Axis{n2}) = samesym(Axis{n1}, Axis{n2})
samesym{n1,n2}(::Axis{n1}, ::Type{Axis{n2}}) = samesym(Axis{n1}, Axis{n2})
samesym{n1,n2}(::Axis{n1}, ::Axis{n2}) = samesym(Axis{n1}, Axis{n2})
# Walk the axes, applying `f` (reduced_axis or reduced_axis0) to the one
# whose name matches `chosen` and passing the rest through unchanged.
@inline _reduced_indices{Ax<:Axis}(f, out, chosen::Type{Ax}, ax::Axis, axs...) =
    __reduced_indices(f, out, samesym(chosen, ax), chosen, ax, axs)
@inline _reduced_indices(f, out, chosen::Axis, ax::Axis, axs...) =
    __reduced_indices(f, out, samesym(chosen, ax), chosen, ax, axs)
_reduced_indices(f, out, chosen) = out
@inline __reduced_indices(f, out, ::Val{true}, chosen, ax, axs) =
    _reduced_indices(f, (out..., f(ax)), chosen, axs...)
@inline __reduced_indices(f, out, ::Val{false}, chosen, ax, axs) =
    _reduced_indices(f, (out..., ax), chosen, axs...)
# Collapsed axes: length 1 always, or length 0 when the input was empty.
reduced_axis(ax) = ax(oftype(ax.val, Base.OneTo(1)))
reduced_axis0(ax) = ax(oftype(ax.val, length(ax.val) == 0 ? Base.OneTo(0) : Base.OneTo(1)))
function Base.permutedims(A::AxisArray, perm)
p = permutation(perm, axisnames(A))
AxisArray(permutedims(A.data, p), axes(A)[[p...]])
end
Base.transpose{T}(A::AxisArray{T,2}) = AxisArray(transpose(A.data), A.axes[2], A.axes[1])
Base.ctranspose{T}(A::AxisArray{T,2}) = AxisArray(ctranspose(A.data), A.axes[2], A.axes[1])
Base.transpose{T}(A::AxisArray{T,1}) = AxisArray(transpose(A.data), Axis{:transpose}(Base.OneTo(1)), A.axes[1])
Base.ctranspose{T}(A::AxisArray{T,1}) = AxisArray(ctranspose(A.data), Axis{:transpose}(Base.OneTo(1)), A.axes[1])
Base.map!{F}(f::F, A::AxisArray) = (map!(f, A.data); A)
Base.map(f, A::AxisArray) = AxisArray(map(f, A.data), A.axes...)
function Base.map!{F,T,N,D,Ax<:Tuple{Vararg{Axis}}}(f::F, dest::AxisArray{T,N,D,Ax},
As::AxisArray{T,N,D,Ax}...)
matchingdims((dest, As...)) || error("All axes must be identically-valued")
data = map(a -> a.data, As)
map!(f, dest.data, data...)
return dest
end
function Base.map{T,N,D,Ax<:Tuple{Vararg{Axis}}}(f, As::AxisArray{T,N,D,Ax}...)
matchingdims(As) || error("All axes must be identically-valued")
data = map(a -> a.data, As)
return AxisArray(map(f, data...), As[1].axes...)
end
permutation(to::Union{AbstractVector{Int},Tuple{Int,Vararg{Int}}}, from::Symbols) = to
"""
permutation(to, from) -> p
Calculate the permutation of labels in `from` to produce the order in
`to`. Any entries in `to` that are missing in `from` will receive an
index of 0. Any entries in `from` that are missing in `to` will have
their indices appended to the end of the permutation. Consequently,
the length of `p` is equal to the longer of `to` and `from`.
"""
function permutation(to::Symbols, from::Symbols)
n = length(to)
nf = length(from)
li = linearindices(from)
d = Dict(from[i]=>i for i in li)
covered = similar(dims->falses(length(li)), li)
ind = Array{Int}(max(n, nf))
for (i,toi) in enumerate(to)
j = get(d, toi, 0)
ind[i] = j
if j != 0
covered[j] = true
end
end
k = n
for i in li
if !covered[i]
d[from[i]] != i && throw(ArgumentError("$(from[i]) is a duplicated argument"))
k += 1
k > nf && throw(ArgumentError("no incomplete containment allowed in $to and $from"))
ind[k] = i
end
end
ind
end
function Base.squeeze(A::AxisArray, dims::Dims)
keepdims = setdiff(1:ndims(A), dims)
AxisArray(squeeze(A.data, dims), axes(A)[keepdims])
end
# This version is type-stable
function Base.squeeze{Ax<:Axis}(A::AxisArray, ::Type{Ax})
dim = axisdim(A, Ax)
AxisArray(squeeze(A.data, dim), dropax(Ax, axes(A)...))
end
@inline dropax(ax, ax1, axs...) = (ax1, dropax(ax, axs...)...)
@inline dropax{name}(ax::Axis{name}, ax1::Axis{name}, axs...) = dropax(ax, axs...)
@inline dropax{name}(ax::Type{Axis{name}}, ax1::Axis{name}, axs...) = dropax(ax, axs...)
@inline dropax{name,T}(ax::Type{Axis{name,T}}, ax1::Axis{name}, axs...) = dropax(ax, axs...)
dropax(ax) = ()
# A simple display method to include axis information. It might be nice to
# eventually display the axis labels alongside the data array, but that is
# much more difficult.
function summaryio(io::IO, A::AxisArray)
_summary(io, A)
for (name, val) in zip(axisnames(A), axisvalues(A))
print(io, " :$name, ")
show(IOContext(io, :limit=>true), val)
println(io)
end
print(io, "And data, a ", summary(A.data))
end
_summary{T,N}(io, A::AxisArray{T,N}) = println(io, "$N-dimensional AxisArray{$T,$N,...} with axes:")
function Base.summary(A::AxisArray)
io = IOBuffer()
summaryio(io, A)
String(io)
end
# Custom methods specific to AxisArrays
@doc """
axisnames(A::AxisArray) -> (Symbol...)
axisnames(::Type{AxisArray{...}}) -> (Symbol...)
axisnames(ax::Axis...) -> (Symbol...)
axisnames(::Type{Axis{...}}...) -> (Symbol...)
Returns the axis names of an AxisArray or list of Axises as a tuple of Symbols.
""" ->
axisnames{T,N,D,Ax}(::AxisArray{T,N,D,Ax}) = _axisnames(Ax)
axisnames{T,N,D,Ax}(::Type{AxisArray{T,N,D,Ax}}) = _axisnames(Ax)
axisnames{Ax<:Tuple{Vararg{Axis}}}(::Type{Ax}) = _axisnames(Ax)
@pure _axisnames(Ax) = axisnames(Ax.parameters...)
axisnames() = ()
@inline axisnames{name }(::Axis{name}, B::Axis...) = tuple(name, axisnames(B...)...)
@inline axisnames{name }(::Type{Axis{name}}, B::Type...) = tuple(name, axisnames(B...)...)
@inline axisnames{name,T}(::Type{Axis{name,T}}, B::Type...) = tuple(name, axisnames(B...)...)
axisname{name,T}(::Type{Axis{name,T}}) = name
axisname{name }(::Type{Axis{name }}) = name
axisname(ax::Axis) = axisname(typeof(ax))
@doc """
axisvalues(A::AxisArray) -> (AbstractVector...)
axisvalues(ax::Axis...) -> (AbstractVector...)
Returns the axis values of an AxisArray or list of Axises as a tuple of vectors.
""" ->
axisvalues(A::AxisArray) = axisvalues(A.axes...)
axisvalues() = ()
axisvalues(ax::Axis, axs::Axis...) = tuple(ax.val, axisvalues(axs...)...)
@doc """
axes(A::AxisArray) -> (Axis...)
axes(A::AxisArray, ax::Axis) -> Axis
axes(A::AxisArray, dim::Int) -> Axis
Returns the tuple of axis vectors for an AxisArray. If an specific `Axis` is
specified, then only that axis vector is returned. Note that when extracting a
single axis vector, `axes(A, Axis{1})`) is type-stable and will perform better
than `axes(A)[1]`.
For an AbstractArray without `Axis` information, `axes` returns the
default axes, i.e., those that would be produced by `AxisArray(A)`.
""" ->
axes(A::AxisArray) = A.axes
axes(A::AxisArray, dim::Int) = A.axes[dim]
axes(A::AxisArray, ax::Axis) = axes(A, typeof(ax))
@generated function axes{T<:Axis}(A::AxisArray, ax::Type{T})
dim = axisdim(A, T)
:(A.axes[$dim])
end
axes(A::AbstractArray) = default_axes(A)
axes(A::AbstractArray, dim::Int) = default_axes(A)[dim]
### Axis traits ###
@compat abstract type AxisTrait end
immutable Dimensional <: AxisTrait end
immutable Categorical <: AxisTrait end
immutable Unsupported <: AxisTrait end
axistrait(::Any) = Unsupported
axistrait(ax::Axis) = axistrait(ax.val)
axistrait{T<:Union{Number, Dates.AbstractTime}}(::AbstractVector{T}) = Dimensional
axistrait{T<:Union{Symbol, AbstractString}}(::AbstractVector{T}) = Categorical
checkaxis(ax::Axis) = checkaxis(ax.val)
checkaxis(ax) = checkaxis(axistrait(ax), ax)
checkaxis(::Type{Unsupported}, ax) = nothing # TODO: warn or error?
# Dimensional axes must be monotonically increasing
checkaxis(::Type{Dimensional}, ax) = issorted(ax) || throw(ArgumentError("Dimensional axes must be monotonically increasing"))
# Categorical axes must simply be unique
function checkaxis(::Type{Categorical}, ax)
seen = Set{eltype(ax)}()
for elt in ax
if elt in seen
throw(ArgumentError("Categorical axes must be unique"))
end
push!(seen, elt)
end
end
_length(A::AbstractArray) = length(linearindices(A))
_length(A) = length(A)
_size(A::AbstractArray) = map(length, indices(A))
_size(A) = size(A)
_size(A::AbstractArray, d) = length(indices(A, d))
_size(A, d) = size(A, d)
| [
2,
7231,
3858,
290,
17336,
198,
198,
361,
44156,
2849,
1279,
410,
1,
15,
13,
20,
13,
15,
12,
7959,
1,
198,
220,
220,
220,
15021,
5899,
7,
1069,
8,
198,
220,
220,
220,
220,
220,
220,
220,
3671,
7,
1069,
8,
198,
220,
220,
220,
... | 2.475142 | 9,132 |
<filename>test/runtests.jl
# This file is a part of BAT.jl, licensed under the MIT License (MIT).
using Test
Test.@testset "Package BAT" begin
include("utils/test_utils.jl")
include("rngs/test_rngs.jl")
include("distributions/test_distributions.jl")
include("parameters/test_parameters.jl")
include("statistics/test_statistics.jl")
include("densities/test_densities.jl")
include("samplers/test_samplers.jl")
include("io/test_io.jl")
include("plotting/test_plotting.jl")
end
| [
27,
34345,
29,
9288,
14,
81,
2797,
3558,
13,
20362,
198,
2,
770,
2393,
318,
257,
636,
286,
37421,
13,
20362,
11,
11971,
739,
262,
17168,
13789,
357,
36393,
737,
198,
198,
3500,
6208,
198,
198,
14402,
13,
31,
9288,
2617,
366,
27813,
... | 2.598985 | 197 |
<reponame>jmmshn/LeetCode.jl<filename>src/problems/23.merge-k-sorted-lists.jl
# ---
# title: 23. Merge k Sorted Lists
# id: problem23
# author: Indigo
# date: 2021-04-14
# difficulty: Hard
# categories: Linked List, Divide and Conquer, Heap
# link: <https://leetcode.com/problems/merge-k-sorted-lists/description/>
# hidden: true
# ---
#
# You are given an array of `k` linked-lists `lists`, each linked-list is sorted
# in ascending order.
#
# _Merge all the linked-lists into one sorted linked-list and return it._
#
#
#
# **Example 1:**
#
#
#
# Input: lists = [[1,4,5],[1,3,4],[2,6]]
# Output: [1,1,2,3,4,4,5,6]
# Explanation: The linked-lists are:
# [
# 1->4->5,
# 1->3->4,
# 2->6
# ]
# merging them into one sorted list:
# 1->1->2->3->4->4->5->6
#
#
# **Example 2:**
#
#
#
# Input: lists = []
# Output: []
#
#
# **Example 3:**
#
#
#
# Input: lists = [[]]
# Output: []
#
#
#
#
# **Constraints:**
#
# * `k == lists.length`
# * `0 <= k <= 10^4`
# * `0 <= lists[i].length <= 500`
# * `-10^4 <= lists[i][j] <= 10^4`
# * `lists[i]` is sorted in **ascending order**.
# * The sum of `lists[i].length` won't exceed `10^4`.
#
#
## @lc code=start
using LeetCode
Base.isless(l1::ListNode, l2::ListNode) = l1.val < l2.val
function merge_k_lists(lists::Vector{T}) where {T<:Union{ListNode{Int},Nothing}}
pq = BinaryMinHeap{ListNode{Int}}()
for l in lists
isnothing(l) || push!(pq, l)
end
res = ListNode{Int}()
p = res
while !isempty(pq)
tp_list = pop!(pq)
p = p.next = tp_list
tp_list = tp_list.next
isnothing(tp_list) || push!(pq, tp_list)
end
return res.next
end
## @lc code=end
| [
27,
7856,
261,
480,
29,
73,
76,
907,
21116,
14,
3123,
316,
10669,
13,
20362,
27,
34345,
29,
10677,
14,
1676,
22143,
14,
1954,
13,
647,
469,
12,
74,
12,
82,
9741,
12,
20713,
13,
20362,
198,
2,
11420,
198,
2,
3670,
25,
2242,
13,
... | 2.033105 | 876 |
using Plots
using FFTW
tan_taylor(x) = x+( x^3/3 )+( x^5*2/15.0 )+( x^7*17/315.0 )+( x^9*62/2835.0 )+( x^11*1382/155925.0 )+( x^13*21844/6081075.0 )+( x^15*929569/638512875.0 );
x = LinRange(-π/2+0.1, π/2-0.1, 1001);
tanx_taylor = tan_taylor.(x);
tanx_native = tan.(x);
delta = tanx_native-tanx_taylor
# Time domain comparison.
plot( x, tanx_native,
label = "Julia native tan",
size = (1024, 720),)
plot!( x, tanx_taylor,
label = "8-term Taylor series",)
savefig("figures/tan/time_domain") | [
3500,
1345,
1747,
198,
3500,
376,
9792,
54,
198,
198,
38006,
62,
83,
7167,
7,
87,
8,
796,
2124,
33747,
2124,
61,
18,
14,
18,
1267,
33747,
2124,
61,
20,
9,
17,
14,
1314,
13,
15,
1267,
33747,
2124,
61,
22,
9,
1558,
14,
27936,
13... | 1.965385 | 260 |
module Separators
using Genie, Stipple, StippleUI, StippleUI.API
import Genie.Renderer.Html: HTMLString, normal_element, template
export separator
function __init__()
Genie.Renderer.Html.register_normal_element("q__separator", context = Genie.Renderer.Html)
end
function separator(args...; wrap::Function = StippleUI.DEFAULT_WRAPPER, kwargs...)
wrap() do
Genie.Renderer.Html.q__separator(args...; kwargs...)
end
end
end
| [
21412,
8621,
283,
2024,
198,
198,
3500,
49405,
11,
520,
18793,
11,
520,
18793,
10080,
11,
520,
18793,
10080,
13,
17614,
198,
11748,
49405,
13,
49,
437,
11882,
13,
39,
20369,
25,
11532,
10100,
11,
3487,
62,
30854,
11,
11055,
198,
198,
... | 2.824675 | 154 |
<gh_stars>0
module PostgreSQLLoader
using Octo.Repo: ExecuteResult
# https://github.com/invenia/LibPQ.jl
using LibPQ # v0.9.1
using .LibPQ.Tables
const current = Dict{Symbol, Any}(
:conn => nothing,
)
current_conn() = current[:conn]
# db_connect
function db_connect(; kwargs...)
if !isempty(kwargs)
str = join(map(kv->join(kv, '='), collect(kwargs)), ' ')
conn = LibPQ.Connection(str)
current[:conn] = conn
end
end
# db_disconnect
function db_disconnect()
conn = current_conn()
close(conn)
current[:conn] = nothing
end
# query
function query(sql::String)
conn = current_conn()
stmt = LibPQ.prepare(conn, sql)
result = LibPQ.execute(stmt)
df = Tables.rowtable(result)
LibPQ.close(result)
df
end
function query(prepared::String, vals::Vector)
conn = current_conn()
stmt = LibPQ.prepare(conn, prepared)
result = LibPQ.execute(stmt, vals)
df = Tables.rowtable(result)
LibPQ.close(result)
df
end
# execute
function execute(sql::String)::ExecuteResult
conn = current_conn()
LibPQ.execute(conn, sql)
ExecuteResult()
end
function execute(prepared::String, vals::Vector)::ExecuteResult
conn = current_conn()
stmt = LibPQ.prepare(conn, prepared)
LibPQ.execute(stmt, vals)
ExecuteResult()
end
function execute(prepared::String, nts::Vector{<:NamedTuple})::ExecuteResult
conn = current_conn()
stmt = LibPQ.prepare(conn, prepared)
for tup in nts
LibPQ.execute(stmt, collect(tup))
end
ExecuteResult()
end
end # module Octo.Backends.PostgreSQLLoader
| [
27,
456,
62,
30783,
29,
15,
198,
21412,
2947,
16694,
50,
48,
3069,
1170,
263,
198,
198,
3500,
2556,
78,
13,
6207,
78,
25,
8393,
1133,
23004,
198,
198,
2,
3740,
1378,
12567,
13,
785,
14,
259,
574,
544,
14,
25835,
47,
48,
13,
2036... | 2.372404 | 674 |
<filename>src/clibrary.jl
struct Clibrary
handle::Ptr{Cvoid}
Clibrary(libName::Union{AbstractString, Nothing} = nothing, flags = Libdl.RTLD_LAZY | Libdl.RTLD_DEEPBIND | Libdl.RTLD_LOCAL) = new(Libdl.dlopen(libName === nothing ? _NullCString() : libName, flags))
end
Base.close(lib::Clibrary) = Libdl.dlclose(lib.handle)
# NOTE: risky hack to trick Julia dlopen into ccalling the C dlopen with NULL (meaning to dlopen current process rather than a library)
struct _NullCString <: AbstractString end
Base.cconvert(::Type{Cstring}, ::_NullCString) = Cstring(C_NULL)
| [
27,
34345,
29,
10677,
14,
565,
4115,
13,
20362,
628,
198,
7249,
1012,
4115,
198,
197,
28144,
3712,
46745,
90,
34,
19382,
92,
198,
197,
198,
197,
2601,
4115,
7,
8019,
5376,
3712,
38176,
90,
23839,
10100,
11,
10528,
92,
796,
2147,
11,... | 2.865 | 200 |
"""
LinearRegression()
Classic regression model. This struct has no parameter.
If you want to use polynomial model, use `Regression.make_design_matrix()`.
see also: [`make_design_matrix`](@ref)
# Example
```jldoctest regression
julia> x = [
16.862463771320925 68.10823385851712
15.382965696961577 65.4313485700859
8.916228406218375 53.92034559524475
10.560285659132695 59.17305391117168
12.142253214135884 62.28708207525656
5.362107221163482 43.604947901567414
13.893239446341777 62.44348617377496
11.871357065173395 60.28433066289655
29.83792267802442 69.22281924803998
21.327107214235483 70.15810991597944
23.852372696012498 69.81780163668844
26.269031430914108 67.61037566099782
22.78907104644012 67.78105545358633
26.73342178134947 68.59263965946904
9.107259141706415 56.565383817343495
29.38551885863976 68.1005579469209
7.935966787763017 53.76264777936664
29.01677894379809 68.69484161138638
6.839609488194577 49.69794758177567
13.95215840314148 62.058116579899085]; #These data are also used to explanations of other functions.
julia> t = [169.80980778351542, 167.9081124078835, 152.30845618985222, 160.3110300206261, 161.96826472170756, 136.02842285615077, 163.98131131382686, 160.117817321485, 172.22758529098235, 172.21342437006865, 171.8939175591617, 169.83018083884602, 171.3878062674257, 170.52487535026015, 156.40282783981309, 170.6488327896672, 151.69267899906185, 172.32478221316322, 145.14365314788827, 163.79383292080666];
julia> model = LinearRegression()
LinearRegression(Float64[])
julia> fit!(model, x, t)
3-element Vector{Float64}:
-0.04772448076255398
1.395963968616736
76.7817095600793
julia> model(x)
20-element Vector{Float64}:
171.05359766482795
167.38737053144575
151.62704681535598
158.88117658330424
163.15274911747872
137.3967419011542
163.28751869479999
160.3699086857777
171.99027166957023
173.70207799107243
173.10650291105486
169.9096820022986
170.31402414534642
171.25872436348817
155.31030802635905
170.44522606721017
151.45368882321284
171.29242257091374
145.83183688699864
162.74674475052848
```
"""
mutable struct LinearRegression
w::Array
LinearRegression() = new(Array{Float64}(undef, 0))
end
"""
fit!(model, x, t)
`x` must be the number of features in the first dimension and the second dimension must be the number of data.
"""
function fit!(model::LinearRegression, x, t)
check_size(x, t)
x = expand(x)
model.w = inv(x * x') * x * t
end
(model::LinearRegression)(x) = expand(x)' * model.w | [
37811,
198,
220,
220,
220,
44800,
8081,
2234,
3419,
198,
39914,
20683,
2746,
13,
770,
2878,
468,
645,
11507,
13,
198,
1532,
345,
765,
284,
779,
745,
6213,
49070,
2746,
11,
779,
4600,
8081,
2234,
13,
15883,
62,
26124,
62,
6759,
8609,
... | 2.277876 | 1,130 |
<reponame>MillironX/beefblup
# beefblup
# Julia package for performing single-variate BLUP to find beef cattle
# breeding values
# (C) 2021 <NAME>
# Licensed under BSD-3-Clause License
# cSpell:includeRegExp #.*
# cSpell:includeRegExp ("""|''')[^\1]*\1
module BeefBLUP
# Import the required packages
using CSV
using DataFrames
using LinearAlgebra
using Dates
using Gtk
# Main entry-level function - acts just like the script
function beefblup()
# Ask for an input spreadsheet
path = open_dialog_native(
"Select a beefblup worksheet",
GtkNullContainer(),
("*.csv", GtkFileFilter("*.csv", name="beefblup worksheet"))
)
# Ask for an output text filename
savepath = save_dialog_native(
"Save your beefblup results",
GtkNullContainer(),
(GtkFileFilter("*.txt", name="Results file"),
"*.txt")
)
# Ask for heritability
print("What is the heritability for this trait?> ")
h2 = parse(Float64, readline(stdin))
beefblup(path, savepath, h2)
end
function beefblup(datafile::String, h2::Float64)
# Assume the data is named the same as the file without the trailing extension
dataname = join(split(datafile, ".")[1:end - 1])
# Create a new results name
resultsfile = string(dataname, "_results.txt")
# Pass this info on to the worker
beefblup(datafile, resultsfile, h2)
end
# Main worker function, can perform all the work if given all the user input
function beefblup(path::String, savepath::String, h2::Float64)
# Import data from a suitable spreadsheet
data = DataFrame(CSV.File(path))
# Make sure the data is in the proper format
renamecolstospec!(data)
# Sort the array by date
sort!(data, :birthdate)
# Define fields to hold id values for animals and their parents
numanimals = length(data.id)
# Calculate the relationship matrix
A = additiverelationshipmatrix(data.id, data.dam, data.sire)
# Extract all of the fixed effects
fixedeffectdata = data[:,5:end-1]
(X, fixedeffects) = fixedeffectmatrix(fixedeffectdata)
# Extract the observed data
Y = convert(Array{Float64}, data[:,end])
# The random effects matrix
Z = Matrix{Int}(I, numanimals, numanimals)
# Remove items where there is no data
nullobs = findall(isnothing, Y)
Z[nullobs, nullobs] .= 0
# Calculate heritability
λ = (1 - h2) / h2
# Use the mixed-model equations
MME = [X' * X X' * Z; Z' * X (Z' * Z) + (inv(A) .* λ)]
MMY = [X' * Y; Z' * Y]
solutions = MME \ MMY
# Find the accuracies
diaginv = diag(inv(MME))
reliability = ones(Float64, length(diaginv)) .- diaginv .* λ
# Find how many traits we found BLUE for
numgroups = length(reliability) - numanimals
# Split the BLUP and BLUE results
β = solutions[1:numgroups]
μ = solutions[numgroups+1:end]
μ_reliability = reliability[numgroups+1:end]
# Extract the names of the traits
traitname = names(data)[end]
# Start printing results to output
fileID = open(savepath, "w")
write(fileID, "beefblup Results Report\n")
write(fileID, "Produced using beefblup (")
write(fileID, "https://github.com/millironx/beefblup")
write(fileID, ")\n\n")
write(fileID, "Input:\t")
write(fileID, path)
write(fileID, "\nAnalysis performed:\t")
write(fileID, string(Dates.today()))
write(fileID, "\nTrait examined:\t")
write(fileID, traitname)
write(fileID, "\n\n")
# Print base population stats
write(fileID, "Base Population:\n")
for i in 1:length(fixedeffects)
effect = fixedeffects[i]
write(fileID, "\t")
write(fileID, string(effect.name))
write(fileID, ":\t")
write(fileID, string(effect.basetrait))
write(fileID, "\n")
end
write(fileID, "\tMean ")
write(fileID, traitname)
write(fileID, ":\t")
write(fileID, string(solutions[1]))
write(fileID, "\n\n")
# Contemporary group adjustments
counter = 2
write(fileID, "Contemporary Group Effects:\n")
for i in 1:length(fixedeffects)
effect = fixedeffects[i]
write(fileID, "\t")
write(fileID, effect.name)
write(fileID, "\tEffect\n")
for j in 1:length(effect.alltraits)
types = effect.alltraits[j]
write(fileID, "\t")
write(fileID, string(types))
write(fileID, "\t")
write(fileID, string(β[counter]))
write(fileID, "\n")
counter = counter + 1
end
write(fileID, "\n")
end
write(fileID, "\n")
# Expected breeding values
write(fileID, "Expected Breeding Values:\n")
write(fileID, "\tID\tEBV\tReliability\n")
for i in 1:numanimals
write(fileID, "\t")
write(fileID, string(data.id[i]))
write(fileID, "\t")
write(fileID, string(μ[i]))
write(fileID, "\t")
write(fileID, string(μ_reliability[i]))
write(fileID, "\n")
end
write(fileID, "\n - END REPORT -")
close(fileID)
end
"""
fixedeffectmatrix(fixedeffectdata::DataFrame)
Creates contemporary groupings and the fixed-effect incidence matrix based on the fixed
effects listed in `fixedeffectdata`.
Returns a tuple `(X::Matrix{Int}, fixedeffects::Array{FixedEffect})` in which `X` is the
actual matrix, and `fixedeffects` is the contemporary groupings.
"""
function fixedeffectmatrix(fixedeffectdata::DataFrame)
# Declare an empty return matrix
fixedeffects = FixedEffect[]
# Add each trait to the array
for i in 1:size(fixedeffectdata)[2]
name = names(fixedeffectdata)[i]
traits = eachcol(fixedeffectdata)[i]
if length(unique(traits)) > 1
push!(fixedeffects, FixedEffect(name, traits))
else
@warn string("column '", name, "' does not have any unique animals and will be dropped from analysis")
pname = propertynames(fixedeffectdata)[i]
DataFrames.select!(fixedeffectdata, Not(pname))
end
end
X = ones(Int64, (size(fixedeffectdata)[1], 1))
for i in 1:length(fixedeffects)
trait = fixedeffects[i]
for phenotype in trait.alltraits
X = cat(X, Int64.(fixedeffectdata[:,i] .== phenotype), dims=2)
end
end
return X, fixedeffects
end
"""
additiverelationshipmatrix(id, dam, sire)
Returns the additive numerator relationship matrix based on the pedigree provided in `dam`
and `sire` for animals in `id`.
"""
function additiverelationshipmatrix(id::AbstractVector, damid::AbstractVector, sireid::AbstractVector)
# Sanity-check for valid pedigree
if !(length(id) == length(damid) && length(damid) == length(sireid))
throw(ArgumentError("id, dam, and sire must be of the same length"))
end
# Convert to positions
dam = indexin(damid, id)
sire = indexin(sireid, id)
# Calculate loop iterations
numanimals = length(dam)
# Create an empty matrix for the additive relationship matrix
A = zeros(numanimals, numanimals)
# Create the additive relationship matrix by the FORTRAN method presented by
# Henderson
for i in 1:numanimals
if !isnothing(dam[i]) && !isnothing(sire[i])
for j in 1:(i - 1)
A[j,i] = 0.5 * (A[j,sire[i]] + A[j,dam[i]])
A[i,j] = A[j,i]
end
A[i,i] = 1 + 0.5 * A[sire[i], dam[i]]
elseif !isnothing(dam[i]) && isnothing(sire[i])
for j in 1:(i - 1)
A[j,i] = 0.5 * A[j,dam[i]]
A[i,j] = A[j,i]
end
A[i,i] = 1
elseif isnothing(dam[i]) && !isnothing(sire[i])
for j in 1:(i - 1)
A[j,i] = 0.5 * A[j,sire[i]]
A[i,j] = A[j,i]
end
A[i,i] = 1
else
for j in 1:(i - 1)
A[j,i] = 0
A[i,j] = 0
end
A[i,i] = 1
end
end
return A
end
"""
renamecolstospec(::DataFrame)
Renames the first four columns of the beefblup data sheet so that they can be referred to by
name instead of by column index, regardless of user input.
"""
function renamecolstospec!(df::DataFrame)
# Pull out the fixed-effect and observation name
othernames = propertynames(df)[5:end]
# Put specification column names and user-defined names together
allnames = cat([:id, :birthdate, :dam, :sire], othernames, dims=1)
# Rename in the DataFrame
rename!(df, allnames, makeunique=true)
return df
end
struct FixedEffect
name::String
basetrait::Any
alltraits::AbstractArray{Any}
end
function FixedEffect(name::String, incidences)
basetrait = last(unique(incidences))
types = unique(incidences)[1:end-1]
return FixedEffect(name, basetrait, types)
end
end
| [
27,
7856,
261,
480,
29,
22603,
1934,
55,
14,
1350,
891,
2436,
929,
198,
2,
12023,
2436,
929,
198,
2,
22300,
5301,
329,
9489,
2060,
12,
25641,
378,
9878,
8577,
284,
1064,
12023,
17025,
198,
2,
18954,
3815,
198,
2,
357,
34,
8,
33448... | 2.434469 | 3,609 |
<gh_stars>1-10
"""
Execution state that encapsulates synchronization primitives and resources
bound to a command submission on the GPU.
Resources bound to the related execution can be either freed or released (dereferenced)
once the execution completes.
The execution is assumed to be completed when the execution
state has been waited on (see [`wait`](@ref)), or when an inherited execution state has
been completed execution.
Execution state can be inherited across submissions, where resource dependencies
are transfered over to a new execution state bound to a command that **must** include
the inherited `ExecutionState`'s semaphore as a wait semaphore.
"""
struct ExecutionState
queue::Queue
semaphore::Optional{Vk.SemaphoreSubmitInfoKHR}
fence::Optional{Vk.Fence}
free_after_completion::Vector{Ref}
release_after_completion::Vector{Ref}
end
function ExecutionState(queue; semaphore = nothing, fence = nothing, free_after_completion = [], release_after_completion = [])
ExecutionState(queue, semaphore, fence, free_after_completion, release_after_completion)
end
function finalize!(exec::ExecutionState)
for resource in exec.free_after_completion
finalize(resource[])
end
empty!(exec.free_after_completion)
empty!(exec.release_after_completion)
true
end
function _wait(fence::Vk.Fence, timeout)
ret = unwrap(Vk.wait_for_fences(Vk.device(fence), [fence], true, timeout))
ret == Vk.SUCCESS
end
function _wait(fences, timeout)
isempty(fences) && return true
ret = unwrap(Vk.wait_for_fences(Vk.device(first(fence)), fences, true, timeout))
ret == Vk.SUCCESS
end
function Base.wait(exec::ExecutionState, timeout = typemax(UInt32))
fence = exec.fence::Vk.Fence
_wait(fence, timeout) && finalize!(exec)
end
function Base.wait(execs::AbstractVector{ExecutionState}, timeout = typemax(UInt32))
fences = map(Base.Fix2(getproperty, :fence), execs)::Vector{Fence}
_wait(fences, timeout) && all(finalize!, execs)
end
function submit(dispatch::QueueDispatch, queue_family_index, submit_infos;
signal_fence = false,
semaphore = nothing,
free_after_completion = [],
release_after_completion = [],
inherit = nothing,
check_inheritance = true,
)
q = queue(dispatch, queue_family_index)
fence = signal_fence ? Vk.Fence(Vk.device(q)) : nothing
if inherit isa ExecutionState
append!(free_after_completion, inherit.free_after_completion)
append!(release_after_completion, inherit.release_after_completion)
empty!(inherit.free_after_completion)
empty!(inherit.release_after_completion)
if check_inheritance && !any(Base.Fix1(in, inherit.semaphore), submit_infos.wait_semaphores)
error("No wait semaphore has been registered that
matches the inherited state.")
end
end
for submit_info in submit_infos
for command_buffer_info in submit_info.command_buffer_infos
end_recording(command_buffer_info.command_buffer)
end
end
unwrap(Vk.queue_submit_2_khr(q, submit_infos; fence = something(fence, C_NULL)))
ExecutionState(q; semaphore, fence, free_after_completion, release_after_completion)
end
function submit(dispatch::QueueDispatch, queue_family_index, submit_info::Vk.SubmitInfo2KHR;
signal_fence = false,
semaphore = nothing,
free_after_completion = [],
release_after_completion = [],
inherit = nothing,
check_inheritance = true,
)
if !isnothing(semaphore)
semaphore in submit_info.signal_semaphore_infos || error("The provided semaphore was not included in the submission structure")
end
submit(dispatch, queue_family_index, [submit_info]; signal_fence, semaphore, free_after_completion, release_after_completion, inherit, check_inheritance)
end
function Base.show(io::IO, exec::ExecutionState)
print(io, ExecutionState, "($(exec.queue)")
if !isnothing(exec.fence)
is_complete = _wait(exec.fence, 0)
if is_complete
print(io, ", completed execution")
else
print(io, ", in progress")
end
end
print(io, ')')
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
37811,
198,
23002,
1009,
1181,
326,
32652,
15968,
42133,
2684,
20288,
290,
4133,
198,
7784,
284,
257,
3141,
14498,
319,
262,
11362,
13,
198,
198,
33236,
5421,
284,
262,
3519,
9706,
460,
307,
... | 2.738017 | 1,523 |
function setupCommBuffers!(domain::Domain, edgeNodes::Int)
function cache_align_real(n::Int64)
n = Int32(n)
(n + CACHE_COHERENCE_PAD_REAL%Int32 - one(Int32)) & ~(CACHE_COHERENCE_PAD_REAL%Int32 - one(Int32))
end
@unpack_Domain domain
# allocate a buffer large enough for nodal ghost data
maxEdgeSize = max(domain.sizeX, max(domain.sizeY, domain.sizeZ))+1
m_maxPlaneSize = cache_align_real(maxEdgeSize*maxEdgeSize)
m_maxEdgeSize = cache_align_real(maxEdgeSize)
maxPlaneSize = m_maxPlaneSize
maxEdgeSize = m_maxEdgeSize
# assume communication to 6 neighbors by default
m_rowMin = (m_rowLoc == 0) ? 0 : 1
m_rowMax = (m_rowLoc == m_tp-1) ? 0 : 1
m_colMin = (m_colLoc == 0) ? 0 : 1
m_colMax = (m_colLoc == m_tp-1) ? 0 : 1
m_planeMin = (m_planeLoc == 0) ? 0 : 1
m_planeMax = (m_planeLoc == m_tp-1) ? 0 : 1
# account for face communication
comBufSize =(
(m_rowMin + m_rowMax + m_colMin + m_colMax + m_planeMin + m_planeMax) *
m_maxPlaneSize * MAX_FIELDS_PER_MPI_COMM
)
# account for edge communication
comBufSize += (
((m_rowMin & m_colMin) + (m_rowMin & m_planeMin) + (m_colMin & m_planeMin) +
(m_rowMax & m_colMax) + (m_rowMax & m_planeMax) + (m_colMax & m_planeMax) +
(m_rowMax & m_colMin) + (m_rowMin & m_planeMax) + (m_colMin & m_planeMax) +
(m_rowMin & m_colMax) + (m_rowMax & m_planeMin) + (m_colMax & m_planeMin)) *
m_maxEdgeSize * MAX_FIELDS_PER_MPI_COMM
)
# account for corner communication
# factor of 16 is so each buffer has its own cache line
comBufSize += (((m_rowMin & m_colMin & m_planeMin) +
(m_rowMin & m_colMin & m_planeMax) +
(m_rowMin & m_colMax & m_planeMin) +
(m_rowMin & m_colMax & m_planeMax) +
(m_rowMax & m_colMin & m_planeMin) +
(m_rowMax & m_colMin & m_planeMax) +
(m_rowMax & m_colMax & m_planeMin) +
(m_rowMax & m_colMax & m_planeMax)) * CACHE_COHERENCE_PAD_REAL
)
commDataSend = Vector{Float64}(undef, comBufSize)
commDataRecv = Vector{Float64}(undef, comBufSize)
# prevent floating point exceptions
fill!(commDataSend, 0)
fill!(commDataRecv, 0)
# Boundary nodesets
if (m_colLoc == 0)
resize!(symmX, edgeNodes*edgeNodes)
end
if (m_rowLoc == 0)
resize!(symmY, edgeNodes*edgeNodes)
end
if (m_planeLoc == 0)
resize!(symmZ, edgeNodes*edgeNodes)
end
@pack_Domain! domain
end
getMyRank(comm::MPI.Comm) = MPI.Comm_rank(comm)
getMyRank(::Nothing) = 0
getNumRanks(comm::MPI.Comm) = MPI.Comm_size(comm)
getNumRanks(::Nothing) = 1
getWtime(::MPI.Comm) = MPI.Wtime()
getWtime(::Nothing) = time()
comm_max(data::Float64, comm::MPI.Comm) = MPI.Allreduce(data, MPI.MAX, comm)
comm_max(data::Float64, ::Nothing) = data
comm_min(data::Float64, comm::MPI.Comm) = MPI.Allreduce(data, MPI.MIN, comm)
comm_min(data::Float64, ::Nothing) = data
comm_barrier(comm::MPI.Comm) = MPI.Barrier(comm)
comm_barrier(::Nothing) = nothing
| [
8818,
9058,
6935,
36474,
364,
0,
7,
27830,
3712,
43961,
11,
5743,
45,
4147,
3712,
5317,
8,
198,
220,
220,
220,
2163,
12940,
62,
31494,
62,
5305,
7,
77,
3712,
5317,
2414,
8,
198,
220,
220,
220,
220,
220,
220,
220,
299,
796,
2558,
... | 2.11413 | 1,472 |
using ParallelAccelerator
#ParallelAccelerator.ParallelIR.set_debug_level(3)
@acc function find_chg(k,m,W,Wp)
W_tmp = [abs(W[i,j] - Wp[i,j]) for i in 1:m, j in 1:k]
s = [sum(W_tmp[:,j]) for j in 1:k]
chg = maximum(s)
end
function main(m::Int, k::Int)
W = Array{Float64}(m, k)
Wp = Array{Float64}(m, k)
fill!(W, 3)
fill!(Wp, 5)
chg = find_chg(k,m,W,Wp)
println("chg = ", chg)
end
main(100, 100)
| [
3500,
42945,
12832,
7015,
1352,
198,
198,
2,
10044,
29363,
12832,
7015,
1352,
13,
10044,
29363,
4663,
13,
2617,
62,
24442,
62,
5715,
7,
18,
8,
198,
198,
31,
4134,
2163,
1064,
62,
354,
70,
7,
74,
11,
76,
11,
54,
11,
54,
79,
8,
... | 1.850427 | 234 |
using Iconv
using Test
@test iconv("笨熊", "UTF-8", "GBK") == togbk("笨熊") == g"笨熊"
@test iconv(g"笨熊", "GBK", "UTF-8") == toutf8(g"笨熊") == b"笨熊" | [
3500,
26544,
85,
198,
3500,
6208,
198,
198,
31,
9288,
7196,
85,
7203,
163,
105,
101,
163,
228,
232,
1600,
366,
48504,
12,
23,
1600,
366,
4579,
42,
4943,
6624,
284,
22296,
74,
7203,
163,
105,
101,
163,
228,
232,
4943,
6624,
308,
1,... | 1.42 | 100 |
<gh_stars>1-10
module GHWT_tf_1d
include("utils.jl")
using ..GraphSignal, ..GraphPartition, ..BasisSpecification, LinearAlgebra
include("common.jl")
export ghwt_tf_bestbasis, eghwt_bestbasis
"""
coeffdict = tf_init(dmatrix::Matrix{Float64},GP::GraphPart)
Store the expanding coeffcients from matrix into a list of dictionary (inbuilt hashmap in Julia)
### Input Arguments
* `dmatrix`: The expanding GHWT coefficients of all levels corresponding to input GP
* `GP::GraphPart`: an input GraphPart object
### Output Arguments
* `coeffdict`: The expanding GHWT coeffcients stored in a list of "dictionary" (inbuilt hashmap in Julia),
* `coeffdict`: The entry `coeffdict[j][(k,l)]` corresponds to the coefficient of basis-vector on level j with region k and tag l.
Copyright 2018 The Regents of the University of California
Implemented by <NAME> (Adviser: Dr. <NAME>)
"""
function tf_init(dmatrix::Matrix{Float64},GP::GraphPart)
# Obtain the tag information
tag = convert(Array{Int,2},GP.tag)
# Obtain the region information
tag_r = convert(Array{Int,2},rs_to_region(GP.rs, GP.tag))
(m,n) = size(dmatrix)
# Initialize coeffdict
coeffdict = Array{Dict{Tuple{Int,Int},Float64}}(undef, n)
# Fill in the values with the rule that `coeffdict[j][(k,l)]` represents
# the coefficient of basis-vector on level j with region k and tag l.
for i = 1:n
coeffdict[i]= Dict{Tuple{Int,Int},Float64}((tag_r[j,i],tag[j,i]) => dmatrix[j,i] for j=1:m)
end
return coeffdict
end
"""
coeffdict_new,tag_tf = tf_core_new(coeffdict::Array{Dict{Tuple{Int,Int},Float64},1})
One forward iteration of time-frequency adapted GHWT method. For each entry in `coeffdict_new`, we compare two (or one) entries in 'coeffdict' on time-direction and two (or one) entries in 'coeffdict' on frequency-direction.
Those two groups reprensent the same subspace. We compare the cost-functional value of them and choose the smaller one as a new entry in 'coeffdict_new'.
### Input Arguments
* `coeffdict`: The entries of which reprensents the cost functional value of some basis-vectors' coefficients.
### Output Arguments
* `coeffdict_new`: The entries of which represents the cost functional value of some basis-vectors' coefficients
* `tag_tf`: Indicating whether the time-direction (0) or frequency direction (1) was chosen for each entry in coeffdict_new.
Copyright 2018 The Regents of the University of California
Implemented by <NAME> (Adviser: Dr. <NAME>)
"""
function tf_core_new(coeffdict::Array{Dict{Tuple{Int,Int},Float64},1})
# We choose '1-norm' as the optional cost functional
costfun = cost_functional(1)
jmax = length(coeffdict)
# Initialization
coeffdict_new = Array{Dict{Tuple{Int,Int},Float64}}(undef, jmax-1)
tag_tf = Array{Dict{Tuple{Int,Int},Bool}}(undef, jmax-1)
# Iterate through levels
for j = 1:(jmax-1)
# the temporary dictionary, which will be the j-th level of `coeffdict_new`
temp_coeff = Dict{Tuple{Int,Int},Float64}()
# the temporary dictionary, which will be the j-th level of 'tag_tf'
temp_tf = Dict{Tuple{Int,Int},Bool}()
# iterate through the entries in coeffdict on level j
for key in keys(coeffdict[j])
# coeffdict[j][k,l] represent the entry on level j with region k and tag l
k = key[1] # region index
l = key[2] # tag index
# only look at the entry with even tag l to avoid duplication
if l%2 == 0
# search for the (j,k,l+1) entry.
# the (j,k,l) and (j,k,l+1) span the same subspace as (j+1,2*k,l/2) and (j+1,2*k+1,l/2)
# (j,k,l) and (j,k,l+1) are `frequency-direction`
# (j,k,l) and (j,k,l+1) are `time-direction`
if haskey(coeffdict[j],(k,l+1)) # check for degenerate case ((j,k,l+1) doesn't exist)
freqcos = costfun([coeffdict[j][(k,l)],coeffdict[j][(k,l+1)]])
else
freqcos = costfun([coeffdict[j][(k,l)]])
end
if ~haskey(coeffdict[j+1],(2*k,l/2)) # check for degenerate case ((j+1,2*k,l/2) or (j+1,2*k+1,l/2) doesn't exist)
timecos = costfun([coeffdict[j+1][(2*k+1,l/2)]])
elseif ~haskey(coeffdict[j+1],(2*k+1,l/2))
timecos = costfun([coeffdict[j+1][(2*k,l/2)]])
else
timecos = costfun([coeffdict[j+1][(2*k+1,l/2)],coeffdict[j+1][(2*k,l/2)]])
end
# compare the cost-functional value and record into 'tag_tf'
if timecos <= freqcos
temp_coeff[(k,l/2)] = timecos
temp_tf[(k,l/2)] = false
else
temp_coeff[(k,l/2)] = freqcos
temp_tf[(k,l/2)] = true
end
end
end
coeffdict_new[j] = temp_coeff
tag_tf[j] = temp_tf
end
return coeffdict_new,tag_tf
end
"""
tag_tf_b_new = tf_basisrecover_new(tag_tf_b::Array{Dict{Tuple{Int,Int},Bool}},tag_tf_f::Array{Dict{Tuple{Int,Int},Bool}})
One backward iteration of time-frequency adapted GHWT method to recover the best-basis from the `tag_tf`s recorded.
### Input Arguments
* `tag_tf_b`: The `dictionary` recording the time-or-frequency information on some iteration 'i' in the main algorithm
* `tag_tf_f`: The `dictionary` recording the time-or-frequency information on some iteration 'i+1' in the main algorithm
### Output Arguments
* `tag_tf_b_new`: The updated 'tag_tf_b'. Eventually the 'tag_tf' on iteration 1 will represent the selected best-basis
Copyright 2018 The Regents of the University of California
Implemented by <NAME> (Adviser: Dr. <NAME>)
"""
function tf_basisrecover_new(tag_tf_b::Array{Dict{Tuple{Int,Int},Bool}},tag_tf_f::Array{Dict{Tuple{Int,Int},Bool}})
# Initialization
jmax = length(tag_tf_b)
#tag_tf_b_new = Array{Dict{Tuple{Int,Int},Bool}}(jmax)
tag_tf_b_new = Array{Dict{Tuple{Int,Int},Bool}}(undef, jmax)
for j = 1:jmax
tag_tf_b_new[j] = Dict{Tuple{Int,Int},Bool}()
end
# Iterate on the levels
for j = 1:(jmax-1)
for key in keys(tag_tf_f[j])
k = key[1]
l = key[2]
# The entries on frequency-direction are selected
if tag_tf_f[j][(k,l)] == true
if ~haskey(tag_tf_b[j],(k,2*l))
tag_tf_b_new[j][(k,2*l+1)] = tag_tf_b[j][(k,2*l+1)]
elseif ~haskey(tag_tf_b[j],(k,2*l+1))
tag_tf_b_new[j][(k,2*l)] = tag_tf_b[j][(k,2*l)]
else
tag_tf_b_new[j][(k,2*l)] = tag_tf_b[j][(k,2*l)]
tag_tf_b_new[j][(k,2*l+1)] = tag_tf_b[j][(k,2*l+1)]
end
else
# The entries on time-direction are selected
if ~haskey(tag_tf_b[j+1],(2*k,l))
tag_tf_b_new[j+1][(2*k+1,l)] = tag_tf_b[j+1][(2*k+1,l)]
elseif ~haskey(tag_tf_b[j+1],(2*k+1,l))
tag_tf_b_new[j+1][(2*k,l)] = tag_tf_b[j+1][(2*k,l)]
else
tag_tf_b_new[j+1][(2*k+1,l)] = tag_tf_b[j+1][(2*k+1,l)]
tag_tf_b_new[j+1][(2*k,l)] = tag_tf_b[j+1][(2*k,l)]
end
end
end
end
return tag_tf_b_new
end
"""
(dvec, BS) = ghwt_tf_bestbasis(dmatrix::Array{Float64,3}, GP::GraphPart; cfspec::Float64 = 1.0, flatten::Any = 1.0)
Implementation of time-frequency adapted GHWT method = eGHWT.
Modified from the algorithm in the paper: "A Fast Algorithm for Adapted Time Frequency Tilings" by <NAME> and <NAME>.
### Input Arguments
* `dmatrix::Array{Float64,3}`: the matrix of expansion coefficients
* `GP::GraphPart`: an input GraphPart object
* `cfspec::Any`: the specification of cost functional to be used (default: 1.0, i.e., 1-norm)
* `flatten::Any`: the method for flattening vector-valued data to scalar-valued data (default: 1.0, i.e., 1-norm)
### Output Arguments
* `dvec::Matrix{Float64}`: the vector of expansion coefficients corresponding to the eGHWT best basis
* `BS::BasisSpec`: a BasisSpec object which specifies the eGHWT best basis
Copyright 2018 The Regents of the University of California
Implemented by <NAME> (Adviser: Dr. <NAME>)
"""
function ghwt_tf_bestbasis(dmatrix::Array{Float64,3}, GP::GraphPart; cfspec::Float64 = 1.0, flatten::Any = 1.0)
# determine the cost functional to be used
costfun = cost_functional(cfspec)
# constants and dmatrix cleanup
fcols = Base.size(dmatrix,3)
dmatrix[ abs.(dmatrix) .< 10^2 * eps() ] .= 0
dmatrix0 = deepcopy(dmatrix) # keep the original dmatrix as dmatrix0
# "flatten" dmatrix
if fcols > 1
if !isnothing(flatten)
dmatrix = dmatrix_flatten(dmatrix, flatten)[:,:,1]
end
end
dmatrix = dmatrix.^cfspec
# Initialization. Store the expanding coeffcients from matrix into a list of dictionary (inbuilt hashmap in Julia)
# The entry `coeffdict[j][(k,l)]` corresponds to the coefficient of basis-vector on level j with region k and tag l.
tag = convert(Array{Int,2},GP.tag)
tag_r = rs_to_region(GP.rs, GP.tag)
(m,n) = size(dmatrix)
coeffdict = Array{Dict{Tuple{Int,Int},Float64}}(undef, n)
for i = 1:n
coeffdict[i]= Dict{Tuple{Int,Int},Float64}((tag_r[j,i],tag[j,i]) => dmatrix[j,i] for j=1:m)
end
# TAG_tf stores the time-or-frequency information on every iteration
# COEFFDICT stores the corresponding cost-functional values.
COEFFDICT = Vector(undef, n)
TAG_tf = Vector(undef, n)
# Initialization of the first iteration
COEFFDICT[1] = coeffdict
TAG_tf_init = Array{Dict{Tuple{Int,Int},Bool}}(undef, n)
for j = 1:n
TAG_tf_init[j] = Dict(key => true for key in keys(coeffdict[j]))
end
TAG_tf[1] = TAG_tf_init
# Iterate forward. For each entry in `COEFFDICT[i+1]`, we compare two (or one) entries in 'COEFFDICT[i]' on time-direction and two (or one) on frequency-direction.
# Those two groups reprensent the same subspace. We compare the cost-functional value of them and choose the smaller one as a new entry in 'COEFFDICT[i+1]'
# 'TAG_tf[i+1]' records if frequency-direction (1) or time-direction (0) was chosen.
for i = 1:(n-1)
COEFFDICT[i+1], TAG_tf[i+1] = tf_core_new(COEFFDICT[i])
end
# Iterate backward with the existing tag_tf information to recover the best-basis.
bestbasis_tag = TAG_tf[n]
for i = (n-1):-1:1
bestbasis_tag = tf_basisrecover_new(TAG_tf[i],bestbasis_tag)
end
# Change the data structure from dictionary to matrix
#bestbasis = zeros(m,n)
#bestbasis_tag_matrix = zeros(Int,m,n)
levlist = Vector{Tuple{Int,Int}}(undef, 0)
dvec = Vector{Float64}(undef, 0)
for j = 1:n
for i = 1:m
k = tag_r[i,j]
l = tag[i,j]
if haskey(bestbasis_tag[j],(k,l))
#bestbasis_tag_matrix[i,j] = 1
push!(levlist, (i,j))
#bestbasis[i,j] = dmatrix[i,j]
push!(dvec, dmatrix[i,j])
end
end
end
BS = BasisSpec(levlist, c2f = true, description = "eGHWT Best Basis")
dvec = dmatrix2dvec(dmatrix0, GP, BS)
return dvec, BS
end # of function ghwt_tf_bestbasis
const eghwt_bestbasis = ghwt_tf_bestbasis # This should be a useful alias!
end # of module GHWT_tf_1d
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
24739,
39386,
62,
27110,
62,
16,
67,
201,
198,
201,
198,
17256,
7203,
26791,
13,
20362,
4943,
201,
198,
201,
198,
3500,
11485,
37065,
11712,
282,
11,
11485,
37065,
7841,
653,
11,
1148... | 2.102084 | 5,613 |
using Test
using StaticArrays
using IntervalArithmetic
# using Revise
using HDGElasticity
function allapprox(v1,v2)
return all(v1 .≈ v2)
end
@test HDGElasticity.linear_map_slope(1.,2.,[1.,2.],[3.,4.]) ≈ [2.,2.]
map = HDGElasticity.LineMap(0.,1.,[0.,0.],[1.,1.])
@test map.xiL ≈ 0.0
@test map.xiR ≈ 1.0
@test allapprox(map.xL,[0.,0.])
@test allapprox(map.xR,[1.,1.])
@test allapprox(map.slope,[1.,1.])
@test HDGElasticity.dimension(map) == 2
@test allapprox(map(0.),[0.,0.])
@test allapprox(map(0.5),[0.5,0.5])
map = HDGElasticity.LineMap([0.5,0.5],[1.,0.5])
@test map.xiL ≈ -1.0
@test map.xiR ≈ 1.0
@test allapprox(map.xL,[0.5,0.5])
@test allapprox(map.xR,[1.0,0.5])
@test allapprox(map.slope,[0.5,0.0]/2)
map = HDGElasticity.LineMap([0.,0.],[1.,1.]*sqrt(2))
@test allapprox(map(-1),[0.,0.])
@test allapprox(map(1),[1,1]*sqrt(2))
@test allapprox(HDGElasticity.jacobian(map),[1,1]/sqrt(2))
@test allapprox(HDGElasticity.determinant_jacobian(map),1.)
@test allapprox(HDGElasticity.inverse_jacobian(map),[1,1]*sqrt(2))
xiL = [0.,0.]
xiR = [1.,1.]
xL = [0.,0.]
xR = [2.,1.]
map = HDGElasticity.CellMap(xiL,xiR,xL,xR)
@test allapprox(map.xiL,xiL)
@test allapprox(map.xiR,xiR)
@test allapprox(map.xL,xL)
@test allapprox(map.xR,xR)
@test HDGElasticity.dimension(map) == 2
@test allapprox(map([0.5,0.5]),[1.,0.5])
@test allapprox(map([0.25,0.75]),[0.5,0.75])
@test allapprox(HDGElasticity.jacobian(map),[2.,1.])
@test allapprox(HDGElasticity.inverse_jacobian(map),[0.5,1.])
@test HDGElasticity.determinant_jacobian(map) ≈ 2.0
@test allapprox(HDGElasticity.face_determinant_jacobian(map),[2.,1.,2.,1.])
HDGElasticity.update_range!(map,[1.,1.],[2.,2.])
@test allapprox(map([0.5,0.5]),[1.5,1.5])
@test allapprox(map([0.75,0.25]),[1.75,1.25])
@test allapprox(HDGElasticity.jacobian(map),[1.,1.])
@test HDGElasticity.determinant_jacobian(map) ≈ 1.0
map = HDGElasticity.CellMap(2)
@test allapprox(map.xiL,[-1.,-1.])
@test allapprox(map.xiR,[+1.,+1.])
@test allapprox(map.xL,[-1.,-1.])
@test allapprox(map.xR,[+1.,+1.])
@test allapprox(map.slope,[1.,1.])
HDGElasticity.update_range!(map,[0.,0.],[1.,1.])
@test allapprox(map.xL,[0.,0.])
@test allapprox(map.xR,[1.,1.])
@test allapprox(map.slope,[0.5,0.5])
box = IntervalBox(1..2,3..5)
HDGElasticity.update_range!(map,box)
@test allapprox(map.xL,[1.,3.])
@test allapprox(map.xR,[2.,5.])
@test allapprox(map.slope,[0.5,1.])
map = HDGElasticity.CellMap([1.,2.],[2.,4.])
@test allapprox(map.xiL,[-1.,-1.])
@test allapprox(map.xiR,[1.,1.])
@test allapprox(map.xL,[1.,2.])
@test allapprox(map.xR,[2.,4.])
@test allapprox(HDGElasticity.jacobian(map),[0.5,1.])
@test HDGElasticity.determinant_jacobian(map) ≈ 0.5
xi = IntervalBox(-1..1,2)
x = IntervalBox(0..1,2)
map = HDGElasticity.CellMap(xi,x)
@test allapprox(map.xiL,[-1.,-1.])
@test allapprox(map.xiR,[1.,1.])
@test allapprox(map.xL,[0.,0.])
@test allapprox(map.xR,[1.,1.])
map = HDGElasticity.CellMap(IntervalBox(0..2,1..2))
@test allapprox(map.xiL,[-1.,-1.])
@test allapprox(map.xiR,[1.,1.])
@test allapprox(map.xL,[0.,1.])
@test allapprox(map.xR,[2.,2.])
fmaps = HDGElasticity.reference_cell_facemaps(2)
@test allapprox(fmaps[1](-1.0),[-1.,-1.])
@test allapprox(fmaps[1](+1.0),[+1.,-1.])
@test allapprox(fmaps[2](-1.0),[+1.,-1.])
@test allapprox(fmaps[2](+1.0),[+1.,+1.])
@test allapprox(fmaps[3](-1.0),[-1.,+1.])
@test allapprox(fmaps[3](+1.0),[+1.,+1.])
@test allapprox(fmaps[4](-1.0),[-1.,-1.])
@test allapprox(fmaps[4](+1.0),[-1.,+1.])
| [
3500,
6208,
198,
3500,
36125,
3163,
20477,
198,
3500,
4225,
2100,
3163,
29848,
198,
2,
1262,
5416,
786,
198,
3500,
5572,
8264,
75,
3477,
414,
198,
198,
8818,
477,
1324,
13907,
7,
85,
16,
11,
85,
17,
8,
198,
220,
220,
220,
1441,
47... | 1.91922 | 1,795 |
<filename>src/direct/sparseblocks.jl<gh_stars>0
struct BlockIndices
i1::UnitRange{Int}
i2::UnitRange{Int}
isdiag::Bool
end
# Remove isdiag field from comparison for dict
Base.isequal(b1::BlockIndices, b2::BlockIndices) = b1.i1 == b2.i1 && b1.i2 == b2.i2
Base.hash(b::BlockIndices, h::UInt) = hash((b.i1, b.i2), h)
"""
SparseBlocks
A type for speeding up expressions like the following:
A[rows, cols] .= data
A[rows, cols] .+= data
where `A` is a `SparseMatrixCSC` and `rows` and `cols` are `UnitRange`s. The
`data` is usually a `Matrix` but will work with any `AbstractArray` or
collection that supports broadcasting.
The key assumption is that the sparsity in `A` is known and fixed, specified by
a list of "blocks," or a chunk of consecutive rows and columns, addressable by
two `UnitRange` objects.
Usage consists of two phases: 1) initialization, where the sparsity structure is
specified, and 2) usage, where the precomputed data in this struct is used to
speed up indexing operations.
## Initialization
To specify the sparsity structure, use [`addblock!`](@ref). For example, we could
do the following:
sb = SparseBlocks(10,10) # blocks for a 10x10 sparse matrix
addblock!(sb, 1:2, 1:2) # nonzeros block in top left corner
addblock!(sb, 1:3, 8:10) # nonzeros block in top right corner
After adding all of our blocks, we initialize the sparsity structure via
[`initialize!`](@ref):
A = spzeros(10,10)
initialize!(sb, A)
After calling `initialize!`, the matrix `A` will have the correct number of nonzero
entries. The speed of the indexing operations is given by using cached conversions
from the entries of each block to the storage location in the nonzeros vector of `A`,
so it's critical that the sparsity structure of `A` does not change!
## Usage
Once initialized, we can use the cached information in this object to speed up
writing to our sparse matrix `A`. Basic usage is as simple as wrapping our indices
with an indexing call to our `SparseBlocks` object:
A[sb[1:2, 1:2]] .= randn(2,2)
Note that these indices must exactly match one of the argument pairs passed to
[`addblock`](@ref). For best performance, you can cache the [`SparseBlockIndex`](@ref)
returned from this object:
inds = sb[1:2, 1:2] # cache this somewhere for future indexing calls
A[inds] .= randn(2,2)
All of these methods avoid dynamic memory allocations and are about 5-20x faster
indexing directly into the sparse matrix.
"""
struct SparseBlocks
n::Int
m::Int
inds::Dict{BlockIndices,Matrix{Int}}
SparseBlocks(n,m) = new(n,m,Dict{BlockIndices,Matrix{Int}}())
end
"""
addblock!(blocks, rows, cols)
Add a nonzero block for the sparse matrix, spanning the block defined by
consecutive `rows` and `cols` ranges. All calls to `addblock!` should be
made before calling [`initialize!`](@ref).
"""
function addblock!(blocks::SparseBlocks, i1::UnitRange, i2::UnitRange, isdiag::Bool=false)
if i1.start < 1 || i1.stop > blocks.n || i2.start < 1 || i2.stop > blocks.m
throw(BoundsError(spzeros(blocks.n, blocks.m), (i1, i2)))
end
block = BlockIndices(i1, i2, isdiag)
if isdiag
blocks.inds[block] = zeros(Int, min(length(i1), length(i2)), 1)
else
blocks.inds[block] = zeros(Int, length(i1), length(i2))
end
blocks
end
"""
initialize!(blocks::SparseBlocks, A::SparseMatrixCSC)
Initialize the sparsity structure specified by `blocks` (via previous calls
to [`addblock!`](@ref)). Writes epsilon values to the nonzero blocks in `A`
to initialize the data storage for the sparse matrix. The indices into this
storage is computed and stored in `blocks`.
"""
function initialize!(blocks::SparseBlocks, A::SparseMatrixCSC{T}) where T
n,m = blocks.n, blocks.m
if size(A) != (n,m)
throw(DimensionMismatch("Dimension of sparse matrix doesn't match expected size of ($n,$m)."))
end
@assert size(A) == (blocks.n, blocks.m)
for block in keys(blocks.inds)
if block.isdiag
for (i,j) in zip(block.i1, block.i2)
A[i,j] = eps(T)
end
else
A[block.i1, block.i2] .= eps(T)
end
end
inds = blocks.inds
for (block,inds) in pairs(blocks.inds)
rows = block.i1
cols = block.i2
n = length(rows)
if block.isdiag
for (j,(r,c)) in enumerate(zip(rows, cols))
nzind = getnzind(A, r, c)
if !isempty(nzind)
inds[j] = nzind[1]
end
end
else
for (j,col) in enumerate(cols)
nzind = getnzind(A, rows[1], col)
if !isempty(nzind)
inds[:,j] .= nzind[1] - 1 .+ (1:n)
end
end
end
end
blocks
end
# Get the index into the nonzero vector for the given row and column
function getnzind(A,row,col)
rows = view(A.rowval, nzrange(A,col))
istart = searchsorted(rows, row) .+ A.colptr[col] .- 1
return istart
end
"""
SparseBlockIndex
A custom index for sparse arrays with a fixed sparsity structure, especially for
those whose nonzeros appear as dense blocks. This is created by indexing
into a [`SparseBlocks`] object, using indices to one of the cached blocks.
This object can then be used as a index for those same indices, creating a
[`SparseBlockView`](@ref) object that efficiently maps cartesian indices
into the nonzeros vector of the sparse matrix.
"""
struct SparseBlockIndex
block::BlockIndices
nzinds::Matrix{Int}
end
function Base.getindex(blocks::SparseBlocks, i1::UnitRange, i2::UnitRange)
block0 = BlockIndices(i1, i2, false)
block = getkey(blocks.inds, block0, nothing)
if isnothing(block)
throw(KeyError((i1, i2)))
end
SparseBlockIndex(block, blocks.inds[block])
end
"""
SparseBlockView
A custom view into a sparse matrix with known sparsity, and whose nonzero entries
appears as dense blocks. Can be used a normal array, where operations on the elements
write and read directly from the nonzeros vector of the sparse matrix.
"""
struct SparseBlockView{Tv,Ti} <: AbstractMatrix{Tv}
data::SparseMatrixCSC{Tv,Ti}
block::SparseBlockIndex
end
# Needed to support expressions like A[B] .= data
Broadcast.dotview(A::AbstractSparseMatrix, block::SparseBlockIndex) = SparseBlockView(A, block)
# Define this type when indexing into a SparseMatrixCSC with a SparseBlockIndex
Base.getindex(A::AbstractSparseMatrix, block::SparseBlockIndex) = SparseBlockView(A, block)
# Array interface
Base.size(B::SparseBlockView) = size(B.block.nzinds)
Base.getindex(B::SparseBlockView, index) = getindex(B.data.nzval, B.block.nzinds[index])
Base.getindex(B::SparseBlockView, I::Vararg{Int,2}) = getindex(B.data.nzval, B.block.nzinds[I...])
Base.setindex!(B::SparseBlockView, v, index) = setindex!(B.data.nzval, v, B.block.nzinds[index])
Base.setindex!(B::SparseBlockView, v, I::Vararg{Int,2}) = setindex!(B.data.nzval, v, B.block.nzinds[I...])
Base.IndexStyle(::SparseBlockView) = IndexLinear()
# Broadcasting interface
struct SparseViewStyle <: Broadcast.BroadcastStyle end
Base.BroadcastStyle(::Type{<:SparseBlockView}) = SparseViewStyle()
# Always use the new broadcasting style over others
Base.BroadcastStyle(::SparseViewStyle, ::Broadcast.BroadcastStyle) = SparseViewStyle()
# Handle generic expressions like B .= f.(arg)
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{Broadcast.DefaultMatrixStyle})
for i in eachindex(bc)
B[i] = bc[i]
end
B
end
# Handle generic expressions like B .= f.(B, g.(args...))
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{SparseViewStyle})
for i in eachindex(bc)
B[i] = bc.f(B[i], bc[i])
end
B
end
# Specialization of B .= data::AbstractMatrix
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{Broadcast.DefaultMatrixStyle,Nothing,typeof(identity),<:Tuple{<:AbstractMatrix}})
data = bc.args[1]
for i in eachindex(B)
B[i] = data[i]
end
B
end
# Specialization of B .= f.(B, data::AbstractMatrix)
# which includes B .+= data::AbstractMatrix and variants
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{SparseViewStyle,Nothing,<:Any,<:Tuple{<:SparseBlockView,<:AbstractArray}})
data = bc.args[2]
for i in eachindex(B)
B[i] = bc.f(B[i], data[i])
end
B
end
# Specialization of B .= D::Diagonal
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{<:LinearAlgebra.StructuredMatrixStyle{<:Diagonal}, Nothing, typeof(identity), <:Tuple{<:Diagonal}})
D = bc.args[1]
if B.block.block.isdiag
for i in eachindex(B)
B[i] = D.diag[i]
end
else
for i = 1:minimum(size(B))
B[i,i] = D.diag[i]
end
end
end
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{SparseViewStyle,Nothing,<:Any,<:Tuple{<:SparseBlockView,<:Diagonal}})
D = bc.args[2]
f = bc.f
if B.block.block.isdiag
for i in eachindex(B)
B[i] = f(B[i], D.diag[i])
end
else
for i = 1:minimum(size(B))
B[i,i] = f(B[i], D.diag[i])
end
end
end
# Specialize B' .= bc
function Broadcast.materialize!(B::Adjoint{T, <:SparseBlockView{T}}, bc::Broadcast.Broadcasted{Broadcast.DefaultMatrixStyle,Nothing,typeof(identity),<:Tuple{<:AbstractMatrix}}) where T
data = bc.args[1]
for i in eachindex(bc)
it = CartesianIndex(i[2], i[1])
B.parent[it] = data[i]
end
B
end
# Specialize B = UpperTriangular(data)
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{<:LinearAlgebra.StructuredMatrixStyle{<:UpperTriangular}})
for r = 1:size(B,1)
for c = r:size(B,2)
i = CartesianIndex(r,c)
B[r,c] = bc[i]
end
end
end
# Specialize B = LowerTriangular(data)
function Broadcast.materialize!(B::SparseBlockView, bc::Broadcast.Broadcasted{<:LinearAlgebra.StructuredMatrixStyle{<:LowerTriangular}})
for c = 1:size(B,2)
for r = c:size(B,1)
i = CartesianIndex(r,c)
B[r,c] = bc[i]
end
end
end | [
27,
34345,
29,
10677,
14,
12942,
14,
82,
29572,
27372,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
198,
7249,
9726,
5497,
1063,
198,
220,
220,
220,
1312,
16,
3712,
26453,
17257,
90,
5317,
92,
198,
220,
220,
220,
1312,
17,
3712,
26... | 2.481544 | 4,172 |
using OffsetArrays
using StaticArrays
using DataStructures
export pgbf, cgbf, contract, addbf!, PGBF, CGBF, build_basis,eri_fetcher, Shell, Basis
export m2ao, shell_indices,nao
"""
PGBF(expn,xyz,I,J,K,norm)
Create a primitive Gaussian basis function
g(x,y,z) = norm * (x-x0)^I (y-y0)^J (z-z0)^K exp(-expn*r^2)
The function parameters `xyz` correspond to [x0,y0,z0].
"""
mutable struct PGBF
expn::Float64
xyz::SVector{3,Float64}
I::Int64
J::Int64
K::Int64
norm::Float64
end
"""
pgbf(expn,x=0,y=0,z=0,I=0,J=0,K=0,norm=1)
Helper function to create a normalized PGBF with some optional
defaults set.
"""
function pgbf(expn,x=0.0,y=0.0,z=0.0,I=0,J=0,K=0,norm=1)
p = PGBF(expn,SA[x,y,z],I,J,K,norm)
normalize!(p)
return p
end
function amplitude(bf::PGBF,x,y,z)
dx,dy,dz = dxyz = bf.xyz-SA[x,y,z]
r2 = dist2(dxyz)
return bf.norm*(dx^bf.I)*(dy^bf.J)*(dz^bf.K)*exp(-bf.expn*r2)
end
(bf::PGBF)(x,y,z) = amplitude(bf::PGBF,x,y,z)
function normalize!(pbf::PGBF)
pbf.norm /= sqrt(overlap(pbf,pbf))
end
"""
CGBF(xyz,I,J,K,norm,[pbgfs],[coefs])
Create a contracted Gaussian basis function made of
the functions in [pgbfs] with coefficients [coefs].
Also track the origin `xyz` and powers `I,J,K`.
"""
mutable struct CGBF
xyz::SVector{3,Float64}
I::Int64
J::Int64
K::Int64
norm::Float64
pgbfs::Vector{PGBF}
coefs::Vector{Float64}
end
"""
cgbf(expn,x=0,y=0,z=0,I=0,J=0,K=0,norm=1)
Helper function to create a CGBF with optional defaults.
"""
cgbf(x=0.0,y=0.0,z=0.0,I=0,J=0,K=0) = CGBF(SA[x,y,z],I,J,K,1.0,[],[])
amplitude(bf::CGBF,x,y,z) = bf.norm*sum(c*amplitude(pbf,x,y,z) for (c,pbf) in primitives(bf))
(bf::CGBF)(x,y,z) = amplitude(bf::CGBF,x,y,z)
function normalize!(bf::CGBF)
bf.norm /= sqrt(overlap(bf,bf))
end
primitives(a::CGBF) = zip(a.coefs,a.pgbfs)
function addbf!(cbf::CGBF,expn,coef)
Base.push!(cbf.pgbfs,pgbf(expn,cbf.xyz...,cbf.I,cbf.J,cbf.K))
Base.push!(cbf.coefs,coef)
normalize!(cbf)
end
contract(f,a::CGBF,b::CGBF) = a.norm*b.norm*sum(ca*cb*f(abf,bbf) for (ca,abf) in primitives(a) for (cb,bbf) in primitives(b))
function contract(f,a::CGBF,b::CGBF,c::CGBF,d::CGBF)
s = 0
for (ca,abf) in primitives(a)
for (cb,bbf) in primitives(b)
for (cc,cbf) in primitives(c)
for (cd,dbf) in primitives(d)
s += ca*cb*cc*cd*f(abf,bbf,cbf,dbf)
end
end
end
end
return a.norm*b.norm*c.norm*d.norm*s
end
function shells(atoms::Vector{Atom},name="sto3g")
data = basis_data[name]
shs = Shell[]
for atom in atoms
for (sym,primlist) in data[atom.atno]
L = lvalue[sym]
expns = [expn for (expn,coef) in primlist]
coefs = [coef for (expn,coef) in primlist]
bfs = CGBF[]
for (I,J,K) in shell_indices[L]
cbf = cgbf(atom.xyz...,I,J,K)
push!(bfs,cbf)
for (expn,coef) in zip(expns,coefs)
addbf!(cbf,expn,coef)
end
end
push!(shs,Shell(atom.xyz,lvalue[sym],expns,coefs,bfs))
end
end
return shs
end
function build_basis(atoms::Vector{Atom},name="sto3g")
shs = shells(atoms,name)
bfs = CGBF[]
ishs = Int[]
mshs = Int[]
for (ish,sh) in enumerate(shs)
for (msh,(I,J,K)) in enumerate(shell_indices[sh.L])
cbf = cgbf(sh.xyz...,I,J,K)
push!(bfs,cbf)
push!(ishs,ish)
push!(mshs,msh)
for (expn,coef) in zip(sh.expns,sh.coefs)
addbf!(cbf,expn,coef)
end
end
end
return Basis(bfs,shs,ishs,mshs)
end
"""
Shell(xyz,L,expns,coeffs)
Structure for a basis function shell, containing multiple
CGBFs of different angular momenta.
"""
mutable struct Shell
xyz::SVector{3,Float64}
L::Int
expns::Vector{Float64}
coefs::Vector{Float64}
cgbfs::Vector{CGBF}
end
nbf(sh::Shell) = length(shell_indices(sh.L))
function pgbfs(sh::Shell)
x,y,z = sh.xyz
pgbfs = PGBF[]
for (I,J,K) in shell_indices[sh.L]
for (expn,coef) in zip(sh.expns,sh.coefs)
push!(pgbfs,pgbf(expn,x,y,z,I,J,K))
end
end
return pgbfs
end
"""
Basis(cgbfs,shells,ishell,mshell)
Structure to hold a basis set, with info about shells and other data
"""
mutable struct Basis # subset of AbstractVector{CGBF}?
cgbfs::Vector{CGBF}
shells::Vector{Shell}
ishell::Vector{Int64} # Which shell corresponds to bf i
mshell::Vector{Int64} # Which m-value (px,dxy, etc.) corresponds to bf i
end
Base.size(b::Basis) = (length(b.cgbfs),)
Base.length(b::Basis) = length(b.cgbfs)
Base.iterate(b::Basis,i::Int) = iterate(b.cgbfs,i)
Base.iterate(b::Basis) = iterate(b.cgbfs)
Base.getindex(b::Basis, i::Int) = b.cgbfs[i]
nbf(b::Basis) = length(b.cgbfs)
nshells(b::Basis) = length(b.shells)
"""
eri_fetcher
Compute all of the required ijkl terms that go into an ERI
record, and compute the required calls to hrr, and how to
unpack the results into the record.
eri_fetcher returns a dictionary such that the integral structure
may be formed via:
```
fetcher[ishell,jshell,kshell,lshell] = (ijkl,hi,hj,hk,hl)
hrrs = hrr(ishell,jshell,kshell,lshell)
ints[ijkl] = hrrs[hi,hj,hk,hl]
```
"""
function eri_fetcher(bfs::Basis)
fetcher = DefaultOrderedDict{NTuple{4, Int}, Vector{NTuple{5, Int}}}(Vector{NTuple{5, Int}}[])
for (index,ijkl) in enumerate(iiterator(length(bfs)))
i,j,k,l = ijkl
li,mi = bfs.ishell[i],bfs.mshell[i]
lj,mj = bfs.ishell[j],bfs.mshell[j]
lk,mk = bfs.ishell[k],bfs.mshell[k]
ll,ml = bfs.ishell[l],bfs.mshell[l]
push!(fetcher[li,lj,lk,ll],(index,mi,mj,mk,ml))
end
return fetcher
end
function eri_fetcher(shs::Vector{Shell})
fetcher = DefaultOrderedDict{NTuple{4, Int}, Vector{NTuple{5, Int}}}(Vector{NTuple{5, Int}}[])
nsh = length(shs)
ijk2lm = make_ijk2lm()
i = 0
index = 0
for (ish,shi) in enumerate(shs)
shi = shs[ish]
for ibf in shi.cgbfs
i += 1
mi = ijk2lm[ibf.I,ibf.J,ibf.K][2]
j = 0
for (jsh,shj) in enumerate(take(shs,ish))
for jbf in shj.cgbfs
j += 1
if j > i break end
mj = ijk2lm[jbf.I,jbf.J,jbf.K][2]
k = 0
for (ksh,shk) in enumerate(shs)
for kbf in shk.cgbfs
k += 1
mk = ijk2lm[kbf.I,kbf.J,kbf.K][2]
l = 0
for (lsh,shl) in enumerate(take(shs,ksh))
@inbounds for lbf in shl.cgbfs
l += 1
if l > k break end
if k*l > i*j break end
ml = ijk2lm[lbf.I,lbf.J,lbf.K][2]
index += 1
push!(fetcher[ish,jsh,ksh,lsh],(index,mi,mj,mk,ml))
end # lbf
end # lsh
end # kbf
end # ksh
end # jbf
end # jsh
end # ibf
end # ish
return fetcher
end
# shell_indices map from a shell l-value to the Cartesian version of m-values that are the
# powers of the Cartesian Gaussian basis functions.
#
# If desired, we can also invert shell_indices to map IJK triplets to l,m pairs:
# IJK2lm = Dict(IJK =>(l,m) for l in 0:4 for (m,IJK) in enumerate(shell_indices[l]))
const shell_indices = Dict(
0 => [MVector(0,0,0)], # 1
1 => [MVector(1,0,0),MVector(0,1,0),MVector(0,0,1)], # 3
2 => [MVector(2,0,0),MVector(1,1,0),MVector(1,0,1),MVector(0,2,0),MVector(0,1,1),MVector(0,0,2)],
3 => [MVector(3,0,0),MVector(2,1,0),MVector(2,0,1),
MVector(1,2,0),MVector(1,0,2),MVector(1,1,1),
MVector(0,3,0),MVector(0,2,1),MVector(0,1,2),MVector(0,0,3)], # 10
4 => [MVector(4,0,0),MVector(3,1,0),MVector(3,0,1),MVector(2,2,0),MVector(2,1,1),MVector(2,0,2),
MVector(1,3,0),MVector(1,2,1),MVector(1,1,2),MVector(1,0,3),
MVector(0,4,0),MVector(0,3,1),MVector(0,2,2),MVector(0,1,3),MVector(0,0,4)] # 15
)
llabel = Dict(0=>"s",1=>"p",2=>"d",3=>"f",4=>"g",5=>"h")
lvalue = merge(Dict((v,k) for (k,v) in llabel),Dict((uppercase(v),k) for (k,v) in llabel))
bflabel(bf) = llabel[bf.I+bf.J+bf.K]*bfpow("x",bf.I)*bfpow("y",bf.J)*bfpow("z",bf.K)
function bfpow(s,j)
if j == 0
return ""
elseif j == 1
return s
end
return "$s$j"
end
"make_m2ao - Make map between a sequential list of (mx,my,mz) values and ao indices"
function make_m2ao(lmax=4)
m2ao = Dict{Vector{Int64}, Int64}()
iao = 0
for l in 0:lmax
for ms in shell_indices[l]
iao += 1
m2ao[ms] = iao
end
end
return m2ao
end
function make_ao2m(lmax=4)
ao2m = MVector{3, Int64}[]
for l in 0:lmax
for ms in shell_indices[l]
push!(ao2m,ms)
end
end
return ao2m
end
const m2ao = make_m2ao()
const ao2m = make_ao2m()
"make_nao - Number of AOs for system with l shells"
make_nao(l) = sum(length(shell_indices[i]) for i in 0:l)
const nao = OffsetArray([make_nao(l) for l in 0:4],0:4)
# Data functions and tables over basis functions
# todo: keep a constant of maxao or something
# and use it to construct tables
function make_shift_index()
n = length(ao2m)
shift_index = zeros(Int,n,3)
for a in 1:n
m = ao2m[a]
for i in 1:3
if m[i] == 0
shift_index[a,i] = 0
else
mm = copy(m)
mm[i] -= 1
am = m2ao[mm]
shift_index[a,i] = am
end
end
end
return shift_index
end
const shift_index = make_shift_index()
function make_shift_direction()
n = length(ao2m)
shift_direction = zeros(Int,n)
for a in 1:n
shift_direction[a] = argmax(ao2m[a])
end
return shift_direction
end
const shift_direction = make_shift_direction()
function make_shell_number()
n = length(ao2m)
shell_number = zeros(Int,n)
for a in 1:n
shell_number[a] = sum(ao2m[a])
end
return shell_number
end
const shell_number = make_shell_number()
function make_shift_index_plus()
n = length(ao2m)
maxsh = shell_number[n]
shift_index = zeros(Int,n,3)
for a in 1:n
m = ao2m[a]
for i in 1:3
if sum(m) == maxsh
shift_index[a,i] = 0
else
mm = copy(m)
mm[i] += 1
am = m2ao[mm]
shift_index[a,i] = am
end
end
end
return shift_index
end
const shift_index_plus = make_shift_index_plus()
function make_ijk2lm(maxl=4)
ijk2lm = Dict{NTuple{3,Int},NTuple{2,Int}}()
for l in 0:maxl
for (m,(i,j,k)) in enumerate(shell_indices[l])
ijk2lm[i,j,k] = (l,m)
end
end
return ijk2lm
end
| [
3500,
3242,
2617,
3163,
20477,
198,
3500,
36125,
3163,
20477,
198,
3500,
6060,
44909,
942,
198,
39344,
23241,
19881,
11,
269,
70,
19881,
11,
2775,
11,
751,
19881,
28265,
350,
4579,
37,
11,
327,
4579,
37,
11,
1382,
62,
12093,
271,
11,
... | 1.779658 | 6,322 |
export Domain, SegmentDomain, tocanonical, fromcanonical, fromcanonicalD, ∂
export chebyshevpoints, fourierpoints, isambiguous, arclength
export components, component, ncomponents
# add indexing for all spaces, not just DirectSumSpace
# mimicking scalar vs vector
# prectype gives the precision, including for Vec
prectype(::Type{D}) where {D<:Domain} = float(eltype(eltype(D)))
prectype(d::Domain) = prectype(typeof(d))
#TODO: bivariate AnyDomain
struct AnyDomain <: Domain{UnsetNumber} end
struct EmptyDomain <: Domain{Nothing} end
isambiguous(::AnyDomain) = true
dimension(::AnyDomain) = 1
complexlength(::AnyDomain) = NaN
arclength(::AnyDomain) = NaN
arclength(::EmptyDomain) = false
arclength(::DomainSets.EmptySpace) = false
isempty(::AnyDomain) = false
reverseorientation(a::Union{AnyDomain,EmptyDomain}) = a
canonicaldomain(a::Union{AnyDomain,EmptyDomain}) = a
indomain(x::Domain,::EmptyDomain) = false
convert(::Type{Domain{T}}, ::AnyDomain) where T = Domain(T)
union(::AnyDomain, d::Domain) = d
union(d::Domain, ::AnyDomain) = d
union(::EmptyDomain, ::EmptyDomain) = EmptyDomain()
union(::EmptyDomain, a::Domain) = a
union(a::Domain, ::EmptyDomain) = a
##General routines
isempty(::EmptyDomain) = true
## Interval DomainSets
abstract type SegmentDomain{T} <: Domain{T} end
abstract type AbstractSegment{T} <: SegmentDomain{T} end
const IntervalOrSegment{T} = Union{AbstractInterval{T}, AbstractSegment{T}}
const IntervalOrSegmentDomain{T} = Union{AbstractInterval{T}, SegmentDomain{T}}
canonicaldomain(d::IntervalOrSegmentDomain) = ChebyshevInterval{real(prectype(d))}()
domainscompatible(a,b) = domainscompatible(domain(a),domain(b))
domainscompatible(a::Domain,b::Domain) = isambiguous(a) || isambiguous(b) ||
isapprox(a,b)
##TODO: Should fromcanonical be fromcanonical!?
#TODO consider moving these
leftendpoint(d::IntervalOrSegmentDomain{T}) where {T} = fromcanonical(d,-one(eltype(T)))
rightendpoint(d::IntervalOrSegmentDomain{T}) where {T} = fromcanonical(d,one(eltype(T)))
indomain(x,::AnyDomain) = true
function indomain(x,d::SegmentDomain)
T=float(real(prectype(d)))
y=tocanonical(d,x)
ry=real(y)
iy=imag(y)
sc=norm(fromcanonicalD(d,ry<-1 ? -one(ry) : (ry>1 ? one(ry) : ry))) # scale based on stretch of map on projection to interal
dy=fromcanonical(d,y)
# TODO: use isapprox once keywords are fast
((isinf(norm(dy)) && isinf(norm(x))) || norm(dy-x) ≤ 1000eps(T)*max(norm(x),1)) &&
-one(T)-100eps(T)/sc ≤ ry ≤ one(T)+100eps(T)/sc &&
-100eps(T)/sc ≤ iy ≤ 100eps(T)/sc
end
ncomponents(s::Domain) = 1
components(s::Domain) = [s]
function components(s::Domain,k)
k ≠ 1 && throw(BoundsError())
s
end
issubcomponent(a::Domain,b::Domain) = a in components(b)
##### canoncial
"""
canonicaldomain(d)
returns a domain which we map to for operations. For example,
the canonical domain for an interval [a,b] is [-1,1]
"""
function canonicaldomain end
"""
tocanonical(d, x)
maps the point `x` in `d` to a point in `canonical(d,x)`
"""
function tocanonical end
issubset(a::Domain,b::Domain) = a==b
## conveninece routines
ones(d::Domain) = ones(prectype(d),Space(d))
zeros(d::Domain) = zeros(prectype(d),Space(d))
function commondomain(P::AbstractVector)
ret = AnyDomain()
for op in P
d = domain(op)
@assert ret == AnyDomain() || d == AnyDomain() || ret == d
if d != AnyDomain()
ret = d
end
end
ret
end
commondomain(P::AbstractVector,g::AbstractArray{T}) where {T<:Number} = commondomain(P)
commondomain(P::AbstractVector,g) = commondomain([P;g])
domain(::Number) = AnyDomain()
## rand
rand(d::IntervalOrSegmentDomain,k...) = fromcanonical.(Ref(d),2rand(k...)-1)
checkpoints(d::IntervalOrSegmentDomain) = fromcanonical.(Ref(d),[-0.823972,0.01,0.3273484])
## boundary
boundary(d::SegmentDomain) = [leftendpoint(d),rightendpoint(d)] #TODO: Points domain
## map domains
# we auto vectorize arguments
tocanonical(d::Domain,x,y,z...) = tocanonical(d,Vec(x,y,z...))
fromcanonical(d::Domain,x,y,z...) = fromcanonical(d,Vec(x,y,z...))
mappoint(d1::Domain,d2::Domain,x...) = fromcanonical(d2,tocanonical(d1,x...))
invfromcanonicalD(d::Domain,x...) = 1/fromcanonicalD(d,x...)
## domains in higher dimensions
## sorting
# we sort spaces lexigraphically by default
for OP in (:<,:(<=),:>,:(>=),:(isless))
@eval $OP(a::Domain,b::Domain)=$OP(string(a),string(b))
end
## Other special domains
struct PositiveIntegers <: Domain{Int} end
struct Integers <: Domain{Int} end
const ℕ = PositiveIntegers()
const ℤ = Integers()
| [
198,
198,
39344,
20021,
11,
1001,
5154,
43961,
11,
284,
49883,
605,
11,
422,
49883,
605,
11,
422,
49883,
605,
35,
11,
18872,
224,
198,
39344,
1125,
48209,
258,
85,
13033,
11,
46287,
5277,
13033,
11,
318,
4131,
29709,
11,
610,
565,
3... | 2.579621 | 1,796 |
cluster = [1 1 1 1; 2 2 3 3; 4 4 5 5]
X = Array(reshape(1:24, 2, 3, 4))
@testset "pool" begin
@testset "GlobalPool" begin
glb_cltr = [1 1 1 1; 1 1 1 1; 1 1 1 1]
p = GlobalPool(:add, 3, 4)
@test p(X) == sumpool(glb_cltr, X)
end
@testset "LocalPool" begin
p = LocalPool(:add, cluster)
@test p(X) == sumpool(cluster, X)
end
@testset "TopKPool" begin
N = 10
k, in_channel = 4, 7
X = rand(in_channel, N)
for T = [Bool, Float64]
adj = rand(T, N, N)
p = TopKPool(adj, k, in_channel)
@test eltype(p.p) === Float32
@test size(p.p) == (in_channel,)
@test eltype(p.Ã) === T
@test size(p.Ã) == (k, k)
y = p(X)
@test size(y) == (in_channel, k)
end
end
end
| [
565,
5819,
796,
685,
16,
352,
352,
352,
26,
362,
362,
513,
513,
26,
604,
604,
642,
642,
60,
198,
55,
796,
15690,
7,
3447,
1758,
7,
16,
25,
1731,
11,
362,
11,
513,
11,
604,
4008,
198,
198,
31,
9288,
2617,
366,
7742,
1,
2221,
... | 1.737705 | 488 |
<reponame>lytemar/Julia-1.0-Programming-Cookbook
blockvalid(x, v) = count(isequal(v), x) ≤ 1
function backtrack!(x)
pos = findfirst(isequal(0), x)
isa(pos, Nothing) && return true
iloc = 3div(pos[1]-1, 3) .+ (1:3)
jloc = 3div(pos[2]-1, 3) .+ (1:3)
for k in 1:9
x[pos] = k
blockvalid(view(x, pos[1], :), k) || continue
blockvalid(view(x, :, pos[2]), k) || continue
blockvalid(view(x, iloc, jloc), k) || continue
backtrack!(x) && return true
end
x[pos] = 0
return false
end
function ssolve(lines, i)
t = [lines[10i-j][k] - '0' for j in 8:-1:0, k in 1:9]
backtrack!(t)
sum([100, 10, 1] .* t[1, 1:3])
end
lines = readlines("p096_sudoku.txt")
@time sum(ssolve(lines, i) for i in 1:50)
@time sum(ssolve(lines, i) for i in 1:50)
| [
27,
7856,
261,
480,
29,
306,
11498,
283,
14,
16980,
544,
12,
16,
13,
15,
12,
15167,
2229,
12,
28937,
2070,
198,
9967,
12102,
7,
87,
11,
410,
8,
796,
954,
7,
786,
13255,
7,
85,
828,
2124,
8,
41305,
352,
198,
198,
8818,
736,
116... | 2.002475 | 404 |
abstract type RegressionType end
struct OLS
robust::Bool
function OLS(robust=true)
new(robust)
end
end
| [
397,
8709,
2099,
3310,
2234,
6030,
886,
198,
198,
7249,
440,
6561,
198,
220,
220,
220,
12373,
3712,
33,
970,
628,
220,
220,
220,
2163,
440,
6561,
7,
22609,
436,
28,
7942,
8,
198,
220,
220,
220,
220,
220,
220,
220,
649,
7,
22609,
... | 2.309091 | 55 |
geneCols = Dict{String,Symbol}(
"aliases" => :GeneSymbolAlias,
"symbol" => :OfficialGeneSymbol,
"region" => :GeneRegion,
"regionStart" => :GeneStart,
"regionEnd" => :GeneEnd,
"id" => :EnsemblGeneId,
"version" => :EnsemblGeneVersion,
"biotype" => :EnsemblGeneBiotype,
"name" => :GeneName
);
transcriptCols = Dict{String,Symbol}(
"region" => :TranscriptRegion,
"regionStart" => :TranscriptStart,
"regionEnd" => :TranscriptEnd,
"id" => :EnsemblTranscriptId,
"version" => :EnsemblTranscriptVersion,
"biotype" => :EnsemblTranscriptBiotype
);
proteinCols = Dict{String,Symbol}(
"id" => :EnsemblProteinId,
"version" => :EnsemblProteinVersion
);
elTypeDict = Dict{String, Union}(
# "aliases" => Vector{String},
"aliases" => Union{String, Missing},
"symbol" => Union{String, Missing},
"region" => Union{String, Missing},
"regionStart" => Union{Int, Missing},
"regionEnd" => Union{Int, Missing},
"id" => Union{String, Missing},
"version" => Union{Int, Missing},
"biotype" => Union{String, Missing},
"name" => Union{String, Missing}
);
function parseGngJson(json::Array{Any,1})
#########################
# Count number or rows and create columns
# In the case of the columns only look at the first entry
# First four columns are inputDB, inputID, targetID and targetDB
nCols = 4;
colNames = Symbol["InputId", "InputSourceDb", "TargetId", "TargetDb"];
colNamesUnstack = Symbol["InputId", "InputSourceDb"];
colElTypes = Union[Union{String, Missing}, Union{String, Missing}, Union{String, Missing}, Union{String, Missing}];
# Gene column names and types
map(i->haskey(geneCols,i) && (push!(colNames, geneCols[i]),
push!(colNamesUnstack, geneCols[i]),
push!(colElTypes, elTypeDict[i])), keys(json[1]["row"][1]["gene"]))
# Transcript:
# Dict{String,Any} with 4 entries:
# "peptide" => Dict{String,Any}(Pair{String,Any}("id", "ENSP00000381185"),Pair{String,Any}("targetIds", Any[Dict{String,Any}(Pair{String,Any}("dbName", "UniProtKB/TrEMBL"),Pair{String,Any}("id", "A0A024R2B3"))]),Pair{String,A…
# "id" => "ENST00000398117"
# "targetIds" => Any[]
# "region" => "18"
map(i->haskey(transcriptCols,i) && (push!(colNames, transcriptCols[i]),
push!(colNamesUnstack, transcriptCols[i]),
push!(colElTypes, elTypeDict[i])), keys(json[1]["row"][1]["gene"]["transcript"][1]))
# number of cols minus 1 because of targetIds, e.g.:
# Dict{String,Any} with 3 entries:
# "id" => "ENSP00000381185"
# "targetIds" => Any[Dict{String,Any}(Pair{String,Any}("dbName", "UniProtKB/TrEMBL"),Pair{String,Any}("id", "A0A024R2B3"))]
# "version" => 1
map(i->haskey(proteinCols,i) && (push!(colNames, proteinCols[i]),
push!(colNamesUnstack, proteinCols[i]),
push!(colElTypes, elTypeDict[i])), keys(json[1]["row"][1]["gene"]["transcript"][1]["peptide"]))
##################
# Rows
nRows = 0;
for entry in json
# For each gene entry
# nRows += 1;
# Number of external gene identifiers
gene = entry["row"][1]["gene"];
nRows += (length(gene["targetIds"]) * length(gene["transcript"]));
for transcript in gene["transcript"]
# Transcript:
# Dict{String,Any} with 4 entries:
# "peptide" => Dict{String,Any}(Pair{String,Any}("id", "ENSP00000381185"),Pair{String,Any}("targetIds", Any[Dict{String,Any}(Pair{String,Any}("dbName", "UniProtKB/TrEMBL"),Pair{String,Any}("id", "A0A024R2B3"))]),Pair{String,A…
# "id" => "ENST00000398117"
# "targetIds" => Any[]
# "region" => "18"
nRows += length(transcript["targetIds"]);
# Peptide:
# Dict{String,Any} with 3 entries:
# "id" => "ENSP00000381185"
# "targetIds" => Any[Dict{String,Any}(Pair{String,Any}("dbName", "UniProtKB/TrEMBL"),Pair{String,Any}("id", "A0A024R2B3"))]
# "version" => 1
nRows += length(transcript["peptide"]["targetIds"]);
end
# println(entry["row"][1])
end
nRows
x = DataFrames.DataFrame(
colElTypes,
colNames,
nRows
# 100
);
lowG = 1;
highG = 1;
lowT = 1;
highT = 1;
for entry in json
# For each gene entry
lowT = lowG;
highT = lowT;
highG = lowG;
# Number of external gene identifiers
gene = entry["row"][1]["gene"];
externalIds = String[];
externalDbs = String[];
# In the case of external IDs add entries to arrays
!isempty(gene["targetIds"]) && map(i->(push!(externalIds, i["id"]), push!(externalDbs, i["dbName"])), gene["targetIds"]);
externalGeneIds = copy(externalIds);
externalGeneDbs = copy(externalDbs);
# nRows += length(gene["targetIds"])
for transcript in gene["transcript"]
# In the case of external IDs add entries to arrays
!isempty(transcript["targetIds"]) && map(i->(push!(externalIds, i["id"]), push!(externalDbs, i["dbName"])), transcript["targetIds"]);
peptide = transcript["peptide"];
!isempty(peptide["targetIds"]) && map(i->(push!(externalIds, i["id"]), push!(externalDbs, i["dbName"])), peptide["targetIds"]);
highT += (length(externalIds) - 1);
# for
# map(i->haskey(geneCols,i) && (push!(colNames, geneCols[i]), push!(colElTypes, elTypeDict[i])), keys(json[1]["row"][1]["gene"]))
# for epEntry in transcript["peptide"]["targetIds"]
map(key->haskey(transcriptCols,key) && (x[transcriptCols[key]][lowT:highT] = transcript[key]), keys(transcript))
map(key->haskey(proteinCols,key) && (x[proteinCols[key]][lowT:highT] = peptide[key]), keys(peptide))
# map(key->haskey(geneCols,key) && (println("col: $(geneCols[key]) = $(gene[key])")), keys(gene))
# map(key->haskey(geneCols,key) && (println("$(collect(lowT:highT)) -> $(fill(gene[key], high))")), keys(gene))
x[:TargetId][lowT:highT] = externalIds;
x[:TargetDb][lowT:highT] = externalDbs;
externalIds = copy(externalGeneIds);
externalDbs = copy(externalGeneDbs);
# x[low,:EnsemblGeneId] = gene["id"];
# x[low,:EnsemblTranscriptId] = transcript["id"];
# x[low,:EnsemblProteinId] = peptide["id"];
# println("[$i] : $externalId, $externalDb")
# println.("enspId: $(transcript["peptide"]["id"]) -> $(transcript["peptide"]["targetIds"])")
lowT = highT + 1;
highT = lowT;
end
highG = highT-1;
# Fill in gene information
for (key, value) in gene
if haskey(geneCols, key) && !isempty(value)
val = value;
if key == "aliases"
val = join(value, ",");
end
x[geneCols[key]][lowG:highG] = val;
end
end
# map(key->haskey(geneCols,key) && (x[geneCols[key]][lowG:highG] = fill(join(gene[key], ","), (highG - lowG + 1))), keys(gene));
x[:InputId][lowG:highG] = entry["row"][1]["inputId"];
x[:InputSourceDb][lowG:highG] = entry["row"][1]["inputDb"];
lowG = highG + 1;
end
x
return x;
end
function unstackDf(x::D, keyCols::S = Vector{Symbol}(), keyColsElType::T = Vector{DataType}()) where {
D<:DataFrames.DataFrame,
S<:Vector{Symbol},
T<:Vector{DataType}}
if isempty(keyCols)
keyCols = names(x)[map(i->!in(i,["TargetId", "TargetDb"]), collect(String,names(x)))];
keyColsElType = DataFrames.eltypes(x)[map(i->!in(i,["TargetId", "TargetDb"]), collect(String,names(x)))];
elseif length(keyCols != length(keyColsElType))
error("keyCols and keyColsElType have to be of same length.!");
end
colNames = copy(keyCols);
colElTypes = copy(keyColsElType);
append!(colNames, DataFrames.identifier.(collect(unique(x[:TargetDb]))));
append!(colElTypes, fill(Union{String, Missing}, (length(colNames) - length(keyCols))));
nRows = DataFrames.nrow(unique(x[keyCols]));
xDf = DataFrames.DataFrame(
colElTypes,
colNames,
nRows
);
sortByCols = copy(keyCols);
push!(sortByCols, :TargetDb);
sort!(x, cols = sortByCols);
idx = 0;
key = "";
for row in DataFrames.eachrow(x)
keyCurr = join(map(i->i[2], row[keyCols]));
if key != keyCurr
idx += 1;
key = keyCurr;
map(i->xDf[idx,i[1]] = i[2], row[keyCols]);
end
colId = DataFrames.identifier(row[:TargetDb])
xDf[idx, colId] = DataFrames.ismissing(xDf[idx, colId]) ?
row[:TargetId] :
xDf[idx, colId] * "," * row[:TargetId];
end
return xDf;
end
| [
198,
70,
1734,
5216,
82,
796,
360,
713,
90,
10100,
11,
13940,
23650,
92,
7,
198,
220,
220,
366,
7344,
1386,
1,
5218,
1058,
39358,
13940,
23650,
40489,
11,
198,
220,
220,
366,
1837,
23650,
1,
5218,
1058,
28529,
39358,
13940,
23650,
1... | 2.120162 | 4,186 |
<gh_stars>10-100
using Documenter
using MERAKit
makedocs(sitename = "MERAKit.jl", modules = [MERAKit])
deploydocs(repo = "github.com/mhauru/MERAKit.jl.git")
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
16854,
263,
198,
3500,
34482,
10206,
270,
198,
198,
76,
4335,
420,
82,
7,
48937,
12453,
796,
366,
29296,
10206,
270,
13,
20362,
1600,
13103,
796,
685,
29296,
10206,
270,
12962,
198,
... | 2.323529 | 68 |
<reponame>JuliaPackageMirrors/CoordinateTransformations.jl<filename>src/CoordinateTransformations.jl
__precompile__()
module CoordinateTransformations
using StaticArrays
using Rotations
export RotMatrix, Quat, SpQuat, AngleAxis, RodriguesVec,
RotX, RotY, RotZ,
RotXY, RotYX, RotZX, RotXZ, RotYZ, RotZY,
RotXYX, RotYXY, RotZXZ, RotXZX, RotYZY, RotZYZ,
RotXYZ, RotYXZ, RotZXY, RotXZY, RotYZX, RotZYX
# Core methods
export compose, ∘, transform_deriv, transform_deriv_params
export Transformation, IdentityTransformation
# 2D coordinate systems and their transformations
export Polar
export PolarFromCartesian, CartesianFromPolar
# 3D coordinate systems and their transformations
export Spherical, Cylindrical
export SphericalFromCartesian, CartesianFromSpherical,
CylindricalFromCartesian, CartesianFromCylindrical,
CylindricalFromSpherical, SphericalFromCylindrical
# Common transformations
export AbstractAffineMap
export AffineMap, LinearMap, Translation
include("core.jl")
include("coordinatesystems.jl")
include("affine.jl")
# Deprecations
export transform
Base.@deprecate_binding AbstractTransformation Transformation
Base.@deprecate transform(transformation::Transformation, x) transformation(x)
end # module
| [
27,
7856,
261,
480,
29,
16980,
544,
27813,
27453,
5965,
14,
7222,
45480,
41762,
602,
13,
20362,
27,
34345,
29,
10677,
14,
7222,
45480,
41762,
602,
13,
20362,
198,
834,
3866,
5589,
576,
834,
3419,
198,
198,
21412,
22819,
4559,
41762,
6... | 3.057971 | 414 |
<reponame>matbesancon/Manopt.jl
using Manifolds, Manopt, Test, ManifoldsBase
using Random
Random.seed!(42)
# Test the additional manifold functions
#
@testset "Additional Manifold functions" begin
@testset "mid point & reflect" begin
M = Sphere(2)
p = [1.0, 0.0, 0.0]
q = [0.0, 1.0, 0.0]
r = mid_point(M,p,q)
r2 = similar(r)
mid_point!(M,r2,p,q)
r3 = shortest_geodesic(M,p,q,0.5)
r4 = mid_point(M,p,q,q)
@test isapprox(M,r,r2)
@test isapprox(M,r2,r3)
@test isapprox(M,r3,r4)
r5 = similar(r4)
mid_point!(M,r5,p,q,q)
@test isapprox(M,r4,r5)
r4 = mid_point(M,p,-p,q)
r5 = similar(r4)
mid_point!(M,r5,p,-p,q)
@test isapprox(M,r4,q)
@test isapprox(M,r4,r5)
@test isapprox(M,reflect(M,p,q),-q)
f = x->x
@test reflect(M,f,q) == q
M2 = Euclidean(2)
p2 = [1.0, 0.0]
q2 = [0.0, 1.0]
s = mid_point(M2,p2,q2)
s2 = similar(s)
mid_point!(M2,s2,p2,q2)
@test s==s2
@test s==(p2+q2)/2
s = mid_point(M2,p2,q2,s)
@test s==s2
s2 = similar(s)
mid_point!(M2,s2,p2,q2,s)
@test s==s2
end
@testset "random" begin
Mc = Circle()
pc = random_point(Mc)
@test is_manifold_point(Mc, pc, true)
Xc = random_tangent(Mc,pc)
@test is_tangent_vector(Mc, pc, Xc, true)
Me = Euclidean(3)
pe = random_point(Me)
@test is_manifold_point(Me, pe, true)
Xe = random_tangent(Me,pe)
@test is_tangent_vector(Me, pe, Xe, true)
Mg = Grassmann(3,2)
pg = random_point(Mg)
@test is_manifold_point(Mg, pg, true)
Xg = random_tangent(Mg,pg)
@test is_tangent_vector(Mg, pg, Xg, true; atol=10^(-14))
Mp = ProductManifold(Mg,Me)
pp = random_point(Mp)
@test is_manifold_point(Mp, pp, true)
Xp = random_tangent(Mp,pp)
@test is_tangent_vector(Mp, pp, Xp,true; atol=10^(-15))
Mp2 = PowerManifold(Me, NestedPowerRepresentation(),4)
pp2 = random_point(Mp2)
@test is_manifold_point(Mp2,pp2)
Xp2 = random_tangent(Mp2,pp2)
@test is_tangent_vector(Mp2,pp2,Xp2)
Mp3 = PowerManifold(Me, ArrayPowerRepresentation(), 4)
pp3 = random_point(Mp3)
@test is_manifold_point(Mp3,pp3)
Xp3 = random_tangent(Mp3,pp3)
@test is_tangent_vector(Mp3,pp3,Xp3)
Mr = Rotations(3)
pr = random_point(Mr)
@test is_manifold_point(Mr,pr)
Xr = random_tangent(Mr,pr)
@test is_tangent_vector(Mr,pr,Xr)
Mspd = SymmetricPositiveDefinite(3)
pspd = random_point(Mspd)
@test is_manifold_point(Mspd,pspd;atol=10^(-14))
Xspd = random_tangent(Mspd,pspd)
@test is_tangent_vector(Mspd,pspd,Xspd, true; atol=10^(-15))
Xspd2 = random_tangent(Mspd,pspd, Val(:Rician))
@test is_tangent_vector(Mspd,pspd,Xspd2,true;atol=10^(-15))
Mst = Stiefel(3,2)
pst = random_point(Mst)
@test is_manifold_point(Mst, pst, true)
Msp = Sphere(2)
psp = random_point(Msp)
@test is_manifold_point(Msp, psp, true)
Xsp = random_tangent(Msp,psp)
@test is_tangent_vector(Msp,psp,Xsp,true;atol=10^(-15))
Mh = Hyperbolic(2)
ph = [0.0,0.0,1.0]
Xh = random_tangent(Mh, ph)
@test is_tangent_vector(Mh, ph, Xh, true)
end
end | [
27,
7856,
261,
480,
29,
6759,
12636,
272,
1102,
14,
5124,
8738,
13,
20362,
198,
3500,
1869,
361,
10119,
11,
1869,
8738,
11,
6208,
11,
1869,
361,
10119,
14881,
198,
198,
3500,
14534,
198,
29531,
13,
28826,
0,
7,
3682,
8,
198,
2,
62... | 1.702415 | 2,070 |
using Documenter
push!(LOAD_PATH,"../src/")
using BasesAndSamples
makedocs(
sitename = "BasesAndSamples.jl Documentation",
format = Documenter.HTML(),
modules = [BasesAndSamples]
)
# Documenter can also automatically deploy documentation to gh-pages.
# See "Hosting Documentation" and deploydocs() in the Documenter manual
# for more information.
deploydocs(
repo = "github.com/nanleij/BasesAndSamples.jl.git",
devbranch="main"
)
| [
3500,
16854,
263,
198,
14689,
0,
7,
35613,
62,
34219,
553,
40720,
10677,
14,
4943,
198,
3500,
347,
1386,
1870,
50,
12629,
198,
198,
76,
4335,
420,
82,
7,
198,
220,
220,
220,
1650,
12453,
796,
366,
33,
1386,
1870,
50,
12629,
13,
20... | 2.954248 | 153 |
import GreyDecision.GreyNumbers: GreyNumber
import GreyDecision.Utility: makeminmax
import LinearAlgebra: det
@testset "Transpose" begin
x = [
GreyNumber(5.0) GreyNumber(3.0) GreyNumber(-2.0);
GreyNumber(-8.0) GreyNumber(6.0) GreyNumber(1.0);
GreyNumber(10.0) GreyNumber(7.0) GreyNumber(13.0)
]
xt = transpose(x)
@test size(xt) == size(x)
@test xt[1, 2] == x[2, 1]
@test xt[1, 3] == x[3, 1]
@test xt[1, 1] == x[1, 1]
@test xt[2, 2] == x[2, 2]
@test xt[3, 3] == x[3, 3]
end
@testset "det(x)" begin
x = [
GreyNumber(5.0) GreyNumber(3.0) GreyNumber(-2.0);
GreyNumber(-8.0) GreyNumber(6.0) GreyNumber(1.0);
GreyNumber(10.0) GreyNumber(7.0) GreyNumber(13.0)
]
y = [
5.0 3.0 -2.0;
-8.0 6.0 1.0;
10.0 7.0 13.0
]
xdet = det(transpose(x) * x)
ydet = det(transpose(y) * y)
@test ydet == xdet.a
@test ydet == xdet.b
end
@testset "inv(x)" begin
atol = 0.001
x = [
GreyNumber(5.0) GreyNumber(3.0) GreyNumber(-2.0);
GreyNumber(-8.0) GreyNumber(6.0) GreyNumber(1.0);
GreyNumber(10.0) GreyNumber(7.0) GreyNumber(13.0)
]
xtx = transpose(x) * x
invxyx = inv(xtx)
result = invxyx * xtx
@test isapprox(result[1,1].a, 1.0, atol = atol)
@test isapprox(result[2,2].a, 1.0, atol = atol)
@test isapprox(result[3,3].a, 1.0, atol = atol)
@test isapprox(result[1,2].a, 0.0, atol = atol)
@test isapprox(result[1,3].a, 0.0, atol = atol)
@test isapprox(result[2,1].a, 0.0, atol = atol)
@test isapprox(result[2,3].a, 0.0, atol = atol)
@test isapprox(result[3,1].a, 0.0, atol = atol)
@test isapprox(result[3,2].a, 0.0, atol = atol)
end | [
11748,
13980,
10707,
1166,
13,
49141,
49601,
25,
13980,
15057,
198,
11748,
13980,
10707,
1166,
13,
18274,
879,
25,
787,
1084,
9806,
198,
11748,
44800,
2348,
29230,
25,
1062,
628,
198,
31,
9288,
2617,
366,
8291,
3455,
1,
2221,
198,
220,
... | 1.861702 | 940 |
<filename>benchmark/_archive/parameter_tuning.jl<gh_stars>1-10
include("../src/experiments/0_setup.jl")
import DataFrames.DataFrame
using DataFrames, CSV
using FastGroupBy
using Base.Threads
K = 100
tries = vcat([Int(2^k-1) for k = 7:31], 3_000_000_000)
for N in tries
println(N)
if N < 2_000_000
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int64(1):Int64(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
sp = @elapsed sumby_sortperm(by, val)
CSV.write(string("benchmark/out/64/sp$N $(replace(string(now()),":","")).csv"),DataFrame(sp = sp))
end
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int64(1):Int64(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
srs = @elapsed sumby_multi_rs(by, val)
CSV.write(string("benchmark/out/64/mrs$N $(replace(string(now()),":","")).csv"),DataFrame(srs = srs))
if N < 2_000_000_000
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int64(1):Int64(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
srg = @elapsed sumby_radixgroup(by, val)
CSV.write(string("benchmark/out/64/srg$N $(replace(string(now()),":","")).csv"),DataFrame(srg = srg))
end
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int64(1):Int64(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
srs = @elapsed sumby_radixsort(by, val)
CSV.write(string("benchmark/out/64/srs$N $(replace(string(now()),":","")).csv"),DataFrame(srs = srs))
end
tries = vcat([Int(2^k-1) for k = 7:31], 3_000_000_000)
for N in tries
println(N)
if N < 400_000
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int32(1):Int32(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
sp = @elapsed sumby_sortperm(by, val)
CSV.write(string("benchmark/out/sp$N $(replace(string(now()),":","")).csv"),DataFrame(sp = sp))
end
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int32(1):Int32(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
srs = @elapsed sumby_multi_rs(by, val)
CSV.write(string("benchmark/out/mrs$N $(replace(string(now()),":","")).csv"),DataFrame(srs = srs))
if N < 2_000_000_000
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int32(1):Int32(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
srg = @elapsed sumby_radixgroup(by, val)
CSV.write(string("benchmark/out/srg$N $(replace(string(now()),":","")).csv"),DataFrame(srg = srg))
end
by = nothing; val = nothing; gc()
srand(1)
by = rand(Int32(1):Int32(round(N/K)), N);
val = rand(Int32(1):Int32(5), N);
srs = @elapsed sumby_radixsort(by, val)
CSV.write(string("benchmark/out/srs$N $(replace(string(now()),":","")).csv"),DataFrame(srs = srs))
end
| [
27,
34345,
29,
26968,
4102,
47835,
17474,
14,
17143,
2357,
62,
28286,
278,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
17256,
7203,
40720,
10677,
14,
23100,
6800,
14,
15,
62,
40406,
13,
20362,
4943,
198,
11748,
6060,
35439,
... | 2.086672 | 1,373 |
using FedDCD
include("./PrimalMethods.jl")
include("./DualMethods.jl")
# Softmax for mnist
fileTrain = "data/mnist.scale"
fileTest = "data/mnist.scale.t"
λ = 1e-2
participationRate = 0.3
localLr = 1e-2
numRounds = 100
# - FedAvg
μ = 0.0
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp1/FedAvg_logReg_mnist_lambda1e-2.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp1/FedProx_logReg_mnist_lambda1e-2.txt"
)
# - Scaffold
RunScaffold(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp1/Scaffold_logReg_mnist_lambda1e-2.txt"
)
# Softmax for rcv1
fileTrain = "data/rcv1_train.binary"
fileTest = "data/rcv1_train.binary"
λ = 1e-3
participationRate = 0.3
localLr = 1e-1
numRounds = 100
# - FedAvg
μ = 0.0
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp1/FedAvg_logReg_rcv1_lambda1e-2.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp1/FedProx_logReg_rcv1_lambda1e-2.txt"
)
# - Scaffold
RunScaffold(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp1/Scaffold_logReg_rcv1_lambda1e-2.txt"
)
# Softmax + MLP for mnist
fileTrain = "data/mnist.scale"
fileTest = "data/mnist.scale.t"
λ = 1e-2
participationRate = 0.3
localLr = 1e-2
numRounds = 100
# - FedAvg
μ = 0.0
# - FedAvg
RunFedAvgAndProxNN(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp1/FedAvg_MLP_mnist_lambda1e-2.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProxNN(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp1/FedProx_MLP_mnist_lambda1e-2.txt"
)
# - Scaffold
RunScaffoldNN(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp1/Scaffold_MLP_mnist_lambda1e-2.txt"
)
# Softmax for mnist
fileTrain = "data/mnist.scale"
fileTest = "data/mnist.scale.t"
λ = 1e-2
participationRate = 0.1
localLr = 1e-3
numRounds = 500
# - FedAvg
μ = 0.0
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedAvg_logReg_mnist_lambda1e-2_tau1e-1.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedProx_logReg_mnist_lambda1e-2_tau1e-1.txt"
)
# - Scaffold
RunScaffold(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp2/Scaffold_logReg_mnist_lambda1e-2_tau1e-1.txt"
)
# Softmax for rcv1
fileTrain = "data/rcv1_train.binary"
fileTest = "data/rcv1_train.binary"
λ = 1e-3
participationRate = 0.1
localLr = 1e-1
numRounds = 500
# - FedAvg
μ = 0.0
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedAvg_logReg_rcv1_lambda1e-2_tau1e-1.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedProx_logReg_rcv1_lambda1e-2_tau1e-1.txt"
)
# - Scaffold
RunScaffold(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp2/Scaffold_logReg_rcv1_lambda1e-2_tau1e-1.txt"
)
# Softmax + MLP for mnist
fileTrain = "data/mnist.scale"
fileTest = "data/mnist.scale.t"
λ = 1e-2
participationRate = 0.1
localLr = 1e-2
numRounds = 500
# - FedAvg
μ = 0.0
# - FedAvg
RunFedAvgAndProxNN(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedAvg_MLP_mnist_lambda1e-2_tau1e-1.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProxNN(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedProx_MLP_mnist_lambda1e-2_tau1e-1.txt"
)
# - Scaffold
RunScaffoldNN(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp2/Scaffold_MLP_mnist_lambda1e-2_tau1e-1.txt"
)
#######################################################################################################
##################################
# Participation rate = 0.05
##################################
# Softmax for mnist
fileTrain = "data/mnist.scale"
fileTest = "data/mnist.scale.t"
λ = 1e-2
participationRate = 0.05
localLr = 1e-3
numRounds = 1000
# - FedAvg
μ = 0.0
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedAvg_logReg_mnist_lambda1e-2_tau5e-2.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedProx_logReg_mnist_lambda1e-2_tau5e-2.txt"
)
# - Scaffold
RunScaffold(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp2/Scaffold_logReg_mnist_lambda1e-2_tau5e-2.txt"
)
# Softmax for rcv1
fileTrain = "data/rcv1_train.binary"
fileTest = "data/rcv1_train.binary"
λ = 1e-3
participationRate = 0.05
localLr = 1e-1
numRounds = 1000
# - FedAvg
μ = 0.0
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedAvg_logReg_rcv1_lambda1e-2_tau5e-2.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProx(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedProx_logReg_rcv1_lambda1e-2_tau5e-2.txt"
)
# - Scaffold
RunScaffold(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp2/Scaffold_logReg_rcv1_lambda1e-2_tau5e-2.txt"
)
# Softmax + MLP for mnist
fileTrain = "data/mnist.scale"
fileTest = "data/mnist.scale.t"
λ = 1e-2
participationRate = 0.05
localLr = 1e-2
numRounds = 1000
# - FedAvg
μ = 0.0
# - FedAvg
RunFedAvgAndProxNN(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedAvg_MLP_mnist_lambda1e-2_tau5e-2.txt"
)
# - FedProx
μ = 1e-4
RunFedAvgAndProxNN(
fileTrain,
fileTest,
λ,
μ,
participationRate,
localLr,
numRounds,
"results/exp2/FedProx_MLP_mnist_lambda1e-2_tau5e-2.txt"
)
# - Scaffold
RunScaffoldNN(
fileTrain,
fileTest,
λ,
participationRate,
localLr,
numRounds,
"results/exp2/Scaffold_MLP_mnist_lambda1e-2_tau5e-2.txt"
)
# Softmax for mnist
fileTrain = "data/mnist.scale"
fileTest = "data/mnist.scale.t"
λ = 1e-2
participationRate = 0.3
localLr = 1e-2
numRounds = 100
# Experiment 2
# rcv1
# τ = 0.3
RunFedDCD(
"data/rcv1_train.binary",
"data/rcv1_train.binary",
1e-3,
0.3,
0.1,
100,
"results/exp2/FedDCD_rcv1_lambda1e-3_tau3e-1.txt"
)
RunAccFedDCD(
"data/rcv1_train.binary",
"data/rcv1_train.binary",
1e-3,
0.3,
0.1,
100,
"results/exp2/AccFedDCD_rcv1_lambda1e-3_tau3e-1.txt"
)
# τ = 0.1
RunFedDCD(
"data/rcv1_train.binary",
"data/rcv1_train.binary",
1e-3,
0.1,
0.1,
500,
"results/exp2/FedDCD_rcv1_lambda1e-3_tau1e-1.txt"
)
RunAccFedDCD(
"data/rcv1_train.binary",
"data/rcv1_train.binary",
1e-3,
0.1,
0.1,
500,
"results/exp2/AccFedDCD_rcv1_lambda1e-3_tau1e-1.txt"
)
# τ = 0.05
RunFedDCD(
"data/rcv1_train.binary",
"data/rcv1_train.binary",
1e-3,
0.05,
0.1,
1000,
"results/exp2/FedDCD_rcv1_lambda1e-3_tau5e-2.txt"
)
RunAccFedDCD(
"data/rcv1_train.binary",
"data/rcv1_train.binary",
1e-3,
0.05,
0.1,
1000,
"results/exp2/AccFedDCD_rcv1_lambda1e-3_tau5e-2.txt"
)
# mnist
# τ = 0.3
RunFedDCDNN(
"data/mnist.scale",
"data/mnist.scale.t",
1e-2,
0.3,
0.1,
100,
"results/exp2/FedDCD_mnist_lambda1e-2_tau3e-1.txt"
)
RunAccFedDCDNN(
"data/mnist.scale",
"data/mnist.scale.t",
1e-2,
0.3,
0.1,
100,
"results/exp2/AccFedDCD_mnist_lambda1e-2_tau3e-1.txt"
)
# τ = 0.1
RunFedDCDNN(
"data/mnist.scale",
"data/mnist.scale.t",
1e-2,
0.1,
0.1,
500,
"results/exp2/FedDCD_mnist_lambda1e-2_tau1e-1.txt"
)
RunAccFedDCDNN(
"data/mnist.scale",
"data/mnist.scale.t",
1e-2,
0.1,
0.1,
500,
"results/exp2/AccFedDCD_mnist_lambda1e-2_tau1e-1.txt"
)
# τ = 0.05
RunFedDCDNN(
"data/mnist.scale",
"data/mnist.scale.t",
1e-2,
0.05,
0.1,
1000,
"results/exp2/FedDCD_mnist_lambda1e-2_tau5e-2.txt"
)
RunAccFedDCDNN(
"data/mnist.scale",
"data/mnist.scale.t",
1e-2,
0.05,
0.1,
1000,
"results/exp2/AccFedDCD_mnist_lambda1e-2_tau5e-2.txt"
)
| [
3500,
10169,
35,
8610,
198,
17256,
7,
1911,
14,
23828,
282,
46202,
13,
20362,
4943,
198,
17256,
7,
1911,
14,
36248,
46202,
13,
20362,
4943,
198,
198,
2,
8297,
9806,
329,
285,
77,
396,
198,
7753,
44077,
796,
366,
7890,
14,
10295,
396... | 1.887915 | 4,791 |
#
# Recursive Fourier Propagation for BoostFractor
#
# V: 2019-08-06
#
# <NAME>
#
# Public API: `dancer` (legacy beam-tracing solver), `dance_intro`
# (axion-induced initial fields), and `cheerleader` (newer solver that can also
# return the fields leaving the system on the left).
export dancer, dance_intro, cheerleader
"""
Propagates the fields through the system
* `amin`: Mimum (local) amplitude of a field, in order to be propagated
* `nmax`: Maximum number of beam iteration steps, directly equivalent to how many boundaries a beam 'sees' at most
* `bdry`: SetupBoundaries-Objekt, containing all relevant geometrical information (disk positions, epsilon, etc)
* `f`: Frequency
* `prop`: Propagator Function to use. Standard is propagator()
* `reflect`: If nothing (standar value), the axion-induced signal is computed.
If set, this field defines a beam, for which the reflected beam will be calculated
* `Xset`, `Yset`: Explicitly set the coordinate system for the fields
* `returnsum`: If false, the out-propagating contributions after each iteration will be returned, without summing.
* `immediatesum`: If false, the out-propagating contributions will be saved and summed up at the end.
"""
function dancer(amin, nmax, sbdry::SetupBoundaries, coords::CoordinateSystem; f=10.0e9, prop=propagator, emit=nothing, reflect=nothing, diskR=0.1, returnsum=true, immediatesum=true)
# Make dancer swallow the same SetupBoundaries object as cheerleader and transformer
bdry = deepcopy(sbdry)
bdry.eps = bdry.eps[2:end]
bdry.distance = bdry.distance[2:end]
bdry.relative_tilt_x = bdry.relative_tilt_x[2:end]
bdry.relative_tilt_y = bdry.relative_tilt_y[2:end]
bdry.relative_surfaces = bdry.relative_surfaces[2:end,:,:]
append!(bdry.r, 0.0)
rightmoving = 1
leftmoving = 2
fields = Array{Complex{Float64}}(zeros(length(bdry.distance), 2, length(coords.X), length(coords.Y)))
#fields = SharedArray(Complex{Float64}, length(bdry.distance), 2, length(coords.X), length(coords.Y))
# number of regions -----^ ^ ^
# number of propagation directions -------^ ^
# dimensions of the fields at each position -------------^
# Pre-allocate memory
if immediatesum
Eout = Array{Complex{Float64}}(zeros(length(coords.X), length(coords.Y),1))
else
Eout = Array{Complex{Float64}}(zeros(length(coords.X), length(coords.Y),nmax+1))
end
# TODO: propagation through and emission from last bdry to the right
if reflect === nothing
if emit === nothing
fields = dance_intro(bdry,coords;diskR=diskR)
else
fields = emit
end
# Eout =
else
fields[length(bdry.distance), leftmoving, :, :] = reflect
# Eout =
end
lambda = wavelength(f)
n = 0 # Iteration Count
a = 1.0 # Maximum Field Amplitude in the iteration
# The measure of music (Takt) .... da capo al fine
while n <= nmax && a >= amin
# The "legs" of the dancer dancing in parallel
# @sync is important to make sure that the outer loop waits for the inner one to finish...
#@sync @parallel
for i in 1:1:length(bdry.r)
# The beats
# Move left leg (rightmoving)
if i > 1
# In place propagate left leg:
fields[i-1,rightmoving,:,:] = prop(fields[i-1,rightmoving,:,:], bdry.distance[i-1], diskR, bdry.eps[i-1], bdry.relative_tilt_x[i-1], bdry.relative_tilt_y[i-1], bdry.relative_surfaces[i-1,:,:], lambda, coords)
end
# Move right leg (leftmoving)
if i < length(bdry.r)
fields[i,leftmoving,:,:] = prop(fields[i,leftmoving,:,:], bdry.distance[i], diskR, bdry.eps[i], bdry.relative_tilt_x[i], bdry.relative_tilt_y[i], bdry.relative_surfaces[i,:,:], lambda, coords)
end
# Reflection & Transmission
if i == 1 # Leftmost case
fields[i, leftmoving,:,:] .*= -bdry.r[i]
# The right-moving does not exist and about the transmitted one we dont care (yet)
elseif i == length(bdry.r) # Rightmost case
# The rightmoving is transmitted to Eout (yeah! that'll be our result!)
# old version without pre-allocation
#Eout = cat(3,Eout, (1+bdry.r[i])*fields[i-1, rightmoving,:,:])
# with pre-allocated array:
if immediatesum
Eout[:,:,1] .+= (1+bdry.r[i]).*fields[i-1, rightmoving,:,:]
else
Eout[:,:,n+1] = (1+bdry.r[i]).*fields[i-1, rightmoving,:,:]
end
# And reflected as well.
fields[i-1, rightmoving,:,:] .*= bdry.r[i]
else # Standard Case
# field(i-1) = transmit(field(i)) + reflect(field(i-1))
# field(i) = transmit(field(i-1)) + reflect(field(i))
# Basically the above, but as much in-place as possible
# Always the field on the left gets a copy
FieldArrivedLeft = copy(fields[i-1, rightmoving,:,:])
# Then we in-place update it
fields[i-1, rightmoving,:,:] .*= bdry.r[i]
fields[i-1, rightmoving,:,:] .+= (1-bdry.r[i]).*fields[i, leftmoving,:,:]
# Then we in-place update the other one with the previously made backup
fields[i, leftmoving,:,:] .*= -bdry.r[i]
fields[i, leftmoving,:,:] .+= (1+bdry.r[i]).*FieldArrivedLeft
end
end
# Now the directions have changed
lmv = copy(leftmoving)
leftmoving = rightmoving
rightmoving = lmv
# a is the maximum field that still occurs in the system
if amin != 0
a = maximum(abs.(fields))
end
# n is the iteration count
n += 1
end
# <NAME>
# The summation at the end is to give the opportunity to intelligently sum
# since different rays might have very different orders of magnitude
# The other reason is, that we are able to return the different components
# such that we can get immediate access to less accurate results
# TODO: See if Julia is really using the most intelligent summing strategy
if returnsum
return sum(reverse(Eout, dims = 3), dims = 3)[:,:,1]
else
return Eout
end
end
"""
dance_intro(bdry::SetupBoundaries, X, Y; bfield=nothing, velocity_x=0, f=10e9,diskR=0.1)
Initialize EM fields. Can include velocity effects.
"""
function dance_intro(bdry::SetupBoundaries, coords::CoordinateSystem; bfield=nothing, velocity_x=0, f=10e9,diskR=0.1)
# Initialize the variable we want to return
fields_initial = Array{Complex{Float64}}(zeros(length(bdry.distance), 2, length(coords.X), length(coords.Y)))
# Inaccuracies of the emitted fields, BField and Velocity Effects ###################
if bfield === nothing
# Make sure that there is only emission on the disk surfaces
bfield = [x^2 + y^2 > diskR^2 ? 0.0 : 1.0 for z in ones(length(bdry.distance)+1), x in coords.X, y in coords.Y]
end
if velocity_x != 0
lambda = wavelength(f)
Ma_PerMeter = 2pi/lambda # k = 2pi/lambda (c/f = lambda)
bfield .*= [exp(-1im*Ma_PerMeter*(-velocity_x)*x) for z in ones(length(bdry.distance)+1), x in coords.X, y in coords.Y]
end
####################################################################################
# Iterate over the gaps and initialize the emissions from them #####################
# This implements Theoretical Foundations (arxiv: 1612.07057) (3.3)
for n in 1:length(bdry.distance)
ax_rightmoving = 0
if n == 1
if bdry.r[1] == 1
ax_rightmoving = -1.0
else #TODO
ax_rightmoving = 0
end
else
# Right-Moving in that gap
eps_i = bdry.eps[n-1]
eps_m = bdry.eps[n]
denominator = eps_i * sqrt(eps_m) + eps_m * sqrt(eps_i)
#ax_i = sqrt(eps_i) * (1 - eps_m/eps_i) / denominator + 0im
ax_rightmoving = sqrt(eps_m) * (1 - eps_i/eps_m) / denominator +0.0im
end
# Left-Moving in that gap
eps_i = bdry.eps[n]
eps_m = (n == length(bdry.distance)) ? 1 : bdry.eps[n+1] # Rightmost epsilon is 1.
denominator = eps_i * sqrt(eps_m) + eps_m * sqrt(eps_i)
ax_leftmoving = sqrt(eps_i) * (1 - eps_m/eps_i) / denominator +0.0im
#ax_m = sqrt(eps_m) * (1 - eps_i/eps_m) / denominator +0.0im
# Fill the initial array
fields_initial[n,1,:,:] = [1.0*ax_rightmoving + 0.0im for x in coords.X, y in coords.Y] .* bfield[n,:,:]
fields_initial[n,2,:,:] = [1.0*ax_leftmoving + 0.0im for x in coords.X, y in coords.Y] .* bfield[n+1,:,:]
end
return fields_initial
end
"""
cheerleader(amin, nmax, bdry::SetupBoundaries; f=10.0e9, prop=propagator, emit=nothing, reflect=nothing, Xset=X, Yset=Y, diskR=0.1, returnboth=false)
New Recursive Fourier Propagation implementation.
# Arguments:
- `amin`: Mimum (local) amplitude of a field, in order to be propagated
- `nmax`: Maximum number of beam iteration steps, directly equivalent to how many boundaries a beam 'sees' at most
- `bdry::SetupBoundaries`: Properties of dielectric boundaries
- `f::Float64` ```> 0```: Frequency of EM radiation
- `prop`: Propagator Function to use. Standard is propagator().
- `emit`: Explicitly set the axion-induced fields emitted from each boundary (to the left and to the right).
If ``nothing`` fields are initialized according to uniform,
homogeneous external B-field with zero axion velocity.
- `reflect`: If `nothing` (standar value), the axion-induced signal is computed.
If set, this field defines a beam, for which the reflected beam will be calculated
- `Xset` and `Yset`: Explicitly set the coordinate system for the fields
- `diskR`: Radius of dielectric disk
- `returnboth::Bool`: If `true` cheerleader returns fields leaving on left and right.
If `false` only returns fields leaving on right.
See [`dancer`](@ref) for old version.
"""
function cheerleader(amin, nmax, bdry::SetupBoundaries, coords::CoordinateSystem; f=10.0e9, prop=propagator, emit=nothing, reflect=nothing, diskR=0.1, returnboth=false)
# Before speed of light was 3e8 here, but this means an error at the permil level, i.e. order ~20MHz at 20GHz,
# if fixing lambda to 1.5 cm, one gets a shift of roughly 10MHz
lambda = wavelength(f)
# Pre-allocate memory
# Note that fields[0,:,:] contains the fields leaving the system on the left
# and fields[length(bdry.distance)+1,:,:] the fields leaving on the right
#fields = OffsetArray(::Array{Complex{Float64}}}, 0:length(bdry.distance)+1, 1:length(X), 1:length(Y))
fields = Array{Complex{Float64}}(zeros( length(bdry.distance)+2, length(coords.X), length(coords.Y)))
# number of regions + 2 outporpagating --^ ^
# dimensions of the fields at each position ------------------------------^
# In a next step this could/should be generalized in the SetupBoundaries structure..
reflectivities_leftmoving = -bdry.r
reflectivities_rightmoving = bdry.r
transmissivities_leftmoving = 1 .- bdry.r
transmissivities_rightmoving = 1 .+ bdry.r
### Indexing of arrays, note the specific indexing of the regions, different from dancer()!.
#Boundaries: [ 1, 2, 3, 4 ]
# ####### | | | | #######
# # <-- # | | | | # --> #
# ####### | | | | #######
#Regions: [ 1 , 2 , 3 , 4 , 5 ]
# emit in this function expects for each region left- and right-propagating fields.
# Note that this is different from dancer() since here we do not take the mirror
# explicit, such that also a transmissivity can be calculated
# TODO: propagation through and emission from last bdry to the right
if reflect === nothing
if emit === nothing
emit = Array{Complex{Float64}}(zeros(length(bdry.distance), 2, length(coords.X), length(coords.Y)))
# we may reuse the dance_intro-function in the standard case
bdrycpy = deepcopy(bdry)
bdrycpy.distance = bdrycpy.distance[2:end]
bdrycpy.eps = bdrycpy.eps[2:end]
emit[2:end,:,:,:] = dance_intro(bdrycpy,coords,diskR=diskR)
end
else
emit = Array{Complex{Float64}}(zeros(length(bdry.distance), 2, length(coords.X), length(coords.Y)))
# This should be right like this, it is explicitly taken care of at the bottom...
emit[length(bdry.distance), 2, :, :] = reflect
end
# Initialize fields ........................................
# Add everything which is emitted to the right
fields = copy(emit[:,1,:,:])
# Push forward the fields rightmoving...
for i in 2:1:length(bdry.distance)-1
# Propagate to the right
fields[i,:,:] = prop(fields[i,:,:], bdry.distance[i], diskR, bdry.eps[i], bdry.relative_tilt_x[i], bdry.relative_tilt_y[i], bdry.relative_surfaces[i,:,:], lambda, coords)
# Reflect and Transmit
fields[i+1,:,:] .+= transmissivities_rightmoving[i].*fields[i,:,:]
fields[i,:,:] .*= reflectivities_rightmoving[i]
end
# Add everything which is emitted to the left (except last gap)
fields[1:length(bdry.distance)-1,:,:] .+= emit[1:length(bdry.distance)-1,2,:,:]
#fields .+= emit[:,2,:,:]
# The last gap is not supposed to contain left-propagating stuff,
# so we have to treat it explicitly (not elegant, but ok)
let i = length(bdry.distance)-1
# Transmit
fields[i,:,:] .+= transmissivities_leftmoving[i].*emit[i+1,2,:,:]
# Reflect
fields[i+1,:,:] .+= reflectivities_leftmoving[i].*emit[i+1,2,:,:]
end
# Main iterations ...............................................
n = 0 # Iteration Count
a = 1.0 # Maximum Field Amplitude in the iteration
while n <= nmax && a >= amin
# Iterate over regions
# The first region explicitly
let i = 1
# Do NOT Propagate to the fields in the current gap to the right
# Region 1 always contains left-moving fields.
# Propagate to the fields in the next gap to the left
fields[i+1,:,:] = prop(fields[i+1,:,:], bdry.distance[i+1], diskR, bdry.eps[i+1], bdry.relative_tilt_x[i+1], bdry.relative_tilt_y[i+1], bdry.relative_surfaces[i+1,:,:], lambda, coords)
# Reflect and Transmit
# No backup copy needed since nothing is coming from the i = 1 region
# Transmit
fields[i,:,:] .+= transmissivities_leftmoving[i].*fields[i+1,:,:]
# Reflect
fields[i+1,:,:] .*= reflectivities_leftmoving[i]
end
# Push forward...
for i in 2:1:length(bdry.distance)-2
# Propagate to the fields in the current gap to the right
fields[i,:,:] = prop(fields[i,:,:], bdry.distance[i], diskR, bdry.eps[i], bdry.relative_tilt_x[i], bdry.relative_tilt_y[i], bdry.relative_surfaces[i,:,:], lambda, coords)
# Propagate to the fields in the next gap to the left
fields[i+1,:,:] = prop(fields[i+1,:,:], bdry.distance[i+1], diskR, bdry.eps[i+1], bdry.relative_tilt_x[i+1], bdry.relative_tilt_y[i+1], bdry.relative_surfaces[i+1,:,:], lambda, coords)
# Reflect and Transmit
fields_next_gap_copy = copy(fields[i+1,:,:])
fields[i+1,:,:] .*= reflectivities_leftmoving[i]
fields[i+1,:,:] .+= transmissivities_rightmoving[i].*fields[i,:,:]
fields[i,:,:] .*= reflectivities_rightmoving[i]
fields[i,:,:] .+= transmissivities_leftmoving[i].*fields_next_gap_copy
end
let i = length(bdry.distance)-1
# Propagate to the fields in the current gap to the right
fields[i,:,:] = prop(fields[i,:,:], bdry.distance[i], diskR, bdry.eps[i], bdry.relative_tilt_x[i], bdry.relative_tilt_y[i], bdry.relative_surfaces[i,:,:], lambda, coords)
# DO NOT Propagate to the fields in the next gap to the left
# Since it only contains fields propagating to the right
# Reflect and Transmit
fields[i+1,:,:] .+= transmissivities_rightmoving[i].*fields[i,:,:]
fields[i,:,:] .*= reflectivities_rightmoving[i]
end
# The last region always contains right-moving fields and is automatically added up correctly
# Check convergence
if amin != 0
a = maximum(abs.(fields))
end
# n is the iteration count
n += 1
end
if returnboth
return fields[1,:,:], fields[length(bdry.distance),:,:]
else
return fields[length(bdry.distance),:,:]
end
end
| [
2,
198,
2,
3311,
30753,
34296,
5277,
8772,
363,
341,
329,
19835,
37,
40450,
198,
2,
198,
2,
569,
25,
13130,
12,
2919,
12,
3312,
198,
2,
198,
2,
1279,
20608,
29,
198,
2,
198,
198,
39344,
38619,
11,
9280,
62,
600,
305,
11,
14042,
... | 2.322375 | 7,361 |
<filename>0011/SCI/src/SCI.jl<gh_stars>1-10
"""A scirntific module (calculate exp(x))"""
module SCI
abstract type AbstractProblem end
struct Problem{T} <: AbstractProblem x::T end
abstract type AbstractAlgorithm end
struct Builtin <: AbstractAlgorithm end
Base.@kwdef struct Taylor <: AbstractAlgorithm n::Int = 10 end
default_algorithm(prob::Problem) = Builtin()
struct Solution{R, P<:AbstractProblem, A<:AbstractAlgorithm} result::R; prob::P; alg::A end
solve(prob::AbstractProblem) = solve(prob, default_algorithm(prob))
solve(prob::AbstractProblem, alg::Builtin) = Solution(exp(prob.x), prob, alg)
solve(prob::AbstractProblem, alg::Taylor) = Solution(sum(prob.x^k/factorial(k) for k in 0:alg.n), prob, alg)
using COM
COM.prettify(sol::Solution{R, P, A}) where {R, P<:AbstractProblem, A<:Builtin} = """
Problem: x = $(sol.prob.x)
Algorithm: builtin exp(x)
Result: $(sol.result)
"""
COM.prettify(sol::Solution{R, P, A}) where {R, P<:AbstractProblem, A<:Taylor} = """
Problem: x = $(sol.prob.x)
Algorithm: Taylor series of exp(x) upto degree $(sol.alg.n)
Result: $(sol.result)
"""
end
| [
27,
34345,
29,
405,
1157,
14,
6173,
40,
14,
10677,
14,
6173,
40,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
37811,
32,
629,
343,
429,
811,
8265,
357,
9948,
3129,
378,
1033,
7,
87,
4008,
37811,
198,
21412,
6374,
40,
198... | 2.632458 | 419 |
"""
Mutable name/value pair describing one option of a DC.js chart.

An attribute whose `value` is the empty string is considered unset and is
skipped when the chart is serialized.
"""
mutable struct Attribute
    name::Symbol
    value::String

    # Freshly declared attributes start out unset (empty value).
    function Attribute(name::Symbol)
        return new(name, "")
    end
end

# Sentinel returned by attribute lookups when nothing matches.
const NULL_ATTRIBUTE = Attribute(:NULL)
"""
A DC.js chart "type": a dc.js constructor name (`"NONE"` for abstract
mixins), a list of own attributes, and ancestor chart types whose attributes
are inherited.
"""
mutable struct ChartType
    concreteName::String
    attributes::Vector{Attribute}
    ancestors::Vector{ChartType}

    # Abstract mixin without a dc.js constructor name.
    function ChartType(attrs::Vector{A}, parents::Vector{ChartType}=ChartType[]) where {A<:Attribute}
        return new("NONE", convert(Vector{Attribute}, attrs), parents)
    end

    # Concrete chart type bound to the dc.js constructor `name`.
    function ChartType(name::String, attrs::Vector{A}, parents::Vector{ChartType}=ChartType[]) where {A<:Attribute}
        return new(name, convert(Vector{Attribute}, attrs), parents)
    end
end
"""
    Base.deepcopy(chart_type::ChartType)

Recursively copy `chart_type`: attributes and ancestors are deep-copied, so
mutating the copy never touches the original. Note that ancestors shared
between chart types are duplicated, not kept shared.
"""
function Base.deepcopy(chart_type::ChartType)
    copied_attrs = Attribute[deepcopy(attr) for attr in chart_type.attributes]
    copied_parents = ChartType[deepcopy(parent) for parent in chart_type.ancestors]
    return ChartType(chart_type.concreteName, copied_attrs, copied_parents)
end
"""
    get_all_attributes(chart_type::ChartType)

Collect the attributes of `chart_type` together with those inherited from all
of its ancestors (depth-first), inherited attributes first, own last.
"""
function get_all_attributes(chart_type::ChartType)
    inherited = [get_all_attributes(parent) for parent in chart_type.ancestors]
    return vcat(Attribute[], inherited..., chart_type.attributes)
end
"""
    chart_type[s::Symbol]

Look up the attribute named `s`, first among the chart type's own attributes
and then (recursively) among its ancestors'. Returns `NULL_ATTRIBUTE` when no
attribute of that name exists anywhere in the hierarchy.
"""
function Base.getindex(chart_type::ChartType, s::Symbol)
    own = findfirst(attr -> attr.name == s, chart_type.attributes)
    own === nothing || return chart_type.attributes[own]
    for parent in chart_type.ancestors
        inherited = parent[s]
        inherited.name == :NULL || return inherited
    end
    return NULL_ATTRIBUTE
end
"""
    chart_type[s::Symbol] = v

Set the value of the attribute named `s`, searching own attributes first and
then the ancestor hierarchy. Silently does nothing when no such attribute
exists.
"""
function Base.setindex!(chart_type::ChartType, v::String, s::Symbol)
    own = findfirst(attr -> attr.name == s, chart_type.attributes)
    if own !== nothing
        chart_type.attributes[own].value = v
        return
    end
    for parent in chart_type.ancestors
        inherited = parent[s]
        if inherited.name != :NULL
            # `getindex` hands back the live Attribute, so this mutates the
            # ancestor's attribute in place.
            inherited.value = v
            return
        end
    end
end
# Abstract DC.js "mixin" chart types: each bundles the attributes a dc.js
# capability mixin accepts; `ancestors` models dc.js's mixin inheritance.
const BaseChart = ChartType([Attribute(:width), Attribute(:height)])
const ColorChart = ChartType([Attribute(:colors), Attribute(:colorAccessor), Attribute(:colorDomain)])
const CoordinateGridChart = ChartType([Attribute(:zoomScale),
    Attribute(:zoomOutRestrict), Attribute(:mouseZoomable),
    Attribute(:x), Attribute(:xUnits),
    Attribute(:xAxis), Attribute(:elasticX),
    Attribute(:xAxisPadding), Attribute(:y),
    Attribute(:yAxis), Attribute(:elasticY),
    Attribute(:renderHorizontalGridLines), Attribute(:renderVerticalGridLines),
    Attribute(:yAxisPadding), Attribute(:round),
    Attribute(:keyAccessor), Attribute(:valueAccessor),
    Attribute(:xAxisLabel), Attribute(:yAxisLabel)],
    [BaseChart, ColorChart])
const AbstractBubbleChart = ChartType([Attribute(:r), Attribute(:radiusValueAccessor),
    Attribute(:minRadiusWithLabel), Attribute(:maxBubbleRelativeSize)],
    [ColorChart])
const WidgetChart = ChartType([Attribute(:dimension), Attribute(:group)])
# StackableChart = ChartType(false, [Attribute{}])
# Concrete chart types: the string argument is the dc.js constructor name
# emitted into the generated JavaScript (e.g. dc.pieChart(...)).
const PieChart = ChartType("pieChart",
    [Attribute(:slicesCap), Attribute(:innerRadius),
    Attribute(:radius), Attribute(:cx),
    Attribute(:cy), Attribute(:minAngleForLabel)],
    [ColorChart, BaseChart])
const BarChart = ChartType("barChart",
    [Attribute(:centerBar), Attribute(:gap)],
    [CoordinateGridChart]) # StackableChart
const LineChart = ChartType("lineChart",
    [Attribute(:renderArea), Attribute(:dotRadius)],
    [CoordinateGridChart]) # StackableChart
const RowChart = ChartType("rowChart",
    [Attribute(:gap), Attribute(:elasticX),
    Attribute(:labelOffsetX), Attribute(:labelOffsetY)],
    [ColorChart, BaseChart])
const BubbleChart = ChartType("bubbleChart",
    [Attribute(:elasticRadius)],
    [AbstractBubbleChart, CoordinateGridChart])
const DataCountWidget = ChartType("dataCount",
    Attribute[],
    [WidgetChart])
const DataTableWidget = ChartType("dataTable",
    [Attribute(:size), Attribute(:columns),
    Attribute(:sortBy), Attribute(:order)],
    [WidgetChart])
# DC Chart
"""
A concrete DC.js chart bound to a crossfilter `group`.

`parent` is the id of the DOM element the chart renders into; by default a
random `chart_NNNNNN` id is generated.
"""
mutable struct DCChart
    group::Group
    typ::ChartType
    title::String
    parent::String

    function DCChart(typ::ChartType, group::Group;
                     title::String = "Chart for " * string(group.dim.name),
                     parent::String = @sprintf("chart_%06d", rand(0:999999)))
        return new(group, typ, title, parent)
    end
end
"""
    randomize_parent(chart::DCChart)

Assign a fresh random DOM id of the form `chart_NNNNNN` to `chart.parent`.
Returns `nothing`.
"""
function randomize_parent(chart::DCChart)
    chart.parent = @sprintf("chart_%06d", rand(0:999999))
    # Fix: the original ended with the bare expression `Union{}`, returning
    # the bottom *type* object — almost certainly a typo for `nothing`.
    return nothing
end
"""
    write(io::IO, chart::DCChart, indent::Int)

Emit the JavaScript declaration for `chart`: a `var <parent> = dc.<type>("#<parent>")`
statement chained with `.dimension`, `.group` and every non-empty attribute.
"""
function Base.write(io::IO, chart::DCChart, indent::Int)
    pad = " "^indent
    # TODO: add chart grouping
    println(io, pad, "var ", chart.parent, " = dc.", chart.typ.concreteName, "(\"#", chart.parent, "\")")
    println(io, pad, " .dimension(", chart.group.dim.name, ")")
    print(io, pad, " .group(", chart.group.name, ")")
    # Chain only the attributes that were actually set.
    for attr in get_all_attributes(chart.typ)
        isempty(attr.value) && continue
        print(io, "\n", pad, " .", string(attr.name), "(", attr.value, ")")
    end
    println(io, ";")
end
# DC Widget
"""
A DC.js widget (data count / data table): chart type plus the HTML snippet it
renders into, the table columns, and whether table rows are grouped.
"""
mutable struct DCWidget
    typ::ChartType
    parent::String
    html::String
    columns::Vector{Symbol}
    group_results::Bool

    function DCWidget(typ::ChartType,
                      columns::Vector{Symbol} = Symbol[],
                      group_results::Bool = false,
                      html::String = "",
                      parent::String = @sprintf("chart_%06d", rand(0:999999)))
        return new(typ, parent, html, columns, group_results)
    end
end
# Inelegant way to handle the two types of widgets. Change if time permits.
"""
    randomize_parent(widget::DCWidget)

Assign a fresh random DOM id to `widget.parent` and regenerate `widget.html`
to match: a record-count `<div>` for "dataCount" widgets, a `<table>` skeleton
(optionally hiding dc.js group rows) for "dataTable" widgets. Returns `nothing`.
"""
function randomize_parent(widget::DCWidget)
    widget.parent = @sprintf("chart_%06d", rand(0:999999))
    if widget.typ.concreteName == "dataCount"
        # The filter-count/total-count spans are filled in by dc.js at render time.
        widget.html = string("<div id=\"", widget.parent, """\"><span class="filter-count"></span> selected out of <span class="total-count"></span> records</div>""")
    elseif widget.typ.concreteName == "dataTable"
        html_str = IOBuffer()
        if !widget.group_results
            # Hide the dc.js group header rows when results are not grouped.
            write(html_str, "
<style>
#", widget.parent, " .dc-table-group{display:none}
</style>")
        end
        write(html_str, "
<table class=\"table table-hover\" id=\"", widget.parent, "\">
<thead>
<tr>")
        for key in widget.columns
            write(html_str, "<th>", key, "</th>")
        end
        write(html_str, "</tr>
</thead>
</table>")
        widget.html = String(take!(html_str))
    end
    # Fix: the original ended with the bare expression `Union{}` (the bottom
    # type object) — almost certainly a typo for `nothing`.
    return nothing
end
"""
    write(io::IO, chart::DCWidget, indent::Int)

Emit the JavaScript declaration for the widget: a
`var <parent> = dc.<type>("#<parent>")` statement chained with every non-empty
attribute (no `.dimension`/`.group` lines; widgets carry those as attributes).
"""
function Base.write(io::IO, chart::DCWidget, indent::Int)
    pad = " "^indent
    # TODO: add chart grouping
    print(io, pad, "var ", chart.parent, " = dc.", chart.typ.concreteName, "(\"#", chart.parent, "\")")
    for attr in get_all_attributes(chart.typ)
        isempty(attr.value) && continue
        print(io, "\n", pad, " .", string(attr.name), "(", attr.value, ")")
    end
    println(io, ";")
end
"""
can_infer_chart(arr::AbstractVector)
Whether chart inference is supported for the given array type.
"""
isna(x) = isa(x, Missing)
can_infer_chart(arr::AbstractVector) = !any(isna, arr)
can_infer_chart(arr::AbstractVector{I}) where {I<:Integer} = !any(isna, arr)
can_infer_chart(arr::AbstractVector{F}) where {F<:AbstractFloat} = !any(isna, arr) && !any(isinf, arr) && !any(isnan, arr)
can_infer_chart(arr::AbstractVector{S}) where {S<:AbstractString} = !any(isna, arr)
"""
infer_chart(arr::AbstractVector, group::Group)
Constructs a Chart suitable for the type in arr.
"""
infer_chart(arr::AbstractVector{R}, group::Group) where {R<:Real} = barchart(arr, group)
infer_chart(arr::AbstractVector{S}, group::Group) where {S<:AbstractString} = piechart(arr, group)
"""
    scale_default(arr::AbstractVector{<:Real})

Return a d3 linear-scale snippet whose domain spans `arr`, with the lower
bound rounded down and the upper bound rounded up to integers.
"""
function scale_default(arr::AbstractVector{R}) where R<:Real
    lo = floor(Int, minimum(arr))
    hi = ceil(Int, maximum(arr))
    return "d3.scale.linear().domain([$lo,$hi])"
end
"""
    size_default!(chart::ChartType)

Apply the shared default chart footprint (300 × 225 px) to `chart` in place.
"""
function size_default!(chart::ChartType)
    chart[:width] = "300.0"
    chart[:height] = "225.0"
end
"""
barchart
Infer construction of a DC barchart based on the given group.
"""
function barchart(arr::AbstractVector{I}, group::Group) where I<:Integer
chart = deepcopy(BarChart)
size_default!(chart)
chart[:x] = scale_default(arr)
chart[:xUnits] = "dc.units.fp.precision(.0)"
DCChart(chart, group)
end
function barchart(arr::AbstractVector{F}, group::Group) where F<:AbstractFloat
chart = deepcopy(BarChart)
size_default!(chart)
chart[:centerBar] = "true"
chart[:x] = scale_default(arr)
chart[:xUnits] = "dc.units.fp.precision($(group.dim.bin_width))"
DCChart(chart, group)
end
"""
piechart
Infer construction of a DC piechart based on the given group.
"""
function piechart(arr::AbstractVector{S}, group::Group) where S<:AbstractString
chart = deepcopy(PieChart)
size_default!(chart)
chart[:radius] = string(parse(Float64, chart[:height].value)*0.4)
chart[:slicesCap] = "10"
DCChart(chart, group)
end
function piechart(arr::AbstractVector{I}, group::Group) where I<:Integer
chart = deepcopy(PieChart)
size_default!(chart)
DCChart(chart, group)
end
"""
linechart
Infer construction of a DC linechart based on the given group.
"""
function linechart(arr::AbstractVector{I}, group::Group) where I<:Integer
chart = deepcopy(LineChart)
size_default!(chart)
chart[:x] = scale_default(arr)
chart[:xUnits] = "dc.units.fp.precision(.0)"
DCChart(chart, group)
end
function linechart(arr::AbstractVector{F}, group::Group) where F<:AbstractFloat
chart = deepcopy(LineChart)
size_default!(chart)
chart[:x] = scale_default(arr)
chart[:xUnits] = "dc.units.fp.precision($(group.dim.bin_width))"
DCChart(chart, group)
end
"""
bubblechart
Construct an empty custom DC bubblechart.
"""
function bubblechart(group::Group)
chart = deepcopy(BubbleChart)
size_default!(chart)
DCChart(chart, group)
end
"""
    _generate_accessor(col::Symbol)

Return a JavaScript accessor for a reduced group value: `:DCCount` maps to the
record count, any other column maps to its `_sum` aggregate.
"""
function _generate_accessor(col::Symbol)
    return col == :DCCount ?
        "function (d) { return d.value.DCCount;}" :
        "function (d) { return d.value.$(col)_sum;}"
end
"""
bubbleChart
Construct a bubblechart using the given master grouping and sums given by
`x_col`, `y_col`, and `r_col` for x position, y position, and radius.
"""
function bubblechart(group::Group, x_col::Symbol, y_col::Symbol, r_col::Symbol)
chart = deepcopy(BubbleChart)
size_default!(chart)
chart[:width] = string(parse(Float64, chart[:width].value) * 1)
chart[:x] = "d3.scale.linear().domain([0,150])" # Will get overriden by elasticX
chart[:elasticX] = "true"
chart[:elasticY] = "true"
chart[:elasticRadius] = "true"
chart[:xAxisPadding] = "50"
chart[:yAxisPadding] = "50"
chart[:keyAccessor] = _generate_accessor(x_col)
chart[:valueAccessor] = _generate_accessor(y_col)
chart[:radiusValueAccessor] = _generate_accessor(r_col)
chart[:maxBubbleRelativeSize] = "0.2"
DCChart(chart, group)
end
"""
rowchart
Infer construction of a DC rowchart based on the given group.
"""
function rowchart(arr::AbstractVector{S}, group::Group) where S<:AbstractString
chart = deepcopy(RowChart)
size_default!(chart)
DCChart(chart, group)
end
function rowchart(arr::AbstractVector{I}, group::Group) where I<:Integer
chart = deepcopy(RowChart)
size_default!(chart)
DCChart(chart, group)
end
"""
datacountwidget
Construct a DC DataCountWidget.
"""
function datacountwidget()
chart = deepcopy(DataCountWidget)
chart[:dimension] = "cf"
chart[:group] = "all"
dcwidget = DCWidget(chart)
randomize_parent(dcwidget)
dcwidget
end
"""
datatablewidget
Construct a DC DataTableWidget.
"""
function datatablewidget(col::Symbol, columns::Vector{Symbol}, group_results::Bool=false)
chart = deepcopy(DataTableWidget)
chart[:dimension] = string(col)
if group_results
chart[:group] = string("function(d) {return d.", col, ";}")
else
chart[:group] = """function(d) {return "Showing All Results";}"""
end
col_str = IOBuffer()
print(col_str, "[\n")
for key in columns
print(col_str, "function(d) {return d.", key, ";},\n")
end
print(col_str, "]")
chart[:columns] = String(take!(col_str))
chart[:size] = "15"
dcwidget = DCWidget(chart, columns, group_results)
randomize_parent(dcwidget)
dcwidget
end
function datatablewidget(columns::Vector{Symbol})
datatablewidget(columns[1], columns, false)
end
#=
# Chart types
DataCountWidget <: BaseChart
DataTableWidget <: BaseChart
BubbleChart <: AbstractBubbleChart, CoordinateGridChart
CompositeChart <: CoordinateGridChart
GeoCloroplethChart <: ColorChart, BaseChart
BubbleOverlayChart <: AbstractBubbleChart, BaseChart
RowChart <: ColorChart, BaseChart
Legend
NumberDisplay <: BaseChart
=#
| [
76,
18187,
2878,
3460,
4163,
198,
197,
3672,
3712,
13940,
23650,
198,
197,
8367,
3712,
10100,
198,
197,
33682,
7,
3672,
3712,
13940,
23650,
8,
796,
649,
7,
3672,
11,
366,
4943,
198,
437,
198,
9979,
15697,
62,
1404,
5446,
9865,
37780,
... | 2.539846 | 4,944 |
"""
    _make_spectogram_gui()

Build the spectrogram popup menu (Gtk): a "Frequency Range" submenu with five
options and a "Window Size" submenu with three options. ("Window Overlap" is
added as a top-level item but gets no submenu here.)

Returns `(spect_w_handles, spect_f_handles, popupmenu_spect)` — note the
window handles come FIRST; callers must unpack in that order.

NOTE: "spectogram" (sic) is the established name; keep it for API
compatibility.
"""
function _make_spectogram_gui()
    popupmenu_spect = Menu()
    popupmenu_spect_freq = MenuItem("Frequency Range")
    popupmenu_spect_win = MenuItem("Window Size")
    popupmenu_spect_overlap = MenuItem("Window Overlap")
    push!(popupmenu_spect,popupmenu_spect_freq)
    push!(popupmenu_spect,popupmenu_spect_win)
    push!(popupmenu_spect,popupmenu_spect_overlap)

    # Frequency-range submenu.
    popupmenu_spect_freq_select=Menu(popupmenu_spect_freq)
    # Pre-0.7 Julia spelled zero-length typed arrays differently.
    if VERSION > v"0.7-"
        spect_f_handles=Array{MenuItemLeaf}(undef,0)
    else
        spect_f_handles=Array{MenuItemLeaf}(0)
    end
    spect_f_options=[300; 1000; 3000; 7500; 15000]  # Hz
    push!(spect_f_handles,MenuItem(string(spect_f_options[1])))
    push!(popupmenu_spect_freq_select,spect_f_handles[1])
    for i=2:5
        # MenuItem(previous, label): presumably chains the items into one
        # radio-style group — TODO confirm against the Gtk.jl docs.
        push!(spect_f_handles,MenuItem(spect_f_handles[i-1],string(spect_f_options[i])))
        push!(popupmenu_spect_freq_select,spect_f_handles[i])
    end

    # Window-size submenu (values in units of 10 ms: 10/50/100).
    popupmenu_spect_win_select=Menu(popupmenu_spect_win)
    if VERSION > v"0.7-"
        spect_w_handles=Array{MenuItemLeaf}(undef,0)
    else
        spect_w_handles=Array{MenuItemLeaf}(0)
    end
    spect_w_options=[10; 50; 100]
    push!(spect_w_handles,MenuItem(string(spect_w_options[1])))
    push!(popupmenu_spect_win_select,spect_w_handles[1])
    for i=2:3
        push!(spect_w_handles,MenuItem(spect_w_handles[i-1],string(spect_w_options[i])))
        push!(popupmenu_spect_win_select,spect_w_handles[i])
    end

    Gtk.showall(popupmenu_spect)
    (spect_w_handles,spect_f_handles, popupmenu_spect)
end
"""
    add_spect_callbacks(spect_f_handles, spect_w_handles, handles)

Wire the "activate" signal of every frequency-range and window-size menu item
to its callback. Each callback receives `(handles, i-1)`: the 0-based menu
index is the event id that `spect_popup_freq_cb`/`spect_popup_win_cb` decode.
Returns `nothing`.
"""
function add_spect_callbacks(spect_f_handles,spect_w_handles,handles)
    # Generalized from hard-coded `1:5` / `1:3` so menus of any length work.
    for i in eachindex(spect_f_handles)
        signal_connect(spect_popup_freq_cb,spect_f_handles[i],"activate",Void,(),false,(handles,i-1))
    end
    for i in eachindex(spect_w_handles)
        signal_connect(spect_popup_win_cb,spect_w_handles[i],"activate",Void,(),false,(handles,i-1))
    end
    nothing
end
"""
    prepare_spectrogram(ctx, han)

Draw the static axis labels and ticks for the spectrogram panel onto the Cairo
context `ctx`: a time axis ("-1.0" … "0.0" seconds) along the bottom and a
rotated frequency axis ("0.0" up to `f_max*f_div/2` Hz) on the left.
Returns `nothing`.
"""
function prepare_spectrogram(ctx,han)
    myheight=height(ctx)
    mywidth=width(ctx)
    set_source_rgb(ctx,1.0,1.0,1.0)
    # Time-axis tick labels. (Fix: the original drew "-1" and then "-1.0" at
    # the same anchor, overprinting the first label; the redundant draw is
    # removed.)
    move_to(ctx,50.0,myheight-35)
    show_text(ctx,"-1.0")
    move_to(ctx,(mywidth-50.0)/2+50,myheight-35)
    show_text(ctx,"-0.5")
    move_to(ctx,mywidth-20.0,myheight-35)
    show_text(ctx,"0.0")
    move_to(ctx,mywidth/2,myheight-20)
    show_text(ctx,"Time (s)")
    # Frequency axis: each label is drawn rotated 90° CCW, and the transform
    # is reset with identity_matrix afterwards.
    move_to(ctx,10.0,myheight-140)
    rotate(ctx,-pi/2)
    show_text(ctx,"Frequency")
    identity_matrix(ctx)
    move_to(ctx,35.0,myheight-50.0)
    rotate(ctx,-pi/2)
    show_text(ctx,"0.0")
    identity_matrix(ctx)
    move_to(ctx,35.0,myheight-50-125)
    rotate(ctx,-pi/2)
    show_text(ctx,string(round(Int64,han.spect.f_max*han.spect.f_div/2)))
    identity_matrix(ctx)
    nothing
end
"""
    draw_spectrogram(rhd::RHD2000, han::Gui_Handles)

Update the rolling one-second sample buffer with the newest data block,
recompute the spectrogram, map it through a jet colormap, and blit the pixels
directly into the canvas's Cairo image surface. Returns `nothing`.
"""
function draw_spectrogram(rhd::RHD2000,han::Gui_Handles)
    ctx=Gtk.getgc(han.c)
    x=Gtk.cairo_surface(han.c)
    # Raw pointer to the surface's pixel buffer, treated as packed 32-bit
    # 0x00RRGGBB words (assumes a 32-bit Cairo image surface — TODO confirm
    # the surface format and row stride).
    data = ccall((:cairo_image_surface_get_data,Cairo._jl_libcairo),Ptr{UInt32},(Ptr{Void},),x.ptr)
    c_h=round(Int64,height(ctx))
    c_w=round(Int64,width(ctx))-50

    # Shift the rolling buffer left by one data block so the newest
    # SAMPLES_PER_DATA_BLOCK samples can be appended at the end.
    for i=(size(rhd.v,1)+1):rhd.sr
        han.v_s[i-SAMPLES_PER_DATA_BLOCK] = han.v_s[i]
    end
    # Append the newest block for the selected channel, from either the LFP
    # stream or the raw voltage stream depending on the band widget state.
    count=1
    if han.band_widgets.lfp_en[han.sc.spike]
        for i=(rhd.sr-SAMPLES_PER_DATA_BLOCK+1):rhd.sr
            han.v_s[i] = rhd.lfps[count,han.sc.spike]
            count+=1
        end
    else
        for i=(rhd.sr-SAMPLES_PER_DATA_BLOCK+1):rhd.sr
            han.v_s[i] = rhd.v[count,han.sc.spike]
            count+=1
        end
    end

    # Recompute log-power into han.spect.out.
    plot_spectrogram(han.v_s,rhd.sr,han.spect)

    # Scale factors from spectrogram bins to on-screen pixels (the display
    # area is 250 px tall and c_w px wide).
    in_w = han.spect.t_max
    in_h = han.spect.f_max
    scale_w = c_w / in_w
    scale_h = 250 / in_h

    # Normalize the log-power range to colormap indices 1..255 (values at or
    # above the maximum clamp to 255) and pack jet RGB into 0x00RRGGBB words.
    mymin=minimum(han.spect.out)
    mymax=maximum(han.spect.out)
    myrange=mymax-mymin
    rgb_mat = zeros(UInt32,in_h,in_w)
    for h=1:in_h
        for w=1:in_w
            startround = (han.spect.out[h,w]-mymin)/myrange*255
            myinput::UInt8 = (startround >= 255) ? 255 : floor(UInt8,startround)+1
            myblue=jet_b[myinput]
            mygreen=jet_g[myinput]
            myred=jet_r[myinput]
            rgb_mat[h,w] = 0
            rgb_mat[h,w] |= myblue          # blue in bits 0-7
            rgb_mat[h,w] |= (mygreen << 8)  # green in bits 8-15
            rgb_mat[h,w] |= (myred << 16)   # red in bits 16-23
        end
    end

    # Nearest-neighbor upscale into the surface. The linear index assumes a
    # row stride of c_w+50 (= full canvas width) and a 50 px margin on the
    # left and bottom — presumably matching prepare_spectrogram's axes; the
    # writes bypass bounds checking, so the geometry must be confirmed if the
    # layout changes.
    for h=1:250
        for w=1:c_w
            val = rgb_mat[ceil(Int,h/scale_h),ceil(Int,w/scale_w)]
            unsafe_store!(data,val,(c_h-h-50)*(c_w+50)+w+50)
        end
    end
    nothing
end
"""
    plot_spectrogram(s, fs, spect)

Compute a Hann-windowed spectrogram of the signal `s` (sample rate `fs`) with
the window width/overlap configured in `spect`, and store the base-10 log of
the power into `spect.out[1:f_max, 1:t_max]`. Returns `nothing`.
"""
function plot_spectrogram(s,fs,spect)
    S = spectrogram(s,spect.win_width_s,spect.win_overlap_s; fs=fs, window=hanning)
    pwr = power(S)
    # Column-major-friendly order; only the displayed block is written.
    for j=1:spect.t_max, i=1:spect.f_max
        spect.out[i,j] = log10(pwr[i,j])
    end
    nothing
end
"""
    spect_popup_freq_cb(widget, user_data)

Menu callback: decode the 0-based menu index in `user_data` into a maximum
displayed frequency (Hz) and rebuild `han.spect` with it, keeping the current
window width and overlap. Returns `nothing`.
"""
function spect_popup_freq_cb(widget::Ptr,user_data::Tuple{Gui_Handles,Int64})
    han, event_id = user_data
    # Index -> f_max; any unknown index falls back to the full 15 kHz range,
    # exactly as the original if/elseif chain did.
    f_max = get(Dict(0 => 300, 1 => 1000, 2 => 3000, 3 => 7500), event_id, 15000)
    han.spect = Spectrogram(han.spect.fs; win_width_t = han.spect.win_width_t, win_overlap_t = han.spect.win_overlap_t, f_max = f_max)
    nothing
end
function spect_popup_win_cb(widget::Ptr,user_data::Tuple{Gui_Handles,Int64})
han, event_id = user_data
if event_id == 0
mywin = .01
elseif event_id == 1
mywin = .05
else
mywin = .1
end
han.spect = Spectrogram(han.spect.fs; win_width_t = mywin, win_overlap_t = han.spect.win_overlap_t, f_max = han.spect.f_max*han.spect.f_div)
nothing
end
| [
198,
8818,
4808,
15883,
62,
4443,
21857,
62,
48317,
3419,
628,
220,
220,
220,
46207,
26272,
62,
4443,
796,
21860,
3419,
198,
220,
220,
220,
46207,
26272,
62,
4443,
62,
19503,
80,
796,
21860,
7449,
7203,
37,
28707,
13667,
4943,
198,
22... | 1.949548 | 2,874 |
<reponame>CliMA/OceanModelComparison.jl<filename>unstable_bickley/periodic/Analysis.jl
module Analysis
using Oceananigans
using JLD2
using Plots
using Oceananigans.Grids
using Oceananigans.Fields: offset_data
import Oceananigans.Fields: Field
Field(loc::Tuple, grid::AbstractGrid, raw_data::Array) = Field(loc, CPU(), grid, nothing, offset_data(raw_data, grid, loc))
field_from_file(file, loc, name, iter) = Field(loc, file["serialized/grid"], file["timeseries/$name/$iter"])
CellFileField(file, name, iter) = Field((Cell, Cell, Cell), file["serialized/grid"], file["timeseries/$name/$iter"])
XFaceFileField(file, name, iter) = Field((Face, Cell, Cell), file["serialized/grid"], file["timeseries/$name/$iter"])
YFaceFileField(file, name, iter) = Field((Cell, Face, Cell), file["serialized/grid"], file["timeseries/$name/$iter"])
max_abs_tracer(file, iter) = maximum(abs, interior(CellFileField(file, :c, iter)))
tracer_variance(file, iter) = 1/2 * sum(interior(CellFileField(file, :c, iter)).^2)
kinetic_energy(file, iter) = 1/2 * (sum(interior(XFaceFileField(file, :u, iter)).^2) +
sum(interior(YFaceFileField(file, :v, iter)).^2))
function oceananigans_statistics(filename)
file = jldopen(filename)
iterations = parse.(Int, keys(file["timeseries/t"]))
t = [ file["timeseries/t/$iter"] for iter in iterations]
e = [ kinetic_energy(file, iter) for iter in iterations]
χ = [ tracer_variance(file, iter) for iter in iterations]
c★ = [ max_abs_tracer(file, iter) for iter in iterations]
close(file)
Δχ = χ ./ χ[1]
Δe = e ./ e[1]
return t, Δe, Δχ, c★
end
function plot_resolutions(t, data, resolutions; kwargs...)
plt = plot(t[1], data[1]; label = "N² = $(resolutions[1])²", kwargs...)
for i = 2:length(data)
plot!(plt, t[i], data[i]; label = "N² = $(resolutions[i])²", kwargs...)
end
return plt
end
end # module
| [
27,
7856,
261,
480,
29,
2601,
72,
5673,
14,
46607,
17633,
50249,
1653,
13,
20362,
27,
34345,
29,
403,
31284,
62,
65,
624,
1636,
14,
41007,
291,
14,
32750,
13,
20362,
198,
21412,
14691,
198,
198,
3500,
10692,
272,
34090,
198,
3500,
4... | 2.555556 | 756 |
using BenchmarkTools
BenchmarkTools.DEFAULT_PARAMETERS.samples = 100
function compute(file::String)::Int
chars = Dict(Char(Int('A') + i) => i + 1 for i ∈ 0:25)
return sum(i * chars[c] for (i, name) ∈ enumerate(sort(split(replace(file, "\"" => ""), ","))) for c ∈ name)
end
file = read("problems/0022/p022_names.txt", String)
compute(file)
@benchmark compute(file) | [
3500,
25187,
4102,
33637,
198,
44199,
4102,
33637,
13,
7206,
38865,
62,
27082,
2390,
2767,
4877,
13,
82,
12629,
796,
1802,
198,
198,
8818,
24061,
7,
7753,
3712,
10100,
2599,
25,
5317,
198,
220,
220,
220,
34534,
796,
360,
713,
7,
12441... | 2.533784 | 148 |
"""
data_harvest(all=true, paper=false)
Makes all the figures from the paper for the notebook.
This function calls PlotData to make the figures one at a time.
"""
function data_harvest(all=true, paper=false)
cvals = [.5, .99, 1]
itvals = [10, 10, 30]
level=5
for ic=1:3
figure(ic)
PlotData(cvals[ic]; ptitle=~paper)
end
if all
for ic=1:3
figure(ic+3)
PlotData(cvals[ic]; half=true, ptitle=~paper)
end
end
end
"""
PlotData(c; half=false, level=5, ptitle=true)
Generates a figure from the paper using the precomputed data from
data_populate.
"""
function PlotData(c; half=false, level=5, ptitle=true)
if half
nfig=2
T=Float16
TI=[Float16]
leadtitle="Half Precision: c = "
else
nfig=4
T=Float64
TI=[Float64, Float32]
leadtitle="Single and Double Precision: c = "
end
ftitle=string(leadtitle,string(c))
c==1.0 ? (nits=31) : (nits=11)
maxit=nits-1
#aymin=1.e-15
aymin=ymin(c,half)
Datain=zeros(nits,level,nfig)
fname=Fname(c,T)
readmpdata(fname, Datain)
for ic=1:nfig
for iz = 1:level
Datain[:,iz,ic] .= Datain[:,iz,ic]./Datain[1,iz,ic]
end
end
for T in TI
PlotHist(Datain, level, maxit, aymin, T)
end
if ptitle
PyPlot.suptitle(ftitle)
end
end
function ymin(c, half)
ymin = 1.e-15
if half
if c == 1.0
ymin = 1.e-6
else
ymin = 1.e-10
end
end
return ymin
end
function PlotHist(DataC::Array{Float64,3}, pmax, maxit, aymin, T)
fmtplot = ("k-", "k--", "k-.", "k:", "k>:")
gxlabel = "Nonlinear Iterations"
gylabel = L"$|| F ||/||F_0||$"
if T==Float16
b=1
pstart=1
else
b=2
pstart = (T==Float32)*3 + (T==Float64)*1
end
subplot(b,2,pstart)
for ip=1:pmax
semilogy(0:maxit,DataC[:,ip,pstart],fmtplot[ip]);
axis([0.0, maxit, aymin, 1.0])
if pstart == 1
legend(["1024", "2048", "4096", "8192", "16384"])
end
title(string(prestring(T),",", "analytic Jacobian"))
xlabel(gxlabel)
ylabel(gylabel)
end
subplot(b,2,pstart+1)
for ip=1:pmax
semilogy(0:maxit,DataC[:,ip,pstart+1],fmtplot[ip])
title(string(prestring(T),",", "finite difference Jacobian"))
xlabel(gxlabel)
if T != Float16
ylabel(gylabel)
end
axis([0.0, maxit, aymin, 1.0])
end
end
function prestring(T)
if T==Float64
pstr="Double precision"
elseif T==Float32
pstr="Single precision"
else
pstr="Half precision"
end
return pstr
end
| [
37811,
198,
7890,
62,
9869,
4223,
7,
439,
28,
7942,
11,
3348,
28,
9562,
8,
198,
44,
1124,
477,
262,
5538,
422,
262,
3348,
329,
262,
20922,
13,
198,
1212,
2163,
3848,
28114,
6601,
284,
787,
262,
5538,
530,
379,
257,
640,
13,
198,
... | 2.188984 | 1,053 |
export ft_getstatus
function ft_getstatus(ft_handle::Culong)
amountinrxqueue = Ref{Cuint}()
amountintxqueue = Ref{Cuint}()
eventstatus = Ref{Cuint}()
ft_status = ccall((:FT_GetStatus, d2xx),
Cuint,
(Culong,Ref{Cuint},Ref{Cuint},Ref{Cuint}),
ft_handle, amountinrxqueue, amountintxqueue, eventstatus)
checkstatus(ft_status)
return (convert(Int32,amountinrxqueue[]),
convert(Int32,amountintxqueue[]),convert(Int32,eventstatus[]))
end
| [
39344,
10117,
62,
1136,
13376,
198,
198,
8818,
10117,
62,
1136,
13376,
7,
701,
62,
28144,
3712,
34,
377,
506,
8,
198,
220,
2033,
259,
40914,
36560,
796,
6524,
90,
34,
28611,
92,
3419,
198,
220,
2033,
600,
87,
36560,
796,
6524,
90,
... | 2.154167 | 240 |
<reponame>BioTurboNick/LocalizationMicroscopy.jl<filename>src/plotlocalizations.jl
"""
localizationsplot(localizations::Vector{Localizations}, color::Symbol)
localizationsplot(localizations::Vector{Vector{Localization}}, colors::Vector{Symbol})
Plot localizations by spatial coordinates. Assumes that the y-coordinate is inverted. Defaults to 0-to-40960 on each
axis, but option may be specified using keyword arguments xlims=(x1, x2) and ylims=(y1, y2). Colors are specified in the
documentation for Plots.jl. Can tune opacity with opacity= keword argument. factor= can be tuned for display in the pane
or for saving and printing (recommended 4, default 1).
"""
plotlocalizations(locs::Vector{Localization}, color::Symbol; kargs...) =
plotlocalizations([locs], [color]; kargs...)
function plotlocalizations(localizations::Vector{Vector{Localization}}, colors::Vector{Symbol};
xlims=(0, 40960), ylims=(0,40960), opacity=0.75, factor=1)
channelcount = length(localizations)
plot(framestyle=:none, size=((1024,1024) .* factor))
for i ∈ 1:channelcount
coords = localizations[i] |> extractcoordinates
scatter!(coords[1,:], coords[2,:], marker=(1 * factor, stroke(0), opacity, colors[i]))
end
plot!(aspect_ratio=:equal, xlims=xlims, yaxis=(ylims, :flip), legend=:none, grid=:hide, ticks=(0))
end
| [
27,
7856,
261,
480,
29,
42787,
17483,
2127,
23609,
14,
14565,
1634,
13031,
1416,
11081,
13,
20362,
27,
34345,
29,
10677,
14,
29487,
12001,
4582,
13,
20362,
198,
37811,
198,
220,
220,
220,
1957,
4582,
29487,
7,
12001,
4582,
3712,
38469,
... | 2.977876 | 452 |
<reponame>JuliaGPU/VulkanAbstraction
include("prewrap/bitmasks.jl")
include("prewrap/types.jl")
include("prewrap/handles.jl")
include("prewrap/pointers.jl")
include("prewrap/conversions.jl")
include("prewrap/errors.jl")
include("prewrap/spirv.jl")
| [
27,
7856,
261,
480,
29,
16980,
544,
33346,
14,
53,
31263,
4826,
301,
7861,
198,
17256,
7203,
79,
1809,
2416,
14,
2545,
5356,
591,
13,
20362,
4943,
198,
17256,
7203,
79,
1809,
2416,
14,
19199,
13,
20362,
4943,
198,
17256,
7203,
79,
1... | 2.556701 | 97 |
<filename>src/streamers.jl
struct StreamerInfo
streamer
dependencies
end
struct Streamers
tkey::TKey
refs::Dict{Int32, Any}
elements::Vector{StreamerInfo}
end
Base.length(s::Streamers) = length(s.elements)
function Base.show(io::IO, s::Streamers)
for streamer_info in s.elements
println(io, "$(streamer_info.streamer.fName)")
# streamer = streamer_info.streamer
# print(io, "$(streamer.fName): fType = $(streamer.fType), ")
# print(io, "fTypeName: $(streamer.fTypeName)")
end
end
# Structures required to read streamers
struct TStreamerInfo
fName
fTitle
fCheckSum
fClassVersion
fElements
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TStreamerInfo})
preamble = Preamble(io, T)
fName, fTitle = nametitle(io)
fCheckSum = readtype(io, UInt32)
fClassVersion = readtype(io, Int32)
fElements = readobjany!(io, tkey, refs)
endcheck(io, preamble)
T(fName, fTitle, fCheckSum, fClassVersion, fElements)
end
safename(s::AbstractString) = replace(s, "::" => "_3a3a_")
function initialise_streamer(s::StreamerInfo)
# FIXME Abstract is not needed when switched to autogenerated streamers
base = Symbol(safename(s.streamer.fName))
supername = Symbol(:Abstract, base)
if !isdefined(@__MODULE__, supername)
@debug "Defining abstract type $supername"
@eval abstract type $(supername) <: ROOTStreamedObject end
end
name = Symbol(base, Symbol("_$(s.streamer.fClassVersion)"))
if !isdefined(@__MODULE__, name)
@debug " creating versioned struct '$name <: $supername'"
@eval struct $(name) <: $(supername) end
# FIXME create the stream!() functions somewhere here...
# println(name)
# @eval struct $name <: ROOTStreamedObject
# data::Dict{Symbol, Any}
# end
else
@debug "Not defining $name since it already has a bootstrapped version"
end
end
"""
function Streamers(io)
Reads all the streamers from the ROOT source.
"""
function Streamers(io)
refs = Dict{Int32, Any}()
start = position(io)
tkey = unpack(io, TKey)
if iscompressed(tkey)
@debug "Compressed stream at $(start)"
seekstart(io, tkey)
compression_header = unpack(io, CompressionHeader)
#FIXME for some reason we need to re-pack such that it ends at exact bytes.
skipped = position(io) - start
# notice our `TKey` size is not the same as official TKey, can't use sizeof()
io_buf = IOBuffer(read(io, tkey.fNbytes - skipped))
if String(compression_header.algo) == "ZL"
stream = IOBuffer(read(ZlibDecompressorStream(io_buf), tkey.fObjlen))
elseif String(compression_header.algo) == "XZ"
stream = IOBuffer(read(XzDecompressorStream(io_buf), tkey.fObjlen))
elseif String(compression_header.algo) == "L4"
skip(io_buf, 8) #skip checksum
stream = IOBuffer(lz4_decompress(read(io_buf), tkey.fObjlen))
else
error("Unsupported compression type '$(String(compression_header.algo))'")
end
else
@debug "Unompressed stream at $(start)"
stream = io
end
preamble = Preamble(stream, Streamers)
skiptobj(stream)
name = readtype(stream, String)
size = readtype(stream, Int32)
streamer_infos = Vector{StreamerInfo}()
@debug "Found $size streamers, continue with parsing."
for i ∈ 1:size
obj = readobjany!(stream, tkey, refs)
if typeof(obj) == TStreamerInfo
@debug " processing streamer info for '$(obj.fName)' (v$(obj.fClassVersion))"
@debug " number of dependencies: $(length(obj.fElements.elements))"
dependencies = Set()
for element in obj.fElements.elements
if typeof(element) == TStreamerBase
@debug " + adding dependency '$(element.fName)'"
push!(dependencies, element.fName)
else
@debug " - skipping dependency '$(element.fName)' with type '$(typeof(element))'"
end
end
@debug " => finishing dependency readout for: $(obj.fName)"
push!(streamer_infos, StreamerInfo(obj, dependencies))
else
@debug " not a TStreamerInfo but '$(typeof(obj))', skipping."
end
# FIXME why not just skip a byte?
skip(stream, readtype(stream, UInt8))
end
endcheck(stream, preamble)
streamer_infos = topological_sort(streamer_infos)
for streamer_info in streamer_infos
initialise_streamer(streamer_info)
end
Streamers(tkey, refs, streamer_infos)
end
"""
function topological_sort(streamer_infos)
Sort the streamers with respect to their dependencies and keep only those
which are not defined already.
The implementation is based on https://stackoverflow.com/a/11564769/1623645
"""
function topological_sort(streamer_infos)
@debug "Starting topological sort of streamers"
provided = Set{String}()
sorted_streamer_infos = []
while length(streamer_infos) > 0
remaining_items = []
emitted = false
@debug " number of remaining streamers to sort: $(length(streamer_infos))"
for streamer_info in streamer_infos
# if all(d -> isdefined(@__MODULE__, Symbol(d)) || d ∈ provided, streamer_info.dependencies)
# if !isdefined(@__MODULE__, Symbol(streamer_info.streamer.fName)) && aliasfor(streamer_info.streamer.fName) === nothing
@debug " processing '$(streamer_info.streamer.fName)' with $(length(streamer_info.dependencies))' dependencies"
if length(streamer_infos) == 1 || all(d -> d ∈ provided, streamer_info.dependencies)
if aliasfor(streamer_info.streamer.fName) === nothing
push!(sorted_streamer_infos, streamer_info)
end
push!(provided, streamer_info.streamer.fName)
emitted = true
else
push!(remaining_items, streamer_info)
end
end
if !emitted
for streamer_info in streamer_infos
filter!(isequal(streamer_info), remaining_items)
end
end
streamer_infos = remaining_items
end
@debug "Finished the topological sort of streamers"
sorted_streamer_infos
end
"""
function readobjany!(io, tkey::TKey, refs)
The main entrypoint where streamers are parsed and cached for later use.
The `refs` dictionary holds the streamers or parsed data which are reused
when already available.
"""
function readobjany!(io, tkey::TKey, refs)
beg = position(io) - origin(tkey)
bcnt = readtype(io, UInt32)
if Int64(bcnt) & Const.kByteCountMask == 0 || Int64(bcnt) == Const.kNewClassTag
# New class or 0 bytes
version = 0
start = 0
tag = bcnt
bcnt = 0
else
version = 1
start = position(io) - origin(tkey)
tag = readtype(io, UInt32)
end
if Int64(tag) & Const.kClassMask == 0
# reference object
if tag == 0
return missing
elseif tag == 1
error("Returning parent is not implemented yet")
elseif !haskey(refs, tag)
# skipping
seek(io, origin(tkey) + beg + bcnt + 4)
return missing
else
return refs[tag]
end
elseif tag == Const.kNewClassTag
cname = readtype(io, CString)
streamer = getfield(@__MODULE__, Symbol(cname))
if version > 0
refs[start + Const.kMapOffset] = streamer
else
refs[length(refs) + 1] = streamer
end
obj = unpack(io, tkey, refs, streamer)
if version > 0
refs[beg + Const.kMapOffset] = obj
else
refs[length(refs) + 1] = obj
end
return obj
else
# reference class, new object
ref = Int64(tag) & ~Const.kClassMask
haskey(refs, ref) || error("Invalid class reference.")
streamer = refs[ref]
obj = unpack(io, tkey, refs, streamer)
if version > 0
refs[beg + Const.kMapOffset] = obj
else
refs[length(refs) + 1] = obj
end
return obj
end
end
struct TList
preamble
name
size
objects
end
Base.length(l::TList) = length(l.objects)
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TList})
preamble = Preamble(io, T)
skiptobj(io)
name = readtype(io, String)
size = readtype(io, Int32)
objects = []
for i ∈ 1:size
push!(objects, readobjany!(io, tkey, refs))
skip(io, readtype(io, UInt8))
end
endcheck(io, preamble)
TList(preamble, name, size, objects)
end
struct TObjArray
name
low
elements
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TObjArray})
preamble = Preamble(io, T)
skiptobj(io)
name = readtype(io, String)
size = readtype(io, Int32)
low = readtype(io, Int32)
elements = Vector{Any}(undef, size)
for i in 1:size
ele = readobjany!(io, tkey, refs)
# !ismissing(ele) && @show ele.fName
elements[i] = ele
end
endcheck(io, preamble)
return TObjArray(name, low, elements)
end
abstract type AbstractTStreamerElement end
@premix @with_kw mutable struct TStreamerElementTemplate
version
fOffset
fName
fTitle
fType
fSize
fArrayLength
fArrayDim
fMaxIndex
fTypeName
fXmin
fXmax
fFactor
end
@TStreamerElementTemplate mutable struct TStreamerElement end
@pour initparse begin
fields = Dict{Symbol, Any}()
end
function parsefields!(io, fields, T::Type{TStreamerElement})
preamble = Preamble(io, T)
fields[:version] = preamble.version
fields[:fOffset] = 0
fields[:fName], fields[:fTitle] = nametitle(io)
fields[:fType] = readtype(io, Int32)
fields[:fSize] = readtype(io, Int32)
fields[:fArrayLength] = readtype(io, Int32)
fields[:fArrayDim] = readtype(io, Int32)
n = preamble.version == 1 ? readtype(io, Int32) : 5
fields[:fMaxIndex] = [readtype(io, Int32) for _ in 1:n]
fields[:fTypeName] = readtype(io, String)
if fields[:fType] == 11 && (fields[:fTypeName] == "Bool_t" || fields[:fTypeName] == "bool")
fields[:fType] = 18
end
fields[:fXmin] = 0.0
fields[:fXmax] = 0.0
fields[:fFactor] = 0.0
if preamble.version == 3
fields[:fXmin] = readtype(io, Float64)
fields[:fXmax] = readtype(io, Float64)
fields[:fFactor] = readtype(io, Float64)
end
endcheck(io, preamble)
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TStreamerElement})
@initparse
parsefields!(io, fields, T)
T(;fields...)
end
@TStreamerElementTemplate mutable struct TStreamerBase
fBaseVersion
end
function parsefields!(io, fields, T::Type{TStreamerBase})
preamble = Preamble(io, T)
parsefields!(io, fields, TStreamerElement)
fields[:fBaseVersion] = fields[:version] >= 2 ? readtype(io, Int32) : 0
endcheck(io, preamble)
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TStreamerBase})
@initparse
parsefields!(io, fields, T)
T(;fields...)
end
@TStreamerElementTemplate mutable struct TStreamerBasicType end
function parsefields!(io, fields, ::Type{TStreamerBasicType})
parsefields!(io, fields, TStreamerElement)
if Const.kOffsetL < fields[:fType] < Const.kOffsetP
fields[:fType] -= Const.kOffsetP
end
basic = true
if fields[:fType] ∈ (Const.kBool, Const.kUChar, Const.kChar)
fields[:fSize] = 1
elseif fields[:fType] in (Const.kUShort, Const.kShort)
fields[:fSize] = 2
elseif fields[:fType] in (Const.kBits, Const.kUInt, Const.kInt, Const.kCounter)
fields[:fSize] = 4
elseif fields[:fType] in (Const.kULong, Const.kULong64, Const.kLong, Const.kLong64)
fields[:fSize] = 8
elseif fields[:fType] in (Const.kFloat, Const.kFloat16)
fields[:fSize] = 4
elseif fields[:fType] in (Const.kDouble, Const.kDouble32)
fields[:fSize] = 8
elseif fields[:fType] == Const.kCharStar
fields[:fSize] = sizeof(Int)
else
basic = false
end
if basic && fields[:fArrayLength] > 0
fields[:fSize] *= fields[:fArrayLength]
end
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TStreamerBasicType})
@initparse
preamble = Preamble(io, T)
parsefields!(io, fields, T)
endcheck(io, preamble)
T(;fields...)
end
@TStreamerElementTemplate mutable struct TStreamerBasicPointer
fCountVersion
fCountName
fCountClass
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TStreamerBasicPointer})
@initparse
preamble = Preamble(io, T)
parsefields!(io, fields, TStreamerElement)
fields[:fCountVersion] = readtype(io, Int32)
fields[:fCountName] = readtype(io, String)
fields[:fCountClass] = readtype(io, String)
endcheck(io, preamble)
T(;fields...)
end
@TStreamerElementTemplate mutable struct TStreamerLoop
fCountVersion
fCountName
fCountClass
end
components(::Type{TStreamerLoop}) = [TStreamerElement]
function parsefields!(io, fields, ::Type{TStreamerLoop})
fields[:fCountVersion] = readtype(io, Int32)
fields[:fCountName] = readtype(io, String)
fields[:fCountClass] = readtype(io, String)
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TStreamerLoop})
@initparse
preamble = Preamble(io, T)
for component in components(T)
parsefields!(io, fields, component)
end
parsefields!(io, fields, T)
endcheck(io, preamble)
T(;fields...)
end
abstract type AbstractTStreamSTL end
@TStreamerElementTemplate mutable struct TStreamerSTL <: AbstractTStreamSTL
fSTLtype
fCtype
end
@TStreamerElementTemplate mutable struct TStreamerSTLstring <: AbstractTStreamSTL
fSTLtype
fCtype
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, ::Type{T}) where T <: AbstractTStreamSTL
@initparse
if T == TStreamerSTLstring
wrapper_preamble = Preamble(io, T)
end
preamble = Preamble(io, T)
parsefields!(io, fields, TStreamerElement)
fields[:fSTLtype] = readtype(io, Int32)
fields[:fCtype] = readtype(io, Int32)
if fields[:fSTLtype] == Const.kSTLmultimap || fields[:fSTLtype] == Const.kSTLset
if startswith(fields[:fTypeName], "std::set") || startswith(fields[:fTypeName], "set")
fields[:fSTLtype] = Const.kSTLset
elseif startswith(fields[:fTypeName], "std::multimap") || startswith(fields[:fTypeName], "multimap")
fields[:fSTLtype] = Const.kSTLmultimap
end
end
endcheck(io, preamble)
if T == TStreamerSTLstring
endcheck(io, wrapper_preamble)
end
T(;fields...)
end
const TObjString = String
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{TObjString})
preamble = Preamble(io, T)
skiptobj(io)
value = readtype(io, String)
endcheck(io, preamble)
T(value)
end
abstract type AbstractTStreamerObject end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, ::Type{T}) where T<:AbstractTStreamerObject
@initparse
preamble = Preamble(io, T)
parsefields!(io, fields, TStreamerElement)
endcheck(io, preamble)
T(;fields...)
end
@TStreamerElementTemplate mutable struct TStreamerObject <: AbstractTStreamerObject end
@TStreamerElementTemplate mutable struct TStreamerObjectAny <: AbstractTStreamerObject end
@TStreamerElementTemplate mutable struct TStreamerObjectAnyPointer <: AbstractTStreamerObject end
@TStreamerElementTemplate mutable struct TStreamerObjectPointer <: AbstractTStreamerObject end
@TStreamerElementTemplate mutable struct TStreamerString <: AbstractTStreamerObject end
abstract type ROOTStreamedObject end
# function stream(io, ::Type{T}) where {T<:ROOTStreamedObject}
# fields = Dict{Symbol, Any}()
# preamble = Preamble(io, T)
# stream!(io, fields, T{preamble.version})
# endcheck(io, preamble)
# T(fields)
# end
function stream!(io, fields, ::Type{T}) where {T<:ROOTStreamedObject}
preamble = Preamble(io, T)
streamer_name = Symbol(T, "_$(preamble.version)")
# @show streamer_name
mod, typename = split(String(streamer_name), ".")
# @show mod typename
streamer = getfield(@__MODULE__, Symbol(typename))
# @show streamer
readfields!(io, fields, streamer)
endcheck(io, preamble)
end
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, ::Type{T}) where {T<:ROOTStreamedObject}
cursor = Cursor(position(io), io, tkey, refs)
@initparse
preamble = Preamble(io, T)
streamer_name = Symbol(T, "_$(preamble.version)")
mod, typename = split(String(streamer_name), ".")
streamer = getfield(@__MODULE__, Symbol(typename))
readfields!(cursor, fields, streamer)
streamer(;cursor=cursor, fields...)
end
# function stream!(io, fields, ::Type{T{V}}) where {V, T<:ROOTStreamedObject}
# println("Don't know how to stream $T")
# end
struct TObject <: ROOTStreamedObject end
parsefields!(io, fields, ::Type{TObject}) = skiptobj(io)
struct TString <: ROOTStreamedObject end
unpack(io, tkey::TKey, refs::Dict{Int32, Any}, ::Type{TString}) = readtype(io, String)
struct Undefined <: ROOTStreamedObject
skipped_bytes
end
Base.show(io::IO, u::Undefined) = print(io, "$(typeof(u)) ($(u.skipped_bytes) bytes)")
function unpack(io, tkey::TKey, refs::Dict{Int32, Any}, T::Type{Undefined})
preamble = Preamble(io, T)
bytes_to_skip = preamble.cnt - 6
skip(io, bytes_to_skip)
endcheck(io, preamble)
Undefined(bytes_to_skip)
end
const TArrayD = Vector{Float64}
const TArrayI = Vector{Int32}
function readtype(io, T::Type{Vector{U}}) where U <: Union{Integer, AbstractFloat}
size = readtype(io, eltype(T))
[readtype(io, eltype(T)) for _ in 1:size]
end
| [
27,
34345,
29,
10677,
14,
5532,
364,
13,
20362,
198,
7249,
13860,
263,
12360,
198,
220,
220,
220,
4269,
263,
198,
220,
220,
220,
20086,
198,
437,
198,
198,
7249,
13860,
364,
198,
220,
220,
220,
256,
2539,
3712,
51,
9218,
198,
220,
... | 2.331488 | 7,768 |
<reponame>adolgert/comorbid.jl<gh_stars>0
using comorbid
using Test
@testset "comorbid.jl" begin
@test isapprox(exact_burden_term([.1], [.8], [true]), [.08])
end
| [
27,
7856,
261,
480,
29,
324,
349,
70,
861,
14,
785,
273,
14065,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
401,
273,
14065,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
785,
273,
14065,
13,
20362,
1,
2221,
198,
220,
22... | 2.168831 | 77 |
module mcrosssection1
using FinEtools
using FinEtoolsFlexBeams.CrossSectionModule
using Test
function test()
cs = CrossSectionModule.CrossSectionCircle(s -> 5.9910, s -> [0.0, 0.0, 1.0])
par = cs.parameters(0.0)
for (c, r) in zip((par.A, par.J, par.I1, par.I2, par.I3), (112.75829799164978, 2023.5649824692157, 2023.5649824692157, 1011.7824912346078, 1011.7824912346078))
@test c ≈ r
end
true
end
end
using .mcrosssection1
mcrosssection1.test()
module mcrosssection2
using FinEtools
using FinEtoolsFlexBeams.CrossSectionModule
using Test
function test()
cs = CrossSectionModule.CrossSectionRectangle(s -> 42.0, s -> 4.2, s -> [0.0, 0.0, 1.0])
par = cs.parameters(0.0)
for (c, r) in zip((par.A, par.J, par.I1, par.I2, par.I3), (176.4, 970.849152, 26190.108000000004, 259.30800000000005, 25930.800000000003))
@test c ≈ r
end
true
end
end
using .mcrosssection2
mcrosssection2.test()
module mcrosssection3
using FinEtools
using FinEtoolsFlexBeams.CrossSectionModule
using Test
function test()
cs = CrossSectionModule.CrossSectionRectangle(s -> 1.3 * 4.2, s -> 4.2, s -> [0.0, 0.0, 1.0])
par = cs.parameters(0.0)
for (c, r) in zip((par.A, par.J, par.I1, par.I2, par.I3), (22.932000000000006, 71.66370760423138, 90.68000760000004, 33.71004000000001, 56.969967600000025))
@test c ≈ r
end
true
end
end
using .mcrosssection3
mcrosssection3.test()
module mcrosssection4
using FinEtools
using FinEtoolsFlexBeams.CrossSectionModule
using Test
function test()
R = 0.5
cs = CrossSectionModule.CrossSectionCircle(s -> (1/2+2*s)*R, s -> [0.0, 0.0, 1.0])
par = cs.parameters(0.0)
for (c, r) in zip((par.A, par.J, par.I1, par.I2, par.I3), (0.19634954084936207, 0.006135923151542565, 0.006135923151542565, 0.0030679615757712823, 0.0030679615757712823))
@test c ≈ r
end
true
end
end
using .mcrosssection4
mcrosssection4.test() | [
21412,
285,
19692,
5458,
16,
198,
3500,
4463,
36,
31391,
198,
3500,
4463,
36,
31391,
37,
2588,
3856,
4105,
13,
21544,
16375,
26796,
198,
3500,
6208,
198,
8818,
1332,
3419,
198,
220,
220,
220,
50115,
796,
6372,
16375,
26796,
13,
21544,
... | 2.238095 | 861 |
<gh_stars>1-10
# ------------------------------------- #
# Helper methods used in generated code #
# ------------------------------------- #
# inlined function to extract a single variable. If `x` is a vector then
# extract a single element. If `x` is a Matrix then extract one column of the
# matrix
@inline _unpack_var(x::AbstractVector, i::Integer) = x[i]
@inline _unpack_var(x::AbstractMatrix, i::Integer, ::Type{COrder}) = view(x, :, i)
@inline _unpack_var(x::AbstractMatrix, i::Integer, ::Type{FOrder}) = view(x, i, :)
@inline _unpack_var(x::AbstractMatrix, i::Integer) = view(x, :, i)
# inlined function to extract a single observations of a vector fo variables.
@inline _unpack_obs(x::AbstractMatrix, i::Integer, ::Type{COrder}) = view(x, i, :)
@inline _unpack_obs(x::AbstractMatrix, i::Integer, ::Type{FOrder}) = view(x, :, i)
@inline _unpack_obs(x::AbstractMatrix, i::Integer) = view(x, i, :)
@inline _unpack_obs(x::AbstractVector, i::Integer) = x
# similar to _unpack_var, but instead assigns one element of a vector or one
# column of a matrix
@inline _assign_var(lhs::AbstractVector, rhs::Number, i) = setindex!(lhs, rhs, i)
@inline _assign_var(lhs::AbstractMatrix, rhs::AbstractVector, i) = setindex!(lhs, rhs, :, i)
@inline _assign_var(lhs::AbstractMatrix, rhs::AbstractVector, i, ::Type{COrder}) = setindex!(lhs, rhs, :, i)
@inline _assign_var(lhs::AbstractMatrix, rhs::AbstractVector, i, ::Type{FOrder}) = setindex!(lhs, rhs, i, :)
# determine the size of the output variable, given the number of expressions
# in the equation and all the input arguments
_output_size(n_expr::Int, args::AbstractVector...) = (n_expr,)
_output_size(n_expr::Int, arg::AbstractMatrix) = (size(arg, 1), n_expr)
function _output_size(n_expr::Int, args...)
n_row = 0
# get maximum number of rows among matrix arguments
for a in args
if isa(a, AbstractMatrix)
nr = size(a, 1)
if n_row > 0
# we already found another matrix argument, now we can enforce
# that all matrix arguments have conformable shapes
if nr != n_row
msg = string("Unconformable argument sizes. For vectorized",
" Core.evaluation all matrix arguments must have ",
"the same number of rows.")
throw(DimensionMismatch(msg))
end
else
# we need to update n_row
n_row = nr
end
end
end
(n_row, n_expr)
end
_output_size(n_expr::Int, ::Type{COrder}, args...) = _output_size(n_expr::Int, args...)
function _output_size(n_expr::Int, order::Type{FOrder}, args...)
s = _output_size(n_expr::Int, order::Type{FOrder}, args...)
(s[2],s[1])
end
# Allocate an array of eltype `T` for `n_expr` variables that corresponds
# to input arguments `args...`
_allocate_out(T::Type, n_expr::Int, args::AbstractVector...) = Array{T}(undef, n_expr)
_allocate_out(T::Type, order::Union{Type{COrder},Type{FOrder}}, n_expr::Int, arg::AbstractMatrix) =
Array{T}(undef, _output_size(n_expr, order, arg))
_allocate_out(T::Type, n_expr::Int, arg::AbstractMatrix) =
Array{T}(undef, _output_size(n_expr, arg))
function _allocate_out(T::Type, n_expr::Int, args...)
Array{T}(undef, _output_size(n_expr, args...))
end
function _allocate_out(T::Type, order::Union{Type{COrder},Type{FOrder}}, n_expr::Int, args...)
Array{T}(undef, _output_size(n_expr, order, args...))
end
## Triangular solver
_expr_or_number(x::Union{AbstractString,Symbol,Expr}) = _to_expr(x)
_expr_or_number(x::Number) = x
inf_to_Inf(x::Number) = x
inf_to_Inf(x::Symbol) = x in (:inf, :Inf) ? Inf : x
inf_to_Inf(ex::Expr) = Expr(ex.head, map(inf_to_Inf, ex.args)...)
_to_Float64(x::Real) = convert(Float64, x)
_to_Float64(x::AbstractArray) = map(Float64, x)
function solution_order(d::OrderedDict, it::IncidenceTable, pre_solved::Vector{Symbol}=Symbol[])
# unpack some data
vars = collect(keys(d))
n = length(d)
# allocate
out = zeros(Int, length(d))
# Start with indices for equations that are purely numerical
front = setdiff(1:n, keys(it.by_eq))
out[front] = 1:length(front)
solved = vcat(pre_solved, vars[front])
to_solve = deepcopy(it.by_eq)
# now start stepping through equations
ix = length(front)
for _junk in 2:n+1
for (eq, eq_vars) in to_solve
can_solve = true
for (var, dates) in eq_vars
if !in(var, solved)
can_solve = false
break
end
end
if can_solve
out[eq] = ix+=1
push!(solved, vars[eq])
pop!(to_solve, eq)
end
end
end
!isempty(to_solve) && error("Not triangular system")
return sortperm(out)
end
function solution_order(_d::AbstractDict, pre_solved::Vector{Symbol}=Symbol[])
d = OrderedDict(_d)
it = Dolang.IncidenceTable(collect(values(d)))
solution_order(d, it, pre_solved)
end
solve_triangular_system(d::AbstractDict) = solve_triangular_system(OrderedDict(d))
function solve_triangular_system(d::OrderedDict)
sol_order = solution_order(d)
# extract expressions and variable names in proper order
nms = collect(keys(d))[sol_order]
exprs = collect(values(d))[sol_order]
# build expression to Core.evaluate system in correct order
to_eval = Expr(:block)
to_eval.args = [:($(i[1])=$(i[2])) for i in zip(nms, exprs)]
# add one line to return a tuple of all data
ret = Expr(:tuple); ret.args = nms
# now Core.evaluate and get data
data = eval(:(
let
$to_eval
$ret
end
))
OrderedDict{Symbol,Real}(zip(nms, data))
end
mutable struct TriangularSystemException <: Exception
missing
end
"""
Solves triangular system specified by incidence dictionary.
```
system = Dict(0=>[1,2], 1=>[], 2=>[1] )
solve_dependency(system)
```
or
```
system = Dict(:x=>[:y,:z], :y=>[], :z=>[:y] )
solve_dependency(system)
```
Optionally, one can add specify which subset of variables to solve so that unrequired variables will be ommited in the solution. In
```
system = Dict(:x=>[:y,:z], :y=>[], :z=>[:y], :p=>[:x,y,z] )
solve_dependency(system, [:x,:y,:z])
```
the answer is the same as before since `:p` is not needed to
define the values of `[:x,:y,:z]`.
"""
function solve_dependencies(deps::AbstractDict{T,Set{T}}, unknowns=nothing) where T
solution = []
if unknowns == nothing
needed = Set(keys(deps))
else
needed = Set(unknowns)
end
while length(needed)>0
tt0 = (length(needed), length(solution))
if length(needed)==0
return solution
else
for k in needed
# check whether k is solved
if k in solution
pop!(needed, k)
elseif issubset(deps[k], solution)
push!(solution, k)
else
needed = union(deps[k], needed)
end
end
tt = (length(needed), length(solution))
if tt == tt0
mis = join([string(e) for e in needed], ", ")
exc = TriangularSystemException(mis)
throw(exc)
end
end
end
return solution
end
"""
    get_dependencies(defs::AbstractDict; target=:all)

Build the incidence dictionary of `defs`: for every `name => expr` pair,
collect the symbols appearing in `expr` (variables, parameters, or both,
depending on `target`) that are themselves keys of `defs`.

Returns an `OrderedDict{Any,Set{Any}}` mapping each key to the set of keys
it depends on.
"""
function get_dependencies(defs::AbstractDict{T,U}; target=:all) where T where U
    dependencies = OrderedDict{Any,Set{Any}}()
    known = keys(defs)
    for (name, expr) in defs
        # Gather the symbols referenced by this equation.
        if target == :variables
            symbols = list_variables(expr)
        elseif target == :parameters
            symbols = list_parameters(expr)
        else
            listed = list_symbols(expr)
            symbols = cat(listed.parameters, listed.variables; dims=1)
        end
        # Keep only the ones that are defined in `defs` itself.
        dependencies[name] = Set(intersect(symbols, known))
    end
    return dependencies
end
"""
    reorder_triangular_block(defs::AbstractDict)

Return `defs` as an `OrderedDict` whose entries are sorted so that every
definition appears after all the definitions it depends on.
"""
function reorder_triangular_block(defs::AbstractDict{T,U}) where T where U
    order = Dolang.solve_dependencies(get_dependencies(defs))
    return OrderedDict((name, defs[name]) for name in order)
end
"""Solves definitions blocks
Keys are timed variables in canonical form (e.g. `(:v,0)`) at date t=0.
Values are expressions, possibly referencing key variables at different dates.
The system is recursively solved for the unknowns, by default the keys.
"""
function solve_definitions(defs::AbstractDict{Tuple{Symbol, Int}, <:SymExpr}, unknowns=keys(defs))
# defs should map timed-vars to expressions.
_defs = deepcopy(defs)
for (v,t) in collect(keys(_defs))
# t should always be 0
@assert t==0
for shift in (-1,1)
_defs[(v,shift)] = Dolang.time_shift(_defs[(v,t)], shift)
end
end
deps = Dolang.get_dependencies(_defs)
solution = Dolang.solve_dependencies(deps, unknowns)
reordered = OrderedDict()
for k in solution
reordered[k] = _defs[k]
end
return reordered
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
20368,
30934,
1303,
198,
2,
5053,
525,
5050,
973,
287,
7560,
2438,
1303,
198,
2,
20368,
30934,
1303,
198,
198,
2,
287,
10837,
2163,
284,
7925,
257,
2060,
7885,
13,
1002,
4600,
87,
63,
... | 2.269814 | 4,088 |
#=
# 5. Viscous flow about a moving body
In this notebook we will demonstrate the simulation of a moving body. It is straightforward
to set up a moving body. The main caveat is that the simulation is slower,
because the integrator must update the operators continuously throughout the simulation.
We will demonstrate this on an oscillating flat plate.
=#
using ViscousFlow
#-
using Plots
#=
### Problem specification and discretization
For simplicity, we will not create a free stream in this problem. Everything
else here follows the usual setup.
=#
my_params = Dict()
my_params["Re"] = 200
#-
xlim = (-1.0,1.0)
ylim = (-1.0,1.0)
my_params["grid Re"] = 4.0
g = setup_grid(xlim,ylim,my_params)
Δs = surface_point_spacing(g,my_params)
#=
### Set up body
Set up the plate and place it at the origin. (We don't actually have
to move it, since it defaults to the origin, but it's helpful to put
this here in case we wish to initialize it differently.)
=#
body = Plate(1.0,Δs)
T = RigidTransform((0,0),0)
T(body)
#=
### Set the body motion
Now we specify the body motion. We will use oscillatory pitch-heave kinematics for this:
=#
a = 0.25 # location of pitch axis, a = 0.5 is leading edge
ϕp = -π/2 # phase lag of pitch
ϕh = 0.0 # phase lag of heave
A = 0.25 # amplitude/chord
fstar = 1/π # fc/U
α₀ = 0 # mean angle of attack
Δα = 10π/180 # amplitude of pitching
U₀ = 0.0 # translational motion (set to zero in place of free stream)
K = π*fstar # reduced frequency, K = πfc/U
oscil1 = RigidBodyTools.PitchHeave(U₀,a,K,ϕp,α₀,Δα,A,ϕh)
motion = RigidBodyMotion(oscil1)
# We can inspect the kinematics in this `motion` by plotting them:
plot(motion)
#=
### Define the boundary condition functions
Instead of using the default boundary condition functions, we define
special ones here that provide the instantaneous surface velocity (i.e. the velocity
of every surface point) from the prescribed
motion. Every surface has an "exterior" and "interior" side. For
a flat plate, these two sides are the upper and lower sides, and both sides
are next to the fluid, so both of them are assigned the prescribed velocity
of the plate. (For closed bodies, we would assign this velocity to only
one of the sides, and zero to the other side. We will see an example of this in a later case.)
We pack these into a special dictionary and
pass these to the system construction.
=#
# Exterior-side boundary condition: the instantaneous surface velocity of
# every surface point, evaluated from the prescribed motion at time `t`.
# `phys_params` is unused but required by the BC-function signature.
function my_vsplus(t,base_cache,phys_params,motions)
    vel = zeros_surface(base_cache)
    surface_velocity!(vel, base_cache, motions, t)
    return vel
end
# Interior-side boundary condition: identical to the exterior one for an
# open (flat-plate) body, since fluid touches both sides of the surface.
function my_vsminus(t,base_cache,phys_params,motions)
    vel = zeros_surface(base_cache)
    surface_velocity!(vel, base_cache, motions, t)
    return vel
end
bcdict = Dict("exterior" => my_vsplus, "interior" => my_vsminus)
#=
### Construct the system structure
Here, we supply the motion and boundary condition functions as additional arguments.
=#
sys = viscousflow_system(g,body,phys_params=my_params,motions=motion,bc=bcdict);
#=
Before we solve the problem, it is useful to note that the Reynolds number
we specified earlier may not be the most physically-meaningful Reynolds number.
More relevant in this problem is the Reynolds number based on the maximum
body speed.
=#
Umax, imax, tmax, bmax = maxlistvelocity(sys)
Re_eff = my_params["Re"]*Umax
#-
u0 = init_sol(sys)
tspan = (0.0,10.0)
integrator = init(u0,tspan,sys)
#=
### Solve
This takes longer per time step than it does for stationary bodies. Here, we only
run it for 1.5 time units just to demonstrate it.
=#
@time step!(integrator,1.5)
#=
### Examine the solution
Let's look at a few snapshots of the vorticity field. Note that the
plotting here requires us to explicitly call the [`surfaces`](@ref)
function to generate the instantaneous configuration of the plate.
=#
sol = integrator.sol
plt = plot(layout = (1,3), size = (800, 300), legend=:false)
tsnap = 0.5:0.5:1.5
for (i, t) in enumerate(tsnap)
plot!(plt[i],vorticity(sol,sys,t),sys,layers=false,title="t = $(round(t,digits=2))",clim=(-5,5),levels=range(-5,5,length=30),color = :RdBu)
plot!(plt[i],surfaces(sol,sys,t))
end
plt
# and the forces
sol = integrator.sol
fx, fy = force(sol,sys,1);
#-
plot(
plot(sol.t,2*fx,xlim=(0,Inf),ylim=(-3,3),xlabel="Convective time",ylabel="\$C_D\$",legend=:false),
plot(sol.t,2*fy,xlim=(0,Inf),ylim=(-6,6),xlabel="Convective time",ylabel="\$C_L\$",legend=:false),
size=(800,350)
)
| [
2,
28,
198,
2,
642,
13,
569,
2304,
516,
5202,
546,
257,
3867,
1767,
198,
818,
428,
20922,
356,
481,
10176,
262,
18640,
286,
257,
3867,
1767,
13,
632,
318,
15836,
198,
1462,
900,
510,
257,
3867,
1767,
13,
383,
1388,
36531,
318,
326... | 2.927759 | 1,495 |
<reponame>ettersi/TreeTensors.jl
module TreeTensors
importall Base
using Tensors
include("Trees.jl")
include("ModeTrees.jl")
# Tree tensor networks
export TreeTensor

# Map from a tree node to the tensor stored at that node.
# (Julia ≤0.6 syntax: `typealias`/`immutable` — keep as-is for this codebase.)
typealias TensorDict{T} Dict{Tree,Tensor{T}}

# A tensor in tree-network format: a mode tree plus one tensor per node.
immutable TreeTensor{T}
    mtree::AbstractModeTree
    tensors::TensorDict{T}
end

# Construct an empty tree tensor over `mtree` with scalar type `T`.
(::Type{TreeTensor{T}}){T}(mtree) = TreeTensor(mtree, TensorDict{T}())
# Basic functions
export modetree, tree

# Scalar element type of the tensors stored in a `TreeTensor{T}`.
Tensors.scalartype{T}(::Type{TreeTensor{T}}) = T
# Accessors for the underlying mode tree and its plain tree.
modetree(x::TreeTensor) = x.mtree
tree(x::TreeTensor) = tree(modetree(x))
# Copy: fresh tensor dictionary, same mode tree object.
copy(x::TreeTensor) = TreeTensor(x.mtree, TensorDict{scalartype(x)}(x.tensors))
# Convert all node tensors to scalar type `T`.
convert{T}(::Type{TreeTensor{T}}, x::TreeTensor) = TreeTensor(x.mtree, convert(TensorDict{T},x.tensors))
# Copy-or-convert: skip the conversion pass when the type already matches.
copyconvert{T}(::Type{TreeTensor{T}}, x::TreeTensor{T}) = copy(x)
copyconvert{T}(::Type{TreeTensor{T}}, x::TreeTensor) = convert(TreeTensor{T}, x)
# Indexing and iteration
# Forward container-like operations to the underlying `tensors` dictionary,
# so a `TreeTensor` can be indexed/iterated by tree node directly.
for f in (
    :getindex, :setindex!,
    :start, :next, :done,
    :eltype, :length,
    :keys, :values, :keytype, :valtype
)
    @eval $f(x::TreeTensor, args...) = $f(x.tensors, args...)
end

# Resolve the symbolic index `:root` to the root node; any other symbol
# raises `KeyError`.
function index(x::TreeTensor, s::Symbol)
    if s == :root return tree(x)
    else throw(KeyError(s)) end
end
# Allow `x[:root]` as sugar for indexing by the root node.
getindex(x::TreeTensor, s::Symbol) = x[index(x,s)]
setindex!(x::TreeTensor, xv, s::Symbol) = x[index(x,s)] = xv
# Initialization

# All-ones tree tensor over `mtree`: each node carries its physical modes
# plus one rank-1 mode per tree edge.
ones{T}(::Type{T}, mtree::AbstractModeTree) = TreeTensor(mtree, Dict{Tree,Tensor{T}}(
    v => ones(T, [[Mode(e,1) for e in neighbor_edges(v)]; mtree[v]])
    for v in vertices(mtree, root_to_leaves)
))
# Identity operator on the squared mode tree; edge modes are appended with
# rank 1 so the network contracts to the identity.
eye{T}(::Type{T}, mtree::AbstractModeTree) = TreeTensor(square(mtree), Dict{Tree,Tensor{T}}(
    v => begin
        t = eye(T, mtree[v]);
        t.modes = [t.modes; [Mode(e,1) for e in neighbor_edges(v)]];
        t
    end
    for v in vertices(mtree, root_to_leaves)
))
# Random tree tensor; `r` is either a uniform edge rank (Int) or a Dict
# mapping each edge to its rank.
function rand{T}(::Type{T}, mtree::AbstractModeTree, r)
    evaluate(r::Int,e) = r
    evaluate(r::Dict,e) = r[e]
    return TreeTensor(mtree, Dict{Tree,Tensor{T}}(
        v => rand(T, [[Mode(e,evaluate(r,e)) for e in neighbor_edges(v)]; mtree[v]])
        for v in vertices(mtree, root_to_leaves)
    ))
end
# Convenience methods: default scalar type Float64 ...
for f in (:ones, :rand, :eye)
    @eval $f(mtree::AbstractModeTree, args...) = $f(Float64, mtree, args...)
end
# ... and "same shape as" construction from an existing TreeTensor.
for f in (:ones, :rand, :eye)
    @eval $f(x::TreeTensor, args...) = $f(scalartype(x), modetree(x), args...)
end
# Conversion to and from full
export decompose

# Decompose a full tensor `x` into tree format over `mtree`, sweeping from
# the leaves to the root and splitting off each node with a truncated SVD
# (truncation controlled by `rank`). What remains after all splits becomes
# the root tensor.
function decompose(x::Tensor, mtree, rank)
    y = TreeTensor{scalartype(x)}(mtree)
    for (v,p) in edges(y, leaves_to_root)
        b,s,y[v] = svd(x, [child_edges(v,p);mlabel(mtree[v])], PairSet(v,p), rank)
        # Fold the singular values back into the remainder for the next split.
        x = scale!(b,s)
    end
    y[:root] = x
    return y
end
export contract!, contract

# Contract the whole network into a single full tensor by absorbing each
# node into its parent, leaves first. Destroys `x`; returns the root tensor.
function contract!(x::TreeTensor)
    for (v,p) in edges(x, leaves_to_root)
        x[p] *= x[v]
    end
    return x[:root]
end
# Non-destructive variant.
contract(x::TreeTensor) = contract!(copy(x))
# Arithmetic

# Sum of two tree tensors: node-wise block concatenation along the edge
# modes (ranks add); scalar types are promoted.
function +(x::TreeTensor, y::TreeTensor)
    @assert modetree(x) == modetree(y) "x and y must have the same mode tree"
    mtree = modetree(x)
    return TreeTensor(
        mtree,
        Dict{Tree,Tensor{promote_type(scalartype(x), scalartype(y))}}(
            v => padcat(x[v],y[v], neighbor_edges(v))
            for v in vertices(mtree, root_to_leaves)
        )
    )
end
# Operator/tensor product: contract node-wise, then merge each pair of
# tagged edge modes back into a single edge mode (ranks multiply).
function *(x::TreeTensor, y::TreeTensor)
    @assert unsquare(modetree(x)) == unsquare(modetree(y)) "x and y must have the same mode tree"
    mtree = modetree(x)
    return TreeTensor(
        mtree,
        Dict{Tree,Tensor{promote_type(scalartype(x),scalartype(y))}}(
            v => mergem!(tag(x[v], 1,neighbor_edges(v))*tag(y[v], 2,neighbor_edges(v)), Dict([tag(1,e),tag(2,e)] => e for e in neighbor_edges(v)))
            for v in vertices(mtree, root_to_leaves)
        )
    )
end
# Scalar multiplication only touches the root tensor.
*(a::Number, x::TreeTensor) = (y = copyconvert(TreeTensor{promote_type(typeof(a),scalartype(x))}, x); y[:root] *= a; return y)
*(x::TreeTensor, a::Number) = a*x
scale!(a::Number, x::TreeTensor) = (x[:root] *= a; return x)
scale!(x::TreeTensor, a::Number) = scale!(a,x)
# Subtraction via scalar multiple of -1.
-(x::TreeTensor, y::TreeTensor) = x + (-1)*y
# Transposition and conjugation
# Generate out-of-place and in-place variants of conj/transpose/ctranspose
# by applying the node-level operation to every stored tensor.
for f in (:conj,:transpose,:ctranspose)
    f! = Symbol(string(f)*"!")
    @eval begin
        # Out-of-place: build a new TreeTensor with transformed node tensors.
        function Base.$f(t::TreeTensor)
            return TreeTensor(
                modetree(t),
                Dict{Tree,Tensor{scalartype(t)}}(
                    v => $f(tv)
                    for (v,tv) in t
                )
            )
        end
        # In-place: overwrite each node tensor.
        function Base.$f!(t::TreeTensor)
            for v in keys(t)
                t[v] = $f!(t[v])
            end
            return t
        end
    end
end
# Orthogonalisation and truncation
export orthogonalize!, orthogonalize

# Bring the network into root-orthogonal form: QR-factor each node along its
# parent edge (leaves first) and push the triangular factor up the tree.
function orthogonalize!(x::TreeTensor)
    for (v,p) in edges(x, leaves_to_root)
        x[v],c = qr(x[v], PairSet(v,p))
        x[p] = c*x[p]
    end
    return x
end
# Non-destructive variant.
orthogonalize(x::TreeTensor) = orthogonalize!(copy(x))
export truncate!, truncate

# Truncate all edge ranks of `x` according to `rank` (see `resize`/`rank`
# on the singular-value data). Requires root-orthogonal form, which is
# established first; sweeps root-to-leaves, computing the SVD across every
# child edge and resizing to the requested rank.
function truncate!(x::TreeTensor, rank)
    orthogonalize!(x)
    s = Dict{PairSet{Tree}, Tensor{real(scalartype(x))}}()
    for (v,p) in edges_with_root(x, root_to_leaves)
        for u in children(v,p)
            e = PairSet(u,v)
            b,s[e],d = svd(x[v], e, maxrank())
            # Push the singular values into the child; keep them at v too so
            # they can be divided back out after resizing below.
            x[u] = scale!(s[e],d)*x[u]
            x[v] = scale!(b,s[e])
        end
        x[v] = resize(x[v], Dict(e => rank(s[e].data) for e in neighbor_edges(v)))
        # `1./s[e]` divides out the singular values retained at v
        # (Julia ≤0.6 elementwise-division syntax).
        scale!(x[v], [resize(1./s[e], Dict(e => msize(x[v],e))) for e in child_edges(v,p)]...)
    end
    return x
end
# Non-destructive variant.
truncate(x::TreeTensor, rank) = truncate!(copy(x), rank)
# Contracted subtrees

# Contract the subtree below node `v` (with parent `p`) for the sandwich
# `conj(args[1]) * args[2] * ... * args[end]`, reusing the already-contracted
# children stored in `c`. Tags keep the edge modes of the different layers
# distinct during the contraction.
function contracted_subtree(v::Tree,p::Tree, c::TreeTensor, args::TreeTensor...)
    cv = retag!(tag!(conj(args[1][v]), 1, neighbor_edges(v)), :C => :_, :R => :C)
    for u in children(v,p) cv *= c[u] end
    for i = 2:length(args)-1 cv *= tag(args[i][v], i, neighbor_edges(v)) end
    cv *= retag!(tag(args[end][v], length(args), neighbor_edges(v)), :C => :_)
    return cv
end
# Contract all subtrees bottom-up, producing one environment tensor per node.
function contracted_subtrees(args::TreeTensor...)
    c = TreeTensor(
        modetree(args[1]),
        TensorDict{scalartype(args[1])}()
    )
    for (v,p) in edges(c, leaves_to_root)
        c[v] = contracted_subtree(v,p,c,args...)
    end
    return c
end
# Norm and dot
export norm!

# In root-orthogonal form the network norm equals the norm of the root
# tensor. `norm!` destroys (orthogonalizes) `x`; `norm` works on a copy.
norm!(x::TreeTensor) = norm(orthogonalize!(x)[:root])
norm( x::TreeTensor) = norm(orthogonalize( x)[:root])
# Inner product <x,y>: contract the full sandwich at the root.
function dot(x::TreeTensor, y::TreeTensor)
    return scalar(
        contracted_subtree(
            tree(x),tree(x),
            contracted_subtrees(x,y),
            x,y
        )
    )
end
# Local solvers

# Dense local operator at node `v`: A's node tensor contracted with the
# environments `xAx` of all neighbors, retagged into row (:R) / column (:C)
# mode labels.
function localmatrix(A, v, xAx)
    lA = tag(A[v], 2, neighbor_edges(v))
    for u in neighbors(v)
        lA *= xAx[u]
    end
    retag!(lA, 1 => :R, 3 => :C)
    return lA
end
# Local right-hand side at node `v`: b's node tensor contracted with the
# rhs environments `xb` of all neighbors.
function localrhs(b, v, xb)
    lb = tag(b[v], 2, neighbor_edges(v))
    for u in neighbors(v)
        lb *= xb[u]
    end
    untag!(lb, 1)
    return lb
end
# Matrix-free local operator: returns a closure applying the local A to a
# node tensor. Contraction order is specialized per neighbor count (1-3);
# other arities are not implemented.
function localmatvecfunc(A, v, xAx)
    Av = tag(A[v],2,neighbor_edges(v))
    c = [retag(xAx[u], 3 => :C, 1 => :R) for u in neighbors(v)]
    if length(c) == 3
        return xv -> begin
            (c[3]*((c[1]*Av)*(c[2]*xv)))
        end
    elseif length(c) == 2
        return xv -> begin
            (c[2]*(Av*(c[1]*xv)))
        end
    elseif length(c) == 1
        return xv -> begin
            (c[1]*Av*xv)
        end
    else
        error("Local matvec structure not implemented!")
    end
end
# Package the local LSE at node `v` as the (x0, matvec, rhs) triple expected
# by the IterativeSolvers routines, flattening tensors into plain arrays.
function localproblem(x,A,b, v, xAx,xb)
    modes = x[v].modes
    matvec = localmatvecfunc(A, v, xAx)
    return x[v][mlabel(modes)],
           xv -> (xv = Tensor(modes,xv); matvec(xv)[mlabel(modes)]),
           localrhs(b,v,xb)[mlabel(modes)]
end
# Local solvers
using IterativeSolvers
# Interface: a LocalSolver solves the node-local LSE inside the ALS sweep
# via `localsolve!(x,A,b, solver, v, xAx, xb)`.
abstract LocalSolver
export DenseSolver
# Direct solver: assemble the dense local matrix and use `\`.
immutable DenseSolver <: LocalSolver end
function localsolve!(x,A,b, solver::DenseSolver, v, xAx, xb)
    x[v] = localmatrix(A,v,xAx)\localrhs(b,v,xb)
end
export GMRES
# Matrix-free GMRES local solver with restart; `warn = true` reports
# non-converged local iterations.
immutable GMRES <: LocalSolver
    tol::Float64
    maxiter::Int
    restart::Int
    warn::Bool
end
GMRES(; tol = sqrt(eps()), maxiter = 1, restart = 20, warn = false) = GMRES(tol, maxiter, restart, warn)
# Solve the local LSE at node `v` with restarted GMRES, warm-started from
# the current x[v] (via `localproblem`'s x0).
function localsolve!(x,A,b, solver::GMRES, v, xAx,xb)
    _,hist = gmres!(
        localproblem(x,A,b, v, xAx,xb)...;
        tol = solver.tol,
        maxiter = solver.maxiter,
        restart = solver.restart
    )
    if !hist.isconverged && solver.warn
        warn(
            "Local GMRES iteration did not convergence.\n"*
            "    LSE size: "*string(length(x[v]))*"\n"*
            "    Threshold: "*string(hist.threshold)*"\n"*
            "    Residuals: "*string(hist.residuals)
        )
    end
end
export CG
# Matrix-free conjugate-gradient local solver (for symmetric positive
# definite local operators); `warn = true` reports non-convergence.
immutable CG <: LocalSolver
    tol::Float64
    maxiter::Int
    warn::Bool
end
CG(; tol = sqrt(eps()), maxiter = 20, warn = false) = CG(tol, maxiter, warn)
# Solve the local LSE at node `v` with conjugate gradients, warm-started
# from the current x[v].
#
# NOTE(review): this previously called `localmatvec`, which is not defined
# anywhere in this module; the GMRES path uses `localproblem`, which returns
# exactly the (x0, matvec, rhs) triple the IterativeSolvers routines take,
# so the same call is used here.
function localsolve!(x,A,b, solver::CG, v, xAx,xb)
    _,hist = cg!(
        localproblem(x,A,b, v, xAx,xb)...;
        tol = solver.tol,
        maxiter = solver.maxiter
    )
    # Optional non-convergence diagnostics (mirrors the GMRES path).
    if !hist.isconverged && solver.warn
        warn(
            "Local CG iteration did not convergence.\n"*
            "    LSE size: "*string(length(x[v]))*"\n"*
            "    Threshold: "*string(hist.threshold)*"\n"*
            "    Residuals: "*string(hist.residuals)
        )
    end
end
# ALS linear solver
export als_solve!

# Solve A x = b in tree-tensor format by alternating least squares:
# sweep the tree, solving the node-local projected LSE at each node with
# `solver`, re-orthogonalizing, and updating the cached environments.
# Converged when the largest node update of a sweep drops below
# tol * norm(x). Returns (x, ConvergenceHistory).
function als_solve!(
    x,A,b, solver = GMRES();
    maxiter::Int = 20, tol = sqrt(eps(real(scalartype(x))))
)
    updatenorms = zeros(real(scalartype(x)), maxiter)
    orthogonalize!(x)
    xAx = contracted_subtrees(x,A,x)
    xb = contracted_subtrees(x,b)
    for i = 1:maxiter
        for (u,v) in edges(x, both_ways)
            # Solve
            xu = copy(x[u])
            localsolve!(x,A,b, solver, u, xAx,xb)
            updatenorms[i] = max(updatenorms[i], norm(xu - x[u]))
            # Orthogonalize
            x[u],r = qr(x[u],PairSet(u,v))
            x[v] = r*x[v]
            # Compute contracted subtrees
            xAx[u] = contracted_subtree(u,v, xAx, x,A,x)
            xb[u] = contracted_subtree(u,v, xb, x,b)
        end
        # Convergence check
        if updatenorms[i] < tol*norm(x[:root])
            return x, ConvergenceHistory(true, tol*norm(x[:root]), i, updatenorms[1:i])
        end
    end
    return x, ConvergenceHistory(false, tol*norm(x[:root]), maxiter, updatenorms)
end
# ALS sum
export als_sum!

# Approximate y ≈ sum(x) for a vector of tree tensors `x` at the (fixed)
# ranks of `y`, by ALS projection: at each node, project each summand onto
# y's basis and sum the projections. Returns (y, ConvergenceHistory).
function als_sum!{T}(
    y,x::AbstractVector{TreeTensor{T}};
    maxiter::Int = 20, tol = sqrt(eps(real(T)))
)
    updatenorms = zeros(real(T), maxiter)
    orthogonalize!(y)
    # One environment cache per summand.
    yx = [contracted_subtrees(y,x) for x in x]
    for i = 1:maxiter
        for (u,v) in edges(y, both_ways)
            # Project
            yunew = sum([localrhs(x, u, yx) for (x,yx) in zip(x,yx)])
            updatenorms[i] = max(updatenorms[i], norm(yunew - y[u]))
            y[u] = yunew
            # Orthogonalize
            y[u],r = qr(y[u],PairSet(u,v))
            y[v] = r*y[v]
            # Compute contracted subtrees
            # (`local` re-declares x,yx so the loop variables shadow the
            # outer bindings of the same names)
            for (x,yx) in zip(x,yx)
                local x,yx
                yx[u] = contracted_subtree(u,v, yx, y,x)
            end
        end
        # Convergence check
        if updatenorms[i] < tol*norm(y[:root])
            return y, ConvergenceHistory(true, tol*norm(y[:root]), i, updatenorms[1:i])
        end
    end
    return y, ConvergenceHistory(false, tol*norm(y[:root]), maxiter, updatenorms)
end
# ALS AXPY
export als_axpy!

# Approximate z ≈ A*x + y at the (fixed) ranks of `z` by ALS projection:
# at each node, apply the local matrix-free A to x's node tensor and add
# the projected y. Returns (z, ConvergenceHistory).
function als_axpy!(
    z,A,x,y;
    maxiter::Int = 20, tol = sqrt(eps(real(scalartype(x))))
)
    updatenorms = zeros(real(scalartype(x)), maxiter)
    orthogonalize!(z)
    zAx = contracted_subtrees(z,A,x)
    zy = contracted_subtrees(z,y)
    for i = 1:maxiter
        for (u,v) in edges(z, both_ways)
            # Project
            zunew = localmatvecfunc(A, u, zAx)(x[u]) + localrhs(y, u, zy)
            updatenorms[i] = max(updatenorms[i], norm(zunew - z[u]))
            z[u] = zunew
            # Orthogonalize
            z[u],r = qr(z[u],PairSet(u,v))
            z[v] = r*z[v]
            # Compute contracted subtrees
            zAx[u] = contracted_subtree(u,v, zAx, z,A,x)
            zy[u] = contracted_subtree(u,v, zy, z,y)
        end
        # Convergence check
        if updatenorms[i] < tol*norm(z[:root])
            return z, ConvergenceHistory(true, tol*norm(z[:root]), i, updatenorms[1:i])
        end
    end
    return z, ConvergenceHistory(false, tol*norm(z[:root]), maxiter, updatenorms)
end
## ALSSD linear solver
#
#function alssd_solve!(
# x,A,b, solver = GMRES();
# residualrank = 4, maxiter::Int = 20, tol = sqrt(eps(real(scalartype(x))))
#)
# residualnorms = zeros(real(scalartype(x)), maxiter)
# tol *= norm(b)
#
# z = rand(x, residualrank)
# for i = 1:maxiter
# als_axpy!(z, -A,x,b; maxiter = 1)
#
# residualnorms[i] = norm!(z)
# if residualnorms[i] < tol
# return x,ConvergenceHistory(true, tol, i, residualnorms[1:i])
# end
#
# x += z
# als_solve!(x,A,b, solver; maxiter = 1)
# truncate!(x, adaptive(tol))
# end
# return x
#end
## ALS operator inversion
#
#function als_inv!(
# X,A, solver = GMRES();
# maxiter::Int = 20, tol = sqrt(eps(real(scalartype(X))))
#)
# updatenorms = zeros(real(scalartype(X)), maxiter)
#
# orthogonalize!(X)
# XAX = contracted_subtrees(X,A,X)
# XAXt = contracted_subtrees(X,A,X)
# xb = contracted_subtrees(x,b)
# for i = 1:maxiter
# for (u,v) in edges(x, both_ways)
# # Solve
# xu = copy(x[u])
# localsolve!(x,A,b, solver, u, xAx,xb)
# updatenorms[i] = max(updatenorms[i], norm(xu - x[u]))
#
# # Orthogonalize
# x[u],r = qr(x[u],PairSet(u,v))
# x[v] = r*x[v]
#
# # Compute contracted subtrees
# xAx[u] = contracted_subtree(u,v, xAx, x,A,x)
# xb[u] = contracted_subtree(u,v, xb, x,b)
# end
#
# # Convergence check
# if updatenorms[i] < tol*norm(x[:root])
# return x, ConvergenceHistory(true, tol*norm(x[:root]), i, updatenorms[1:i])
# end
# end
# return x, ConvergenceHistory(false, tol*norm(x[:root]), maxiter, updatenorms)
#end
end # module
| [
27,
7856,
261,
480,
29,
316,
1010,
72,
14,
27660,
51,
641,
669,
13,
20362,
198,
21412,
12200,
51,
641,
669,
198,
198,
11748,
439,
7308,
198,
3500,
40280,
669,
198,
17256,
7203,
51,
6037,
13,
20362,
4943,
198,
17256,
7203,
19076,
51,... | 1.924831 | 7,410 |
<gh_stars>1-10
"""
    simple_lp(bridged)

Build and solve a small LP through the bridged MOI optimizer and verify
the result:

    min  -4x₁ - 3x₂
    s.t. 2x₁ +  x₂ == 4
          x₁ + 2x₂ == 4
          x₁, x₂ ≥ 0

The optimum is x = (4/3, 4/3) with objective value -28/3 ≈ -9.3333.
"""
function simple_lp(bridged)
    MOI.empty!(bridged)
    @test MOI.is_empty(bridged)
    # two scalar decision variables
    X = MOI.add_variables(bridged, 2)
    # equality constraints: 2x₁ + x₂ == 4 and x₁ + 2x₂ == 4
    c1 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(2.0, X[1]),
            MOI.ScalarAffineTerm(1.0, X[2])
        ], 0.0), MOI.EqualTo(4.0))
    c2 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(1.0, X[1]),
            MOI.ScalarAffineTerm(2.0, X[2])
        ], 0.0), MOI.EqualTo(4.0))
    # nonnegativity bounds: x₁ ≥ 0 and x₂ ≥ 0
    b1 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(1.0, X[1])
        ], 0.0), MOI.GreaterThan(0.0))
    b2 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(1.0, X[2])
        ], 0.0), MOI.GreaterThan(0.0))
    # objective: minimize -4x₁ - 3x₂
    MOI.set(bridged,
        MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
        MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-4.0, -3.0], [X[1], X[2]]), 0.0)
    )
    MOI.set(bridged, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(bridged)
    # verify objective and primal solution against the known optimum
    obj = MOI.get(bridged, MOI.ObjectiveValue())
    @test obj ≈ -9.33333 atol = 1e-2
    Xr = MOI.get(bridged, MOI.VariablePrimal(), X)
    @test Xr ≈ [1.3333, 1.3333] atol = 1e-2
end
"""
    simple_lp_2_1d_sdp(bridged)

Same LP as [`simple_lp`](@ref), but each nonnegativity bound is imposed as
a 1×1 positive-semidefinite cone constraint instead of `GreaterThan(0.0)`.
The optimum is unchanged: x = (4/3, 4/3), objective ≈ -9.3333.
"""
function simple_lp_2_1d_sdp(bridged)
    MOI.empty!(bridged)
    @test MOI.is_empty(bridged)
    # two scalar decision variables
    X = MOI.add_variables(bridged, 2)
    # equality constraints: 2x₁ + x₂ == 4 and x₁ + 2x₂ == 4
    c1 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(2.0, X[1]),
            MOI.ScalarAffineTerm(1.0, X[2])
        ], 0.0), MOI.EqualTo(4.0))
    c2 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(1.0, X[1]),
            MOI.ScalarAffineTerm(2.0, X[2])
        ], 0.0), MOI.EqualTo(4.0))
    # nonnegativity via 1×1 PSD cones: [x₁] ⪰ 0 and [x₂] ⪰ 0
    b1 = MOI.add_constraint(bridged,
        MOI.VectorOfVariables([X[1]]), MOI.PositiveSemidefiniteConeTriangle(1))
    b2 = MOI.add_constraint(bridged,
        MOI.VectorOfVariables([X[2]]), MOI.PositiveSemidefiniteConeTriangle(1))
    # objective: minimize -4x₁ - 3x₂
    MOI.set(bridged,
        MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
        MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-4.0, -3.0], [X[1], X[2]]), 0.0)
    )
    MOI.set(bridged, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(bridged)
    # verify objective and primal solution against the known optimum
    obj = MOI.get(bridged, MOI.ObjectiveValue())
    @test obj ≈ -9.33333 atol = 1e-2
    Xr = MOI.get(bridged, MOI.VariablePrimal(), X)
    @test Xr ≈ [1.3333, 1.3333] atol = 1e-2
end
"""
    lp_in_SDP_equality_form(bridged)

Embed the LP of `simple_lp` in a 4×4 PSD matrix variable (10 upper-triangle
entries); the constraints touch only diagonal entries X[1], X[3], X[6],
X[10], so the PSD cone effectively enforces their nonnegativity.
"""
function lp_in_SDP_equality_form(bridged)
    MOI.empty!(bridged)
    @test MOI.is_empty(bridged)
    # 10 variables = upper triangle of a 4x4 symmetric matrix
    X = MOI.add_variables(bridged, 10)
    # PSD constraint on the full matrix; only the diagonal matters here
    vov = MOI.VectorOfVariables(X)
    cX = MOI.add_constraint(bridged, vov, MOI.PositiveSemidefiniteConeTriangle(4))
    c1 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(2.0, X[1]),
            MOI.ScalarAffineTerm(1.0, X[3]),
            MOI.ScalarAffineTerm(1.0, X[6])
        ], 0.0), MOI.EqualTo(4.0))
    c2 = MOI.add_constraint(bridged,
        MOI.ScalarAffineFunction([
            MOI.ScalarAffineTerm(1.0, X[1]),
            MOI.ScalarAffineTerm(2.0, X[3]),
            MOI.ScalarAffineTerm(1.0, X[10])
        ], 0.0), MOI.EqualTo(4.0))
    MOI.set(bridged,
        MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
        MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([-4.0, -3.0], [X[1], X[3]]), 0.0)
    )
    MOI.set(bridged, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(bridged)
    # same optimum as simple_lp; all off-diagonal/slack entries are zero
    obj = MOI.get(bridged, MOI.ObjectiveValue())
    @test obj ≈ -9.33333 atol = 1e-2
    Xr = MOI.get(bridged, MOI.VariablePrimal(), X)
    @test Xr ≈ [1.3333, .0, 1.3333, .0, .0, .0, .0, .0, .0, .0] atol = 1e-2
end
"""
    lp_in_SDP_inequality_form(optimizer)

The same LP posed in inequality (maximization) form over a 2×2 PSD matrix
variable: max 4x₁ + 3x₃ subject to 2x₁ + x₃ ≤ 4 and x₁ + 2x₃ ≤ 4.
Optimum ≈ 9.3333 at (4/3, 0, 4/3).
"""
function lp_in_SDP_inequality_form(optimizer)
    MOI.empty!(optimizer)
    @test MOI.is_empty(optimizer)
    # 3 variables = upper triangle of a 2x2 symmetric matrix
    X = MOI.add_variables(optimizer, 3)
    # PSD constraint; only the diagonal entries X[1], X[3] are relevant
    vov = MOI.VectorOfVariables(X)
    cX = MOI.add_constraint(optimizer, vov, MOI.PositiveSemidefiniteConeTriangle(2))
    c1 = MOI.add_constraint(optimizer,
        MOI.VectorAffineFunction(MOI.VectorAffineTerm.([1,1],[
            MOI.ScalarAffineTerm(2.0, X[1]),
            MOI.ScalarAffineTerm(1.0, X[3]),
        ]), [-4.0]), MOI.Nonpositives(1))
    c2 = MOI.add_constraint(optimizer,
        MOI.VectorAffineFunction(MOI.VectorAffineTerm.([1,1],[
            MOI.ScalarAffineTerm(1.0, X[1]),
            MOI.ScalarAffineTerm(2.0, X[3]),
        ]), [-4.0]), MOI.Nonpositives(1))
    MOI.set(optimizer,
        MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
        MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.([4.0, 3.0], [X[1], X[3]]), 0.0)
    )
    MOI.set(optimizer, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.optimize!(optimizer)
    obj = MOI.get(optimizer, MOI.ObjectiveValue())
    @test obj ≈ 9.33333 atol = 1e-2
    Xr = MOI.get(optimizer, MOI.VariablePrimal(), X)
    @test Xr ≈ [1.3333, .0, 1.3333] atol = 1e-2
    # smoke-test dual retrieval (values not asserted)
    c1_d = MOI.get(optimizer, MOI.ConstraintDual(), c1)
    c2_d = MOI.get(optimizer, MOI.ConstraintDual(), c2)
end
"""
    sdp_from_moi(optimizer)

Canonical small SDP (from the MOI test suite): minimize the trace of a 2×2
PSD matrix whose off-diagonal entry is fixed to 1. Optimum: X = ones(2,2),
objective 2 (dual y = 2).
"""
function sdp_from_moi(optimizer)
    #   min X[1,1] + X[2,2]            max y
    #       X[2,1] = 1                 [0  y/2    [1 0
    #                                   y/2 0] <=  0 1]
    #       X >= 0                      y free
    # Optimal solution:
    #
    #     ( 1  1 )
    # X = ( 1  1 )          y = 2
    MOI.empty!(optimizer)
    @test MOI.is_empty(optimizer)
    X = MOI.add_variables(optimizer, 3)
    vov = MOI.VectorOfVariables(X)
    cX = MOI.add_constraint(optimizer, vov, MOI.PositiveSemidefiniteConeTriangle(2))
    # fix the off-diagonal entry: X[2,1] - 1 == 0
    c = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[2]))], [-1.0]), MOI.Zeros(1))
    MOI.set(optimizer, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [X[1], X[3]]), 0.0))
    MOI.set(optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(optimizer)
    @test MOI.get(optimizer, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
    @test MOI.get(optimizer, MOI.DualStatus()) == MOI.FEASIBLE_POINT
    @test MOI.get(optimizer, MOI.ObjectiveValue()) ≈ 2 atol=1e-2
    Xv = ones(3)
    @test MOI.get(optimizer, MOI.VariablePrimal(), X) ≈ Xv atol=1e-2
    # @test MOI.get(optimizer, MOI.ConstraintPrimal(), cX) ≈ Xv atol=1e-2
    # @test MOI.get(optimizer, MOI.ConstraintDual(), c) ≈ 2 atol=1e-2
    # @show MOI.get(optimizer, MOI.ConstraintDual(), c)
end
"""
    double_sdp_from_moi(optimizer)

Two independent copies of the `sdp_from_moi` problem solved simultaneously;
the joint objective is the sum of both traces, so the optimum is 2*2 = 4
with X = Y = ones(2,2).
"""
function double_sdp_from_moi(optimizer)
    # solve simultaneously two of these:
    #   min X[1,1] + X[2,2]            max y
    #       X[2,1] = 1                 [0  y/2    [1 0
    #                                   y/2 0] <=  0 1]
    #       X >= 0                      y free
    # Optimal solution:
    #
    #     ( 1  1 )
    # X = ( 1  1 )          y = 2
    MOI.empty!(optimizer)
    @test MOI.is_empty(optimizer)
    X = MOI.add_variables(optimizer, 3)
    Y = MOI.add_variables(optimizer, 3)
    vov = MOI.VectorOfVariables(X)
    vov2 = MOI.VectorOfVariables(Y)
    cX = MOI.add_constraint(optimizer, vov, MOI.PositiveSemidefiniteConeTriangle(2))
    cY = MOI.add_constraint(optimizer, vov2, MOI.PositiveSemidefiniteConeTriangle(2))
    # fix both off-diagonal entries to 1
    c = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[2]))], [-1.0]), MOI.Zeros(1))
    c2 = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, Y[2]))], [-1.0]), MOI.Zeros(1))
    MOI.set(optimizer, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [X[1], X[end], Y[1], Y[end]]), 0.0))
    MOI.set(optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(optimizer)
    @test MOI.get(optimizer, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
    @test MOI.get(optimizer, MOI.DualStatus()) == MOI.FEASIBLE_POINT
    @test MOI.get(optimizer, MOI.ObjectiveValue()) ≈ 2*2 atol=1e-2
    Xv = ones(3)
    @test MOI.get(optimizer, MOI.VariablePrimal(), X) ≈ Xv atol=1e-2
    Yv = ones(3)
    @test MOI.get(optimizer, MOI.VariablePrimal(), Y) ≈ Yv atol=1e-2
    # @test MOI.get(optimizer, MOI.ConstraintPrimal(), cX) ≈ Xv atol=1e-2
    # @test MOI.get(optimizer, MOI.ConstraintDual(), c) ≈ 2 atol=1e-2
    # @show MOI.get(optimizer, MOI.ConstraintDual(), c)
end
"""
    double_sdp_with_duplicates(optimizer)

Variant of `sdp_from_moi` where the same scalar variable is used for all
three entries of the 2×2 PSD triangle — exercises the solver's handling of
duplicated variables inside a `VectorOfVariables` cone constraint.
"""
function double_sdp_with_duplicates(optimizer)
    MOI.empty!(optimizer)
    x = MOI.add_variable(optimizer)
    # one variable repeated in every position of the triangle
    X = [x, x, x]
    vov = MOI.VectorOfVariables(X)
    cX = MOI.add_constraint(optimizer, vov, MOI.PositiveSemidefiniteConeTriangle(2))
    c = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[2]))], [-1.0]), MOI.Zeros(1))
    MOI.set(optimizer, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [X[1], X[3]]), 0.0))
    MOI.set(optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(optimizer)
    @test MOI.get(optimizer, MOI.TerminationStatus()) == MOI.OPTIMAL
    @test MOI.get(optimizer, MOI.PrimalStatus()) == MOI.FEASIBLE_POINT
    @test MOI.get(optimizer, MOI.DualStatus()) == MOI.FEASIBLE_POINT
    @test MOI.get(optimizer, MOI.ObjectiveValue()) ≈ 2 atol=1e-2
    Xv = ones(3)
    @test MOI.get(optimizer, MOI.VariablePrimal(), X) ≈ Xv atol=1e-2
end
"""
    sdp_wiki(optimizer)

The example SDP from the Wikipedia "Semidefinite programming" article:
bound the correlation X[2,3] of a 3×3 correlation matrix (unit diagonal)
given interval constraints on the other two correlations. Solves both the
minimization (≈ -0.978) and maximization (≈ 0.872) of X[2,3] (entry X[5]
in triangle order, objective placed on X[4] below — see NOTE).
"""
function sdp_wiki(optimizer)
    # https://en.wikipedia.org/wiki/Semidefinite_programming
    MOI.empty!(optimizer)
    @test MOI.is_empty(optimizer)
    # 6 variables = upper triangle of a 3x3 symmetric matrix
    X = MOI.add_variables(optimizer, 6)
    vov = MOI.VectorOfVariables(X)
    cX = MOI.add_constraint(optimizer, vov, MOI.PositiveSemidefiniteConeTriangle(3))
    # unit diagonal: X[1] = X[3] = X[6] = 1
    # NOTE(review): `cd1` is rebound three times (the later two were likely
    # meant to be cd2/cd3); harmless since the bindings are unused.
    cd1 = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[1]))], [-1.0]), MOI.Zeros(1))
    cd1 = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[3]))], [-1.0]), MOI.Zeros(1))
    cd1 = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[6]))], [-1.0]), MOI.Zeros(1))
    # interval bounds on the two known correlations
    c12_ub = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[2]))], [0.1]), MOI.Nonpositives(1)) # x <= -0.1 -> x + 0.1 <= 0
    c12_lb = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(-1.0, X[2]))], [-0.2]), MOI.Nonpositives(1)) # x >= -0.2 -> -x + -0.2 <= 0
    c23_ub = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(1.0, X[5]))], [-0.5]), MOI.Nonpositives(1)) # x <= 0.5 -> x - 0.5 <= 0
    c23_lb = MOI.add_constraint(optimizer, MOI.VectorAffineFunction([MOI.VectorAffineTerm(1,MOI.ScalarAffineTerm(-1.0, X[5]))], [0.4]), MOI.Nonpositives(1)) # x >= 0.4 -> -x + 0.4 <= 0
    MOI.set(optimizer, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, [X[4]]), 0.0))
    MOI.set(optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE)
    MOI.optimize!(optimizer)
    obj = MOI.get(optimizer, MOI.ObjectiveValue())
    @test obj ≈ -0.978 atol=1e-2
    # re-solve the same model as a maximization
    MOI.set(optimizer, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.optimize!(optimizer)
    obj = MOI.get(optimizer, MOI.ObjectiveValue())
    @test obj ≈ 0.872 atol=1e-2
end
# Run every test case against the default bridged optimizer.
simple_lp(optimizer_bridged)
simple_lp_2_1d_sdp(optimizer_bridged)
lp_in_SDP_equality_form(optimizer_bridged)
lp_in_SDP_inequality_form(optimizer_bridged)
sdp_from_moi(optimizer_bridged)
double_sdp_from_moi(optimizer_bridged)
double_sdp_with_duplicates(optimizer_bridged)
sdp_wiki(optimizer_bridged)
# print test
# Exercise the verbose/extended logging code paths with a fresh optimizer.
const optimizer_print = MOI.instantiate(
    ()->ProxSDP.Optimizer(
        log_freq = 10, log_verbose = true, timer_verbose = true, extended_log = true, extended_log2 = true,
        tol_gap = 1e-4, tol_feasibility = 1e-4),
    with_bridge_type = Float64)
sdp_wiki(optimizer_print)
# eig solvers
# Re-run the wiki SDP under each eigensolver backend, then restore the
# original raw-attribute settings.
default_solver = MOI.get(optimizer_bridged, MOI.RawOptimizerAttribute("eigsolver"))
min_size_krylov_eigs = MOI.get(optimizer_bridged, MOI.RawOptimizerAttribute("min_size_krylov_eigs"))
MOI.set(optimizer_bridged, MOI.RawOptimizerAttribute("eigsolver"), 1)
MOI.set(optimizer_bridged, MOI.RawOptimizerAttribute("min_size_krylov_eigs"), 1)
sdp_wiki(optimizer_bridged)
MOI.set(optimizer_bridged, MOI.RawOptimizerAttribute("eigsolver"), 2)
sdp_wiki(optimizer_bridged)
MOI.set(optimizer_bridged, MOI.RawOptimizerAttribute("min_size_krylov_eigs"), min_size_krylov_eigs)
MOI.set(optimizer_bridged, MOI.RawOptimizerAttribute("eigsolver"), default_solver)
# Also cover the full (dense) eigendecomposition path, then switch it off.
MOI.set(optimizer_bridged, MOI.RawOptimizerAttribute("full_eig_decomp"), true)
sdp_wiki(optimizer_bridged)
MOI.set(optimizer_bridged, MOI.RawOptimizerAttribute("full_eig_decomp"), false)
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
8818,
2829,
62,
34431,
7,
10236,
2004,
8,
628,
220,
220,
220,
13070,
40,
13,
28920,
0,
7,
10236,
2004,
8,
198,
220,
220,
220,
2488,
9288,
13070,
40,
13,
271,
62,
28920,
7,
10236,
2004,
... | 2.02681 | 6,714 |
#=
# MNIST
We begin by importing the required packages.
We load MNIST via the MLDatasets.jl package.
=#
import Makie
import CairoMakie
import MLDatasets
import Flux
import RestrictedBoltzmannMachines as RBMs
import ConvolutionalRBMs as ConvRBMs
using Statistics: mean, var, std
using ValueHistories: MVHistory
using Random: bitrand
using RestrictedBoltzmannMachines: visible, hidden, weights, log_pseudolikelihood, transfer_sample
nothing #hide
#=
Useful function to plot MNIST digits.
=#
"""
    imggrid(A)

Given a four dimensional tensor `A` of size `(width, height, ncols, nrows)`
containing `width x height` images in a grid of `nrows x ncols`, return a
matrix of size `(width * ncols, height * nrows)` that can be plotted as a
heatmap to display all images at once.
"""
function imggrid(A::AbstractArray{<:Any,4})
    w, h, nc, nr = size(A)
    # Interleave the grid-column dimension with image width before flattening.
    tiled = permutedims(A, (1, 3, 2, 4))
    return reshape(tiled, w * nc, h * nr)
end
# Convenience wrapper: frame every image with `borderval`, then tile the grid.
function imggrid_border(A::AbstractArray{<:Any,4}, borderval = 1)
    return imggrid(bordered(A, borderval))
end
# Surround each 2D image slice of the 4-tensor `A` with a one-pixel frame of
# value `borderval`, so a (w, h, ncols, nrows) tensor becomes (w+2, h+2, ncols, nrows).
function bordered(A::AbstractArray{<:Any,4}, borderval = 1)
    return mapslices(A; dims=(1,2)) do patch::AbstractMatrix
        top = fill(borderval, 1, size(patch, 2) + 2)
        side = fill(borderval, size(patch, 1))
        vcat(top, hcat(side, patch, side), top)
    end
end
#=
Load MNIST dataset.
=#
Float = Float32
train_x, train_y = MLDatasets.MNIST.traindata()
digit = 2
# Keep only the chosen digit and binarize pixels at 0.5.
train_x = Array{Float}(train_x[:, :, train_y .== digit] .≥ 0.5)
train_y = train_y[train_y .== digit]
println(length(train_y), " training images (for digit = $digit)")
nothing #hide
# Reshape for convolutional input
train_x = reshape(train_x, 1, 28, 28, :) # channel dims, input dims, batch dims
nothing #hide
# Initialize the convolutional RBM.
rbm = ConvRBMs.BinaryConvRBM(Float, 1, 16, (15,15); pad=:same, pool=true)
RBMs.initialize!(rbm, train_x)
nothing #hide
# Pseudolikelihood before training
# (on a random subsample of 256 images, as a baseline)
idx = rand(1:size(train_x)[end], 256)
mean(@time log_pseudolikelihood(rbm, train_x[:,:,:,idx]))
# Initialize training
batchsize = 256
optim = Flux.ADAM()
vm = transfer_sample(visible(rbm), falses(1, 28, 28, batchsize)) # fantasy chains
history = MVHistory()
nothing #hide
# Train!
# 20 outer iterations of 5 PCD epochs each; pseudolikelihood is logged per iteration.
@time for iter in 1:20
    ConvRBMs.pcd!(rbm, train_x; vm, history, batchsize, optim, epochs=5)
    lpl = log_pseudolikelihood(rbm, train_x[:, :, :, rand(1:size(train_x)[end], 1024)])
    push!(history, :lpl_ave, mean(lpl))
    push!(history, :lpl_std, std(lpl))
end
nothing #hide
# Plot of log-pseudolikelihood of train data during learning.
fig = Makie.Figure(resolution=(600,300))
ax = Makie.Axis(fig[1,1], xlabel = "train time", ylabel="pseudolikelihood")
Makie.band!(ax, get(history, :lpl_ave)[1],
    get(history, :lpl_ave)[2] - get(history, :lpl_std)[2]/2,
    get(history, :lpl_ave)[2] + get(history, :lpl_std)[2]/2,
    color=:lightblue
)
Makie.lines!(ax, get(history, :lpl_ave)..., color=:blue)
fig
#=
Now let's generate some random RBM samples.
=#
nrows, ncols = 10, 15
nsteps = 1000
# Track the free energy of every fantasy chain at every Gibbs step.
fantasy_F = zeros(nrows*ncols, nsteps)
fantasy_x = bitrand(1,28,28,nrows*ncols)
fantasy_F[:,1] .= RBMs.free_energy(rbm, fantasy_x)
@time for t in 2:nsteps
    fantasy_x .= RBMs.sample_v_from_v(rbm, fantasy_x)
    fantasy_F[:,t] .= RBMs.free_energy(rbm, fantasy_x)
end
nothing #hide
# Check equilibration of sampling
fig = Makie.Figure(resolution=(400,300))
ax = Makie.Axis(fig[1,1], xlabel="sampling time", ylabel="free energy")
fantasy_F_μ = vec(mean(fantasy_F; dims=1))
fantasy_F_σ = vec(std(fantasy_F; dims=1))
Makie.band!(ax, 1:nsteps, fantasy_F_μ - fantasy_F_σ/2, fantasy_F_μ + fantasy_F_σ/2)
Makie.lines!(ax, 1:nsteps, fantasy_F_μ)
fig
# Plot the resulting samples.
fig = Makie.Figure(resolution=(40ncols, 40nrows))
ax = Makie.Axis(fig[1,1], yreversed=true)
Makie.image!(ax, imggrid(reshape(fantasy_x, 28, 28, ncols, nrows)), colorrange=(1,0))
Makie.hidedecorations!(ax)
Makie.hidespines!(ax)
fig
# Plot the filters learned
# (each filter normalized by its own max absolute weight before display)
wncols = 4; wnrows = 4
fig = Makie.Figure(resolution=(80wncols, 80wnrows))
ax = Makie.Axis(fig[1,1], yreversed=true)
Makie.image!(ax, imggrid_border(reshape(rbm.w ./ maximum(abs, rbm.w; dims=(2,3)), 15, 15, wncols, wnrows)))
Makie.hidedecorations!(ax)
Makie.hidespines!(ax)
fig
| [
2,
28,
198,
2,
29060,
8808,
198,
198,
1135,
2221,
416,
33332,
262,
2672,
10392,
13,
198,
1135,
3440,
29060,
8808,
2884,
262,
10373,
27354,
292,
1039,
13,
20362,
5301,
13,
198,
46249,
198,
198,
11748,
15841,
494,
198,
11748,
23732,
44,... | 2.389574 | 1,784 |
<reponame>JuliaML/LearnBase.jl
"""
Return the gradient of the learnable parameters w.r.t. some objective
"""
function grad end
# Mutating variant of `grad` (writes into a preallocated buffer).
function grad! end
"""
Proximal operator of a function (https://en.wikipedia.org/wiki/Proximal_operator)
"""
function prox end
# Mutating variant of `prox`.
function prox! end
"""
Anything that takes an input and performs some kind
of function to produce an output. For example a linear
prediction function.
"""
abstract type Transformation end
# A transformation whose output is random (e.g. involves sampling).
abstract type StochasticTransformation <: Transformation end
# A transformation with parameters that can be learned.
abstract type Learnable <: Transformation end
"""
Do a forward pass, and return the output
"""
function transform end
# Mutating variant of `transform`.
function transform! end
"""
Baseclass for any prediction model that can be minimized.
This means that an object of a subclass contains all the
information needed to compute its own current loss.
"""
abstract type Minimizable <: Learnable end
# Interface stubs: packages implementing models extend these verbs.
function update end
function update! end
function learn end
function learn! end
# --------------------------------------------------------------------
import Base: AbstractSet
"A continuous range (inclusive) between a lo and a hi"
struct IntervalSet{T} <: AbstractSet{T}
    lo::T
    hi::T
end

# Outer constructor: promote both endpoints to a common type.
function IntervalSet(lo::A, hi::B) where {A,B}
    P = promote_type(A, B)
    return IntervalSet{P}(convert(P, lo), convert(P, hi))
end

# --- Numeric intervals -------------------------------------------------------
# Sampling a numeric interval always yields Float64 values.
randtype(s::IntervalSet{T}) where T <: Number = Float64
# Uniform samples rescaled from [0, 1) onto [lo, hi).
Base.rand(s::IntervalSet{T}, dims::Integer...) where T <: Number =
    rand(dims...) .* (s.hi - s.lo) .+ s.lo
# Membership is inclusive at both endpoints.
Base.in(x::Number, s::IntervalSet{T}) where T <: Number = s.lo <= x <= s.hi
# A scalar interval counts as a single degree of freedom.
Base.length(s::IntervalSet{T}) where T <: Number = 1
Base.:(==)(s1::IntervalSet{T}, s2::IntervalSet{T}) where T =
    s1.lo == s2.lo && s1.hi == s2.hi

# --- Vector-valued intervals (axis-aligned boxes) ----------------------------
randtype(s::IntervalSet{T}) where T <: AbstractVector = Vector{Float64}
Base.rand(s::IntervalSet{T}) where T <: AbstractVector =
    Float64[s.lo[i] + rand() * (s.hi[i] - s.lo[i]) for i in 1:length(s)]
Base.in(x::AbstractVector, s::IntervalSet{T}) where T <: AbstractVector =
    all(s.lo[i] <= x[i] <= s.hi[i] for i in 1:length(s))
Base.length(s::IntervalSet{T}) where T <: AbstractVector = length(s.lo)
"Set of discrete items"
struct DiscreteSet{T<:AbstractArray} <: AbstractSet{T}
    items::T
end

# Sampling yields whatever the backing array holds.
randtype(ds::DiscreteSet) = eltype(ds.items)
# Draw uniformly from the stored items.
Base.rand(ds::DiscreteSet, dims::Integer...) = rand(ds.items, dims...)
# Membership, size, indexing, and equality all delegate to the backing array.
Base.in(x, ds::DiscreteSet) = x in ds.items
Base.length(ds::DiscreteSet) = length(ds.items)
Base.getindex(ds::DiscreteSet, i::Int) = ds.items[i]
Base.:(==)(a::DiscreteSet, b::DiscreteSet) = a.items == b.items
# operations on arrays of sets
# Element type for sampling an array of sets: an array whose eltype can hold a
# sample from every member set.
randtype(sets::AbstractArray{S,N}) where {S <: AbstractSet, N} = Array{promote_type(map(randtype, sets)...), N}
# One joint sample: draw independently from each member set.
Base.rand(sets::AbstractArray{S}) where S <: AbstractSet = eltype(randtype(sets))[rand(s) for s in sets]
# Draw an array of joint samples of the requested shape.
function Base.rand(sets::AbstractArray{S}, dim1::Integer, dims::Integer...) where S <: AbstractSet
    out = Array{randtype(sets)}(undef, dim1, dims...)
    for idx in eachindex(out)
        out[idx] = rand(sets)
    end
    return out
end
# `xs` belongs iff the shapes match and each element lies in its paired set.
function Base.in(xs::AbstractArray, sets::AbstractArray{S}) where S <: AbstractSet
    return size(xs) == size(sets) && all(in(x, s) for (x, s) in zip(xs, sets))
end
"Groups several heterogenous sets. Used mainly for proper dispatch."
struct TupleSet{T<:Tuple} <: AbstractSet{T}
    sets::T
end

TupleSet(members::AbstractSet...) = TupleSet(members)

# rand can return arrays or tuples, but defaults to arrays
randtype(ts::TupleSet, ::Type{Vector}) = Vector{promote_type(map(randtype, ts.sets)...)}
Base.rand(ts::TupleSet, ::Type{Vector}) = eltype(randtype(ts, Vector))[rand(s) for s in ts.sets]
randtype(ts::TupleSet, ::Type{Tuple}) = Tuple{map(randtype, ts.sets)...}
Base.rand(ts::TupleSet, ::Type{Tuple}) = map(rand, ts.sets)

# Draw an array (dim1 × dims...) of joint samples in output form OT.
function Base.rand(ts::TupleSet, ::Type{OT}, dim1::Integer, dims::Integer...) where OT
    out = Array{randtype(ts, OT)}(undef, dim1, dims...)
    for idx in eachindex(out)
        out[idx] = rand(ts, OT)
    end
    return out
end

# Total number of degrees of freedom across all member sets.
Base.length(ts::TupleSet) = sum(length, ts.sets)
# Iteration yields the member sets themselves.
Base.iterate(ts::TupleSet) = iterate(ts.sets)
Base.iterate(ts::TupleSet, state) = iterate(ts.sets, state)

randtype(ts::TupleSet) = randtype(ts, Vector)
Base.rand(ts::TupleSet, dims::Integer...) = rand(ts, Vector, dims...)
Base.in(x, ts::TupleSet) = all(map(in, x, ts.sets))
# Interface stubs: packages implementing learnable models extend these.
"Returns an AbstractSet representing valid input values"
function inputdomain end
"Returns an AbstractSet representing valid output/target values"
function targetdomain end
| [
27,
7856,
261,
480,
29,
16980,
544,
5805,
14,
20238,
14881,
13,
20362,
198,
37811,
198,
13615,
262,
31312,
286,
262,
2193,
540,
10007,
266,
13,
81,
13,
83,
13,
617,
9432,
198,
37811,
198,
8818,
3915,
886,
198,
8818,
3915,
0,
886,
... | 2.843212 | 1,569 |
# NOTE(review): developer-local scratch script — hard-codes a personal dev path.
# Should be set up in your environment; nothing to do with the speed test itself.
cd("/Users/frank/.julia/dev/TypeDBClient_Speed")
using Pkg
Pkg.activate(".")
using TypeDBClient
using TypeDBClient: CoreSession, CoreClient
using UUIDs
# Shorthand for the package module.
g = TypeDBClient
client = g.CoreClient("127.0.0.1",1729)
Optional{T} = Union{Nothing,T}
# @info "deleting all databases"
# dbs = g.get_all_databases(client)
# for item in dbs
#     g.delete_database(client, item.name)
# end
@info "create database typedb"
# Create the database only if it does not exist yet.
if !g.contains_database(client, "typedb")
    g.create_database(client, "typedb")
end
# NOTE(review): the keyword is spelled `request_timout` (sic) throughout this file.
sess = CoreSession(client, "typedb" , g.Proto.Session_Type.DATA, request_timout=Inf)
# Identity response handler passed to the gRPC transaction call: each
# incoming task/message is forwarded unchanged.
trans_func(task) = task
# Open a TypeDB transaction stream and return its input channel.
# NOTE(review): most of the transaction bootstrapping is commented out below —
# as written this only sets up the bidirectional gRPC stream.
function coreTransaction(session::g.CoreSession,
        sessionId::g.Bytes,
        type::g.EnumType,
        options::g.TypeDBOptions,
        grpc_controller;
        request_timout::Real=session.request_timeout)
    type = type # NOTE(review): self-assignment, no effect
    options = options # NOTE(review): self-assignment, no effect
    # Buffered channel feeding client messages into the gRPC stream.
    input_channel = Channel{g.Proto.Transaction_Client}(10)
    proto_options = g.copy_to_proto(options, g.Proto.Options)
    # Start the bidirectional transaction RPC; `trans_func` is the response handler.
    res_imm = g.Proto.transaction(session.client.core_stub.asyncStub , grpc_controller, input_channel, trans_func)
    req_result, status = fetch(res_imm)
    output_channel = g.grpc_result_or_error(req_result, status, result->result)
    println(status)
    # open_req = g.TransactionRequestBuilder.open_req(session.sessionID, type, proto_options,session.networkLatencyMillis)
    # bidirectionalStream = g.BidirectionalStream(input_channel, output_channel, status)
    # trans_id = uuid4()
    # result = g.CoreTransaction(type, options, bidirectionalStream, trans_id, sessionId, request_timout, session)
    # # The following is only for warming up Transaction. If we didn't do this
    # # it could happen that a transaction reach a timeout.
    # req_result = g.execute(result, open_req, false)
    # kind_of_result = g.Proto.which_oneof(req_result, :res)
    # getproperty(req_result, kind_of_result)
    return input_channel
end
@info "beginning Threads"
# A second client/session pair dedicated to the threaded benchmark below.
client_in = CoreClient("127.0.0.1", 1729)
sess_in = CoreSession(client_in, "typedb", g.Proto.Session_Type.DATA, request_timout=Inf)
grpc_controller = g.gRPCController(request_timeout=sess_in.request_timeout)
# Open two WRITE transactions concurrently to stress the stream setup.
Threads.@threads for i in 1:2
    @info "here I am"
    res = coreTransaction(sess_in,
        sess_in.sessionID,
        g.Proto.Transaction_Type.WRITE,
        g.TypeDBOptions(),
        grpc_controller,
        request_timout = sess_in.request_timeout)
    @info "I'm close to end"
end
# close(sess)
| [
2,
815,
307,
534,
2858,
290,
423,
329,
2147,
284,
466,
348,
342,
262,
2866,
62,
9288,
198,
10210,
7203,
14,
14490,
14,
8310,
962,
11757,
73,
43640,
14,
7959,
14,
6030,
11012,
11792,
62,
22785,
4943,
198,
3500,
350,
10025,
198,
47,
... | 2.68259 | 942 |
# Base type for all parameter-update rules (Q-learning variants, TD, ...).
abstract type AbstractLearningUpdate end
# Convenience method: accepts states as vectors-of-vectors and forwards them
# as matrices (one column per sample) to the matrix-based methods below.
function update!(model,
                 lu::T,
                 opt,
                 s_t::Array{Array{AF, 1}, 1},
                 a_t::Array{<:Integer, 1},
                 s_tp1::Array{Array{AF, 1}, 1},
                 r::Array{AF, 1},
                 terminal,
                 args...) where {AF<:AbstractFloat, T<:AbstractLearningUpdate}
    update!(model, lu, opt, hcat(s_t...), a_t, hcat(s_tp1...), r, terminal, args...)
end
# function update!(model,
#                  lu::T,
#                  opt,
#                  s_t::Array{Array{AF, 1}, 1},
#                  a_t::Array{<:Integer, 1},
#                  s_tp1::Array{Array{AF, 1}, 1},
#                  r::Array{AF, 1},
#                  terminal) where {AF<:AbstractFloat, T<:AbstractLearningUpdate}
#     update!(model, lu, opt, hcat(s_t...), a_t, hcat(s_tp1...), r, terminal)
# end
# function update!(model::GVFNetwork,
#                  lu, opt, s_t, a_t, s_tp1, r, terminal)
#     update!(model.gvf_model, lu[1], opt, model.horde, s_t, a_t, s_tp1, r, terminal)
#     update!(model, lu[2], opt, s_t, a_t, s_tp1, r, terminal)
# end
abstract type AbstractQLearning <: AbstractLearningUpdate end
# Standard Q-learning update with discount factor γ.
struct QLearning <: AbstractQLearning
    γ::Float32
end
# Q-learning loss with a separate target network.
function loss(lu::QLearning, model, s_t, a_t, s_tp1, r, terminal, target_model)
    # Zero the discount on terminal transitions.
    γ = lu.γ.*(1 .- terminal)
    # CartesianIndex of the taken action in each column of the batch.
    action_idx = [CartesianIndex(a_t[i], i) for i in 1:length(terminal)]
    q_tp1 = maximum(target_model(s_tp1); dims=1)[1,:]
    target = (r .+ γ.*q_tp1)
    q_t = model(s_t)[action_idx]
    return Flux.mse(target, q_t)
end
# Without a target network: bootstrap from the online model, with gradients
# stopped through the bootstrap value.
function loss(lu::QLearning, model, s_t, a_t, s_tp1, r, terminal, target_model::Nothing)
    γ = lu.γ.*(1 .- terminal)
    action_idx = [CartesianIndex(a_t[i], i) for i in 1:length(terminal)]
    # NOTE(review): uses the legacy Flux.Tracker API here but Flux.data elsewhere
    # in this file — confirm the intended Flux version.
    q_tp1 = Flux.Tracker.data(maximum(model(s_tp1); dims=1)[1,:])
    target = (r .+ γ.*q_tp1)
    q_t = model(s_t)[action_idx]
    return Flux.mse(target, q_t)
end
# Double Q-learning: action selection by the online net, evaluation by the target net.
struct DoubleQLearning <: AbstractQLearning
    γ::Float32
end
function loss(lu::DoubleQLearning, model, s_t, a_t, s_tp1, r, terminal, target_model)
    γ = lu.γ.*(1 .- terminal)
    action_idx = [CartesianIndex(a_t[i], i) for i in 1:length(terminal)]
    # Greedy next actions chosen by the online model (gradients stopped)...
    q̃_tp1 = Flux.data(model(s_tp1))
    q̃_tp1_argmax = findmax(q̃_tp1; dims=1)
    action_tp1 = [q̃_tp1_argmax[2][i] for i in 1:length(terminal)]
    # ...evaluated by the target model.
    q_tp1 = target_model(s_tp1)[action_tp1]
    target = (r .+ γ.*q_tp1)
    q_t = model(s_t)[action_idx]
    return Flux.mse(target, q_t)
end
# Variant without a target network: the online model also evaluates the chosen
# actions, with gradients stopped through the bootstrap value.
function loss(lu::DoubleQLearning, model, s_t, a_t, s_tp1, r, terminal, target_model::Nothing)
    γ = lu.γ.*(1 .- terminal)
    action_idx = [CartesianIndex(a_t[i], i) for i in 1:length(terminal)]
    q̃_tp1 = Flux.data(model(s_tp1))
    q̃_tp1_argmax = findmax(q̃_tp1; dims=1)
    action_tp1 = [q̃_tp1_argmax[2][i] for i in 1:length(terminal)]
    q_tp1 = Flux.data(model(s_tp1)[action_tp1])
    target = (r .+ γ.*q_tp1)
    q_t = model(s_t)[action_idx]
    return Flux.mse(target, q_t)
end
# Compute the Q-learning loss gradient for a matrix batch and apply it via `opt`.
function update!(model, lu::LU, opt,
                 s_t::Array{<:AbstractFloat, 2},
                 a_t::Array{<:Integer, 1},
                 s_tp1::Array{<:AbstractFloat, 2},
                 r::Array{<:AbstractFloat, 1},
                 terminal,
                 target_model) where {LU<:AbstractQLearning}
    ps = params(model)
    gs = Flux.gradient(ps) do
        loss(lu, model, s_t, a_t, s_tp1, r, terminal, target_model)
    end
    Flux.Optimise.update!(opt, ps, gs)
end
# Temporal-difference update for general value functions (GVFs).
struct TDLearning end
function loss(lu::TDLearning, model, s_t, a_t, s_tp1, r, terminal, target_model, horde::H) where {H<:Horde}
    # get GVF horde parameters
    # p[i] = (cumulant, discount, policy probability) for sample i.
    p = [RLCore.get(horde, s_t[:,i], a_t, s_tp1[:,i]) for i in 1:length(terminal)]
    c = hcat(getindex.(p, 1)...)
    γ = hcat(getindex.(p, 2)...)
    # NOTE(review): π is computed but never used in this loss — confirm intended.
    π = hcat(getindex.(p, 3)...)
    v_t = model(s_t)
    v_tp1 = target_model(s_tp1)
    target = c .+ γ.*v_tp1 # -> Matrix (preds × batch_size)
    return sum((target .- v_t).^2) * (1 // length(terminal))
end
# Without a target network: bootstrap from the online model with gradients stopped.
loss(lu::TDLearning, model, s_t, a_t, s_tp1, r, terminal, target_model::Nothing, horde::H) where {H<:Horde} =
    loss(lu, model, s_t, a_t, s_tp1, r, terminal, (x)->Flux.data(model(x)), horde)
# Q-learning augmented with auxiliary GVF prediction heads, weighted by β.
struct AuxQLearning{T<:AbstractQLearning} <: AbstractLearningUpdate
    β::Float32
    q_learning::T
    td_learning::TDLearning
end
function update!(
        model,
        lu::AQL,
        opt,
        s_t::Array{<:AbstractFloat, 2},
        a_t::Array{<:Integer, 1},
        s_tp1::Array{<:AbstractFloat, 2},
        r::Array{<:AbstractFloat, 1},
        terminal,
        target_model,
        horde::H) where {AQL<:AuxQLearning, H<:RLCore.Horde}
    # The last `num_gvfs` output rows are the auxiliary GVF predictions;
    # the rows before them are the Q-values.
    num_gvfs = length(horde)
    ps = params(model)
    gs = Flux.gradient(ps) do
        ℒ_q = loss(lu.q_learning, (x)->model(x)[1:(end-num_gvfs), :], s_t, a_t, s_tp1, r, terminal, (x)->target_model(x)[1:(end-num_gvfs), :])
        # @show ℒ_q
        ℒ_td = loss(lu.td_learning, (x)->model(x)[(end-num_gvfs+1):end, :], s_t, a_t, s_tp1, r, terminal, (x)->target_model(x)[(end-num_gvfs+1):end, :], horde)
        # @show ℒ_td
        return ℒ_q + lu.β*ℒ_td
    end
    Flux.Optimise.update!(opt, ps, gs)
end
# With no horde there are no auxiliary heads: fall back to plain Q-learning.
function update!(
        model,
        lu::AuxQLearning,
        opt,
        s_t::Array{<:AbstractFloat, 2},
        a_t::Array{<:Integer, 1},
        s_tp1::Array{<:AbstractFloat, 2},
        r::Array{<:AbstractFloat, 1},
        terminal,
        target_model,
        horde::Nothing)
    update!(model, lu.q_learning, opt, s_t, a_t, s_tp1, r, terminal, target_model)
end
| [
198,
397,
8709,
2099,
27741,
41730,
10260,
886,
628,
198,
8818,
4296,
0,
7,
19849,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
300,
84,
3712,
51,
11,
198,
220,
220,
220,
220,
220,
220... | 1.932168 | 2,860 |
<filename>src/features.jl<gh_stars>1-10
# Per-track asymmetry of the 2x2 second-moment (gyration) tensor of (x, y) positions:
#     A = -log(1 - (λ1 - λ2)^2 / (2 (λ1 + λ2)^2))
# NOTE(review): relies on `extract(df, n, id, [x, y])` defined elsewhere —
# presumably returns the track's positions as an N×2 matrix; confirm.
function asymmetry(df, id, x, y)
    result = DataFrame(TrackID = Int[], len = Int[], asymmetry = Float64[])
    @inbounds for n = minimum(df[!, id]):maximum(df[!, id])
        m = extract(df, n, id, [x, y])
        T = Matrix{Float64}(undef, 2, 2)
        mean_x = @views mean(m[:, 1])
        mean_y = @views mean(m[:, 2])
        t1 = 0.0
        t2 = 0.0
        t3 = 0.0
        N = size(m, 1)
        # Accumulate the centered second moments.
        for t = 1:N
            _x = m[t, 1] - mean_x
            _y = m[t, 2] - mean_y
            t1 += abs2(_x)
            t2 += _x * _y
            t3 += abs2(_y)
        end
        t1 /= N
        t2 /= N
        t3 /= N
        T[1, 1] = t1
        T[1, 2] = t2
        T[2, 1] = t2
        T[2, 2] = t3
        λ1, λ2 = eigvals(T)
        A = -log(1 - abs2(λ1 - λ2)/(2*abs2(λ1 + λ2)))
        push!(result, [Int(n), Int(N), A])
    end
    result
end
# Non-Gaussian parameter of per-row steps in column `x`: mean(Δ⁴)/(3 mean(Δ²)²) − 1.
# Steps that cross track boundaries (rows where `frame` == 1) are discarded.
function non_gaussian_parameter(df, x, frame=:FRAME2)
    Δ = diff(df[!, x])
    # Pad so Δ aligns row-wise with df; the pad and boundary rows become NaN ...
    prepend!(Δ, NaN)
    Δ[findall(x -> x == 1, df[!, frame])] .= NaN
    # ... and are dropped here.
    filter!(!isnan, Δ)
    a = mean(Δ.^4)
    b = mean(abs2.(Δ))^2
    # Note: `a/3b` parses as a/(3b) in Julia (literal coefficient binds tightly).
    a/3b - 1
end
# Per-track efficiency: net squared displacement over the mean per-step
# squared displacement.
# NOTE(review): relies on `extract` and `squared_displacement(m, t, δ)` defined elsewhere.
function efficiency(df, id, x, y)
    result = DataFrame(TrackID = Int[], value = Float64[])
    # NOTE(review): iterates 1:maximum(id) while `asymmetry` uses minimum:maximum —
    # confirm ids always start at 1.
    @inbounds for n in 1:maximum(df[!, id])
        m = extract(df, n, id, [x, y])
        c = 0.0
        T = size(m, 1)
        for t in 1:T-1
            c += squared_displacement(m, t, 1)
        end
        c /= (T-1)
        c₀ = squared_displacement(m, 1, T-1)
        push!(result, [n, c₀/c])
    end
    result
end
# Maximum displacement between any two time points of trajectory `m`.
# NOTE(review): relies on `displacement(m, ...)` defined elsewhere in this package.
function _largest_distance(m)
    T = size(m, 1)
    best = displacement(m, 1)
    # Scan every (start, lag) pair and keep the largest displacement seen.
    for lag in 1:T-1, start in 1:T-lag
        cand = displacement(m, start, lag)
        if cand > best
            best = cand
        end
    end
    return best
end
# Total path length of trajectory `m`: sum of single-step displacements.
# NOTE(review): relies on `displacement(m, t, 1)` defined elsewhere in this package.
function _total_length(m)
    T = size(m, 1)
    total = 0.0
    for step in 1:T-1
        total += displacement(m, step, 1)
    end
    return total
end
# Per-track fractal dimension: log(N) / log(N*d/L), where d is the largest
# displacement between any two points and L is the total path length.
function fractal_dimension(df, id, x, y)
    result = DataFrame(TrackID = Int[], value = Float64[])
    @inbounds @simd for n in minimum(df[!, id]):maximum(df[!, id])
        m = extract(df, n, id, [x, y])
        N = size(m, 1)-1
        d = _largest_distance(m)
        L = _total_length(m)
        push!(result, [n, log(N)/log(N*d/L)])
    end
    result
end
# Gaussianity statistic per track and lag δ:
#     g(δ) = <r⁴> / (2 <r²>²) − 1
# averaged over all windows of lag δ within each track.
function gaussianity(
    df::DataFrame,
    id::Symbol = :TrackID,
    x::Symbol = :POSITION_X,
    y::Symbol = :POSITION_Y,
)
    gaussianity = DataFrame(TrackID = Int64[], gaussianity = Float64[], delta_t = Int64[], n = Int64[])
    @inbounds for n in sort(collect(Set(df[!, id])))
        m = extract(df, Int(n), id, [x, y])
        T = size(m, 1)
        for δ = 1:T-1
            r⁴ = 0.0
            r² = 0.0
            @simd for t = 1:(T-δ)
                r⁴ += abs2(squared_displacement(m, t, δ))
                r² += squared_displacement(m, t, δ)
            end
            r⁴ /= (T - δ)
            r² /= (T - δ)
            push!(gaussianity, [n, r⁴/(2*abs2(r²)) - 1, δ, T - δ])
        end
    end
    gaussianity
end
27,
34345,
29,
10677,
14,
40890,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
8818,
30372,
11973,
7,
7568,
11,
4686,
11,
2124,
11,
331,
8,
198,
220,
220,
220,
1255,
796,
6060,
19778,
7,
24802,
2389,
796,
2558,
58,
4357,
... | 1.692609 | 1,786 |
<filename>src/StringAndArray/CombinationsOf2Arrays.jl
"""
    generate_array(
        array_1::Array{Int64},
        array_2::Array{Int64},
        array_merged::Array{Int64},
        i::Int64,
        j::Int64,
        k::Int64,
        l::Int64,
        size::Int64,
        flag::Bool,
        result::Array{Any,1}
    )

Recursive enumeration (a modified search, cf.
https://en.wikipedia.org/wiki/Breadth-first_search) of every strictly
ascending merge that alternates between `array_1` and `array_2`, collecting
each completed prefix into `result`.

# Arguments
- `array_1::Array{Int64}`: First sorted array
- `array_2::Array{Int64}`: Second sorted array
- `array_merged::Array{Int64}`: Scratch buffer holding the merge under construction
- `i::Int64`: Lower scan index into the first array
- `j::Int64`: Lower scan index into the second array
- `k::Int64`: Upper scan bound of the first array
- `l::Int64`: Upper scan bound of the second array
- `size::Int64`: Number of slots of `array_merged` currently filled
- `flag::Bool`: `true` when it is the first array's turn to contribute
- `result::Array{Any,1}`: Accumulator for the emitted merges
"""
function generate_array(
    array_1::Array{Int64},
    array_2::Array{Int64},
    array_merged::Array{Int64},
    i::Int64,
    j::Int64,
    k::Int64,
    l::Int64,
    size::Int64,
    flag::Bool,
    result::Array{Any,1},
)
    if flag
        # Every prefix longer than one element (it just received an element of
        # array_2, since the turns alternate) is a valid combination.
        size > 1 && push!(result, array_merged[1:size])
        for idx in i:k
            if size < 2
                # Seed the buffer with a starting element from array_1.
                array_merged[size] = array_1[idx]
                generate_array(array_1, array_2, array_merged,
                               idx + 1, j, k, l, size, !flag, result)
            elseif array_1[idx] > array_merged[size]
                # Extend the ascending merge with a larger element of array_1.
                array_merged[size + 1] = array_1[idx]
                generate_array(array_1, array_2, array_merged,
                               idx + 1, j, k, l, size + 1, !flag, result)
            end
        end
    else
        # array_2's turn: append any element larger than the current tail.
        for idx in j:l
            if array_2[idx] > array_merged[size]
                array_merged[size + 1] = array_2[idx]
                generate_array(array_1, array_2, array_merged,
                               i, idx + 1, k, l, size + 1, !flag, result)
            end
        end
    end
    return result
end
"""
    combinations_of_2arrays(array_1::Array{Int64,1}, array_2::Array{Int64,1}))

Provides all combinations of sorted arrays with an increasing number of elements.

# Arguments
- `array_1::Array{Int64,1}`: First sorted array
- `array_2::Array{Int64,1}`: Second sorted array

# Examples
```julia-repl
julia> import ClassicAlgorithmsCollections
julia> arr_1 = [10, 15, 25]
julia> arr_2 = [5, 20, 30]
julia> ClassicAlgorithmsCollections.combinations_of_2arrays(arr_1, arr_2)
[10, 20]
[10, 20, 25, 30]
[10, 30]
[15, 20]
[15, 20, 25, 30]
[15, 30]
[25, 30]
```
"""
function combinations_of_2arrays(array_1::Array{Int64,1}, array_2::Array{Int64,1})
    n1, n2 = length(array_1), length(array_2)
    # Scratch buffer large enough for a full merge of both arrays.
    scratch = zeros(Int64, n1 + n2)
    # Collect every valid merge via the recursive enumeration above.
    return generate_array(array_1, array_2, scratch, 1, 1, n1, n2, 1, true, Array{Any,1}())
end
| [
27,
34345,
29,
10677,
14,
10100,
1870,
19182,
14,
20575,
7352,
5189,
17,
3163,
20477,
13,
20362,
198,
37811,
198,
220,
220,
220,
7716,
62,
18747,
7,
198,
220,
220,
220,
220,
220,
220,
220,
7177,
62,
16,
3712,
19182,
90,
5317,
2414,
... | 1.892399 | 2,026 |
include("eggify.jl")
using Metatheory.Library
using Metatheory.EGraphs.Schedulers
# Term-rewriting theories for the mini CAS, built on Metatheory.jl.
# Commutative-monoid axioms for * (identity 1) and + (identity 0).
mult_t = commutative_monoid(:(*), 1)
plus_t = commutative_monoid(:(+), 0)
# Subtraction rules.
minus_t = @theory begin
    a - a => 0
    a + (-b) => a - b
end
# Absorption of zero and distributivity/factoring.
mulplus_t = @theory begin
    0 * a => 0
    a * 0 => 0
    a * (b + c) == ((a * b) + (a * c))
    a + (b * a) => ((b + 1) * a)
end
# Power laws.
# NOTE(review): `0^x => 0` is unsound at x = 0 (0^0 == 1 in Julia) — confirm intended.
pow_t = @theory begin
    (y^n) * y => y^(n + 1)
    x^n * x^m == x^(n + m)
    (x * y)^z == x^z * y^z
    (x^p)^q == x^(p * q)
    x^0 => 1
    0^x => 0
    1^x => 1
    x^1 => x
    inv(x) == x^(-1)
end
"""
    customlt(x, y)

Ordering predicate used to canonically sort the arguments of n-ary `+`/`*`:
values of the same type compare with `isless`, numbers come before symbols,
and two `Expr`s never reorder (compare as `false`).
"""
function customlt(x, y)
    x isa Expr && y isa Expr && return false
    typeof(x) == typeof(y) && return isless(x, y)
    # Across types: only "Symbol before Number" refuses to move forward.
    return !(x isa Symbol && y isa Number)
end
# Rules that restore n-ary +/* and canonically order their arguments
# (using `customlt` so numbers sort before symbols, Exprs last).
canonical_t = @theory begin
    # restore n-arity
    (x + (+)(ys...)) => +(x, ys...)
    ((+)(xs...) + y) => +(xs..., y)
    (x * (*)(ys...)) => *(x, ys...)
    ((*)(xs...) * y) => *(xs..., y)
    (*)(xs...) |> Expr(:call, :*, sort!(xs; lt = customlt)...)
    (+)(xs...) |> Expr(:call, :+, sort!(xs; lt = customlt)...)
end
# The full CAS theory is the union of the algebraic theories defined above.
cas = mult_t ∪ plus_t ∪ minus_t ∪ mulplus_t ∪ pow_t
theory = cas
query = cleanast(:(a + b + (0 * c) + d))
# Saturate an e-graph with the CAS rules and extract the smallest equivalent term.
function simplify(ex)
    g = EGraph(ex)
    params = SaturationParams(
        scheduler = BackoffScheduler,
        timeout = 20,
        schedulerparams = (1000, 5), # fuel and bantime
    )
    report = saturate!(g, cas, params)
    println(report)
    res = extract!(g, astsize)
    res = rewrite(res, canonical_t; clean = false, m = @__MODULE__) # this just orders symbols and restores n-ary plus and mult
    res
end
###########################################
params = SaturationParams(timeout = 20, schedulerparams = (1000, 5))
# Run the simplifier twice (e.g. cold vs. warm timings).
for i in 1:2
    ex = simplify(:(a + b + (0 * c) + d))
    println("Best found: $ex")
end
# Emit a Rust program implementing the same theory/query (see eggify.jl).
open("src/main.rs", "w") do f
    write(f, rust_code(theory, query))
end
| [
17256,
7203,
33856,
1958,
13,
20362,
4943,
198,
3500,
3395,
26221,
652,
13,
23377,
198,
3500,
3395,
26221,
652,
13,
7156,
1470,
82,
13,
50,
1740,
377,
364,
198,
198,
16680,
62,
83,
796,
725,
315,
876,
62,
2144,
1868,
7,
37498,
9,
... | 2.235653 | 819 |
<filename>docs/make.jl
# Build and deploy the documentation for StingerGraphs.jl.
using Documenter, StingerGraphs
makedocs()
deploydocs(
    julia = "nightly",
    repo = "github.com/stingergraph/StingerGraphs.jl.git"
)
| [
27,
34345,
29,
31628,
14,
15883,
13,
20362,
198,
3500,
16854,
263,
11,
520,
3889,
37065,
82,
198,
198,
76,
4335,
420,
82,
3419,
198,
198,
2934,
1420,
31628,
7,
198,
220,
220,
220,
474,
43640,
796,
366,
3847,
306,
1600,
198,
220,
2... | 2.362319 | 69 |
using RCall
"""
    load_R_library(libname)

Attach the R package `libname` in the embedded R session by evaluating
`library(<libname>)` through `RCall.reval`.
"""
function load_R_library(libname)
    return reval(string("library(", libname, ")"))
end
| [
3500,
13987,
439,
198,
198,
8818,
3440,
62,
49,
62,
32016,
7,
8019,
3672,
8,
198,
220,
220,
220,
302,
2100,
7203,
32016,
16763,
8019,
3672,
8,
4943,
198,
437,
198
] | 2.612903 | 31 |
# Damped BFGS quasi-Newton scheme.
#   approx — Inverse() or Direct(): whether the inverse Hessian or the Hessian is updated
#   theta  — damping threshold used by the Direct update below (default 0.2)
#   P      — optional preconditioner (`nothing` = none)
struct DBFGS{T1,T2,T3} <: QuasiNewton{T1}
    approx::T1
    theta::T2
    P::T3
end
DBFGS(approx) = DBFGS(approx, 0.2, nothing)
DBFGS(; inverse = true, theta = 0.2) = DBFGS(inverse ? Inverse() : Direct(), theta, nothing)
# Preconditioner trait: a `nothing` P means no preconditioning.
hasprecon(::DBFGS{<:Any,<:Any,<:Nothing}) = NoPrecon()
hasprecon(::DBFGS{<:Any,<:Any,<:Any}) = HasPrecon()
summary(dbfgs::DBFGS{Inverse}) = "Inverse Damped BFGS"
summary(dbfgs::DBFGS{Direct}) = "Direct Damped BFGS"
# In-place damped BFGS update of the Hessian approximation B from step s and
# gradient difference y. Damping kicks in when dot(s, y) < theta * s'Bs.
function update!(scheme::DBFGS{<:Direct,<:Any,<:Any}, B, s, y)
    # We could write this as
    # B .+= (y*y')/dot(s, y) - (B*s)*(s'*B)/(s'*B*s)
    # B .+= (y*y')/dot(s, y) - b*b'/dot(s, b)
    # where b = B*s
    # But instead, we split up the calculations. First calculate the denominator
    # in the first term
    σ = dot(s, y)
    ρ = inv(σ) # scalar
    # Then calculate the vector b
    b = B * s # vector temporary
    sb = dot(s, b)
    if σ ≥ scheme.theta * sb
        # Curvature condition satisfied: plain BFGS update (θ = 1).
        θ = 1.0
        # Calculate one vector divided by dot(s, b)
        ρbb = inv(sb) * b
        # And calculate
        B .+= (inv(σ) * y) * y' .- ρbb * b'
    else
        # Damped update: replace y by r = θy + (1-θ)b to preserve positive definiteness.
        θ = 0.8 * sb / (sb - σ)
        r = y * θ + (1 - θ) * b
        # Calculate one vector divided by dot(s, b)
        ρbb = inv(dot(s, b)) * b
        # And calculate
        B .+= (inv(dot(s, r)) * r) * r' .- ρbb * b'
    end
end
# Out-of-place version of the damped direct BFGS update above.
function update(scheme::DBFGS{<:Direct,<:Any}, B, s, y)
    # As above, but out of place
    σ = dot(s, y)
    b = B * s
    sb = dot(s, b)
    if σ ≥ scheme.theta * sb
        θ = 1.0
        # Calculate one vector divided by dot(s, b)
        ρbb = inv(sb) * b
        # And calculate
        return B = B .+ (inv(σ) * y) * y' .- ρbb * b'
    else
        θ = 0.8 * sb / (sb - σ)
        r = y * θ + (1 - θ) * b
        # Calculate one vector divided by dot(s, b)
        ρbb = inv(dot(s, b)) * b
        # And calculate
        return B = B .+ (inv(dot(s, r)) * r) * r' .- ρbb * b'
    end
end
# Out-of-place update of the inverse Hessian approximation H.
# NOTE(review): the Inverse path applies the plain (undamped) BFGS formula —
# unlike the Direct path it never uses scheme.theta; confirm intended.
function update(scheme::DBFGS{<:Inverse,<:Any}, H, s, y)
    σ = dot(s, y)
    ρ = inv(σ)
    # if isfinite(ρ)
    C = (I - ρ * s * y')
    H = C * H * C' + ρ * s * s'
    # end
    H
end
# In-place inverse update; skipped entirely when ρ is not finite (σ == 0).
function update!(scheme::DBFGS{<:Inverse,<:Any}, H, s, y)
    σ = dot(s, y)
    ρ = inv(σ)
    if isfinite(ρ)
        Hy = H * y
        H .= H .+ ((σ + y' * Hy) .* ρ^2) * (s * s')
        Hys = Hy * s'
        Hys .= Hys .+ Hys'
        H .= H .- Hys .* ρ
    end
    H
end
# UniformScaling (e.g. I) cannot be mutated in place; fall back to the
# out-of-place updates above.
function update!(scheme::DBFGS{<:Inverse,<:Any}, A::UniformScaling, s, y)
    update(scheme, A, s, y)
end
function update!(scheme::DBFGS{<:Direct,<:Any}, A::UniformScaling, s, y)
    update(scheme, A, s, y)
end
| [
7249,
20137,
37,
14313,
90,
51,
16,
11,
51,
17,
11,
51,
18,
92,
1279,
25,
2264,
17053,
3791,
1122,
90,
51,
16,
92,
198,
220,
220,
220,
5561,
3712,
51,
16,
198,
220,
220,
220,
262,
8326,
3712,
51,
17,
198,
220,
220,
220,
350,
... | 1.850142 | 1,408 |
<reponame>rtwalker/StochasticPrograms.jl
"""
    SynchronousExecution

Functor object for using synchronous execution in a progressive-hedging algorithm (assuming multiple Julia cores are available). Create by supplying a [`Synchronous`](@ref) object through `execution` in the `ProgressiveHedgingSolver` factory function and then pass to a `StochasticPrograms.jl` model.
"""
struct SynchronousExecution{T <: AbstractFloat,
                            A <: AbstractVector,
                            PT <: AbstractPenaltyterm} <: AbstractProgressiveHedgingExecution
    # One subproblem handle per worker process (indexed by worker id - 1).
    subworkers::Vector{SubWorker{T,A,PT}}
    function SynchronousExecution(::Type{T}, ::Type{A}, ::Type{PT}) where {T <: AbstractFloat,
                                                                           A <: AbstractVector,
                                                                           PT <: AbstractPenaltyterm}
        return new{T,A,PT}(Vector{SubWorker{T,A,PT}}(undef, nworkers()))
    end
end
# Create the subproblems on the worker processes and run the initial
# iterate/dual-gap reductions.
function initialize_subproblems!(ph::AbstractProgressiveHedging,
                                 execution::SynchronousExecution,
                                 scenarioproblems::DistributedScenarioProblems,
                                 penaltyterm::AbstractPenaltyterm)
    # Create subproblems on worker processes
    initialize_subproblems!(ph,
                            execution.subworkers,
                            scenarioproblems,
                            penaltyterm)
    # Initial reductions
    update_iterate!(ph)
    update_dual_gap!(ph)
    return nothing
end
# Restore the subproblems on all workers.
function restore_subproblems!(::AbstractProgressiveHedging, execution::SynchronousExecution)
    restore_subproblems!(execution.subworkers)
    return nothing
end
# Re-solve every subproblem on its worker (in parallel) and sum the partial solutions.
function resolve_subproblems!(ph::AbstractProgressiveHedging, execution::SynchronousExecution{T}) where T <: AbstractFloat
    partial_solutions = Vector{SubproblemSolution{T}}(undef, nworkers())
    @sync begin
        for (i,w) in enumerate(workers())
            # Worker ids start at 2, hence the w-1 indexing into subworkers.
            @async partial_solutions[i] = remotecall_fetch(resolve_subproblems!,
                                                           w,
                                                           execution.subworkers[w-1],
                                                           ph.ξ,
                                                           penalty(ph))
        end
    end
    return sum(partial_solutions)
end
# Reduce the probability-weighted primal vectors from all workers into the
# new iterate ξ and record the squared iterate change δ₁.
function update_iterate!(ph::AbstractProgressiveHedging, execution::SynchronousExecution{T,A}) where {T <: AbstractFloat, A <: AbstractVector}
    partial_primals = Vector{A}(undef, nworkers())
    @sync begin
        for (i,w) in enumerate(workers())
            @async partial_primals[i] = remotecall_fetch(collect_primals,
                                                         w,
                                                         execution.subworkers[w-1],
                                                         length(ph.ξ))
        end
    end
    ξ_prev = copy(ph.ξ)
    ph.ξ .= sum(partial_primals)
    # Update δ₁
    ph.data.δ₁ = norm(ph.ξ - ξ_prev, 2) ^ 2
    return nothing
end
# Push the new iterate and penalty to every worker so it can update the dual
# prices of its local subproblems.
function update_subproblems!(ph::AbstractProgressiveHedging, execution::SynchronousExecution)
    # Update dual prices
    @sync begin
        for w in workers()
            @async remotecall_fetch(
                w,
                execution.subworkers[w-1],
                ph.ξ,
                penalty(ph)) do sw, ξ, r
                    subproblems = fetch(sw)
                    if length(subproblems) > 0
                        update_subproblems!(subproblems, ξ, r)
                    end
                end
        end
    end
    return nothing
end
# Delegate the dual-gap reduction to the per-worker implementation.
function update_dual_gap!(ph::AbstractProgressiveHedging, execution::SynchronousExecution)
    return update_dual_gap!(ph, execution.subworkers)
end
function calculate_objective_value(ph::AbstractProgressiveHedging, execution::SynchronousExecution)
    return calculate_objective_value(execution.subworkers)
end
# Probability-weighted scalar reduction of `value` over all subproblems.
function scalar_subproblem_reduction(value::Function, execution::SynchronousExecution{T}) where T <: AbstractFloat
    partial_results = Vector{T}(undef, nworkers())
    @sync begin
        for (i,w) in enumerate(workers())
            @async partial_results[i] = remotecall_fetch(w, execution.subworkers[w-1], value) do sw, value
                subproblems = fetch(sw)
                return mapreduce(+, subproblems, init = zero(T)) do subproblem
                    π = subproblem.probability
                    return π * value(subproblem)
                end
            end
        end
    end
    return sum(partial_results)
end
# Probability-weighted vector reduction (result length n) over all subproblems.
function vector_subproblem_reduction(value::Function, execution::SynchronousExecution{T,A}, n::Integer) where {T <: AbstractFloat, A <: AbstractVector}
    partial_results = Vector{A}(undef, nworkers())
    @sync begin
        for (i,w) in enumerate(workers())
            @async partial_results[i] = remotecall_fetch(w, execution.subworkers[w-1], value, n) do sw, value, n
                subproblems = fetch(sw)
                # NOTE(review): `zero(T, n)` is not a Base method — presumably
                # `zeros(T, n)` was intended; confirm.
                return mapreduce(+, subproblems, init = zero(T, n)) do subproblem
                    π = subproblem.probability
                    return π * value(subproblem)
                end
            end
        end
    end
    return sum(partial_results)
end
# API
# ------------------------------------------------------------
# Factory: a `Synchronous` policy object builds the matching execution backend.
function (execution::Synchronous)(::Type{T}, ::Type{A}, ::Type{PT}) where {T <: AbstractFloat,
                                                                           A <: AbstractVector,
                                                                           PT <: AbstractPenaltyterm}
    return SynchronousExecution(T,A,PT)
end
function str(::Synchronous)
    return "Synchronous "
end
| [
27,
7856,
261,
480,
29,
81,
4246,
20949,
14,
1273,
5374,
3477,
15167,
82,
13,
20362,
198,
37811,
198,
220,
220,
220,
16065,
11413,
516,
23002,
1009,
198,
198,
24629,
2715,
2134,
329,
1262,
18305,
516,
9706,
287,
257,
10393,
12,
704,
... | 1.986287 | 2,917 |
<reponame>hervasa2/SolidStateDetectors.jl
# Simulate detector waveforms for the MC events in `mcevents` in chunks of
# `chunk_n_physics_events`, writing each chunk to an HDF5 file in `output_dir`.
function simulate_waveforms( mcevents::TypedTables.Table, sim::Simulation{T},
                             output_dir::AbstractString,
                             output_base_name::AbstractString = "generated_waveforms";
                             chunk_n_physics_events::Int = 1000,
                             Δt::RealQuantity = 4u"ns",
                             max_nsteps::Int = 1000,
                             diffusion::Bool = false,
                             self_repulsion::Bool = false,
                             number_of_carriers::Int = 1,
                             number_of_shells::Int = 1,
                             verbose = false) where {T <: SSDFloat}
    n_total_physics_events = length(mcevents)
    Δtime = T(to_internal_units(Δt))
    n_contacts = length(sim.detector.contacts)
    @info "Detector has $(n_contacts) contact(s)"
    contacts = sim.detector.contacts
    if !ispath(output_dir) mkpath(output_dir) end
    # Zero-pad event indices so the generated file names sort lexicographically.
    nfmt(i::Int) = format(i, zeropadding = true, width = length(digits(n_total_physics_events)))
    evt_ranges = chunked_ranges(n_total_physics_events, chunk_n_physics_events)
    @info "-> $(length(flatview(mcevents.edep))) energy depositions to simulate."
    for evtrange in evt_ranges
        ofn = joinpath(output_dir, "$(output_base_name)_evts_$(nfmt(first(evtrange)))-$(nfmt(last(evtrange))).h5")
        @info "Now simulating $(evtrange) and storing it in\n\t \"$ofn\""
        # Delegate the actual waveform simulation to the in-memory method.
        mcevents_sub = simulate_waveforms(mcevents[evtrange], sim; Δt, max_nsteps, diffusion, self_repulsion, number_of_carriers, number_of_shells, verbose)
        HDF5.h5open(ofn, "w") do output
            LegendHDF5IO.writedata(output, "generated_waveforms", mcevents_sub)
        end
    end
end
| [
27,
7856,
261,
480,
29,
372,
11017,
64,
17,
14,
46933,
9012,
47504,
669,
13,
20362,
198,
8818,
29308,
62,
19204,
23914,
7,
285,
344,
85,
658,
3712,
31467,
276,
51,
2977,
13,
10962,
11,
985,
3712,
8890,
1741,
90,
51,
5512,
198,
220... | 1.951299 | 924 |
<filename>src/CoolPkg.jl
module CoolPkg

export add2

"""
    add2(a, b)

Return the sum of the two numbers `a` and `b`.

* `a`: A number
* `b`: Another number
"""
function add2(a, b)
    return a + b
end

end # module CoolPkg
| [
27,
34345,
29,
10677,
14,
34530,
47,
10025,
13,
20362,
198,
21412,
15226,
47,
10025,
198,
198,
39344,
751,
17,
198,
198,
37811,
198,
220,
220,
220,
751,
17,
7,
64,
11,
275,
8,
220,
220,
220,
220,
198,
198,
46245,
734,
3146,
1978,
... | 2.175 | 80 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.