from .registry import register
import tensorflow as tf
import numpy as np
import random
NUM_CLASSES = 15 # i.e. number of sort indices
# These sequence lengths do not include the extra padding we need
# to delay RNN outputs until the entire sequence is seen, which is crucial
# because otherwise our model would have to guess sort order before seeing
# the entire sequence
MIN_SEQUENCE_LENGTH = 2
MAX_SEQUENCE_LENGTH = 15
# The input is padded with MAX_SEQUENCE_LENGTH extra steps on the right; the
# output is padded the same way, but on the left.
PADDED_SEQUENCE_LENGTH = 30
def generate_batch(batch_size):
xs = []
ys = []
seq_lengths = []
ms = []
for _ in range(batch_size):
x = np.zeros((PADDED_SEQUENCE_LENGTH, 1), np.float32)
y = np.zeros((PADDED_SEQUENCE_LENGTH,), np.int32)
target_mask = np.zeros((PADDED_SEQUENCE_LENGTH,), np.int32)
seq_len = random.randint(MIN_SEQUENCE_LENGTH, MAX_SEQUENCE_LENGTH)
x_random = np.random.normal(0, 1, (seq_len, 1))
x[:seq_len] = x_random
# We want this portion to be ignored and one_hot(-1) = all zeros
y[:seq_len] = -1
target_mask[seq_len:2 * seq_len] = 1
y[seq_len:2 * seq_len] = np.argsort(x_random, axis=0).flatten()
xs.append(x)
ys.append(y)
ms.append(target_mask)
# Note that external seq_len is different because we want the RNN to go
# over the sequence first in seq_len steps and then emit its outputs
# one-by-one in another seq_len steps
seq_lengths.append(2 * seq_len)
return np.asarray(xs, np.float32), seq_lengths, ys, ms
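# Quick sanity check of generate_batch (illustrative sketch, not part of the
# original reader):
#
#     xs, seq_lengths, ys, ms = generate_batch(2)
#     xs.shape          # -> (2, PADDED_SEQUENCE_LENGTH, 1)
#     seq_lengths       # -> [2 * seq_len for each sample]
#     ys[0][seq_lengths[0] // 2 : seq_lengths[0]]   # argsort targets, sample 0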
@register("sort")
def input_fn(data_sources, params, training):
def _input_fn():
"""
Returns training inputs and output (y).
    x: (batch_size, PADDED_SEQUENCE_LENGTH, 1) tensor holding a number sequence
       of random length followed by zero padding.
    y: one-hot encoding representing the sorted order (argsort) of x, delayed
       until the entire input sequence has been seen.
"""
get_batch = lambda: generate_batch(params.batch_size)
x, seq_len, y, target_mask = tf.py_func(
get_batch, [], [tf.float32, tf.int64, tf.int32, tf.int32])
x.set_shape((params.batch_size, PADDED_SEQUENCE_LENGTH, 1))
y = tf.one_hot(y, depth=NUM_CLASSES, axis=-1, dtype=tf.int64)
y.set_shape((params.batch_size, PADDED_SEQUENCE_LENGTH, NUM_CLASSES))
seq_len.set_shape((params.batch_size,))
target_mask.set_shape((params.batch_size, PADDED_SEQUENCE_LENGTH))
return {
"inputs": x,
"seq_length": seq_len,
"difficulty": seq_len,
"target_mask": target_mask
}, y
return _input_fn
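# Hedged sketch of consuming the registered reader (the `params` object is
# assumed to provide `batch_size`, as used above):
#
#     fn = input_fn(data_sources=None, params=params, training=True)
#     features, labels = fn()   # features["inputs"], features["target_mask"], ...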
--- source: data/sort_reader.py | Python | for-ai/ACT | MIT ---
from .mmodel import MetaModel, load_model
import numpy as np
class SimpleTripMetaModel(MetaModel):
def __transform_input__(self, y, params):
network = self.network
cmodel = load_model(network.nodes[0].cmodel)
sets = cmodel.sets
N = len(network.nodes)
matrices = { s : np.zeros(shape=(N,N)) for s in sets }
cury = 0
for i in range(N):
for s in sets:
matrices[s][i][i] = y[cury]
cury += 1
newy = []
for s in sets:
newy += list(matrices[s].reshape(N*N))
return newy, params
def __transform_output__(self, ret):
network = self.network
cmodel = load_model(network.nodes[0].cmodel)
sets = cmodel.sets
N = len(network.nodes)
local_pos = lambda k,i,j : k * N * N + i * N + j
global_value = lambda y,k,j: sum([y[local_pos(k,i,j)] for i in range(N)])
results = {}
for i in range(N):
results[i] = {}
for k, s in enumerate(sets):
results[i][s] = global_value(ret, k, i)
return results
def __compute_structures__(self):
network = self.network
N = len(network.nodes)
# Store the models used in the network to avoid multiple dynamic loading
cmodels = {}
    # Associates every node with the starting and ending positions of its parameters in the param vector
params_map = { i : (0,0) for i in range(N) }
node_map = {}
curp = 0
for node in network.nodes:
try:
cmodel = cmodels[node.cmodel]
except KeyError:
cmodel = cmodels[node.cmodel] = load_model(node.cmodel)
params_map[node.id] = (curp, curp + len(cmodel.params))
curp += len(cmodel.params)
node_map[node.id] = node
    # Build an adjacency matrix for the network
ady_matrix = [[0 for i in range(N)] for j in range(N)]
for edge in network.edges:
ady_matrix[edge.source][edge.target] = edge.weight
local_pos = lambda k,i,j : k * N * N + i * N + j
global_pos = lambda k,j: [local_pos(k,i,j) for i in range(N)]
return cmodels, ady_matrix, node_map, params_map, local_pos, global_pos
def __generate_code__(self, structures):
network = self.network
cmodels, ady_matrix, node_map, params_map, local_pos, global_pos = structures
N = len(network.nodes)
sets = cmodels[network.nodes[0].cmodel].sets
K = len(sets)
def get_global_symbol(k,j):
return "(" + "+".join([f"y[{local_pos(k,i,j)}]" for i in range(N)]) + ")"
code = "from scipy.integrate import odeint\n\n\n"
code += "def deriv(y, t, params):\n"
code += "\tresult = [0] * len(y)\n"
for k, s in enumerate(sets):
for i in range(N):
for j in range(N):
node = node_map[j]
cmodel = cmodels[node.cmodel]
# Generate the equation of the set s for the node i,j
          # This represents the population of the underlying node i living in the underlying node j
equation = f"\tresult[{local_pos(k,i,j)}] = "
# Get the value of the sets of node i,j
local_symbols = [f"y[{local_pos(kx,i,j)}]" for kx in range(K)]
global_symbols = [get_global_symbol(kx,j) for kx in range(K)]
start, end = params_map[j]
params_symbols = [f"params[{p}]" for p in range(start, end)]
symbols = local_symbols + global_symbols + params_symbols
equation += cmodel.equations[s](*symbols)
if i == j:
out_weight = 0
for u in range(N):
out_weight += ady_matrix[i][u]
if ady_matrix[u][i] > 0:
equation += f" + {ady_matrix[u][i]} * y[{local_pos(k,i,u)}]"
equation += f" - {out_weight} * y[{local_pos(k,i,i)}]"
else:
equation += f" + {ady_matrix[i][j]} * y[{local_pos(k,i,i)}]"
equation += f" - {ady_matrix[j][i]} * y[{local_pos(k,i,j)}]"
code += equation + "\n"
code += "\treturn result\n"
code += "\n\n"
code += "def solve(y, t, params):\n"
code += "\treturn odeint(deriv, y, t, args=(params,))"
return code
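# Hedged usage sketch for the generated solver code. `model`, `y0`, `t` and
# `params` are hypothetical stand-ins; the generated module only needs scipy.
#
#     code = model.__generate_code__(model.__compute_structures__())
#     namespace = {}
#     exec(code, namespace)
#     trajectory = namespace["solve"](y0, t, params)   # wraps scipy's odeint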
--- source: src/mmodel/simple_trip.py | Python | Maximiza-Atemoriza/meta-population-network-model | MIT ---
# Check if curl is installed
try
print("Checking for curl...")
cmd = `curl --version`
(VERSION >= v"0.5")? readstring(cmd) : readall(`curl --version`)
println("OK!")
catch
error("Curl not found. Please install curl to use BCBData.")
end
"""
A Julia package to read Brazilian Central Bank (BCB) time series data.
"""
module BCBData
using Base.Dates
export ProxyConfig,
readData
"""
Holds proxy configuration.
"""
type ProxyConfig
host::String
port::Int64
user::String
password::String
function ProxyConfig()
new("", 80, "", "")
end #function
function ProxyConfig( host::String, port::Int64, user::String, password::String)
new(host, port, user, password)
end #function
end # type
"""
readData(code::Int64, startDate::Date, endDate::Date, proxy::ProxyConfig=ProxyConfig())
Read data from Brazilian Central Bank (BCB) database.
# Parameters
* `code::Int64`: time series ID in BCB database. See https://www3.bcb.gov.br/sgspub/ for a list of series and IDs.
* `startDate::Date`: start date.
* `endDate::Date`: end date.
* `proxy::ProxyConfig`: (optional) instance to hold proxy definitions. If not supplied, no proxy config is set.
# Returns
Returns two arrays: one with strings representing dates, the other with the corresponding numeric values.
"""
function readData(code::Int64, startDate::Date, endDate::Date, proxy=ProxyConfig())
str_start = Dates.format(startDate, "dd/mm/yyyy")
str_end = Dates.format(endDate, "dd/mm/yyyy")
if(proxy.host == "")
str_proxy = ``
else
        str_proxy = `--proxy $(proxy.host):$(proxy.port) --proxy-user $(proxy.user):$(proxy.password)`
end #if
cmd = `curl -s -S --header "SOAPAction:\"http://publico.ws.casosdeuso.sgs.pec.bcb.gov.br#getValoresSeriesXML\"" --header "Accept: text/xml" --header "Accept: multipart/*" --header "Content-Type: text/xml; charset=utf-8" --header "SOAPAction: \"http://publico.ws.casosdeuso.sgs.pec.bcb.gov.br#getValoresSeriesXML\"" -d "<?xml version=\"1.0\"?><SOAP-ENV:Envelope xmlns:SOAP-ENC=\"http://schemas.xmlsoap.org/soap/encoding/\" xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" SOAP-ENV:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\"> <SOAP-ENV:Body> <getValoresSeriesXML xmlns=\"http://publico.ws.casosdeuso.sgs.pec.bcb.gov.br\"> <in0 xsi:type=\"SOAP-ENC:Array\" SOAP-ENC:arrayType=\"NA[1]\"> <item>$code</item> </in0> <in1 xsi:type=\"xsd:string\">$str_start</in1> <in2 xsi:type=\"xsd:string\">$str_end</in2> </getValoresSeriesXML> </SOAP-ENV:Body></SOAP-ENV:Envelope>" $str_proxy https://www3.bcb.gov.br/wssgs/services/FachadaWSSGS`
response = (VERSION >= v"0.5")? readstring(cmd) : readall(cmd)
str_dates = String[]
str_values = Float64[]
pattern = r"DATA>(.*?)</DATA>\n\t\t\t<VALOR>(.*?)&"
for m in eachmatch(pattern, response)
push!(str_dates, m.captures[1])
push!(str_values, parse(Float64, m.captures[2]))
end #for
return(str_dates, str_values)
end #function
end #module
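# Hedged usage sketch (the series ID, dates, and proxy settings below are
# illustrative placeholders):
#
#     using BCBData
#     dates, values = readData(433, Date(2019, 1, 1), Date(2019, 12, 31))
#     proxy = ProxyConfig("proxy.example.com", 8080, "user", "pass")
#     dates, values = readData(433, Date(2019, 1, 1), Date(2019, 12, 31), proxy)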
--- source: src/BCBData.jl | Julia | lucasprocessi/BCBData.jl | MIT ---
using Documenter, TestThings
makedocs(;
modules=[TestThings],
format=Documenter.HTML(),
pages=[
"Home" => "index.md",
"Foo" => "foo.md"
],
repo="https://github.com/under-Peter/TestThings.jl/blob/{commit}{path}#L{line}",
sitename="TestThings.jl",
authors="Andreas Peter",
)
deploydocs(;
repo="github.com/under-Peter/TestThings.jl",
)
--- source: docs/make.jl | Julia | under-Peter/TestThings.jl | MIT ---
import numpy as np
from sge.parameters import params
import json
import os
def evolution_progress(generation, pop):
fitness_samples = [i['fitness'] for i in pop]
data = '%4d\t%.6e\t%.6e\t%.6e' % (generation, np.min(fitness_samples), np.mean(fitness_samples), np.std(fitness_samples))
if params['VERBOSE']:
print(data)
save_progress_to_file(data)
if generation % params['SAVE_STEP'] == 0:
save_step(generation, pop)
def save_progress_to_file(data):
with open('%s/run_%d/progress_report.csv' % (params['EXPERIMENT_NAME'], params['RUN']), 'a') as f:
f.write(data + '\n')
def save_step(generation, population):
    c = json.dumps(population)
    # Write (rather than append) so re-runs do not produce invalid JSON files.
    with open('%s/run_%d/iteration_%d.json' % (params['EXPERIMENT_NAME'], params['RUN'], generation), 'w') as f:
        f.write(c)
def save_parameters():
    params_lower = dict((k.lower(), v) for k, v in params.items())
    c = json.dumps(params_lower)
    with open('%s/run_%d/parameters.json' % (params['EXPERIMENT_NAME'], params['RUN']), 'w') as f:
        f.write(c)
def prepare_dumps():
    try:
        os.makedirs('%s/run_%d' % (params['EXPERIMENT_NAME'], params['RUN']))
    except FileExistsError:
        pass
    save_parameters()
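# Hedged usage sketch (assumes `params` provides EXPERIMENT_NAME, RUN, VERBOSE
# and SAVE_STEP, and that each individual in `pop` is a dict with a 'fitness' key):
#
#     prepare_dumps()
#     for generation in range(num_generations):
#         pop = evolve(pop)                    # hypothetical evolution step
#         evolution_progress(generation, pop)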
--- source: sge/sge/logger.py | Python | HiroseTomoyuki/sge3 | MIT ---
## Author: Sergio García Prado
## Title: Statistical Inference - Non parametric Tests - Exercise 01
rm(list = ls())
x <- c(518, 174, 613, 2010, 2139, 156, 450, 536)
(n.x <- length(x))
# 8
y <- c(899, 326, 2118, 839, 820, 1423, 1687, 1010, 3011, 1739, 1185, 1320, 646,
505, 4236, 4481, 1433, 1806, 400, 421, 335, 1164, 1713, 1356, 390)
(n.y <- length(y))
# 25
(n <- n.x + n.y)
# 33
sum(duplicated(c(y, x))) > 0
# FALSE
(W <- sum(rank(c(y, x))[1:length(y)]))
# 459
(W.mean <- n.y * (n + 1) / 2)
# 425
(W.var <- (n.y * n.x * (n + 1)) / 12)
# 566.666666666667
(W.yx <- W - n.y * (n.y + 1) / 2)
# 134
(W.yx.mean <- n.x * n.y / 2)
# 100
(W.yx.var <- n.x * n.y * (n + 1) / 12)
# 566.667
### H0: X <= Y
## Asymptotic pvalue (with continuity correction)
1 - pnorm((W.yx - 0.5 - W.yx.mean) / sqrt(W.yx.var))
# 0.0796719687339751
## Exact pvalue
1 - pwilcox(W.yx - 1, n.y, n.x)
# 0.0812147313815834
### Shift Parameter Estimation
a <- matrix(rep(0, n.x * n.y), n.x, n.y)
for (i in 1:n.x) {
for (j in 1:n.y) {
a[i, j] <- y[j] - x[i]
}
}
# Exact
median(a)
# 491
wilcox.test(y, x, alternative = "greater", conf.int = TRUE)
# Wilcoxon rank sum test
#
# data: y and x
# W = 134, p-value = 0.08121
# alternative hypothesis: true location shift is greater than 0
# 95 percent confidence interval:
# -60 Inf
# sample estimates:
# difference in location
# 491
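## Cross-check (illustrative): the built-in normal approximation with continuity
## correction should agree with the asymptotic p-value computed above (~0.0797):
# wilcox.test(y, x, alternative = "greater", exact = FALSE, correct = TRUE)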
--- source: statistical-inference/non-parametric/exercise-01.r | R | garciparedes/r-examples | Apache-2.0 ---
"""
edit_with(y::Dict{Any,Any}; kwargs...)
edit_with(df::DataFrame, editor::T) where T<:Edit
edit_with(df::DataFrame, lst::Array{T}) where T<:Edit
edit_with(df::DataFrame, x::Describe, file::T) where T<:File
edit_with(file::T, y::Dict{Any,Any}; kwargs...)
edit_with(files::Array{T,N} where N, y::Dict{Any,Any}; kwargs...) where T<:File
This function edits the input DataFrame `df` and returns the resultant DataFrame.
# Arguments
- `df::DataFrame` on which to perform the edit.
- `editor::T where T<:Edit`: DataType containing information about which edit to perform.
The following edit options are available and detailed below. If given a dictionary of
edits, they will be made in this order:
- [`SLiDE.Drop`](@ref): Remove information from the DataFrame -- either an entire column
or rows containing specified values.
- [`SLiDE.Rename`](@ref): Change column name `from` -> `to`.
- [`SLiDE.Group`](@ref): Use to edit files containing data in successive dataframes with
an identifying header cell or row.
- [`SLiDE.Match`](@ref): Extract values from the specified column into a column or
columns based on the specified regular expression.
- [`SLiDE.Melt`](@ref): Normalize the dataframe by 'melting' columns into rows,
lengthening the dataframe by duplicating values in the column `on` into new rows and
defining 2 new columns:
1. `var` with header names from the original dataframe.
2. `val` with column values from the original dataframe.
- [`SLiDE.Add`](@ref): Add new column `col` filled with `val`.
- [`SLiDE.Map`](@ref): Define an `output` column containing values based on those in an
`input` column. The mapping columns `from` -> `to` are contained in a .csv `file` in
the coremaps directory. The columns `input` and `from` should contain the same
values, as should `output` and `to`.
- [`SLiDE.Replace`](@ref): Replace values in `col` `from` -> `to`.
- [`SLiDE.Operate`](@ref): Perform an arithmetic operation across multiple DataFrame columns or rows.
- [`SLiDE.Describe`](@ref): This DataType is required when multiple DataFrames will be
appended into one output file (say, if multiple sheets from an XLSX file are
included). Before the DataFrames are appended, a column `col` will be added and
filled with the value in the file descriptor.
- [`SLiDE.Order`](@ref): Rearranges columns in the order specified by `cols` and sets
them to the specified type.
- `file::T where T <: File`: Data file containing information to read.
- `files::Array{T} where T <: File`: List of data files.
- `y::Dict{Any,Any}`: Dictionary containing all editing structures among other values read
from the yaml file. Dictionary keys must correspond EXACTLY with SLiDE.Edit DataType
names, or the edits will not be made.
# Keywords
- `shorten::Bool = false` or `shorten::Int`: if an integer length is specified, the
DataFrame will be shortened to the input value. This is meant to aid troubleshooting
during development.
# Returns
- `df::DataFrame` including edit(s)
"""
function edit_with(df::DataFrame, x::Add)
df = copy(df)
# If adding the length of a string...
if typeof(x.val) == String && occursin("length", x.val)
m = match(r"(?<col>\S*) length", x.val)
# If this is not indicating a column length to add, add the value and exit.
if (m === nothing || !(Symbol(m[:col]) in propertynames(df)))
df[!, x.col] .= x.val
return df
end
# If possible, return the length of characters in the string.
col_len = Symbol(m[:col])
df[!, x.col] .= [ismissing(val_len) ? missing : length(convert_type(String, val_len))
for val_len in df[:,col_len]]
else
df[!, x.col] .= x.val
end
return df
end
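# Hedged usage sketch (constructor argument order for Add/Rename/Drop is
# assumed from the docstring above; the column names are illustrative):
#
#     df = edit_with(df, Add(:yr, 2016))              # add column `yr` filled with 2016
#     df = edit_with(df, [Rename(:units, :unit), Drop(:notes, "all", "==")])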
function edit_with(df::DataFrame, x::Drop)
df = copy(df)
if x.val === "all" && x.operation == "occursin"
df = edit_with(df, Drop.(propertynames(df)[occursin.(x.col, propertynames(df))], "all", "=="))
end
!(x.col in propertynames(df)) && (return df)
if x.val === "all" # Drop entire column to remove dead weight right away.
df = df[:, setdiff(propertynames(df), [x.col])]
else # Drop rows using an operation or based on a value.
if x.val === missing
dropmissing!(df, x.col)
# elseif x.val === "unique"
# unique!(df, x.col)
else
df[!,x.col] .= convert_type.(typeof(x.val), df[:,x.col])
df = if x.operation == "occursin"
df[.!broadcast(datatype(x.operation), x.val, df[:,x.col]), :]
else
df[.!broadcast(datatype(x.operation), df[:,x.col], x.val), :]
end
end
end
return df
end
function edit_with(df::DataFrame, x::Group)
df = copy(df)
# First, add a column to the original DataFrame indicating where the data set begins.
cols = unique([propertynames(df); x.output])
df[!,:start] = (1:size(df)[1]) .+ 1
    # Next, create a DataFrame describing where to "split" the input DataFrame.
    # Editing with a map will remove all rows that do not contain relevant information.
    # Add a column indicating where each data set STOPS, assuming all completely blank
    # rows were removed by read_file().
df_split = edit_with(copy(df), convert_type(Map, x); kind = :inner)
sort!(unique!(df_split), :start)
df_split[!, :stop] .= vcat(df_split[2:end, :start] .- 2, [size(df)[1]])
# Add a new, blank output column to store identifying information about the data block.
# Then, fill this column based on the identifying row numbers in df_split.
for out in x.output
df[!,out] .= ""
[df[row[:start]:row[:stop], out] .= row[out] for row in eachrow(df_split)]
end
# Finally, remove header rows (these will be blank in the output column),
# as well as the column describing where the sub-DataFrames begin.
df = edit_with(df, Drop.(x.output, "", "=="))
return df[:, cols]
end
function edit_with(df::DataFrame, x::Map; kind = :left)
df = copy(df)
# Save all input column propertynames, read the map file, and isolate relevant columns.
    # This prevents duplicate columns in the final DataFrame.
cols = unique([propertynames(df); x.output])
df_map = copy(read_file(x))
df_map = unique(df_map[:,unique([x.from; x.to])])
# If there are duplicate columns in from/to, differentiate between the two to save results.
duplicates = intersect(x.from, x.to)
if length(duplicates) > 0
(ii_from, ii_to) = (occursin.(duplicates, x.from), occursin.(duplicates, x.to));
x.from[ii_from] = Symbol.(x.from[ii_from], :_0)
[df_map[!,Symbol(col, :_0)] .= df_map[:,col] for col in duplicates]
end
# Rename columns in the mapping DataFrame to temporary values in case any of these
# columns were already present in the input DataFrame.
temp_to = Symbol.(:to_, 1:length(x.to))
temp_from = Symbol.(:from_, 1:length(x.from))
df_map = edit_with(df_map, Rename.([x.to; x.from], [temp_to; temp_from]))
# Ensure the input and mapping DataFrames are consistent in type. Types from the mapping
# DataFrame are used since all values in each column should be of the same type.
for (col, col_map) in zip(x.input, temp_from)
try
new_type = eltypes(dropmissing(df_map[:,[col_map]]))
# new_type = eltype.(eachcol(dropmissing(df_map[:,[col_map]])))
df[!,col] .= convert_type.(new_type, df[:,col])
catch
df_map[!,col_map] .= convert_type.(String, df_map[:,col_map])
end
end
join_cols = Pair.(x.input, temp_from)
x.kind == :inner && (df = innerjoin(df, df_map, on = join_cols; makeunique = true))
x.kind == :outer && (df = outerjoin(df, df_map, on = join_cols; makeunique = true))
x.kind == :left && (df = leftjoin(df, df_map, on = join_cols; makeunique = true))
x.kind == :right && (df = rightjoin(df, df_map, on = join_cols; makeunique = true))
x.kind == :semi && (df = semijoin(df, df_map, on = join_cols; makeunique = true))
# Remove all output column propertynames that might already be in the DataFrame. These will be
# overwritten by the columns from the mapping DataFrame. Finally, remane mapping "to"
# columns from their temporary to output values.
df = df[:, setdiff(propertynames(df), x.output)]
df = edit_with(df, Rename.(temp_to, x.output))
return df[:,cols]
end
function edit_with(df::DataFrame, x::Match)
df = copy(df)
if x.on == r"expand range"
ROWS, COLS = size(df)
cols = propertynames(df)
df = [[DataFrame(Dict(cols[jj] =>
cols[jj] == x.input ? _expand_range(df[ii,jj]) : df[ii,jj]
for jj in 1:COLS)) for ii in 1:ROWS]...;]
else
# Ensure all row values are strings and can be matched with a Regex, and do so.
# Temporarily remove missing values, just in case.
df[:,x.input] .= convert_type.(String, df[:,x.input])
col = edit_with(copy(df), Replace(x.input, missing, ""))[:,x.input]
m = match.(x.on, col)
# Add empty columns for all output columns not already in the DataFrame.
# Where there is a match, fill empty cells. If values in the input column,
# leave cells without a match unchanged.
df = edit_with(df, Add.(setdiff(x.output, propertynames(df)), ""))
    [m[ii] !== nothing && ([df[ii,out] = m[ii][out] for out in x.output])
for ii in 1:length(m)]
end
return df
end
function edit_with(df::DataFrame, x::Melt)
df = copy(df)
on = intersect(x.on, propertynames(df))
df = melt(df, on, variable_name = x.var, value_name = x.val)
df[!, x.var] .= convert_type.(String, df[:, x.var])
return df
end
function edit_with(df::DataFrame, x::Operate)
df = copy(df)
# If it is a ROW-WISE operation,
if x.axis == :row
df = by(df, x.input, x.output => datatype(x.operation))
df = edit_with(df, Rename.(setdiff(propertynames(df), x.input), ensurearray(x.output)))
end
# If it is a COLUMN-WISE operation,
if x.axis == :col
cols = [setdiff(propertynames(df), unique([x.from; x.to; x.input; x.output])); x.output; x.from]
# Isolate columns to be operated on.
# Append original columns that might be replaced "_0" to preserve information.
df_val = convert_type.(Float64, copy(df[:,x.input]))
x.output in x.input && (df = edit_with(df, Rename(x.output, Symbol(x.output, :_0))))
df[!,x.output] .= broadcast(datatype(x.operation), [col for col in eachcol(df_val)]...)
# Adjust labeling columns: If both from/to descriptive columns are distinct and
# in the DataFrame, Replace the column values from -> to.
for (from, to) in zip(x.from, x.to)
if length(intersect(propertynames(df), [from,to])) == 2
df_comment = dropmissing(unique(df[:, [from; to]]))
df[!, Symbol(from, :_0)] .= df[:,from]
df = edit_with(df, Replace.(from, df_comment[:,from], df_comment[:,to]))
end
end
end
# !!!! How to handle floating point arithmetic? (ex: 1.1 + 0.1 = 1.2000000000000002)
df[!,x.output] .= round.(df[:,x.output], digits=11)
return df
end
function edit_with(df::DataFrame, x::Order)
df = copy(df)
# If not all columns are present, return the DataFrame as is. Such is the case when a
# descriptor column must be added when appending multiple data sets in one DataFrame.
if size(intersect(x.col, propertynames(df)))[1] < size(x.col)[1]
return df
# If all of the columns are present in the original DataFrame,
# reorder the DataFrame columns and set them to the specified type.
else
df = df[!, x.col] # reorder
[df[!, c] .= convert_type.(t, df[!, c]) for (c, t) in zip(x.col, x.type)] # convert
return df
end
end
function edit_with(df::DataFrame, x::Rename)
df = copy(df)
x.from in propertynames(df) && (df = rename(df, x.from => x.to))
x.to == :upper && (df = edit_with(df, Rename.(propertynames(df), uppercase.(propertynames(df)))))
x.to == :lower && (df = edit_with(df, Rename.(propertynames(df), lowercase.(propertynames(df)))))
return df
end
function edit_with(df::DataFrame, x::Replace)
df = copy(df)
!(x.col in propertynames(df)) && (return df)
if x.from === missing && Symbol(x.to) in propertynames(df)
df[ismissing.(df[:,x.col]),x.col] .= df[ismissing.(df[:,x.col]), Symbol(x.to)]
return df
end
if x.to === Not && eltype(df[:,x.col]) == Bool
df[!,x.col] .= .!df[:,x.col]
end
df[!,x.col] .= if x.to === "lower" lowercase.(df[:,x.col])
elseif x.to === "upper" uppercase.(df[:,x.col])
elseif x.to === "uppercasefirst" uppercasefirst.(lowercase.(df[:,x.col]))
elseif x.to === "titlecase" titlecase.(df[:,x.col])
else
replace(strip.(copy(df[:,x.col])), x.from => x.to)
end
return df
end
function edit_with(df::DataFrame, x::Stack)
df = copy(df)
df = [[edit_with(df[:, occursin.(indicator, propertynames(df))],
[Rename.(propertynames(df)[occursin.(indicator, propertynames(df))], x.col);
Add(x.var, replace(string(indicator), "_" => " "))]
) for indicator in x.on]...;]
return dropmissing(df)
end
function edit_with(df::DataFrame, lst::Array{T}) where T<:Edit
[df = edit_with(df, x) for x in lst]
return df
end
function edit_with(df::DataFrame, x::Describe, file::T) where T<:File
return edit_with(copy(df), Add(x.col, file.descriptor))
end
function edit_with(file::T, y::Dict{Any,Any}; shorten = false) where T<:File
df = read_file(y["PathIn"], file; shorten = shorten)
# Specify the order in which edits must occur. "Drop" is included twice, once at the
# beginning and once at the end. First, drop entire columns. Last, drop specific values.
EDITS = ["Rename", "Group", "Stack", "Match", "Melt", "Add", "Map", "Replace", "Drop", "Operate"]
    # Find which of these edits are represented in the yaml file of defined edits.
KEYS = intersect(EDITS, collect(keys(y)))
"Drop" in KEYS && pushfirst!(KEYS, "Drop")
[df = edit_with(df, y[k]) for k in KEYS]
# Add a descriptor to identify the data from the file that was just added.
# Then, reorder the columns and set them to the correct types.
    # This ensures consistency when concatenating.
"Describe" in keys(y) && (df = edit_with(df, y["Describe"], file))
"Order" in keys(y) && (df = edit_with(df, y["Order"]))
return df
end
function edit_with(files::Array{T}, y::Dict{Any,Any}; shorten = false) where T<:File
return [[edit_with(file, y; shorten = shorten) for file in files]...;]
end
function edit_with(y::Dict{Any,Any}; shorten = false)
# Find all dictionary keys corresponding to file names and save these in a list.
file = convert_type(Array, find_oftype(y, File))
df = edit_with(file, y; shorten = shorten)
# return _sort_datastream(df)
end
"""
_sort_datastream(df::DataFrame)
Returns the edited DataFrame, stored in a nicely-sorted order. This is most helpful for
mapping and developing. Sorting isn't *necessary* and we could remove this function to save
some time for users.
"""
function _sort_datastream(df::DataFrame)
colidx = 1:size(df,2)
isvalue = istype(df, AbstractFloat)
ii = colidx[.!isvalue]
    # If it's a mapping dataframe...
if length(ii) == length(setdiff(propertynames(df),[:factor]))
:state_code in propertynames(df) && (ii = intersect(colidx[occursin.(:code, propertynames(df))], ii))
ii = intersect(sortperm(length.(unique.(eachcol(df)))), ii)
splice!(ii, 2:1, colidx[isvalue])
end
return sort(df, ii)
end
"""
_expand_range()
"""
function _expand_range(x::T) where T <: AbstractString
if occursin("-", x)
if all(string(strip(x)) .!= ["31-33", "44-45", "48-49"])
x = split(x, "-")
x = ensurearray(convert_type(Int, x[1]):convert_type(Int, x[1][1:end-1] * x[end][end]))
end
else
x = convert_type(Int, x)
end
return x
end
function _expand_range(x::String)
if match(r"\D", x) !== nothing
m = String.([m.match for m in collect(eachmatch(r"\D.*?", x))])
# length(setdiff(m, [",","-"," "])) .== 0 && (x = [_expand_range.(split(x, ","))...;])
x = length(setdiff(m, [",","-"," "])) .== 0 ? [_expand_range.(split(x, ","))...;] : missing
else
x = convert_type(Int, x)
end
return x
end
_expand_range(x::Missing) = x
_expand_range(x::Int) = x
"""
fill_zero(keys_unique::NamedTuple; value_colnames)
fill_zero(keys_unique::NamedTuple, df::DataFrame)
fill_zero(df::DataFrame...)
fill_zero(d::Dict...)
fill_zero(keys_unique, d::Dict)
# Arguments
- `keys_unique::Tuple`: A list of arrays whose permutations should be included in the
resultant dictionary.
- `keys_unique::NamedTuple`: A list of arrays whose permutations should be included in the
resultant dictionary. The NamedTuple's keys correspond to the DataFrame columns where
they will be stored.
- `d::Dict...`: The dictionary/ies to edit.
- `df::DataFrame...`: The DataFrame(s) to edit.
# Keyword Arguments
- `value_colnames::Any = :value`: "value" column labels to add and set to zero when creating
a new DataFrame. Default is `:value`.
# Usage
This function can be used to fill zeros in either a dictionary or DataFrame.
- Options for dictionary editing:
- If only (a) dictionary/ies is/are input, the dictionaries will be edited such that
they all contain all permutations of their key values. All dictionaries in a
resultant list of dictionaries will be the same length.
- If a dictionary is input with a list of keys, it will be edited to ensure that it
includes all permutations.
- If only a list of keys is input, a new dictionary will be created, containing all key
permutations with values initialized to zero.
- Options for DataFrame editing:
- If only (a) DataFrame(s) is/are input, the DataFrame(s) will be edited such that
they all contain all permutations of their key values. All DataFrames in a
resultant list of DataFrames will be the same length.
- If a DataFrame is input with a NamedTuple, it will be edited to ensure that it
includes all permutations of the NamedTuple's values.
- If only a NamedTuple is input, a new DataFrame will be created, containing all key
permutations with values initialized to zero.
# Returns
- `d::Dict...` if input included dictionaries and/or Tuples
- `df::DataFrame...` if input included DataFrames and/or NamedTuples
"""
function fill_zero(keys_fill::NamedTuple; value_colnames = :value)
df_fill = DataFrame(permute(keys_fill))
return edit_with(df_fill, Add.(convert_type.(Symbol, value_colnames), 0.))
end
function fill_zero(keys_fill::Tuple; permute_keys::Bool = true)
permute_keys && (keys_fill = permute(keys_fill))
return Dict(k => 0. for k in keys_fill)
end
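# Hedged sketch: build a zero-filled dictionary over all key permutations
# (the key values below are illustrative):
#
#     d = fill_zero((["md", "dc"], [2015, 2016]))
#     # -> Dict(("md", 2015) => 0.0, ("md", 2016) => 0.0, ...)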
function fill_zero(d::Vararg{Dict}; permute_keys::Bool = true)
d = copy.(ensurearray(d))
# Find all keys present in the input dictionary/ies and ensure all are present.
keys_fill = unique([collect.(keys.(d))...;])
d = [fill_zero(keys_fill, x; permute_keys = permute_keys) for x in d]
return length(d) == 1 ? d[1] : Tuple(d)
end
function fill_zero(keys_fill::NamedTuple, df::DataFrame)
df = copy(df)
df_fill = fill_zero(keys_fill)
df = fill_zero(df, df_fill)[1]
return df
end
function fill_zero(df::Vararg{DataFrame}; permute_keys::Bool = true)
df = copy.(ensurearray(df))
# Save propertynames of columns containing values to fill zeros later.
# Find descriptor columns to permute OR make consistent across input DataFrames.
value_colnames = find_oftype.(df, AbstractFloat)
cols = intersect(setdiff.(propertynames.(df), value_colnames)...)
# Find a unique list of descriptor keys in the input DataFrame(s). Permute as desired.
df_fill = sort(unique([[x[:,cols] for x in df]...;]))
permute_keys && (df_fill = permute(df_fill))
# For each DataFrame in the list, join the input DataFrame to DataFrame keys_all on the
# descriptor columns shared by both DataFrames. Using a left join will add "missing"
# where a descriptor was not already present, which will be replaced by zero.
[df[ii] = edit_with(leftjoin(df_fill, df[ii], on = cols),
Replace.(value_colnames[ii], missing, 0.0)) for ii in 1:length(df)]
return length(df) == 1 ? df[1] : Tuple(df)
end
function fill_zero(keys_fill::Any, d::Dict; permute_keys::Bool = true)
d = copy(d)
# If permuting keys, find all possible permutations of keys that should be present
# and determine which are missing. Then add missing keys to the dictionary and return.
permute_keys && (keys_fill = permute(keys_fill))
keys_missing = setdiff(keys_fill, collect(keys(d)))
[push!(d, k => 0.) for k in keys_missing]
return d
end
function fill_with(inp::Any, val::Any)
df = fill_zero(inp);
df = edit_with(df, Replace(:value, 0.0, val))
return df
end
"""
extrapolate_year(df::DataFrame, yr::Array{Int64,1}; kwargs...)
extrapolate_year(df::DataFrame, set::Dict; kwargs...)
# Arguments:
- `df::DataFrame` that might be in need of extrapolation.
- `yr::Array{Int64,1}`: List of years over which extrapolation is possible (depending on the kwargs).
- `set::Dict` containing list of years, identified by the key `:yr`.
# Keyword Arguments:
- `backward::Bool = true`: Do we extrapolate backward in time?
- `forward::Bool = true`: Do we extrapolate forward in time?
# Returns:
- `df::DataFrame` extrapolated in time.
"""
function extrapolate_year(
df::DataFrame,
yr::Array{Int64,1};
backward::Bool = true,
forward::Bool = true
)
df = copy(df)
yr_diff = setdiff(yr, unique(df[:,:yr]))
length(yr_diff) == 0 && (return df)
cols = setdiff(propertynames(df), [:yr])
cols_ans = propertynames(df)
df_ext = []
if backward
yr_min = minimum(df[:,:yr])
df_min = filter_with(df, (yr = yr_min,))[:,cols]
yr_back = yr_diff[yr_diff .< yr_min]
df_back = crossjoin(DataFrame(yr = yr_back), df_min)[:,cols_ans]
push!(df_ext, df_back)
end
if forward
yr_max = maximum(df[:,:yr])
df_max = filter_with(df, (yr = yr_max,))[:,cols]
yr_forward = yr_diff[yr_diff .> yr_max]
df_forward = crossjoin(DataFrame(yr = yr_forward), df_max)[:,cols_ans]
push!(df_ext, df_forward)
end
return sort([df_ext...; df])
end
function extrapolate_year(
df::DataFrame,
set::Dict;
backward::Bool = true,
forward::Bool = true
)
extrapolate_year(df, set[:yr]; forward = forward, backward = backward)
end
"""
    extrapolate_region(df::DataFrame, r::Pair; kwargs...)
# Arguments:
- `df::DataFrame` that might be in need of extrapolation.
- `r::Pair = "md" => "dc"`: `Pair` indicating a region (`r.first`) to extrapolate to another
region (`r.second`). A suggested regional extrapolation: MD data will be used to
approximate DC data in the event that it is missing.
# Keyword Argument:
- `overwrite::Bool = false`: If data in the target region `r.second` is already present,
should it be overwritten?
# Returns:
- `df::DataFrame` extrapolated across regions.
"""
function extrapolate_region(df::DataFrame, r::Pair = "md" => "dc"; overwrite = false)
df = copy(df)
if !overwrite
r = r.first => setdiff(ensurearray(r.second), unique(df[:,:r]))
length(r.second) == 0 && (return df)
else
df = edit_with(df, Drop.(:r, r.second, "=="))
end
cols = setdiff(propertynames(df), [:r])
df_close = crossjoin(DataFrame(r = r.second), filter_with(df, (r = r.first,))[:,cols])
return sort([df_close; df])
end
"""
filter_with(df::DataFrame, set::Any; kwargs...)
# Arguments:
- `df::DataFrame` to filter.
- `set::Dict` or `set::NamedTuple`: Values to keep in the DataFrame.
# Keyword Arguments:
- `extrapolate::Bool = false`: Add missing regions/years to the DataFrame?
If `extrapolate` is set to true, the following `kwargs` become relevant:
- When extrapolating over years,
- `backward::Bool = true`: Do we extrapolate backward in time?
- `forward::Bool = true`: Do we extrapolate forward in time?
- When extrapolating across regions,
- `r::Pair = "md" => "dc"`: `Pair` indicating a region (`r.first`) to extrapolate to
another region (`r.second`). A suggested regional extrapolation: MD data will be
used to approximate DC data in the event that it is missing.
- `overwrite::Bool = false`: If data in the target region `r.second` is already present,
should it be overwritten?
# Returns:
- `df::DataFrame` with only the desired keys.
"""
function filter_with(
df::DataFrame,
set::Any;
extrapolate::Bool = false,
forward::Bool = true,
backward::Bool = true,
r::Pair = "md" => "dc",
overwrite::Bool = false
)
df_ans = copy(df)
cols_ans = propertynames(df_ans)
# Find keys that reference both column names in the input DataFrame df and
    # values in the set Dictionary. Then, create a DataFrame containing all permutations.
cols = find_oftype(df, Not(AbstractFloat))
cols_set = intersect(cols, collect(keys(set)))
vals_set = [intersect(unique(df[:,k]), ensurearray(set[k])) for k in cols_set]
# vals_set = [set[k] for k in cols_set]
# Drop values that are not in the current set.
df_set = DataFrame(permute(NamedTuple{Tuple(cols_set,)}(vals_set,)))
df_ans = innerjoin(df_ans, df_set, on = cols_set)
if extrapolate
:yr in cols_set && (df_ans = extrapolate_year(df_ans, set; forward = forward, backward = backward))
:r in cols_set && (df_ans = extrapolate_region(df_ans, r; overwrite = overwrite))
end
return sort(df_ans[:,cols_ans])
end
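# Hedged usage sketch (column names and values are illustrative):
#
#     df = filter_with(df, (r = ["md", "dc"], yr = [2015, 2016]))
#     df = filter_with(df, set; extrapolate = true, r = "md" => "dc")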
--- source: src/parse/edit_data.jl | Julia | matbesancon/SLiDE | BSD-3-Clause ---
-- Errors should precede warnings in info buffer
-- Reported by nad 2018-11-27
module Issue3416 where
A : Set
A = A
B : Set
B = Set
--- source: test/interaction/Issue3416.agda | Agda | cruhland/agda | MIT ---
import logging
import numpy as np
import rlberry.spaces as spaces
from rlberry.envs.finite import GridWorld
from rlberry.rendering import Scene, GeometricPrimitive
logger = logging.getLogger(__name__)
class SixRoom(GridWorld):
"""
GridWorld with six rooms.
Parameters
----------
reward_free : bool, default=False
If true, no rewards are given to the agent.
array_observation:
If true, the observations are converted to an array (x, y)
instead of a discrete index.
Notes
-----
The function env.sample() does not handle conversions to array states
when array_observation is True. Only the functions env.reset() and
env.step() are covered.
"""
name = "SixRoom"
def __init__(self, reward_free=False, array_observation=False):
self.reward_free = reward_free
self.array_observation = array_observation
# Common parameters
nrows = 11
ncols = 17
start_coord = (0, 0)
terminal_states = ((10, 0),)
success_probability = 0.95
#
walls = ()
for ii in range(11):
if ii not in [2, 8]:
walls += ((ii, 5),)
walls += ((ii, 11),)
for jj in range(17):
if jj != 15:
walls += ((5, jj),)
# Default reward according to the difficulty
default_reward = -0.001
# Rewards according to the difficulty
if self.reward_free:
reward_at = {}
else:
reward_at = {
(10, 0): 10.0,
(4, 4): 0.1,
}
# Init base class
GridWorld.__init__(self,
nrows=nrows,
ncols=ncols,
start_coord=start_coord,
terminal_states=terminal_states,
success_probability=success_probability,
reward_at=reward_at,
walls=walls,
default_reward=default_reward)
# spaces
if self.array_observation:
self.observation_space = spaces.Box(0.0, 1.0, shape=(2,))
def _convert_index_to_float_coord(self, state_index):
yy, xx = self.index2coord[state_index]
# centering
xx = xx + 0.5
yy = yy + 0.5
# map to [0, 1]
xx = xx / self.ncols
yy = yy / self.nrows
return np.array([xx, yy])
def reset(self):
self.state = self.coord2index[self.start_coord]
state_to_return = self.state
if self.array_observation:
state_to_return = self._convert_index_to_float_coord(self.state)
return state_to_return
def step(self, action):
assert self.action_space.contains(action), "Invalid action!"
# save state for rendering
if self.is_render_enabled():
self.append_state_for_rendering(self.state)
# take step
next_state, reward, done, info = self.sample(self.state, action)
self.state = next_state
state_to_return = self.state
if self.array_observation:
state_to_return = self._convert_index_to_float_coord(self.state)
return state_to_return, reward, done, info
def get_background(self):
"""
        Return a scene (list of shapes) representing the background.
"""
bg = Scene()
# walls
for wall in self.walls:
y, x = wall
shape = GeometricPrimitive("POLYGON")
shape.set_color((0.25, 0.25, 0.25))
shape.add_vertex((x, y))
shape.add_vertex((x + 1, y))
shape.add_vertex((x + 1, y + 1))
shape.add_vertex((x, y + 1))
bg.add_shape(shape)
# rewards
for (y, x) in self.reward_at:
flag = GeometricPrimitive("POLYGON")
rwd = self.reward_at[(y, x)]
if rwd == 10:
flag.set_color((0.0, 0.5, 0.0))
else:
flag.set_color((0.0, 0.0, 0.5))
x += 0.5
y += 0.25
flag.add_vertex((x, y))
flag.add_vertex((x + 0.25, y + 0.5))
flag.add_vertex((x - 0.25, y + 0.5))
bg.add_shape(flag)
return bg
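# Hedged usage sketch (standard environment loop; rendering omitted):
#
#     env = SixRoom(array_observation=True)
#     observation = env.reset()
#     for _ in range(100):
#         action = env.action_space.sample()
#         observation, reward, done, info = env.step(action)
#         if done:
#             observation = env.reset()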
--- source: rlberry/envs/benchmarks/grid_exploration/six_room.py | Python | rlberry-py/rlberry | MIT ---
import numpy as np
import time, json, pickle
# activation functions
def sigmoid(x,deriv=False):
if deriv:
return sigmoid(x)*(1-sigmoid(x))
return 1/(1+np.exp(-x))
def relu(x,deriv=False):
if deriv:
return (x>0).astype(int)
else:
return np.maximum(x,0)
def softmax(inputs,deriv=False):
    # NOTE: deriv is accepted for interface symmetry but is not implemented.
    inputs = inputs[0].copy()
    if -float("inf") in inputs:
        # Guard against -inf entries before exponentiating.
        inputs = [0 if i == -float("inf") else i for i in inputs]
    inputs = np.array(inputs)
    shifti = inputs - np.max(inputs)  # shift for numerical stability
    exps = np.exp(shifti)
    sm = exps / np.sum(exps)
    return matrixy(sm)
def tanh(x,deriv=False):
if deriv:
return 1-tanh(x)**2
else:
return (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))
def linear(x,deriv=False):
if deriv:
return 1
else:
return x
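# Quick numerical sanity checks for the activations above (illustrative):
#
#     sigmoid(0.0)               # -> 0.5
#     relu(np.array([-1., 2.]))  # -> array([0., 2.])
#     tanh(0.0)                  # -> 0.0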
# helper functions
def add_before(ls,i):
if type(ls)!=list:
ls = ls.ravel().tolist()
return [i]+ls
def add_after(ls,i):
if type(ls)==np.ndarray:
ls = ls.ravel().tolist()
return ls+[i]
def matrixy(arr):
x = arr
if type(x)==np.ndarray:
x = x.ravel().tolist()
return np.array(x).reshape(1,len(x))
class PyNeural:
def __init__(self,*args,**kwargs):
# args can be parent, size, is_input, biased
# kwargs can be activation, parent, biased or is_input
arg = self._parse_args(*args,**kwargs)
self.size = arg["size"]
self.is_input = arg["is_input"]
self.parent = None
if not arg["is_input"]:
self.parent = arg["parent"]
self.biased = arg["biased"]
np.random.seed(int(time.time()))
self._init_weights() # Init self.W var with random weights
self.activation = self.choose_activation(arg["activation"])
if self.parent == None:
self.index = 0
else:
self.index = self.parent.index+1
def full_save_to_file(self,fn):
if not fn.endswith(".pickle"):
fn += ".pickle"
with open(fn,"wb") as f:
f.write(pickle.dumps(self))
@staticmethod
def full_load_from_file(fn):
if not fn.endswith(".pickle"):
fn += ".pickle"
with open(fn, "rb") as f:
data = f.read()
return pickle.loads(data)
def save_to_file(self,fn):
if not fn.endswith(".json"):
fn += ".json"
layers = []
for layer in self.get_layer_list():
data = {}
data["size"] = layer.size
if not layer.is_input:
data["activation"] = layer.act_name
data["W"] = json.dumps(layer.W.tolist())
data["biased"] = layer.biased
data["is_input"] = layer.is_input
layers.append(data)
with open(fn,"w") as f:
f.write(json.dumps(layers))
@staticmethod
def load_from_file(fn):
if not fn.endswith(".json"):
fn += ".json"
with open(fn,"r") as f:
data = json.loads(f.read())
for layer in data:
if layer["is_input"]:
net = PyNeural(layer["size"],is_input=True)
else:
net = PyNeural(net, layer['size'], is_input=False, biased=layer["biased"], activation=layer["activation"])
W = np.array(json.loads(layer["W"]))
net.W = W
return net
def _forward(self,X):
self.X = matrixy(X)
if self.is_input:
self.A = self.X
self.Z = self.X
return self.X
else:
if self.biased:
X = add_after(X,1)
X = matrixy(X)
self.Z = X@self.W
self.A = self.activation(self.Z)
return self.A
def choose_activation(self,activation):
if type(activation)==str:
activation = activation.lower()
if activation=="sigmoid":
self.act_name = activation
return sigmoid
elif activation=="relu":
self.act_name = activation
return relu
# elif activation=="softmax":
# self.act_name = activation
# return softmax
elif activation=="tanh":
self.act_name = activation
return tanh
else:
return activation
def get_layer_list(self):
llist = []
E = self
while E != None:
llist.append(E)
E = E.parent
return llist[::-1]
def _init_weights(self):
dim = [self.parent.size, self.size]
if self.biased:
dim[0] += 1
self.W = 2*np.random.rand(*dim)-1
def _parse_args(self,*args,**kwargs):
options = ["parent", "size", "is_input", "biased"]
arg = {}
c = 0
if not isinstance(args[0], PyNeural):
c += 1
for i,a in enumerate(args):
if i+c<len(options):
arg[options[i+c]] = args[i]
else:
raise ValueError("Too much arguments passed")
for key in kwargs:
arg[key] = kwargs[key]
required = {"activation":"sigmoid", "parent":None, "biased":True, "is_input":False}
for e in required:
if not e in arg:
arg[e] = required[e]
return arg
def predict_one(self,X):
X = matrixy(X)
E = self
llist = []
while E != None:
llist.append(E)
E = E.parent
llist = llist[::-1]
for layer in llist:
X = layer._forward(X)
return X
def predict(self,X):
Y = []
for x in X:
Y.append(self.predict_one(x))
return Y
def calc_delta(self,Y,output=False):
if self.is_input:
return
elif output:
Y = matrixy(Y)
sp = self.activation(self.Z,deriv=True)
self.E = 0.5*(Y-self.A)**2
self.D = -(Y-self.A)*sp
else:
# then the Y needs to be the next layer in forward chain
sp = self.activation(self.Z,deriv=True)
if self.biased:
# extract the bias weight for delta calculation
w = Y.W[:-1,:]
self.D = Y.D@w.T*sp
else:
self.D = Y.D@Y.W.T*sp
def calc_dW(self):
if self.is_input:
return
A = self.parent.A
if self.biased:
A = matrixy(add_after(A,1))
self.dW = A.T@self.D
def optimize(self,learning_rate=0.1):
if not self.is_input:
self.W = self.W-self.dW*learning_rate
    def fit_one(self,x,y,learning_rate=0.1):
        llist = self.get_layer_list()[::-1]
        is_output = True  # first layer in the reversed list is the output layer
        Yh = self.predict_one(x)
        for i,layer in enumerate(llist):
            G = y
            if not is_output:
                G = llist[i-1]
            layer.calc_delta(G,is_output)
            layer.calc_dW()
            is_output = False
        for layer in llist:
            layer.optimize(learning_rate)  # propagate the requested learning rate
def fit(self,X,Y,n_epochs=1,learning_rate=0.1,verbose=False,pb=None):
if not isinstance(pb,type(None)):
pb.total = len(X)*n_epochs
for i in range(n_epochs):
for x,y in zip(X,Y):
self.fit_one(x,y,learning_rate)
if not isinstance(pb,type(None)):
pb.update(1)
if verbose!=False and (i+1)%verbose==0:
print("Epoch:",i+1)
if not isinstance(pb,type(None)):
pb.close()
def __str__(self):
lsize = []
E = self
while E != None:
lsize.append(E.size)
E = E.parent
return "<PyNeuro({})>".format(lsize[::-1])
def __repr__(self):
return self.__str__()
def input_data(ninput):
ilayer = PyNeural(ninput,True)
return ilayer
def fully_connected(nn,lsize,activation="sigmoid"):
lay = PyNeural(nn,lsize,False,True,activation=activation)
return lay
def main():
X = [
[0,0],
[0,1],
[1,0],
[1,1]
]
Y = [
[0],
[1],
[1],
[0]
]
nn = input_data(2)
nn = fully_connected(nn,2,activation="sigmoid")
nn = fully_connected(nn,1,activation="sigmoid")
Yh = nn.predict(X)
nn.fit(X,Y,n_epochs=10000,learning_rate=0.5,verbose=100)
nn.save_to_file("testing.json")
Yh2 = nn.predict(X)
print("Y:",Y)
print("Y before fit:",[np.round(i[0]).tolist() for i in np.divide(Yh,max(Yh))])
print("Y after fit :",[np.round(i[0]).tolist() for i in np.divide(Yh2,max(Yh2))])
print(" ",[i[0].tolist() for i in Yh2])
print("Done!")
if __name__=="__main__":
main()
|
{"hexsha": "fff2cb6621ab7ff3d753ba4c999363702bb0c861", "size": 8824, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn.py", "max_stars_repo_name": "ramonus/pyneuro", "max_stars_repo_head_hexsha": "0b89bfac3e41fd8b3d58df85c64fb19cf86cfa0b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-05T11:36:54.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-05T11:36:54.000Z", "max_issues_repo_path": "nn.py", "max_issues_repo_name": "ramonus/pyneuro", "max_issues_repo_head_hexsha": "0b89bfac3e41fd8b3d58df85c64fb19cf86cfa0b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nn.py", "max_forks_repo_name": "ramonus/pyneuro", "max_forks_repo_head_hexsha": "0b89bfac3e41fd8b3d58df85c64fb19cf86cfa0b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6388888889, "max_line_length": 122, "alphanum_fraction": 0.5114460562, "include": true, "reason": "import numpy", "num_tokens": 2186}
|
Load LFindLoad.
From lfind Require Import LFind.
Unset Printing Notations.
Set Printing Implicit.
Require Import Arith.
Inductive natural : Type := Succ : natural -> natural | Zero : natural.
Inductive lst : Type := Cons : natural -> lst -> lst | Nil : lst.
Inductive tree : Type := Node : natural -> tree -> tree -> tree | Leaf : tree.
Inductive Pair : Type := mkpair : natural -> natural -> Pair
with Zlst : Type := zcons : Pair -> Zlst -> Zlst | znil : Zlst.
Fixpoint append (append_arg0 : lst) (append_arg1 : lst) : lst
:= match append_arg0, append_arg1 with
| Nil, x => x
| Cons x y, z => Cons x (append y z)
end.
Fixpoint eqb (n m: natural) : bool :=
match n, m with
| Zero, Zero => true
| Zero, Succ _ => false
| Succ _, Zero => false
| Succ n', Succ m' => eqb n' m'
end.
Fixpoint mem (mem_arg1 : lst) (mem_arg0 : natural)
:= match mem_arg0, mem_arg1 with
| x, Nil => false
| x, Cons y z => orb (eqb x y) (mem z x)
end.
Theorem goal36 : forall (x : natural) (y : lst) (z : lst), mem y x = true -> mem (append y z) x = true.
Proof.
intros.
induction y.
- simpl. destruct H.
Admitted.
(* + auto.
+ apply IHy in H. auto.
- contradiction.
Qed. *)
Theorem goal37 : forall (x : natural) (y : lst) (z : lst), mem z x = true -> mem (append y z) x = true.
Proof.
intros.
induction y.
- simpl. rewrite IHy. apply Bool.orb_true_r.
- auto.
Qed.
Theorem theorem0 : forall (x : natural) (y : lst) (z : lst), mem y x = true \/ mem z x = true -> mem (append y z) x = true.
Proof.
intros.
destruct H.
-
apply goal36. assumption.
-
lfind. apply H.
Admitted.
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal38_theorem0_56_goal37/goal38.v"}
|
!>----------------------------------------------------------
!! Simple convection resolving convective parameterization code
!!
!! Because this scheme assumes it is being run at convection-resolving
!! scales, it does not modify the microphysical or cloud fraction variables.
!! Instead the simple cu scheme modifies the wind field to create "resolved" updrafts.
!! These in turn should lift air, leading to condensation, heating, and more lifting.
!!
!! The initial version of this code does not use traditional convective indices (e.g. CAPE, CIN),
!! preferring to let the physics resolve these effects outside of the convective scheme.
!! This could almost be thought of more as a simple dynamics scheme than as a convective scheme.
!!
!! The entry point to the code is cu_simple.
!!
!! High level routine descriptions / purpose
!! cu_simple - manages the entire module
!! calc_bouyancy - computes a 3D bouyancy field
!! adjust_winds  - uses the bouyancy field to compute wind adjustments
!!
!! <pre>
!! Driver inputs: pressure,th,pii,rho,qv,qc,qr,qs,rain,snow,dt,dz,nx,ny,nz
!! pressure = pressure - 3D - input - Pa - (nx,nz,ny)
!! th = potential temperature - 3D - in/out - K - (nx,nz,ny)
!! pii = exner function - 3D - input - [] - (nx,nz,ny)
!! rho = air density - 3D - input - kg/m^3 - (nx,nz,ny)
!! qv = specific humidity - 3D - in/out - kg/kg - (nx,nz,ny)
!! qc = cloud water content - 3D - in/out - kg/kg - (nx,nz,ny)
!! qr = rain water content - 3D - in/out - kg/kg - (nx,nz,ny)
!! qs = snow water content - 3D - in/out - kg/kg - (nx,nz,ny)
!! dt = time step - 0D - input - sec. - scalar
!! ims, ime = start end of x array memory - 0D - input - n - scalar
!! jms, jme = start end of y array memory - 0D - input - n - scalar
!! kms, kme = start end of z array memory - 0D - input - n - scalar
!! its, ite = start end of x tile to process- 0D - input - n - scalar
!! jts, jte = start end of y tile to process- 0D - input - n - scalar
!! kts, kte = start end of z tile to process- 0D - input - n - scalar
!! </pre>
!!
!! @author
!! Ethan Gutmann (gutmann@ucar.edu)
!!
!!----------------------------------------------------------
module cu_simple_mod
implicit none
    real,    parameter :: wind_effects(2) = [1.0, -1.0]
    ! 2x2 stencil offsets intended for the wind-adjustment computation
    integer, parameter :: u_x(2,2) = reshape([-1, 1, -1, 1], [2,2])
    integer, parameter :: u_y(2,2) = reshape([ 0, 0,  0, 0], [2,2])
    integer, parameter :: v_y(2,2) = reshape([-1, 1, -1, 1], [2,2])
    integer, parameter :: v_x(2,2) = reshape([ 0, 0,  0, 0], [2,2])
real, parameter :: time_const = 60.0 ! seconds
contains
    subroutine cu_simple(pressure,th,qv,pii,rho,u,v,z,rain,snow, dt, &
                         ims, ime, jms, jme, kms, kme, &
                         its, ite, jts, jte, kts, kte)
        implicit none
        integer,intent(in)    :: ims, ime, jms, jme, kms, kme
        integer,intent(in)    :: its, ite, jts, jte, kts, kte
        real,   intent(inout) :: pressure (ims:ime, kms:kme, jms:jme)
        real,   intent(inout) :: th       (ims:ime, kms:kme, jms:jme)
        real,   intent(inout) :: qv       (ims:ime, kms:kme, jms:jme)
        real,   intent(inout) :: pii      (ims:ime, kms:kme, jms:jme)
        real,   intent(inout) :: rho      (ims:ime, kms:kme, jms:jme)
        real,   intent(inout) :: u        (ims:ime+1,kms:kme, jms:jme)
        real,   intent(inout) :: v        (ims:ime,  kms:kme, jms:jme+1)
        real,   intent(inout) :: z        (ims:ime,  kms:kme, jms:jme)
        real,   intent(inout) :: rain     (ims:ime,  jms:jme)
        real,   intent(inout) :: snow     (ims:ime,  jms:jme)
        real,   intent(in)    :: dt

        real    :: bouyancy (ims:ime, kms:kme, jms:jme)
        integer :: i, j, k

        ! $omp parallel default(shared)
        ! $omp private(i, j, k)
        ! $omp do
        do j = jts, jte
            do i = its, ite
                call calc_bouyancy(bouyancy(i,:,j), th(i,:,j), qv(i,:,j), z(i,:,j), kms,kme, kts,kte)
                call adjust_winds(bouyancy, u, v, i, j, dt, &
                                  ims, ime, jms, jme, kms, kme, &
                                  kts, kte)
            enddo
        enddo
        ! $omp end do
        ! $omp end parallel

    end subroutine cu_simple
subroutine calc_bouyancy(bouyancy, th, qv, z, kms,kme, kts,kte)
implicit none
real, intent(inout) :: bouyancy (kms:kme)
real, intent(in) :: th (kms:kme)
real, intent(in) :: qv (kms:kme)
real, intent(in) :: z (kms:kme)
integer,intent(in) :: kms, kme, kts, kte
integer :: k, k_top, k_bottom
        ! print*, "Calculate bouyancy"   ! debug output; very noisy inside the i/j loop
do k = kts, kte
k_top = min(k+1, kme)
k_bottom = max(k-1, kms)
bouyancy(k) = (th(k_top) - th(k_bottom)) / (z(k_top) - z(k_bottom))
enddo
end subroutine calc_bouyancy
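
    !>----------------------------------------------------------
    !! Minimal placeholder for the wind-adjustment step (a hedged sketch, not
    !! the original ICAR routine, which is absent from this file): it matches
    !! the call made from cu_simple so the module is self-contained.  The real
    !! scheme would nudge u and v around column (i,j) in proportion to the
    !! local bouyancy over the relaxation time time_const.
    !!----------------------------------------------------------
    subroutine adjust_winds(bouyancy, u, v, i, j, dt, &
                            ims, ime, jms, jme, kms, kme, &
                            kts, kte)
        implicit none
        integer,intent(in)    :: ims, ime, jms, jme, kms, kme
        integer,intent(in)    :: kts, kte
        integer,intent(in)    :: i, j
        real,   intent(in)    :: dt
        real,   intent(in)    :: bouyancy (ims:ime, kms:kme, jms:jme)
        real,   intent(inout) :: u (ims:ime+1, kms:kme, jms:jme)
        real,   intent(inout) :: v (ims:ime,   kms:kme, jms:jme+1)

        ! Placeholder body: no wind modification is applied yet.
    end subroutine adjust_winds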
end module
|
{"hexsha": "0af05f2e30d4f2e882da8917325f289bf4e3cc48", "size": 5155, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/physics/cu_simple.f90", "max_stars_repo_name": "scrasmussen/icar", "max_stars_repo_head_hexsha": "88c59fed7595b176a81127993785fdeb514f28a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2016-03-15T18:57:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:00:55.000Z", "max_issues_repo_path": "src/physics/cu_simple.f90", "max_issues_repo_name": "scrasmussen/icar", "max_issues_repo_head_hexsha": "88c59fed7595b176a81127993785fdeb514f28a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2016-03-17T16:10:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T19:57:09.000Z", "max_forks_repo_path": "src/physics/cu_simple.f90", "max_forks_repo_name": "scrasmussen/icar", "max_forks_repo_head_hexsha": "88c59fed7595b176a81127993785fdeb514f28a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 50, "max_forks_repo_forks_event_min_datetime": "2015-12-09T18:13:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T02:29:35.000Z", "avg_line_length": 44.4396551724, "max_line_length": 101, "alphanum_fraction": 0.5191076625, "num_tokens": 1594}
|
% Studies the recovery probability of different algorithms
% at a fixed sparsity level and varying number of signals.
close all;
clear all;
clc;
rng('default');
% Create the directory for storing results
[status_code,message,message_id] = mkdir('bin');
% Signal space
N = 256;
% Number of measurements
M = 48;
% Number of signals
Ss = 1:32;
% Sparsity level
K = 16;
num_trials = 100;
num_ss = length(Ss);
success_with_s.somp = zeros(num_ss, 1);
success_with_s.ra_omp = zeros(num_ss, 1);
success_with_s.ra_ormp = zeros(num_ss, 1);
success_with_s.cosamp_mmv = zeros(num_ss, 1);
success_with_s.ra_cosamp = zeros(num_ss, 1);
snr_threshold = 100;
for ns=1:num_ss
% Current number of signals
S = Ss(ns);
num_successes.somp = 0;
num_successes.ra_omp = 0;
num_successes.ra_ormp = 0;
num_successes.cosamp_mmv = 0;
num_successes.ra_cosamp = 0;
for nt=1:num_trials
% Sensing matrix
Phi = spx.dict.simple.gaussian_dict(M, N);
% Sparse signal generator
gen = spx.data.synthetic.SparseSignalGenerator(N, K, S);
% Gaussian distributed non-zero samples
X = gen.gaussian;
% Measurement vectors
Y = Phi * X;
% Create the solver for simultaneous orthogonal matching pursuit
solver = spx.pursuit.joint.OrthogonalMatchingPursuit(Phi, K, 2);
result = solver.solve(Y);
% Solution vectors
X_Rec = result.Z;
% Comparison
cs = spx.commons.SparseSignalsComparison(X, X_Rec, K);
% Reconstruction SNR
snr = cs.cum_signal_to_noise_ratio;
success = snr > snr_threshold;
num_successes.somp = num_successes.somp + success;
% Create the solver for rank aware orthogonal matching pursuit
solver = spx.pursuit.joint.RankAwareOMP(Phi, K);
result = solver.solve(Y);
% Solution vectors
X_Rec = result.Z;
% Comparison
cs = spx.commons.SparseSignalsComparison(X, X_Rec, K);
% Reconstruction SNR
snr = cs.cum_signal_to_noise_ratio;
success = snr > snr_threshold;
num_successes.ra_omp = num_successes.ra_omp + success;
% Create the solver for rank aware order recursive matching pursuit
solver = spx.pursuit.joint.RankAwareORMP(Phi, K);
result = solver.solve(Y);
% Solution vectors
X_Rec = result.Z;
% Comparison
cs = spx.commons.SparseSignalsComparison(X, X_Rec, K);
% Reconstruction SNR
snr = cs.cum_signal_to_noise_ratio;
success = snr > snr_threshold;
num_successes.ra_ormp = num_successes.ra_ormp + success;
% Create the solver for CoSaMP MMV
solver = spx.pursuit.joint.CoSaMP(Phi, K);
result = solver.solve(Y);
% Solution vectors
X_Rec = result.Z;
% Comparison
cs = spx.commons.SparseSignalsComparison(X, X_Rec, K);
% Reconstruction SNR
snr = cs.cum_signal_to_noise_ratio;
success = snr > snr_threshold;
num_successes.cosamp_mmv = num_successes.cosamp_mmv + success;
% Create the solver for Rank Aware CoSaMP MMV
solver = spx.pursuit.joint.CoSaMP(Phi, K);
solver.RankAwareResidual = true;
result = solver.solve(Y);
% Solution vectors
X_Rec = result.Z;
% Comparison
cs = spx.commons.SparseSignalsComparison(X, X_Rec, K);
% Reconstruction SNR
snr = cs.cum_signal_to_noise_ratio;
success = snr > snr_threshold;
num_successes.ra_cosamp = num_successes.ra_cosamp + success;
fprintf('S: %d, K=%d, trial=%d\n', S, K, nt);
end
success_with_s.somp(ns) = num_successes.somp / num_trials;
success_with_s.ra_omp(ns) = num_successes.ra_omp / num_trials;
success_with_s.ra_ormp(ns) = num_successes.ra_ormp / num_trials;
success_with_s.cosamp_mmv(ns) = num_successes.cosamp_mmv / num_trials;
success_with_s.ra_cosamp(ns) = num_successes.ra_cosamp / num_trials;
end
save ('bin/success_with_s_comparison.mat');
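
% A minimal visualization sketch (not part of the original experiment): plot
% the recovery rates gathered above against the number of signals.  Variable
% names are the ones defined earlier in this script.
figure; hold on;
plot(Ss, success_with_s.somp, '-o');
plot(Ss, success_with_s.ra_omp, '-s');
plot(Ss, success_with_s.ra_ormp, '-d');
plot(Ss, success_with_s.cosamp_mmv, '-^');
plot(Ss, success_with_s.ra_cosamp, '-v');
xlabel('Number of signals (S)');
ylabel('Recovery probability');
legend('SOMP', 'RA-OMP', 'RA-ORMP', 'CoSaMP-MMV', 'RA-CoSaMP', 'Location', 'best');
grid on;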
|
{"author": "indigits", "repo": "sparse-plex", "sha": "43cae2978f62938d001baaa03308a2a717ee6c9b", "save_path": "github-repos/MATLAB/indigits-sparse-plex", "path": "github-repos/MATLAB/indigits-sparse-plex/sparse-plex-43cae2978f62938d001baaa03308a2a717ee6c9b/experiments/cosamp_mmv/ex_comparison_with_s.m"}
|
import sys
import time
import cv2
import numpy as np
from keras.models import load_model
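
# Usage (the test image path is passed as the first command-line argument):
#   python scene_recognition.py path/to/image.jpg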
xtest = []
# Read the test image
test_img = sys.argv[1]
test_img = cv2.imread(test_img)
# Reducing the size of image to 64x64x3
rsize = 64
test_img = cv2.resize(test_img,(rsize,rsize), interpolation = cv2.INTER_CUBIC)
xtest.append(test_img)
xtest = np.array(xtest)
# Load the pre-trained model
model = load_model('data/InOutClassifierModel.h5')
# Predict the classification of the test image.
tic=time.time()
ypred = (model.predict(xtest))
toc=time.time()
#print(toc-tic)
# Display the result
# Compare with argmax rather than exact float equality, since the model's
# softmax outputs are rarely exactly 1.0
if np.argmax(ypred[0]) == 1:
    print("Predicted Category is: Outdoor")
else:
    print("Predicted Category is: Indoor")
|
{"hexsha": "4bc72b806f7f9412d398a5c9e78fc27018052f45", "size": 739, "ext": "py", "lang": "Python", "max_stars_repo_path": "scene_recognition.py", "max_stars_repo_name": "shreyagu/Scene_Recognition", "max_stars_repo_head_hexsha": "2f5e64d6b546c2af3bed972263e55d6dce8ba952", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-09-13T09:45:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-24T10:17:47.000Z", "max_issues_repo_path": "scene_recognition.py", "max_issues_repo_name": "shreyagu/Scene_Recognition", "max_issues_repo_head_hexsha": "2f5e64d6b546c2af3bed972263e55d6dce8ba952", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scene_recognition.py", "max_forks_repo_name": "shreyagu/Scene_Recognition", "max_forks_repo_head_hexsha": "2f5e64d6b546c2af3bed972263e55d6dce8ba952", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-10-30T02:05:45.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-30T02:05:45.000Z", "avg_line_length": 22.3939393939, "max_line_length": 78, "alphanum_fraction": 0.7225981055, "include": true, "reason": "import numpy", "num_tokens": 213}
|
[STATEMENT]
lemma R_g_ode_law: "(\<forall>s\<in>S. P s \<longrightarrow> (\<forall>t\<in>T. (\<forall>\<tau>\<in>down T t. G (\<phi> \<tau> s)) \<longrightarrow> Q (\<phi> t s))) \<Longrightarrow>
(x\<acute>= (\<lambda>t. f) & G on (\<lambda>s. T) S @ 0) \<le> Ref \<lceil>P\<rceil> \<lceil>Q\<rceil>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>s\<in>S. P s \<longrightarrow> (\<forall>t\<in>T. (\<forall>\<tau>\<in>down T t. G (\<phi> \<tau> s)) \<longrightarrow> Q (\<phi> t s)) \<Longrightarrow> x\<acute>= \<lambda>t. f & G on \<lambda>s. T S @ 0 \<le> Ref \<lceil>P\<rceil> \<lceil>Q\<rceil>
[PROOF STEP]
unfolding sH_g_ode[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<^bold>{P\<^bold>}x\<acute>= \<lambda>t. f & G on \<lambda>s. T S @ 0\<^bold>{Q\<^bold>} \<Longrightarrow> x\<acute>= \<lambda>t. f & G on \<lambda>s. T S @ 0 \<le> Ref \<lceil>P\<rceil> \<lceil>Q\<rceil>
[PROOF STEP]
by (rule R2)
|
{"llama_tokens": 402, "file": "Hybrid_Systems_VCs_KleeneAlgebraTests_HS_VC_KAT_ndfun", "length": 2}
|
[STATEMENT]
lemma Literal_eq_iff [simp]:
"Literal b0 b1 b2 b3 b4 b5 b6 s = Literal c0 c1 c2 c3 c4 c5 c6 t
\<longleftrightarrow> (b0 \<longleftrightarrow> c0) \<and> (b1 \<longleftrightarrow> c1) \<and> (b2 \<longleftrightarrow> c2) \<and> (b3 \<longleftrightarrow> c3)
\<and> (b4 \<longleftrightarrow> c4) \<and> (b5 \<longleftrightarrow> c5) \<and> (b6 \<longleftrightarrow> c6) \<and> s = t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Literal b0 b1 b2 b3 b4 b5 b6 s = Literal c0 c1 c2 c3 c4 c5 c6 t) = (b0 = c0 \<and> b1 = c1 \<and> b2 = c2 \<and> b3 = c3 \<and> b4 = c4 \<and> b5 = c5 \<and> b6 = c6 \<and> s = t)
[PROOF STEP]
by transfer simp
|
{"llama_tokens": 299, "file": null, "length": 1}
|
!***********************************************************************
! Integrated Water Flow Model (IWFM)
! Copyright (C) 2005-2021
! State of California, Department of Water Resources
!
! This program is free software; you can redistribute it and/or
! modify it under the terms of the GNU General Public License
! as published by the Free Software Foundation; either version 2
! of the License, or (at your option) any later version.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
! (http://www.gnu.org/copyleft/gpl.html)
!
! You should have received a copy of the GNU General Public License
! along with this program; if not, write to the Free Software
! Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
!
!  For technical support, e-mail: IWFMtechsupport@water.ca.gov
!***********************************************************************
MODULE Class_AppStream_v50
USE Class_Version , ONLY: ReadVersion
USE Class_BaseAppStream , ONLY: BaseAppStreamType , &
cDataList_AtStrmReach , &
cDataList_AtStrmNode , &
ReadFractionsForGW
USE MessageLogger , ONLY: SetLastMessage , &
LogMessage , &
EchoProgress , &
FILE , &
MessageArray , &
iFatal , &
iMessage
USE GeneralUtilities , ONLY: StripTextUntilCharacter , &
CleanSpecialCharacters , &
ArrangeText , &
UpperCase , &
IntToText , &
EstablishAbsolutePathFilename , &
LocateInList , &
ShellSort , &
GetArrayData , &
ConvertID_To_Index
USE TimeSeriesUtilities , ONLY: TimeStepType , &
TimeIntervalConversion , &
IncrementTimeStamp
USE IOInterface , ONLY: GenericFileType , &
UNKNOWN
USE Class_StrmNode_v50 , ONLY: StrmNode_v50_Type , &
StrmNode_v50_ReadPreprocessedData , &
StrmNode_v50_WritePreprocessedData
USE Class_StrmReach , ONLY: StrmReach_New , &
StrmReach_GetReachNumber , &
StrmReach_WritePreprocessedData , &
StrmReach_CompileReachNetwork
USE Package_ComponentConnectors , ONLY: StrmGWConnectorType , &
StrmLakeConnectorType , &
f_iStrmToLakeFlow , &
f_iLakeToStrmFlow
USE Package_Discretization , ONLY: AppGridType , &
StratigraphyType
USE Package_Misc , ONLY: f_iFlowDest_Outside , &
f_iFlowDest_Lake , &
f_iFlowDest_StrmNode , &
f_iStrmComp , &
f_iLocationType_StrmReach , &
f_iLocationType_StrmNode , &
f_rSmoothMaxP
USE Package_Budget , ONLY: BudgetHeaderType , &
f_cVolumeUnitMarker , &
f_cLocationNameMarker , &
VR , &
f_iPER_CUM
USE Package_Matrix , ONLY: MatrixType
IMPLICIT NONE
! ******************************************************************
! ******************************************************************
! ******************************************************************
! ***
! *** VARIABLE DEFINITIONS
! ***
! ******************************************************************
! ******************************************************************
! ******************************************************************
! -------------------------------------------------------------
! --- PUBLIC ENTITIES
! -------------------------------------------------------------
PRIVATE
PUBLIC :: AppStream_v50_Type
! -------------------------------------------------------------
! --- APPLICATION STREAMS DATA TYPE
! -------------------------------------------------------------
TYPE,EXTENDS(BaseAppStreamType) :: AppStream_v50_Type
PRIVATE
REAL(8) :: DeltaT = 0.0 !Simulation timestep
REAL(8),ALLOCATABLE :: StorChange(:) !Change in storage at each node
TYPE(StrmNode_v50_Type),ALLOCATABLE :: Nodes(:)
TYPE(GenericFileType) :: FinalFlowFile !File that stores flows at the end of simulation
CONTAINS
PROCEDURE,PASS :: SetStaticComponent => AppStream_v50_SetStaticComponent
PROCEDURE,PASS :: SetStaticComponentFromBinFile => ReadPreprocessedData
PROCEDURE,PASS :: SetDynamicComponent => AppStream_v50_SetDynamicComponent
PROCEDURE,PASS :: SetAllComponents => AppStream_v50_SetAllComponents
PROCEDURE,PASS :: SetAllComponentsWithoutBinFile => AppStream_v50_SetAllComponentsWithoutBinFile
        PROCEDURE,PASS :: GetSubDataList_ForLocationAndDataType => AppStream_v50_GetSubDataList_ForLocationAndDataType   !Overriding the method defined in the base class
PROCEDURE,PASS :: GetStrmNodeIDs => AppStream_v50_GetStrmNodeIDs
PROCEDURE,PASS :: GetStrmNodeID => AppStream_v50_GetStrmNodeID
PROCEDURE,PASS :: GetStrmNodeIndex => AppStream_v50_GetStrmNodeIndex
PROCEDURE,PASS :: GetNUpstrmNodes => AppStream_v50_GetNUpstrmNodes
PROCEDURE,PASS :: GetUpstrmNodes => AppStream_v50_GetUpstrmNodes
PROCEDURE,PASS :: GetStageFlowRatingTable => AppStream_v50_GetStageFlowRatingTable
PROCEDURE,PASS :: GetVersion => AppStream_v50_GetVersion
PROCEDURE,PASS :: GetBottomElevations => AppStream_v50_GetBottomElevations
PROCEDURE,PASS :: GetNRatingTablePoints => AppStream_v50_GetNRatingTablePoints
PROCEDURE,PASS :: KillImplementation => AppStream_v50_Kill
PROCEDURE,PASS :: WritePreprocessedData => AppStream_v50_WritePreprocessedData
PROCEDURE,PASS :: WriteDataToTextFile => AppStream_v50_WriteDataToTextFile
PROCEDURE,PASS :: UpdateHeads => AppStream_v50_UpdateHeads
PROCEDURE,PASS :: ConvertTimeUnit => AppStream_v50_ConvertTimeUnit
PROCEDURE,PASS :: ConvertFlowToElev => AppStream_v50_ConvertFlowToElev
PROCEDURE,PASS :: Simulate => AppStream_v50_Simulate
PROCEDURE,PASS :: PrintResults => AppStream_v50_PrintResults !Overriding the method defined in the base class
PROCEDURE,PASS :: AdvanceState => AppSTream_v50_AdvanceState !Overriding the method defined in the base class
END TYPE AppStream_v50_Type
! -------------------------------------------------------------
! --- VERSION RELATED ENTITIES
! -------------------------------------------------------------
INTEGER,PARAMETER :: iVersion = 50
INTEGER,PARAMETER :: iLenVersion = 8
CHARACTER(LEN=iLenVersion),PARAMETER :: cVersion = '5.0.0000'
INCLUDE 'AppStream_v50_Revision.fi'
! -------------------------------------------------------------
! --- BUDGET RELATED DATA
! -------------------------------------------------------------
INTEGER,PARAMETER :: NStrmBudColumns = 15
CHARACTER(LEN=30),PARAMETER :: cBudgetColumnTitles(NStrmBudColumns) = ['Upstream Inflow (+)' , &
'Downstream Outflow (-)' , &
'Change in Storage (-)' , &
'Tributary Inflow (+)' , &
'Tile Drain (+)' , &
'Runoff (+)' , &
'Return Flow (+)' , &
'Gain from GW_Inside Model (+)' , &
'Gain from GW_Outside Model (+)' , &
'Gain from Lake (+)' , &
'Riparian ET (-)' , &
'Diversion (-)' , &
'By-pass Flow (-)' , &
'Discrepancy (=)' , &
'Diversion Shortage' ]
! -------------------------------------------------------------
! --- MISC. ENTITIES
! -------------------------------------------------------------
INTEGER,PARAMETER :: ModNameLen = 21
CHARACTER(LEN=ModNameLen),PARAMETER :: ModName = 'Class_AppStream_v50::'
CONTAINS
! ******************************************************************
! ******************************************************************
! ******************************************************************
! ***
! *** CONSTRUCTORS
! ***
! ******************************************************************
! ******************************************************************
! ******************************************************************
! -------------------------------------------------------------
! --- READ RAW STREAM DATA (GENERALLY CALLED IN PRE-PROCESSOR)
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_SetStaticComponent(AppStream,cFileName,AppGrid,Stratigraphy,IsRoutedStreams,StrmGWConnector,StrmLakeConnector,iStat)
CLASS(AppStream_v50_Type),INTENT(OUT) :: AppStream
CHARACTER(LEN=*),INTENT(IN) :: cFileName
TYPE(AppGridType),INTENT(IN) :: AppGrid !Not used in this version
TYPE(StratigraphyType),INTENT(IN) :: Stratigraphy
LOGICAL,INTENT(IN) :: IsRoutedStreams
TYPE(StrmGWConnectorType),INTENT(OUT) :: StrmGWConnector
TYPE(StrmLakeConnectorType) :: StrmLakeConnector
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+32) :: ThisProcedure = ModName // 'AppStream_v50_SetStaticComponent'
CHARACTER :: ALine*100
INTEGER :: ErrorCode,iGWNodeIDs(AppGrid%NNodes)
TYPE(GenericFileType) :: DataFile
!Initialize
iStat = 0
iGWNodeIDs = AppGrid%AppNode%ID
!Inform user
CALL EchoProgress('Instantiating streams')
!Set the flag to check if routed or non-routed streams
AppStream%lRouted = IsRoutedStreams
!Open file
CALL DataFile%New(FileName=cFileName,InputFile=.TRUE.,IsTSFile=.FALSE.,Descriptor='Stream configuration data',iStat=iStat)
IF (iStat .EQ. -1) RETURN
!Read away the first line that holds the version number and set the version number using internal variables
CALL DataFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN
AppStream%Version = AppStream%Version%New(iLenVersion,cVersion,cRevision)
!Read dimensions
CALL DataFile%ReadData(AppStream%NReaches,iStat) ; IF (iStat .EQ. -1) RETURN
!Compile the total number of stream nodes
CALL CalculateNStrmNodes(DataFile,AppStream%NReaches,AppStream%NStrmNodes,iStat) ; IF (iStat .EQ. -1) RETURN
!Allocate memory
ALLOCATE (AppStream%Nodes(AppStream%NStrmNodes) , &
AppStream%Reaches(AppStream%NReaches) , &
STAT = ErrorCode )
IF (ErrorCode .NE. 0) THEN
CALL SetLastMessage('Error allocating memory for stream configuration data!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
!Read stream configuration
CALL ReadStreamConfigData(DataFile,Stratigraphy,iGWNodeIDs,StrmGWConnector,StrmLakeConnector,AppStream,iStat)
IF (iStat .EQ. -1) RETURN
!Read stream nodes and fraction of stream-aquifer interaction to be applied to corresponding gw nodes
CALL ReadFractionsForGW(DataFile,AppStream%Nodes%ID,StrmGWConnector,iStat)
IF (iStat .EQ. -1) RETURN
!Close file
CALL DataFile%Kill()
END SUBROUTINE AppStream_v50_SetStaticComponent
! -------------------------------------------------------------
! --- INSTANTIATE DYNAMIC PART OF STREAM DATA (GENERALLY CALLED IN SIMULATION)
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_SetDynamicComponent(AppStream,IsForInquiry,cFileName,cWorkingDirectory,TimeStep,NTIME,iLakeIDs,AppGrid,Stratigraphy,StrmLakeConnector,StrmGWConnector,iStat)
CLASS(AppStream_v50_Type) :: AppStream
LOGICAL,INTENT(IN) :: IsForInquiry
CHARACTER(LEN=*),INTENT(IN) :: cFileName,cWorkingDirectory
TYPE(TimeStepType),INTENT(IN) :: TimeStep
INTEGER,INTENT(IN) :: NTIME,iLakeIDs(:)
TYPE(AppGridType),INTENT(IN) :: AppGrid
TYPE(StratigraphyType),INTENT(IN) :: Stratigraphy
TYPE(StrmLakeConnectorType) :: StrmLakeConnector
TYPE(StrmGWConnectorType) :: StrmGWConnector
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+33) :: ThisProcedure = ModName // 'AppStream_v50_SetDynamicComponent'
INTEGER :: indxNode,ICType,ErrorCode,iReachIDs(AppStream%NReaches),iStrmNodeIDs(AppStream%NStrmNodes), &
iGWNodeIDs(AppGrid%NNodes),iStrmNodeID,iStrmNode,indx
TYPE(GenericFileType) :: MainFile
CHARACTER(LEN=1000) :: ALine,DiverFileName,DiverSpecFileName,BypassSpecFileName,DiverDetailBudFileName,ReachBudRawFileName
CHARACTER :: TimeUnitFlow*6
TYPE(BudgetHeaderType) :: BudHeader
CHARACTER(:),ALLOCATABLE :: cVersionSim,cVersionPre,cAbsPathFileName
INTEGER,ALLOCATABLE :: GWNodes(:)
REAL(8) :: FACTH,DummyArray(AppStream%NStrmNodes,2),TimeFactor
LOGICAL :: lProcessed(AppStream%NStrmNodes)
INTEGER,PARAMETER :: ICType_H = 0 , &
ICType_Q = 1 , &
ICTypeList(2) = [ICType_H , ICType_Q]
!Initialize
iStat = 0
iStrmNodeIDs = AppStream%Nodes%ID
iGWNodeIDs = AppGrid%AppNode%ID
!Open main file
CALL MainFile%New(FileName=cFileName,InputFile=.TRUE.,Descriptor='main stream data file',iStat=iStat)
IF (iStat .EQ. -1) RETURN
!Make sure that version numbers from Pre-processor and Simulation match
cVersionPre = AppStream%Version%GetVersion() ; cVersionPre = StripTextUntilCharacter(cVersionPre,'.',Back=.TRUE.)
CALL ReadVersion(MainFile,'STREAM',cVersionSim,iStat) ; IF (iStat .EQ. -1) RETURN
IF (TRIM(cVersionSim) .NE. TRIM(cVersionPre)) THEN
MessageArray(1) = 'Stream Component versions used in Pre-Processor and Simulation must match!'
MessageArray(2) = 'Version number in Pre-Processor = ' // TRIM(cVersionPre)
MessageArray(3) = 'Version number in Simulation = ' // TRIM(cVersionSim)
CALL SetLastMessage(MessageArray(1:3),iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
!Set the simulation time step
AppStream%DeltaT = TimeStep%DeltaT
!Allocate memory for stream states
IF (.NOT. ALLOCATED(AppStream%State)) ALLOCATE (AppStream%State(AppStream%NStrmNodes))
ALLOCATE (AppStream%StorChange(AppStream%NStrmNodes)) ; AppStream%StorChange = 0.0
!Initialize related files
!-------------------------
!Stream inflow file
CALL MainFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN
IF (AppStream%lRouted) THEN
ALine = StripTextUntilCharacter(ALine,'/')
CALL CleanSpecialCharacters(ALine)
CALL EstablishAbsolutePathFileName(TRIM(ADJUSTL(ALine)),cWorkingDirectory,cAbsPathFileName)
CALL AppStream%StrmInflowData%New(cAbsPathFileName,cWorkingDirectory,TimeStep,AppStream%NStrmNodes,iStrmNodeIDs,iStat)
IF (iStat .EQ. -1) RETURN
END IF
!Diversion specs file name
CALL MainFile%ReadData(DiverSpecFileName,iStat) ; IF (iStat .EQ. -1) RETURN
IF (AppStream%lRouted) THEN
DiverSpecFileName = StripTextUntilCharacter(DiverSpecFileName,'/')
CALL CleanSpecialCharacters(DiverSpecFileName)
CALL EstablishAbsolutePathFileName(TRIM(ADJUSTL(DiverSpecFileName)),cWorkingDirectory,cAbsPathFileName)
DiverSpecFileName = cAbsPathFileName
END IF
!Bypass specs file name
CALL MainFile%ReadData(BypassSpecFileName,iStat) ; IF (iStat .EQ. -1) RETURN
IF (AppStream%lRouted) THEN
BypassSpecFileName = StripTextUntilCharacter(BypassSpecFileName,'/')
CALL CleanSpecialCharacters(BypassSpecFileName)
CALL EstablishAbsolutePathFileName(TRIM(ADJUSTL(BypassSpecFileName)),cWorkingDirectory,cAbsPathFileName)
BypassSpecFileName = cAbsPathFileName
END IF
!Diversions file name
CALL MainFile%ReadData(DiverFileName,iStat) ; IF (iStat .EQ. -1) RETURN
IF (AppStream%lRouted) THEN
DiverFileName = StripTextUntilCharacter(DiverFileName,'/')
CALL CleanSpecialCharacters(DiverFileName)
CALL EstablishAbsolutePathFileName(TRIM(ADJUSTL(DiverFileName)),cWorkingDirectory,cAbsPathFileName)
DiverFileName = cAbsPathFileName
END IF
!Stream reach budget raw file
CALL MainFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN
IF (AppStream%lRouted) THEN
ReachBudRawFileName = StripTextUntilCharacter(ALine,'/')
CALL CleanSpecialCharacters(ReachBudRawFileName)
IF (ReachBudRawFileName .NE. '') THEN
CALL EstablishAbsolutePathFileName(TRIM(ADJUSTL(ReachBudRawFileName)),cWorkingDirectory,cAbsPathFileName)
ReachBudRawFileName = cAbsPathFileName
END IF
END IF
!Diversion details raw file
CALL MainFile%ReadData(DiverDetailBudFileName,iStat) ; IF (iStat .EQ. -1) RETURN
IF (AppStream%lRouted) THEN
DiverDetailBudFileName = StripTextUntilCharacter(DiverDetailBudFileName,'/')
CALL CleanSpecialCharacters(DiverDetailBudFileName)
CALL EstablishAbsolutePathFileName(TRIM(ADJUSTL(DiverDetailBudFileName)),cWorkingDirectory,cAbsPathFileName)
DiverDetailBudFileName = cAbsPathFileName
END IF
!Diversions and bypasses
CALL AppStream%AppDiverBypass%New(IsForInquiry,DiverSpecFileName,BypassSpecFileName,DiverFileName,DiverDetailBudFileName,cWorkingDirectory,AppStream%GetVersion(),NTIME,TimeStep,AppStream%NStrmNodes,iStrmNodeIDs,iLakeIDs,AppStream%Reaches,AppGrid,StrmLakeConnector,iStat)
IF (iStat .EQ. -1) RETURN
!Reach IDs
iReachIDs = AppStream%Reaches%ID
!Prepare reach budget output file
IF (ReachBudRawFileName .NE. '') THEN
IF (IsForInquiry) THEN
CALL AppStream%StrmReachBudRawFile%New(ReachBudRawFileName,iStat)
IF (iStat .EQ. -1) RETURN
ELSE
!Sort reach IDs for budget printing in order
ALLOCATE (AppStream%iPrintReachBudgetOrder(AppStream%NReaches))
AppStream%iPrintReachBudgetOrder = [(indx,indx=1,AppStream%NReaches)]
CALL ShellSort(iReachIDs,AppStream%iPrintReachBudgetOrder)
            !Restore iReachIDs array (ShellSort reordered it in place)
iReachIDs = AppStream%Reaches%ID
!Prepare budget header
BudHeader = PrepareStreamBudgetHeader(AppStream%NReaches,AppStream%iPrintReachBudgetOrder,iReachIDs,iStrmNodeIDs,NTIME,TimeStep,AppStream%GetVersion(),cReachNames=AppStream%Reaches%cName)
CALL AppStream%StrmReachBudRawFile%New(ReachBudRawFileName,BudHeader,iStat)
IF (iStat .EQ. -1) RETURN
CALL BudHeader%Kill()
END IF
AppStream%StrmReachBudRawFile_Defined = .TRUE.
END IF
!End-of-simulation flows file
CALL MainFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN
IF (AppStream%lRouted) THEN
ALine = StripTextUntilCharacter(ALine,'/')
CALL CleanSpecialCharacters(ALine)
IF (ALine .NE. '') THEN
CALL EstablishAbsolutePathFileName(TRIM(ADJUSTL(ALine)),cWorkingDirectory,cAbsPathFileName)
IF (IsForInquiry) THEN
CALL AppStream%FinalFlowFile%New(FileName=cAbsPathFileName,InputFile=.TRUE.,IsTSFile=.FALSE.,Descriptor='end-of-simulation stream flows data',iStat=iStat)
ELSE
CALL AppStream%FinalFlowFile%New(FileName=cAbsPathFileName,InputFile=.FALSE.,IsTSFile=.FALSE.,Descriptor='end-of-simulation stream flows data',iStat=iStat)
END IF
IF (iStat .EQ. -1) RETURN
END IF
END IF
!Hydrograph printing
CALL AppStream%StrmHyd%New(AppStream%lRouted,IsForInquiry,cWorkingDirectory,AppStream%NStrmNodes,iStrmNodeIDs,TimeStep,MainFile,iStat)
IF (iStat .EQ. -1) RETURN
!Stream budget at selected segments
CALL AppStream%StrmNodeBudget%New(AppStream%lRouted,IsForInquiry,cWorkingDirectory,iReachIDs,iStrmNodeIDs,NTIME,TimeStep,AppStream%GetVersion(),PrepareStreamBudgetHeader,MainFile,iStat)
IF (iStat .EQ. -1) RETURN
!Stream bed parameters for stream-gw connectivity
CALL StrmGWConnector%CompileConductance(MainFile,AppGrid,Stratigraphy,AppStream%NStrmNodes,iStrmNodeIDs,AppStream%Reaches%UpstrmNode,AppStream%Reaches%DownstrmNode,AppStream%Nodes%BottomElev,iStat)
IF (iStat .EQ. -1) RETURN
!Stream channel parameters
CALL ReadCrossSectionData(Stratigraphy,iGWNodeIDs,iStrmNodeIDs,StrmGWConnector,MainFile,AppStream,iStat)
IF (iStat .EQ. -1) RETURN
    !Calculate bed slope, the distance between each node and the next, and the length of the corresponding segments
CALL StrmGWConnector%GetAllGWNodes(GWNodes)
CALL CompileDistanceLengthSlope(GWNodes,AppGrid,AppStream,iStat)
IF (iStat .EQ. -1) RETURN
!If non-routed streams, return at this point
IF (.NOT. AppStream%lRouted) THEN
DEALLOCATE (GWNodes , STAT=ErrorCode)
CALL MainFile%Kill()
RETURN
END IF
!Initial conditions
CALL MainFile%ReadData(ICType,iStat) ; IF (iStat .EQ. -1) RETURN
IF (LocateInList(ICType,ICTypeList) .EQ. 0) THEN
CALL SetLastMessage('Initial condition type '//TRIM(IntToText(ICType))//' is not recognized!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
CALL MainFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN
CALL CleanSpecialCharacters(ALine)
TimeUnitFlow = ADJUSTL(StripTextUntilCharacter(ALine,'/'))
!Make sure time unit for flow initial conditions is specified
IF (ICType .EQ. ICType_Q) THEN
IF (TimeUnitFlow .EQ. '') THEN
CALL SetLastMessage('The time unit for stream initial conditions must be specified if initial conditions are given as flows!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END IF
CALL MainFile%ReadData(FACTH,iStat) ; IF (iStat .EQ. -1) RETURN
CALL MainFile%ReadData(DummyArray,iStat) ; IF (iStat .EQ. -1) RETURN
lProcessed = .FALSE.
DO indxNode=1,AppStream%NStrmNodes
!Make sure that node is recognized
iStrmNodeID = INT(DummyArray(indxNode,1))
CALL ConvertID_To_Index(iStrmNodeID,iStrmNodeIDs,iStrmNode)
IF (iStrmNode .EQ. 0) THEN
CALL SetLastMessage('Stream node ID '//TRIM(IntToText(iStrmNodeID))//' listed for initial conditions is not in the model!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
        !Make sure node is not defined more than once
IF (lProcessed(iStrmNode)) THEN
            CALL SetLastMessage('Stream node ID '//TRIM(IntToText(iStrmNodeID))//' is listed more than once for initial conditions!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
lProcessed(iStrmNode) = .TRUE.
!Assign initial conditions
SELECT CASE (ICType)
CASE (ICType_H)
AppStream%State(iStrmNode)%Head = MAX(AppStream%Nodes(iStrmNode)%BottomElev + DummyArray(indxNode,2) * FACTH , AppStream%Nodes(iStrmNode)%BottomElev)
AppStream%State(iStrmNode)%Head_P = AppStream%State(iStrmNode)%Head
AppStream%State(iStrmNode)%Flow = AppStream%Nodes(iStrmNode)%Flow(AppStream%State(iStrmNode)%Head)
CASE (ICType_Q)
                !At this point Manning's roughness time unit is seconds. Convert the flow time
                !unit to seconds and compute the corresponding head accordingly. Later, the flow
                !time unit will be converted to the simulation time unit along with all other
                !time units of the parameters in this component.
                TimeFactor = TimeIntervalConversion(TimeUnitFlow,'1MIN') * 60D0   !Multiply by 60 to convert minutes to seconds
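                !For example, if TimeUnitFlow is '1DAY' and assuming TimeIntervalConversion
                !returns the number of 1MIN intervals in TimeUnitFlow (1440 for one day),
                !TimeFactor = 1440 * 60 = 86400, i.e. a flow given per day is converted
                !to a flow per second.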
AppStream%State(iStrmNode)%Flow = DummyArray(indxNode,2) * FACTH / TimeFactor
AppStream%State(iStrmNode)%Head = MAX(AppStream%Nodes(iStrmNode)%Head(AppStream%State(iStrmNode)%Flow) , AppStream%Nodes(iStrmNode)%BottomElev)
AppStream%State(iStrmNode)%Head_P = AppStream%State(iStrmNode)%Head
IF (AppStream%State(iStrmNode)%Head .EQ. -9999.9999d0) THEN
CALL SetLastMessage('There was a convergence problem in converting the initial flow for stream node '//TRIM(IntToText(iStrmNodeID))//' to stream flow depth!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END SELECT
!Flow area
AppStream%Nodes(iStrmNode)%Area_P = AppStream%Nodes(iStrmNode)%Area(AppStream%State(iStrmNode)%Head_P)
END DO
!Clear memory
DEALLOCATE (GWNodes , STAT=ErrorCode)
!Close main file
CALL MainFile%Kill()
END SUBROUTINE AppStream_v50_SetDynamicComponent
! -------------------------------------------------------------
! --- INSTANTIATE COMPLETE STREAM DATA
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_SetAllComponents(AppStream,IsForInquiry,cFileName,cSimWorkingDirectory,TimeStep,NTIME,iLakeIDs,AppGrid,Stratigraphy,BinFile,StrmLakeConnector,StrmGWConnector,iStat)
CLASS(AppStream_v50_Type),INTENT(OUT) :: AppStream
LOGICAL,INTENT(IN) :: IsForInquiry
CHARACTER(LEN=*),INTENT(IN) :: cFileName,cSimWorkingDirectory
TYPE(TimeStepType),INTENT(IN) :: TimeStep
INTEGER,INTENT(IN) :: NTIME,iLakeIDs(:)
TYPE(AppGridType),INTENT(IN) :: AppGrid
TYPE(StratigraphyType),INTENT(IN) :: Stratigraphy
TYPE(GenericFileType) :: BinFile
TYPE(StrmLakeConnectorType) :: StrmLakeConnector
TYPE(StrmGWConnectorType) :: StrmGWConnector
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+30) :: ThisProcedure = ModName // 'AppStream_v50_SetAllComponents'
!Initialize
iStat = 0
!Echo progress
CALL EchoProgress('Instantiating streams')
!Read the preprocessed data for streams
CALL ReadPreprocessedData(AppStream,BinFile,iStat)
IF (iStat .EQ. -1) RETURN
!Set the dynamic part of AppStream
CALL AppStream_v50_SetDynamicComponent(AppStream,IsForInquiry,cFileName,cSimWorkingDirectory,TimeStep,NTIME,iLakeIDs,AppGrid,Stratigraphy,StrmLakeConnector,StrmGWConnector,iStat)
IF (iStat .EQ. -1) RETURN
!Make sure that if static part is defined, so is the dynamic part
IF (AppStream%NStrmNodes .GT. 0) THEN
IF (SIZE(AppStream%State) .EQ. 0) THEN
MessageArray(1) = 'For proper simulation of streams, relevant stream data files must'
MessageArray(2) = 'be specified when stream nodes are defined in Pre-Processor.'
CALL SetLastMessage(MessageArray(1:2),iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END IF
END SUBROUTINE AppStream_v50_SetAllComponents
! -------------------------------------------------------------
! --- INSTANTIATE COMPLETE STREAM DATA WITHOUT INTERMEDIATE BINARY FILE
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_SetAllComponentsWithoutBinFile(AppStream,IsRoutedStreams,IsForInquiry,cPPFileName,cSimFileName,cSimWorkingDirectory,AppGrid,Stratigraphy,TimeStep,NTIME,iLakeIDs,StrmLakeConnector,StrmGWConnector,iStat)
CLASS(AppStream_v50_Type),INTENT(OUT) :: AppStream
LOGICAL,INTENT(IN) :: IsRoutedStreams,IsForInquiry
CHARACTER(LEN=*),INTENT(IN) :: cPPFileName,cSimFileName,cSimWorkingDirectory
TYPE(AppGridType),INTENT(IN) :: AppGrid
TYPE(StratigraphyType),INTENT(IN) :: Stratigraphy
TYPE(TimeStepType),INTENT(IN) :: TimeStep
INTEGER,INTENT(IN) :: NTIME,iLakeIDs(:)
TYPE(StrmLakeConnectorType) :: StrmLakeConnector
TYPE(StrmGWConnectorType),INTENT(OUT) :: StrmGWConnector
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+44) :: ThisProcedure = ModName // 'AppStream_v50_SetAllComponentsWithoutBinFile'
!Initialize
iStat = 0
!Instantiate the static components of the AppStream data
CALL AppStream_v50_SetStaticComponent(AppStream,cPPFileName,AppGrid,Stratigraphy,IsRoutedStreams,StrmGWConnector,StrmLakeConnector,iStat)
IF (iStat .EQ. -1) RETURN
!Instantiate the dynamic component of the AppStream data
CALL AppStream_v50_SetDynamicComponent(AppStream,IsForInquiry,cSimFileName,cSimWorkingDirectory,TimeStep,NTIME,iLakeIDs,AppGrid,Stratigraphy,StrmLakeConnector,StrmGWConnector,iStat)
IF (iStat .EQ. -1) RETURN
!Make sure that if static part is defined, so is the dynamic part
IF (AppStream%NStrmNodes .GT. 0) THEN
IF (SIZE(AppStream%State) .EQ. 0) THEN
MessageArray(1) = 'For proper simulation of streams, relevant stream data files must'
MessageArray(2) = 'be specified when stream nodes are defined in Pre-Processor.'
CALL SetLastMessage(MessageArray(1:2),iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END IF
END SUBROUTINE AppStream_v50_SetAllComponentsWithoutBinFile
! ******************************************************************
! ******************************************************************
! ******************************************************************
! ***
! *** DESTRUCTORS
! ***
! ******************************************************************
! ******************************************************************
! ******************************************************************
! -------------------------------------------------------------
! --- KILL STREAM DATA OBJECT
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_Kill(AppStream)
CLASS(AppStream_v50_Type) :: AppStream
!Local variables
INTEGER :: ErrorCode
!Deallocate array attributes
DEALLOCATE (AppStream%Nodes , AppStream%StorChange , STAT=ErrorCode)
END SUBROUTINE AppStream_v50_Kill
! ******************************************************************
! ******************************************************************
! ******************************************************************
! ***
! *** GETTERS
! ***
! ******************************************************************
! ******************************************************************
! ******************************************************************
! -------------------------------------------------------------
! --- GET SUB-COMPONENTS OF A DATA TYPE FOR POST-PROCESSING AT A LOCATION TYPE
! --- (REDEFINES THE PROCEDURE IN Class_BaseAppStream)
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_GetSubDataList_ForLocationAndDataType(AppStream,iLocationType,cDataType,cSubDataList)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
INTEGER,INTENT(IN) :: iLocationType
CHARACTER(LEN=*),INTENT(IN) :: cDataType
CHARACTER(LEN=*),ALLOCATABLE,INTENT(OUT) :: cSubDataList(:)
!Local variables
INTEGER :: ErrorCode
!Initialize
DEALLOCATE (cSubDataList , STAT=ErrorCode)
SELECT CASE (iLocationType)
CASE (f_iLocationType_StrmReach)
!Only stream reach budget has sub-data
IF (TRIM(cDataType) .EQ. cDataList_AtStrmReach) THEN
IF (AppStream%StrmReachBudRawFile_Defined) THEN
ALLOCATE (cSubDataList(NStrmBudColumns))
cSubDataList = cBudgetColumnTitles
END IF
END IF
CASE (f_iLocationType_StrmNode)
!Only stream node budget has sub-data
IF (TRIM(cDataType) .EQ. cDataList_AtStrmNode) THEN
IF (AppStream%StrmNodeBudget%StrmNodeBudRawFile_Defined) THEN
ALLOCATE (cSubDataList(NStrmBudColumns))
cSubDataList = cBudgetColumnTitles
END IF
END IF
END SELECT
END SUBROUTINE AppStream_v50_GetSubDataList_ForLocationAndDataType
! -------------------------------------------------------------
! --- GET VERSION NUMBER
! -------------------------------------------------------------
FUNCTION AppStream_v50_GetVersion(AppStream) RESULT(cVrs)
CLASS(AppStream_v50_Type) :: AppStream
CHARACTER(:),ALLOCATABLE :: cVrs
IF (.NOT. AppStream%Version%IsDefined()) &
AppStream%Version = AppStream%Version%New(iLenVersion,cVersion,cRevision)
cVrs = AppStream%Version%GetVersion()
END FUNCTION AppStream_v50_GetVersion
! -------------------------------------------------------------
! --- GET STREAM NODE IDS
! -------------------------------------------------------------
PURE SUBROUTINE AppStream_v50_GetStrmNodeIDs(AppStream,iStrmNodeIDs)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
INTEGER,INTENT(OUT) :: iStrmNodeIDs(:)
iStrmNodeIDs = AppStream%Nodes%ID
END SUBROUTINE AppStream_v50_GetStrmNodeIDs
! -------------------------------------------------------------
! --- GET STREAM NODE ID GIVEN INDEX
! -------------------------------------------------------------
PURE FUNCTION AppStream_v50_GetStrmNodeID(AppStream,indx) RESULT(iStrmNodeID)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
INTEGER,INTENT(IN) :: indx
INTEGER :: iStrmNodeID
iStrmNodeID = AppStream%Nodes(indx)%ID
END FUNCTION AppStream_v50_GetStrmNodeID
! -------------------------------------------------------------
! --- GET STREAM NODE INDEX GIVEN ID
! -------------------------------------------------------------
PURE FUNCTION AppStream_v50_GetStrmNodeIndex(AppStream,ID) RESULT(Index)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
INTEGER,INTENT(IN) :: ID
INTEGER :: Index
!Local variables
INTEGER :: indx
Index = 0
DO indx=1,SIZE(AppStream%Nodes)
IF (ID .EQ. AppStream%Nodes(indx)%ID) THEN
Index = indx
EXIT
END IF
END DO
END FUNCTION AppStream_v50_GetStrmNodeIndex
! -------------------------------------------------------------
! --- GET RATING TABLE (STAGE VS. FLOW) AT A NODE
! --- *** Note: This procedure is not used in this version of AppStream package
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_GetStageFlowRatingTable(AppStream,iNode,Stage,Flow)
CLASS(AppStream_v50_Type),TARGET,INTENT(IN) :: AppStream
INTEGER,INTENT(IN) :: iNode !Not used in this version
REAL(8),INTENT(OUT) :: Stage(:),Flow(:)
!Gather content
Stage = 0.0
Flow = 0.0
END SUBROUTINE AppStream_v50_GetStageFlowRatingTable
! -------------------------------------------------------------
! --- GET NUMBER OF RATING TABLE POINTS AT A STREAM NODE
! --- ***Note: This procedure is not used in this version of AppStream package
! -------------------------------------------------------------
PURE FUNCTION AppStream_v50_GetNRatingTablePoints(AppStream,iStrmNode) RESULT(N)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream !Not used in this version
INTEGER,INTENT(IN) :: iStrmNode !Not used in this version
INTEGER :: N
N = 0
END FUNCTION AppStream_v50_GetNRatingTablePoints
! -------------------------------------------------------------
! --- GET BOTTOM ELEVATIONS
! -------------------------------------------------------------
PURE FUNCTION AppStream_v50_GetBottomElevations(AppStream) RESULT(BottomElev)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
REAL(8) :: BottomElev(AppStream%NStrmNodes)
BottomElev = AppStream%Nodes%BottomElev
END FUNCTION AppStream_v50_GetBottomElevations
! -------------------------------------------------------------
! --- GET NUMBER OF NODES DRAINING INTO A NODE
! -------------------------------------------------------------
FUNCTION AppStream_v50_GetNUpstrmNodes(AppStream,iStrmNode) RESULT(iNNodes)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
INTEGER,INTENT(IN) :: iStrmNode
INTEGER :: iNNodes
iNNodes = AppStream%Nodes(iStrmNode)%Connectivity%nConnectedNodes
END FUNCTION AppStream_v50_GetNUpstrmNodes
! -------------------------------------------------------------
! --- GET NODES DRAINING INTO A NODE
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_GetUpstrmNodes(AppStream,iNode,UpstrmNodes)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
INTEGER,INTENT(IN) :: iNode
INTEGER,ALLOCATABLE,INTENT(OUT) :: UpstrmNodes(:)
!Local variables
INTEGER :: ErrorCode
!Initialize
DEALLOCATE (UpstrmNodes , STAT=ErrorCode)
ALLOCATE (UpstrmNodes(AppStream%Nodes(iNode)%Connectivity%nConnectedNodes))
UpstrmNodes = AppStream%Nodes(iNode)%Connectivity%ConnectedNodes
END SUBROUTINE AppStream_v50_GetUpstrmNodes
! ******************************************************************
! ******************************************************************
! ******************************************************************
! ***
! *** DATA READERS
! ***
! ******************************************************************
! ******************************************************************
! ******************************************************************
! -------------------------------------------------------------
! --- READ PREPROCESSED DATA
! -------------------------------------------------------------
SUBROUTINE ReadPreprocessedData(AppStream,BinFile,iStat)
CLASS(AppStream_v50_Type),INTENT(OUT) :: AppStream
TYPE(GenericFileType) :: BinFile
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+20) :: ThisProcedure = ModName // 'ReadPreprocessedData'
INTEGER :: ErrorCode,iLenVersion
CHARACTER(:),ALLOCATABLE :: cVrs
!Initialize
iStat = 0
!Read version number
CALL BinFile%ReadData(iLenVersion,iStat) ; IF (iStat .EQ. -1) RETURN
ALLOCATE (CHARACTER(iLenVersion) :: cVrs)
CALL BinFile%ReadData(cVrs,iStat) ; IF (iStat .EQ. -1) RETURN
AppStream%Version = AppStream%Version%New(cVrs)
!Routed/non-routed flag
CALL BinFile%ReadData(AppStream%lRouted,iStat) ; IF (iStat .EQ. -1) RETURN
!Read dimensions
CALL BinFile%ReadData(AppStream%NStrmNodes,iStat) ; IF (iStat .EQ. -1) RETURN
CALL BinFile%ReadData(AppStream%NReaches,iStat) ; IF (iStat .EQ. -1) RETURN
!Allocate memory
ALLOCATE (AppStream%Nodes(AppStream%NStrmNodes) , AppStream%Reaches(AppStream%NReaches) , STAT=ErrorCode)
IF (ErrorCode .NE. 0) THEN
CALL SetLastMessage('Error allocating memory for stream data!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
!Read stream node data
CALL StrmNode_v50_ReadPreprocessedData(AppStream%Nodes,BinFile,iStat)
IF (iStat .EQ. -1) RETURN
!Read stream reach data
CALL StrmReach_New(AppStream%NReaches,Binfile,AppStream%Reaches,iStat)
END SUBROUTINE ReadPreprocessedData
! -------------------------------------------------------------
! --- READ STREAM REACH CONFIGURATION DATA
! -------------------------------------------------------------
SUBROUTINE ReadStreamConfigData(DataFile,Stratigraphy,iGWNodeIDs,StrmGWConnector,StrmLakeConnector,AppStream,iStat)
TYPE(GenericFileType) :: DataFile
TYPE(StratigraphyType),INTENT(IN) :: Stratigraphy
INTEGER,INTENT(IN) :: iGWNodeIDs(:)
TYPE(StrmGWConnectorType),INTENT(OUT) :: StrmGWConnector
TYPE(StrmLakeConnectorType) :: StrmLakeConnector
TYPE(AppStream_v50_Type) :: AppStream
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+20) :: ThisProcedure = ModName // 'ReadStreamConfigData'
INTEGER :: indxReach,DummyIntArray3(3),indxNode,DummyIntArray2(2),indxStrmNode, &
iDestNode,NNodes,iGWNodes(AppStream%NStrmNodes),iReachID,indxNode1, &
indxReach1,iStrmNodeID,iLayers(AppStream%NStrmNodes)
CHARACTER :: ALine*2000
!Initialize
iStat = 0
!Iterate over reaches
indxStrmNode = 0
DO indxReach=1,AppStream%NReaches
ASSOCIATE (pReach => AppStream%Reaches(indxReach))
CALL DataFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN
READ (ALine,*) pReach%ID
CALL GetArrayData(ALine,DummyIntArray3,'stream reach '//TRIM(IntToText(pReach%ID)),iStat) ; IF (iStat .EQ. -1) RETURN
pReach%cName = ALine(1:20)
!Make sure reach ID is not used more than once
DO indxReach1=1,indxReach-1
IF (pReach%ID .EQ. AppStream%Reaches(indxReach1)%ID) THEN
CALL SetLastMessage('Stream reach ID '//TRIM(IntToText(pReach%ID))//' is used more than once!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END DO
!Store data in persistent arrays
pReach%UpstrmNode = indxStrmNode + 1
pReach%DownstrmNode = indxStrmNode + DummyIntArray3(2)
IF (DummyIntArray3(3) .GT. 0) THEN
pReach%OutflowDest = DummyIntArray3(3)
ELSEIF (DummyIntArray3(3) .EQ. 0) THEN
pReach%OutflowDestType = f_iFlowDest_Outside
pReach%OutflowDest = 0
ELSE
pReach%OutflowDestType = f_iFlowDest_Lake
pReach%OutflowDest = -DummyIntArray3(3)
CALL StrmLakeConnector%AddData(f_iStrmToLakeFlow , pReach%DownstrmNode , pReach%OutflowDest)
END IF
!Make sure there are at least 2 stream nodes defined for the reach
NNodes = DummyIntArray3(2)
IF (NNodes .LT. 2) THEN
MessageArray(1) = 'There should be at least 2 stream nodes for each reach.'
MessageArray(2) = 'Reach '//TRIM(IntToText(pReach%ID))//' has less than 2 stream nodes!'
CALL SetLastMessage(MessageArray(1:2),iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
            !Read stream node IDs and the corresponding gw nodes
DO indxNode=1,NNodes
indxStrmNode = indxStrmNode + 1
CALL DataFile%ReadData(DummyIntArray2,iStat) ; IF (iStat .EQ. -1) RETURN
iStrmNodeID = DummyIntArray2(1)
AppStream%Nodes(indxStrmNode)%ID = iStrmNodeID
!Make sure stream node ID is not repeated
DO indxNode1=1,indxStrmNode-1
IF (iStrmNodeID .EQ. AppStream%Nodes(indxNode1)%ID) THEN
CALL SetLastMessage('Stream node ID '//TRIM(IntToText(iStrmNodeID))//' is used more than once!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END DO
!Check gw node ID and store the corresponding index
CALL ConvertID_To_Index(DummyIntArray2(2),iGWNodeIDs,iGWNodes(indxStrmNode))
IF (iGWNodes(indxStrmNode) .EQ. 0) THEN
CALL SetLastMessage('Groundwater node '//TRIM(IntToText(DummyIntArray2(2)))//' listed in stream reach '//TRIM(IntToText(pReach%ID))//' ('//TRIM(pReach%cName)//') for stream node '//TRIM(IntToText(iStrmNodeID))//' is not in the model!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
!Top active aquifer layer at stream node
iLayers(indxStrmNode) = Stratigraphy%TopActiveLayer(iGWNodes(indxStrmNode))
END DO
END ASSOCIATE
END DO
!Store GW nodes for each stream node in strm-gw connector database
CALL StrmGWConnector%New(iVersion,iGWNodes,iLayers,iStat)
IF (iStat .EQ. -1) RETURN
!Convert outflow destination stream node IDs to indices and make sure that reach numbers are set properly
DO indxReach=1,AppStream%NReaches
ASSOCIATE (pReach => AppStream%Reaches(indxReach))
IF (pReach%OutFlowDestType .NE. f_iFlowDest_StrmNode) CYCLE
iReachID = pReach%ID
iStrmNodeID = pReach%OutFlowDest
CALL ConvertID_To_Index(iStrmNodeID,AppStream%Nodes%ID,iDestNode)
IF (iDestNode .EQ. 0) THEN
CALL SetLastMessage('Outflow stream node '//TRIM(IntToText(iStrmNodeID))//' for reach '//TRIM(IntToText(iReachID))//' ('//TRIM(pReach%cName)//') is not in the model!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
IF (iDestNode .LE. pReach%DownstrmNode) THEN
IF (iDestNode .GE. pReach%UpstrmNode) THEN
CALL SetLastMessage('Stream reach '//TRIM(IntToText(iReachID))//' ('//TRIM(pReach%cName)//') is outflowing back into itself!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END IF
pReach%OutFlowDest = iDestNode
END ASSOCIATE
END DO
!Compile reach network from upstream to downstream
CALL StrmReach_CompileReachNetwork(AppStream%NReaches,AppStream%Reaches,iStat)
IF (iStat .EQ. -1) RETURN
!Compile upstream nodes for each node
CALL CompileUpstrmNodes(AppStream)
END SUBROUTINE ReadStreamConfigData
! -------------------------------------------------------------
! --- READ CROSS SECTION DATA
! -------------------------------------------------------------
SUBROUTINE ReadCrossSectionData(Stratigraphy,iGWNodeIDs,iStrmNodeIDs,StrmGWConnector,DataFile,AppStream,iStat)
TYPE(StratigraphyType),INTENT(IN) :: Stratigraphy
INTEGER,INTENT(IN) :: iGWNodeIDs(:),iStrmNodeIDs(:)
TYPE(StrmGWConnectorType) :: StrmGWConnector
TYPE(GenericFileType) :: DataFile
TYPE(AppStream_v50_Type) :: AppStream
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+20) :: ThisProcedure = ModName // 'ReadCrossSectionData'
INTEGER :: indxNode,iStrmNode,iGWNode,iLayer,ErrorCode,iStrmNodeID
REAL(8) :: FACTN,FACTLT,AquiferBottomElev,DummyArray(6)
INTEGER,ALLOCATABLE :: iGWNodes(:)
LOGICAL :: lProcessed(AppStream%NStrmNodes)
!Initialize
iStat = 0
lProcessed = .FALSE.
CALL StrmGWConnector%GetAllGWNodes(iGWNodes)
!Read units conversion factors
CALL DataFile%ReadData(FACTN,iStat) ; IF (iStat .EQ. -1) RETURN ; FACTN = 1D0 / (FACTN**(1D0/3D0))
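    !Note that the conversion factor for Manning's n enters through a cube root,
    !reflecting the L^(1/3) length dimension that n carries in the Manning formula
    !(assuming FACTN is supplied as the length-unit conversion factor).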
CALL DataFile%ReadData(FACTLT,iStat) ; IF (iStat .EQ. -1) RETURN
!Read cross section data
ASSOCIATE (pNodes => AppStream%Nodes)
DO indxNode=1,AppStream%NStrmNodes
iGWNode = iGWNodes(indxNode)
iLayer = Stratigraphy%TopActiveLayer(iGWNode)
!Read data
CALL DataFile%ReadData(DummyArray,iStat) ; IF (iStat .EQ. -1) RETURN
!Make sure stream node is legit
iStrmNodeID = INT(DummyArray(1))
CALL ConvertID_To_Index(iStrmNodeID,iStrmNodeIDs,iStrmNode)
IF (iStrmNode .EQ. 0) THEN
CALL SetLastMessage('Stream node ID '//TRIM(IntToText(iStrmNodeID))//' listed for cross-section parameter definition is not in the model!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
!Make sure stream node is not entered more than once
IF (lProcessed(iStrmNode)) THEN
CALL SetLastMessage('Stream node ID '//TRIM(IntToText(iStrmNodeID))//' is listed more than once for cross-section parameter definition!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
lProcessed(iStrmNode) = .TRUE.
!Stream bottom elevation
pNodes(iStrmNode)%BottomElev = DummyArray(2) * FACTLT
AquiferBottomElev = Stratigraphy%BottomElev(iGWNode,iLayer)
IF (pNodes(iStrmNode)%BottomElev .LT. AquiferBottomElev) THEN
MessageArray(1) = 'Aquifer bottom elevation at a stream node should be'
MessageArray(2) = 'less than or equal to the stream bed elevation!'
WRITE (MessageArray(3),'(A,F10.2)') ' Stream node = '//TRIM(IntToText(iStrmNodeID)) //' Stream bed elevation = ',pNodes(iStrmNode)%BottomElev
WRITE (MessageArray(4),'(A,F10.2)') ' GW node = '//TRIM(IntToText(iGWNodeIDs(iGWNode)))//' Aquifer bottom elevation= ',AquiferBottomElev
CALL SetLastMessage(MessageArray(1:4),iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
!Maximum elevation
IF (DummyArray(6) .LT. 0.0) THEN
CALL SetLastMessage('Maximum flow depth at stream node '//TRIM(IntToText(iStrmNodeID))//' cannot be less than zero!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
pNodes(iStrmNode)%MaxElev = pNodes(iStrmNode)%BottomElev + DummyArray(6) * FACTLT
!Cross section data
pNodes(iStrmNode)%CrossSection%B0 = DummyArray(3) * FACTLT
pNodes(iStrmNode)%CrossSection%s = DummyArray(4) * FACTLT
pNodes(iStrmNode)%CrossSection%n = DummyArray(5) * FACTN
!Make sure that cross section data is specified properly
IF (pNodes(iStrmNode)%CrossSection%B0 .EQ. 0.0 .AND. pNodes(iStrmNode)%CrossSection%s .EQ. 0.0) THEN
CALL SetLastMessage('B0 and s at stream node '//TRIM(IntToText(iStrmNodeID))//' cannot be both zero!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END DO
!Set the hydraulic disconnect elevations in the Stream-gw connector
CALL StrmGWConnector%SetDisconnectElevations(pNodes%BottomElev)
END ASSOCIATE
!Clear memory
DEALLOCATE (iGWNodes , STAT=ErrorCode)
END SUBROUTINE ReadCrossSectionData
! ******************************************************************
! ******************************************************************
! ******************************************************************
! ***
! *** DATA WRITERS
! ***
! ******************************************************************
! ******************************************************************
! ******************************************************************
! -------------------------------------------------------------
! --- PRINT OUT SIMULATION RESULTS
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_PrintResults(AppStream,TimeStep,lEndOfSimulation,QTRIB,QROFF,QRTRN,QDRAIN,QRVET,BottomElev,StrmGWConnector,StrmLakeConnector)
CLASS(AppStream_v50_Type) :: AppStream
TYPE(TimeStepType),INTENT(IN) :: TimeStep
LOGICAL,INTENT(IN) :: lEndOfSimulation
REAL(8),INTENT(IN) :: QTRIB(:),QROFF(:),QRTRN(:),QDRAIN(:),QRVET(:),BottomElev(:)
TYPE(StrmGWConnectorType),INTENT(IN) :: StrmGWConnector
TYPE(StrmLakeConnectorType),INTENT(IN) :: StrmLakeConnector
!Echo progress
CALL EchoProgress('Printing results of stream simulation')
!Print stream flow hydrographs
IF (AppStream%StrmHyd%IsOutFileDefined()) &
CALL AppStream%StrmHyd%PrintResults(AppStream%State,BottomElev,TimeStep,lEndOfSimulation)
!Print stream reach budget
IF (AppStream%StrmReachBudRawFile_Defined) CALL WriteStrmReachFlowsToBudRawFile(QTRIB,QROFF,QRTRN,QDRAIN,QRVET,StrmGWConnector,StrmLakeConnector,AppStream)
!Print stream node budget
IF (AppStream%StrmNodeBudget%StrmNodeBudRawFile_Defined) CALL WriteStrmNodeFlowsToBudRawFile(QTRIB,QROFF,QRTRN,QDRAIN,QRVET,StrmGWConnector,StrmLakeConnector,AppStream)
!Print diversion details
CALL AppStream%AppDiverBypass%PrintResults()
!Print end-of-simulation flows
IF (lEndOfSimulation) THEN
IF (AppStream%FinalFlowFile%iGetFileType() .NE. UNKNOWN) CALL PrintFinalFlows(AppStream%State%Flow,TimeStep,AppStream%FinalFlowFile)
END IF
END SUBROUTINE AppStream_v50_PrintResults
! -------------------------------------------------------------
! --- PRINT END-OF-SIMULATION FLOWS
! -------------------------------------------------------------
SUBROUTINE PrintFinalFlows(Flows,TimeStep,OutFile)
REAL(8),INTENT(IN) :: Flows(:)
TYPE(TimeStepType),INTENT(IN) :: TimeStep
TYPE(GenericFileType) :: OutFile
!Local variables
INTEGER :: indxNode
CHARACTER :: SimulationTime*21,Text*500
!Create the simulation time
IF (TimeStep%TrackTime) THEN
SimulationTime = ADJUSTL(TimeStep%CurrentDateAndTime)
ELSE
WRITE(SimulationTime,'(F10.2,1X,A10)') TimeStep%CurrentTime,ADJUSTL(TimeStep%Unit)
END IF
!Prepare time unit of flow line
WRITE (Text,'(5X,A6,15X,A8)') TimeStep%Unit,'/ TUNITQ'
!Print header
CALL OutFile%WriteData('C'//REPEAT('*',79))
CALL OutFile%WriteData('C ***** STREAM FLOWS AT '//TRIM(SimulationTime))
CALL OutFile%WriteData('C'//REPEAT('*',79))
CALL OutFile%WriteData('C')
CALL OutFile%WriteData('C'//REPEAT('-',79))
CALL OutFile%WriteData(' 1 / ICTYPE')
CALL OutFile%WriteData(TRIM(Text))
CALL OutFile%WriteData(' 1.0 / FACTHQ')
CALL OutFile%WriteData('C'//REPEAT('-',79))
CALL OutFile%WriteData('C IR HQS')
CALL OutFile%WriteData('C'//REPEAT('-',79))
!Print final flows
DO indxNode=1,SIZE(Flows)
WRITE (Text,'(I8,F18.3)') indxNode,Flows(indxNode)
CALL OutFile%WriteData(TRIM(Text))
END DO
END SUBROUTINE PrintFinalFlows
! -------------------------------------------------------------
! --- WRITE PREPROCESSED DATA
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_WritePreprocessedData(AppStream,OutFile)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
TYPE(GenericFileType) :: OutFile
!Local variables
CHARACTER(:),ALLOCATABLE :: cVersionLocal
!Write version number
cVersionLocal = AppStream%Version%GetVersion()
CALL OutFile%WriteData(LEN(cVersionLocal))
CALL OutFile%WriteData(cVersionLocal)
!Routed/non-routed flag
CALL OutFile%WriteData(AppStream%lRouted)
!Write dimensions
CALL OutFile%WriteData(AppStream%NStrmNodes)
CALL OutFile%WriteData(AppStream%NReaches)
!Write node data
CALL StrmNode_v50_WritePreprocessedData(AppStream%Nodes,OutFile)
!Write reach data
CALL StrmReach_WritePreprocessedData(AppStream%Reaches,OutFile)
END SUBROUTINE AppStream_v50_WritePreprocessedData
! -------------------------------------------------------------
! --- WRITE PREPROCESSED DATA TO TEXT FILE
! --- Note: Assumes Standard Output File is opened
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_WriteDataToTextFile(AppStream,iGWNodeIDs,UNITLTOU,FACTLTOU,Stratigraphy,StrmGWConnector,iStat)
CLASS(AppStream_v50_Type),INTENT(IN) :: AppStream
INTEGER,INTENT(IN) :: iGWNodeIDs(:)
CHARACTER(LEN=*),INTENT(IN) :: UNITLTOU
REAL(8),INTENT(IN) :: FACTLTOU
TYPE(StratigraphyType),INTENT(IN) :: Stratigraphy
TYPE(StrmGWConnectorType),INTENT(IN) :: StrmGWConnector
INTEGER,INTENT(OUT) :: iStat
!Local variables
INTEGER :: indxReach,indxNode,iGWNode,iLayer,ErrorCode,iGWNodeID,iStrmNodeIDs(AppStream%NStrmNodes)
REAL(8) :: GSElev,AquiferBottom,StrmBottom,DELZ,DELA
INTEGER,ALLOCATABLE :: UpstrmNodes(:),iGWNodes(:)
CHARACTER :: ALine*1000
!Initialize
iStat = 0
iStrmNodeIDs = AppStream%Nodes%ID
!If there are no streams, write relevant information and return
IF (AppStream%NStrmNodes .EQ. 0) THEN
CALL LogMessage('***** THERE ARE NO STREAM NODES *****',iMessage,'',FILE)
RETURN
END IF
!Initialize
CALL StrmGWConnector%GetAllGWNodes(iGWNodes)
!Write titles
CALL LogMessage(' REACH STREAM GRID GROUND INVERT AQUIFER ALLUVIAL UPSTREAM',iMessage,'',FILE)
CALL LogMessage(' NO. NO. NO. ELEV. ELEV. DEPTH BOTTOM THICKNESS NODES',iMessage,'',FILE)
CALL LogMessage(' (ALL UNITS ARE IN '//TRIM(UNITLTOU)//')',iMessage,'',FILE)
!Write stream reach data
DO indxReach=1,AppStream%NReaches
DO indxNode=AppStream%Reaches(indxReach)%UpstrmNode,AppStream%Reaches(indxReach)%DownstrmNode
iGWNode = iGWNodes(indxNode)
iGWNodeID = iGWNodeIDs(iGWNode)
iLayer = Stratigraphy%TopActiveLayer(iGWNode)
GSElev = Stratigraphy%GSElev(iGWNode)
AquiferBottom = Stratigraphy%BottomElev(iGWNode,iLayer)
StrmBottom = AppStream%Nodes(indxNode)%BottomElev
DELZ = GSElev - StrmBottom
DELA = StrmBottom - AquiferBottom
CALL AppStream_v50_GetUpstrmNodes(AppStream,indxNode,UpstrmNodes)
WRITE (ALine,'(1X,3I6,5F10.1,5X,10(I4,1X))') AppStream%Reaches(indxReach)%ID , &
iStrmNodeIDs(indxNode) , &
iGWNodeID , &
GSElev*FACTLTOU , &
StrmBottom*FACTLTOU , &
DELZ*FACTLTOU , &
AquiferBottom*FACTLTOU , &
DELA*FACTLTOU , &
iStrmNodeIDs(UpstrmNodes)
CALL LogMessage(TRIM(ALine),iMessage,'',FILE)
END DO
CALL LogMessage('',iMessage,'',FILE)
END DO
!Clear memory
DEALLOCATE (UpstrmNodes , iGWNodes , STAT=ErrorCode)
END SUBROUTINE AppStream_v50_WriteDataToTextFile
! -------------------------------------------------------------
! --- WRITE RAW STREAM NODE BUDGET DATA
! -------------------------------------------------------------
SUBROUTINE WriteStrmNodeFlowsToBudRawFile(QTRIB,QROFF,QRTRN,QDRAIN,QRVET,StrmGWConnector,StrmLakeConnector,AppStream)
REAL(8),DIMENSION(:),INTENT(IN) :: QTRIB,QROFF,QRTRN,QDRAIN,QRVET
TYPE(StrmGWConnectorType),INTENT(IN) :: StrmGWConnector
TYPE(StrmLakeConnectorType),INTENT(IN) :: StrmLakeConnector
TYPE(AppStream_v50_Type) :: AppStream
!Local variables
INTEGER :: iNode,indxNode
REAL(8) :: DummyArray(NStrmBudColumns,AppStream%StrmNodeBudget%NBudNodes)
REAL(8),DIMENSION(AppStream%StrmNodeBudget%NBudNodes) :: UpstrmFlows,DownstrmFlows,TributaryFlows,DrainInflows, &
Runoff,ReturnFlows,StrmGWFlows_InModel,LakeInflows,Error, &
Diversions,Bypasses,DiversionShorts,RiparianET,StorChange, &
StrmGWFlows_OutModel
INTEGER,ALLOCATABLE :: UpstrmNodes(:)
ASSOCIATE (pNodes => AppStream%Nodes , &
pState => AppStream%State )
!Iterate over nodes
DO indxNode=1,AppStream%StrmNodeBudget%NBudNodes
iNode = AppStream%StrmNodeBudget%iBudNodes(indxNode)
!Upstream flows
CALL AppStream%GetUpstrmNodes(iNode,UpstrmNodes)
UpstrmFlows(indxNode) = SUM(pState(UpStrmNodes)%Flow)
IF (AppStream%StrmInflowData%lDefined) UpstrmFlows(indxNode) = UpstrmFlows(indxNode) + AppStream%StrmInflowData%Inflows(iNode)
!Downstream flows
DownstrmFlows(indxNode) = AppStream%State(iNode)%Flow
!Change in storage
StorChange(indxNode) = AppStream%StorChange(iNode)
!Tributary flows
TributaryFlows(indxNode) = QTRIB(iNode)
!Inflows from tile drains
DrainInflows(indxNode) = QDRAIN(iNode)
!Runoff
Runoff(indxNode) = QROFF(iNode)
!Return flow
ReturnFlows(indxNode) = QRTRN(iNode)
!Stream-gw interaction occurring inside the model
!(+: flow from stream to groundwater, so multiply by -1 to represent gain from GW)
StrmGWFlows_InModel(indxNode) = - StrmGWConnector%GetFlowAtSomeStrmNodes(iNode,iNode,lInsideModel=.TRUE.)
!Stream-gw interaction occurring outside the model
!(+: flow from stream to groundwater, so multiply by -1 to represent gain from GW)
StrmGWFlows_OutModel(indxNode) = - StrmGWConnector%GetFlowAtSomeStrmNodes(iNode,iNode,lInsideModel=.FALSE.)
!Inflow from lakes
LakeInflows(indxNode) = StrmLakeConnector%GetFlow(f_iLakeToStrmFlow,iNode)
!Riparian ET
RiparianET(indxNode) = QRVET(iNode)
END DO
END ASSOCIATE
!Diversions
Diversions = AppStream%AppDiverBypass%GetNodeDiversions(AppStream%StrmNodeBudget%iBudNodes)
!Bypasses
Bypasses = AppStream%AppDiverBypass%GetNodeNetBypass(AppStream%StrmNodeBudget%iBudNodes)
!Error
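!(budget closure: inflows minus outflows minus change in storage;
! a nonzero value means the nodal water balance does not close exactly)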
Error = UpstrmFlows &
- DownstrmFlows &
- StorChange &
+ TributaryFlows &
+ DrainInflows &
+ Runoff &
+ ReturnFlows &
+ StrmGWFlows_InModel &
+ StrmGWFlows_OutModel &
+ LakeInflows &
- RiparianET &
- Diversions &
- Bypasses
!Diversion shortages
DiversionShorts = AppStream%AppDiverBypass%GetNodeDiversionShort(AppStream%StrmNodeBudget%iBudNodes)
!Compile data in array
DummyArray(1,:) = UpstrmFlows
DummyArray(2,:) = DownstrmFlows
DummyArray(3,:) = StorChange
DummyArray(4,:) = TributaryFlows
DummyArray(5,:) = DrainInflows
DummyArray(6,:) = Runoff
DummyArray(7,:) = ReturnFlows
DummyArray(8,:) = StrmGWFlows_InModel
DummyArray(9,:) = StrmGWFlows_OutModel
DummyArray(10,:) = LakeInflows
DummyArray(11,:) = RiparianET
DummyArray(12,:) = Diversions
DummyArray(13,:) = Bypasses
DummyArray(14,:) = Error
DummyArray(15,:) = DiversionShorts
!Print out values to binary file
CALL AppStream%StrmNodeBudget%StrmNodeBudRawFile%WriteData(DummyArray)
END SUBROUTINE WriteStrmNodeFlowsToBudRawFile
! -------------------------------------------------------------
! --- WRITE RAW STREAM REACH BUDGET DATA
! -------------------------------------------------------------
SUBROUTINE WriteStrmReachFlowsToBudRawFile(QTRIB,QROFF,QRTRN,QDRAIN,QRVET,StrmGWConnector,StrmLakeConnector,AppStream)
REAL(8),DIMENSION(:),INTENT(IN) :: QTRIB,QROFF,QRTRN,QDRAIN,QRVET
TYPE(StrmGWConnectorType),INTENT(IN) :: StrmGWConnector
TYPE(StrmLakeConnectorType),INTENT(IN) :: StrmLakeConnector
TYPE(AppStream_v50_Type) :: AppStream
!Local variables
INTEGER :: indxReach,indxReach1,iNode,iUpstrmReach,iUpstrmNode, &
iDownstrmNode,indx,iReach
REAL(8) :: DummyArray(NStrmBudColumns,AppStream%NReaches)
REAL(8),DIMENSION(AppStream%NReaches) :: UpstrmFlows,DownstrmFlows,TributaryFlows,DrainInflows, &
Runoff,ReturnFlows,StrmGWFlows_InModel,LakeInflows,Error, &
Diversions,Bypasses,DiversionShorts,RiparianET,StorChange, &
StrmGWFlows_OutModel
!Initialize
UpstrmFlows = 0.0
!Iterate over reaches
DO indxReach=1,AppStream%NReaches
iReach = AppStream%iPrintReachBudgetOrder(indxReach)
iUpstrmNode = AppStream%Reaches(iReach)%UpstrmNode
iDownstrmNode = AppStream%Reaches(iReach)%DownstrmNode
!Upstream flows
DO indxReach1=1,AppStream%Reaches(iReach)%NUpstrmReaches
iUpstrmReach = AppStream%Reaches(iReach)%UpstrmReaches(indxReach1)
iNode = AppStream%Reaches(iUpstrmReach)%DownstrmNode
UpstrmFlows(iReach) = UpstrmFlows(iReach) + AppStream%State(iNode)%Flow
END DO
IF (AppStream%StrmInflowData%lDefined) UpstrmFlows(iReach) = UpstrmFlows(iReach) + SUM(AppStream%StrmInflowData%Inflows(iUpstrmNode:iDownstrmNode))
!Change in storage
StorChange(iReach) = SUM(AppStream%StorChange(iUpstrmNode:iDownstrmNode))
!Tributary flows
TributaryFlows(iReach) = SUM(QTRIB(iUpstrmNode:iDownstrmNode))
!Inflows from tile drains
DrainInflows(iReach) = SUM(QDRAIN(iUpstrmNode:iDownstrmNode))
!Runoff
Runoff(iReach) = SUM(QROFF(iUpstrmNode:iDownstrmNode))
!Return flow
ReturnFlows(iReach) = SUM(QRTRN(iUpstrmNode:iDownstrmNode))
!Stream-gw interaction inside the model
!(+: flow from stream to groundwater)
StrmGWFlows_InModel(iReach) = - StrmGWConnector%GetFlowAtSomeStrmNodes(iUpstrmNode,iDownstrmNode,lInsideModel=.TRUE.)
!Stream-gw interaction outside the model
!(+: flow from stream to groundwater)
StrmGWFlows_OutModel(iReach) = - StrmGWConnector%GetFlowAtSomeStrmNodes(iUpstrmNode,iDownstrmNode,lInsideModel=.FALSE.)
!Inflow from lakes
LakeInflows(iReach) = 0.0
DO indx=iUpstrmNode,iDownstrmNode
LakeInflows(iReach) = LakeInflows(iReach) + StrmLakeConnector%GetFlow(f_iLakeToStrmFlow,indx)
END DO
!Riparian ET
RiparianET(iReach) = SUM(QRVET(iUpstrmNode:iDownstrmNode))
END DO
!Downstream flows
DownstrmFlows = AppStream%State(AppStream%Reaches%DownstrmNode)%Flow
!Diversions
Diversions = AppStream%AppDiverBypass%GetReachDiversions(AppStream%NReaches,AppStream%Reaches)
!Bypasses
Bypasses = AppStream%AppDiverBypass%GetReachNetBypass(AppStream%NStrmNodes,AppStream%NReaches,AppStream%Reaches)
!Error
Error = UpstrmFlows &
- DownstrmFlows &
- StorChange &
+ TributaryFlows &
+ DrainInflows &
+ Runoff &
+ ReturnFlows &
+ StrmGWFlows_InModel &
+ StrmGWFlows_OutModel &
+ LakeInflows &
- RiparianET &
- Diversions &
- Bypasses
!Diversion shortages
DiversionShorts = AppStream%AppDiverBypass%GetReachDiversionShort(AppStream%NStrmNodes,AppStream%NReaches,AppStream%Reaches)
!Compile data in array
DummyArray(1,:) = UpstrmFlows
DummyArray(2,:) = DownstrmFlows
DummyArray(3,:) = StorChange
DummyArray(4,:) = TributaryFlows
DummyArray(5,:) = DrainInflows
DummyArray(6,:) = Runoff
DummyArray(7,:) = ReturnFlows
DummyArray(8,:) = StrmGWFlows_InModel
DummyArray(9,:) = StrmGWFlows_OutModel
DummyArray(10,:) = LakeInflows
DummyArray(11,:) = RiparianET
DummyArray(12,:) = Diversions
DummyArray(13,:) = Bypasses
DummyArray(14,:) = Error
DummyArray(15,:) = DiversionShorts
!Print out values to binary file
CALL AppStream%StrmReachBudRawFile%WriteData(DummyArray)
END SUBROUTINE WriteStrmReachFlowsToBudRawFile
! ******************************************************************
! ******************************************************************
! ******************************************************************
! ***
! *** MISC. METHODS
! ***
! ******************************************************************
! ******************************************************************
! ******************************************************************
! -------------------------------------------------------------
! --- ADVANCE STATE OF STREAMS IN TIME
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_AdvanceState(AppStream)
CLASS(AppStream_v50_Type) :: AppStream
!Local variables
INTEGER :: indxNode
AppStream%State%Head_P = AppStream%State%Head
DO indxNode=1,AppStream%NStrmNodes
AppStream%Nodes(indxNode)%Area_P = AppStream%Nodes(indxNode)%Area(AppStream%State(indxNode)%Head_P)
END DO
END SUBROUTINE AppStream_v50_AdvanceState
! -------------------------------------------------------------
! --- CONVERT STREAM FLOWS TO STREAM SURFACE ELEVATIONS
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_ConvertFlowToElev(AppStream)
CLASS(AppStream_v50_Type) :: AppStream
!Local variables
INTEGER :: indxNode
DO indxNode=1,AppStream%NStrmNodes
AppStream%State(indxNode)%Head = AppStream%Nodes(indxNode)%Head(AppStream%State(indxNode)%Flow)
END DO
END SUBROUTINE AppStream_v50_ConvertFlowToElev
! -------------------------------------------------------------
! --- CONVERT TIME UNIT OF STREAMS RELATED ENTITIES
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_ConvertTimeUnit(AppStream,NewUnit)
CLASS(AppStream_v50_Type) :: AppStream
CHARACTER(LEN=*),INTENT(IN) :: NewUnit
!Local variables
INTEGER :: indxNode
REAL(8) :: Factor
!Convert bypass rating table time units
CALL AppStream%AppDiverBypass%ConvertTimeUnit(NewUnit)
!Convert time unit of Manning's roughness coefficient (seconds) to simulation unit of time
Factor = TimeIntervalConversion(NewUnit,'1MIN') * 60D0 !Must multiply by 60 to convert minutes to seconds
IF (Factor .NE. 1.0) THEN
DO indxNode=1,AppStream%NStrmNodes
AppStream%Nodes(indxNode)%CrossSection%n = AppStream%Nodes(indxNode)%CrossSection%n / Factor
!Update the initial flows
AppStream%State(indxNode)%Flow = AppStream%State(indxNode)%Flow * Factor
END DO
END IF
END SUBROUTINE AppStream_v50_ConvertTimeUnit
! -------------------------------------------------------------
! --- CALCULATE STREAM FLOWS
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_Simulate(AppStream,GWHeads,Runoff,ReturnFlow,TributaryFlow,DrainInflows,RiparianET,RiparianETFrac,StrmGWConnector,StrmLakeConnector,Matrix)
CLASS(AppStream_v50_Type) :: AppStream
REAL(8),INTENT(IN) :: GWHeads(:,:),Runoff(:),ReturnFlow(:),TributaryFlow(:),DrainInflows(:),RiparianET(:)
REAL(8),INTENT(OUT) :: RiparianETFrac(:)
TYPE(StrmGWConnectorType) :: StrmGWConnector
TYPE(StrmLakeConnectorType) :: StrmLakeConnector
TYPE(MatrixType) :: Matrix
!Local variables
INTEGER :: indx,indxReach,indxNode,iUpstrmNode,indxUpstrmNode,iNodes_Connect(20), &
iUpNode,ErrorCode,iDownstrmNode,inConnectedNodes,NNodes,NDiver,iDim
REAL(8) :: rHead,rInflow,rOutflow,rBypass_Recieved,rUpdateCoeff(20),rCoeff,rDeltaT, &
rFlow,RHSMin,rBypassOut,rRipET
REAL(8),DIMENSION(AppStream%NStrmNodes) :: rBCInflows,rUpdateRHS,HRG,rAvailableFlows,rdArea_dStage,rdFlow_dStage,rArea, &
rStrmGWFlow_AtMinHead
INTEGER,ALLOCATABLE :: iStrmNodes(:),iLakes(:)
INTEGER,PARAMETER :: iCompIDs_Connect(20) = f_iStrmComp
!Inform user about simulation progress
CALL EchoProgress('Simulating stream flows')
!Initialize
NNodes = SIZE(GWHeads , DIM=1)
NDiver = AppStream%AppDiverBypass%NDiver
rDeltaT = AppStream%DeltaT
rBCInflows = AppStream%StrmInflowData%GetInflows_AtAllNodes(AppStream%NStrmNodes)
CALL StrmLakeConnector%ResetStrmToLakeFlows()
!Get groundwater heads at stream nodes
CALL StrmGWConnector%GetGWHeadsAtStrmNodes(GWHeads,HRG)
!Stream-gw interaction at minimum stream head (= stream bottom elevation)
CALL StrmGWConnector%ComputeStrmGWFlow_AtMinHead(AppStream%Nodes%BottomElev,HRG,AppStream%Nodes%MaxElev,AppStream%Nodes,rStrmGWFlow_AtMinHead)
!Initialize bypass flows to zero (only for those that originate within the model)
DO indx=1,AppStream%AppDiverBypass%NBypass
IF (AppStream%AppDiverBypass%Bypasses(indx)%iNode_Exp .GT. 0) THEN
AppStream%AppDiverBypass%Bypasses(indx)%Bypass_Out = 0.0
AppStream%AppDiverBypass%Bypasses(indx)%Bypass_Received = 0.0
END IF
END DO
!Calculate area, flow, and derivatives of area and flow for all nodes
DO indxNode=1,AppStream%NStrmNodes
rHead = AppStream%State(indxNode)%Head
AppStream%State(indxNode)%Flow = AppStream%Nodes(indxNode)%Flow(rHead)
rdArea_dStage(indxNode) = AppStream%Nodes(indxNode)%dArea(rHead)
rdFlow_dStage(indxNode) = AppStream%Nodes(indxNode)%dFlow(rHead)
rArea(indxNode) = AppStream%Nodes(indxNode)%Area(rHead)
END DO
!Update the matrix equation
DO indxReach=1,AppStream%NReaches
iUpstrmNode = AppStream%Reaches(indxReach)%UpstrmNode
iDownstrmNode = AppStream%Reaches(indxReach)%DownstrmNode
DO indxNode=iUpstrmNode,iDownstrmNode
!Initialize
rFlow = AppStream%State(indxNode)%Flow
rInflow = 0.0
rOutflow = 0.0
!Received bypass
rBypass_Recieved = AppStream%AppDiverBypass%GetBypassReceived_AtADestination(f_iFlowDest_StrmNode,indxNode)
!Inflows at the stream node with known values
rInflow = rBCInflows(indxNode) & !Inflow as defined by the user
+ Runoff(indxNode) & !Direct runoff of precipitation
+ ReturnFlow(indxNode) & !Return flow of applied water
+ TributaryFlow(indxNode) & !Tributary inflows from small watersheds and creeks
+ DrainInflows(indxNode) & !Inflow from tile drains
+ rBypass_Recieved & !Received by-pass flows
+ StrmLakeConnector%GetFlow(f_iLakeToStrmFlow,indxNode) !Flows from lake outflow
!Inflows from upstream nodes
DO indxUpstrmNode=1,AppStream%Nodes(indxNode)%Connectivity%nConnectedNodes
iUpNode = AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes(indxUpstrmNode)
rInflow = rInflow + AppStream%State(iUpNode)%Flow
END DO
!Initial estimate of available flow for outflow terms
IF (indxNode .EQ. iUpstrmNode) THEN
rCoeff = AppStream%Nodes(indxNode)%Length / rDeltaT
rAvailableFlows(indxNode) = rInflow - rCoeff * (rArea(indxNode) - AppStream%Nodes(indxNode)%Area_P)
rAvailableFlows(indxNode) = MAX(rAvailableFlows(indxNode) , 0.0)
ELSE
rCoeff = 0.5d0 * AppStream%Nodes(indxNode)%Length / rDeltaT
rAvailableFlows(indxNode) = rInflow - rCoeff * (rArea(indxNode) + rArea(indxNode-1) - AppStream%Nodes(indxNode)%Area_P - AppStream%Nodes(indxNode-1)%Area_P)
rAvailableFlows(indxNode) = MAX(rAvailableFlows(indxNode) , 0.0)
END IF
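!(note the storage discretization above: the most upstream node of a reach uses
! the full segment length, while interior nodes average the wetted areas of the
! current and upstream nodes with a half-length coefficient)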
!Diversion
IF (NDiver .GT. 0) THEN
rOutFlow = MIN(rAvailableFlows(indxNode) , AppStream%AppDiverBypass%NodalDiverRequired(indxNode))
AppStream%AppDiverBypass%NodalDiverActual(indxNode) = rOutflow
rAvailableFlows(indxNode) = rAvailableFlows(indxNode) - rOutflow
END IF
!Bypass
CALL AppStream%AppDiverBypass%ComputeBypass(indxNode,rAvailableFlows(indxNode),StrmLakeConnector,rBypassOut)
rOutflow = rOutflow + rBypassOut
rAvailableFlows(indxNode) = rAvailableFlows(indxNode) - rBypassOut
!Riparian ET outflow
IF (RiparianET(indxNode) .GT. 0.0) THEN
rRipET = MIN(RiparianET(indxNode) , rAvailableFlows(indxNode))
RiparianETFrac(indxNode) = rRipET / RiparianET(indxNode)
rOutflow = rOutflow + rRipET
rAvailableFlows(indxNode) = rAvailableFlows(indxNode) - rRipET
ELSE
RiparianETFrac(indxNode) = 0.0
END IF
!Compute the matrix rhs function and its derivatives w.r.t. stream elevation
!----------------------------------------------------------------------------
!First node of each reach is treated as boundary node
IF (indxNode .EQ. iUpstrmNode) THEN
!RHS function at minimum stream head (to be used for storage correction)
RHSMin = -rCoeff * AppStream%Nodes(indxNode)%Area_P - rInflow + rStrmGWFlow_AtMinHead(indxNode)
RHSMin = MAX(RHSMin , 0.0)
!Rate of change in storage
AppStream%StorChange(indxNode) = rCoeff * (rArea(indxNode) - AppStream%Nodes(indxNode)%Area_P) - RHSMin
!RHS function
rUpdateRHS(indxNode) = AppStream%StorChange(indxNode) + rFlow - rInflow + rOutFlow
!Update Jacobian - entries for stream node
iNodes_Connect(1) = indxNode
rUpdateCoeff(1) = rCoeff * rdArea_dStage(indxNode) + rdFlow_dStage(indxNode)
inConnectedNodes = AppStream%Nodes(indxNode)%Connectivity%nConnectedNodes
IF (rFlow .EQ. 0.0) THEN
!If flow is zero because of outflows or correction in the storage, do not consider the effect of upstream nodes on the gradient
IF (rOutFlow .GT. 0.0 .OR. RHSMin .GT. 0.0) THEN
iNodes_Connect(2:1+inConnectedNodes) = AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes
rUpdateCoeff(2:1+inConnectedNodes) = 0.0
!Otherwise, normal calculations for the derivative w.r.t. upstream nodes
ELSE
DO indxUpstrmNode=1,inConnectedNodes
iUpNode = AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes(indxUpstrmNode)
iNodes_Connect(1+indxUpstrmNode) = iUpNode
rUpdateCoeff(1+indxUpstrmNode) = -rdFlow_dStage(iUpNode)
END DO
END IF
ELSE
!Normal calculations for the derivative w.r.t. upstream nodes
DO indxUpstrmNode=1,inConnectedNodes
iUpNode = AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes(indxUpstrmNode)
iNodes_Connect(1+indxUpstrmNode) = iUpNode
rUpdateCoeff(1+indxUpstrmNode) = -rdFlow_dStage(iUpNode)
END DO
END IF
iDim = inConnectedNodes + 1
CALL Matrix%UpdateCOEFF(f_iStrmComp,indxNode,iDim,iCompIDs_Connect(1:iDim),iNodes_Connect(1:iDim),rUpdateCoeff(1:iDim))
!Otherwise, treat the node normally
ELSE
!RHS function at minimum stream head (to be used for storage correction)
RHSMin = rCoeff * (rArea(indxNode-1) - AppStream%Nodes(indxNode)%Area_P - AppStream%Nodes(indxNode-1)%Area_P) &
- rInflow + rStrmGWFlow_AtMinHead(indxNode)
RHSMin = MAX(RHSMin , 0.0)
!Rate of change in storage
AppStream%StorChange(indxNode) = rCoeff * (rArea(indxNode) + rArea(indxNode-1) - AppStream%Nodes(indxNode)%Area_P - AppStream%Nodes(indxNode-1)%Area_P) - RHSMin
!RHS function
rUpdateRHS(indxNode) = AppStream%StorChange(indxNode) + rFlow - rInflow + rOutflow
!Update Jacobian - entries for stream node
iNodes_Connect(1) = indxNode
rUpdateCoeff(1) = rCoeff * rdArea_dStage(indxNode) + rdFlow_dStage(indxNode)
inConnectedNodes = AppStream%Nodes(indxNode)%Connectivity%nConnectedNodes
IF (rFlow .EQ. 0.0) THEN
!If flow is zero because of outflows or correction in the storage, do not consider the effect of upstream nodes on the gradient
IF (rOutFlow .GT. 0.0 .OR. RHSMin .GT. 0.0) THEN
iNodes_Connect(2:1+inConnectedNodes) = AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes
rUpdateCoeff(2:1+inConnectedNodes) = 0.0
!Otherwise, normal calculations for the derivative w.r.t. upstream nodes
ELSE
DO indxUpstrmNode=1,inConnectedNodes
iUpNode = AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes(indxUpstrmNode)
iNodes_Connect(1+indxUpstrmNode) = iUpNode
IF (iUpNode .EQ. indxNode-1) THEN
rUpdateCoeff(1+indxUpstrmNode) = rCoeff * rdArea_dStage(indxNode-1) - rdFlow_dStage(indxNode-1)
ELSE
rUpdateCoeff(1+indxUpstrmNode) = -rdFlow_dStage(iUpNode)
END IF
END DO
END IF
ELSE
!Normal calculations for the derivative w.r.t. upstream nodes
DO indxUpstrmNode=1,inConnectedNodes
iUpNode = AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes(indxUpstrmNode)
iNodes_Connect(1+indxUpstrmNode) = iUpNode
IF (iUpNode .EQ. indxNode-1) THEN
rUpdateCoeff(1+indxUpstrmNode) = rCoeff * rdArea_dStage(indxNode-1) - rdFlow_dStage(indxNode-1)
ELSE
rUpdateCoeff(1+indxUpstrmNode) = -rdFlow_dStage(iUpNode)
END IF
END DO
END IF
iDim = inConnectedNodes + 1
CALL Matrix%UpdateCOEFF(f_iStrmComp,indxNode,iDim,iCompIDs_Connect(1:iDim),iNodes_Connect(1:iDim),rUpdateCoeff(1:iDim))
END IF
END DO
END DO
!Update RHS vector
CALL Matrix%UpdateRHS(f_iStrmComp,1,rUpdateRHS)
!Stream flows to lakes
CALL StrmLakeConnector%GetSourceIDs(f_iStrmToLakeFlow,iStrmNodes)
CALL StrmLakeConnector%GetDestinationIDs(f_iStrmToLakeFlow,iLakes)
DO indxNode=1,SIZE(iStrmNodes)
CALL StrmLakeConnector%SetFlow(f_iStrmToLakeFlow,iStrmNodes(indxNode),iLakes(indxNode),AppStream%State(iStrmNodes(indxNode))%Flow)
END DO
!Simulate diversion related flows
CALL AppStream%AppDiverBypass%ComputeDiversions(AppStream%NStrmNodes)
!Simulate stream-gw interaction
CALL StrmGWConnector%Simulate(NNodes,HRG,AppStream%State%Head,rAvailableFlows,Matrix,AppStream%Nodes,AppStream%Nodes%MaxElev)
!Clear memory
DEALLOCATE (iStrmNodes , iLakes , STAT=ErrorCode)
END SUBROUTINE AppStream_v50_Simulate
! -------------------------------------------------------------
! --- COMPILE LIST OF NODES DRAINING INTO A NODE
! -------------------------------------------------------------
SUBROUTINE CompileUpstrmNodes(AppStream)
TYPE(AppStream_v50_Type) :: AppStream
!Local variables
INTEGER :: indxReach,iCount,TempNodes(50),indxNode,indxReach1,iUpstrmNode,iDownstrmNode
!Iterate over each reach and node
DO indxReach=AppStream%NReaches,1,-1
iUpstrmNode = AppStream%Reaches(indxReach)%UpstrmNode
iDownstrmNode = AppStream%Reaches(indxReach)%DownstrmNode
DO indxNode=iUpstrmNode,iDownstrmNode
!Initialize counter
iCount = 0
!indxNode is the first node in the reach
IF (indxNode .EQ. iUpstrmNode) THEN
DO indxReach1=1,indxReach-1
IF (AppStream%Reaches(indxReach1)%OutflowDestType .NE. f_iFlowDest_StrmNode) CYCLE
IF (AppStream%Reaches(indxReach1)%OutflowDest .EQ. indxNode) THEN
iCount = iCount + 1
TempNodes(iCount) = AppStream%Reaches(indxReach1)%DownstrmNode
END IF
END DO
ALLOCATE (AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes(iCount))
AppStream%Nodes(indxNode)%Connectivity%nConnectedNodes = iCount
AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes = TempNodes(1:iCount)
!indxNode is not the first node in the reach
ELSE
iCount = 1
TempNodes(1) = indxNode - 1
DO indxReach1=1,indxReach-1
IF (AppStream%Reaches(indxReach1)%OutflowDestType .NE. f_iFlowDest_StrmNode) CYCLE
IF (AppStream%Reaches(indxReach1)%OutflowDest .EQ. indxNode) THEN
iCount = iCount + 1
TempNodes(iCount) = AppStream%Reaches(indxReach1)%DownstrmNode
END IF
END DO
ALLOCATE (AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes(iCount))
AppStream%Nodes(indxNode)%Connectivity%nConnectedNodes = iCount
AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes = TempNodes(1:iCount)
END IF
!Order the upstream nodes
CALL ShellSort(AppStream%Nodes(indxNode)%Connectivity%ConnectedNodes)
END DO
END DO
END SUBROUTINE CompileUpstrmNodes
! -------------------------------------------------------------
! --- MODIFY HEADS USING DELTA_HEADS
! -------------------------------------------------------------
SUBROUTINE AppStream_v50_UpdateHeads(AppStream,HDelta)
CLASS(AppStream_v50_Type) :: AppStream
REAL(8),INTENT(IN) :: HDelta(:)
AppStream%State%Head = MIN(MAX(AppStream%State%Head-HDelta , AppStream%Nodes%BottomElev) , AppStream%Nodes%MaxElev)
END SUBROUTINE AppStream_v50_UpdateHeads
! -------------------------------------------------------------
! --- COMPILE DISTANCE BETWEEN NODES, SLOPE AND LENGTH ASSOCIATED WITH EACH NODE
! -------------------------------------------------------------
SUBROUTINE CompileDistanceLengthSlope(GWNodes,AppGrid,AppStream,iStat)
INTEGER,INTENT(IN) :: GWNodes(:)
TYPE(AppGridType),INTENT(IN) :: AppGrid
TYPE(AppStream_v50_Type) :: AppStream
INTEGER,INTENT(OUT) :: iStat
!Local variables
CHARACTER(LEN=ModNameLen+26) :: ThisProcedure = ModName // 'CompileDistanceLengthSlope'
INTEGER :: indxReach,indxNode,iUpstrmNode,iDownstrmNode,iGWDownstrmNode,iGWNode
REAL(8) :: B_Distance,F_Distance,CA,CB
!Initialize
iStat = 0
ASSOCIATE (pReaches => AppStream%Reaches , &
pNodes => AppStream%Nodes )
!Loop over reaches
DO indxReach=1,AppStream%NReaches
iUpstrmNode = pReaches(indxReach)%UpstrmNode
iDownstrmNode = pReaches(indxReach)%DownstrmNode
B_Distance = 0.0
iGWNode = GWNodes(iUpstrmNode)
!Loop over nodes
DO indxNode=iUpstrmNode,iDownstrmNode-1
!Corresponding GW nodes
iGWDownstrmNode = GWNodes(indxNode+1)
!Distance to the next node
CA = AppGrid%X(iGWDownstrmNode) - AppGrid%X(iGWNode)
CB = AppGrid%Y(iGWDownstrmNode) - AppGrid%Y(iGWNode)
F_Distance = SQRT(CA*CA + CB*CB)
!Stream segment length
pNodes(indxNode)%Length = (F_Distance + B_Distance) / 2d0
!Slope
IF (indxNode .GT. iUpstrmNode) THEN
pNodes(indxNode)%Slope = (pNodes(indxNode-1)%BottomElev - pNodes(indxNode)%BottomElev) / B_Distance
IF (pNodes(indxNode)%Slope .LE. 0.0) THEN
CALL SetLastMessage('Slope at stream node '//TRIM(IntToText(AppStream%Nodes(indxNode)%ID))//' must be greater than zero!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END IF
!Advance variables
iGWNode = iGWDownstrmNode
B_Distance = F_Distance
END DO
pNodes(iDownstrmNode)%Length = B_Distance / 2d0
!Slope at the first and last node of reach
pNodes(iDownstrmNode)%Slope = (pNodes(iDownstrmNode-1)%BottomElev - pNodes(iDownstrmNode)%BottomElev) / B_Distance
pNodes(iUpstrmNode)%Slope = pNodes(iUpstrmNode+1)%Slope
IF (pNodes(iUpstrmNode)%Slope .LE. 0.0) THEN
CALL SetLastMessage('Slope at stream node '//TRIM(IntToText(AppStream%Nodes(iUpstrmNode)%ID))//' must be greater than zero!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
IF (pNodes(iDownstrmNode)%Slope .LE. 0.0) THEN
CALL SetLastMessage('Slope at stream node '//TRIM(IntToText(AppStream%Nodes(iDownstrmNode)%ID))//' must be greater than zero!',iFatal,ThisProcedure)
iStat = -1
RETURN
END IF
END DO
END ASSOCIATE
END SUBROUTINE CompileDistanceLengthSlope
! -------------------------------------------------------------
! --- FUNCTION TO PREPARE THE BUDGET HEADER DATA FOR STREAM BUDGETS
! --- (REDEFINES THE PROCEDURE IN Class_BaseAppStream WITH THE SAME NAME)
! -------------------------------------------------------------
FUNCTION PrepareStreamBudgetHeader(NLocations,iPrintReachBudgetOrder,iReachIDs,iStrmNodeIDs,NTIME,TimeStep,cVersion,cReachNames,iBudNodes) RESULT(Header)
INTEGER,INTENT(IN) :: NLocations,iPrintReachBudgetOrder(:),iReachIDs(:),iStrmNodeIDs(:),NTIME
TYPE(TimeStepType),INTENT(IN) :: TimeStep
CHARACTER(LEN=*),INTENT(IN) :: cVersion
CHARACTER(LEN=*),OPTIONAL,INTENT(IN) :: cReachNames(:)
INTEGER,OPTIONAL,INTENT(IN) :: iBudNodes(:)
TYPE(BudgetHeaderType) :: Header
!Local variables
INTEGER,PARAMETER :: TitleLen = 212 , &
NTitles = 3 , &
NColumnHeaderLines = 4
INTEGER :: iCount,indxLocation,indxCol,indx,I,ID,iReach
TYPE(TimeStepType) :: TimeStepLocal
CHARACTER :: UnitT*10,TextTime*17
LOGICAL :: lNodeBudOutput
CHARACTER(LEN=21),PARAMETER :: FParts(NStrmBudColumns)=[CHARACTER(LEN=21) :: 'UPSTRM_INFLOW' , &
'DOWNSTRM_OUTFLOW' , &
'STORAGE_CHANGE' , &
'TRIB_INFLOW' , &
'TILE_DRN' , &
'RUNOFF' , &
'RETURN_FLOW' , &
'GAIN_FROM_GW_INMODEL' , &
'GAIN_FROM_GW_OUTMODEL' , &
'GAIN_FROM_LAKE' , &
'RIPARIAN_ET' , &
'DIVERSION' , &
'BYPASS' , &
'DISCREPANCY' , &
'DIVER_SHORTAGE' ]
!Initialize flag for budget type
IF (PRESENT(iBudNodes)) THEN
lNodeBudOutput = .TRUE.
ELSE
lNodeBudOutput = .FALSE.
END IF
!Increment the initial simulation time to represent the data begin date for budget binary output files
TimeStepLocal = TimeStep
IF (TimeStep%TrackTime) THEN
TimeStepLocal%CurrentDateAndTime = IncrementTimeStamp(TimeStepLocal%CurrentDateAndTime,TimeStepLocal%DeltaT_InMinutes)
UnitT = ''
ELSE
TimeStepLocal%CurrentTime = TimeStepLocal%CurrentTime + TimeStepLocal%DeltaT
UnitT = '('//TRIM(TimeStep%Unit)//')'
END IF
TextTime = ArrangeText(TRIM(UnitT),17)
!Budget descriptor
IF (lNodeBudOutput) THEN
Header%cBudgetDescriptor = 'stream node budget'
ELSE
Header%cBudgetDescriptor = 'stream reach budget'
END IF
!Simulation time related data
Header%NTimeSteps = NTIME
Header%TimeStep = TimeStepLocal
!Areas
Header%NAreas = 0
ALLOCATE (Header%Areas(0))
!Data for ASCII output
ASSOCIATE (pASCIIOutput => Header%ASCIIOutput)
pASCIIOutput%TitleLen = TitleLen
pASCIIOutput%NTitles = NTitles
ALLOCATE(pASCIIOutput%cTitles(NTitles) , pASCIIOutput%lTitlePersist(NTitles))
pASCIIOutput%cTitles(1) = ArrangeText('IWFM STREAM PACKAGE (v'//TRIM(cVersion)//')' , pASCIIOutput%TitleLen)
pASCIIOutput%cTitles(2) = ArrangeText('STREAM FLOW BUDGET IN '//f_cVolumeUnitMarker//' FOR '//f_cLocationNameMarker , pASCIIOutput%TitleLen)
pASCIIOutput%cTitles(3) = REPEAT('-',pASCIIOutput%TitleLen)
pASCIIOutput%lTitlePersist(1:2) = .TRUE.
pASCIIOutput%lTitlePersist(3) = .FALSE.
pASCIIOutput%cFormatSpec = ADJUSTL('(A16,1X,50(F12.1,1X))')
pASCIIOutput%NColumnHeaderLines = NColumnHeaderLines
END ASSOCIATE
!Location names
Header%NLocations = NLocations
ALLOCATE (Header%cLocationNames(NLocations))
IF (lNodeBudOutput) THEN
DO indx=1,NLocations
ID = iStrmNodeIDs(iBudNodes(indx))
Header%cLocationNames(indx) = 'NODE '//TRIM(IntToText(ID))
END DO
ELSE
DO indx=1,NLocations
iReach = iPrintReachBudgetOrder(indx)
ID = iReachIDs(iReach)
Header%cLocationNames(indx) = TRIM(cReachNames(iReach)) // '(REACH '// TRIM(IntToText(ID)) // ')'
END DO
END IF
!Locations
ALLOCATE (Header%Locations(1) , &
Header%Locations(1)%cFullColumnHeaders(NStrmBudColumns+1) , &
Header%Locations(1)%iDataColumnTypes(NStrmBudColumns) , &
Header%Locations(1)%iColWidth(NStrmBudColumns+1) , &
Header%Locations(1)%cColumnHeaders(NStrmBudColumns+1,NColumnHeaderLines) , &
Header%Locations(1)%cColumnHeadersFormatSpec(NColumnHeaderLines) )
ASSOCIATE (pLocation => Header%Locations(1))
pLocation%NDataColumns = NStrmBudColumns
pLocation%cFullColumnHeaders(1) = 'Time'
pLocation%cFullColumnHeaders(2:) = cBudgetColumnTitles
pLocation%iDataColumnTypes = [VR ,& !Upstream inflow
VR ,& !Downstream outflow
VR ,& !Change in storage
VR ,& !Tributary inflow
VR ,& !Tile drain
VR ,& !Runoff
VR ,& !Return flow
VR ,& !Gain from GW inside model
VR ,& !Gain from GW outside model
VR ,& !Gain from lake
VR ,& !Riparian ET
VR ,& !Diversion
VR ,& !By-pass flow
VR ,& !Discrepancy
VR ] !Diversion shortage
pLocation%iColWidth = [17,(13,I=1,NStrmBudColumns)]
ASSOCIATE (pColumnHeaders => pLocation%cColumnHeaders , &
pFormatSpecs => pLocation%cColumnHeadersFormatSpec )
TextTime = ArrangeText(TRIM(UnitT),17)
pColumnHeaders(:,1) = [' ',' Upstream',' Downstream',' Change in ',' Tributary',' Tile ',' ',' Return','Gain from GW ',' Gain from GW',' Gain from',' Riparian ',' ',' By-pass',' ',' Diversion']
pColumnHeaders(:,2) = [' Time ',' Inflow ',' Outflow ',' Storage ',' Inflow ',' Drain',' Runoff',' Flow ','inside Model ','outside Model',' Lake ',' ET ',' Diversion',' Flow ',' Discrepancy',' Shortage ']
pColumnHeaders(:,3) = [ TextTime,' (+) ',' (-) ',' (-) ',' (+) ',' (+) ',' (+) ',' (+) ',' (+) ',' (+) ',' (+) ',' (-) ',' (-) ',' (-) ',' (=) ',' ']
pColumnHeaders(:,4) = ''
pFormatSpecs(1) = '(A17,15A13)'
pFormatSpecs(2) = '(A17,15A13)'
pFormatSpecs(3) = '(A17,15A13)'
pFormatSpecs(4) = '('//TRIM(IntToText(TitleLen))//'(1H-),'//TRIM(IntToText(NStrmBudColumns+1))//'A0)'
END ASSOCIATE
END ASSOCIATE
!Data for DSS output
ASSOCIATE (pDSSOutput => Header%DSSOutput)
ALLOCATE (pDSSOutput%cPathNames(NStrmBudColumns*(Header%NLocations)) , pDSSOutput%iDataTypes(1))
iCount = 1
IF (lNodeBudOutput) THEN
DO indxLocation=1,Header%NLocations
DO indxCol=1,NStrmBudColumns
pDSSOutput%cPathNames(iCount) = '/IWFM_STRMNODE_BUD/' // & !A part
TRIM(UpperCase(Header%cLocationNames(indxLocation)))//'/' // & !B part
'VOLUME/' // & !C part
'/' // & !D part
TRIM(TimeStep%Unit)//'/' // & !E part
TRIM(FParts(indxCol))//'/' !F part
iCount = iCount+1
END DO
END DO
ELSE
DO indxLocation=1,Header%NLocations
DO indxCol=1,NStrmBudColumns
pDSSOutput%cPathNames(iCount) = '/IWFM_STRMRCH_BUD/' // & !A part
TRIM(UpperCase(Header%cLocationNames(indxLocation)))//'/' // & !B part
'VOLUME/' // & !C part
'/' // & !D part
TRIM(TimeStep%Unit)//'/' // & !E part
TRIM(FParts(indxCol))//'/' !F part
iCount = iCount+1
END DO
END DO
END IF
pDSSOutput%iDataTypes = f_iPER_CUM
END ASSOCIATE
END FUNCTION PrepareStreamBudgetHeader
! -------------------------------------------------------------
! --- OBTAIN TOTAL NUMBER OF STREAM NODES (REDEFINES THE METHOD LISTED IN BaseAppStream CLASS)
! -------------------------------------------------------------
SUBROUTINE CalculateNStrmNodes(DataFile,NReaches,NStrmNodes,iStat)
TYPE(GenericFileType) :: DataFile
INTEGER,INTENT(IN) :: NReaches
INTEGER,INTENT(OUT) :: NStrmNodes,iStat
!Local variables
INTEGER :: indxReach,indxNode,iDummyArray(2),iDummy
CHARACTER :: ALine*7
!Initialize
iStat = 0
NStrmNodes = 0
!Read and accumulate number of stream nodes
DO indxReach=1,NReaches
CALL DataFile%ReadData(iDummyArray,iStat) ; IF (iStat .EQ. -1) RETURN
NStrmNodes = NStrmNodes + iDummyArray(2)
DO indxNode=1,iDummyArray(2)
CALL DataFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN
END DO
END DO
!Rewind the data file back to where it was
CALL DataFile%RewindFile()
CALL DataFile%ReadData(ALine,iStat) ; IF (iStat .EQ. -1) RETURN !Stream component version
CALL DataFile%ReadData(iDummy,iStat) ; IF (iStat .EQ. -1) RETURN !Number of reaches
END SUBROUTINE CalculateNStrmNodes
END MODULE
|
{"hexsha": "1d7a42ec8b2a764dff1f767fa0636797cac7a083", "size": 108945, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "code/SourceCode/Package_AppStream/VERSION_5.0/Class_AppStream_v50.f90", "max_stars_repo_name": "giorgk/IWFM-SAFE", "max_stars_repo_head_hexsha": "849abb22bd7b29c113d0537d228340abe6672b40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/SourceCode/Package_AppStream/VERSION_5.0/Class_AppStream_v50.f90", "max_issues_repo_name": "giorgk/IWFM-SAFE", "max_issues_repo_head_hexsha": "849abb22bd7b29c113d0537d228340abe6672b40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/SourceCode/Package_AppStream/VERSION_5.0/Class_AppStream_v50.f90", "max_forks_repo_name": "giorgk/IWFM-SAFE", "max_forks_repo_head_hexsha": "849abb22bd7b29c113d0537d228340abe6672b40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.6106557377, "max_line_length": 290, "alphanum_fraction": 0.5404745514, "num_tokens": 26143}
|
#=
This file is auto-generated. Do not edit.
=#
#! format: off
"""
mutable struct AndersonFouadMachine <: Machine
R::Float64
Xd::Float64
Xq::Float64
Xd_p::Float64
Xq_p::Float64
Xd_pp::Float64
Xq_pp::Float64
Td0_p::Float64
Tq0_p::Float64
Td0_pp::Float64
Tq0_pp::Float64
ext::Dict{String, Any}
states::Vector{Symbol}
n_states::Int
internal::InfrastructureSystemsInternal
end
Parameters of 6-states synchronous machine: Anderson-Fouad model
# Arguments
- `R::Float64`: Resistance after EMF in machine per unit, validation range: `(0, nothing)`
- `Xd::Float64`: Reactance after EMF in d-axis per unit, validation range: `(0, nothing)`
- `Xq::Float64`: Reactance after EMF in q-axis per unit, validation range: `(0, nothing)`
- `Xd_p::Float64`: Transient reactance after EMF in d-axis per unit, validation range: `(0, nothing)`
- `Xq_p::Float64`: Transient reactance after EMF in q-axis per unit, validation range: `(0, nothing)`
- `Xd_pp::Float64`: Sub-Transient reactance after EMF in d-axis per unit, validation range: `(0, nothing)`
- `Xq_pp::Float64`: Sub-Transient reactance after EMF in q-axis per unit, validation range: `(0, nothing)`
- `Td0_p::Float64`: Time constant of transient d-axis voltage, validation range: `(0, nothing)`
- `Tq0_p::Float64`: Time constant of transient q-axis voltage, validation range: `(0, nothing)`
- `Td0_pp::Float64`: Time constant of sub-transient d-axis voltage, validation range: `(0, nothing)`
- `Tq0_pp::Float64`: Time constant of sub-transient q-axis voltage, validation range: `(0, nothing)`
- `ext::Dict{String, Any}`
- `states::Vector{Symbol}`: The states are:
ψq: q-axis stator flux,
ψd: d-axis stator flux,
eq_p: q-axis transient voltage,
ed_p: d-axis transient voltage,
eq_pp: q-axis subtransient voltage,
ed_pp: d-axis subtransient voltage
- `n_states::Int`: AndersonFouadMachine has 6 states
- `internal::InfrastructureSystemsInternal`: power system internal reference, do not modify
"""
mutable struct AndersonFouadMachine <: Machine
    "Resistance after EMF in machine per unit"
    R::Float64
    "Reactance after EMF in d-axis per unit"
    Xd::Float64
    "Reactance after EMF in q-axis per unit"
    Xq::Float64
    "Transient reactance after EMF in d-axis per unit"
    Xd_p::Float64
    "Transient reactance after EMF in q-axis per unit"
    Xq_p::Float64
    "Sub-Transient reactance after EMF in d-axis per unit"
    Xd_pp::Float64
    "Sub-Transient reactance after EMF in q-axis per unit"
    Xq_pp::Float64
    "Time constant of transient d-axis voltage"
    Td0_p::Float64
    "Time constant of transient q-axis voltage"
    Tq0_p::Float64
    "Time constant of sub-transient d-axis voltage"
    Td0_pp::Float64
    "Time constant of sub-transient q-axis voltage"
    Tq0_pp::Float64
    ext::Dict{String, Any}
    "The states are:
    ψq: q-axis stator flux,
    ψd: d-axis stator flux,
    eq_p: q-axis transient voltage,
    ed_p: d-axis transient voltage,
    eq_pp: q-axis subtransient voltage,
    ed_pp: d-axis subtransient voltage"
    states::Vector{Symbol}
    "AndersonFouadMachine has 6 states"
    n_states::Int
    "power system internal reference, do not modify"
    internal::InfrastructureSystemsInternal
end
function AndersonFouadMachine(R, Xd, Xq, Xd_p, Xq_p, Xd_pp, Xq_pp, Td0_p, Tq0_p, Td0_pp, Tq0_pp, ext=Dict{String, Any}(), )
    AndersonFouadMachine(R, Xd, Xq, Xd_p, Xq_p, Xd_pp, Xq_pp, Td0_p, Tq0_p, Td0_pp, Tq0_pp, ext, [:ψq, :ψd, :eq_p, :ed_p, :eq_pp, :ed_pp], 6, InfrastructureSystemsInternal(), )
end
function AndersonFouadMachine(; R, Xd, Xq, Xd_p, Xq_p, Xd_pp, Xq_pp, Td0_p, Tq0_p, Td0_pp, Tq0_pp, ext=Dict{String, Any}(), states=[:ψq, :ψd, :eq_p, :ed_p, :eq_pp, :ed_pp], n_states=6, internal=InfrastructureSystemsInternal(), )
    AndersonFouadMachine(R, Xd, Xq, Xd_p, Xq_p, Xd_pp, Xq_pp, Td0_p, Tq0_p, Td0_pp, Tq0_pp, ext, states, n_states, internal, )
end
# Constructor for demo purposes; non-functional.
function AndersonFouadMachine(::Nothing)
    AndersonFouadMachine(;
        R=0,
        Xd=0,
        Xq=0,
        Xd_p=0,
        Xq_p=0,
        Xd_pp=0,
        Xq_pp=0,
        Td0_p=0,
        Tq0_p=0,
        Td0_pp=0,
        Tq0_pp=0,
        ext=Dict{String, Any}(),
    )
end
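# Illustrative usage only (not part of the generated API); the per-unit values
# below are hypothetical placeholders, not validated machine data:
#
#   machine = AndersonFouadMachine(;
#       R = 0.0, Xd = 1.8, Xq = 1.7, Xd_p = 0.3, Xq_p = 0.55,
#       Xd_pp = 0.25, Xq_pp = 0.25, Td0_p = 8.0, Tq0_p = 0.4,
#       Td0_pp = 0.03, Tq0_pp = 0.05,
#   )
#   get_n_states(machine)  # 6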
"""Get [`AndersonFouadMachine`](@ref) `R`."""
get_R(value::AndersonFouadMachine) = value.R
"""Get [`AndersonFouadMachine`](@ref) `Xd`."""
get_Xd(value::AndersonFouadMachine) = value.Xd
"""Get [`AndersonFouadMachine`](@ref) `Xq`."""
get_Xq(value::AndersonFouadMachine) = value.Xq
"""Get [`AndersonFouadMachine`](@ref) `Xd_p`."""
get_Xd_p(value::AndersonFouadMachine) = value.Xd_p
"""Get [`AndersonFouadMachine`](@ref) `Xq_p`."""
get_Xq_p(value::AndersonFouadMachine) = value.Xq_p
"""Get [`AndersonFouadMachine`](@ref) `Xd_pp`."""
get_Xd_pp(value::AndersonFouadMachine) = value.Xd_pp
"""Get [`AndersonFouadMachine`](@ref) `Xq_pp`."""
get_Xq_pp(value::AndersonFouadMachine) = value.Xq_pp
"""Get [`AndersonFouadMachine`](@ref) `Td0_p`."""
get_Td0_p(value::AndersonFouadMachine) = value.Td0_p
"""Get [`AndersonFouadMachine`](@ref) `Tq0_p`."""
get_Tq0_p(value::AndersonFouadMachine) = value.Tq0_p
"""Get [`AndersonFouadMachine`](@ref) `Td0_pp`."""
get_Td0_pp(value::AndersonFouadMachine) = value.Td0_pp
"""Get [`AndersonFouadMachine`](@ref) `Tq0_pp`."""
get_Tq0_pp(value::AndersonFouadMachine) = value.Tq0_pp
"""Get [`AndersonFouadMachine`](@ref) `ext`."""
get_ext(value::AndersonFouadMachine) = value.ext
"""Get [`AndersonFouadMachine`](@ref) `states`."""
get_states(value::AndersonFouadMachine) = value.states
"""Get [`AndersonFouadMachine`](@ref) `n_states`."""
get_n_states(value::AndersonFouadMachine) = value.n_states
"""Get [`AndersonFouadMachine`](@ref) `internal`."""
get_internal(value::AndersonFouadMachine) = value.internal
"""Set [`AndersonFouadMachine`](@ref) `R`."""
set_R!(value::AndersonFouadMachine, val) = value.R = val
"""Set [`AndersonFouadMachine`](@ref) `Xd`."""
set_Xd!(value::AndersonFouadMachine, val) = value.Xd = val
"""Set [`AndersonFouadMachine`](@ref) `Xq`."""
set_Xq!(value::AndersonFouadMachine, val) = value.Xq = val
"""Set [`AndersonFouadMachine`](@ref) `Xd_p`."""
set_Xd_p!(value::AndersonFouadMachine, val) = value.Xd_p = val
"""Set [`AndersonFouadMachine`](@ref) `Xq_p`."""
set_Xq_p!(value::AndersonFouadMachine, val) = value.Xq_p = val
"""Set [`AndersonFouadMachine`](@ref) `Xd_pp`."""
set_Xd_pp!(value::AndersonFouadMachine, val) = value.Xd_pp = val
"""Set [`AndersonFouadMachine`](@ref) `Xq_pp`."""
set_Xq_pp!(value::AndersonFouadMachine, val) = value.Xq_pp = val
"""Set [`AndersonFouadMachine`](@ref) `Td0_p`."""
set_Td0_p!(value::AndersonFouadMachine, val) = value.Td0_p = val
"""Set [`AndersonFouadMachine`](@ref) `Tq0_p`."""
set_Tq0_p!(value::AndersonFouadMachine, val) = value.Tq0_p = val
"""Set [`AndersonFouadMachine`](@ref) `Td0_pp`."""
set_Td0_pp!(value::AndersonFouadMachine, val) = value.Td0_pp = val
"""Set [`AndersonFouadMachine`](@ref) `Tq0_pp`."""
set_Tq0_pp!(value::AndersonFouadMachine, val) = value.Tq0_pp = val
"""Set [`AndersonFouadMachine`](@ref) `ext`."""
set_ext!(value::AndersonFouadMachine, val) = value.ext = val
|
{"hexsha": "e046a8e599cd9ee2bf6c67acf7d74f169c8e8901", "size": 7190, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/models/generated/AndersonFouadMachine.jl", "max_stars_repo_name": "marenat/PowerSystems.jl", "max_stars_repo_head_hexsha": "c4adfafff95808e75c884e5f9162b1fabdbc9622", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 80, "max_stars_repo_stars_event_min_datetime": "2018-04-04T16:42:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-04T19:50:57.000Z", "max_issues_repo_path": "src/models/generated/AndersonFouadMachine.jl", "max_issues_repo_name": "marenat/PowerSystems.jl", "max_issues_repo_head_hexsha": "c4adfafff95808e75c884e5f9162b1fabdbc9622", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 360, "max_issues_repo_issues_event_min_datetime": "2018-04-25T22:38:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-17T18:59:01.000Z", "max_forks_repo_path": "src/models/generated/AndersonFouadMachine.jl", "max_forks_repo_name": "marenat/PowerSystems.jl", "max_forks_repo_head_hexsha": "c4adfafff95808e75c884e5f9162b1fabdbc9622", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-12-11T10:44:38.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-21T15:53:58.000Z", "avg_line_length": 42.2941176471, "max_line_length": 228, "alphanum_fraction": 0.6955493741, "num_tokens": 2374}
|
# -*- coding: utf-8 -*-
import numpy as np
import sys
import os
import glob
import cv2
import math
import pickle
import datetime, random
import pandas as pd
from utils import *
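# NOTE: load_img, img_to_array, preprocess_input (Keras image helpers) and the
# save_array/saveFile/get_driver_data utilities used below are assumed to be
# re-exported by utils via the wildcard import above.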
np.random.seed(1)
img_rows=224
img_cols=224
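# 224x224 matches the default input resolution of VGG-style ImageNet models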
df = pd.read_csv('driver_imgs_list.csv') # supplied by kaggle
by_drivers = df.groupby('subject')
unique_drivers = list(by_drivers.groups.keys())
# Set validation set percentage with regards to training set
val_pct = 0.2
random.shuffle(unique_drivers)
# These are the drivers we will be entirely moving to the validation set
to_val_drivers = unique_drivers[:int(len(unique_drivers) * val_pct)]
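# NOTE: to_val_drivers is computed above but not used below; the actual split
# relies on the pre-exported train/val CSV lists that follow.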
# split the kaggle train images by driver so that no driver appears in both the train and validation sets
df1 = pd.read_csv('train_driver_imgs_list.csv') # exported from local database
df2 = pd.read_csv('val_driver_imgs_list.csv') # exported from local database
train_imgs_list = list(df1['img'])  #list(map(lambda x: x[:-4].strip(), df1['img']))  # get train img list
val_imgs_list = list(df2['img'])  #list(map(lambda x: x[:-4].strip(), df2['img']))
test_folder = "dataset/test"
target_train_folder = "dataset/keras_train_batch2/"
target_valid_folder ="dataset/keras_valid_batch2/"
def vgg_image(x):
    image = load_img(x, target_size=(224, 224))
    image = img_to_array(image)
    image = image.reshape((image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)
    #print(image.shape)
    #print(type(image))
    return image
def get_im(path, img_rows, img_cols, color_type=1):
    # Load as grayscale
    if color_type == 1:
        img = cv2.imread(path, 0)
    elif color_type == 3:
        img = cv2.imread(path)
    # Reduce size
    resized = cv2.resize(img, (img_cols, img_rows))
    #resized = resized.astype(np.float32, copy=False)
    return resized
    #return img
def load_train(img_rows, img_cols, color_type=1):
    x_train = []
    x_val = []
    train = {}
    val = {}
    y_train = []
    y_val = []
    val_driver_id = []
    train_driver_id = []
    driver_data = get_driver_data()
    print('Read train images')
    for j in range(10):
        print('Load folder c{}'.format(j))
        path = os.path.join('dataset', 'train',
                            'c' + str(j), '*.jpg')
        files = glob.glob(path)
        d = 'c' + str(j)
        os.mkdir(target_train_folder + d)
        os.mkdir(target_valid_folder + d)
        for fl in files:
            flbase = os.path.basename(fl)
            if flbase in train_imgs_list:
                img = get_im(fl, img_rows, img_cols, color_type)
                #img = vgg_image(fl)
                #sys.exit()
                cv2.imwrite(os.path.join(target_train_folder + 'c' + str(j), flbase), img)
                #cv2.imwrite(os.path.join(train_folder, flbase), img)
                #train[j] = img
                #x_train.append(img)
                #y_train.append(j)
                #train_driver_id.append(driver_data[flbase])
            elif flbase in val_imgs_list:
                img = get_im(fl, img_rows, img_cols, color_type)
                #img = vgg_image(fl)
                cv2.imwrite(os.path.join(target_valid_folder + 'c' + str(j), flbase), img)
                #cv2.imwrite(os.path.join(val_folder, flbase), img)
                #val[j] = img
                #x_val.append(img)
                #y_val.append(j)
                #val_driver_id.append(driver_data[flbase])
            else:
                print("image name is in neither the train list nor the val list")
                sys.exit()
    #saveFile("vgg_y_val.csv", y_val)
    #saveFile("vgg_y_train.csv", y_train)
    #saveFile("vgg_x_val.csv", x_val)
    #saveFile("vgg_x_train.csv", x_train)
    #saveFile("vgg_train_driver_id.csv", train_driver_id)
    return 1
    #return x_train, y_train #, driver_id #, unique_drivers
def load_test(img_rows, img_cols, color_type=1):
    print('Read test images')
    path = os.path.join('dataset', 'test', '*.jpg')
    files = glob.glob(path)
    x_test_normalized = []
    x_test_id_normalized = []
    total = 0
    thr = max(1, math.floor(len(files)/10))  # guard against modulo-by-zero when there are fewer than 10 files
    for fl in files:
        #print(fl, type(fl))
        #sys.exit()
        flbase = os.path.basename(fl)
        #img = vgg_image(fl, img_rows, img_cols, color_type)
        img = vgg_image(fl)
        #cv2.imwrite(os.path.join(test_folder, flbase), img)
        #img = cv2.imread(fl)
        x_test_normalized.append(img)
        x_test_id_normalized.append(flbase)
        total += 1
        if total % thr == 0:
            print('Read {} images from {}'.format(total, len(files)))
    #save_array("x_test.csv", np.array(x_test))
    #saveFile("x_test.csv", x_test)
    save_array("x_test_id_vgg6.dat", np.array(x_test_id_normalized))
    save_array("x_test_vgg6.dat", np.array(x_test_normalized))
    return 1  #x_test_id
#load_train(img_rows, img_cols, color_type=3)
#load_test(img_rows, img_cols, color_type=3)
#print("image processing finished and saved")
|
{"hexsha": "12d299abcc35044e794a136bd0f008fab59d1dd7", "size": 5130, "ext": "py", "lang": "Python", "max_stars_repo_path": "image_process.py", "max_stars_repo_name": "18461271/state_farm_driver_distraction_detection", "max_stars_repo_head_hexsha": "6032ed7fb5d88deb60bfd22af12ce8046d3fd5d3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "image_process.py", "max_issues_repo_name": "18461271/state_farm_driver_distraction_detection", "max_issues_repo_head_hexsha": "6032ed7fb5d88deb60bfd22af12ce8046d3fd5d3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image_process.py", "max_forks_repo_name": "18461271/state_farm_driver_distraction_detection", "max_forks_repo_head_hexsha": "6032ed7fb5d88deb60bfd22af12ce8046d3fd5d3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8741258741, "max_line_length": 121, "alphanum_fraction": 0.6118908382, "include": true, "reason": "import numpy", "num_tokens": 1291}
|
#coding: utf-8
import os
import sys
import copy
import json
import random
import torch
import pickle
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from collections import defaultdict, OrderedDict
try:
    from .fix_label import fix_general_label_error
except ImportError:
    from fix_label import fix_general_label_error
import utils.scaffold
ModeKeys = utils.scaffold.ModeKeys
EXPERIMENT_DOMAINS = [
    "hotel", "train", "restaurant", "attraction", "taxi"]
def get_slot_information(ontology):
ontology_domains = dict([
(k, v) for k, v in ontology.items()
if k.split("-")[0] in EXPERIMENT_DOMAINS])
slots = [k.replace(" ","").lower()
if ("book" not in k) else k.lower()
for k in ontology_domains.keys()]
return slots
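# Hedged sketch of the normalization above (ontology keys are illustrative,
# not taken from a specific ontology file):
#   "hotel-price range" -> "hotel-pricerange"   (spaces stripped, lowercased)
#   "hotel-book stay"   -> "hotel-book stay"    ("book" slots keep their spaces)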
def read_ontology(data_path):
ont_file = os.path.join(data_path, "ontology.json")
with open(ont_file, 'r', encoding='utf8') as fd:
ontology = json.load(fd)
all_slots = get_slot_information(ontology)
return all_slots
def read_dial_data(args, data_file, split, all_slots, training, max_line=None):
data = []
max_resp_len, max_value_len = 0, 0
domain_counter = defaultdict(int)
# data_file = os.path.join(data_path, "proc", "{}_dials.json".format(split))
with open(data_file, 'r', encoding='utf8') as fd:
dials = json.load(fd)
# determine training data ratio, default is 100%
if training and split=="train" and args["data_ratio"] < 1:
random.Random(10).shuffle(dials)
dials = dials[:int(len(dials)*float(args['data_ratio']))]
allow_domains = set(EXPERIMENT_DOMAINS)
allow_slots = copy.deepcopy(all_slots)
if args["except_domain"] != "":
if split in ["train", "dev"]:
allow_domains = set([d for d in allow_domains
if args["except_domain"] not in d and d not in args["except_domain"]])
else:
allow_domains = set([d for d in allow_domains
if args["except_domain"] in d and d in args["except_domain"]])
elif args["only_domain"] != "":
allow_domains = set([d for d in allow_domains
if d in args["only_domain"] or args["only_domain"] in d])
allow_slots = [k for k in allow_slots if k.split('-')[0] in allow_domains]
if "slot_number" in args and args["slot_number"] is not None:
allow_slots = allow_slots[:args["slot_number"]]
cnt_lin = 1
for dial_dict in dials:
dialog_history = ""
last_belief_dict = {}
# filter and count domains
for domain in dial_dict["domains"]:
if domain not in EXPERIMENT_DOMAINS:
continue
domain_counter[domain] += 1
# Unseen domain setting: skip dial
if args["only_domain"] != "" and args["only_domain"] not in dial_dict["domains"]:
continue
if (args["except_domain"] != "" and split == "test" and args["except_domain"] not in dial_dict["domains"]) or \
(args["except_domain"] != "" and split != "test" and [args["except_domain"]] == dial_dict["domains"]):
continue
last_dict = fix_general_label_error(dial_dict["dialogue"][-1]["belief_state"], False, all_slots)
turn_belief_dict = OrderedDict([(k, v)
for k, v in last_dict.items() if k in allow_slots])
if len(turn_belief_dict) == 0:
continue
# read data
for ti, turn in enumerate(dial_dict["dialogue"]):
turn_domain = turn["domain"]
turn_id = turn["turn_idx"]
turn_uttr = turn["system_transcript"] + " ; " + turn["transcript"] + " ; "
turn_uttr_strip = turn_uttr.strip()
dialog_history += (turn["system_transcript"] + " ; " + turn["transcript"] + " ; ")
source_text = dialog_history.strip()
turn_belief_dict = fix_general_label_error(turn["belief_state"], False, all_slots)
turn_belief_dict = OrderedDict([(k, v)
for k, v in turn_belief_dict.items() if k in allow_slots])
turn_belief_list = [str(k)+'-'+str(v) for k, v in turn_belief_dict.items()]
# if (args["all_vocab"] or split=="train") and training:
# mem_lang.index_words(turn_belief_dict, 'belief')
class_label, generate_y, slot_mask, gating_label = [], [], [], []
start_ptr_label, end_ptr_label = [], []
for slot in allow_slots:
if slot in turn_belief_dict.keys():
generate_y.append(turn_belief_dict[slot])
if turn_belief_dict[slot] == "dontcare":
gating_label.append("dontcare")
elif turn_belief_dict[slot] == "none":
gating_label.append("none")
else:
gating_label.append("ptr")
max_value_len = max(max_value_len, len(turn_belief_dict[slot]))
else:
generate_y.append("none")
gating_label.append("none")
data_detail = {
"ID": dial_dict["dialogue_idx"],
"domains": dial_dict["domains"],
"turn_domain": turn_domain,
"turn_id": turn_id,
"dialog_history": source_text,
"turn_belief": turn_belief_list,
"gating_label": gating_label,
"turn_uttr": turn_uttr_strip,
"generate_y": generate_y
}
data.append(data_detail)
max_resp_len = max(max_resp_len, len(source_text.split()))
cnt_lin += 1
if max_line and cnt_lin >= max_line:
break
# if "t{}".format(max_value_len-1) not in mem_lang.word2index.keys() and training:
# for time_i in range(max_value_len):
# mem_lang.index_words("t{}".format(time_i), 'utter')
print("domain_counter", domain_counter)
return data, max_resp_len, allow_slots
class MultiWOZ(Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, data_info, slot_temp):
"""Reads source and target sequences from txt files."""
self.ID = data_info['ID']
self.turn_domain = data_info['turn_domain']
self.turn_id = data_info['turn_id']
self.dialog_history = data_info['dialog_history']
self.turn_belief = data_info['turn_belief']
self.gating_label = data_info['gating_label']
self.turn_uttr = data_info['turn_uttr']
self.generate_y = data_info["generate_y"]
self.num_total_seqs = len(self.dialog_history)
self.slot_temp = slot_temp
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
item_info = {
"ID": self.ID[index],
"turn_id": self.turn_id[index],
"turn_belief": self.turn_belief[index],
"gating_label": self.gating_label[index],
"context_plain":self.dialog_history[index].split(),
"turn_uttr_plain": self.turn_uttr[index],
"turn_domain": self.turn_domain[index],
"generate_y": [v.split() for v in self.generate_y[index]],
"slot_temp": self.slot_temp
}
return item_info
def __len__(self):
return self.num_total_seqs
def collate_fn(data):
data.sort(key=lambda x: len(x['context_plain']), reverse=True)
item_info = {}
for key in data[0].keys():
item_info[key] = [d[key] for d in data]
return item_info
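# Hedged sketch (illustrative): collate_fn sorts a batch by context length
# (longest first) and transposes the list of per-item dicts into a dict of
# lists, e.g. [{"ID": "a", ...}, {"ID": "b", ...}] -> {"ID": ["a", "b"], ...}.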
def sample_map_and_batch(args, pairs, batch_size, shuffle, slot_temp):
    if shuffle and args['fisher_sample'] > 0:
random.shuffle(pairs)
pairs = pairs[:args['fisher_sample']]
    # collect keys and initialize per-key containers
data_info = {}
data_keys = pairs[0].keys()
for k in data_keys:
data_info[k] = []
# distribute
for pair in pairs:
for k in data_keys:
data_info[k].append(pair[k])
dataset = MultiWOZ(data_info, slot_temp)
if args["imbalance_sampler"] and shuffle:
data_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
# shuffle=shuffle,
sampler=ImbalancedDatasetSampler(dataset),
collate_fn=collate_fn)
else:
data_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
collate_fn=collate_fn)
return data_loader
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
"""
def __init__(self, dataset, indices=None, num_samples=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = {}
for idx in self.indices:
label = self._get_label(dataset, idx)
if label in label_to_count:
label_to_count[label] += 1
else:
label_to_count[label] = 1
# weight for each sample
weights = [1.0 / label_to_count[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
return dataset.turn_domain[idx]
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
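# Hedged usage sketch (illustrative): oversample rare turn domains so each
# domain is drawn with roughly equal probability.
#   sampler = ImbalancedDatasetSampler(dataset)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32,
#                                        sampler=sampler, collate_fn=collate_fn)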
def prepare_data_loader(args, data_path, training):
batch_size = args['batch'] if args['batch'] else 100
# eval_batch = args["eval_batch"] if args["eval_batch"] else 100
eval_batch = batch_size
# Create saving folder
folder_name = os.path.join(data_path, "save")
print("folder_name", folder_name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
all_slots = read_ontology(data_path)
# gating_dict = {"ptr":0, "dontcare":1, "none":2}
if training:
train_file = os.path.join(data_path, "proc", "train_dials.json")
pair_train, train_max_len, slot_train = read_dial_data(
args, train_file, "train", all_slots, training)
train_loader = sample_map_and_batch(args, pair_train, batch_size, True, slot_train)
# train_loader = sample_map_and_batch(args, pair_train, batch_size, False)
dev_file = os.path.join(data_path, "proc", "dev_dials.json")
pair_dev, dev_max_len, slot_dev = read_dial_data(
args, dev_file, "dev", all_slots, training)
dev_loader = sample_map_and_batch(args, pair_dev, eval_batch, False, slot_dev)
test_file = os.path.join(data_path, "proc", "test_dials.json")
pair_test, test_max_len, slot_test = read_dial_data(
args, test_file, "test", all_slots, training)
test_loader = sample_map_and_batch(args, pair_test, eval_batch, False, slot_test)
else:
pair_train, train_max_len, slot_train = [], 0, {}
train_loader = []
dev_file = os.path.join(data_path, "proc", "dev_dials.json")
pair_dev, dev_max_len, slot_dev = read_dial_data(
args, dev_file, "dev", all_slots, training)
dev_loader = sample_map_and_batch(args, pair_dev, eval_batch, False, slot_dev)
test_file = os.path.join(data_path, "proc", "test_dials.json")
pair_test, test_max_len, slot_test = read_dial_data(
args, test_file, "test", all_slots, training)
test_loader = sample_map_and_batch(args, pair_test, eval_batch, False, slot_test)
test_4d_loader = []
if args['except_domain']!="":
test_file = os.path.join(data_path, "proc", "test_dials.json")
pair_test_4d, _, slot_test_4d = read_dial_data(
args, test_file, "dev", all_slots, training)
        test_4d_loader = sample_map_and_batch(args, pair_test_4d, eval_batch, False, slot_test_4d)
max_word = max(train_max_len, dev_max_len, test_max_len) + 1
print("Read %s pairs train" % len(pair_train))
print("Read %s pairs dev" % len(pair_dev))
print("Read %s pairs test" % len(pair_test))
print("Max. length of dialog words for RNN: %s " % max_word)
slots_list = [all_slots, slot_train, slot_dev, slot_test]
print("[Train Set & Dev Set Slots]: Number is {} in total".format(str(len(slots_list[2]))))
print(slots_list[2])
print("[Test Set Slots]: Number is {} in total".format(str(len(slots_list[3]))))
print(slots_list[3])
return train_loader, dev_loader, test_loader, test_4d_loader
if __name__=='__main__':
pass
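    # Hedged usage sketch (illustrative; the `args` keys are inferred from
    # their uses above, not from a documented schema):
    #   args = {"batch": 32, "data_ratio": 1.0, "except_domain": "",
    #           "only_domain": "", "fisher_sample": 0, "imbalance_sampler": False}
    #   train_loader, dev_loader, test_loader, _ = prepare_data_loader(
    #       args, "data/multiwoz", training=True)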
|
{"hexsha": "63271bbd51f4c30429e382e95c14c777e595dd26", "size": 13432, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/interface.py", "max_stars_repo_name": "cjliux/mdst.c2f", "max_stars_repo_head_hexsha": "5617624b25ddaa11ffbc07401d3fe0276ca220d5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-17T12:12:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-12T14:28:55.000Z", "max_issues_repo_path": "data/interface.py", "max_issues_repo_name": "cjliux/mdst.c2f", "max_issues_repo_head_hexsha": "5617624b25ddaa11ffbc07401d3fe0276ca220d5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/interface.py", "max_forks_repo_name": "cjliux/mdst.c2f", "max_forks_repo_head_hexsha": "5617624b25ddaa11ffbc07401d3fe0276ca220d5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7089337176, "max_line_length": 119, "alphanum_fraction": 0.6105568791, "include": true, "reason": "import numpy", "num_tokens": 3148}
|
/*
* Config.cpp
*
* Copyright (c) 2014, Alessandro Pezzato
*/
#include "Config.h"
#include <boost/property_tree/json_parser.hpp>
namespace threescanner {
using namespace boost::property_tree;
using namespace boost::property_tree::json_parser;
Config::Config(const std::string& filename) :
pt_() {
read_json(filename.c_str(), pt_);
}
Config::Config(const boost::property_tree::ptree& pt) :
pt_(pt) {
}
Config Config::getChild(const std::string& path) const {
return Config(pt_.get_child(path));
}
Config::~Config() {
}
} /* namespace threescanner */
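/*
 * Hedged usage sketch (illustrative; assumes a JSON file such as
 * {"scanner": {"port": "/dev/ttyUSB0"}} and uses only the interface
 * defined above):
 *
 *   threescanner::Config cfg("config.json");
 *   threescanner::Config scanner = cfg.getChild("scanner");
 */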
|
{"hexsha": "655995811e6f5fbddf8d03fb704015cc96a9d707", "size": 573, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/lib/common/Config.cpp", "max_stars_repo_name": "alepez/threescanner", "max_stars_repo_head_hexsha": "7fe03ecde0c7f18c4059f42a69c59e56e854c7f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12.0, "max_stars_repo_stars_event_min_datetime": "2016-01-18T14:50:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T09:12:26.000Z", "max_issues_repo_path": "src/lib/common/Config.cpp", "max_issues_repo_name": "lazytiger/threescanner", "max_issues_repo_head_hexsha": "7fe03ecde0c7f18c4059f42a69c59e56e854c7f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/common/Config.cpp", "max_forks_repo_name": "lazytiger/threescanner", "max_forks_repo_head_hexsha": "7fe03ecde0c7f18c4059f42a69c59e56e854c7f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2016-07-07T07:30:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-26T19:18:49.000Z", "avg_line_length": 16.8529411765, "max_line_length": 56, "alphanum_fraction": 0.7015706806, "num_tokens": 143}
|
############################################################################
# Copyright ESIEE Paris (2018) #
# #
# Contributor(s) : Benjamin Perret #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import higra as hg
import numpy as np
def triangular_filter(image, size):
"""
Compute a triangular filter on the given 2d image.
The triangular filter is obtained by convolving the image with the kernel
[1 2 ... size (size + 1) size ... 2 1] / (size + 1)^2 and its transpose.
@TODO@ add efficient implementation
:param image: a 2d array
:param size: a positive integer
:return: a 2d array with the same shape as image
"""
    # Triangular kernel [1 2 ... size (size + 1) size ... 2 1] / (size + 1)^2
    kernel = np.asarray(list(range(1, size + 1)) + [size + 1] + list(range(size, 0, -1)), np.float64)
    kernel = kernel / (size + 1) ** 2
im2 = np.pad(image, size, 'symmetric')
im2 = np.apply_along_axis(lambda m: np.convolve(m, kernel, mode='valid'), axis=0, arr=im2)
im2 = np.apply_along_axis(lambda m: np.convolve(m, kernel, mode='valid'), axis=1, arr=im2)
return im2
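# Hedged sanity check (illustrative): for size = 2 the normalized kernel is
# [1, 2, 3, 2, 1] / 9, so a constant image passes through unchanged:
#   img = np.full((5, 5), 7.0)
#   assert np.allclose(triangular_filter(img, 2), img)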
def gradient_orientation(gradient_image, scale=4):
"""
Estimate gradient orientation.
Reimplementation of similar function from Piotr Dollar's matlab edge tool box.
:param gradient_image: 2d image with gradient values
:param scale: a positive integer (size of the triangular filter)
:return: 2d image with estimated gradient orientation in [0; pi]
"""
filtered_gradient = triangular_filter(gradient_image, scale)
dy, dx = np.gradient(filtered_gradient)
_, dxx = np.gradient(dx)
dyy, dxy = np.gradient(dy)
angle = np.mod(np.arctan2(dyy * np.sign(-dxy), dxx), np.pi)
return angle
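# Hedged usage sketch (illustrative; `image` is any 2d array):
#   grad = np.hypot(*np.gradient(image.astype(float)))  # gradient magnitude
#   theta = gradient_orientation(grad, scale=4)         # per-pixel angle in [0, pi)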
|
{"hexsha": "94fe38d576e091d00ce3f04ccdcb8bb9c4138b21", "size": 2115, "ext": "py", "lang": "Python", "max_stars_repo_path": "higra/image/image_utils.py", "max_stars_repo_name": "deisemaia/Higra", "max_stars_repo_head_hexsha": "82cb78b606a383f3961faa882457a9a987f802e0", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": 64, "max_stars_repo_stars_event_min_datetime": "2019-08-18T19:23:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T04:15:04.000Z", "max_issues_repo_path": "higra/image/image_utils.py", "max_issues_repo_name": "deisemaia/Higra", "max_issues_repo_head_hexsha": "82cb78b606a383f3961faa882457a9a987f802e0", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": 120, "max_issues_repo_issues_event_min_datetime": "2019-08-16T09:10:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-17T09:42:58.000Z", "max_forks_repo_path": "higra/image/image_utils.py", "max_forks_repo_name": "deisemaia/Higra", "max_forks_repo_head_hexsha": "82cb78b606a383f3961faa882457a9a987f802e0", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2019-10-04T07:35:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-10T19:59:11.000Z", "avg_line_length": 39.9056603774, "max_line_length": 94, "alphanum_fraction": 0.5286052009, "include": true, "reason": "import numpy", "num_tokens": 454}
|
function [xxx1,xxx2] =dymism(r1,r3,l,r4,r5,choice,rpm,loc)
%-----------------------------------------------------------
format bank;
% clf;
t = 0:1:360;
tg = t*pi/180.0;
cx =[0,0,l,0];
r2 = sqrt(l^2 - r3^2 + r1^2);
jj = 0;
%-----------------------------------------------------------
for j = 1:1:361
jj = jj+1;
x1 = r1 * cos(tg(j));
x2 = r1 * sin(tg(j));
[cx41, cx42, cx43] = x4fun(x1,x2,r3,l);
[x41,x42] = qfun(cx41,cx42,cx43);
x31 = x3fun(x1,x2,x41,r3,l);
x32 = x3fun(x1,x2,x42,r3,l);
a(jj,1) = jj;
a(jj,2) = x1;
a(jj,3) = x2;
a(jj,4) = x31;
a(jj,5) = x41;
a(jj,6) = x32;
a(jj,7) = x42;
a(jj,8) = tang(x31-l,x41);
a(jj,9) = tang(x32-l,x42);
%------------------------------------------------------------------------------------------------------
if l < r4+r5 && r4*r5 > 0 % new check
[cx61,cx62,cx63] = x6fun(x1,x2,x31,x41,r1,r4,r5);
[x61,x62] = qfun(cx61,cx62,cx63);
x51 = x5fun(x1,x2,x31,x41,x61,r1,r4,r5);
x52 = x5fun(x1,x2,x31,x41,x62,r1,r4,r5);
a(jj,10) = x51;
a(jj,11) = x61;
a(jj,12) = x52;
a(jj,13) = x62;
[ccx61,ccx62,ccx63] = x6fun(x1,x2,x32,x42,r1,r4,r5);
[xx61,xx62] = qfun(ccx61,ccx62,ccx63);
xx51 = x5fun(x1,x2,x32,x42,xx61,r1,r4,r5);
xx52 = x5fun(x1,x2,x32,x42,xx62,r1,r4,r5);
a(jj,14) = xx51;
a(jj,15) = xx61;
a(jj,16) = xx52;
a(jj,17) = xx62;
end; % new check
end;
%-----------------------------------------------------------
if l < r4+r5 && r4*r5 > 0 % new check
for i1 = 2:1:361
[a(i1,10),a(i1,11),a(i1,12),a(i1,13)] = organize(a(i1-1,10),a(i1-1,11),a(i1,10),a(i1,11),a(i1,12),a(i1,13));
[a(i1,14),a(i1,15),a(i1,16),a(i1,17)] = organize(a(i1-1,14),a(i1-1,15),a(i1,14),a(i1,15),a(i1,16),a(i1,17));
end;
end; % new check
%-----------------------------------------------------------
if l < r4+r5 && r4*r5 > 0 && choice == 1
% xxx = plot(a(:,2),a(:,3),a(:,4),a(:,5),a(:,10),a(:,11))
[xxx1,xxx2] = cvelo(a(:,2),a(:,3),a(:,4),a(:,5),a(:,10),a(:,11),rpm,loc);
end;
%--------------------------------------------------------------------------------
if l < r4+r5 && r4*r5 > 0 && choice == 2
% xxx = plot(a(:,2),a(:,3),a(:,4),a(:,5),a(:,12),a(:,13));
[xxx1,xxx2] = cvelo(a(:,2),a(:,3),a(:,4),a(:,5),a(:,12),a(:,13),rpm,loc);
end;
%-------------------------------------------------------------------------------
if l < r4+r5 && r4*r5 > 0 && choice == 3
% xxx = plot(a(:,2),a(:,3),a(:,6),a(:,7),a(:,14),a(:,15));
[xxx1,xxx2] = cvelo(a(:,2),a(:,3),a(:,6),a(:,7),a(:,14),a(:,15),rpm,loc);
end;
%----------------------------------------------------------------------------------
if l < r4+r5 && r4*r5 > 0 && choice == 4
% xxx = plot(a(:,2),a(:,3),a(:,6),a(:,7),a(:,16),a(:,17));
[xxx1,xxx2] = cvelo(a(:,2),a(:,3),a(:,6),a(:,7),a(:,16),a(:,17),rpm,loc);
end;
%---------------------------------------------------------------------------
save link_age.dat xxx1 -ascii
disp('The answers are')
disp('Given crank location')
disp('Velocity')
disp('velocity angle')
disp('Acceleration')
disp('Acceleration angle')
disp('.....')
disp('For three nodes');
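% Hedged usage sketch (illustrative values only; cvelo, x4fun, qfun, x3fun,
% x6fun, x5fun, tang and organize are external helpers assumed on the path):
%   [v, a] = dymism(1.0, 2.0, 4.0, 2.5, 3.0, 1, 60, 90);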
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/8690-linkage-mechanism-mechanical-engineering/linkage2/dymism.m"}
|
[STATEMENT]
lemma cond_disj_distr:"(P \<or> (Q \<triangleleft> b \<triangleright> S)) = ((P \<or> Q) \<triangleleft> b \<triangleright> (P \<or> S))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (P \<or> Q \<triangleleft> b \<triangleright> S) = (P \<or> Q) \<triangleleft> b \<triangleright> (P \<or> S)
[PROOF STEP]
by (pred_auto)
|
{"llama_tokens": 145, "file": "UTP_utp_utp_pred_laws", "length": 1}
|
module Diagnostics
using NCDatasets
using Statistics
using LinearAlgebra
using EnsembleKalmanProcesses
using EnsembleKalmanProcesses.ParameterDistributions
import EnsembleKalmanProcesses: construct_sigma_ensemble, construct_mean, construct_cov
import EnsembleKalmanProcesses: construct_successful_mean, construct_successful_cov
using ..ReferenceModels
using ..ReferenceStats
using ..HelperFuncs
const NC = NCDatasets
export io_dictionary_ensemble, io_dictionary_reference, io_dictionary_metrics
export io_dictionary_particle_state, io_dictionary_particle_eval
export io_dictionary_val_metrics, io_dictionary_val_particle_eval
export io_dictionary_val_reference, io_dictionary_prior
"""
io_dictionary_reference()
io_dictionary_reference(
ref_stats::ReferenceStatistics,
ref_models::Vector{ReferenceModel},
write_full_stats::Bool = true,
)
Dictionary of diagnostics pertaining to the `ReferenceModel`s and `ReferenceStatistics` that define the inverse problem.
Elements:
- `Gamma` :: Covariance matrix in the inverse problem latent space (regularized low-dimensional encoding).
- `Gamma_full` :: Covariance matrix of normalized observed variables in full space (possibly ill-conditioned). Only written to file if `write_full_stats` is true.
- `Gamma_full_diag` :: Diagonal of `Gamma_full`, useful when `Gamma_full` is not written to file.
- `y` :: Observations in the inverse problem latent space (low-dimensional encoding).
- `y_full` :: Normalized observations in full space.
- `P_pca` :: PCA projection matrix from full space to low-dimensional latent space.
- `num_vars` :: Maximum number of observed fields (not dimensions) per `ReferenceModel`.
- `var_dof` :: Maximum number of degrees of freedom of each field per `ReferenceModel`.
- `config_pca_dim` :: Dimensionality of the latent space associated with each `ReferenceModel`.
- `config_name` :: Name of each `ReferenceModel` used to construct the inverse problem.
- `config_z_obs` :: Vertical locations of the observations of each `ReferenceModel`.
- `norm_factor` :: Pooled variance used to normalize each field of each `ReferenceModel`.
"""
function io_dictionary_reference()
io_dict = Dict(
"Gamma" => (; dims = ("out", "out"), group = "reference", type = Float64),
"Gamma_full" => (; dims = ("out_full", "out_full"), group = "reference", type = Float64),
"Gamma_full_diag" => (; dims = ("out_full",), group = "reference", type = Float64),
"y" => (; dims = ("out",), group = "reference", type = Float64),
"y_full" => (; dims = ("out_full",), group = "reference", type = Float64),
"P_pca" => (; dims = ("out_full", "out"), group = "reference", type = Float64),
"num_vars" => (; dims = ("config",), group = "reference", type = Int16),
"var_dof" => (; dims = ("config",), group = "reference", type = Int16),
"config_pca_dim" => (; dims = ("config",), group = "reference", type = Int16),
"config_name" => (; dims = ("config",), group = "reference", type = String),
"config_z_obs" => (; dims = ("config", "dof"), group = "reference", type = Float64),
"norm_factor" => (; dims = ("config", "config_field"), group = "reference", type = Float64),
)
return io_dict
end
function io_dictionary_reference(
ref_stats::ReferenceStatistics,
ref_models::Vector{ReferenceModel},
write_full_stats::Bool = true,
)
orig_dict = io_dictionary_reference()
d_full = full_length(ref_stats)
d = pca_length(ref_stats)
num_vars = [length(norm_scale) for norm_scale in ref_stats.norm_vec]
var_dof = Int.([size(P_pca, 1) for P_pca in ref_stats.pca_vec] ./ num_vars)
config_pca_dim = [size(P_pca, 2) for P_pca in ref_stats.pca_vec]
config_name = [
rm.case_name == "LES_driven_SCM" ? join(split(basename(rm.y_dir), ".")[2:end], "_") : rm.case_name for
rm in ref_models
]
config_z_obs = zeros(length(ref_models), maximum(var_dof))
for (i, rm) in enumerate(ref_models)
z_obs = get_z_obs(rm)
config_z_obs[i, 1:length(z_obs)] = z_obs
end
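    # Assemble the per-configuration PCA projections into a single block-diagonal matrix.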
P_pca_full = zeros(d_full, d)
idx_row = 1
idx_col = 1
for P_pca in ref_stats.pca_vec
rows, cols = size(P_pca)
P_pca_full[idx_row:(idx_row + rows - 1), idx_col:(idx_col + cols - 1)] = P_pca
idx_row += rows
idx_col += cols
end
io_dict = Dict(
"Gamma" => Base.setindex(orig_dict["Gamma"], ref_stats.Γ, :field),
"Gamma_full_diag" => Base.setindex(orig_dict["Gamma_full_diag"], Array(diag(ref_stats.Γ_full)), :field),
"y" => Base.setindex(orig_dict["y"], ref_stats.y, :field),
"y_full" => Base.setindex(orig_dict["y_full"], ref_stats.y_full, :field),
"P_pca" => Base.setindex(orig_dict["P_pca"], P_pca_full, :field),
"num_vars" => Base.setindex(orig_dict["num_vars"], num_vars, :field),
"var_dof" => Base.setindex(orig_dict["var_dof"], var_dof, :field),
"config_pca_dim" => Base.setindex(orig_dict["config_pca_dim"], config_pca_dim, :field),
"config_name" => Base.setindex(orig_dict["config_name"], config_name, :field),
"config_z_obs" => Base.setindex(orig_dict["config_z_obs"], config_z_obs, :field),
)
max_num_fields = maximum([length(norm_vec) for norm_vec in ref_stats.norm_vec])
norm_factor = zeros(length(ref_stats.norm_vec), max_num_fields)
for (i, norm_vec) in enumerate(ref_stats.norm_vec)
num_fields = length(norm_vec)
norm_factor[i, 1:num_fields] = norm_vec
end
io_dict["norm_factor"] = Base.setindex(orig_dict["norm_factor"], norm_factor, :field)
if write_full_stats
io_dict["Gamma_full"] = Base.setindex(orig_dict["Gamma_full"], Array(ref_stats.Γ_full), :field)
end
return io_dict
end
"""
io_dictionary_val_reference()
io_dictionary_val_reference(
ref_stats::ReferenceStatistics,
ref_models::Vector{ReferenceModel},
write_full_stats::Bool = true,
)
Dictionary of diagnostics pertaining to the `ReferenceModel`s and `ReferenceStatistics` in the validation set.
Elements:
- `Gamma_val` :: Covariance matrix in latent space, using the same truncation as for the training set.
- `Gamma_full_val` :: Covariance matrix of normalized observed variables in full space. Only written to file if `write_full_stats` is true.
- `Gamma_full_diag_val` :: Diagonal of `Gamma_full_val`, useful when `Gamma_full_val` is not written to file.
- `y_val` :: Observations in latent space, for observed fields in the validation set.
- `y_full_val` :: Normalized observations in full space, for the validation set.
- `P_pca_val` :: PCA projection matrix from full space to low-dimensional latent space, for the validation set.
- `num_vars_val` :: Maximum number of observed fields (not dimensions) per validation `ReferenceModel`.
- `var_dof_val` :: Maximum number of degrees of freedom of each field per validation `ReferenceModel`.
- `config_pca_dim_val` :: Dimensionality of the latent space associated with each validation `ReferenceModel`.
- `config_name_val` :: Name of each `ReferenceModel` in the validation set.
- `config_z_obs_val` :: Vertical locations of the observations of each validation `ReferenceModel`.
- `norm_factor_val` :: Pooled variance used to normalize each field of each validation `ReferenceModel`.
"""
function io_dictionary_val_reference()
io_dict = Dict(
"Gamma_val" => (; dims = ("out_val", "out_val"), group = "reference", type = Float64),
"Gamma_full_val" => (; dims = ("out_full_val", "out_full_val"), group = "reference", type = Float64),
"Gamma_full_diag_val" => (; dims = ("out_full_val",), group = "reference", type = Float64),
"y_val" => (; dims = ("out_val",), group = "reference", type = Float64),
"y_full_val" => (; dims = ("out_full_val",), group = "reference", type = Float64),
"P_pca_val" => (; dims = ("out_full_val", "out_val"), group = "reference", type = Float64),
"num_vars_val" => (; dims = ("config_val",), group = "reference", type = Int16),
"var_dof_val" => (; dims = ("config_val",), group = "reference", type = Int16),
"config_pca_dim_val" => (; dims = ("config_val",), group = "reference", type = Int16),
"config_name_val" => (; dims = ("config_val",), group = "reference", type = String),
"config_z_obs_val" => (; dims = ("config_val", "dof_val"), group = "reference", type = Float64),
"norm_factor_val" => (; dims = ("config_val", "config_field_val"), group = "reference", type = Float64),
)
return io_dict
end
function io_dictionary_val_reference(
ref_stats::ReferenceStatistics,
ref_models::Vector{ReferenceModel},
write_full_stats::Bool = true,
)
orig_dict = io_dictionary_val_reference()
d_full = full_length(ref_stats)
d = pca_length(ref_stats)
num_vars = [length(norm_scale) for norm_scale in ref_stats.norm_vec]
var_dof = Int.([size(P_pca, 1) for P_pca in ref_stats.pca_vec] ./ num_vars)
config_pca_dim = [size(P_pca, 2) for P_pca in ref_stats.pca_vec]
config_name = [
rm.case_name == "LES_driven_SCM" ? join(split(basename(rm.y_dir), ".")[2:end], "_") : rm.case_name for
rm in ref_models
]
config_z_obs = zeros(length(ref_models), maximum(var_dof))
for (i, rm) in enumerate(ref_models)
z_obs = get_z_obs(rm)
config_z_obs[i, 1:length(z_obs)] = z_obs
end
P_pca_full = zeros(d_full, d)
idx_row = 1
idx_col = 1
for P_pca in ref_stats.pca_vec
rows, cols = size(P_pca)
P_pca_full[idx_row:(idx_row + rows - 1), idx_col:(idx_col + cols - 1)] = P_pca
idx_row += rows
idx_col += cols
end
io_dict = Dict(
"Gamma_val" => Base.setindex(orig_dict["Gamma_val"], ref_stats.Γ, :field),
"Gamma_full_diag_val" =>
Base.setindex(orig_dict["Gamma_full_diag_val"], Array(diag(ref_stats.Γ_full)), :field),
"y_val" => Base.setindex(orig_dict["y_val"], ref_stats.y, :field),
"y_full_val" => Base.setindex(orig_dict["y_full_val"], ref_stats.y_full, :field),
"P_pca_val" => Base.setindex(orig_dict["P_pca_val"], P_pca_full, :field),
"num_vars_val" => Base.setindex(orig_dict["num_vars_val"], num_vars, :field),
"var_dof_val" => Base.setindex(orig_dict["var_dof_val"], var_dof, :field),
"config_pca_dim_val" => Base.setindex(orig_dict["config_pca_dim_val"], config_pca_dim, :field),
"config_name_val" => Base.setindex(orig_dict["config_name_val"], config_name, :field),
"config_z_obs_val" => Base.setindex(orig_dict["config_z_obs_val"], config_z_obs, :field),
)
max_num_fields = maximum([length(norm_vec) for norm_vec in ref_stats.norm_vec])
norm_factor = zeros(length(ref_stats.norm_vec), max_num_fields)
for (i, norm_vec) in enumerate(ref_stats.norm_vec)
num_fields = length(norm_vec)
norm_factor[i, 1:num_fields] = norm_vec
end
io_dict["norm_factor_val"] = Base.setindex(orig_dict["norm_factor_val"], norm_factor, :field)
if write_full_stats
io_dict["Gamma_full_val"] = Base.setindex(orig_dict["Gamma_full_val"], Array(ref_stats.Γ_full), :field)
end
return io_dict
end
"""
io_dictionary_prior()
io_dictionary_prior(priors::ParameterDistribution)
Parameter prior diagnostics dictionary.
Elements:
- `u_mean_prior` :: Prior mean in unconstrained parameter space.
- `phi_mean_prior` :: Prior mean in constrained parameter space.
- `u_var_prior` :: Diagonal of the prior covariance in unconstrained space.
- `phi_low_unc_prior` :: Lower uncertainty bound (μ-1σ_prior) of prior in constrained space.
- `phi_upp_unc_prior` :: Upper uncertainty bound (μ+1σ_prior) of prior in constrained space.
- `phi_low_std_prior` :: Lower standard bound (μ-1) of prior in constrained space. Useful measure of minimum allowed values for bounded parameters.
- `phi_upp_std_prior` :: Upper standard bound (μ+1) of prior in constrained space. Useful measure of maximum allowed values for bounded parameters.
"""
function io_dictionary_prior()
io_dict = Dict(
"u_mean_prior" => (; dims = ("param",), group = "prior", type = Float64),
"phi_mean_prior" => (; dims = ("param",), group = "prior", type = Float64),
"u_var_prior" => (; dims = ("param",), group = "prior", type = Float64),
"phi_low_unc_prior" => (; dims = ("param",), group = "prior", type = Float64),
"phi_upp_unc_prior" => (; dims = ("param",), group = "prior", type = Float64),
"phi_low_std_prior" => (; dims = ("param",), group = "prior", type = Float64),
"phi_upp_std_prior" => (; dims = ("param",), group = "prior", type = Float64),
)
return io_dict
end
function io_dictionary_prior(priors::ParameterDistribution)
orig_dict = io_dictionary_prior()
u_mean = mean(priors)
u_var = var(priors)
# The estimator of the mean is valid in unconstrained space, so we must transform the mean.
ϕ_mean = transform_unconstrained_to_constrained(priors, u_mean)
# Transform prior uncertainty bands to constrained space
u_low = u_mean .- sqrt.(u_var)
u_upp = u_mean .+ sqrt.(u_var)
ϕ_low = transform_unconstrained_to_constrained(priors, u_low)
ϕ_upp = transform_unconstrained_to_constrained(priors, u_upp)
    # Transform standard bounds (μ ± 1 in unconstrained space) to constrained space
ϕ_low_std = transform_unconstrained_to_constrained(priors, u_mean .- 1.0)
ϕ_upp_std = transform_unconstrained_to_constrained(priors, u_mean .+ 1.0)
io_dict = Dict(
"u_mean_prior" => Base.setindex(orig_dict["u_mean_prior"], u_mean, :field),
"phi_mean_prior" => Base.setindex(orig_dict["phi_mean_prior"], ϕ_mean, :field),
"u_var_prior" => Base.setindex(orig_dict["u_var_prior"], u_var, :field),
"phi_low_unc_prior" => Base.setindex(orig_dict["phi_low_unc_prior"], ϕ_low, :field),
"phi_upp_unc_prior" => Base.setindex(orig_dict["phi_upp_unc_prior"], ϕ_upp, :field),
"phi_low_std_prior" => Base.setindex(orig_dict["phi_low_std_prior"], ϕ_low_std, :field),
"phi_upp_std_prior" => Base.setindex(orig_dict["phi_upp_std_prior"], ϕ_upp_std, :field),
)
return io_dict
end
"""
io_dictionary_metrics()
io_dictionary_metrics(ekp::EnsembleKalmanProcess, mse_full::Vector{FT}) where {FT <: Real}
Scalar metrics dictionary.
Evaluations of the data-model mismatch in inverse problem (i.e., latent) space are denoted `loss`.
Errors computed in normalized physical (i.e., full) space are denoted `mse_full`. Differences between
these two metrics include:
- Covariance matrix defining the inner product (covariance weighting in `loss` vs L2 norm in `mse_full`),
- Treatment of trailing eigenvalues (truncation and regularization vs considering all eigenmodes).
- The `loss` includes the L2 penalty term, `mse_full` does not.
Elements:
- `loss_mean_g` :: `(ḡ - y)'Γ_inv(ḡ - y)`. This is the ensemble mean loss seen by the Kalman inversion process.
- `loss_mean` :: Ensemble mean of `(g - y)'Γ_inv(g - y)`.
- `loss_min` :: Ensemble min of `(g - y)'Γ_inv(g - y)`.
- `loss_max` :: Ensemble max of `(g - y)'Γ_inv(g - y)`.
- `loss_var` :: Variance estimate of `(g - y)'Γ_inv(g - y)`, empirical (EKI/EKS) or quadrature (UKI).
- `loss_nn_mean` :: `(g_nn - y)'Γ_inv(g_nn - y)`, where `g_nn` is the forward model output at the particle closest to the mean in parameter space.
- `mse_full_mean` :: Ensemble mean of MSE(`g_full`, `y_full`).
- `mse_full_min` :: Ensemble min of MSE(`g_full`, `y_full`).
- `mse_full_max` :: Ensemble max of MSE(`g_full`, `y_full`).
- `mse_full_var` :: Variance estimate of MSE(`g_full`, `y_full`), empirical (EKI/EKS) or quadrature (UKI).
- `mse_full_nn_mean` :: MSE(`g_full`, `y_full`) of particle closest to the mean in parameter space. The mean in parameter space is the solution to the particle-based inversion.
- `failures` :: Number of particle failures per iteration. If the calibration is run with the "high_loss" failure handler, this diagnostic will not capture the failures due to masking.
- `nn_mean_index` :: Particle index of the nearest neighbor to the ensemble mean in parameter space. This index is used to construct `..._nn_mean` metrics.
"""
function io_dictionary_metrics()
io_dict = Dict(
"loss_mean_g" => (; dims = ("iteration",), group = "metrics", type = Float64),
"loss_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
"loss_min" => (; dims = ("iteration",), group = "metrics", type = Float64),
"loss_max" => (; dims = ("iteration",), group = "metrics", type = Float64),
"loss_var" => (; dims = ("iteration",), group = "metrics", type = Float64),
"loss_nn_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
"mse_full_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
"mse_full_min" => (; dims = ("iteration",), group = "metrics", type = Float64),
"mse_full_max" => (; dims = ("iteration",), group = "metrics", type = Float64),
"mse_full_var" => (; dims = ("iteration",), group = "metrics", type = Float64),
"mse_full_nn_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
"failures" => (; dims = ("iteration",), group = "metrics", type = Int16),
"nn_mean_index" => (; dims = ("iteration",), group = "metrics", type = Int16),
)
return io_dict
end
function io_dictionary_metrics(ekp::EnsembleKalmanProcess, mse_full::Vector{FT}) where {FT <: Real}
orig_dict = io_dictionary_metrics()
# Get failures
failures = length(filter(isnan, mse_full))
# Get nearest_to_mean point
nn_mean = get_mean_nearest_neighbor(ekp)
# Failure-safe variance
mse_full_var = get_metric_var(ekp, mse_full)
# Get mse at nearest_to_mean point
mse_full_nn_mean = mse_full[nn_mean]
# Get loss (latent space)
loss = compute_ensemble_loss(ekp)
# Failure-safe variance
loss_var = get_metric_var(ekp, loss)
# Get loss at nearest_to_mean point
loss_nn_mean = loss[nn_mean]
# Filter NaNs
loss_filt = filter(!isnan, loss)
mse_filt = filter(!isnan, mse_full)
io_dict = Dict(
"loss_mean_g" => Base.setindex(orig_dict["loss_mean_g"], get_error(ekp)[end], :field),
"loss_mean" => Base.setindex(orig_dict["loss_mean"], mean(loss_filt), :field),
"loss_min" => Base.setindex(orig_dict["loss_min"], minimum(loss_filt), :field),
"loss_max" => Base.setindex(orig_dict["loss_max"], maximum(loss_filt), :field),
"loss_var" => Base.setindex(orig_dict["loss_var"], loss_var, :field),
"loss_nn_mean" => Base.setindex(orig_dict["loss_nn_mean"], loss_nn_mean, :field),
"mse_full_mean" => Base.setindex(orig_dict["mse_full_mean"], mean(mse_filt), :field),
"mse_full_min" => Base.setindex(orig_dict["mse_full_min"], minimum(mse_filt), :field),
"mse_full_max" => Base.setindex(orig_dict["mse_full_max"], maximum(mse_filt), :field),
"mse_full_var" => Base.setindex(orig_dict["mse_full_var"], mse_full_var, :field),
"mse_full_nn_mean" => Base.setindex(orig_dict["mse_full_nn_mean"], mse_full_nn_mean, :field),
"failures" => Base.setindex(orig_dict["failures"], failures, :field),
"nn_mean_index" => Base.setindex(orig_dict["nn_mean_index"], nn_mean, :field),
)
return io_dict
end
"""
io_dictionary_val_metrics()
io_dictionary_val_metrics(ekp::EnsembleKalmanProcess, mse_full::Vector{FT}) where {FT <: Real}
Dictionary of scalar validation metrics.
Evaluations of the data-model mismatch in inverse problem (i.e., latent) space are denoted `loss`.
Errors computed in normalized physical (i.e., full) space are denoted `mse_full`. Differences between
these two metrics include:
- Covariance matrix defining the inner product (covariance weighting in `loss` vs L2 norm in `mse_full`),
- Treatment of trailing eigenvalues (truncation and regularization vs considering all eigenmodes).
- The `loss` includes the L2 penalty term, `mse_full` does not.
Elements:
- `val_loss_mean` :: Ensemble mean of validation `(g - y)'Γ_inv(g - y)`.
- `val_loss_min` :: Ensemble min of validation `(g - y)'Γ_inv(g - y)`.
- `val_loss_max` :: Ensemble max of validation `(g - y)'Γ_inv(g - y)`.
- `val_loss_var` :: Variance estimate of validation `(g - y)'Γ_inv(g - y)`, empirical (EKI/EKS) or quadrature (UKI).
- `val_loss_nn_mean` :: Validation `(g_nn - y)'Γ_inv(g_nn - y)`, where `g_nn` is the validation forward model output at the particle closest to the mean in parameter space.
- `val_mse_full_mean` :: Ensemble mean of MSE(`g_full_val`, `y_full_val`).
- `val_mse_full_min` :: Ensemble min of MSE(`g_full_val`, `y_full_val`).
- `val_mse_full_max` :: Ensemble max of MSE(`g_full_val`, `y_full_val`).
- `val_mse_full_var` :: Variance estimate of MSE(`g_full_val`, `y_full_val`), empirical (EKI/EKS) or quadrature (UKI).
- `val_mse_full_nn_mean` :: MSE(`g_full_val`, `y_full_val`) of particle closest to the mean in parameter space. The mean in parameter space is the solution to the particle-based inversion.
"""
function io_dictionary_val_metrics()
io_dict = Dict(
"val_loss_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_loss_min" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_loss_max" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_loss_var" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_loss_nn_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_mse_full_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_mse_full_min" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_mse_full_max" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_mse_full_var" => (; dims = ("iteration",), group = "metrics", type = Float64),
"val_mse_full_nn_mean" => (; dims = ("iteration",), group = "metrics", type = Float64),
)
return io_dict
end
function io_dictionary_val_metrics(
ekp::EnsembleKalmanProcess,
val_ref_stats::ReferenceStatistics,
g_val::Matrix{FT},
val_mse_full::Vector{FT},
) where {FT <: Real}
orig_dict = io_dictionary_val_metrics()
# Get nearest_to_mean point
nn_mean = get_mean_nearest_neighbor(ekp)
# Failure-safe variance
val_mse_full_var = get_metric_var(ekp, val_mse_full)
# Get mse at nearest_to_mean point
val_mse_full_nn_mean = val_mse_full[nn_mean]
# Get loss (latent space), augmenting val_ref_stats if necessary
d_aug = size(g_val, 1)
d = length(val_ref_stats.y)
if d_aug > d
y_val = zeros(d_aug)
y_val[1:d] = val_ref_stats.y
y_val[(d + 1):d_aug] = ekp.obs_mean[(end - d_aug + d + 1):end]
Γ_θ = ekp.obs_noise_cov[(end - d_aug + d + 1):end, (end - d_aug + d + 1):end]
Γ_val = cat([val_ref_stats.Γ, Γ_θ]..., dims = (1, 2))
else
y_val = val_ref_stats.y
Γ_val = val_ref_stats.Γ
end
val_loss = compute_ensemble_loss(g_val, y_val, Γ_val)
# Failure-safe variance
val_loss_var = get_metric_var(ekp, val_loss)
# Get loss at nearest_to_mean point
val_loss_nn_mean = val_loss[nn_mean]
# Filter NaNs
val_loss_filt = filter(!isnan, val_loss)
val_mse_filt = filter(!isnan, val_mse_full)
io_dict = Dict(
"val_loss_mean" => Base.setindex(orig_dict["val_loss_mean"], mean(val_loss_filt), :field),
"val_loss_min" => Base.setindex(orig_dict["val_loss_min"], minimum(val_loss_filt), :field),
"val_loss_max" => Base.setindex(orig_dict["val_loss_max"], maximum(val_loss_filt), :field),
"val_loss_var" => Base.setindex(orig_dict["val_loss_var"], val_loss_var, :field),
"val_loss_nn_mean" => Base.setindex(orig_dict["val_loss_nn_mean"], val_loss_nn_mean, :field),
"val_mse_full_mean" => Base.setindex(orig_dict["val_mse_full_mean"], mean(val_mse_filt), :field),
"val_mse_full_min" => Base.setindex(orig_dict["val_mse_full_min"], minimum(val_mse_filt), :field),
"val_mse_full_max" => Base.setindex(orig_dict["val_mse_full_max"], maximum(val_mse_filt), :field),
"val_mse_full_var" => Base.setindex(orig_dict["val_mse_full_var"], val_mse_full_var, :field),
"val_mse_full_nn_mean" => Base.setindex(orig_dict["val_mse_full_nn_mean"], val_mse_full_nn_mean, :field),
)
return io_dict
end
"""
io_dictionary_particle_state()
io_dictionary_particle_state(ekp::EnsembleKalmanProcess, priors::ParameterDistribution)
Dictionary of particle-wise parameter diagnostics, not involving forward model evaluations.
Elements:
- `u` :: Parameter ensemble in unconstrained (inverse problem) space.
- `phi` :: Parameter ensemble in constrained (physical) space.
"""
function io_dictionary_particle_state()
io_dict = Dict(
"u" => (; dims = ("particle", "param", "iteration"), group = "particle_diags", type = Float64),
"phi" => (; dims = ("particle", "param", "iteration"), group = "particle_diags", type = Float64),
)
return io_dict
end
function io_dictionary_particle_state(ekp::EnsembleKalmanProcess, priors::ParameterDistribution)
orig_dict = io_dictionary_particle_state()
u = get_u_final(ekp)
ϕ = transform_unconstrained_to_constrained(priors, u)
io_dict =
Dict("u" => Base.setindex(orig_dict["u"], u', :field), "phi" => Base.setindex(orig_dict["phi"], ϕ', :field))
return io_dict
end
"""
io_dictionary_particle_eval()
io_dictionary_particle_eval(
ekp::EnsembleKalmanProcess,
g_full::Matrix{FT},
mse_full::Vector{FT},
d::IT,
d_full::IT,
batch_indices::Vector{IT},
) where {FT <: Real, IT <: Integer}
Dictionary of particle-wise diagnostics involving forward model evaluations.
Elements:
- `g` :: Forward model evaluation in inverse problem space.
- `g_full` :: Forward model evaluation in primitive output space, normalized using the pooled field covariance.
- `mse_full` :: Particle-wise evaluation of MSE(`g_full`, `y_full`).
- `batch_indices` :: Indices of `ReferenceModel`s evaluated per iteration.
"""
function io_dictionary_particle_eval()
io_dict = Dict(
"g" => (; dims = ("particle", "out_aug", "iteration"), group = "particle_diags", type = Float64),
"g_full" => (; dims = ("particle", "out_full", "iteration"), group = "particle_diags", type = Float64),
"mse_full" => (; dims = ("particle", "iteration"), group = "particle_diags", type = Float64),
"batch_indices" => (; dims = ("batch_index", "iteration"), group = "particle_diags", type = Int16),
)
return io_dict
end
function io_dictionary_particle_eval(
ekp::EnsembleKalmanProcess,
g_full::Matrix{FT},
mse_full::Vector{FT},
d::IT,
d_full::IT,
batch_indices::Vector{IT},
) where {FT <: Real, IT <: Integer}
orig_dict = io_dictionary_particle_eval()
g_aug = get_g_final(ekp)
d_batch, N_ens = size(g_aug)
# Fill "g" array with zeros and modify leading rows with possibly batched `g`
g_filled = zeros(d, N_ens)
g_filled[1:d_batch, :] = g_aug
# Fill "g_full" array with zeros and modify leading rows with possibly batched `g`
d_full_batch = size(g_full, 1)
g_full_filled = zeros(d_full, N_ens)
g_full_filled[1:d_full_batch, :] = g_full
io_dict = Dict(
"g" => Base.setindex(orig_dict["g"], g_filled', :field), # Avoid params in augmented state
"g_full" => Base.setindex(orig_dict["g_full"], g_full_filled', :field),
"mse_full" => Base.setindex(orig_dict["mse_full"], mse_full, :field),
"batch_indices" => Base.setindex(orig_dict["batch_indices"], batch_indices, :field),
)
return io_dict
end
"""
io_dictionary_val_particle_eval()
io_dictionary_val_particle_eval(
g::Matrix{FT},
g_full::Matrix{FT},
mse_full::Vector{FT},
d::IT,
d_full::IT,
batch_indices::Vector{IT},
) where {FT <: Real, IT <: Integer}
Dictionary of particle-wise validation diagnostics involving forward model evaluations.
Elements:
- `val_g` :: Validation forward model evaluation in reduced space.
- `val_g_full` :: Validation forward model evaluation in primitive output space, normalized using the pooled field covariance.
- `val_mse_full` :: Particle-wise evaluation of MSE(`val_g_full`, `val_y_full`).
- `val_batch_indices` :: Indices of validation `ReferenceModel`s evaluated per iteration.
"""
function io_dictionary_val_particle_eval()
io_dict = Dict(
"val_g" => (; dims = ("particle", "out_aug_val", "iteration"), group = "particle_diags", type = Float64),
"val_g_full" =>
(; dims = ("particle", "out_full_val", "iteration"), group = "particle_diags", type = Float64),
"val_mse_full" => (; dims = ("particle", "iteration"), group = "particle_diags", type = Float64),
"val_batch_indices" => (; dims = ("batch_index_val", "iteration"), group = "particle_diags", type = Int16),
)
return io_dict
end
function io_dictionary_val_particle_eval(
g::Matrix{FT},
g_full::Matrix{FT},
mse_full::Vector{FT},
d_aug::IT,
d_full::IT,
batch_indices::Vector{IT},
) where {FT <: Real, IT <: Integer}
orig_dict = io_dictionary_val_particle_eval()
d_batch, N_ens = size(g)
# Fill "g" array with zeros and modify leading rows with possibly batched `g`
g_filled = zeros(d_aug, N_ens)
g_filled[1:d_batch, :] = g
# Fill "g_full" array with zeros and modify leading rows with possibly batched `g`
d_full_batch = size(g_full, 1)
g_full_filled = zeros(d_full, N_ens)
g_full_filled[1:d_full_batch, :] = g_full
io_dict = Dict(
"val_g" => Base.setindex(orig_dict["val_g"], g_filled', :field),
"val_g_full" => Base.setindex(orig_dict["val_g_full"], g_full_filled', :field),
"val_mse_full" => Base.setindex(orig_dict["val_mse_full"], mse_full, :field),
"val_batch_indices" => Base.setindex(orig_dict["val_batch_indices"], batch_indices, :field),
)
return io_dict
end
"""
io_dictionary_ensemble()
io_dictionary_ensemble(ekp::EnsembleKalmanProcess, priors::ParameterDistribution)
Dictionary of ensemble parameter diagnostics.
Elements:
- `u_mean` :: Ensemble mean parameter in unconstrained (inverse problem) space.
- `phi_mean` :: Ensemble mean parameter in constrained (physical) space.
- `u_cov` :: Sample parameter covariance in unconstrained (inverse problem) space.
- `phi_cov` :: Sample parameter covariance in constrained (physical) space.
- `phi_low_unc` :: Lower uncertainty bound (μ-1σ) of the parameter value in constrained (physical) space.
- `phi_upp_unc` :: Upper uncertainty bound (μ+1σ) of the parameter value in constrained (physical) space.
"""
function io_dictionary_ensemble()
io_dict = Dict(
"u_mean" => (; dims = ("param", "iteration"), group = "ensemble_diags", type = Float64),
"phi_mean" => (; dims = ("param", "iteration"), group = "ensemble_diags", type = Float64),
"u_cov" => (; dims = ("param", "param", "iteration"), group = "ensemble_diags", type = Float64),
"phi_cov" => (; dims = ("param", "param", "iteration"), group = "ensemble_diags", type = Float64),
"phi_low_unc" => (; dims = ("param", "iteration"), group = "ensemble_diags", type = Float64),
"phi_upp_unc" => (; dims = ("param", "iteration"), group = "ensemble_diags", type = Float64),
)
return io_dict
end
function io_dictionary_ensemble(ekp::EnsembleKalmanProcess, priors::ParameterDistribution)
orig_dict = io_dictionary_ensemble()
u_mean = get_u_mean(ekp)
u_cov = get_u_cov(ekp)
# The estimator of the mean is valid in unconstrained space, so we must transform the mean.
ϕ_mean = transform_unconstrained_to_constrained(priors, u_mean)
# Transform uncertainty bands to constrained space
u_low = u_mean .- sqrt.(diag(u_cov))
u_upp = u_mean .+ sqrt.(diag(u_cov))
ϕ_low = transform_unconstrained_to_constrained(priors, u_low)
ϕ_upp = transform_unconstrained_to_constrained(priors, u_upp)
# The covariance of ϕ is not the transformed covariance, this is just a linear approximator.
ϕ_cov = get_ϕ_cov(ekp, priors)
io_dict = Dict(
"u_mean" => Base.setindex(orig_dict["u_mean"], u_mean, :field),
"phi_mean" => Base.setindex(orig_dict["phi_mean"], ϕ_mean, :field),
"u_cov" => Base.setindex(orig_dict["u_cov"], u_cov, :field),
"phi_cov" => Base.setindex(orig_dict["phi_cov"], ϕ_cov, :field),
"phi_low_unc" => Base.setindex(orig_dict["phi_low_unc"], ϕ_low, :field),
"phi_upp_unc" => Base.setindex(orig_dict["phi_upp_unc"], ϕ_upp, :field),
)
return io_dict
end
function get_u_mean(ekp::EnsembleKalmanProcess)
if isa(ekp.process, Unscented)
return get_u_mean_final(ekp)
else
u = get_u_final(ekp)
return vcat(mean(u, dims = 2)...)
end
end
function get_u_cov(ekp::EnsembleKalmanProcess)
if isa(ekp.process, Unscented)
return deepcopy(ekp.process.uu_cov[end])
else
u = get_u_final(ekp)
return cov(u, dims = 2)
end
end
"""
get_ϕ_cov(ekp::EnsembleKalmanProcess, priors::ParameterDistribution)
Get the last parameter covariance estimate in constrained (physical) space.
For ensemble methods, the covariance of the transformed parameters is returned.
For unscented methods, the covariance is computed through a quadrature on the
transformed quadrature points. The covariance of the transformed parameters
returned here is equal to the transformed covariance only under a first order
Taylor approximation, which is consistent with other approximations underlying the
calibration method.
Inputs:
- `ekp` :: The EnsembleKalmanProcess.
- `priors` :: The priors defining transformations between constrained and unconstrained space.
Outputs:
- The parameter covariance in constrained space.
"""
function get_ϕ_cov(ekp::EnsembleKalmanProcess, priors::ParameterDistribution)
if isa(ekp.process, Unscented)
u_mean = get_u_mean_final(ekp)
u_cov = deepcopy(ekp.process.uu_cov[end])
u_p = construct_sigma_ensemble(ekp.process, u_mean, u_cov)
ϕ_p = transform_unconstrained_to_constrained(priors, u_p)
ϕ_p_mean = construct_mean(ekp, ϕ_p)
return construct_cov(ekp, ϕ_p, ϕ_p_mean)
else
u = get_u_final(ekp)
ϕ = transform_unconstrained_to_constrained(priors, u)
return cov(ϕ, dims = 2)
end
end
"""
get_metric_var(ekp::EnsembleKalmanProcess, metric::Vector{FT}) where {FT <: Real}
Computes the ensemble variance of a scalar metric.
For ensemble methods, the sample variance of the metric is returned. For unscented methods,
the variance is computed through a quadrature. Ensemble members where the metric is `NaN`
are filtered out of the computation.
Inputs:
- `ekp` :: The EnsembleKalmanProcess.
- `metric` :: A vector containing the value of the metric for each ensemble member.
Outputs:
- The ensemble variance of `metric`.
"""
function get_metric_var(ekp::EnsembleKalmanProcess, metric::Vector{FT}) where {FT <: Real}
if isa(ekp.process, Unscented)
if any(isnan.(metric))
succ_ens = [i for i = 1:length(metric) if !isnan(metric[i])]
metric_mean = construct_successful_mean(ekp, metric, succ_ens)
return construct_successful_cov(ekp, metric, metric_mean, succ_ens)
else
metric_mean = construct_mean(ekp, metric)
return construct_cov(ekp, metric, metric_mean)
end
else
return var(filter(!isnan, metric))
end
end
"""
get_mean_nearest_neighbor(ekp::EnsembleKalmanProcess)
Returns the index of the nearest neighbor to the ensemble mean parameter, in unconstrained space.
"""
function get_mean_nearest_neighbor(ekp::EnsembleKalmanProcess)
u = get_u_final(ekp)
u_mean = mean(u, dims = 2)
return argmin(vcat(sum((u .- u_mean) .^ 2, dims = 1)...))
end
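# Hedged sketch (illustrative): with a single parameter and ensemble
# u = [0.0 1.0 10.0], the mean is ~3.67, so the nearest particle is the
# second one and the function returns 2.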
"""
compute_ensemble_loss(
g::AbstractMatrix{FT},
y::AbstractVector{FT},
Γ::Union{AbstractMatrix{FT}, UniformScaling{FT}},
) where {FT <: Real}
compute_ensemble_loss(ekp::EnsembleKalmanProcess)
Computes the covariance-weighted error `(g - y)'Γ_inv(g - y)` for each ensemble member.
"""
function compute_ensemble_loss(
g::AbstractMatrix{FT},
y::AbstractVector{FT},
Γ::Union{AbstractMatrix{FT}, UniformScaling{FT}},
) where {FT <: Real}
diff = g .- y # [d, N_ens]
loss = zeros(size(g, 2)) # [N_ens, 1]
for i in 1:size(g, 2)
Γ_inv_diff = Γ \ diff[:, i] # [d, 1]
loss[i] = dot(diff[:, i], Γ_inv_diff)
end
return loss
end
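# Hedged usage sketch (illustrative; `I` is the identity from LinearAlgebra,
# which is already imported above):
#   g = [1.0 2.0; 0.0 1.0]             # two ensemble members as columns
#   y = [1.0, 0.0]
#   compute_ensemble_loss(g, y, 1.0I)  # -> [0.0, 2.0]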
function compute_ensemble_loss(ekp::EnsembleKalmanProcess)
g = get_g_final(ekp)
return compute_ensemble_loss(g, ekp.obs_mean, ekp.obs_noise_cov)
end
end # module
|
{"hexsha": "cc4eb6b7b8924eaf3f2f7b6f47535c637c20c1ce", "size": 37121, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Diagnostics.jl", "max_stars_repo_name": "CliMA/CalibrateEDMF.jl", "max_stars_repo_head_hexsha": "237e21f9b2228bd0cb008976fc4d61ea3722196c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-09-23T18:57:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T06:58:17.000Z", "max_issues_repo_path": "src/Diagnostics.jl", "max_issues_repo_name": "CliMA/CalibrateEDMF.jl", "max_issues_repo_head_hexsha": "237e21f9b2228bd0cb008976fc4d61ea3722196c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 203, "max_issues_repo_issues_event_min_datetime": "2021-10-01T23:30:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:00:59.000Z", "max_forks_repo_path": "src/Diagnostics.jl", "max_forks_repo_name": "CliMA/CalibrateEDMF.jl", "max_forks_repo_head_hexsha": "237e21f9b2228bd0cb008976fc4d61ea3722196c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.4086845466, "max_line_length": 189, "alphanum_fraction": 0.6817434875, "num_tokens": 9812}
|
import numpy as np
import pytest
from gamegym.games.matrix import (GameOfChicken, MatchingPennies, MatrixGame, MatrixZeroSumGame,
PrisonersDilemma, RockPaperScissors)
from gamegym.strategy import ConstStrategy, UniformStrategy
from gamegym.utils import get_rng
from gamegym.algorithms.stats import sample_payoff
def test_base():
gs = [
PrisonersDilemma(),
GameOfChicken(),
RockPaperScissors(),
MatchingPennies(),
MatrixZeroSumGame([[1, 3], [3, 2], [0, 0]], [["A", "B", "C"], [0, 1]]),
MatrixGame([[1], [2], [3]], [["A1", "A2", "A3"]]),
MatrixGame(np.zeros([2, 4, 5, 3], dtype=np.int32)),
]
for g in gs:
s = g.start()
assert not s.is_terminal()
assert s.player == 0
assert len(s.actions) == g.m.shape[0]
repr(s)
repr(g)
s = s.play(g.actions[0])
g = RockPaperScissors()
s = g.start()
assert s.observations == ((), (), ())
s = s.play("R")
assert s.observations == ("R", (), ())
s = s.play("P")
assert s.is_terminal()
assert s.observations == (("R", "P"), ("R", "P"), ("R", "P"))
assert ((-1, 1) == s.payoff).all()
def test_strategies():
g = RockPaperScissors()
rng = get_rng(seed=41)
s1 = [UniformStrategy(), UniformStrategy()]
v1 = sample_payoff(g, s1, 300, rng=rng)
assert sum(v1[0]) == pytest.approx(0.0)
assert v1[0] == pytest.approx([0.0, 0.0], abs=0.1)
s2 = [
ConstStrategy((1.0, 0.0, 0.0)),
ConstStrategy((0.5, 0.5, 0.0)),
]
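    # Hedged expectation check: P1 always plays Rock; P2 mixes Rock/Paper 50:50.
    # Rock vs Rock draws (0) and Rock vs Paper loses (-1 for P1), so P1's
    # expected payoff is 0.5*0 + 0.5*(-1) = -0.5 and P2's is +0.5.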
v2 = sample_payoff(g, s2, 300, rng=rng)
assert sum(v2[0]) == pytest.approx(0.0)
assert v2[0] == pytest.approx([-0.5, 0.5], abs=0.1)
|
{"hexsha": "013c8b41ef23a5429f0ef0b27806ea1532aff567", "size": 1722, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/games/test_matrix.py", "max_stars_repo_name": "spirali/gamegym", "max_stars_repo_head_hexsha": "8c2dbb7969cabae9ca86c0dab74c6ddc5fbd21bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2018-10-05T20:52:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T05:59:27.000Z", "max_issues_repo_path": "tests/games/test_matrix.py", "max_issues_repo_name": "spirali/gamegym", "max_issues_repo_head_hexsha": "8c2dbb7969cabae9ca86c0dab74c6ddc5fbd21bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-10-07T12:11:20.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-03T10:47:25.000Z", "max_forks_repo_path": "tests/games/test_matrix.py", "max_forks_repo_name": "spirali/gamegym", "max_forks_repo_head_hexsha": "8c2dbb7969cabae9ca86c0dab74c6ddc5fbd21bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-10-07T10:27:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-13T18:47:37.000Z", "avg_line_length": 30.75, "max_line_length": 96, "alphanum_fraction": 0.5540069686, "include": true, "reason": "import numpy", "num_tokens": 553}
|
ELEMENTAL FUNCTION func_radians(x) RESULT(ans)
! This function returns the passed angle converted from degrees to radians.
USE ISO_FORTRAN_ENV
IMPLICIT NONE
! Declare input variables ...
REAL(kind = REAL64), INTENT(in) :: x ! The input angle in degrees.
! Declare output variables ...
REAL(kind = REAL64) :: ans ! The output angle in radians.
! Set value ...
ans = const_pi * x / 180.0e0_REAL64 ! [rad]
END FUNCTION func_radians
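! Illustrative usage sketch (added; not part of the original source):
!   REAL(kind = REAL64) :: ang
!   ang = func_radians(180.0e0_REAL64) ! yields const_pi, i.e. 180 degrees in radians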
|
{"hexsha": "c9d9c3e93dc5aa135771b34bf37093f596e33ee1", "size": 622, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "mod_safe/func_radians.f90", "max_stars_repo_name": "Guymer/fortranlib", "max_stars_repo_head_hexsha": "30e27b010cf4bc5acf0f3a63d50f11789640e0e3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-05-28T02:05:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-16T16:50:21.000Z", "max_issues_repo_path": "mod_safe/func_radians.f90", "max_issues_repo_name": "Guymer/fortranlib", "max_issues_repo_head_hexsha": "30e27b010cf4bc5acf0f3a63d50f11789640e0e3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-06-17T16:49:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T18:47:36.000Z", "max_forks_repo_path": "mod_safe/func_radians.f90", "max_forks_repo_name": "Guymer/fortranlib", "max_forks_repo_head_hexsha": "30e27b010cf4bc5acf0f3a63d50f11789640e0e3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-11T04:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-11T04:51:33.000Z", "avg_line_length": 36.5882352941, "max_line_length": 122, "alphanum_fraction": 0.5112540193, "num_tokens": 121}
|
[STATEMENT]
lemma ofilter_Un[simp]: "\<lbrakk>ofilter A; ofilter B\<rbrakk> \<Longrightarrow> ofilter(A \<union> B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>local.ofilter A; local.ofilter B\<rbrakk> \<Longrightarrow> local.ofilter (A \<union> B)
[PROOF STEP]
unfolding ofilter_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>A \<subseteq> Field r \<and> (\<forall>a\<in>A. local.under a \<subseteq> A); B \<subseteq> Field r \<and> (\<forall>a\<in>B. local.under a \<subseteq> B)\<rbrakk> \<Longrightarrow> A \<union> B \<subseteq> Field r \<and> (\<forall>a\<in>A \<union> B. local.under a \<subseteq> A \<union> B)
[PROOF STEP]
by blast
|
{"llama_tokens": 267, "file": null, "length": 2}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 01:28:34 2019
@author: Titus
"""
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import core_photo_analyzer as cpa
d1=7448.1   # top of the scanned core interval (matches the folder name)
d2=7449.55  # bottom of the scanned core interval
folder="../../58_32_Core_CT_Scans/7448.1-7449.55"
n=len(os.listdir(folder))  # number of CT slice images
dz=(d2-d1)/n               # interval covered by each slice
z=d1
xs=[]
ys=[]
zs=[]
for file in os.listdir(folder):
pts=cpa.white_finder(folder+'/'+file,z)
# try:
for i in range(len(pts[0])):
xs.append(pts[0][i])
ys.append(pts[1][i])
zs.append(pts[2][i])
# except:
# pass
z+=dz
#%%
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter3D(xs[::1],ys[::1],zs[::1],s=1)
plt.show()
|
{"hexsha": "3fd745848761346acf43dba180b396d0c8984417", "size": 703, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/core_visualizer.py", "max_stars_repo_name": "titusquah/Final-Project", "max_stars_repo_head_hexsha": "21615f2f524fe1a18765305eca823e54585af121", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-30T22:05:47.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-30T22:05:47.000Z", "max_issues_repo_path": "src/core_visualizer.py", "max_issues_repo_name": "titusquah/Final-Project", "max_issues_repo_head_hexsha": "21615f2f524fe1a18765305eca823e54585af121", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/core_visualizer.py", "max_forks_repo_name": "titusquah/Final-Project", "max_forks_repo_head_hexsha": "21615f2f524fe1a18765305eca823e54585af121", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.1463414634, "max_line_length": 49, "alphanum_fraction": 0.6429587482, "include": true, "reason": "import numpy", "num_tokens": 251}
|
## Automatically adapted for numpy.oldnumeric Apr 14, 2008 by -c
from builtins import range
def writeMeshMatlabFormat(mesh,meshFileBase):
"""
build array data structures for matlab finite element mesh representation
and write to a file to view and play with in matlatb
in matlab can then print mesh with
pdemesh(p,e,t)
where
p is the vertex or point matrix
e is the edge matrix, and
t is the element matrix
points matrix is [2 x num vertices]
format :
row 1 = x coord,
row 2 = y coord for nodes in mesh
edge matrix is [7 x num edges]
format:
row 1 = start vertex number
row 2 = end vertex number
row 3 = start value in edge parameterization, should be 0
row 4 = end value in edge parameterization, should be 1
row 5 = global edge id, base 1
    row 6 = subdomain on left? always 0 for now
    row 7 = subdomain on right? always 1 for now
element matrix is [4 x num elements]
row 1 = vertex 1 global number
row 2 = vertex 2 global number
row 3 = vertex 3 global number
row 4 = triangle subdomain number
where 1,2,3 is a local counter clockwise numbering of vertices in
triangle
"""
    import numpy
matlabBase = 1
p = numpy.zeros((2,mesh['nNodes_global']),numpy.float_)
e = numpy.zeros((7,mesh['nElementBoundaries_global']),numpy.float_)
t = numpy.zeros((4,mesh['nElements_global']),numpy.float_)
#load p,e,t and write file
mfile = open(meshFileBase+'.m','w')
mfile.write('p = [ ... \n')
for nN in range(mesh['nNodes_global']):
p[0,nN]=mesh['nodeArray'][nN,0]
p[1,nN]=mesh['nodeArray'][nN,1]
mfile.write('%g %g \n' % tuple(p[:,nN]))
mfile.write(']; \n')
mfile.write("p = p\';\n") #need transpose for matlab
mfile.write('e = [ ... \n')
for ebN in range(mesh['nElementBoundaries_global']):
e[0,ebN]=mesh['elementBoundaryNodesArray'][ebN,0] + matlabBase #global node number of start node base 1
e[1,ebN]=mesh['elementBoundaryNodesArray'][ebN,1] + matlabBase #global node number of end node base 1
e[2,ebN]=0.0 #edge param. is 0 to 1
e[3,ebN]=1.0
e[4,ebN]=ebN + matlabBase #global edge number base 1
e[5,ebN]=0 #subdomain to left
e[6,ebN]=1 #subdomain to right
mfile.write('%g %g %g %g %g %g %g \n' % tuple(e[:,ebN]))
mfile.write(']; \n')
mfile.write("e = e\';\n") #need transpose for matlab
#write triangles last
mfile.write('t = [ ... \n')
for eN in range(mesh['nElements_global']):
t[0,eN]=mesh['elementNodesArray'][eN,0]+matlabBase #global node number for vertex 0
        t[1,eN]=mesh['elementNodesArray'][eN,1]+matlabBase #global node number for vertex 1
        t[2,eN]=mesh['elementNodesArray'][eN,2]+matlabBase #global node number for vertex 2
        t[3,eN]=1 #subdomain id
        mfile.write('%g %g %g %g \n' % tuple(t[:,eN]))
    mfile.write(']; \n')
mfile.write("t = t\';\n") #need transpose for matlab
mfile.close()
return p,e,t
########################################################################
if __name__ == '__main__':
import os,shelve
import ppmatlab,numpy.oldnumeric as numpy
os.listdir('./results')
filename = './results/re_forsyth2_ss_2d_pre_forsyth2_ss_2d_c0p1_n_mesh_results.dat'
res = shelve.open(filename)
mesh = res['mesh']
mmfile = 'forsyth2MeshMatlab'
p,e,t = ppmatlab.writeMeshMatlabFormat(mesh,mmfile)
|
{"hexsha": "aa756acd81e3df8341363e1657f73a5931b7a50d", "size": 3523, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/ppmatlab.py", "max_stars_repo_name": "acatwithacomputer/proteus", "max_stars_repo_head_hexsha": "80dfad95da6ab4d18a88a035f55c26b03540a864", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/ppmatlab.py", "max_issues_repo_name": "acatwithacomputer/proteus", "max_issues_repo_head_hexsha": "80dfad95da6ab4d18a88a035f55c26b03540a864", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2018-02-08T23:22:59.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-06T19:40:32.000Z", "max_forks_repo_path": "scripts/ppmatlab.py", "max_forks_repo_name": "acatwithacomputer/proteus", "max_forks_repo_head_hexsha": "80dfad95da6ab4d18a88a035f55c26b03540a864", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-17T03:25:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-17T03:25:34.000Z", "avg_line_length": 33.5523809524, "max_line_length": 111, "alphanum_fraction": 0.6193585013, "include": true, "reason": "import numpy", "num_tokens": 1074}
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import numpy as np # we're going to use numpy to process input and output data
import onnxruntime # to inference ONNX models, we use the ONNX Runtime
import onnx
import sys, os
from onnx import numpy_helper
import urllib.request
import json
import cv2
import time
# display images in notebook
#from PIL import Image, ImageDraw, ImageFont
class ImageClassification(object):
"""Image classification class for ONNX Runtime
"""
def __init__(self, data, model_dir):
print("Call: Constructor: ImageClassification.__init__")
# TBD Need to add error check
self.platform = str(data["Platform"])
self.model_filename = str(data["ModelFileName"])
if "LabelFileName" in data:
self.label_filename = str(data["LabelFileName"])
if "InputStream" in data:
self.video_inp = str(data["InputStream"])
        # Look for input width and height in cvexport.manifest; note that
        # onnxruntime_session_init later overrides these with the values
        # stored in the model.onnx file itself
if "ScaleWidth" in data:
self.model_inp_width = int(data["ScaleWidth"])
if "ScaleHeight" in data:
self.model_inp_height = int(data["ScaleHeight"])
if "RenderFlag" in data:
self.render = int(data["RenderFlag"])
else:
self.render = 1
if "MeanVec" in data:
self.mean_vec = data["MeanVec"]
else:
self.mean_vec = [0.485, 0.456, 0.406]
if "StddevVec" in data:
self.stddev_vec = data["StddevVec"]
else:
self.stddev_vec = [0.229, 0.224, 0.225]
if "InputFormat" in data:
self.input_format = str(data["InputFormat"])
self.session = None
self.onnxruntime_session_init(model_dir)
def onnxruntime_session_init(self, model_dir):
if self.session is not None:
self.session = None
self.session = onnxruntime.InferenceSession(str(str(model_dir) + str('/') + str(self.model_filename)))
self.input_name = self.session.get_inputs()[0].name
        # Read input width & height from the onnx model file; these take
        # precedence over any ScaleWidth/ScaleHeight read from the manifest
        self.model_inp_width = self.session.get_inputs()[0].shape[2]
        self.model_inp_height = self.session.get_inputs()[0].shape[3]
        if self.label_filename and os.path.isfile(str(str(model_dir) + str('/') + str(self.label_filename))):
with open(str(str(model_dir) + str('/') + str(self.label_filename)), 'r') as f:
self.labels = [l.strip() for l in f.readlines()]
else:
print("Warning: Labels file not found")
self.labels = None
def load_labels(self, path):
with open(path) as f:
for cnt, line in enumerate(f):
self.labels.append(line.rstrip("\n"))
print("total_classes =", cnt)
def preprocess(self, input_data):
# convert the input data into the float32 input
img_data = input_data.astype('float32')
img_data = img_data.reshape(1, 3, self.model_inp_width, self.model_inp_height)
#normalize
norm_img_data = np.zeros(img_data.shape).astype('float32')
        # normalize each of the 3 channels with its own mean/stddev
        for i in range(img_data.shape[1]):
            norm_img_data[:, i, :, :] = (img_data[:, i, :, :] / 255 - self.mean_vec[i]) / self.stddev_vec[i]
return norm_img_data
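    # Note (added; not part of the original source): the resize/transpose/
    # reshape chain in predict_image and preprocess is only shape-consistent
    # when the model input is square (model_inp_width == model_inp_height),
    # as with typical 224x224 classifiers.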
def predict_image(self, frame):
image_data = cv2.resize(frame, (self.model_inp_width, self.model_inp_height), interpolation = cv2.INTER_AREA)
image_data = np.array(image_data).transpose(2, 0, 1)
#image_data = np.array(frame).transpose(2, 0, 1)
input_data = self.preprocess(image_data)
input_name = self.session.get_inputs()[0].name
raw_result = {}
start = time.time()
raw_result = self.session.run([], {input_name: input_data})[1]
end = time.time()
inference_time = end - start
for i in raw_result:
label_dict = i
predictions = []
v = []
if self.labels is None:
self.labels = []
for key, value in label_dict.items():
self.labels.append(str(key))
v.append(value)
for key in self.labels:
predictions.append(label_dict[key])
return predictions, inference_time
def softmax(self, x):
x = x.reshape(-1)
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def postprocess(self, result):
return self.softmax(np.array(result)).tolist()
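# Illustrative usage sketch (added; not part of the original source). The
# manifest keys mirror those read in __init__; the file names are hypothetical:
#
#   config = {"Platform": "onnx", "ModelFileName": "model.onnx",
#             "LabelFileName": "labels.txt"}
#   ic = ImageClassification(config, model_dir="./model")
#   frame = cv2.imread("test.jpg")
#   predictions, inference_time = ic.predict_image(frame)
#   probabilities = ic.postprocess(predictions)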
#def main(config_filename):
# ic_model = ONNXRuntimeImageClassification(config_filename)
|
{"hexsha": "38bc38a7c1c12533febe02c6045e11036098b1c0", "size": 4765, "ext": "py", "lang": "Python", "max_stars_repo_path": "factory-ai-vision/EdgeSolution/modules/VisionSampleModule/image_classification.py", "max_stars_repo_name": "piyushka17/azure-intelligent-edge-patterns", "max_stars_repo_head_hexsha": "0d088899afb0022daa2ac434226824dba2c997c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 176, "max_stars_repo_stars_event_min_datetime": "2019-07-03T00:20:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T07:51:22.000Z", "max_issues_repo_path": "factory-ai-vision/EdgeSolution/modules/VisionSampleModule/image_classification.py", "max_issues_repo_name": "piyushka17/azure-intelligent-edge-patterns", "max_issues_repo_head_hexsha": "0d088899afb0022daa2ac434226824dba2c997c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 121, "max_issues_repo_issues_event_min_datetime": "2019-06-24T20:47:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T02:16:18.000Z", "max_forks_repo_path": "factory-ai-vision/EdgeSolution/modules/VisionSampleModule/image_classification.py", "max_forks_repo_name": "piyushka17/azure-intelligent-edge-patterns", "max_forks_repo_head_hexsha": "0d088899afb0022daa2ac434226824dba2c997c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 144, "max_forks_repo_forks_event_min_datetime": "2019-06-18T18:48:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T12:14:46.000Z", "avg_line_length": 34.2805755396, "max_line_length": 117, "alphanum_fraction": 0.617838405, "include": true, "reason": "import numpy", "num_tokens": 1096}
|
[STATEMENT]
lemma path_image_rectpath:
assumes "Re a1 \<le> Re a3" "Im a1 \<le> Im a3"
shows "path_image (rectpath a1 a3) =
{z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union>
{z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}" (is "?lhs = ?rhs")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
define a2 a4 where "a2 = Complex (Re a3) (Im a1)" and "a4 = Complex (Re a1) (Im a3)"
[PROOF STATE]
proof (state)
this:
a2 = Complex (Re a3) (Im a1)
a4 = Complex (Re a1) (Im a3)
goal (1 subgoal):
1. path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
have "?lhs = closed_segment a1 a2 \<union> closed_segment a2 a3 \<union>
closed_segment a4 a3 \<union> closed_segment a1 a4"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. path_image (rectpath a1 a3) = closed_segment a1 a2 \<union> closed_segment a2 a3 \<union> closed_segment a4 a3 \<union> closed_segment a1 a4
[PROOF STEP]
by (simp_all add: rectpath_def Let_def path_image_join closed_segment_commute
a2_def a4_def Un_assoc)
[PROOF STATE]
proof (state)
this:
path_image (rectpath a1 a3) = closed_segment a1 a2 \<union> closed_segment a2 a3 \<union> closed_segment a4 a3 \<union> closed_segment a1 a4
goal (1 subgoal):
1. path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
path_image (rectpath a1 a3) = closed_segment a1 a2 \<union> closed_segment a2 a3 \<union> closed_segment a4 a3 \<union> closed_segment a1 a4
goal (1 subgoal):
1. path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
have "\<dots> = ?rhs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. closed_segment a1 a2 \<union> closed_segment a2 a3 \<union> closed_segment a4 a3 \<union> closed_segment a1 a4 = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
Re a1 \<le> Re a3
Im a1 \<le> Im a3
goal (1 subgoal):
1. closed_segment a1 a2 \<union> closed_segment a2 a3 \<union> closed_segment a4 a3 \<union> closed_segment a1 a4 = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
by (auto simp: rectpath_def Let_def path_image_join a2_def a4_def
closed_segment_same_Re closed_segment_same_Im closed_segment_eq_real_ivl)
[PROOF STATE]
proof (state)
this:
closed_segment a1 a2 \<union> closed_segment a2 a3 \<union> closed_segment a4 a3 \<union> closed_segment a1 a4 = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
goal (1 subgoal):
1. path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
goal (1 subgoal):
1. path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
path_image (rectpath a1 a3) = {z. Re z \<in> {Re a1, Re a3} \<and> Im z \<in> {Im a1..Im a3}} \<union> {z. Im z \<in> {Im a1, Im a3} \<and> Re z \<in> {Re a1..Re a3}}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2111, "file": null, "length": 12}
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
###THEANO_FLAGS='cuda.root=/opt/cuda-5.0.35,mode=FAST_RUN,device=gpu0,floatX=float32,exception_verbosity=high' python dnn.py
"""
"""
import logging
import sys
from collections import OrderedDict
import theano
import theano.tensor as T
from layers.layers import LinearLayer, SigmoidLayer, HiddenLayer
from theano.tensor.shared_randomstreams import RandomStreams
from training_schemes.rprop import compile_RPROP_train_function
class DNN(object):
def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
n_outs=10, l1_reg=None, l2_reg=None,
hidden_layers_sizes=[500, 500],
hidden_activation='tanh', output_activation='linear',
use_rprop=0, rprop_init_update=0.001):
logger = logging.getLogger("DNN initialization")
self.sigmoid_layers = []
self.params = []
self.delta_params = []
self.n_layers = len(hidden_layers_sizes)
self.output_activation = output_activation
self.use_rprop = use_rprop
self.rprop_init_update = rprop_init_update
self.l1_reg = l1_reg
self.l2_reg = l2_reg
assert self.n_layers > 0
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x')
self.y = T.matrix('y')
for i in range(self.n_layers):
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
if i == 0:
layer_input = self.x
else:
layer_input = self.sigmoid_layers[-1].output
sigmoid_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
                                        activation=T.tanh)  # alternatively: T.nnet.sigmoid
self.sigmoid_layers.append(sigmoid_layer)
self.params.extend(sigmoid_layer.params)
self.delta_params.extend(sigmoid_layer.delta_params)
# add final layer
if self.output_activation == 'linear':
self.final_layer = LinearLayer(rng=numpy_rng,
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
elif self.output_activation == 'sigmoid':
self.final_layer = SigmoidLayer(
rng=numpy_rng,
input=self.sigmoid_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs, activation=T.nnet.sigmoid)
else:
logger.critical(
"This output activation function: %s is not supported right now!" % (self.output_activation))
sys.exit(1)
self.params.extend(self.final_layer.params)
self.delta_params.extend(self.final_layer.delta_params)
### MSE
self.finetune_cost = T.mean(
T.sum((self.final_layer.output - self.y) * (self.final_layer.output - self.y), axis=1))
self.errors = T.mean(T.sum((self.final_layer.output - self.y) * (self.final_layer.output - self.y), axis=1))
### L1-norm
if self.l1_reg is not None:
for i in range(self.n_layers):
W = self.params[i * 2]
self.finetune_cost += self.l1_reg * (abs(W).sum())
### L2-norm
if self.l2_reg is not None:
for i in range(self.n_layers):
W = self.params[i * 2]
self.finetune_cost += self.l2_reg * T.sqr(W).sum()
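        ### Summary (comment added; not part of the original source):
        ###   finetune_cost = MSE(output, y) + l1_reg * sum_W |W| + l2_reg * sum_W W^2
        ### where the sums run over the hidden-layer weight matrices only.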
def build_finetune_functions(self, train_shared_xy, valid_shared_xy, batch_size, \
return_valid_score_i=False):
(train_set_x, train_set_y) = train_shared_xy
(valid_set_x, valid_set_y) = valid_shared_xy
# compute number of minibatches for training, validation and testing
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
        n_valid_batches //= batch_size  # integer division so range() works under Python 3
index = T.lscalar('index') # index to a [mini]batch
learning_rate = T.fscalar('learning_rate')
momentum = T.fscalar('momentum')
layer_size = len(self.params)
lr_list = []
for i in range(layer_size):
lr_list.append(learning_rate)
##top 2 layers use a smaller learning rate
##hard-code now, change it later
if layer_size > 4:
for i in range(layer_size - 4, layer_size):
lr_list[i] = learning_rate * 0.5
# compute list of fine-tuning updates
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
if self.use_rprop == 0:
updates = OrderedDict()
layer_index = 0
for dparam, gparam in zip(self.delta_params, gparams):
updates[dparam] = momentum * dparam - gparam * lr_list[layer_index]
layer_index += 1
for dparam, param in zip(self.delta_params, self.params):
updates[param] = param + updates[dparam]
on_unused_input_value = 'raise' ## Theano's default
elif self.use_rprop:
updates = compile_RPROP_train_function(self, gparams)
on_unused_input_value = 'warn'
## Retain learning rate and momentum to make interface backwards compatible,
## even with RPROP where we don't use them, means we have to use on_unused_input='warn'.
train_fn = theano.function(inputs=[index, theano.Param(learning_rate, default=0.125),
theano.Param(momentum, default=0.5)],
outputs=self.errors,
updates=updates,
on_unused_input=on_unused_input_value,
givens={self.x: train_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: train_set_y[index * batch_size:
(index + 1) * batch_size]})
valid_fn = theano.function([],
outputs=self.errors,
givens={self.x: valid_set_x,
self.y: valid_set_y})
valid_score_i = theano.function([index],
outputs=self.errors,
givens={self.x: valid_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: valid_set_y[index * batch_size:
(index + 1) * batch_size]})
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in range(n_valid_batches)]
if return_valid_score_i:
return train_fn, valid_fn, valid_score_i
else:
return train_fn, valid_fn
def parameter_prediction(self, test_set_x): # , batch_size
n_test_set_x = test_set_x.get_value(borrow=True).shape[0]
test_out = theano.function([], self.final_layer.output,
givens={self.x: test_set_x[0:n_test_set_x]})
predict_parameter = test_out()
return predict_parameter
## the function to output activations at a hidden layer
def generate_top_hidden_layer(self, test_set_x, bn_layer_index):
n_test_set_x = test_set_x.get_value(borrow=True).shape[0]
test_out = theano.function([], self.sigmoid_layers[bn_layer_index].output,
givens={self.x: test_set_x[0:n_test_set_x]})
predict_parameter = test_out()
return predict_parameter
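## Illustrative usage sketch (added; not part of the original source):
##
##   import numpy
##   rng = numpy.random.RandomState(123)
##   dnn = DNN(rng, n_ins=898, n_outs=229,
##             hidden_layers_sizes=[512, 512, 512],
##             l2_reg=1e-5, output_activation='linear')
##   # build_finetune_functions(...) then compiles the Theano training and
##   # validation functions.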
if __name__ == '__main__':
train_scp = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nn_scp/train.scp'
valid_scp = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nn_scp/gen.scp'
model_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/practice/nnets_model'
log_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/practice/log'
finetune_lr = 0.01
pretraining_epochs = 100
pretrain_lr = 0.01
training_epochs = 100
batch_size = 32
n_ins = 898
n_outs = 229
hidden_layers_sizes = [512, 512, 512]
# test_DBN(train_scp, valid_scp, log_dir, model_dir, n_ins, n_outs, hidden_layers_sizes,
# finetune_lr, pretraining_epochs, pretrain_lr, training_epochs, batch_size)
    # dnn_generation() is not defined anywhere in this file; the intended
    # entry point (test_DBN, commented out above) should be restored instead.
|
{"hexsha": "587a61622845920f47e78c921127e3533d50eb54", "size": 10937, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/dnn.py", "max_stars_repo_name": "shartoo/merlin-tf-slim", "max_stars_repo_head_hexsha": "4c7d48d5f634273dd51d2e29562d3ed1195d9151", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/dnn.py", "max_issues_repo_name": "shartoo/merlin-tf-slim", "max_issues_repo_head_hexsha": "4c7d48d5f634273dd51d2e29562d3ed1195d9151", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/dnn.py", "max_forks_repo_name": "shartoo/merlin-tf-slim", "max_forks_repo_head_hexsha": "4c7d48d5f634273dd51d2e29562d3ed1195d9151", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5074074074, "max_line_length": 124, "alphanum_fraction": 0.5882783213, "include": true, "reason": "import theano,from theano", "num_tokens": 2337}
|
%!TEX root = ../../main.tex
\chapter{LaTeX Example Code}
This is the text of the first chapter. Only references that are actually cited are printed in the bibliography: \cite[pp.12 ff]{baumgartner:2002}, \cite[pp.1-3]{dreyfus:1980}
My first footnote\footnote{I am a footnote} must not be missing either. Footnotes are there to explain terms in more detail, even though the experienced reader probably knows them already.
\begin{figure}[h]
\centering
\includegraphics[height=.8\textwidth]{logo.png}
\caption{The logo of the sample company\footnotemark}
\end{figure}
%\begin{wrapfigure}{r}{.4\textwidth}
%\centering
%\includegraphics[height=.35\textwidth]{logo.png}
%\vspace{-15pt}
%\caption{The logo of the sample company\footnotemark}
%\end{wrapfigure}
%The source must be given in a footnote (otherwise an error prevents
% compilation)
\footnotetext{from \cite{mustermann:2012}}
Looking for the one superhero comic you just have to read. Following the antics and adventures of May Mayday Parker, this Spider-book has everything you could want in a comic--action, laughs, mystery and someone in a Spidey suit. Collects Alias \#1-28, What If. Jessica Jones had Joined the Avengers. In her inaugural arc, Jessicas life immediately becomes expendable when she uncovers the potentially explosive secret of one heros true identity. In her inaugural arc, Jessicas life immediately becomes expendable when she uncovers the potentially explosive secret of one heros true identity.
Sometimes you also need formulas. LaTeX has a very good formula editor that is essentially self-explanatory. Formulas are numbered automatically, but you can always refer to them in the text via a label, as in equation \ref{xyz}.
\begin{equation}
t-t_{0}=\sqrt{\frac{l}{g}}\int_{0}^{\varphi}{\frac{d\psi}{\sqrt{1-k^{2}\sin^{2} {\psi}}}} = \sqrt{\frac{l}{g}} F(k,\varphi)
\label{xyz}
\end{equation}
Sometimes you need lists whose points are set off individually.
\begin{itemize}
\item This is the first point being listed.
\item This is the second point being listed. Sometimes you also want to print something in \textbf{bold} or \textit{italic} or \textbf{\textit{both combined}}.
\item This is the third point being listed.
\end{itemize}
Once upon a time, Jessica Jones was a costumed super-hero, just not a very good one. First, a story where Wolverine and Hulk come together, and then Captain America and Cable meet up. In a city of Marvels, Jessica Jones never found her niche. The classic adventures of Spider-Man from the early days up until the 90s. Looking for the one superhero comic you just have to read. In her inaugural arc, Jessicas life immediately becomes expendable when she uncovers the potentially explosive secret of one heros true identity.
The first mention of an acronym is shown with a footnote. Every further mention
is only linked: \acf{AGPL}. \cite{fsf:2007}
References to the glossary: \gls{Glossareintrag}, \glspl{Glossareintrag}
%title is printed below the example
%caption is printed in the list of listings
%label is used for referencing and must be unique.
\begin{lstlisting}[caption={Code example}, label=Bsp.1]
public class HelloWorld {
public static void main (String[] args) {
        // print Hello World!
System.out.println("Hello World!");
}
}
\end{lstlisting}
%language changes the language. (If only one language is used, it can be set in einstellungen.tex. Java is the default.)
\begin{lstlisting}[caption={Python code}, label=Python-Code, language=Python]
def quicksort(liste):
if len(liste) <= 1:
return liste
pivotelement = liste.pop()
links = [element for element in liste if element < pivotelement]
rechts = [element for element in liste if element >= pivotelement]
return quicksort(links) + [pivotelement] + quicksort(rechts)
# Source: http://de.wikipedia.org/wiki/Python_(Programmiersprache)
\end{lstlisting}
\section{lorem ipsum}
Looking for the one superhero comic you just have to read. Following the antics and adventures of May Mayday Parker, this Spider-book has everything you could want in a comic--action, laughs, mystery and someone in a Spidey suit. Collects Alias \#1-28, What If. Jessica Jones had Joined the Avengers. In her inaugural arc, Jessicas life immediately becomes expendable when she uncovers the potentially explosive secret of one heros true identity.
Sometimes you also need tables. An example can be seen in Table \ref{tabelle1}, which can be given an arbitrary label. The table then automatically appears in the list of tables.
\begin{table}[h!]
\begin{center}
\begin{tabular}{ | m{5cm} | m{1cm}| m{1cm} | }
\hline
cell1 dummy text dummy text dummy text& cell2 & cell3 \\
\hline
cell1 dummy text dummy text dummy text & cell5 & cell6 \\
\hline
cell7 & cell8 & cell9 \\
\hline
\end{tabular}
\end{center}
\caption{Test of the table's functionality and rendering}
\label{tabelle1}
\end{table}
Once upon a time, Jessica Jones was a costumed super-hero, just not a very good one. First, a story where Wolverine and Hulk come together, and then Captain America and Cable meet up. In a city of Marvels, Jessica Jones never found her niche. The classic adventures of Spider-Man from the early days up until the 90s. Looking for the one superhero comic you just have to read.
Meet all of Spideys deadly enemies, from the Green Goblin and Doctor Octopus to Venom and Carnage, plus see Peter Parker fall in love, face tragedy and triumph, and learn that with great power comes great responsibility. In a city of Marvels, Jessica Jones never found her niche. Bitten by a radioactive spider, high school student Peter Parker gained the speed, strength and powers of a spider. Looking for the one superhero comic you just have to read. What do you get when you ask the question, What if Spider-Man had a daughter.
The classic adventures of Spider-Man from the early days up until the 90s. Amazing Spider-Man is the cornerstone of the Marvel Universe. But will each partner’s combined strength be enough. Adopting the name Spider-Man, Peter hoped to start a career using his new abilities. Youve found it.
\section{Referencing Code}
A reference to the code listing \autoref{Bsp.1}\\
and to the Python code \autoref{Python-Code}.
Second mention of an abbreviation \ac{AGPL} (the explanation is no longer shown)
|
{"hexsha": "9152d63d1c80cbba23f62739ae42868f09d1ed9e", "size": 6601, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "content/chapter/codesnippets.tex", "max_stars_repo_name": "sezuisa/T2000_Sarah_Haegele", "max_stars_repo_head_hexsha": "906b8194849a0cbbb0d002258ac9007ccdf13797", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "content/chapter/codesnippets.tex", "max_issues_repo_name": "sezuisa/T2000_Sarah_Haegele", "max_issues_repo_head_hexsha": "906b8194849a0cbbb0d002258ac9007ccdf13797", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "content/chapter/codesnippets.tex", "max_forks_repo_name": "sezuisa/T2000_Sarah_Haegele", "max_forks_repo_head_hexsha": "906b8194849a0cbbb0d002258ac9007ccdf13797", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 61.691588785, "max_line_length": 592, "alphanum_fraction": 0.7517042872, "num_tokens": 1763}
|
!*******************************************************************************
! File thscte_mc.f
! Contains module thscte_mc
! SUBROUTINE thscte_mc_model_compute
!
! Module is part of the V3FIT stellarator equilibrium reconstruction code.
! The postfix _mc in the name indicates Model Compute
! The module contains the subroutine thscte_mc_model_compute
! Both an thscte_desc and a model are needed for this computation.
!*******************************************************************************
! MODULE thscte_mc
! (thscte - Thomson Scattering Te Model Compute Calculations)
! SECTION I. VARIABLE DECLARATIONS
! SECTION II. INTERFACE BLOCKS
! SECTION III. MAIN COMPUTATIONAL SUBROUTINE
!    SECTION IV.   _OLD (SUPERSEDED) CODE
! SECTION X. COMMENTS FOR DIFFERENT REVISIONS
!*******************************************************************************
MODULE thscte_mc
!*******************************************************************************
! SECTION I. VARIABLE DECLARATIONS
!*******************************************************************************
!-------------------------------------------------------------------------------
! Type declarations, constants, utilities
!-------------------------------------------------------------------------------
USE stel_kinds
USE v3f_global
USE vmec_utils
USE read_wout_mod
!-------------------------------------------------------------------------------
! thscte Derived Type
!-------------------------------------------------------------------------------
USE thscte_T
!-------------------------------------------------------------------------------
! Model Derived Types
!-------------------------------------------------------------------------------
USE model_T
!------------------------------------------------------------------------------
IMPLICIT NONE
!*******************************************************************************
! SECTION II. INTERFACE BLOCKS
!*******************************************************************************
!-------------------------------------------------------------------------------
!-------------------------------------------------------------------------------
CONTAINS
!*******************************************************************************
! SECTION III. MAIN COMPUTATIONAL SUBROUTINE
!*******************************************************************************
!-------------------------------------------------------------------------------
! Compute an thscte signal
!
! Information comes from the thscte_desc and the model
!-------------------------------------------------------------------------------
!-------------------------------------------------------------------------------
! Actual computation of the model signal is in this subroutine
! s_type = diagnostic
! d_type = thscte
! Thomson scattering Te
! signal_model_compute_thscte
!
!-------------------------------------------------------------------------------
SUBROUTINE thscte_mc_model_compute(a_thscte,a_model,mod_signal, &
& mod_sigma)
!-------------------------------------------------------------------------------
! ARGUMENTS
! a_thscte - type thscte_desc - holds Thomson Scattering info
! a_model - type model - holds eq_state and te profile information
! mod_signal - output of the generated signal
! mod_sigma - output sigma
!-------------------------------------------------------------------------------
TYPE (thscte_desc), INTENT (inout) :: a_thscte
TYPE (model), INTENT (inout), TARGET :: a_model
REAL(rprec), POINTER, DIMENSION(:) :: mod_signal, mod_sigma
!-------------------------------------------------------------------------------
! Local Variables
!-------------------------------------------------------------------------------
INTEGER :: istat1
REAL(rprec), DIMENSION(3) :: r_cyl(1:3)=0.0
REAL(rprec), DIMENSION(3) :: xcart
REAL(rprec) :: te
CHARACTER(len=*), PARAMETER :: sub_name = &
& 'thscte_mc_model_compute'
!-------------------------------------------------------------------------------
! START OF EXECUTABLE CODE
!-------------------------------------------------------------------------------
! Allocate data and sigma
IF (ASSOCIATED(mod_signal)) THEN
DEALLOCATE(mod_signal,mod_sigma, stat=istat1)
CALL assert_eq(0,istat1,sub_name // &
& 'mod_signal, sigma dealloc')
ENDIF
ALLOCATE(mod_signal(1),mod_sigma(1),stat=istat1)
CALL assert_eq(0,istat1,sub_name // 'mod_signal, sigma alloc')
xcart = a_thscte % xcart
r_cyl(1) = SQRT(xcart(1) ** 2 + xcart(2) ** 2)
r_cyl(2) = ATAN2(xcart(2),xcart(1))
r_cyl(3) = xcart(3)
te = model_get_te_xcyl(a_model,r_cyl)
mod_signal(1) = te
! make up a sigma
mod_sigma(1)=0.
END SUBROUTINE thscte_mc_model_compute
!*******************************************************************************
! SECTION X. COMMENTS FOR DIFFERENT REVISIONS
!*******************************************************************************
!
! JDH 2011-10-23
! First version for thscte_mc. Based on sxrch_mc
END MODULE thscte_mc
|
{"hexsha": "a7f17dc607ee9b182a7e221deefe0c11a48b8728", "size": 5500, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/V3FITA/Sources/thscte_mc.f", "max_stars_repo_name": "jonathanschilling/VMEC_8_49", "max_stars_repo_head_hexsha": "9f1954d83b2db13f4f4b58676badda4425caeeee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/V3FITA/Sources/thscte_mc.f", "max_issues_repo_name": "jonathanschilling/VMEC_8_49", "max_issues_repo_head_hexsha": "9f1954d83b2db13f4f4b58676badda4425caeeee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/V3FITA/Sources/thscte_mc.f", "max_forks_repo_name": "jonathanschilling/VMEC_8_49", "max_forks_repo_head_hexsha": "9f1954d83b2db13f4f4b58676badda4425caeeee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.3076923077, "max_line_length": 81, "alphanum_fraction": 0.3592727273, "num_tokens": 986}
|
using Optim
using BITEModel
const BM=BITEModel
######
# Choose glacier & options
######
plotyes = false
# ITMIX glacier with IV data:
glaciers_iv = [:Brewster, :Hellstugubreen, :NorthGlacier,
:SouthGlacier, :Tasman, :Unteraar, :Synthetic1,
:Synthetic2, :Synthetic3]
glacier = glaciers_iv[1]
glacier = :Unteraar
gid = [ITMIXGlacier(glacier), SyntheticGlacier(:bench)][1]
pl_kws = Dict{Symbol,Any}()
if isa(gid, ITMIXGlacier)
pl_kws[:use_glogem] = true
end
gl,gb,pp,pm,pn,pl = BM.init_forward(gid; pl_kws...)
####################
# Do it
####################
(theta0, logposterior, logposterior1d, logprior, logprior_debug,
loglikelihood, loglikelihood1d, fwdm_fn, fwdm1d_fn,
pmcmc_defaults, fit_target) =
BM.init_inverse(gb, pp, pm, pn, n_1d_theta=n_1d_theta, run=run,
theta0_dict = theta0_dict )
# some tests:
fwdm_fn(theta0.th0);
print("Time to run forward model:")
@time fwdsol = fwdm_fn(theta0.th0);
print("Posterior value:")
@show logposterior(theta0.th0)[1]
print("Time to calculate posterior:")
@time logposterior(theta0.th0)[1]
########
# max posterior estimation
#######
if false
println("Optimizing for maximum posterior...")
theta_max2 = optimize(theta_vec -> -logposterior(theta_vec)[1], float(theta0.th0), iterations=200, f_tol=1.0)
# these don't work so well:
# theta_max2 = optimize(theta_vec -> -logposterior(theta_vec)[1], float(theta0.th0), GradientDescent())
#theta_max2 = optimize(theta_vec -> -logposterior(theta_vec)[1], float(theta0.th0), LBFGS())
@show theta_max2
error("asdf")
theta0.th0[:] = theta_max2.minimizer
end
###############
## MCMC fitting
###############
pmcmc = BM.MCMCNum(pmcmc,
niter=0.1*10^6,
nthin = 50)
# sample posterior
using KissMCMC  # loaded before the first use of mcmc/print_results below
res = mcmc(logposterior, theta0, pmcmc; verbose=true)
varnames = BM.get_varnames(theta0)
print_results(res.thetas, res.accept_ratio, names=varnames)
# sample the prior to get that distribution too:
pmcmc_prior = BM.MCMCNum(pmcmc, niter=0.1*10^6,
nthin = 50)
res_prior = mcmc(logprior, theta0, pmcmc_prior; verbose=true)
# BM.savemcmc(thetas, blobs, gb,
# pp, pm, pn, pmcmc,
# theta0, run, sigmas, error_on_dirty=false)
#end
# ### plots
if plotyes
display(BM.plotinv1d(gb, blobs, reuse=false))
display(BM.plotinv1d_err(gb, blobs, reuse=false))
display(BM.plotinv2d(gl, blobs, reuse=false))
display(BM.plotinv2d_h(gl, blobs, reuse=false))
display(BM.plotinv2d_iverr(gl, blobs, reuse=false))
display(BM.plottheta(thetas, theta0))
display(BM.plottheta(thetas, theta0, toplot=:btilde, reuse=false))
display(BM.plottheta(thetas, theta0, toplot=:fsl, reuse=false))
display(BM.plottheta(thetas, theta0, toplot=:temp, reuse=false))
display(BM.plottheta_violin((thetas, thetas_prior), theta0, :btilde, reuse=false, width=1))
display(BM.plottheta_violin((thetas, thetas_prior), theta0, :fsl, reuse=false, width=1))
display(BM.plottheta_violin((thetas, thetas_prior), theta0, :temp, reuse=false, width=1))
end
|
{"hexsha": "f428931afdf4a17ac0d0f71793a14d4c5557408d", "size": 3147, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/inverse.jl", "max_stars_repo_name": "mauro3/BITEmodel.jl", "max_stars_repo_head_hexsha": "897eca85fc3c3b736ef49e23850b8f4bd6f2806a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-09-23T00:07:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T18:32:49.000Z", "max_issues_repo_path": "scripts/inverse.jl", "max_issues_repo_name": "mauro3/BITEmodel.jl", "max_issues_repo_head_hexsha": "897eca85fc3c3b736ef49e23850b8f4bd6f2806a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/inverse.jl", "max_forks_repo_name": "mauro3/BITEmodel.jl", "max_forks_repo_head_hexsha": "897eca85fc3c3b736ef49e23850b8f4bd6f2806a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-11-27T17:01:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T02:47:41.000Z", "avg_line_length": 31.1584158416, "max_line_length": 113, "alphanum_fraction": 0.6673021926, "num_tokens": 951}
|
module MultilayerQG
export
fwdtransform!,
invtransform!,
streamfunctionfrompv!,
pvfromstreamfunction!,
updatevars!,
set_q!,
set_ψ!,
energies,
fluxes
using
FFTW,
CUDA,
LinearAlgebra,
StaticArrays,
Reexport
@reexport using FourierFlows
using LinearAlgebra: mul!, ldiv!
using FFTW: rfft, irfft
using FourierFlows: parsevalsum, parsevalsum2, superzeros, plan_flows_rfft
nothingfunction(args...) = nothing
"""
    Problem(nlayers, dev=CPU(); parameters...)
Construct a multi-layer QG problem.
"""
function Problem(nlayers::Int, # number of fluid layers
dev = CPU();
# Numerical parameters
nx = 128,
Lx = 2π,
ny = nx,
Ly = Lx,
dt = 0.01,
# Physical parameters
f₀ = 1.0, # Coriolis parameter
β = 0.0, # y-gradient of Coriolis parameter
g = 1.0, # gravitational constant
U = zeros(nlayers), # imposed zonal flow U(y) in each layer
H = 1/nlayers * ones(nlayers), # rest fluid height of each layer
ρ = Array{Float64}(1:nlayers), # density of each layer
eta = nothing, # topographic PV
# Bottom Drag and/or (hyper)-viscosity
μ = 0,
ν = 0,
nν = 1,
# Timestepper and equation options
stepper = "RK4",
calcFq = nothingfunction,
stochastic = false,
linear = false,
T = Float64)
# topographic PV
eta === nothing && (eta = zeros(dev, T, (nx, ny)))
grid = TwoDGrid(dev, nx, Lx, ny, Ly; T=T)
params = Params(nlayers, g, f₀, β, ρ, H, U, eta, μ, ν, nν, grid, calcFq=calcFq, dev=dev)
vars = calcFq == nothingfunction ? Vars(dev, grid, params) : (stochastic ? StochasticForcedVars(dev, grid, params) : ForcedVars(dev, grid, params))
eqn = linear ? LinearEquation(dev, params, grid) : Equation(dev, params, grid)
FourierFlows.Problem(eqn, stepper, dt, grid, vars, params, dev)
end
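# Illustrative usage sketch (added; not part of the original source); the
# parameter values below are arbitrary:
#
#   prob = Problem(2; nx=64, f₀=1.0, β=0.1, U=[0.1, 0.0], H=[0.2, 0.8], ρ=[1.0, 2.0])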
abstract type BarotropicParams <: AbstractParams end
struct Params{T, Aphys3D, Aphys2D, Aphys1D, Atrans4D, Trfft} <: AbstractParams
# prescribed params
nlayers :: Int # Number of fluid layers
g :: T # Gravitational constant
f₀ :: T # Constant planetary vorticity
β :: T # Planetary vorticity y-gradient
ρ :: Aphys3D # Array with density of each fluid layer
H :: Aphys3D # Array with rest height of each fluid layer
U :: Aphys3D # Array with imposed constant zonal flow U(y) in each fluid layer
eta :: Aphys2D # Array containing topographic PV
μ :: T # Linear bottom drag
ν :: T # Viscosity coefficient
nν :: Int # Hyperviscous order (nν=1 is plain old viscosity)
calcFq! :: Function # Function that calculates the forcing on QGPV q
# derived params
g′ :: Aphys1D # Array with the reduced gravity constants for each fluid interface
Qx :: Aphys3D # Array containing x-gradient of PV due to eta in each fluid layer
Qy :: Aphys3D # Array containing y-gradient of PV due to β, U, and eta in each fluid layer
          S :: Atrans4D       # Array containing coefficients for getting PV from streamfunction
        S⁻¹ :: Atrans4D       # Array containing coefficients for inverting PV to streamfunction
rfftplan :: Trfft # rfft plan for FFTs
end
struct SingleLayerParams{T, Aphys3D, Aphys2D, Trfft} <: BarotropicParams
# prescribed params
β :: T # Planetary vorticity y-gradient
U :: Aphys3D # Imposed constant zonal flow U(y)
eta :: Aphys2D # Array containing topographic PV
μ :: T # Linear bottom drag
ν :: T # Viscosity coefficient
nν :: Int # Hyperviscous order (nν=1 is plain old viscosity)
calcFq! :: Function # Function that calculates the forcing on QGPV q
# derived params
Qx :: Aphys3D # Array containing x-gradient of PV due to eta
Qy :: Aphys3D # Array containing meridional PV gradient due to β, U, and eta
rfftplan :: Trfft # rfft plan for FFTs
end
function convert_U_to_U3D(dev, nlayers, grid, U::AbstractArray{TU, 1}) where TU
T = eltype(grid)
if length(U) == nlayers
U_2D = zeros(dev, T, (1, nlayers))
U_2D[:] = U
U_2D = repeat(U_2D, outer=(grid.ny, 1))
else
U_2D = zeros(dev, T, (grid.ny, 1))
U_2D[:] = U
end
U_3D = zeros(dev, T, (1, grid.ny, nlayers))
@views U_3D[1, :, :] = U_2D
return U_3D
end
function convert_U_to_U3D(dev, nlayers, grid, U::AbstractArray{TU, 2}) where TU
T = eltype(grid)
U_3D = zeros(dev, T, (1, grid.ny, nlayers))
@views U_3D[1, :, :] = U
return U_3D
end
function convert_U_to_U3D(dev, nlayers, grid, U::Number)
T = eltype(grid)
A = ArrayType(dev)
U_3D = reshape(repeat([T(U)], outer=(grid.ny, 1)), (1, grid.ny, nlayers))
return A(U_3D)
end
function Params(nlayers, g, f₀, β, ρ, H, U, eta, μ, ν, nν, grid; calcFq=nothingfunction, effort=FFTW.MEASURE, dev::Device=CPU())
T = eltype(grid)
A = ArrayType(dev)
ny, nx = grid.ny , grid.nx
nkr, nl = grid.nkr, grid.nl
kr, l = grid.kr , grid.l
U = convert_U_to_U3D(dev, nlayers, grid, U)
Uyy = real.(ifft(-l.^2 .* fft(U)))
Uyy = repeat(Uyy, outer=(nx, 1, 1))
etah = rfft(A(eta))
etax = irfft(im * kr .* etah, nx)
etay = irfft(im * l .* etah, nx)
Qx = zeros(dev, T, (nx, ny, nlayers))
@views @. Qx[:, :, nlayers] += etax
Qy = zeros(dev, T, (nx, ny, nlayers))
Qy = T(β) .- Uyy # T(β) is needed to ensure that Qy remains same type as U
@views @. Qy[:, :, nlayers] += etay
rfftplanlayered = plan_flows_rfft(A{T, 3}(undef, grid.nx, grid.ny, nlayers), [1, 2]; flags=effort)
if nlayers==1
return SingleLayerParams(T(β), U, eta, T(μ), T(ν), nν, calcFq, Qx, Qy, rfftplanlayered)
else # if nlayers≥2
ρ = reshape(T.(ρ), (1, 1, nlayers))
H = reshape(T.(H), (1, 1, nlayers))
g′ = T(g) * (ρ[2:nlayers] - ρ[1:nlayers-1]) ./ ρ[2:nlayers] # reduced gravity at each interface
Fm = @. T(f₀^2 / (g′ * H[2:nlayers]))
Fp = @. T(f₀^2 / (g′ * H[1:nlayers-1]))
typeofSkl = SArray{Tuple{nlayers, nlayers}, T, 2, nlayers^2} # StaticArrays of type T and dims = (nlayers x nlayers)
S = Array{typeofSkl, 2}(undef, (nkr, nl))
calcS!(S, Fp, Fm, nlayers, grid)
S⁻¹ = Array{typeofSkl, 2}(undef, (nkr, nl))
calcS⁻¹!(S⁻¹, Fp, Fm, nlayers, grid)
S, S⁻¹, Fp, Fm = A(S), A(S⁻¹), A(Fp), A(Fm) # convert to appropriate ArrayType
CUDA.@allowscalar @views Qy[:, :, 1] = @. Qy[:, :, 1] - Fp[1] * (U[:, :, 2] - U[:, :, 1])
for j = 2:nlayers-1
CUDA.@allowscalar @views Qy[:, :, j] = @. Qy[:, :, j] - Fp[j] * (U[:, :, j+1] - U[:, :, j]) + Fm[j-1] * (U[:, :, j-1] - U[:, :, j])
end
CUDA.@allowscalar @views Qy[:, :, nlayers] = @. Qy[:, :, nlayers] - Fm[nlayers-1] * (U[:, :, nlayers-1] - U[:, :, nlayers])
return Params(nlayers, T(g), T(f₀), T(β), A(ρ), A(H), U, eta, T(μ), T(ν), nν, calcFq, A(g′), Qx, Qy, S, S⁻¹, rfftplanlayered)
end
end
numberoflayers(params) = params.nlayers
numberoflayers(::SingleLayerParams) = 1
# ---------
# Equations
# ---------
function hyperdissipation(dev, params, grid)
T = eltype(grid)
L = ArrayType(dev){T}(undef, (grid.nkr, grid.nl, numberoflayers(params)))
@. L = - params.ν * grid.Krsq^params.nν
@views @. L[1, 1, :] = 0
return L
end
function LinearEquation(dev, params, grid)
L = hyperdissipation(dev, params, grid)
return FourierFlows.Equation(L, calcNlinear!, grid)
end
function Equation(dev, params, grid)
L = hyperdissipation(dev, params, grid)
return FourierFlows.Equation(L, calcN!, grid)
end
# ----
# Vars
# ----
struct Vars{Aphys, Atrans, F, P} <: AbstractVars
q :: Aphys
ψ :: Aphys
u :: Aphys
v :: Aphys
qh :: Atrans
ψh :: Atrans
uh :: Atrans
vh :: Atrans
Fqh :: F
prevsol :: P
end
const ForcedVars = Vars{<:AbstractArray, <:AbstractArray, <:AbstractArray, Nothing}
const StochasticForcedVars = Vars{<:AbstractArray, <:AbstractArray, <:AbstractArray, <:AbstractArray}
"""
Vars(dev, grid, params)
Returns the vars for unforced multi-layer QG problem with `grid` and `params`.
"""
function Vars(dev::Dev, grid, params) where Dev
T = eltype(grid)
nlayers = numberoflayers(params)
@devzeros Dev T (grid.nx, grid.ny, nlayers) q ψ u v
@devzeros Dev Complex{T} (grid.nkr, grid.nl, nlayers) qh ψh uh vh
return Vars(q, ψ, u, v, qh, ψh, uh, vh, nothing, nothing)
end
"""
ForcedVars(dev, grid, params)
Returns the vars for forced multi-layer QG problem with `grid` and `params`.
"""
function ForcedVars(dev::Dev, grid, params) where Dev
T = eltype(grid)
nlayers = numberoflayers(params)
@devzeros Dev T (grid.nx, grid.ny, nlayers) q ψ u v
@devzeros Dev Complex{T} (grid.nkr, grid.nl, nlayers) qh ψh uh vh Fqh
return Vars(q, ψ, u, v, qh, ψh, uh, vh, Fqh, nothing)
end
"""
    StochasticForcedVars(dev, grid, params)
Returns the vars for stochastically forced multi-layer QG problem with `grid` and `params`.
"""
function StochasticForcedVars(dev::Dev, grid, params) where Dev
T = eltype(grid)
nlayers = numberoflayers(params)
@devzeros Dev T (grid.nx, grid.ny, nlayers) q ψ u v
@devzeros Dev Complex{T} (grid.nkr, grid.nl, nlayers) qh ψh uh vh Fqh prevsol
return Vars(q, ψ, u, v, qh, ψh, uh, vh, Fqh, prevsol)
end
fwdtransform!(varh, var, params::AbstractParams) = mul!(varh, params.rfftplan, var)
invtransform!(var, varh, params::AbstractParams) = ldiv!(var, params.rfftplan, varh)
function streamfunctionfrompv!(ψh, qh, params, grid)
for j=1:grid.nl, i=1:grid.nkr
CUDA.@allowscalar @views ψh[i, j, :] .= params.S⁻¹[i, j] * qh[i, j, :]
end
end
function pvfromstreamfunction!(qh, ψh, params, grid)
for j=1:grid.nl, i=1:grid.nkr
CUDA.@allowscalar @views qh[i, j, :] .= params.S[i, j] * ψh[i, j, :]
end
end
function streamfunctionfrompv!(ψh, qh, params::SingleLayerParams, grid)
@. ψh = -grid.invKrsq * qh
end
function pvfromstreamfunction!(qh, ψh, params::SingleLayerParams, grid)
@. qh = -grid.Krsq * ψh
end
"""
calcS!(S, Fp, Fm, nlayers, grid)
Constructs the array S, which consists of nlayers × nlayers static arrays S_kl that relate
the q's and ψ's at every wavenumber: q̂_{k, l} = S_kl * ψ̂_{k, l}.
"""
function calcS!(S, Fp, Fm, nlayers, grid)
F = Matrix(Tridiagonal(Fm, -([Fp; 0] + [0; Fm]), Fp))
for n=1:grid.nl, m=1:grid.nkr
CUDA.@allowscalar k² = grid.Krsq[m, n]
Skl = SMatrix{nlayers, nlayers}(- k² * I + F)
S[m, n] = Skl
end
return nothing
end
"""
    calcS⁻¹!(S⁻¹, Fp, Fm, nlayers, grid)
Constructs the array S⁻¹, which consists of nlayers × nlayers static arrays (S_kl)⁻¹ that
relate the q's and ψ's at every wavenumber: ψ̂_{k, l} = (S_kl)⁻¹ * q̂_{k, l}.
"""
function calcS⁻¹!(S⁻¹, Fp, Fm, nlayers, grid)
T = eltype(grid)
F = Matrix(Tridiagonal(Fm, -([Fp; 0] + [0; Fm]), Fp))
for n=1:grid.nl, m=1:grid.nkr
CUDA.@allowscalar k² = grid.Krsq[m, n] == 0 ? 1 : grid.Krsq[m, n]
Skl = - k² * I + F
S⁻¹[m, n] = SMatrix{nlayers, nlayers}(I / Skl)
end
S⁻¹[1, 1] = SMatrix{nlayers, nlayers}(zeros(T, (nlayers, nlayers)))
return nothing
end
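# Illustrative sketch (not used by the solver): in the two-layer case the
# stretching matrix reduces to F = [-Fp[1] Fp[1]; Fm[1] -Fm[1]], so at wavenumber
# (k, l) with k² = grid.Krsq[m, n] the coupling matrix is
#   S_kl = -k² * I + F = [ -(k² + Fp[1])    Fp[1]
#                            Fm[1]        -(k² + Fm[1]) ]
# and q̂_{k, l} = S_kl * ψ̂_{k, l} couples the two layer streamfunctions.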
# -------
# Solvers
# -------
function calcN!(N, sol, t, clock, vars, params, grid)
nlayers = numberoflayers(params)
calcN_advection!(N, sol, vars, params, grid)
@views @. N[:, :, nlayers] += params.μ * grid.Krsq * vars.ψh[:, :, nlayers] # bottom linear drag
addforcing!(N, sol, t, clock, vars, params, grid)
return nothing
end
function calcNlinear!(N, sol, t, clock, vars, params, grid)
nlayers = numberoflayers(params)
calcN_linearadvection!(N, sol, vars, params, grid)
@views @. N[:, :, nlayers] += params.μ * grid.Krsq * vars.ψh[:, :, nlayers] # bottom linear drag
addforcing!(N, sol, t, clock, vars, params, grid)
return nothing
end
"""
calcN_advection!(N, sol, vars, params, grid)
Calculates the advection term.
"""
function calcN_advection!(N, sol, vars, params, grid)
@. vars.qh = sol
streamfunctionfrompv!(vars.ψh, vars.qh, params, grid)
@. vars.uh = -im * grid.l * vars.ψh
@. vars.vh = im * grid.kr * vars.ψh
invtransform!(vars.u, vars.uh, params)
@. vars.u += params.U # add the imposed zonal flow U
uQx, uQxh = vars.q, vars.uh # use vars.q and vars.uh as scratch variables
@. uQx = vars.u * params.Qx # (U+u)*∂Q/∂x
fwdtransform!(uQxh, uQx, params)
@. N = - uQxh # -\hat{(U+u)*∂Q/∂x}
invtransform!(vars.v, vars.vh, params)
vQy, vQyh = vars.q, vars.vh # use vars.q and vars.vh as scratch variables
@. vQy = vars.v * params.Qy # v*∂Q/∂y
fwdtransform!(vQyh, vQy, params)
@. N -= vQyh # -\hat{v*∂Q/∂y}
invtransform!(vars.q, vars.qh, params)
uq , vq = vars.u , vars.v # use vars.u and vars.v as scratch variables
uqh, vqh = vars.uh, vars.vh # use vars.uh and vars.vh as scratch variables
@. uq *= vars.q # (U+u)*q
@. vq *= vars.q # v*q
fwdtransform!(uqh, uq, params)
fwdtransform!(vqh, vq, params)
@. N -= im * grid.kr * uqh + im * grid.l * vqh # -\hat{∂[(U+u)q]/∂x} - \hat{∂[vq]/∂y}
return nothing
end
"""
calcN_linearadvection!(N, sol, vars, params, grid)
Calculates the advection term of the linearized equations.
"""
function calcN_linearadvection!(N, sol, vars, params, grid)
@. vars.qh = sol
streamfunctionfrompv!(vars.ψh, vars.qh, params, grid)
@. vars.uh = -im * grid.l * vars.ψh
@. vars.vh = im * grid.kr * vars.ψh
invtransform!(vars.u, vars.uh, params)
@. vars.u += params.U # add the imposed zonal flow U
uQx, uQxh = vars.q, vars.uh # use vars.q and vars.uh as scratch variables
@. uQx = vars.u * params.Qx # (U+u)*∂Q/∂x
fwdtransform!(uQxh, uQx, params)
@. N = - uQxh # -\hat{(U+u)*∂Q/∂x}
invtransform!(vars.v, vars.vh, params)
vQy, vQyh = vars.q, vars.vh # use vars.q and vars.vh as scratch variables
@. vQy = vars.v * params.Qy # v*∂Q/∂y
fwdtransform!(vQyh, vQy, params)
@. N -= vQyh # -\hat{v*∂Q/∂y}
invtransform!(vars.q, vars.qh, params)
@. vars.u = params.U
Uq , Uqh = vars.u , vars.uh # use vars.u and vars.uh as scratch variables
@. Uq *= vars.q # U*q
fwdtransform!(Uqh, Uq, params)
@. N -= im * grid.kr * Uqh # -\hat{∂[U*q]/∂x}
return nothing
end
addforcing!(N, sol, t, clock, vars::Vars, params, grid) = nothing
function addforcing!(N, sol, t, clock, vars::ForcedVars, params, grid)
params.calcFq!(vars.Fqh, sol, t, clock, vars, params, grid)
@. N += vars.Fqh
return nothing
end
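# Illustrative sketch of a user-provided forcing function (hypothetical; any
# function with this signature can be supplied as the forcing field of Params):
# function mycalcFq!(Fqh, sol, t, clock, vars, params, grid)
#   @. Fqh = 0                                # start from zero forcing
#   CUDA.@allowscalar Fqh[2, 3, 1] = 1e-3     # excite a single mode in the top layer
#   return nothing
# end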
# ----------------
# Helper functions
# ----------------
"""
updatevars!(vars, params, grid, sol)
updatevars!(prob)
Update all problem variables using `sol`.
"""
function updatevars!(vars, params, grid, sol)
@. vars.qh = sol
streamfunctionfrompv!(vars.ψh, vars.qh, params, grid)
@. vars.uh = -im * grid.l * vars.ψh
@. vars.vh = im * grid.kr * vars.ψh
invtransform!(vars.q, deepcopy(vars.qh), params)
invtransform!(vars.ψ, deepcopy(vars.ψh), params)
invtransform!(vars.u, deepcopy(vars.uh), params)
invtransform!(vars.v, deepcopy(vars.vh), params)
return nothing
end
updatevars!(prob) = updatevars!(prob.vars, prob.params, prob.grid, prob.sol)
"""
set_q!(sol, params, vars, grid, q)
    set_q!(prob, q)
Set the solution `prob.sol` as the transform of `q` and update all problem variables.
"""
function set_q!(sol, params, vars, grid, q)
A = typeof(vars.q)
fwdtransform!(vars.qh, A(q), params)
@. vars.qh[1, 1, :] = 0
@. sol = vars.qh
updatevars!(vars, params, grid, sol)
return nothing
end
function set_q!(sol, params::SingleLayerParams, vars, grid, q::AbstractArray{T, 2}) where T
A = typeof(vars.q[:, :, 1])
q_3D = vars.q
@views q_3D[:, :, 1] = A(q)
set_q!(sol, params, vars, grid, q_3D)
return nothing
end
set_q!(prob, q) = set_q!(prob.sol, prob.params, prob.vars, prob.grid, q)
"""
    set_ψ!(sol, params, vars, grid, ψ)
    set_ψ!(prob, ψ)
Set the solution `prob.sol` to correspond to the transform of streamfunction `ψ` and
update all problem variables.
"""
function set_ψ!(sol, params, vars, grid, ψ)
A = typeof(vars.ψ)
fwdtransform!(vars.ψh, A(ψ), params)
pvfromstreamfunction!(vars.qh, vars.ψh, params, grid)
invtransform!(vars.q, vars.qh, params)
set_q!(sol, params, vars, grid, vars.q)
return nothing
end
function set_ψ!(sol, params::SingleLayerParams, vars, grid, ψ::AbstractArray{T, 2}) where T
A = typeof(vars.ψ[:, :, 1])
ψ_3D = vars.ψ
@views ψ_3D[:, :, 1] = A(ψ)
set_ψ!(sol, params, vars, grid, ψ_3D)
return nothing
end
set_ψ!(prob, ψ) = set_ψ!(prob.sol, prob.params, prob.vars, prob.grid, ψ)
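# Illustrative usage (assumes a hypothetical `prob` constructed by this module):
# q₀ = 1e-3 * randn(prob.grid.nx, prob.grid.ny, numberoflayers(prob.params))
# set_q!(prob, q₀)    # or, for a streamfunction initial condition: set_ψ!(prob, ψ₀)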
"""
energies(vars, params, grid, sol)
energies(prob)
Returns the kinetic energy of each fluid layer KE``_1, ...,`` KE``_{n}``, and the
potential energy of each fluid interface PE``_{3/2}, ...,`` PE``_{n-1/2}``, where ``n``
is the number of layers in the fluid. (When ``n=1``, only the kinetic energy is returned.)
The kinetic energy at the ``j``-th fluid layer is
```math
\\textrm{KE}_j = \\frac{H_j}{H} \\int \\frac1{2} |\\boldsymbol{\\nabla} \\psi_j|^2 \\frac{\\mathrm{d}^2 \\boldsymbol{x}}{L_x L_y} \\ , \\quad j = 1, \\dots, n \\ ,
```
while the potential energy that corresponds to the interface ``j+1/2`` (i.e., interface between the ``j``-th and ``(j+1)``-th fluid layer) is
```math
\\textrm{PE}_{j+1/2} = \\int \\frac1{2} \\frac{f_0^2}{g'_{j+1/2}} (\\psi_j - \\psi_{j+1})^2 \\frac{\\mathrm{d}^2 \\boldsymbol{x}}{L_x L_y} \\ , \\quad j = 1, \\dots, n-1 \\ .
```
"""
function energies(vars, params, grid, sol)
nlayers = numberoflayers(params)
KE, PE = zeros(nlayers), zeros(nlayers-1)
@. vars.qh = sol
streamfunctionfrompv!(vars.ψh, vars.qh, params, grid)
abs²∇𝐮h = vars.uh # use vars.uh as scratch variable
@. abs²∇𝐮h = grid.Krsq * abs2(vars.ψh)
for j = 1:nlayers
CUDA.@allowscalar KE[j] = 1 / (2 * grid.Lx * grid.Ly) * parsevalsum(abs²∇𝐮h[:, :, j], grid) * params.H[j] / sum(params.H)
end
for j = 1:nlayers-1
CUDA.@allowscalar PE[j] = 1 / (2 * grid.Lx * grid.Ly) * params.f₀^2 / params.g′[j] * parsevalsum(abs2.(vars.ψh[:, :, j+1] .- vars.ψh[:, :, j]), grid)
end
return KE, PE
end
function energies(vars, params::SingleLayerParams, grid, sol)
@. vars.qh = sol
streamfunctionfrompv!(vars.ψh, vars.qh, params, grid)
abs²∇𝐮h = vars.uh # use vars.uh as scratch variable
@. abs²∇𝐮h = grid.Krsq * abs2(vars.ψh)
return 1 / (2 * grid.Lx * grid.Ly) * parsevalsum(abs²∇𝐮h, grid)
end
energies(prob) = energies(prob.vars, prob.params, prob.grid, prob.sol)
"""
fluxes(vars, params, grid, sol)
fluxes(prob)
Returns the lateral eddy fluxes within each fluid layer, lateralfluxes``_1, ...,`` lateralfluxes``_n``,
and also the vertical eddy fluxes at each fluid interface,
verticalfluxes``_{3/2}, ...,`` verticalfluxes``_{n-1/2}``, where ``n`` is the number of layers in the fluid.
(When ``n=1``, only the lateral fluxes are returned.)
The lateral eddy fluxes within the ``j``-th fluid layer are
```math
\\textrm{lateralfluxes}_j = \\frac{H_j}{H} \\int U_j \\, \\upsilon_j \\, \\partial_y u_j
\\frac{\\mathrm{d}^2 \\boldsymbol{x}}{L_x L_y} \\ , \\quad j = 1, \\dots, n \\ ,
```
while the vertical eddy fluxes at the ``j+1/2``-th fluid interface (i.e., interface between
the ``j``-th and ``(j+1)``-th fluid layer) are
```math
\\textrm{verticalfluxes}_{j+1/2} = \\int \\frac{f_0^2}{g'_{j+1/2} H} (U_j - U_{j+1}) \\,
\\upsilon_{j+1} \\, \\psi_{j} \\frac{\\mathrm{d}^2 \\boldsymbol{x}}{L_x L_y} \\ , \\quad
j = 1 , \\dots , n-1.
```
"""
function fluxes(vars, params, grid, sol)
nlayers = numberoflayers(params)
lateralfluxes, verticalfluxes = zeros(nlayers), zeros(nlayers-1)
updatevars!(vars, params, grid, sol)
∂u∂yh = vars.uh # use vars.uh as scratch variable
∂u∂y = vars.u # use vars.u as scratch variable
@. ∂u∂yh = im * grid.l * vars.uh
invtransform!(∂u∂y, ∂u∂yh, params)
lateralfluxes = (sum(@. params.H * params.U * vars.v * ∂u∂y; dims=(1, 2)))[1, 1, :]
lateralfluxes *= grid.dx * grid.dy / (grid.Lx * grid.Ly * sum(params.H))
for j = 1:nlayers-1
    CUDA.@allowscalar verticalfluxes[j] = sum(@views @. params.f₀^2 / params.g′[j] * (params.U[:, :, j] - params.U[:, :, j+1]) * vars.v[:, :, j+1] * vars.ψ[:, :, j]; dims=(1, 2))[1]
CUDA.@allowscalar verticalfluxes[j] *= grid.dx * grid.dy / (grid.Lx * grid.Ly * sum(params.H))
end
return lateralfluxes, verticalfluxes
end
function fluxes(vars, params::SingleLayerParams, grid, sol)
updatevars!(vars, params, grid, sol)
∂u∂yh = vars.uh # use vars.uh as scratch variable
∂u∂y = vars.u # use vars.u as scratch variable
@. ∂u∂yh = im * grid.l * vars.uh
invtransform!(∂u∂y, ∂u∂yh, params)
lateralfluxes = (sum(@. params.U * vars.v * ∂u∂y; dims=(1, 2)))[1, 1, :]
lateralfluxes *= grid.dx * grid.dy / (grid.Lx * grid.Ly)
return lateralfluxes
end
fluxes(prob) = fluxes(prob.vars, prob.params, prob.grid, prob.sol)
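# Illustrative diagnostics (hypothetical `prob`); both helpers accept the problem directly:
# KE, PE = energies(prob)            # kinetic energy per layer, potential energy per interface
# lateral, vertical = fluxes(prob)   # lateral fluxes per layer, vertical fluxes per interface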
end # module
|
{"hexsha": "954c01f3267b3a6cb45898f9bdd708457ab16fe8", "size": 21746, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/multilayerqg.jl", "max_stars_repo_name": "liasiegelman/GeophysicalFlows.jl", "max_stars_repo_head_hexsha": "2bbc137d560aebcde4a90bece4e71c16b313b93f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/multilayerqg.jl", "max_issues_repo_name": "liasiegelman/GeophysicalFlows.jl", "max_issues_repo_head_hexsha": "2bbc137d560aebcde4a90bece4e71c16b313b93f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/multilayerqg.jl", "max_forks_repo_name": "liasiegelman/GeophysicalFlows.jl", "max_forks_repo_head_hexsha": "2bbc137d560aebcde4a90bece4e71c16b313b93f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5538922156, "max_line_length": 182, "alphanum_fraction": 0.594408167, "num_tokens": 7567}
|
import os
import random
import pickle
import numpy as np
from TrainingData.load_corpus.load_corpus_data import get_cds_words
# seed rng for sampling of contexts in function 'get_data_set'
random.seed(6675)
# helper functions
def get_file_path():
return os.path.dirname(os.path.realpath(__file__))
def scale_to_unit_interval(np_array):
return np_array * 1.0 / np.max(np_array)
def next_words_to_int(next_words):
unique_next_words = list(set(next_words))
    to_int = dict(zip(unique_next_words, range(len(unique_next_words))))
ints = [to_int[word] for word in next_words]
return ints
def sample_right_context_words(target_word, window_size, sent, index, vocabulary, right_context_words, target_words):
# sampled sentence-internal window to the right
# only consider context words within this window
t = random.randint(1, window_size)
to_right = min(len(sent), index + t + 1)
for next_word_idx in range(index + 1, to_right):
context_word = sent[next_word_idx]
if context_word in vocabulary:
# (context_word, target_word) pair
right_context_words.append(context_word)
target_words.append(target_word)
def increment_left_context_vector(target_word, index, window_size, sent, left_context_words, left_context_vectors,
tokens_to_int):
# sentence internal window to the left
# only consider context words within this window
from_left = max(0, index - window_size)
for cxword_index in range(from_left, index):
context_word = sent[cxword_index]
if context_word in left_context_vectors:
# increment count in left_context_vector
column_idx = tokens_to_int[context_word]
left_context_vectors[target_word][column_idx] += 1
# keep track of the number of left-context words
left_context_words.add(context_word)
########################################################################################################################
# get / save data set functions
def create_and_save_data_set(file_name, vocabulary, window_size):
""" Call 'get_data_set' and save result to file. """
    target_words, right_context_words, embeddings_dict = get_data_set(vocabulary, window_size)
    assert len(right_context_words) == len(target_words)
    data_set = {'target_words': target_words,
                'right_context_words': right_context_words,
                'embeddings_dict': embeddings_dict}
data_dir = get_file_path() + '/textual/'
pickle.dump(data_set, open(data_dir + file_name, 'wb'))
def get_data_set(vocabulary, window_size):
"""
Extract and return a data set consisting of normalized frequency vectors for the left context and of
sampled target-context word pairs for the right context.
In the training corpus, consider in turn each sentence $S$ of length $N$. Consider each word $w_n$ at position
$n <= N$ as a target word iff $w_n$ is included in the vocabulary $V$. Extract the left context of $w_n$ and sample
words from the right context of $w_n$. This is done as follows:
- extracting the left context:
For each target word in the vocabulary $V$, create a left-context vector $v$ of zeros before iterating
through the corpus. Each value $v_i$ in $v$ corresponds to a word $w_i$ in the vocabulary, and each word
$w_i$ in the vocabulary corresponds to a value $v_i$ in $v$.
When iterating through the corpus, for a given target word $w_n$, if $w_i$ occurs within
a sentence-internal window of $window_size$ words to the left of $w_n$, increment $v_i$ by one.
After having gone through the entire corpus, normalize $v$ to unit interval.
$v$ is thus a normalized vector of left-context word frequencies.
- sampling words from the right context:
Given target word $w_n$, sample an integer $t$ from the uniform distribution ${1, ... window_size}$.
Then, consider each word $w_j$ within a sentence-internal window of $t$ words to the right of $w_n$
as a right-context word. If $w_j$ is in the vocabulary, add the target-context word pair $(w_n, w_j)$ to
the training set.
:type vocabulary: array
:param vocabulary: an array of word strings, in the format 'word-pos_tag', where 'pos_tag' is one
of 'v' (verb), 'n' (noun), 'adj' (adjective), 'fn' (function word / closed class word).
:type window_size: int
:param window_size: sentence-internal window of words to the left and right of target words within which
potential context words will be considered
:return: target_words list of target word strings
:return: right_context_words list of right-context word strings. each right-context word at position
$i$ in 'right_context_words' occurred in the right context of the
target word string at position $i$ in 'target_words'
:return: embeddings_dict dictionary mapping each target word strings to a left-context vector
"""
    print('Extracting data set...')
# map each token from the vocabulary to a unique integer
tokens_to_int = dict(zip(vocabulary, range(len(vocabulary))))
# store left-context-vectors by target word index
left_context_vectors = {w: np.zeros(len(vocabulary)) for w in vocabulary}
left_context_words = set() # keeps track of all left-context words (for which we increment counts in
# left-context vectors)
    target_words = []  # tokens for which we collect words from the right context, stored as word strings
    right_context_words = []  # words sampled from the right context of target words, stored as word strings
    # each context word at index $i$ in 'right_context_words' is sampled
    # from the right context of the target word at position $i$ in 'target_words'
# list of lists of tokens; each list of tokens is a sentence containing tagged tokens
words_by_sents = get_cds_words(collapse_function_words=True)
for sent in words_by_sents:
for index, target_word in enumerate(sent):
if target_word in vocabulary:
sample_right_context_words(target_word, window_size, sent, index, vocabulary, right_context_words,
target_words)
increment_left_context_vector(target_word, index, window_size, sent, left_context_words,
left_context_vectors, tokens_to_int)
    print('...done. Got a data set with %s right-context words and frequencies for %s / %s left-context words.' %
          (len(set(right_context_words)), len(left_context_words), len(vocabulary)))
    print('Number of sampled right-context words: %s' % len(right_context_words))
# normalize left-context vectors to unit interval
embeddings = [scale_to_unit_interval(v) for v in left_context_vectors.values()]
embeddings_dict = dict(zip(left_context_vectors.keys(), embeddings))
return target_words, right_context_words, embeddings_dict
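# Illustrative usage (hypothetical vocabulary and window size; tags follow the
# 'word-pos_tag' format described in the docstring above):
# vocabulary = ['dog-n', 'chase-v', 'big-adj', 'the-fn']
# targets, right_contexts, embeddings = get_data_set(vocabulary, window_size=5)
# create_and_save_data_set('cds_window5.pkl', vocabulary, window_size=5)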
|
{"hexsha": "7aa084a53bce09bed81810ec7033a9990ee8f201", "size": 7378, "ext": "py", "lang": "Python", "max_stars_repo_path": "TrainingData/data_sets/extract_from_corpus.py", "max_stars_repo_name": "RobGrimm/prediction_based", "max_stars_repo_head_hexsha": "b31e43cd9ca56016d265ee6438dea53554e3efab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-10-26T16:42:13.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-28T20:36:43.000Z", "max_issues_repo_path": "TrainingData/data_sets/extract_from_corpus.py", "max_issues_repo_name": "RobGrimm/prediction_based", "max_issues_repo_head_hexsha": "b31e43cd9ca56016d265ee6438dea53554e3efab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TrainingData/data_sets/extract_from_corpus.py", "max_forks_repo_name": "RobGrimm/prediction_based", "max_forks_repo_head_hexsha": "b31e43cd9ca56016d265ee6438dea53554e3efab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.9090909091, "max_line_length": 120, "alphanum_fraction": 0.6672539984, "include": true, "reason": "import numpy", "num_tokens": 1564}
|
import os
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing import image
import time
from collections import Counter
import matplotlib.pyplot as plt
# emotions array
emotions_csv = []
# list of emotions
emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
# the graph function appends each detected emotion to the list used for the summary plot
def graph(a):
emotions_csv.append(a)
print(emotions_csv)
#load the model architecture, which we have saved during training
model = model_from_json(open("./model.json", "r").read())
#load the weights which we have saved during training
model.load_weights('C:/Vineeth/Learning/Python - Advances/project final code/model_filter.h5')
# cascade classifiers are trained with several hundred positive sample views of a particular object; this one is loaded from an XML file shipped with OpenCV
face_haar_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
# video capture
cap=cv2.VideoCapture(0)
while True:
# captures frame and returns boolean value and captured image
ret,test_img=cap.read()
if not ret:
continue
    gray_img= cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY) # convert the frame from the BGR color space to grayscale
# face detection
faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)
for (x,y,w,h) in faces_detected:
        cv2.rectangle(test_img,(x,y),(x+w,y+h),(255,0,0),thickness=7) # draw a rectangle around the face
        roi_gray=gray_img[y:y+h,x:x+w] # crop the region of interest, i.e. the face area, from the image
roi_gray=cv2.resize(roi_gray,(48,48)) # resizing roi
img_pixels = image.img_to_array(roi_gray)
img_pixels = np.expand_dims(img_pixels, axis = 0)
img_pixels /= 255
# making the prediction with the model.
predictions = model.predict(img_pixels)
#find max indexed array
max_index = np.argmax(predictions[0])
# predicted emotions
predicted_emotion = emotions[max_index]
        # write the predicted emotion on the image
cv2.putText(test_img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
graph(predicted_emotion)
resized_img = cv2.resize(test_img, (1000, 700))
# image show
cv2.imshow('Facial emotion analysis ',resized_img)
if cv2.waitKey(10) == ord('q'):#wait until 'q' key is pressed
break
cap.release()
# destroy all windows
cv2.destroyAllWindows()
# after emotions have been detected from the video feed, display the overall emotion counts as a pie chart
time.sleep(5)
my_dic = dict(Counter(emotions_csv))
print(my_dic)
# colors used in the pie chart
colors = ( "#003f5c", "#374c80", "#7a5195",
"#bc5090", "#ef5675", "#ff764a", "#ffa600")
# wp sets the wedge properties: line width 1 and black edge color
wp = { 'linewidth' : 1, 'edgecolor' : "black" }
# Data to plot
labels = [] # labels are emotions
sizes = [] # number of emotions
for x, y in my_dic.items():
labels.append(x)
sizes.append(y)
def func(pct, allvalues):
absolute = int(pct / 100.*np.sum(allvalues))
return "{:.1f}%\n({:d} emotions)".format(pct, absolute)
# plotting the pie chart
fig, ax = plt.subplots(figsize =(10, 7)) # fig size
wedges, texts, autotexts = ax.pie(sizes,
autopct = lambda pct: func(pct, sizes),
labels = labels,
shadow = False,
colors = colors,
startangle = 90,
wedgeprops = wp,
textprops = dict(color ="white"))
# For the legend
for a in emotions:
    if a not in labels:
        labels.append(a)
n = len(labels) - len(sizes)
for a in range(0,n):
sizes.append(0)
print(sizes)
ax.legend(wedges, labels,
title ="Emotions",
loc ="lower center",
bbox_to_anchor =(1, 0, 0.5, 1))
plt.setp(autotexts, size = 8, weight ="bold")
ax.set_title("Detected Emotions") # title
# show plot
plt.show()
|
{"hexsha": "62a7455ef19f1efb2cf79a52cb0cf667f7dea895", "size": 4501, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_video.py", "max_stars_repo_name": "Vineethpaul09/Facial-Emotions-Detection-Training", "max_stars_repo_head_hexsha": "23a78dcee3380b34e302d593c0dc9ea28dc1482d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_video.py", "max_issues_repo_name": "Vineethpaul09/Facial-Emotions-Detection-Training", "max_issues_repo_head_hexsha": "23a78dcee3380b34e302d593c0dc9ea28dc1482d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_video.py", "max_forks_repo_name": "Vineethpaul09/Facial-Emotions-Detection-Training", "max_forks_repo_head_hexsha": "23a78dcee3380b34e302d593c0dc9ea28dc1482d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1983471074, "max_line_length": 133, "alphanum_fraction": 0.6189735614, "include": true, "reason": "import numpy", "num_tokens": 1123}
|
from copy import deepcopy
import numpy as np
from astropy import units as u
import pytest
from ctapipe.containers import ImageParametersContainer, HillasParametersContainer
from ctapipe.instrument import SubarrayDescription, TelescopeDescription
from ctapipe.image.cleaning import tailcuts_clean
from ctapipe.image.hillas import hillas_parameters, HillasParameterizationError
from ctapipe.io import SimTelEventSource
from ctapipe.reco.hillas_reconstructor import HillasReconstructor, HillasPlane
from ctapipe.utils import get_dataset_path
from ctapipe.coordinates import TelescopeFrame
from astropy.coordinates import SkyCoord, AltAz
from ctapipe.calib import CameraCalibrator
def test_estimator_results():
"""
creating some planes pointing in different directions (two
north-south, two east-west) and that have a slight position errors (+-
0.1 m in one of the four cardinal directions """
horizon_frame = AltAz()
p1 = SkyCoord(alt=43 * u.deg, az=45 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=47 * u.deg, az=45 * u.deg, frame=horizon_frame)
circle1 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, 1, 0] * u.m)
p1 = SkyCoord(alt=44 * u.deg, az=90 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=46 * u.deg, az=90 * u.deg, frame=horizon_frame)
circle2 = HillasPlane(p1=p1, p2=p2, telescope_position=[1, 0, 0] * u.m)
p1 = SkyCoord(alt=44.5 * u.deg, az=45 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=46.5 * u.deg, az=45 * u.deg, frame=horizon_frame)
circle3 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, -1, 0] * u.m)
p1 = SkyCoord(alt=43.5 * u.deg, az=90 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=45.5 * u.deg, az=90 * u.deg, frame=horizon_frame)
circle4 = HillasPlane(p1=p1, p2=p2, telescope_position=[-1, 0, 0] * u.m)
# Create a dummy subarray
# (not used here, but required to initialize the reconstructor)
subarray = SubarrayDescription(
"test array",
tel_positions={1: np.zeros(3) * u.m},
tel_descriptions={
1: TelescopeDescription.from_name(
optics_name="SST-ASTRI", camera_name="CHEC"
)
},
)
# creating the fit class and setting the the great circle member
fit = HillasReconstructor(subarray)
hillas_planes = {1: circle1, 2: circle2, 3: circle3, 4: circle4}
# performing the direction fit with the minimisation algorithm
# and a seed that is perpendicular to the up direction
dir_fit_minimise, _ = fit.estimate_direction(hillas_planes)
print("direction fit test minimise:", dir_fit_minimise)
def test_h_max_results(example_subarray):
"""
creating some planes pointing in different directions (two
north-south, two east-west) and that have a slight position errors (+-
0.1 m in one of the four cardinal directions """
horizon_frame = AltAz()
p1 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
circle1 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, 1, 0] * u.m)
p1 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
circle2 = HillasPlane(p1=p1, p2=p2, telescope_position=[1, 0, 0] * u.m)
p1 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=0 * u.deg, az=45 * u.deg, frame=horizon_frame)
circle3 = HillasPlane(p1=p1, p2=p2, telescope_position=[0, -1, 0] * u.m)
p1 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
p2 = SkyCoord(alt=0 * u.deg, az=90 * u.deg, frame=horizon_frame)
circle4 = HillasPlane(p1=p1, p2=p2, telescope_position=[-1, 0, 0] * u.m)
# Create a dummy subarray
# (not used here, but required to initialize the reconstructor)
subarray = example_subarray
# creating the fit class and setting the the great circle member
fit = HillasReconstructor(subarray)
hillas_planes = {1: circle1, 2: circle2, 3: circle3, 4: circle4}
# performing the direction fit with the minimisation algorithm
# and a seed that is perpendicular to the up direction
h_max_reco = fit.estimate_h_max(hillas_planes)
print("h max fit test minimise:", h_max_reco)
# the results should be close to the direction straight up
np.testing.assert_allclose(h_max_reco.value, 0, atol=1e-8)
# np.testing.assert_allclose(fitted_core_position.value, [0, 0], atol=1e-3)
def test_invalid_events(subarray_and_event_gamma_off_axis_500_gev):
"""
The HillasReconstructor is supposed to fail
in these cases:
    - fewer than two telescopes
- any width is NaN
- any width is 0
    This test takes 1 shower from a test simtel file and modifies some hillas
    dictionaries a posteriori to make it non-reconstructable.
It is supposed to fail if no Exception or another Exception gets thrown.
"""
# 4-LST bright event already calibrated
# we'll clean it and parametrize it again in the TelescopeFrame
subarray, event = subarray_and_event_gamma_off_axis_500_gev
tel_azimuth = {}
tel_altitude = {}
#source = EventSource(filename, max_events=1)
#subarray = source.subarray
calib = CameraCalibrator(subarray)
fit = HillasReconstructor(subarray)
#for event in source:
calib(event)
hillas_dict = {}
for tel_id, dl1 in event.dl1.tel.items():
geom = subarray.tel[tel_id].camera.geometry
tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth
tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude
mask = tailcuts_clean(
geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0
)
dl1.parameters = ImageParametersContainer()
try:
moments = hillas_parameters(geom[mask], dl1.image[mask])
hillas_dict[tel_id] = moments
dl1.parameters.hillas = moments
except HillasParameterizationError:
dl1.parameters.hillas = HillasParametersContainer()
continue
# copy event container to modify it
event_copy = deepcopy(event)
# overwrite all image parameters but the last one with dummy ones
for tel_id in list(event_copy.dl1.tel.keys())[:-1]:
event_copy.dl1.tel[tel_id].parameters.hillas = HillasParametersContainer()
fit(event_copy)
assert event_copy.dl2.stereo.geometry["HillasReconstructor"].is_valid is False
# Now use the original event, but overwrite the last width to 0
event.dl1.tel[tel_id].parameters.hillas.width = 0 * u.m
fit(event)
assert event.dl2.stereo.geometry["HillasReconstructor"].is_valid is False
# Now use the original event, but overwrite the last width to NaN
event.dl1.tel[tel_id].parameters.hillas.width = np.nan * u.m
fit(event)
assert event.dl2.stereo.geometry["HillasReconstructor"].is_valid is False
def test_reconstruction_against_simulation(subarray_and_event_gamma_off_axis_500_gev):
"""Reconstruction is here done only in the TelescopeFrame,
since the previous tests test already for the compatibility between
frames"""
# 4-LST bright event already calibrated
# we'll clean it and parametrize it again in the TelescopeFrame
subarray, event = subarray_and_event_gamma_off_axis_500_gev
# define reconstructor
reconstructor = HillasReconstructor(subarray)
hillas_dict = {}
telescope_pointings = {}
for tel_id, dl1 in event.dl1.tel.items():
telescope_pointings[tel_id] = SkyCoord(
alt=event.pointing.tel[tel_id].altitude,
az=event.pointing.tel[tel_id].azimuth,
frame=AltAz(),
)
geom_CameraFrame = subarray.tel[tel_id].camera.geometry
# this could be done also out of this loop,
# but in case of real data each telescope would have a
# different telescope_pointing
geom_TelescopeFrame = geom_CameraFrame.transform_to(
TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])
)
mask = tailcuts_clean(
geom_TelescopeFrame,
dl1.image,
picture_thresh=5.0,
boundary_thresh=2.5,
keep_isolated_pixels=False,
min_number_picture_neighbors=2,
)
try:
hillas_dict[tel_id] = hillas_parameters(
geom_TelescopeFrame[mask], dl1.image[mask]
)
# the original event is created from a
# pytest fixture with "session" scope, so it's always the same
# and if we used the same event we would overwrite the image
# parameters for the next tests, thus causing their failure
test_event = deepcopy(event)
test_event.dl1.tel[tel_id].parameters = ImageParametersContainer()
test_event.dl1.tel[tel_id].parameters.hillas = hillas_dict[tel_id]
except HillasParameterizationError as e:
print(e)
continue
# Get shower geometry
reconstructor(event)
# get the result from the correct DL2 container
result = event.dl2.stereo.geometry["HillasReconstructor"]
# get the reconstructed coordinates in the sky
reco_coord = SkyCoord(alt=result.alt, az=result.az, frame=AltAz())
# get the simulated coordinates in the sky
true_coord = SkyCoord(
alt=event.simulation.shower.alt, az=event.simulation.shower.az, frame=AltAz()
)
# check that we are not more far than 0.1 degrees
assert reco_coord.separation(true_coord) < 0.1 * u.deg
@pytest.mark.parametrize("filename",
["gamma_divergent_LaPalma_baseline_20Zd_180Az_prod3_test.simtel.gz",
"gamma_LaPalma_baseline_20Zd_180Az_prod3b_test.simtel.gz"])
def test_CameraFrame_against_TelescopeFrame(filename):
    input_file = get_dataset_path(filename)
source = SimTelEventSource(input_file, max_events=10)
calib = CameraCalibrator(subarray=source.subarray)
reconstructor = HillasReconstructor(source.subarray)
reconstructed_events = 0
for event in source:
calib(event)
# make a copy of the calibrated event for the camera frame case
        # later we clean and parametrize the 2 events in the same way
# but in 2 different frames to check they return compatible results
event_camera_frame = deepcopy(event)
telescope_pointings = {}
hillas_dict_camera_frame = {}
hillas_dict_telescope_frame = {}
for tel_id, dl1 in event.dl1.tel.items():
event_camera_frame.dl1.tel[tel_id].parameters = ImageParametersContainer()
event.dl1.tel[tel_id].parameters = ImageParametersContainer()
# this is needed only here to transform the camera geometries
telescope_pointings[tel_id] = SkyCoord(
alt=event.pointing.tel[tel_id].altitude,
az=event.pointing.tel[tel_id].azimuth,
frame=AltAz(),
)
geom_camera_frame = source.subarray.tel[tel_id].camera.geometry
# this could be done also out of this loop,
# but in case of real data each telescope would have a
# different telescope_pointing
geom_telescope_frame = geom_camera_frame.transform_to(
TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])
)
mask = tailcuts_clean(
geom_telescope_frame, dl1.image, picture_thresh=10.0, boundary_thresh=5.0
)
try:
moments_camera_frame = hillas_parameters(
geom_camera_frame[mask], dl1.image[mask]
)
moments_telescope_frame = hillas_parameters(
geom_telescope_frame[mask], dl1.image[mask]
)
if (moments_camera_frame.width.value > 0) and (moments_telescope_frame.width.value > 0):
event_camera_frame.dl1.tel[
tel_id
].parameters.hillas = moments_camera_frame
dl1.parameters.hillas = moments_telescope_frame
hillas_dict_camera_frame[tel_id] = moments_camera_frame
hillas_dict_telescope_frame[tel_id] = moments_telescope_frame
else:
continue
except HillasParameterizationError as e:
print(e)
continue
if (len(hillas_dict_camera_frame) > 2) and (len(hillas_dict_telescope_frame) > 2):
reconstructor(event_camera_frame)
reconstructor(event)
reconstructed_events += 1
else: # this event was not good enough to be tested on
continue
# Compare old approach with new approach
result_camera_frame = event_camera_frame.dl2.stereo.geometry["HillasReconstructor"]
result_telescope_frame = event.dl2.stereo.geometry["HillasReconstructor"]
assert result_camera_frame.is_valid
assert result_telescope_frame.is_valid
for field in event.dl2.stereo.geometry["HillasReconstructor"].as_dict():
C = np.asarray(result_camera_frame.as_dict()[field])
T = np.asarray(result_telescope_frame.as_dict()[field])
assert (np.isclose(C, T, rtol=1e-03, atol=1e-03, equal_nan=True)).all()
assert reconstructed_events > 0 # check that we reconstruct at least 1 event
|
{"hexsha": "418d6461f42388cf35cbce0fedac1af478b41e1b", "size": 13562, "ext": "py", "lang": "Python", "max_stars_repo_path": "ctapipe/reco/tests/test_HillasReconstructor.py", "max_stars_repo_name": "watsonjj/ctapipe", "max_stars_repo_head_hexsha": "fc98748d7a38f50040f1fbe3ce5e174ad8c0ba0a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2015-06-23T15:24:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T22:30:58.000Z", "max_issues_repo_path": "ctapipe/reco/tests/test_HillasReconstructor.py", "max_issues_repo_name": "watsonjj/ctapipe", "max_issues_repo_head_hexsha": "fc98748d7a38f50040f1fbe3ce5e174ad8c0ba0a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1537, "max_issues_repo_issues_event_min_datetime": "2015-06-24T11:27:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:08.000Z", "max_forks_repo_path": "ctapipe/reco/tests/test_HillasReconstructor.py", "max_forks_repo_name": "watsonjj/ctapipe", "max_forks_repo_head_hexsha": "fc98748d7a38f50040f1fbe3ce5e174ad8c0ba0a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 275, "max_forks_repo_forks_event_min_datetime": "2015-07-09T14:09:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T22:25:51.000Z", "avg_line_length": 39.6549707602, "max_line_length": 104, "alphanum_fraction": 0.6741631028, "include": true, "reason": "import numpy,from astropy", "num_tokens": 3431}
|
"""
Technical Analysis Factors
--------------------------
"""
from bottleneck import (
nanargmax,
nanmax,
nanmean,
nansum,
)
from numpy import (
abs,
clip,
diff,
fmax,
inf,
isnan,
NINF,
)
from numexpr import evaluate
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.term import SingleInputMixin
from zipline.utils.control_flow import ignore_nanwarnings
from .factor import CustomFactor
class RSI(CustomFactor, SingleInputMixin):
"""
Relative Strength Index
**Default Inputs**: [USEquityPricing.close]
**Default Window Length**: 15
"""
window_length = 15
inputs = (USEquityPricing.close,)
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
class SimpleMovingAverage(CustomFactor, SingleInputMixin):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = (USEquityPricing.close, USEquityPricing.volume)
class MaxDrawdown(CustomFactor, SingleInputMixin):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[:end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
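# Illustrative usage sketch (hypothetical pipeline; engine/attach wiring omitted):
# from zipline.pipeline import Pipeline
# pipe = Pipeline(columns={
#     'rsi': RSI(),
#     'sma20': SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=20),
#     'drawdown': MaxDrawdown(inputs=[USEquityPricing.close], window_length=252),
# })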
|
{"hexsha": "ba58dd58867f780da86358c7e8b541fb1af4b826", "size": 2698, "ext": "py", "lang": "Python", "max_stars_repo_path": "zipline/pipeline/factors/technical.py", "max_stars_repo_name": "jimgoo/zipline-fork", "max_stars_repo_head_hexsha": "7e898ae36d0cadafe443491e4f3670d587e9716c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-29T01:46:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-29T01:46:35.000Z", "max_issues_repo_path": "zipline/pipeline/factors/technical.py", "max_issues_repo_name": "jimgoo/zipline-fork", "max_issues_repo_head_hexsha": "7e898ae36d0cadafe443491e4f3670d587e9716c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-08-09T20:43:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-09T20:43:08.000Z", "max_forks_repo_path": "zipline/pipeline/factors/technical.py", "max_forks_repo_name": "jimgoo/zipline-fork", "max_forks_repo_head_hexsha": "7e898ae36d0cadafe443491e4f3670d587e9716c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-08-31T12:34:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T22:28:48.000Z", "avg_line_length": 24.5272727273, "max_line_length": 79, "alphanum_fraction": 0.6252779837, "include": true, "reason": "from numpy,from numexpr", "num_tokens": 692}
|
# Random integers drawn uniformly from {1, ..., n}
# (rounding a continuous Uniform(1, n) sample would under-weight the endpoints 1 and n)
unidrnd(n::Int64) = rand(1:n)
# Sort rows based on the col reference (returns a sorted copy; A itself is not mutated)
function sortrows!(A, col; order=false)
    if isa(col, Int)
        return A[sortperm(A[:,col], rev=order),:]
    else
        # Sorted along col[1]; ties between equal col[1] values are not yet broken
        # by the remaining reference columns
        return A[sortperm(A[:,col[1]], rev=order),:]
    end
end
# Check convergence: return the updated count of consecutive converged iterations
# (Int arguments are passed by value, so the caller must reassign the returned count)
isconverged!(new::Float64, old::Float64, reltol::Float64, iteration::Int64) = abs(new - old) / new <= reltol ? iteration + 1 : 0
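# e.g. in the caller's optimization loop (hypothetical names):
# it_unchanged = isconverged!(obj_new, obj_old, reltol, it_unchanged)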
# Check time
istimedout(t::Float64, t0::Float64, time_limit::Int64) = (t - t0) / 3600. > time_limit
# Status
function get_status(it::Int64, it_unchanged::Int64, timedout::Bool, options::Options)
if it >= options.iterations
status = "+ Status: Maximum iteration number is reached"
    elseif timedout
status = "+ Status: Timed out"
elseif it_unchanged >= options.itmax_unchanged
status = "+ Status: Converged"
end
return status
end
# Show results in REPL
function show_results(results::MetaheuristicResults)
if results.options.log
# Summary
println("___")
println()
println("Optimization summary...")
println("___")
println()
# Status
println(results.status)
# Minimizer
println("+ Optimal decisions: ", results.minimizer)
# Minimum
println("+ Minimum: ", results.minimum)
# Iteration
println("+ Iterations: ", results.iterations)
end
end
# Show verbose in REPL
function show_verbose(it::Int64, time::Float64, obj::Float64)
# Header
if it == 1
println()
println("___")
println()
println("Iteration Objective Time (s)")
# Write to log
open("metaheuristic_log.txt", "a+") do io
write(io, "Iteration;Objective;Time (s) \n")
end
end
# Iteration
print(it)
# Objective
print(" ", round(obj, digits = 2))
# Time
print(" ", round(time, digits = 1))
println()
# Write to log
open("metaheuristic_log.txt", "a+") do io
write(io, string(it, ";", round(obj, digits = 2), ";", round(time, digits = 1), "\n"))
end
end
|
{"hexsha": "17b9664d40f0b2c8c5c93d51d8d1c9eccce53c0a", "size": 2359, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/clearing/utils.jl", "max_stars_repo_name": "hradet/Metaheuristics.jl", "max_stars_repo_head_hexsha": "bce6647547fdd9b27f5c40db7e0de129b1196d7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-03T10:28:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T10:28:49.000Z", "max_issues_repo_path": "src/clearing/utils.jl", "max_issues_repo_name": "hradet/Metaheuristics.jl", "max_issues_repo_head_hexsha": "bce6647547fdd9b27f5c40db7e0de129b1196d7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/clearing/utils.jl", "max_forks_repo_name": "hradet/Metaheuristics.jl", "max_forks_repo_head_hexsha": "bce6647547fdd9b27f5c40db7e0de129b1196d7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7638888889, "max_line_length": 142, "alphanum_fraction": 0.580754557, "num_tokens": 619}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <mcculler@mit.edu>
# NOTICE: authors should document their contributions concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
import scipy
import scipy.linalg
import scipy.signal
from wavestate.iirrational import TFmath
def eigspaces_right(A, B=None, tol=1e-9):
###############################
# eigenvalue subspace collection
w, vr = scipy.linalg.eig(A, B, left=False, right=True)
w_collect_val = [[_] for _ in w]
w_collect_idx = [[_] for _, v in enumerate(w)]
collected_any = True
w = list(w)
while collected_any:
# print(w / (2*np.pi))
# print(vr)
w_near = TFmath.nearest_idx(w)
collected_any = False
for w_idx, (w_val, w_near_idx) in enumerate(zip(w, w_near)):
if w_near_idx is None:
continue
w_val2 = w[w_near_idx]
if w_val is None or w_val2 is None:
continue
if abs(w_val - w_val2) < tol:
w[w_near_idx] = None
w_collect_val[w_idx] += w_collect_val[w_near_idx]
w_collect_val[w_near_idx] = []
w_collect_idx[w_idx] += w_collect_idx[w_near_idx]
w_collect_idx[w_near_idx] = []
collected_any = True
# print(w_collect_val)
# print(w_collect_idx)
w_pairs = [p for p in zip(w_collect_idx, w_collect_val) if len(p[0]) > 0]
v_pairs = []
# u, s, v = scipy.linalg.svd(vr)
# print(s)
for idxs, eigs in w_pairs:
evects = vr[:, idxs]
# u, s, v = scipy.linalg.svd(evects)
# print(s)
v_pairs.append((eigs, evects))
# the evects output is rows are A-space, columns are eig-idx-space
return v_pairs
def eigspaces_right_real(A, B=None, tol=1e-9):
v_pairs = eigspaces_right(A, B=B, tol=tol)
v_pairs_re = []
v_pairs_im = []
w_im = []
for eigs, evects in v_pairs:
eigv = np.mean(eigs)
if abs(eigv.imag) < tol:
# remove the imaginary part to the eigenvectors
assert np.all(np.sum(evects.imag ** 2, axis=1) < tol)
v_pairs_re.append((eigs, evects.real))
continue
v_pairs_im.append((eigs, evects))
w_im.append(eigv)
# this finds conjugate pairs
w_near = TFmath.nearest_idx(w_im)
v_pairs_im2 = []
for idx_fr, idx_to in enumerate(w_near):
if idx_to is None or w_near[idx_to] is None:
continue
if idx_fr is None or w_near[idx_fr] is None:
continue
if w_near[idx_to] != idx_fr:
continue
# unique conjugate pair
w_near[idx_to] = None
w_near[idx_fr] = None
eigs1, eigv1 = v_pairs_im[idx_to]
eigs2, eigv2 = v_pairs_im[idx_fr]
if w_im[idx_to].imag > 0:
eigs_use = eigs1
else:
eigs_use = eigs2
v_pairs_im2.append((eigs_use, np.hstack([eigv1, eigv2])))
v_pairs_im3 = []
for eigs, evects in v_pairs_im2:
# TODO, it may be the the SVD should be used here
# this may rely on r being rank-revealing
q, r = scipy.linalg.qr(evects.imag.T)
# check that the imaginary space is actually reduced
idx_cut = r.shape[0] // 2
assert np.all(np.sum(r[idx_cut:] ** 2, axis=1) < tol)
# same check as above (redundant)
assert np.all(np.sum((evects.imag @ q[idx_cut:].T) ** 2, axis=1) < tol)
# now formulate the real projection
evects2 = evects.real @ q[idx_cut:].T
# normalize the eigenvectors again
evects2 = evects2 / (np.sum(evects2 ** 2, axis=0)) ** 0.5
v_pairs_im3.append((eigs, evects2))
return v_pairs_re + v_pairs_im3
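# Illustrative check (hypothetical input): a diagonal matrix with a repeated
# eigenvalue should come back as one 2-dimensional eigenspace and one
# 1-dimensional eigenspace.
# A = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 5.0]])
# for eigs, evects in eigspaces_right(A):
#     print(np.mean(eigs), evects.shape)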
|
{"hexsha": "0cf0518b115a218c50a2a95705ba97f03c6acca3", "size": 3946, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/wavestate/iirrational/statespace/dense/eig_algorithms.py", "max_stars_repo_name": "wavestate/wavestate-iirrational", "max_stars_repo_head_hexsha": "01d6dba8b2131fa2a099a74f17e6540f30cee606", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wavestate/iirrational/statespace/dense/eig_algorithms.py", "max_issues_repo_name": "wavestate/wavestate-iirrational", "max_issues_repo_head_hexsha": "01d6dba8b2131fa2a099a74f17e6540f30cee606", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wavestate/iirrational/statespace/dense/eig_algorithms.py", "max_forks_repo_name": "wavestate/wavestate-iirrational", "max_forks_repo_head_hexsha": "01d6dba8b2131fa2a099a74f17e6540f30cee606", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0172413793, "max_line_length": 79, "alphanum_fraction": 0.5917384693, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1130}
|
## A Quick Tour of DifferentialEquations.jl
DifferentialEquations.jl is a metapackage for solving differential equations in Julia. The basic workflow is:
- Define a problem
- Solve a problem
- Plot the solution
The API between different types of differential equations is unified through multiple dispatch, as the sketch below illustrates.
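For instance, a stochastic version of a problem is set up and solved with the same `solve` call as an ODE. The following is only an illustrative sketch: it reuses `f`, `u0`, and `tspan` from the Lotka-Volterra example in the next section, and the diagonal noise function `g` (with the arbitrary amplitude 0.1) is an assumption of this sketch, not part of the original tour.
```julia
# Same workflow for an SDE: define the noise term, build the problem, call solve
g = function (t,u,du)
  du[1] = 0.1u[1]
  du[2] = 0.1u[2]
end
sde_prob = SDEProblem(f,g,u0,tspan)
sde_sol = solve(sde_prob)
```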
## Example: Lotka-Volterra ODE
$$\begin{align}
x' &= ax - bxy\\
y' &= -cy + dxy
\end{align}$$
```julia
using DifferentialEquations
# Define a problem
const a = 1.0; const b = 2.0
const c = 1.5; const d = 1.25
f = function (t,u,du) # Define f as an in-place update into du
du[1] = a*u[1] - b*u[1]*u[2]
du[2] = -c*u[2]+ d*u[1]*u[2]
end
u0 = [1.0;1.0]; tspan = (0.0,10.0)
prob = ODEProblem(f,u0,tspan)
```
DiffEqBase.ODEProblem{Array{Float64,1},Float64,true,##1#2}(#1,[1.0,1.0],(0.0,10.0))
```julia
# Solve the problem
sol = solve(prob)
```
DiffEqBase.ODESolution solved with OrdinaryDiffEq.Tsit5 (order 5, adaptive order 4): 19 adaptive timesteps on t ∈ (0.0, 10.0). [Printed output truncated; the full state vectors and dense-interpolation data are elided.]
```julia
# Plot the solution using the plot recipe
using Plots; gr() # Using the GR Backend
plot(sol,title="All Plots.jl Attributes are Available")
```
[SVG figure: "All Plots.jl Attributes are Available" — two solution components (y1, y2) plotted against t from 0 to 10.]
## Extra Features
The solution object acts both as an array and as an interpolation of the solution:
```julia
@show sol.t[3] # Time at the 3rd timestep
@show sol[3] # Value at the third timestep
@show sol(5) # Value at t=5 using the interpolation
```
sol.t[3] = 0.2927716363580929
sol[3] = [0.768635,0.887673]
sol(5) = [1.45932,0.99208]
2-element Array{Float64,1}:
1.45932
0.99208
## Stochastic Differential Equations
Also included are problem types for stochastic differential equations:
```julia
g = function (t,u,du)
du[1] = .5*u[1]
du[2] = .1*u[2]
end
prob = SDEProblem(f,g,u0,tspan)
sol = solve(prob,dt=1/2^4)
plot(sol)
```
[SVG figure: plot of the SDE solution — two components (y1, y2) against t from 0 to 10.]
## Documentation
For more information, see the documentation: https://github.com/JuliaDiffEq/DifferentialEquations.jl
## Problems
### Problem 1
The DifferentialEquations.jl algorithms choose the number type of their calculation given their input. Use this fact to solve the [Lorenz equation](https://en.wikipedia.org/wiki/Lorenz_system) using BigFloats. You may want to [check out the example notebooks](https://github.com/JuliaDiffEq/DifferentialEquations.jl/tree/master/examples).
### Problem 2
Use [event handling](http://juliadiffeq.github.io/DifferentialEquations.jl/latest/man/callback_functions.html) to model a bouncing ball with friction, i.e. at every bounce the velocity flips sign but is reduced to 80% of its magnitude. Does the ball eventually stop bouncing?
### Problem 3
Install the ODE.jl and ODEInterface conditional dependencies (note: ODEInterface may give build issues on Windows!) and run some of the [benchmarks notebooks](https://github.com/JuliaDiffEq/DifferentialEquations.jl/tree/master/benchmarks) on your machine. Do you notice any trends amongst the algorithms? Use the method as shown in the Pleiades benchmarks to benchmark the algorithms against each other on nonlinear problems with no known analytical solution. Try building an example problem with a large number of independent variables to accentuate the differences between the algorithms (Example: the Linear problem in the benchmarks is a 100x100 problem).
|
{"hexsha": "4eab33102485b20a82e32bd1a898727ec9fbb4f0", "size": 43278, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Notebooks/DiffEq.ipynb", "max_stars_repo_name": "jngod2011/IntroToJulia", "max_stars_repo_head_hexsha": "7364926e7844390a8b169aca353be7b631050224", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Notebooks/DiffEq.ipynb", "max_issues_repo_name": "jngod2011/IntroToJulia", "max_issues_repo_head_hexsha": "7364926e7844390a8b169aca353be7b631050224", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Notebooks/DiffEq.ipynb", "max_forks_repo_name": "jngod2011/IntroToJulia", "max_forks_repo_head_hexsha": "7364926e7844390a8b169aca353be7b631050224", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-15T05:03:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-15T05:03:33.000Z", "avg_line_length": 68.5863708399, "max_line_length": 3860, "alphanum_fraction": 0.6064744212, "converted": true, "num_tokens": 14802}
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e127m1_6limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition add :
{ add : feBW_tight -> feBW_tight -> feBW_loose
| forall a b, phiBW_loose (add a b) = F.add (phiBW_tight a) (phiBW_tight b) }.
Proof.
Set Ltac Profiling.
Time synthesize_add ().
Show Ltac Profile.
Time Defined.
Print Assumptions add.
|
{"author": "anonymous-code-submission-01", "repo": "sp2019-54-code", "sha": "8867f5bed0821415ec99f593b1d61f715ed4f789", "save_path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code", "path": "github-repos/coq/anonymous-code-submission-01-sp2019-54-code/sp2019-54-code-8867f5bed0821415ec99f593b1d61f715ed4f789/src/Specific/solinas32_2e127m1_6limbs/feadd.v"}
|
import copy
import json
import math
import six
import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers as contrib_layers
class ModelConfig(object):
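    """Configuration for `Model` (hyperparameters of an ALBERT-style transformer
    with a factorized embedding and grouped, weight-shared hidden layers)."""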
def __init__(self,
vocab_size,
embedding_size=128,
hidden_size=4096,
num_hidden_layers=12,
num_hidden_groups=1,
num_attention_heads=64,
intermediate_size=16384,
hidden_act="gelu",
hidden_dropout_prob=0,
attention_probs_dropout_prob=0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02
):
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `ModelConfig` from a Python dictionary of parameters."""
config = ModelConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `ModelConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=4, sort_keys=True) + "\n"
class Model:
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
seq_type_ids=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
config = copy.deepcopy(config)
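        # Dropout is disabled when the model is not training.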
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(
shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(
shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope(scope, default_name='bert'):
with tf.variable_scope('embedding'):
(self.word_embedding_output,
self.output_embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.embedding_size,
initializer_range=config.initializer_range,
word_embedding_name='word_embeddings',
use_one_hot_embeddings=use_one_hot_embeddings)
self.embedding_output = embedding_postprocessor(
input_tensor=self.word_embedding_output,
use_token_type=True,
seq_type_ids=seq_type_ids,
token_type_ids=token_type_ids,
seq_type_embedding_name="seq_type_embeddings",
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name='token_type_embeddings',
use_position_embeddings=True,
position_embedding_name='position_embeddings',
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob,
use_one_hot_embeddings=use_one_hot_embeddings)
with tf.variable_scope('transformer', reuse=tf.AUTO_REUSE):
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=input_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_hidden_groups=config.num_hidden_groups,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(
self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_word_embedding_output(self):
return self.word_embedding_output
def get_embedding_output(self):
return self.embedding_output
def get_embedding_table(self):
return self.output_embedding_table
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
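    # In the rank-2 input case, `input_ids` was given a trailing singleton dim
    # above, so fold it back into the embedding axis:
    # [batch, seq, 1, emb] -> [batch, seq, emb].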
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return output, embedding_table
def embedding_postprocessor(input_tensor,
use_token_type=False,
seq_type_ids=None,
token_type_ids=None,
seq_type_embedding_name="seq_type_embeddings",
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1,
use_one_hot_embeddings=True):
input_shape = get_shape_list(input_tensor, expected_rank=[3])
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary, unless converting to tflite model.
if use_one_hot_embeddings:
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(
flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
else:
token_type_embeddings = tf.nn.embedding_lookup(token_type_table,
token_type_ids)
output += token_type_embeddings
if seq_type_ids is not None:
seq_type_table = tf.get_variable(
name=seq_type_embedding_name,
shape=[seq_length, width],
initializer=create_initializer(initializer_range))
        seq_type_embeddings = tf.nn.embedding_lookup(seq_type_table,
                                                     seq_type_ids)
output += seq_type_embeddings
if use_position_embeddings:
assert_op = tf.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_hidden_groups=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False,
in_group_reuse=True):
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size {} is not a multiple of the number of attention "
"heads {}".format(hidden_size, num_attention_heads))
if num_hidden_layers % num_hidden_groups != 0:
        raise ValueError(
            "The number of hidden layers {} is not a multiple of the number "
            "of hidden groups {}".format(num_hidden_layers, num_hidden_groups))
attention_head_size = hidden_size // num_attention_heads
input_shape = get_shape_list(input_tensor, expected_rank=[3])
input_width = input_shape[2]
all_layer_outputs = []
if input_width != hidden_size:
prev_output = abc_cd_abd(
input_tensor, hidden_size, create_initializer(initializer_range),
None, name="embedding_hidden_mapping_in")
else:
prev_output = input_tensor
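    # Cross-layer parameter sharing (as in ALBERT): with `in_group_reuse`, every
    # layer inside a group uses the same "inner_layer_%d" variable scope, so the
    # enclosing tf.AUTO_REUSE scope makes those layers share one set of weights.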
    num_layers_per_group = int(num_hidden_layers / num_hidden_groups)
    for group_idx in range(num_hidden_groups):
        with tf.name_scope("group_%d" % group_idx):
            for inner_group_idx in range(num_layers_per_group):
                layer_idx = group_idx * num_layers_per_group + inner_group_idx
var_idx = group_idx if in_group_reuse else inner_group_idx
with tf.name_scope("layer_%d" % layer_idx):
with tf.variable_scope("inner_layer_%d" % var_idx):
layer_output = prev_output
layer_output = attention_ffn_block(
layer_input=layer_output,
hidden_size=hidden_size,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
attention_head_size=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
intermediate_size=intermediate_size,
intermediate_act_fn=intermediate_act_fn,
initializer_range=initializer_range,
hidden_dropout_prob=hidden_dropout_prob)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
return all_layer_outputs
else:
return all_layer_outputs[-1]
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
batch_size=None,
from_seq_length=None,
to_seq_length=None,
use_einsum=True):
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
size_per_head = int(from_shape[2] / num_attention_heads)
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if batch_size is None or from_seq_length is None or to_seq_length is None:
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
# `query_layer` = [B, F, N, H]
q = abc_ced_abde(from_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), query_act, "query")
# `key_layer` = [B, T, N, H]
k = abc_ced_abde(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), key_act, "key")
# `value_layer` = [B, T, N, H]
v = abc_ced_abde(to_tensor, num_attention_heads, size_per_head,
create_initializer(initializer_range), value_act, "value")
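    # Transpose from [B, F/T, N, H] to [B, N, F/T, H] so attention is computed
    # independently per head.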
q = tf.transpose(q, [0, 2, 1, 3])
k = tf.transpose(k, [0, 2, 1, 3])
v = tf.transpose(v, [0, 2, 1, 3])
if attention_mask is not None:
attention_mask = tf.reshape(
attention_mask, [batch_size, 1, to_seq_length, 1])
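    # The mask is kept as [B, 1, T, 1]; dot_product_attention broadcasts it to
    # [B, 1, F, T] and converts it into an additive bias on the logits.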
# 'new_embeddings = [B, N, F, H]'
new_embeddings = dot_product_attention(q, k, v, attention_mask,
attention_probs_dropout_prob)
return tf.transpose(new_embeddings, [0, 2, 1, 3])
def attention_ffn_block(layer_input,
hidden_size=768,
attention_mask=None,
num_attention_heads=1,
attention_head_size=64,
attention_probs_dropout_prob=0.0,
intermediate_size=3072,
intermediate_act_fn=None,
initializer_range=0.02,
hidden_dropout_prob=0.0):
with tf.variable_scope("attention_1"):
with tf.variable_scope("self"):
attention_output = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = abcd_cde_abe(
attention_output,
hidden_size,
attention_head_size,
create_initializer(initializer_range),
None,
name="dense")
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
with tf.variable_scope("ffn_1"):
with tf.variable_scope("intermediate"):
intermediate_output = abc_cd_abd(
attention_output,
intermediate_size,
create_initializer(initializer_range),
intermediate_act_fn,
name="dense")
with tf.variable_scope("output"):
ffn_output = abc_cd_abd(
intermediate_output,
hidden_size,
create_initializer(initializer_range),
None,
name="dense")
ffn_output = dropout(ffn_output, hidden_dropout_prob)
ffn_output = layer_norm(ffn_output + attention_output)
return ffn_output
def dot_product_attention(q, k, v, bias, dropout_rate=0.0):
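    """Scaled dot-product attention: softmax(q k^T / sqrt(d_head) + mask_bias) v."""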
logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv]
logits = tf.multiply(logits, 1.0 / math.sqrt(float(get_shape_list(q)[-1])))
if bias is not None:
# `attention_mask` = [B, T]
from_shape = get_shape_list(q)
broadcast_ones = tf.ones(
[from_shape[0], 1, from_shape[2], 1], tf.float32)
bias = tf.matmul(broadcast_ones,
tf.cast(bias, tf.float32), transpose_b=True)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - bias) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
logits += adder
else:
adder = 0.0
attention_probs = tf.nn.softmax(logits, name="attention_probs")
attention_probs = dropout(attention_probs, dropout_rate)
return tf.matmul(attention_probs, v)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def abc_cd_abd(input_tensor,
output_size,
initializer,
activation,
name=None):
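    """Dense layer over a rank-3 tensor via einsum "abc,cd->abd" ([B,F,H] x [H,O] -> [B,F,O])."""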
input_shape = get_shape_list(input_tensor)
hidden_size = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, output_size],
initializer=initializer)
b = tf.get_variable(
name="bias", shape=[output_size], initializer=tf.zeros_initializer)
ret = tf.einsum("BFH,HO->BFO", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def abcd_cde_abe(input_tensor,
hidden_size,
head_size,
initializer,
activation,
name=None):
"""A dense layer with 3D kernel for projection.
Args:
input_tensor: float Tensor of shape [batch,from_seq_length,
num_attention_heads, size_per_head].
hidden_size: The size of hidden layer.
head_size: The size of head.
initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
input_shape = get_shape_list(input_tensor)
num_attention_heads = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[num_attention_heads * head_size, hidden_size],
initializer=initializer)
w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
b = tf.get_variable(
name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def abc_ced_abde(input_tensor,
num_attention_heads,
head_size,
initializer,
activation,
name=None):
"""A dense layer with 3D kernel.
Args:
input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
num_attention_heads: Number of attention heads.
head_size: The size per attention head.
initializer: Kernel initializer.
    activation: Activation function.
name: The name scope of this layer.
Returns:
float logits Tensor.
"""
input_shape = get_shape_list(input_tensor)
hidden_size = input_shape[2]
with tf.variable_scope(name):
w = tf.get_variable(
name="kernel",
shape=[hidden_size, num_attention_heads * head_size],
initializer=initializer)
w = tf.reshape(w, [hidden_size, num_attention_heads, head_size])
b = tf.get_variable(
name="bias",
shape=[num_attention_heads * head_size],
initializer=tf.zeros_initializer)
b = tf.reshape(b, [num_attention_heads, head_size])
ret = tf.einsum("BFH,HND->BFND", input_tensor, w)
ret += b
if activation is not None:
return activation(ret)
else:
return ret
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return contrib_layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, rate=dropout_prob)
return output
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
    # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) list of int. The expected ranks of `tensor`. If
      this is specified and the rank of `tensor` is not in the list, an
      exception will be raised.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
        assert tensor.shape.ndims in expected_rank, \
            "tensor {} has rank {}, expected one of {}".format(
                name, tensor.shape.ndims, expected_rank)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
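# Minimal usage sketch (hypothetical shapes; not part of the original file):
#   config = ModelConfig(vocab_size=30000)
#   input_ids = tf.placeholder(tf.int32, shape=[8, 128])
#   model = Model(config, is_training=False, input_ids=input_ids)
#   pooled = model.get_pooled_output()      # [8, config.hidden_size]
#   sequence = model.get_sequence_output()  # [8, 128, config.hidden_size]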
|
{"hexsha": "152433b81eac7350892365af71d1f927992d5c1b", "size": 28385, "ext": "py", "lang": "Python", "max_stars_repo_path": "modeling.py", "max_stars_repo_name": "pwldj/nls", "max_stars_repo_head_hexsha": "c85adafcbb88d0d93df1141a1a04ba0581cc6bfb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modeling.py", "max_issues_repo_name": "pwldj/nls", "max_issues_repo_head_hexsha": "c85adafcbb88d0d93df1141a1a04ba0581cc6bfb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modeling.py", "max_forks_repo_name": "pwldj/nls", "max_forks_repo_head_hexsha": "c85adafcbb88d0d93df1141a1a04ba0581cc6bfb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4784422809, "max_line_length": 86, "alphanum_fraction": 0.6125770654, "include": true, "reason": "import numpy", "num_tokens": 5642}
|
# This Python file uses the following encoding: utf-8
"""
MIT License
Copyright (c) 2020 Nils DEYBACH & Léo OUDART
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
This class file implements the minimization of the dipoles' total energy with a SciPy function: optimize.fmin_cg.
The computation is run in a background thread.
"""
from PySide2 import QtWidgets
from PySide2 import QtCore, QtGui
from PySide2.QtCore import *
from PySide2.QtGui import *
from copy import deepcopy
from math import cos, sin, radians, degrees
from scipy.constants import mu_0, pi
from scipy import optimize
import numpy.matlib
import numpy as np
from .DipSimUtilities import *
from .DipSim import *
""" lunch the minimizing function in a thread """
class DipSimComputor(QObject):
def __init__(self):
super(DipSimComputor, self).__init__()
self._workerThreadMinEn = WorkerMinEnergy(self)
class WorkerMinEnergy(QThread):
resultDips = Signal(list)
resultEnergy = Signal(float)
error = Signal()
def __init__(self, parent=None):
super(WorkerMinEnergy, self).__init__(parent=parent)
self.dipoles = None
self.lock2D = False
self.unitCoef=10**-9
"""
    dipoles: dipoles list (DipModel)
    distCoef: power of ten of the distance unit, 0 is meter, -9 is nanometer (float)
    lock2D: whether the dipoles are constrained to a 2D plane rather than 3D (boolean)
"""
@Slot()
def compute(self, dipoles, distCoef=0.0, lock2D=False):
self.dipoles = dipoles
self.unitCoef=10**distCoef
self.lock2D = lock2D
self.start()
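    # Typical use: connect the `resultDips`/`resultEnergy` signals to slots,
    # then call compute(dipoles); QThread.start() invokes run() in the background.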
def run(self):
try:
resDips = self.getMinEnergy(self.dipoles, self.lock2D)
resEn = self.computeEnergyDipoles(resDips)
self.resultDips.emit(resDips)
self.resultEnergy.emit(resEn)
except:
self.error.emit()
def setDipoles(self, dipoles):
self.dipoles = dipoles
"""
    Return the configuration of moments that minimizes the total energy (magnetic dipole-dipole interaction) of the dipoles.
    It takes two arguments:
    -dipoles: the list of the dipoles (DipModel)
    -lock2D: boolean; if true the moments are constrained to a 2D plane (theta = pi/2)
"""
def getMinEnergy(self, dipoles, lock2D):
momentIntensity= 1
positions = []
angle = []
nul= []
a=0
#Find the minimum configuration in 3D
if lock2D == False:
for i in dipoles:
angle.append([anglesQuaternionToSph(Dipole.rndQuaternionGenerator(is2D=True))[0],anglesQuaternionToSph(Dipole.rndQuaternionGenerator(is2D=True))[1]])
positions.append([i.position.x(),i.position.y(),i.position.z()]) # [[x1,y1,z1], [x2,y2,z2]]
nul.append([0,0])
pos=tuple(positions)
res1= optimize.fmin_cg(self.computeEnergy,angle,args=pos,maxiter=10000) #Minimize the computeEnergy function, variables are the orientation of the moments (in 3D)
            # res1 is a list of angles: [phi1, theta1, phi2, theta2, ...]
for i in dipoles:
i.quaternion = anglesSphToQuaternion(degrees(res1[a]),degrees(res1[a+1]))
a+=2
return(dipoles)
elif lock2D == True: #Find the minimum configuration in 2D
for i in dipoles:
angle.append(anglesQuaternionToSph(Dipole.rndQuaternionGenerator(is2D=True))[0])
positions.append([i.position.x(),i.position.y(),i.position.z()]) # [[x1,y1,z1], [x2,y2,z2]]
nul.append(0)
pos=tuple(positions)
res1= optimize.fmin_cg(self.computeEnergy2D,angle,args=pos,maxiter=10000) #Minimize the computeEnergy function, variables are the orientation of the moments (in 2D)
            # res1 is a list of angles: [phi1, phi2, phi3, ...]
for i in dipoles:
i.quaternion = anglesSphToQuaternion(degrees(res1[a]),90) # change the quaternion to the minimized one
a+=1
return(dipoles)
"""
    Compute the total energy (magnetic dipole-dipole interaction)
    It takes two arguments:
    -angle: flat list of the angles of each dipole: [phi1, theta1, phi2, theta2, ...]
    -args: tuple of the positions of each dipole: ([x1,y1,z1], [x2,y2,z2], ...)
"""
def computeEnergy(self, angle,*args):
E=0
moment=[]
for i in range(int(len(angle)/2)):
phiI= angle[2*i]
thetaI= angle[2*i+1]
            moment.append([cos(phiI)*sin(thetaI),sin(phiI)*sin(thetaI),cos(thetaI)]) # create a list of all moments: [[m1_x,m1_y,m1_z],[m2_x,m2_y,m2_z], ...]
""" Formula of energy of magnetic dipole–dipole interaction """
for i in range(len(args)):
for j in range(len(args)):
if j != i:
vectIJ= np.subtract(args[j],args[i])
normIJ= np.linalg.norm(vectIJ)
E+=(np.dot(moment[i],moment[j]))/(normIJ**3) - (3*np.dot(moment[i],vectIJ)*np.dot(moment[j],vectIJ))/(normIJ**5)
return ((E*mu_0/(8*pi))*10**18)
"""
    Compute the total energy (magnetic dipole-dipole interaction) with all moments in the 2D plane (theta = pi/2)
    It takes two arguments:
    -angle: list of the azimuthal angles of each dipole (polar coordinates): [phi1, phi2, phi3, ...]
    -args: tuple of the positions of each dipole: ([x1,y1,z1], [x2,y2,z2], ...)
"""
def computeEnergy2D(self, angle,*args):
E=0
moment=[]
for i in range(len(angle)):
phiI= angle[i]
thetaI= pi/2
            moment.append([cos(phiI)*sin(thetaI),sin(phiI)*sin(thetaI),cos(thetaI)]) #create a list of all moments: [[m1_x,m1_y,m1_z],[m2_x,m2_y,m2_z]]
""" Formula of energy of magnetic dipole–dipole interaction """
for i in range(len(args)):
for j in range(len(args)):
if j != i:
vectIJ= np.subtract(args[j],args[i]) # r_IJ vector
normIJ= np.linalg.norm(vectIJ) # ||r_IJ||
E+=(np.dot(moment[i],moment[j]))/(normIJ**3) - (3*np.dot(moment[i],vectIJ)*np.dot(moment[j],vectIJ))/(normIJ**5)
E=(E*mu_0)/(8*pi)
return (E*(10**18))
"""
Compute the total energy (Magnetic dip to dip) of a dipole configuration in J
It take one argument:
-dipol: list of all dipoles (DipModel)
"""
def computeEnergyDipoles(self, dipol):
positions = []
angle = []
E=0
moment=[]
if len(dipol)>1: #if there is only one dipole, the energy is zero
for i in dipol:
angle.append([anglesQuaternionToSph(i.quaternion)[0],anglesQuaternionToSph(i.quaternion)[1]]) # [[phi1,theta1],[phi2,theta2]]
positions.append([i.position.x()*self.unitCoef,i.position.y()*self.unitCoef,i.position.z()*self.unitCoef]) # [[x1,y1,z1], [x2,y2,z2]]
for i in range(len(angle)):
MI = dipol[i].moment * 9.27 * 10**-24 #convert the moment intensity of the dipole in J/T (µ_b -> J/T)
phiI= angle[i][0]
thetaI= angle[i][1]
                moment.append([cos(phiI)*sin(thetaI)*MI,sin(phiI)*sin(thetaI)*MI,cos(thetaI)*MI]) #create a list of all moments: [[m1_x,m1_y,m1_z],[m2_x,m2_y,m2_z]]
""" Formula of energy of magnetic dipole–dipole interaction """
for i in range(len(positions)):
for j in range(len(positions)):
if j != i:
vectIJ= np.subtract(positions[j],positions[i]) # r_IJ vector
normIJ= np.linalg.norm(vectIJ) # ||r_IJ||
E+=(np.dot(moment[i],moment[j]))/(normIJ**3) - (3*np.dot(moment[i],vectIJ)*np.dot(moment[j],vectIJ))/(normIJ**5)
E=(E*mu_0/(8*pi))
return (E*6.242 * 10**18) #convert E in J to eV
else:
return(0)
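# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the worker above; it reuses this module's
# np/mu_0/pi imports): a standalone check of the pairwise dipole-dipole energy
# sum. The names _demo_energy, moments and positions are invented for this
# example only.
def _demo_energy(moments, positions):
    E = 0
    for i in range(len(positions)):
        for j in range(len(positions)):
            if j != i:
                vectIJ = np.subtract(positions[j], positions[i])
                normIJ = np.linalg.norm(vectIJ)
                E += np.dot(moments[i], moments[j])/(normIJ**3) - (3*np.dot(moments[i], vectIJ)*np.dot(moments[j], vectIJ))/(normIJ**5)
    return E*mu_0/(8*pi)

if __name__ == "__main__":
    # For two dipoles, head-to-tail alignment along the separation axis is the
    # energy minimum, lower than the side-by-side antiparallel configuration:
    head_to_tail = _demo_energy([[1, 0, 0], [1, 0, 0]], [[0, 0, 0], [1, 0, 0]])
    side_by_side = _demo_energy([[0, 0, 1], [0, 0, -1]], [[0, 0, 0], [1, 0, 0]])
    assert head_to_tail < side_by_side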
|
{"hexsha": "317353e1d3d8dee3b484e60a36cd7d8edeeb6d5c", "size": 9054, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/DipSimComputor.py", "max_stars_repo_name": "ndeybach/DipSim", "max_stars_repo_head_hexsha": "091f147f933b000b6ab829ec7d10eef985c260b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/python/DipSimComputor.py", "max_issues_repo_name": "ndeybach/DipSim", "max_issues_repo_head_hexsha": "091f147f933b000b6ab829ec7d10eef985c260b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/DipSimComputor.py", "max_forks_repo_name": "ndeybach/DipSim", "max_forks_repo_head_hexsha": "091f147f933b000b6ab829ec7d10eef985c260b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1142857143, "max_line_length": 190, "alphanum_fraction": 0.6036006185, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2363}
|
from carla_utils import carla
import numpy as np
from typing import List, Any
import pickle
import os
from os.path import join
from ..basic import Data, YamlConfig
from ..world_map import Role, get_topology
from ..augment import GlobalPath
from ..agents import AgentListMaster, BaseAgent
from .scenario import ScenarioSingleAgent
class Recorder(object):
def __init__(self, dir_path):
self.dir_path = dir_path
self.records = dict()
def record_town_map(self, scenario: ScenarioSingleAgent):
file_path = join(self.dir_path, scenario.map_name + '.txt')
if not os.path.isfile(file_path):
with open(file_path, 'wb') as f:
pickle.dump(PicklableTownMap(scenario.town_map), f)
return
def record_scenario(self, config: YamlConfig, scenario: ScenarioSingleAgent):
self.records['scenario'] = {
'frequency': config.decision_frequency,
'map_name': scenario.map_name,
}
def record_agents(self, timestamp, agents_master: AgentListMaster, epoch_info: Data):
"""
Args:
timestamp: time.time()
agents: list of BaseAgent and BaseAgentObstacle
Returns:
"""
for agent in agents_master.agents:
agent_key = 'agent' + '_' + agent.role_name.atype.name + '_' + str(agent.vi)
            if self.records.get(agent_key) is None:
self.records[agent_key] = dict()
self.records[agent_key][timestamp] = Data(agent=PicklableAgent(agent))
for obstacle in agents_master.obstacles:
obstacle_key = 'obstacle' + '_' + obstacle.role_name.atype.name + '_' + str(obstacle.vi)
            if self.records.get(obstacle_key) is None:
self.records[obstacle_key] = dict()
self.records[obstacle_key][timestamp] = Data(agent=PicklableAgent(obstacle))
if epoch_info.done:
for agent in agents_master.agents:
agent_key = 'agent' + '_' + agent.role_name.atype.name + '_' + str(agent.vi)
global_path = PicklableGlobalPath(agent.global_path)
for t, picklable_agent in self.records[agent_key].items():
picklable_agent.global_path = global_path
return
def record_experience(self, timestamp, agents_master: AgentListMaster, actions):
for agent, action in zip(agents_master.agents, actions):
agent_key = 'agent' + '_' + agent.role_name.atype.name + '_' + str(agent.vi)
self.records[agent_key][timestamp].update(action=action)
return
def save_to_disk(self, index):
file_path = join(self.dir_path, str(index) + '.txt')
with open(file_path, 'wb') as f:
pickle.dump(self.records, f)
return
def clear(self):
del self.records
self.records = dict()
@staticmethod
def load_from_disk(file_path):
record = None
with open(file_path, 'rb') as f:
record = pickle.load(f)
return record
# =============================================================================
# -- Picklable ---------------------------------------------------------------
# =============================================================================
class PicklableAgent(object):
def __init__(self, agent: BaseAgent):
self.id = agent.vi
self.vi = agent.vi
self.state = agent.get_state()
attributes = agent.vehicle.attributes
attributes['role_name'] = agent.role_name
self.attributes = attributes
bbx = agent.vehicle.bounding_box.extent
x, y, z = bbx.x, bbx.y, bbx.z
bbx = PicklableBoundingBox(x, y, z)
self.bounding_box = bbx
self.max_velocity = agent.max_velocity if hasattr(agent, 'max_velocity') else None
self.global_path = None
def get_transform(self):
x, y, z = self.state.x, self.state.y, self.state.z
theta = self.state.theta
location = carla.Location(x, y, z)
rotation = carla.Rotation(yaw=np.rad2deg(theta))
return carla.Transform(location, rotation)
def get_state(self):
return self.state
class PicklableBoundingBox(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
self.extent = self
class PicklableGlobalPath(object):
def __init__(self, global_path: GlobalPath):
self.carla_waypoints = [PicklableWaypoint(wp) for wp in global_path.carla_waypoints]
self.options = global_path.options
self.x = global_path.x
self.y = global_path.y
self.z = global_path.z
self.theta = global_path.theta
self.curvatures = global_path.curvatures
self.distances = global_path.distances
self.sampling_resolution = global_path.sampling_resolution
self._max_coverage = 0
def __len__(self):
return len(self.carla_waypoints)
def _step_coverage(self, current_transform):
return GlobalPath._step_coverage(self, current_transform)
def remaining_waypoints(self, current_transform):
return GlobalPath.remaining_waypoints(self, current_transform)
class PicklableWaypoint(object):
def __init__(self, waypoint: carla.Waypoint):
self.transform = PicklableTransform(waypoint.transform)
class PicklableTransform(object):
def __init__(self, transform: carla.Transform):
self.location = PicklableLocation(transform.location)
self.rotation = PicklableRotation(transform.rotation)
class PicklableLocation(object):
def __init__(self, location: carla.Location):
self.x = location.x
self.y = location.y
self.z = location.z
def distance(self, loc):
dx = self.x - loc.x
dy = self.y - loc.y
dz = self.z - loc.z
return np.sqrt(dx**2 + dy**2 + dz**2)
class PicklableRotation(object):
def __init__(self, rotation: carla.Rotation):
self.roll = rotation.roll
self.pitch = rotation.pitch
self.yaw = rotation.yaw
class PicklableTownMap(object):
def __init__(self, town_map):
self.name = town_map.name
self.cua_waypoints = [PicklableWaypoint(wp) for wp in town_map.generate_waypoints(0.1)]
self.opendrive_content = town_map.to_opendrive()
self.topology_origin = [(PicklableWaypoint(start), PicklableWaypoint(end)) for (start, end) in town_map.get_topology()]
self.topology = [t.info for t in get_topology(town_map, sampling_resolution=2.0)]
def generate_waypoints(self, _):
return self.cua_waypoints
def to_opendrive(self):
return self.opendrive_content
def get_topology(self):
return self.topology_origin
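# =============================================================================
# -- Illustrative round trip ---------------------------------------------------
# =============================================================================
# Minimal sketch (illustrative only; the Town01 values are made up, and it
# assumes this module's imports resolve, i.e. carla_utils is available):
# Recorder pickles its `records` dict to '<index>.txt' and `load_from_disk`
# restores it unchanged.
if __name__ == "__main__":
    import tempfile
    _tmp_dir = tempfile.mkdtemp()
    _rec = Recorder(_tmp_dir)
    _rec.records['scenario'] = {'frequency': 10, 'map_name': 'Town01'}
    _rec.save_to_disk(0)
    _restored = Recorder.load_from_disk(join(_tmp_dir, '0.txt'))
    assert _restored['scenario']['map_name'] == 'Town01'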
|
{"hexsha": "0cdb6246c29b69f6045ea5866e1eee2ac064b3b2", "size": 6809, "ext": "py", "lang": "Python", "max_stars_repo_path": "carla_utils/rl_template/recorder.py", "max_stars_repo_name": "IamWangYunKai/DG-TrajGen", "max_stars_repo_head_hexsha": "0a8aab7e1c05111a5afe43d53801c55942e9ff56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2021-09-15T00:43:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T22:57:21.000Z", "max_issues_repo_path": "carla_utils/rl_template/recorder.py", "max_issues_repo_name": "IamWangYunKai/DG-TrajGen", "max_issues_repo_head_hexsha": "0a8aab7e1c05111a5afe43d53801c55942e9ff56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-09T03:08:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-15T07:08:31.000Z", "max_forks_repo_path": "carla_utils/rl_template/recorder.py", "max_forks_repo_name": "IamWangYunKai/DG-TrajGen", "max_forks_repo_head_hexsha": "0a8aab7e1c05111a5afe43d53801c55942e9ff56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-11-26T05:45:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T12:46:41.000Z", "avg_line_length": 31.8177570093, "max_line_length": 127, "alphanum_fraction": 0.6234395653, "include": true, "reason": "import numpy", "num_tokens": 1543}
|
#!/usr/local/bin/env Rscript --vanilla
args <- commandArgs(TRUE)
if (!is.null(args[1])) {
df <- read.csv(args[1], sep = "\t", row.names = 1, check.names=F)
  if (max(df, na.rm = TRUE) < 50) {
    # Heuristic: a small maximum suggests log2-scale input; convert back to linear
    df <- 2^df
  }
} else {
stop("No expression matrix provided!")
}
library(matrixStats)
library(EPIC)
if (length(args) > 1) {
ref <- read.csv(args[2], sep = "\t", row.names = 1, check.names=F) ### Need to change later
cellTypeNames <- colnames(ref)
markerGenes <- c()
for (s in colnames(ref)) {
tempMarkers <- rownames(ref[ref[,s] > quantile(ref[,s], prob=0.75),])
markers <- c()
if (length(args) > 2) {
if (tolower(args[3]) == "pairwise") {
tempTb = 2 * ref[,cellTypeNames[cellTypeNames != s]] - ref[,s]
markers <- tempMarkers[tempMarkers %in% rownames(ref[rowSums(tempTb < 0) == (length(cellTypeNames) - 1), ])]
} else {
markers <- tempMarkers[tempMarkers %in% rownames(ref)[ref[,s] > 2 * rowMedians(as.matrix(ref))]]
}
} else {
markers <- tempMarkers[tempMarkers %in% rownames(ref)[ref[,s] > 2 * rowMedians(as.matrix(ref))]]
}
if (length(markers) == 0) {
# tempMarkers <- rownames(ref[ref[,s] > quantile(ref[,s], prob=0.5),])
deOverMedians <- (ref[tempMarkers,s] - rowMedians(as.matrix(ref[tempMarkers,])))/rowMedians(as.matrix(ref[tempMarkers,]))
markers <- tempMarkers[deOverMedians > quantile(deOverMedians, prob=0.9)]
}
genesTemp <- markers
markerGenes <- c(markerGenes, genesTemp)
# genesNull <- rownames(ref)[!(rownames(ref) %in% genesTemp)]
# ref[genesNull, s] <- 0.0
}
markerGenes <- unique(markerGenes)
epicRef <- list()
epicRef$sigGenes <- markerGenes
sigMatName <- tools::file_path_sans_ext(basename(args[2]))
meanFile <- paste0("/data/", sigMatName, "_refMean.txt")
stdFile <- paste0("/data/", sigMatName, "_refStd.txt")
if (file.exists(meanFile)) {
epicRef$refProfiles <- read.csv(meanFile, sep = "\t", row.names = 1, check.names=F)
} else {
epicRef$refProfiles <- ref
warning("Reference profile was not provided or can not be found, using the signature matrix instead\n")
}
if (file.exists(stdFile)) {
epicRef$refProfiles.var <- read.csv(stdFile, sep = "\t", row.names = 1, check.names=F)
}
tryCatch(
expr = {
xc <- EPIC(bulk = df, reference = epicRef)
results <- xc$cellFractions
results <- results[,colnames(results) != "otherCells"]
write.table(t(results), file = "deconvoluted.tsv", quote = FALSE, col.names = NA, sep = "\t")
},
error = function(e){
            # Fall back: if EPIC fails, emit an all-zero fraction matrix with
            # the expected sample and cell-type dimensions.
print(e)
Y <- read.csv(args[1], sep = "\t", check.names=F)
X <- read.csv(args[2], sep = "\t", row.names = 1, check.names=F)
results <- matrix(0, nrow = length(colnames(Y)) - 1, ncol = length(colnames(X)), dimnames = list(colnames(Y)[2:length(colnames(Y))], colnames(X)))
write.table(t(results), file="deconvoluted.tsv", quote = FALSE, col.names = NA, sep = "\t")
}
)
} else {
xc <- EPIC(bulk = df)
results <- xc$cellFractions
write.table(t(results), file = "deconvoluted.tsv", quote = FALSE, col.names = NA, sep = "\t")
}
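# Illustrative sketch of the pairwise marker rule used above, on a made-up
# two-cell-type reference (toyRef is invented for this example). A gene is an
# A-specific marker only if twice the expression in every other cell type
# stays below its expression in A. Wrapped in if (FALSE) so it never runs as
# part of the pipeline.
if (FALSE) {
    toyRef <- data.frame(A = c(10, 1, 5), B = c(1, 10, 5),
                         row.names = c("geneA", "geneB", "geneC"))
    markersA <- rownames(toyRef)[2 * toyRef$B - toyRef$A < 0]  # -> "geneA"
}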
|
{"hexsha": "ddfcb90567d071b72e9082b4802fbb67e81303d8", "size": 3457, "ext": "r", "lang": "R", "max_stars_repo_path": "tumorDeconvAlgs/epic/epic.r", "max_stars_repo_name": "PNNL-CompBio/proteomicsTumorDeconv", "max_stars_repo_head_hexsha": "a88c3ee729a0dab267ebd5d802a486ebacbf0c0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-07-18T17:37:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-15T05:20:31.000Z", "max_issues_repo_path": "tumorDeconvAlgs/epic/epic.r", "max_issues_repo_name": "sgosline/proteomicsTumorDeconv", "max_issues_repo_head_hexsha": "a88c3ee729a0dab267ebd5d802a486ebacbf0c0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2020-10-14T20:37:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-11T15:36:22.000Z", "max_forks_repo_path": "tumorDeconvAlgs/epic/epic.r", "max_forks_repo_name": "sgosline/proteomicsTumorDeconv", "max_forks_repo_head_hexsha": "a88c3ee729a0dab267ebd5d802a486ebacbf0c0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-18T01:21:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-24T15:44:27.000Z", "avg_line_length": 41.6506024096, "max_line_length": 158, "alphanum_fraction": 0.5658085045, "num_tokens": 973}
|
import numpy as np
import scipy
from scipy.optimize import linprog
import qpsolvers
import json
import matplotlib
from matplotlib import cm
from mpl_toolkits.mplot3d.art3d import Line3DCollection
import matplotlib.pyplot as plt
import typing as t
import click
DEFAULT_RESOLUTION = 100 # Runtime is O(n^2) with respect to resolution!
DEFAULT_MAX_THRUSTS = [-2.9, 3.71] # Lifted from the BlueRobotics public performance data (kgf)
# coefficients of the quadratic approximating current draw as a function of thrust in the forward direction in the form:
# ax^2 + bx + c
# Both regressions are in terms of the same variable, thrust, which is negative in the reverse direction
DEFAULT_FWD_CURRENT = [.741, 1.89, -.278]
DEFAULT_REV_CURRENT = [1.36, -2.04, -.231]
DEFAULT_MAX_CURRENT = 22
class Thruster3D:
def __init__(self, x, y, z, theta, phi, max_thrusts, fwd_current, rev_current):
self.pos = np.array([x, y, z])
self.max_thrusts = max_thrusts
self.fwd_current = fwd_current
self.rev_current = rev_current
# Calculate the unit vector in the direction specified by theta and phi
theta = np.radians(theta)
phi = np.radians(phi)
self.orientation = np.array([
np.sin(phi) * np.cos(theta),
np.sin(phi) * np.sin(theta),
np.cos(phi)
])
def torque(self):
return np.cross(self.pos, self.orientation)
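# Quick self-check for Thruster3D (illustrative; not called by the tool — run
# it manually to sanity-check the math): phi = 90, theta = 0 points a thruster
# along +x, and mounting it at y = 1 yields torque about -z.
def _check_thruster_orientation():
    t = Thruster3D(0, 1, 0, 0, 90, DEFAULT_MAX_THRUSTS, DEFAULT_FWD_CURRENT, DEFAULT_REV_CURRENT)
    assert np.allclose(t.orientation, [1, 0, 0])
    assert np.allclose(t.torque(), [0, 0, -1])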
def get_column_span(mat: np.ndarray) -> np.ndarray:
"""
Find the column span of a matrix (remove columns which to not increase the number of dimensions the columns span)
@param mat:The input matrix
@return: A full rank matrix constructed from the column vectors of the input
"""
upper_triangular = scipy.linalg.lu(mat)[2]
output_vectors = []
for row in upper_triangular:
for i, value in enumerate(row):
if abs(value) > 1e-10:
output_vectors.append(mat[..., i])
break
return np.array(output_vectors).transpose()
# rref function by joni on Stack Overflow:
# https://stackoverflow.com/a/66412719
# This function is licenced under CC BY-SA 4.0
# https://creativecommons.org/licenses/by-sa/4.0/
def rref(A, tol=1.0e-12):
m, n = A.shape
i, j = 0, 0
jb = []
while i < m and j < n:
# Find value and index of largest element in the remainder of column j
k = np.argmax(np.abs(A[i:m, j])) + i
p = np.abs(A[k, j])
if p <= tol:
# The column is negligible, zero it out
A[i:m, j] = 0.0
j += 1
else:
# Remember the column index
jb.append(j)
if i != k:
# Swap the i-th and k-th rows
A[[i, k], j:n] = A[[k, i], j:n]
# Divide the pivot row i by the pivot element A[i, j]
A[i, j:n] = A[i, j:n] / A[i, j]
# Subtract multiples of the pivot row from all the other rows
for k in range(m):
if k != i:
A[k, j:n] -= A[k, j] * A[i, j:n]
i += 1
j += 1
# Finished
return A, jb
def rotate_to_vector(vectors: np.ndarray, target_dir: np.ndarray) -> np.ndarray:
"""
Rotate a group of vectors so that the a specified vector is along the +x axis
:param vectors: A 2d numpy array in which each column is a 3d vector
:param target_dir: A 3d vector in the target direction
:return: A np array with the same size as vectors, with the same rotation applied to each column
"""
target_dir = target_dir / np.linalg.norm(target_dir) # Make target_dir a unit vector
new_bases = np.empty((3, 3)) # Create an empty 3x3 change of basis matrix
new_bases[..., 0] = target_dir # The first basis is our target direction
if not (target_dir[1] == 0 and target_dir[2] == 0): # Make sure the cross product computed below isn't 0
        second_basis = np.cross(target_dir, np.array([1, 0, 0]))  # Choose a second basis perpendicular to the first
else:
second_basis = np.cross(target_dir, np.array([0, 1, 0]))
second_basis /= np.linalg.norm(second_basis) # Make the second basis a unit vector
new_bases[..., 1] = second_basis
    third_basis = np.cross(target_dir, second_basis)  # Calculate a third basis perpendicular to the first two
third_basis /= np.linalg.norm(third_basis) # Make the third basis a unit vector
new_bases[..., 2] = third_basis
# Invert the matrix. The original matrix maps (1, 0, 0) onto the target direction. We want a matrix
# that maps the target direction onto (1, 0, 0).
inverse_transform = np.linalg.inv(new_bases)
# Calculate the transformation with matrix_vector multiplication
transformed_orientations = inverse_transform.dot(vectors)
return transformed_orientations
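# Quick self-check for rotate_to_vector (illustrative; not called by the tool):
# the target direction itself must land on the +x axis with its norm preserved.
def _check_rotate_to_vector():
    target = np.array([1.0, 2.0, 3.0])
    rotated = rotate_to_vector(target.reshape(3, 1), target)
    assert np.allclose(rotated.flatten(), [np.linalg.norm(target), 0.0, 0.0])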
def get_max_effort(thrusters: t.List[Thruster3D], objective: np.ndarray, constraints: t.Optional[np.ndarray],
max_current: float):
thruster_count = len(thrusters)
# First Simplex run. Find the maximum thrust in the desired direction
bounds = [thruster.max_thrusts for thruster in thrusters]
right_of_equality = np.zeros(constraints.shape[0]) if constraints is not None else None # All constraints must be 0
max_effort_result = linprog(c=-objective, A_ub=None, b_ub=None, A_eq=constraints, b_eq=right_of_equality,
bounds=bounds, method="highs")
    max_effort = -.99999 * max_effort_result.fun  # scaled slightly below the LP optimum to absorb floating-point error
if max_effort < 0.00000001:
# The thruster layout is incapable of producing effort in the target direction
return 0.0
# Find the minimum current that produces the same effort as the first result
# Each thruster is split into reverse and forwards, so there are double the elements in the objective
# The objective function (total current as a function of thruster forces) is quadratic
x_squared_coefficients = np.zeros((thruster_count * 2, thruster_count * 2)) # Holds the coefficients of x^2
x_coefficients = np.empty(thruster_count * 2) # Holds the coefficients of x
# The reverse half thrusters are indexed 0 to numb_thrusters - 1
# Forward half thrusters are indexed numb_thrusters to 2 * numb_thrusters - 1
for i, thruster in enumerate(thrusters):
x_squared_coefficients[i][i] = thruster.rev_current[0]
x_squared_coefficients[i + thruster_count][i + thruster_count] = thruster.fwd_current[0]
x_coefficients[i] = thruster.rev_current[1]
x_coefficients[i+thruster_count] = thruster.fwd_current[1]
# All 6 degrees of freedom are constrained
thruster_constraints_mincurrent = np.row_stack((objective, constraints)) if constraints is not None else \
np.array(objective)
# Each thruster is split in two, the constraints are the same for each half of a thruster
left_of_equality_mincurrent = np.column_stack((thruster_constraints_mincurrent, thruster_constraints_mincurrent))
lower_bounds = np.array([thruster.max_thrusts[0] for thruster in thrusters] + [0.0 for _ in thrusters])
upper_bounds = np.array([0.0 for _ in thrusters] + [thruster.max_thrusts[1] for thruster in thrusters])
# Extra constraint for the original objective
right_of_equality_mincurrent = np.zeros((0 if constraints is None else constraints.shape[0]) + 1)
right_of_equality_mincurrent[0] = max_effort
min_current_result = qpsolvers.solve_qp(
        P=2 * x_squared_coefficients,  # The solver minimizes 1/2 * x^T P x + q^T x; the factor of 2 cancels the 1/2
q=x_coefficients,
A=left_of_equality_mincurrent,
b=right_of_equality_mincurrent,
lb=lower_bounds,
ub=upper_bounds,
solver="quadprog"
)
# combine half-thrusters into full thrusters
min_current_true_array = []
for i in range(thruster_count):
min_current_true_array.append(min_current_result[i] + min_current_result[i + thruster_count])
current_quadratic = [0] * 3
for i, thruster in enumerate(thrusters):
thrust = min_current_true_array[i]
if thrust >= 0: # use the forward thrust coefficients
current_quadratic[0] += thruster.fwd_current[0] * thrust ** 2 # a * t^2
current_quadratic[1] += thruster.fwd_current[1] * thrust # b * t
current_quadratic[2] += thruster.fwd_current[2] # c
else: # use the reverse thrust coefficients
current_quadratic[0] += thruster.rev_current[0] * thrust ** 2
current_quadratic[1] += thruster.rev_current[1] * thrust
current_quadratic[2] += thruster.rev_current[2]
current_quadratic[2] -= max_current # ax^2 + bx + c = I -> ax^2 + bx + (c-I) = 0
# solve quadratic, take the proper point, and clamp it to a maximum of 1.0
effort_multiplier = min(1., max(np.roots(current_quadratic)))
return max_effort * effort_multiplier
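# Tiny illustration of the final scaling step above (the coefficients are made
# up): if the total current at full effort is I(t) = t^2 + 2t + 3 amps and the
# cap is 4.25 A, the positive root of t^2 + 2t + (3 - 4.25) gives a 0.5 effort
# multiplier. Not called by the tool; run manually to sanity-check.
def _check_current_scaling():
    roots = np.roots([1.0, 2.0, 3.0 - 4.25])
    assert np.isclose(min(1.0, max(roots)), 0.5)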
def setup_subplot(subplot, thrusters, axes_bounds):
subplot.set_box_aspect((1, 1, 1))
subplot.view_init(elev=30, azim=-150)
subplot.set_xlim((axes_bounds, -axes_bounds)) # Invert x axis
subplot.set_ylim((-axes_bounds, axes_bounds))
subplot.set_zlim((axes_bounds, -axes_bounds)) # Invert z axis
# Draw some "axes" so it's clear where (0, 0, 0) is
subplot.plot((-axes_bounds, axes_bounds), (0, 0), (0, 0), c="black")
subplot.plot((0, 0), (-axes_bounds, axes_bounds), (0, 0), c="black")
subplot.plot((0, 0), (0, 0), (-axes_bounds, axes_bounds), c="black")
# Plot the locations and orientations of the thrusters
thrusterloc_x = [2 * thruster.pos[0] for thruster in thrusters]
thrusterloc_y = [2 * thruster.pos[1] for thruster in thrusters]
thrusterloc_z = [2 * thruster.pos[2] for thruster in thrusters]
thrusterdir_x = [2 * thruster.orientation[0] for thruster in thrusters]
thrusterdir_y = [2 * thruster.orientation[1] for thruster in thrusters]
thrusterdir_z = [2 * thruster.orientation[2] for thruster in thrusters]
subplot.quiver(thrusterloc_x, thrusterloc_y, thrusterloc_z, thrusterdir_x, thrusterdir_y, thrusterdir_z,
color="black")
def add_colorbar(plot, ax, color_index, norm=None, cmap=plt.cm.turbo):
norm = norm or matplotlib.colors.Normalize(vmin=color_index.min(), vmax=color_index.max())
color_range = norm.vmax - norm.vmin
m = cm.ScalarMappable(cmap=cmap, norm=norm)
plot.colorbar(m, ticks=[
norm.vmin,
norm.vmin + color_range * 1 / 4,
norm.vmin + color_range * 2 / 4,
norm.vmin + color_range * 3 / 4,
norm.vmax
], ax=ax, fraction=0.1, shrink=0.5)
def plot_effort_surface(plot, ax, thrusters: t.List[Thruster3D], effort_vectors: np.ndarray,
extra_constraints: np.ndarray, resolution: int, max_current: float):
    # Determine whether the set of possible efforts is a solid, surface, or line
# Solve the constraints matrix
constraints_rref = rref(np.copy(extra_constraints), tol=1e-10)[0]
# Find the pivot columns
pivot_columns = []
for row in constraints_rref:
for j, val in enumerate(row):
if abs(val) > 1e-10:
pivot_columns.append(j)
break
# Find the vectors that span the solution set to extra_constraints * x = 0
thruster_value_bases = []
for i in range(constraints_rref.shape[1]):
if i not in pivot_columns:
new_basis = np.empty(constraints_rref.shape[1])
for j in range(constraints_rref.shape[1]):
if j in pivot_columns:
new_basis[j] = -constraints_rref[pivot_columns.index(j)][i]
else:
new_basis[j] = int(i == j)
thruster_value_bases.append(new_basis)
thruster_bases_matrix = np.matrix.round(np.array(thruster_value_bases).transpose(), decimals=10)
if thruster_bases_matrix.shape[0] == 0:
# The thrusters cannot produce effort in any direction under the constraints
return
# Find the span of the effort vectors under the constraints
effort_bases_matrix = effort_vectors.dot(thruster_bases_matrix)
effort_span = np.matrix.round(get_column_span(effort_bases_matrix), decimals=10)
if effort_span.shape[1] == 3:
# The output space is a 3d solid
        # Build a spherical grid of angles: u spans [0, 2*pi) (azimuth), v spans [0, pi] (polar)
u, v = np.mgrid[0:2 * np.pi:resolution * 1j, 0:np.pi: resolution / 2 * 1j]
mesh_x = np.empty(np.shape(u))
mesh_y = np.empty(np.shape(u))
mesh_z = np.empty(np.shape(u))
color_index = np.empty(np.shape(u))
# Iterate over each vertex and calculate the max effort in that direction
max_effort = 0
for i in range(np.shape(u)[0]):
for j in range(np.shape(u)[1]):
z = np.cos(u[i][j]) * np.sin(v[i][j])
y = np.sin(u[i][j]) * np.sin(v[i][j])
x = np.cos(v[i][j])
transformed_effort_vectors = rotate_to_vector(effort_vectors, np.array([x, y, z]))
effort = get_max_effort(thrusters, transformed_effort_vectors[0],
np.row_stack((transformed_effort_vectors[1:], extra_constraints)), max_current)
mesh_x[i][j] = x * effort
mesh_y[i][j] = y * effort
mesh_z[i][j] = z * effort
color_index[i][j] = effort
max_effort = max(max_effort, effort)
# Adjust each color so that the min and max values correspond to the min and max colors
color_index_modified = (color_index - color_index.min()) / (color_index.max() - color_index.min())
setup_subplot(ax, thrusters, np.ceil(max_effort))
ax.plot_surface(
mesh_x, mesh_y, mesh_z, alpha=0.75, facecolors=cm.turbo(color_index_modified), edgecolors='w', linewidth=0
)
        # Create a legend mapping the colors of each plot to its values
add_colorbar(plot, ax, color_index)
elif effort_span.shape[1] == 2:
# The output space is confined to a plane
# Switch to equivalent perpendicular bases
normal = np.cross(effort_span[..., 0], effort_span[..., 1])
if normal[1] != 0 or normal[2] != 0:
first_basis = np.cross(normal, np.array([1, 0, 0]))
else:
first_basis = np.cross(normal, np.array([0, 1, 0]))
effort_span[..., 0] = first_basis / np.linalg.norm(first_basis) # Convert to unit vector
second_basis = np.cross(normal, first_basis)
effort_span[..., 1] = second_basis / np.linalg.norm(second_basis)
effort_inv_transform = np.linalg.pinv(effort_span)
transformed_efforts = effort_inv_transform.dot(effort_vectors)
theta_space = np.linspace(0, np.pi * 2, num=resolution * 2)
curve = np.empty((2, theta_space.size))
color_index = np.empty(theta_space.size)
max_effort = 0
for i, theta in enumerate(theta_space):
u = np.cos(theta)
v = np.sin(theta)
rotation_mat = np.array([[u, -v], [v, u]])
rotated_efforts = rotation_mat.dot(transformed_efforts)
effort = get_max_effort(thrusters, rotated_efforts[0], np.array([rotated_efforts[1]]), max_current)
curve[0, i] = u * effort
curve[1, i] = v * effort
color_index[i] = effort
max_effort = max(max_effort, effort)
# Transform the 2d output space back into 3d
curve_3d = effort_span.dot(curve)
setup_subplot(ax, thrusters, np.ceil(max_effort))
points = curve_3d.T.reshape(-1, 1, 3)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = Line3DCollection(segments, cmap='turbo', linewidths=2.5)
lc.set_array(color_index)
ax.add_collection(lc)
add_colorbar(plt, ax, color_index)
elif effort_span.shape[1] == 1:
# The output space is confined to a line
effort_span[..., 0] = effort_span[..., 0] / np.linalg.norm(effort_span[..., 0]) # Normalize the basis vector
effort_inv_transform = np.linalg.pinv(effort_span)
transformed_efforts = effort_inv_transform.dot(effort_vectors)
pos_effort = get_max_effort(thrusters, transformed_efforts, None, max_current)
neg_effort = get_max_effort(thrusters, -transformed_efforts, None, max_current)
efforts = np.zeros((3, 2))
efforts[..., 0] = effort_span.transpose() * pos_effort
efforts[..., 1] = effort_span.transpose() * -neg_effort
setup_subplot(ax, thrusters, max(np.linalg.norm(efforts[..., 0]), np.linalg.norm(efforts[..., 1])))
average_effort = (pos_effort + neg_effort) / 2
norm = matplotlib.colors.Normalize(average_effort / 2, average_effort * 3 / 2)
custom_cmap = matplotlib.colors.LinearSegmentedColormap.from_list("rbu_cmap", ["blue", "gray", "red"])
a1 = ax.quiver(0, 0, 0, efforts[0][0], efforts[1][0], efforts[2][0], cmap=custom_cmap, norm=norm)
a1.set_array(np.array([pos_effort]))
a2 = ax.quiver(0, 0, 0, efforts[0][1], efforts[1][1], efforts[2][1], cmap=custom_cmap, norm=norm)
a2.set_array(np.array([neg_effort]))
add_colorbar(plt, ax, None, norm=norm, cmap=custom_cmap)
else:
raise ValueError("The span of the effort vectors had an unexpected dimension")
# The main entry point of the program
# All the Click decorators define various options that can be passed in on the command line
@click.command()
@click.option("--thrusters", "-t", default="thrusters.json", help="file containing thruster specifications")
@click.option("--resolution", "-r",
default=DEFAULT_RESOLUTION,
help="resolution of the thrust calculation, runtime is O(n^2) with respect to this!"
)
@click.option("--max-current", "-c", default=DEFAULT_MAX_CURRENT, help="maximum thruster current draw in amps")
def main(thrusters, resolution: int, max_current: float):
# This doc comment becomes the description text for the --help menu
"""
tau - the thruster arrangement utility
"""
# Read the thruster transforms input JSON file
    # Wrap the read in a try-except block to print a nicer error message if the file is missing
    try:
        with open(thrusters) as f:  # `with` closes the file safely even if an error occurs
            thrusters_raw = json.load(f)
    except FileNotFoundError:
        raise SystemExit("Thruster specification file not found: " + thrusters)
# Convert loaded JSON data into Thruster3D objects
thrusters: t.List[Thruster3D] = [
Thruster3D(
thruster_raw['x'],
thruster_raw['y'],
thruster_raw['z'],
thruster_raw['theta'],
thruster_raw['phi'],
# Optional thruster parameters: dict.get is used to provide a default value if the key doesn't exist
thruster_raw.get("max_thrusts", DEFAULT_MAX_THRUSTS),
thruster_raw.get("fwd_current", DEFAULT_FWD_CURRENT),
thruster_raw.get("rev_current", DEFAULT_REV_CURRENT)
)
for thruster_raw in thrusters_raw
]
# Format the orientation and torque of the thrusters to be used as constraints
thruster_orientations = np.array([thruster.orientation for thruster in thrusters]).transpose()
thruster_torques = np.array([thruster.torque() for thruster in thrusters]).transpose()
# Set up matplotlib window
matplotlib.use('TkAgg')
fig = plt.figure(num="TAU", figsize=(12, 6)) # Window size, in inches for some reason
# Set up plot: 3d orthographic plot with ROV axis orientation
ax_thrust = fig.add_subplot(121, projection='3d', proj_type='ortho')
ax_torque = fig.add_subplot(122, projection='3d', proj_type='ortho')
# Plot thrust surface
plot_effort_surface(plt, ax_thrust, thrusters, thruster_orientations, thruster_torques, resolution, max_current)
# Plot torque surface
plot_effort_surface(plt, ax_torque, thrusters, thruster_torques, thruster_orientations, resolution, max_current)
ax_thrust.title.set_text('Thrust')
ax_thrust.set_xlabel('X (Surge)')
ax_thrust.set_ylabel('Y (Sway)')
ax_thrust.set_zlabel('Z (Heave)')
ax_torque.title.set_text('Torque')
ax_torque.set_xlabel('X (Roll)')
ax_torque.set_ylabel('Y (Pitch)')
ax_torque.set_zlabel('Z (Yaw)')
# Synchronize the rotation and zoom of both subplots
def on_plot_move(event):
if event.inaxes is None:
return
ax = event.inaxes
ax2 = ax_thrust if event.inaxes == ax_torque else ax_torque
try:
button_pressed = ax.button_pressed
except AttributeError:
return
if button_pressed in ax._rotate_btn:
ax2.view_init(elev=ax.elev, azim=ax.azim)
elif button_pressed in ax._zoom_btn:
ax2.set_xlim3d(ax.get_xlim3d())
ax2.set_ylim3d(ax.get_ylim3d())
ax2.set_zlim3d(ax.get_zlim3d())
fig.canvas.draw_idle()
fig.canvas.mpl_connect('motion_notify_event', on_plot_move)
# Show plot
plt.show()
if __name__ == "__main__": # Only run the main function the program is being run directly, not imported
main() # Click autofills the parameters to this based on the program's command-line arguments
|
{"hexsha": "554ee6cf3edae8149cfc5dbfb5eaf4671e23ed5f", "size": 21854, "ext": "py", "lang": "Python", "max_stars_repo_path": "tau.py", "max_stars_repo_name": "NoahMollerstuen/Thruster-Arrangement-Utility", "max_stars_repo_head_hexsha": "71c35b55c43c35a7f3883fc73c0b4c803566e15f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tau.py", "max_issues_repo_name": "NoahMollerstuen/Thruster-Arrangement-Utility", "max_issues_repo_head_hexsha": "71c35b55c43c35a7f3883fc73c0b4c803566e15f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2022-02-05T01:13:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-05T21:20:25.000Z", "max_forks_repo_path": "tau.py", "max_forks_repo_name": "NoahMollerstuen/Thruster-Arrangement-Utility", "max_forks_repo_head_hexsha": "71c35b55c43c35a7f3883fc73c0b4c803566e15f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-29T21:26:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-30T15:27:04.000Z", "avg_line_length": 43.708, "max_line_length": 121, "alphanum_fraction": 0.640523474, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 5552}
|
import numpy as np # Make sure that numpy is imported
def makeFeatureVec(words, model, num_features):
# Function to average all of the word vectors in a given
# paragraph
#
# Pre-initialize an empty numpy array (for speed)
featureVec = np.zeros((num_features,),dtype="float32")
#
nwords = 0.
#
# Index2word is a list that contains the names of the words in
# the model's vocabulary. Convert it to a set, for speed
index2word_set = set(model.index2word)
#
# Loop over each word in the review and, if it is in the model's
    # vocabulary, add its feature vector to the total
for word in words:
if word in index2word_set:
nwords = nwords + 1.
featureVec = np.add(featureVec,model[word])
#
    # Divide the result by the number of words to get the average
    # (guard against an empty vocabulary overlap, which would divide by zero)
    if nwords > 0.:
        featureVec = np.divide(featureVec, nwords)
return featureVec
def getAvgFeatureVecs(reviews, model, num_features):
# Given a set of reviews (each one a list of words), calculate
# the average feature vector for each one and return a 2D numpy array
#
    # Initialize a counter (an int, since it is used as an array index)
    counter = 0
#
# Preallocate a 2D numpy array, for speed
reviewFeatureVecs = np.zeros((len(reviews),num_features),dtype="float32")
#
# Loop through the reviews
for review in reviews:
#
# Print a status message every 1000th review
        if counter % 1000 == 0:
print( "Review %d of %d" % (counter, len(reviews)))
#
# Call the function (defined above) that makes average feature vectors
reviewFeatureVecs[counter] = makeFeatureVec(review, model, \
num_features)
#
# Increment the counter
        counter = counter + 1
return reviewFeatureVecs
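# ---------------------------------------------------------------------------
# Minimal sketch with a stand-in model (a real gensim Word2Vec model would be
# used in practice; _ToyModel and its two-word vocabulary are invented here
# purely to exercise the averaging logic).
if __name__ == "__main__":
    class _ToyModel(object):
        index2word = ["good", "bad"]
        _vecs = {"good": np.ones(4, dtype="float32"),
                 "bad": -np.ones(4, dtype="float32")}
        def __getitem__(self, word):
            return self._vecs[word]

    vec = makeFeatureVec(["good", "good", "bad", "unknown"], _ToyModel(), 4)
    # Two "good" vectors and one "bad" vector average to 1/3 per dimension;
    # "unknown" is skipped because it is not in the vocabulary.
    print(vec)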
|
{"hexsha": "0dc761544d0de4aa550560dbc1bf48bc76e9f079", "size": 1799, "ext": "py", "lang": "Python", "max_stars_repo_path": "word2Vec.py", "max_stars_repo_name": "siddharthshah3030/Sentiment_Analysis", "max_stars_repo_head_hexsha": "380b9513461c5bd17db0f8fb8efe959db4469031", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "word2Vec.py", "max_issues_repo_name": "siddharthshah3030/Sentiment_Analysis", "max_issues_repo_head_hexsha": "380b9513461c5bd17db0f8fb8efe959db4469031", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-11T17:11:22.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-11T17:36:02.000Z", "max_forks_repo_path": "word2Vec.py", "max_forks_repo_name": "siddharthshah3030/Sentiment_Analysis", "max_forks_repo_head_hexsha": "380b9513461c5bd17db0f8fb8efe959db4469031", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2745098039, "max_line_length": 77, "alphanum_fraction": 0.6486937187, "include": true, "reason": "import numpy", "num_tokens": 460}
|
#!/usr/bin/julia
# Trizen
# 27 April 2017
# https://github.com/trizen
# Complex transform of an image, by mapping each pixel position to a complex function.
# usage:
# julia complex_transform.jl [image]
using Images
#using SpecialFunctions
function map_val(value, in_min, in_max, out_min, out_max)
(value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
end
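# Quick sanity check of map_val (illustrative): the midpoint of [0, 10] maps to
# the midpoint of [0, 100].
@assert map_val(5, 0, 10, 0, 100) == 50.0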
function complex_transform(file)
img = load(file)
height, width = size(img)
function transform(x, y)
z = complex(
(2 * x - width ) / width,
(2 * y - height) / height
)
# Complex function
z = (cos(z) + sin(z)*im) / im
(real(z), imag(z))
end
matrix = zeros(height, width, 2)
min_x, min_y = (Inf, Inf)
max_x, max_y = (-Inf, -Inf)
for y in 1:height, x in 1:width
new_x, new_y = transform(x, y)
matrix[y,x,1] = new_x
matrix[y,x,2] = new_y
if (new_x < min_x)
min_x = new_x
end
if (new_y < min_y)
min_y = new_y
end
if (new_x > max_x)
max_x = new_x
end
if (new_y > max_y)
max_y = new_y
end
end
println("X: [$min_x, $max_x]")
println("Y: [$min_y, $max_y]")
    # zeros() already initializes every pixel to black
    out_img = zeros(RGB{N0f8}, height, width)
for y in 1:height, x in 1:width
new_x = map_val(matrix[y,x,1], min_x, max_x, 1, width)
new_y = map_val(matrix[y,x,2], min_y, max_y, 1, height)
if (abs(new_x) == Inf || isnan(new_x) || abs(new_y) == Inf || isnan(new_y))
println("Skipping one pixel...")
continue
end
new_x = round(Int64, new_x)
new_y = round(Int64, new_y)
out_img[new_y,new_x] = img[y,x]
end
return out_img
end
inputfile = length(ARGS) > 0 ? ARGS[1] : "input.png"
save("complex_transform.png", complex_transform(inputfile))
|
{"hexsha": "b33c7f20b8fd92bd67743917172c4a2b7d081e99", "size": 2006, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Image/complex_transform.jl", "max_stars_repo_name": "trizen/julia-scripts", "max_stars_repo_head_hexsha": "26015006f2b37e0fcdb9dc4a96ea37a8b312a7ec", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-03-22T09:38:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-07T21:38:31.000Z", "max_issues_repo_path": "Image/complex_transform.jl", "max_issues_repo_name": "trizen/julia-scripts", "max_issues_repo_head_hexsha": "26015006f2b37e0fcdb9dc4a96ea37a8b312a7ec", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Image/complex_transform.jl", "max_forks_repo_name": "trizen/julia-scripts", "max_forks_repo_head_hexsha": "26015006f2b37e0fcdb9dc4a96ea37a8b312a7ec", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8043478261, "max_line_length": 86, "alphanum_fraction": 0.5513459621, "num_tokens": 625}
|
% Created 2022-01-25 Tue 18:13
% Intended LaTeX compiler: pdflatex
\documentclass[11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\author{Dinh Duy Kha}
\date{\today}
\title{Outline}
\hypersetup{
pdfauthor={Dinh Duy Kha},
pdftitle={Outline},
pdfkeywords={},
pdfsubject={},
pdfcreator={Emacs 27.2 (Org mode 9.6)},
pdflang={English}}
\usepackage{biblatex}
\addbibresource{~/org/bibliography/bibliography.bib}
\begin{document}
\maketitle
\tableofcontents
\section{Introduction}
\label{sec:orgbfa7fe1}
\section{Background and Related Works}
\label{sec:orgf841941}
\subsection{PIM}
\label{sec:orgb01d4e2}
\subsubsection{PIM accelerators}
\label{sec:org4d8ee4e}
\subsubsection{PIM architectures}
\label{sec:orgda0d2ae}
\subsubsection{Baseline PIM assumptions}
\label{sec:orgb531ace}
\subsection{Confidential computing}
\label{sec:org4b22eb1}
\subsubsection{Secure enclave and the cloud}
\label{sec:org91490c6}
\subsubsection{Secure accelerators}
\label{sec:org4e2bd4f}
\subsubsection{Side-channel attacks and defenses}
\label{sec:org501865d}
\section{Overview of PIM-Enclave}
\label{sec:orgd2443a8}
\subsection{Usage Model}
\label{sec:org853483c}
\subsubsection{is a secure in-memory accelerator}
\label{sec:orgf12966a}
\begin{enumerate}
\item trusted by the host enclave
\label{sec:org850e42f}
\item have secure communication channel
\label{sec:org72499c1}
\item is efficient at processing large data
\label{sec:org7235504}
\end{enumerate}
\subsubsection{extends memory of CPU-based enclaves}
\label{sec:orgc8bf6bc}
\begin{enumerate}
\item protects confidentiality and integrity of data
\label{sec:orgc628c89}
\item hide the access pattern of data
\label{sec:org3e8941f}
\begin{enumerate}
\item accesses from PIM
\label{sec:orga86551d}
\item accesses from host
\label{sec:orge814af2}
\end{enumerate}
\end{enumerate}
\subsubsection{demonstration with a k-means example}
\label{sec:org1b3e249}
\begin{enumerate}
\item k-means is a data-intensive application
\label{sec:org33d53a8}
\item putting data in PIM allows the host enclave to process more data
\label{sec:org4d4cf17}
\item communication channel protected by AES
\label{sec:orgc21f5f1}
\item access pattern from PIM is hidden with the access control logic
\label{sec:orgb7f603e}
\item access pattern from host is hidden with the secure access interface
\label{sec:org0211bff}
\item data integrity is protected by allowing only the secure access interface to update memory; direct updates must be requested by the host enclave.
\label{sec:org891e263}
\end{enumerate}
\subsection{Threat model \& Design requirements}
\label{sec:orgaf4f635}
\subsubsection{threat model}
\label{sec:org298bee3}
\begin{enumerate}
\item scope
\label{sec:org7e9151f}
\begin{enumerate}
\item we protect
\label{sec:org5c6737a}
\begin{enumerate}
\item the execution of PIM enclaves
\label{sec:org4830b9d}
\item data packets on the bus
\label{sec:org45bd952}
\item observable memory changes
\label{sec:org797a609}
\end{enumerate}
\item out of scope:
\label{sec:org4e8bfac}
\begin{enumerate}
\item EM \& power
\label{sec:org25962ef}
\item Host-side side-channel
\label{sec:org750aa49}
\end{enumerate}
\end{enumerate}
\item privileged software
\label{sec:orge65723c}
\begin{enumerate}
\item untrusted memory mappings
\label{sec:orgde470d2}
\item unauthorized accesses to memory
\label{sec:orge054211}
\end{enumerate}
\item physical attack on the bus
\label{sec:org43d24dc}
\begin{enumerate}
\item snooping \& side-channels
\label{sec:org0aac69d}
\end{enumerate}
\item other
\label{sec:org6e8f99d}
\begin{enumerate}
\item dma attacks
\label{sec:orgcbbf085}
\item cold boot
\label{sec:org989e357}
\end{enumerate}
\end{enumerate}
\subsubsection{requirements as secure in-memory accelerator}
\label{sec:org6abda3e}
\begin{enumerate}
\item R1-a: establish trust with the host
\label{sec:org8bf95a8}
\item R1-b: secure communication channel
\label{sec:orge6604d1}
\item R1-c: efficiently process large data
\label{sec:orgd0ee7ff}
\end{enumerate}
\subsubsection{requirements as trusted memory}
\label{sec:org8818db9}
\begin{enumerate}
\item R2-a: protect confidentiality of data
\label{sec:org1c16471}
\begin{enumerate}
\item R2-a-1: memory encryption
\label{sec:org239abac}
\begin{enumerate}
\item prevent unauthorized accesses \& cold boot
\label{sec:org3944a04}
\end{enumerate}
\item R2-a-2: hide the access pattern of PIM
\label{sec:orgc3fa038}
\item R2-a-3: hide the access pattern of HOST
\label{sec:org8ea25fa}
\end{enumerate}
\item R2-b: protect integrity of data
\label{sec:orgc417487}
\begin{enumerate}
\item replay, spoofing, splicing
\label{sec:org5b5443c}
\end{enumerate}
\end{enumerate}
\section{Enabling in-memory confidential computation}
\label{sec:org0ad53d3}
\subsection{Hardware capabilities}
\label{sec:orgd3256c8}
\subsection{Remote attestation \& key exchange}
\label{sec:orgac00635}
\subsubsection{Satisfy R1-a \& R1-b}
\label{sec:orgf6a80ea}
\subsection{Process large data efficiently with the AES engine}
\label{sec:org3dfe836}
\subsubsection{Satisfy R1-c}
\label{sec:orga71afa2}
\section{PIM-enclave as memory extension}
\label{sec:org5a2f2dc}
\subsection{keeping memory encrypted with a shared key}
\label{sec:org21e3310}
\subsubsection{Satisfy R2-a-1}
\label{sec:org23ab08b}
\subsection{thwarting unauthorized accesses with the access control}
\label{sec:org8b13cd0}
\subsubsection{Satisfy R2-a-2}
\label{sec:org9c1d058}
\subsubsection{Satisfy R2-b}
\label{sec:orgc40be8b}
\subsection{enabling memory accesses from host with the secure access interface}
\label{sec:org34d866e}
\subsubsection{Satisfy R2-a-3 by encrypting the access address (trustore, invisimem)}
\label{sec:orgee9eaa7}
\subsubsection{Satisfy R2-b by only allow memory updates through the interface}
\label{sec:orgd17e28e}
\section{Implementation}
\label{sec:orgabc2fa3}
\section{Evaluation}
\label{sec:org491c38c}
\subsection{security analysis}
\label{sec:org3c9a81a}
\subsubsection{in-memory hash table}
\label{sec:org011d79c}
\begin{enumerate}
\item show that sensitive applications can be offloaded to PIM
\label{sec:org92c9fd2}
\end{enumerate}
\subsubsection{secure access interface}
\label{sec:orgdfb2b71}
\begin{enumerate}
\item show the interface can hide the access pattern
\label{sec:org51b2a60}
\end{enumerate}
\subsection{Microbenchmark}
\label{sec:org6d86f05}
\subsubsection{encrypted data transfer}
\label{sec:orge266fd1}
\subsubsection{secure access interface}
\label{sec:org918266b}
\subsection{data-intensive application}
\label{sec:org08b765c}
\subsubsection{k-means algorithm}
\label{sec:orgaeae79c}
\begin{enumerate}
\item demonstrate the computation model
\label{sec:org7dc22aa}
\end{enumerate}
\section{Conclusion}
\label{sec:orgf69a0f1}
\end{document}
|
{"hexsha": "27289a73e68af62dd4387c23448a61d4d6fee09d", "size": 6895, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "roam/20210406132914-pim_enclave.tex", "max_stars_repo_name": "kha-dinh/org-notes", "max_stars_repo_head_hexsha": "a8e954aa8af587c065501825e6d85f6ec1a818d0", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "roam/20210406132914-pim_enclave.tex", "max_issues_repo_name": "kha-dinh/org-notes", "max_issues_repo_head_hexsha": "a8e954aa8af587c065501825e6d85f6ec1a818d0", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "roam/20210406132914-pim_enclave.tex", "max_forks_repo_name": "kha-dinh/org-notes", "max_forks_repo_head_hexsha": "a8e954aa8af587c065501825e6d85f6ec1a818d0", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1428571429, "max_line_length": 147, "alphanum_fraction": 0.7947788252, "num_tokens": 2253}
|
"""
check_MD5( file_path, checksum )
returns a MD5 hash from a file location.
Note: this converts Int8 representations to comma delimitted strings.
"""
get_MD5( file_path ) = join( string.( open(md5, file_path) ), "," )
"""
check_MD5( file_path, checksum )
Checks the result of an MD5 hash vs a stored checksum.
Note: this converts Int8 representations to comma delimitted strings.
"""
check_MD5( file_path, check_sum ) = get_MD5( file_path ) == check_sum
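# Illustrative usage sketch (hypothetical file name; assumes the package that
# provides `md5`, e.g. MD5.jl, is loaded):
#   checksum = get_MD5("spectra.csv")
#   check_MD5("spectra.csv", checksum)   # -> true while the file is unchanged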
|
{"hexsha": "330650ed37a20990c9c9488ce8128d5b21afd4a4", "size": 466, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Validation.jl", "max_stars_repo_name": "caseykneale/ChemometricsData.jl", "max_stars_repo_head_hexsha": "22b2c7b298b4abb94a84737a0aba4e9478959086", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-07-04T01:54:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-01T05:25:02.000Z", "max_issues_repo_path": "src/Validation.jl", "max_issues_repo_name": "caseykneale/ChemometricsData.jl", "max_issues_repo_head_hexsha": "22b2c7b298b4abb94a84737a0aba4e9478959086", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-07-11T00:50:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-18T12:18:45.000Z", "max_forks_repo_path": "src/Validation.jl", "max_forks_repo_name": "caseykneale/ChemometricsData.jl", "max_forks_repo_head_hexsha": "22b2c7b298b4abb94a84737a0aba4e9478959086", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-29T10:28:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-29T10:28:51.000Z", "avg_line_length": 33.2857142857, "max_line_length": 69, "alphanum_fraction": 0.7274678112, "num_tokens": 118}
|
include("header.jl")
@testset "iterate" begin
o = (:delta => 0.01,)
function itr1(w)
total = 0.0
for wi in w; total+=sum(wi); end
return total
end
function itr2(w)
total = 0.0
for (i,wi) in enumerate(w); total+=sum(wi); end
return total
end
function itr3(w)
total = 0.0
for (k,wi) in w; total+=sum(wi); end
return total
end
function itr4(w)
w1,w2 = w
return sum(w1)+sum(w2)
end
for T in (Float32, Float64)
warray = [randn(T,2,3,5),randn(T,1,3,5)]
wtuple = (randn(T,2,3,5),randn(T,1,3,5))
wdict = Dict(:w1=>randn(T,2,3,5),:w2=>randn(T,1,3,5))
@test gradcheck(itr1,warray; o...)
@test gradcheck(itr2,warray; o...)
@test gradcheck(itr1,warray[1]; o...)
@test gradcheck(itr2,warray[1]; o...)
@test gradcheck(itr3,wdict; o...)
@test gradcheck(itr4,warray; o...)
@test gradcheck(itr4,wtuple; o...)
end
end
|
{"hexsha": "e04f3caf1e79b25f8f3f29f8527a7914510ad3c9", "size": 1028, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/iterate.jl", "max_stars_repo_name": "UnofficialJuliaMirror/AutoGrad.jl-6710c13c-97f1-543f-91c5-74e8f7d95b35", "max_stars_repo_head_hexsha": "b6d4e6858f4cd2b520a54a74f10345615aa4013e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 176, "max_stars_repo_stars_event_min_datetime": "2016-08-09T05:51:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T20:07:43.000Z", "max_issues_repo_path": "test/iterate.jl", "max_issues_repo_name": "UnofficialJuliaMirror/AutoGrad.jl-6710c13c-97f1-543f-91c5-74e8f7d95b35", "max_issues_repo_head_hexsha": "b6d4e6858f4cd2b520a54a74f10345615aa4013e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 102, "max_issues_repo_issues_event_min_datetime": "2016-08-26T18:16:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-12T19:47:54.000Z", "max_forks_repo_path": "test/iterate.jl", "max_forks_repo_name": "UnofficialJuliaMirror/AutoGrad.jl-6710c13c-97f1-543f-91c5-74e8f7d95b35", "max_forks_repo_head_hexsha": "b6d4e6858f4cd2b520a54a74f10345615aa4013e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2016-08-16T17:26:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-04T15:35:58.000Z", "avg_line_length": 23.9069767442, "max_line_length": 63, "alphanum_fraction": 0.5155642023, "num_tokens": 359}
|
"""
mutable struct SolidStateDetector{T <: SSDFloat, CS} <: AbstractConfig{T}
CS: Coordinate System: -> :cartesian / :cylindrical
"""
mutable struct SolidStateDetector{T <: SSDFloat, CS} <: AbstractConfig{T}
name::String # optional
inputunits::Dict{String, Unitful.Units}
world::World{T, 3}
config_dict::Dict
medium::NamedTuple # this should become a struct at some point
semiconductors::Vector{Semiconductor{T}}
contacts::Vector{Contact{T}}
passives::Vector{Passive{T}}
SolidStateDetector{T, CS}() where {T <: SSDFloat, CS} = new{T, CS}()
end
get_precision_type(d::SolidStateDetector{T}) where {T} = T
get_coordinate_system(d::SolidStateDetector{T, CS}) where {T, CS} = CS
function construct_units(dict::Dict)::Dict{String,Unitful.Units}
result_dict::Dict{String,Unitful.Units} = Dict()
haskey(dict,"length") ? result_dict["length"] = unit_conversion[dict["length"]] : result_dict["length"] = u"mm"
haskey(dict,"angle") ? result_dict["angle"] = unit_conversion[dict["angle"]] : result_dict["angle"] = u"rad"
haskey(dict,"potential") ? result_dict["potential"] = unit_conversion[dict["potential"]] : result_dict["potential"] = u"V"
haskey(dict,"temperature") ? result_dict["temperature"] = unit_conversion[dict["temperature"]] : result_dict["temperature"] = u"K"
result_dict
end
function construct_semiconductor(T, sc::Dict, inputunit_dict::Dict{String, Unitful.Units})
Semiconductor{T}(sc, inputunit_dict)
end
function construct_passive(T, pass::Dict, inputunit_dict::Dict{String, Unitful.Units})
Passive{T}(pass, inputunit_dict)
end
function construct_contact(T, contact::Dict, inputunit_dict::Dict{String, Unitful.Units})
Contact{T}(contact, inputunit_dict)
end
function construct_objects(T, objects::Vector, semiconductors, contacts, passives, inputunit_dict)::Nothing
for obj in objects
if obj["type"] == "semiconductor"
push!(semiconductors, construct_semiconductor(T, obj, inputunit_dict))
elseif obj["type"] == "contact"
push!(contacts, construct_contact(T, obj, inputunit_dict))
elseif obj["type"] == "passive"
push!(passives, construct_passive(T, obj, inputunit_dict))
else
@warn "please spcify the calss to bei either a \"semiconductor\", a \"contact\", or \"passive\""
end
end
nothing
end
function SolidStateDetector{T}(config_file::Dict)::SolidStateDetector{T} where{T <: SSDFloat}
grid_type = Symbol(config_file["setup"]["grid"]["coordinates"])
c = SolidStateDetector{T, grid_type}()
c.name = config_file["name"]
c.config_dict = config_file
c.inputunits = construct_units(config_file["setup"]["units"])
c.world = World(T, config_file["setup"]["grid"], c.inputunits)
c.medium = material_properties[materials[config_file["setup"]["medium"]]]
c.semiconductors, c.contacts, c.passives = [], [], []
construct_objects(T, config_file["setup"]["objects"], c.semiconductors, c.contacts, c.passives, c.inputunits)
return c
end
function SolidStateDetector(parsed_dict::Dict)
SolidStateDetector{Float32}(parsed_dict)
end
function Base.sort!(v::AbstractVector{<:AbstractGeometry})
    hierarchies::Vector{Int} = map(x -> x.hierarchy, v)
    v_result::typeof(v) = []
    for h in sort!(unique(hierarchies))  # iterate the hierarchy values, lowest first
        push!(v_result, filter(x -> x.hierarchy == h, v)...)
    end
    return v_result
end
# Fallback for precision types that are not supported
function SolidStateDetector{T}(parsed_dict::Dict) where T
    throw(ArgumentError("SolidStateDetector requires a precision type T <: SSDFloat"))
end
function contains(c::SolidStateDetector, point::AbstractCoordinatePoint{T,3})::Bool where T
for contact in c.contacts
if point in contact
return true
end
end
for sc in c.semiconductors
if point in sc
return true
end
end
return false
end
function println(io::IO, d::SolidStateDetector{T, CS}) where {T <: SSDFloat, CS}
    println(io, "________"*d.name*"________\n")
    # println(io, "Class: ", d.class)
    println(io, "---General Properties---")
    println(io, "-Environment Material: \t $(d.medium.name)")
    println(io, "-Grid Type: \t $(CS)")
    println(io)
    println(io, "# Semiconductors: $(length(d.semiconductors))")
    for (isc, sc) in enumerate(d.semiconductors)
        println(io, "\t_____Semiconductor $(isc)_____\n")
        println(io, sc)
    end
    println(io)
    println(io, "# Contacts: $(length(d.contacts))")
    if length(d.contacts) <= 5
        for c in d.contacts
            println(io, c)
        end
    end
    println(io)
    println(io, "# Passives: $(length(d.passives))")
    if length(d.passives) <= 5
        for p in d.passives
            # println(io, p)
        end
    end
end
# All display helpers delegate to the two-argument println above.
function show(io::IO, d::SolidStateDetector{T}) where {T <: SSDFloat} println(io, d) end
function print(io::IO, d::SolidStateDetector{T}) where {T <: SSDFloat} println(io, d) end
function display(io::IO, d::SolidStateDetector{T}) where {T <: SSDFloat} println(io, d) end
function show(io::IO, ::MIME"text/plain", d::SolidStateDetector)
    show(io, d)
end
# ToDo: Test it
function generate_random_startpositions(d::SolidStateDetector{T}, n::Int, Volume::NamedTuple=bounding_box(d), rng::AbstractRNG = MersenneTwister(), min_dist_from_boundary = 0.0001) where T
delta = T(min_dist_from_boundary)
n_filled::Int = 0
positions = Vector{CartesianPoint{T}}(undef,n)
while n_filled < n
        sample = CylindricalPoint{T}(
            rand(rng, Volume[:r_range].left:0.00001:Volume[:r_range].right),
            rand(rng, Volume[:φ_range].left:0.00001:Volume[:φ_range].right),
            rand(rng, Volume[:z_range].left:0.00001:Volume[:z_range].right))
        if !(sample in d.contacts) && contains(d, sample) &&
                contains(d, CylindricalPoint{T}(sample.r + delta, sample.φ, sample.z)) &&
                contains(d, CylindricalPoint{T}(sample.r - delta, sample.φ, sample.z)) &&
                contains(d, CylindricalPoint{T}(sample.r, sample.φ, sample.z + delta)) &&
                contains(d, CylindricalPoint{T}(sample.r, sample.φ, sample.z - delta))
n_filled += 1
positions[n_filled]=CartesianPoint(sample)
end
end
positions
end
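# A minimal usage sketch (illustrative only; `det` stands for an already
# constructed detector, not something defined in this file):
#
#     pos = generate_random_startpositions(det, 100)
#     pos = generate_random_startpositions(det, 10, bounding_box(det),
#                                          MersenneTwister(42), 1e-4)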
|
{"hexsha": "61c27ca2b8c0fa8a31d1b355702a8650d44731db", "size": 6077, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/DetectorGeometries/SolidStateDetector.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_stars_repo_head_hexsha": "9c2c535181052a8e801650f34286b64696397657", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/DetectorGeometries/SolidStateDetector.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_issues_repo_head_hexsha": "9c2c535181052a8e801650f34286b64696397657", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/DetectorGeometries/SolidStateDetector.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_forks_repo_head_hexsha": "9c2c535181052a8e801650f34286b64696397657", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9551282051, "max_line_length": 329, "alphanum_fraction": 0.6814217542, "num_tokens": 1631}
|
from __future__ import print_function
import numpy as np
from numgrad import eval_numerical_gradient
def tanh_grad(x):
return 1 - np.tanh(x) ** 2
if __name__ == '__main__':
x = np.array([1.0, 2.1, 0.3, 0.7])
print('tanh', np.tanh(x))
print('tanh_grad', tanh_grad(x))
# Note: eval_numerical_gradient works for scalar functions. Therefore we'll
# run it for each element of tanh separately.
print('Numerical gradient')
for i in range(x.shape[0]):
print(i, eval_numerical_gradient(lambda z: np.tanh(z)[i], x))
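    # Vectorized cross-check via central differences (a sketch; the step size h
    # is an assumption, not part of the original script):
    h = 1e-5
    num_grad = (np.tanh(x + h) - np.tanh(x - h)) / (2 * h)
    print('close to analytic?', np.allclose(num_grad, tanh_grad(x), atol=1e-8))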
|
{"hexsha": "5f41a590721078f271ef36d111fac5fc422a6b15", "size": 553, "ext": "py", "lang": "Python", "max_stars_repo_path": "gradients/tanh.py", "max_stars_repo_name": "eliben/deep-learning-samples", "max_stars_repo_head_hexsha": "d5ca86c5db664fabfb302cbbc231c50ec3d6a103", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 183, "max_stars_repo_stars_event_min_datetime": "2015-12-29T07:21:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T01:19:23.000Z", "max_issues_repo_path": "gradients/tanh.py", "max_issues_repo_name": "eliben/deep-learning-samples", "max_issues_repo_head_hexsha": "d5ca86c5db664fabfb302cbbc231c50ec3d6a103", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gradients/tanh.py", "max_forks_repo_name": "eliben/deep-learning-samples", "max_forks_repo_head_hexsha": "d5ca86c5db664fabfb302cbbc231c50ec3d6a103", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 68, "max_forks_repo_forks_event_min_datetime": "2016-06-02T15:31:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-08T19:58:10.000Z", "avg_line_length": 26.3333333333, "max_line_length": 79, "alphanum_fraction": 0.6763110307, "include": true, "reason": "import numpy", "num_tokens": 164}
|
'''Helper class for Monte Carlo Studies for (currently) statistical tests
Most of it should also be usable for Bootstrap, and for MC for estimators.
Takes the sample generator, dgp, and the statistical results, statistic,
as functions in the arguments.
Author: Josef Perktold (josef-pktd)
'''
import numpy as np
from scipy import stats
# import path for SimpleTable assumed for this scikits-era codebase
from scikits.statsmodels.iolib.table import SimpleTable
#copied from stattools
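# Intended usage, as a minimal sketch (the dgp/statistic definitions below are
# illustrative, not part of this module):
#
#     def dgp(nobs=100):
#         return np.random.randn(nobs)
#
#     def statistic(x):
#         return np.sqrt(len(x)) * x.mean() / x.std()
#
#     mc = StatTestMC(dgp, statistic)
#     mc.run(5000)
#     mc.histogram(critval=[-1.96, 1.96])
#     mc.quantiles()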
class StatTestMC(object):
"""class to run Monte Carlo study on a statistical test'''
TODO
print summary, for quantiles and for histogram
draft in trying out script log
"""
def __init__(self, dgp, statistic):
self.dgp = dgp #staticmethod(dgp) #no self
self.statistic = statistic # staticmethod(statistic) #no self
def run(self, nrepl, statindices=None, dgpargs=[], statsargs=[]):
'''run the actual Monte Carlo and save results
'''
self.nrepl = nrepl
self.statindices = statindices
self.dgpargs = dgpargs
self.statsargs = statsargs
dgp = self.dgp
statfun = self.statistic # name ?
#single return statistic #TODO: introspect len of return of statfun
if statindices is None:
self.nreturn = nreturns = 1
mcres = np.zeros(nrepl)
            for ii in range(nrepl):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
mcres[ii] = statfun(x, *statsargs) #unitroot_adf(x, 2,trendorder=0, autolag=None)
#more than one return statistic
else:
self.nreturn = nreturns = len(statindices)
self.mcres = mcres = np.zeros((nrepl, nreturns))
            for ii in range(nrepl):
x = dgp(*dgpargs) #(1e-4+np.random.randn(nobs)).cumsum()
ret = statfun(x, *statsargs)
mcres[ii] = [ret[i] for i in statindices]
self.mcres = mcres
def histogram(self, idx=None, critval=None):
'''calculate histogram values
does not do any plotting
'''
if self.mcres.ndim == 2:
            if idx is not None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
        if critval is None:
            histo = np.histogram(mcres, bins=10)
        else:
            # pad the critical values with -inf/inf guards where missing
            bins = np.asarray(critval)
            if bins[0] != -np.inf:
                bins = np.r_[-np.inf, bins]
            if bins[-1] != np.inf:
                bins = np.r_[bins, np.inf]
            histo = np.histogram(mcres, bins=bins)
self.histo = histo
self.cumhisto = np.cumsum(histo[0])*1./self.nrepl
self.cumhistoreversed = np.cumsum(histo[0][::-1])[::-1]*1./self.nrepl
return histo, self.cumhisto, self.cumhistoreversed
def quantiles(self, idx=None, frac=[0.01, 0.025, 0.05, 0.1, 0.975]):
'''calculate quantiles of Monte Carlo results
changes:
does all sort at once, but reports only one at a time
'''
if self.mcres.ndim == 2:
            if idx is not None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
self.frac = frac = np.asarray(frac)
        if not hasattr(self, 'mcressort'):
            self.mcressort = np.sort(self.mcres, axis=0)
        if self.mcressort.ndim == 2:
            mcressort = self.mcressort[:, idx]
        else:
            mcressort = self.mcressort
return frac, mcressort[(self.nrepl*frac).astype(int)]
def plot_hist(self, idx, distpdf, bins=50, ax=None):
if self.mcres.ndim == 2:
            if idx is not None:
mcres = self.mcres[:,idx]
else:
raise ValueError('currently only 1 statistic at a time')
else:
mcres = self.mcres
lsp = np.linspace(mcres.min(), mcres.max(), 100)
import matplotlib.pyplot as plt
#I don't want to figure this out now
# if ax=None:
# fig = plt.figure()
# ax = fig.addaxis()
fig = plt.figure()
        plt.hist(mcres, bins=bins, density=True)
plt.plot(lsp, distpdf(lsp), 'r')
def summary_quantiles(self, idx, distpdf, bins=50, ax=None):
'''summary table for quantiles
currently just a partial copy from python session, for ljung-box example
add also
>>> lb_dist.ppf([0.01, 0.025, 0.05, 0.1, 0.975])
array([ 0.29710948, 0.48441856, 0.71072302, 1.06362322, 11.14328678])
>>> stats.kstest(mc1.mcres[:,3], stats.chi2(4).cdf)
(0.052009265258216836, 0.0086211970272969118)
'''
        mcq = self.quantiles([1, 3])[1]
        perc = stats.chi2([2, 4]).ppf(np.array([[0.01, 0.025, 0.05, 0.1, 0.975]]).T)
        mml = []
        for i in range(2):
            mml.extend([mcq[:, i], perc[:, i]])
        mm = np.column_stack(mml)
        print(SimpleTable(mm,
                          txt_fmt={'data_fmts': ["%#6.3f"] + ["%#10.4f"] * (mm.shape[1] - 1)},
                          headers=['quantile'] + ['mc', 'dist'] * 2))
if __name__ == '__main__':
def randwalksim(nobs=100, drift=0.0):
return (drift+np.random.randn(nobs)).cumsum()
def normalnoisesim(nobs=500, loc=0.0):
return (loc+np.random.randn(nobs))
def adf20(x):
return unitroot_adf(x, 2,trendorder=0, autolag=None)
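    # NOTE: unitroot_adf is not imported in this module; it is assumed to come
    # from the statsmodels sandbox and is only exercised by the commented-out
    # random-walk example below.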
# print '\nResults with MC class'
# mc1 = StatTestMC(randwalksim, adf20)
# mc1.run(1000)
# print mc1.histogram(critval=[-3.5, -3.17, -2.9 , -2.58, 0.26])
# print mc1.quantiles()
    print('\nLjung Box')
from scikits.statsmodels.sandbox.stats.diagnostic import acorr_ljungbox
def lb4(x):
s,p = acorr_ljungbox(x, lags=4)
return s[-1], p[-1]
def lb1(x):
s,p = acorr_ljungbox(x, lags=1)
return s[0], p[0]
def lb(x):
s,p = acorr_ljungbox(x, lags=4)
return np.r_[s, p]
    print('Results with MC class')
mc1 = StatTestMC(normalnoisesim, lb)
mc1.run(1000, statindices=range(8))
    print(mc1.histogram(1, critval=[0.01, 0.025, 0.05, 0.1, 0.975]))
    print(mc1.quantiles(1))
    print(mc1.quantiles(0))
    print(mc1.histogram(0))
|
{"hexsha": "3b7c8be1ae7ee5eacdced5ceb211f8454beb7818", "size": 6133, "ext": "py", "lang": "Python", "max_stars_repo_path": "scikits/statsmodels/sandbox/tools/mctools.py", "max_stars_repo_name": "matthew-brett/statsmodels", "max_stars_repo_head_hexsha": "915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scikits/statsmodels/sandbox/tools/mctools.py", "max_issues_repo_name": "matthew-brett/statsmodels", "max_issues_repo_head_hexsha": "915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scikits/statsmodels/sandbox/tools/mctools.py", "max_forks_repo_name": "matthew-brett/statsmodels", "max_forks_repo_head_hexsha": "915c9dc2d762c5592ac17a7cf5f1cc957fcbde1c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9747474747, "max_line_length": 146, "alphanum_fraction": 0.5711723463, "include": true, "reason": "import numpy", "num_tokens": 1789}
|
import torch
import torch.nn as nn
import math, random, sys
from optparse import OptionParser
import pickle
import rdkit
import json
import rdkit.Chem as Chem
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from collections import defaultdict
import copy
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import numpy as np
from collections import deque
import os, random
import torch.nn.functional as F
import pdb
from jvae_model import *
path = "savedmodel.pth"
model = JTNNVAE(vocab, 450, 56, 20, 3)
model.load_state_dict(torch.load(path))
torch.manual_seed(0)
print("Molecules generated")
for i in range(10):
print(model.sample_prior())
|
{"hexsha": "361a42e9d69a8921a44ab73c780149ee2738fcf1", "size": 854, "ext": "py", "lang": "Python", "max_stars_repo_path": "genmol/JTVAE/sample.py", "max_stars_repo_name": "bayeslabs/genmol", "max_stars_repo_head_hexsha": "55837c9c3b7d9f835433db7886bce8b27099e544", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-07-11T11:53:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-11T19:58:12.000Z", "max_issues_repo_path": "genmol/JTVAE/sample.py", "max_issues_repo_name": "bayeslabs/genmol", "max_issues_repo_head_hexsha": "55837c9c3b7d9f835433db7886bce8b27099e544", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-07-15T05:44:24.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-15T05:44:24.000Z", "max_forks_repo_path": "genmol/JTVAE/sample.py", "max_forks_repo_name": "bayeslabs/genmol", "max_forks_repo_head_hexsha": "55837c9c3b7d9f835433db7886bce8b27099e544", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-11-03T01:39:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-03T05:48:30.000Z", "avg_line_length": 25.1176470588, "max_line_length": 57, "alphanum_fraction": 0.775175644, "include": true, "reason": "import numpy,from scipy", "num_tokens": 198}
|
from datetime import datetime, timedelta
import importlib
import itertools
import warnings
import urllib
import iris
import iris_hypothetic
import pandas as pd
import numpy as np
import tempfile
import boto3
from botocore.handlers import disable_signing
from intake.source.base import DataSource, Schema
from . import __version__
SECONDS_IN_HOUR = 60 * 60
SECONDS_IN_DAY = 60 * 60 * 24
def _import_from(module, name):
module = __import__(module, fromlist=[name])
return getattr(module, name)
def _product_dict(**kwargs):
keys = kwargs.keys()
vals = kwargs.values()
for instance in itertools.product(*vals):
yield dict(zip(keys, instance))
class HypotheticSource(DataSource):
"""Intake hypothetic"""
version = __version__
container = 'iris'
name = 'hypothetic'
partition_access = True
def __init__(self, key_generator=None, forecast_reference_time=None, iris_kwargs=None, metadata=None, storage_options=None,
**kwargs):
self.key_generator = key_generator
self.forecast_reference_time = forecast_reference_time
self._kwargs = iris_kwargs or kwargs
self.metadata = metadata
self.metadata_df = None
self._template_cube_file = None
self.storage_options = storage_options
self._ds = None
super(HypotheticSource, self).__init__(metadata=metadata)
def _open_dataset(self):
self.metadata_df = self.generate_metadata()
self._template_cube_file, _ = self.find_template_cube(None)
uris = self.metadata_df.uri
replacement_coords = self.extract_unique_metadata(['uri'])
        hypotheticube = iris_hypothetic.load_hypotheticube(
            self._template_cube_file.name, self.metadata['name'],
            replacement_coords, uris, storage_options=self.storage_options)
self._ds = hypotheticube
def _open_as_local(self, path):
if path.startswith('s3://'):
bucket, key = path[len('s3://'):].split('/', 1)
s3 = boto3.resource('s3')
if self.storage_options and self.storage_options.get('anon', False):
s3.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
try:
object_body = s3.Bucket(bucket).Object(key).get()['Body'].read()
except s3.meta.client.exceptions.NoSuchKey:
raise IOError(f'No such file {path}')
file = tempfile.NamedTemporaryFile()
file.write(object_body)
file.seek(0)
return file
if path.startswith('http://') or path.startswith('https://'):
object_body = urllib.request.urlopen(path).read()
file = tempfile.NamedTemporaryFile()
file.write(object_body)
file.seek(0)
return file
return open(path, 'rb')
def _get_schema(self):
"""Make schema object, which embeds iris object and some details"""
if self._ds is None:
self._open_dataset()
metadata = {}
self._schema = Schema(
datashape=None,
dtype=None,
shape=None,
npartitions=None,
extra_metadata=metadata)
return self._schema
def read(self):
"""Return iris object (which will have chunks)"""
return self.read_chunked()
def read_chunked(self):
"""Return iris object (which will have chunks)"""
self._load_metadata()
return self._ds
def read_partition(self, i):
"""Fetch one chunk of data at tuple index i
"""
self._load_metadata()
if not isinstance(i, (tuple, list)):
raise TypeError('For iris sources, must specify partition as '
'tuple')
if isinstance(i, list):
i = tuple(i)
if isinstance(self._ds, iris.cube.CubeList):
arr = self._ds[i[0]].lazy_data()
i = i[1:]
else:
arr = self._ds.lazy_data()
if isinstance(arr, np.ndarray):
return arr
# dask array
return arr[i].compute()
def to_dask(self):
"""Return iris object where variables are dask arrays"""
return self.read_chunked()
def close(self):
"""Delete open file from memory"""
self._ds = None
self._schema = None
@staticmethod
def generate_frts(frt_description):
now = datetime.now()
interval = frt_description['forecast_reference_time_interval']
model_start_time = frt_description['model_start_time']
retention = frt_description['retention']
# Number of runs per day
runs_per_day = int(SECONDS_IN_DAY / interval)
# Hours of the day which the model runs (e.g midnight, 3am, 6am, etc)
run_hours = [model_start_time + (interval * i) / SECONDS_IN_HOUR for i in range(0, runs_per_day)]
# Get the last run relative to now
last_run_hour = max([x for x in run_hours if x <= now.hour])
# Create a datetime object for the last run
final_run = now.replace(minute=0, second=0, microsecond=0, hour=int(last_run_hour))
# Create a generator of all the runs going back as far as the retention
runs_generator = ((final_run - timedelta(seconds=i*interval)).strftime("%Y-%m-%dT%H:%M:%SZ") for i in range(0, int(retention/interval)))
return runs_generator
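    # Example (a sketch, values illustrative): a 6-hourly model starting at
    # midnight with two days of retention yields the 8 most recent run times:
    #
    #     HypotheticSource.generate_frts({
    #         'forecast_reference_time_interval': 6 * SECONDS_IN_HOUR,
    #         'model_start_time': 0,
    #         'retention': 2 * SECONDS_IN_DAY,
    #     })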
def generate_metadata(self):
generator_module_name = ".".join(self.key_generator.split('.')[:-1])
generator_function_name = self.key_generator.split('.')[-1]
generator_function = _import_from(generator_module_name, generator_function_name)
self.metadata['forecast_reference_time'] = list(self.generate_frts(self.forecast_reference_time))
iter_metadata = {key: value for key, value in self.metadata.items() if isinstance(value, list)}
scalar_metadata = {key: value for key, value in self.metadata.items() if not isinstance(value, list)}
df = pd.DataFrame.from_dict([{**product_dict, **scalar_metadata} for product_dict in _product_dict(**iter_metadata)])
        df['uri'] = df.apply(
            lambda row: generator_function(
                {k: str(int(v)) if isinstance(v, np.int64) else str(v)
                 for k, v in row.to_dict().items()}),
            axis=1)
return df
def find_template_cube(self, var_name):
for index, row in self.metadata_df.iterrows():
test_metadata = row.to_dict()
path = test_metadata['uri']
try:
file = self._open_as_local(path)
cube = iris.load_cube(file.name, var_name)
except (IOError, OSError):
continue
else:
return file, cube
raise ValueError("Failed to find template cube")
def extract_unique_metadata(self, drop):
        replacement_coords = self.metadata_df.drop(drop, axis=1)
nunique = replacement_coords.apply(pd.Series.nunique)
cols_to_drop = nunique[nunique == 1].index
return replacement_coords.drop(cols_to_drop, axis=1)
|
{"hexsha": "1c2974095646c30b10f4961afa734c8d388cf267", "size": 7191, "ext": "py", "lang": "Python", "max_stars_repo_path": "intake_hypothetic/hypothetic.py", "max_stars_repo_name": "informatics-lab/intake-hypothetic", "max_stars_repo_head_hexsha": "6ebc8b800aac4caa37820e2a5d1a01346fb65ecd", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "intake_hypothetic/hypothetic.py", "max_issues_repo_name": "informatics-lab/intake-hypothetic", "max_issues_repo_head_hexsha": "6ebc8b800aac4caa37820e2a5d1a01346fb65ecd", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "intake_hypothetic/hypothetic.py", "max_forks_repo_name": "informatics-lab/intake-hypothetic", "max_forks_repo_head_hexsha": "6ebc8b800aac4caa37820e2a5d1a01346fb65ecd", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-10T23:57:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-10T23:57:51.000Z", "avg_line_length": 34.9077669903, "max_line_length": 176, "alphanum_fraction": 0.6334306772, "include": true, "reason": "import numpy", "num_tokens": 1575}
|
\documentclass[a4paper,12pt]{article}
\usepackage{graphicx}
\usepackage{anysize}
\usepackage{multicol}
% USE setspace BEFORE hyperref (for footnote link)
\usepackage{setspace}
\usepackage{hyperref}
\usepackage{spverbatim}
\usepackage{enumitem}
\usepackage[top=0.5in, left=0.75in, bottom=0.5in, right=0.5in, includefoot]{geometry}
\usepackage{subcaption}
\usepackage[justification=centering]{caption}
\usepackage[compact]{titlesec}
% syntax \titlespacing{<command>}{<left>}{<before>}{<after>}
\titlespacing{\section}{0pt}{-0.5in}{*0}
\titleformat{\section}{\normalfont\fontsize{14}{22}\bfseries}{\thesection}{1em}{}
\setlength{\columnsep}{0.5cm}
\setlength{\parindent}{0pt}
\linespread{1.5}
\begin{document}
% titlepage
\input{cg_lab_4_report_title_page}
\fontsize{14}{22}{\textbf{\underline{LAB 4}}}
\begin{spacing}{1}
\hfill \break
Implement Convex Hull using
\begin{enumerate}[leftmargin=*]
\itemsep0em
\item extreme points
\item extreme edges
\item gift wrap
% \hfill \break
\end{enumerate}
\end{spacing}
\section*{\textbf{\underline{Code}}\footnote{\url{https://github.com/Brihat9/CG/blob/master/cg_lab_4_convex_hull.py}}}
\begin{spacing}{1}
\begin{footnotesize}
\begin{spverbatim}
#!/usr/bin/env python
from basics import Point, LineSegment
from circular_doubly_linked_list import CircularDoublyLinkedList
from cg_lab_3 import is_point_inclusion
from cg_lab_2_lr_turn import is_left_turn, is_colinear, compute_area
import copy
import math
import pprint
''' change input file here '''
INPUT_FILE = 'cg_lab_4_input_file_test'
def get_extreme_points_based_convex_hull(points):
''' returns sorted list of extreme points from given set of points
that forms the convex hull
parameter: points - list of points
returns: sorted list of extreme points
'''
n = len(points)
# list to add non extreme points
non_extreme_points = []
'''
for i upto N - 1:
for j != i upto N -2:
for k != i != j upto N - 3:
for l != i != j != k upto N - 4:
if point P_l lies inside triangle(P_i, P_j, P_k):
P_l is non-extreme points
'''
for i in range(n - 1):
for j in range(n - 2):
if j != i:
for k in range(n - 3):
if k != i and k != j:
for l in range(n - 4):
if l != i and l != j and l != k:
# create triangle (polygon)
polygon = CircularDoublyLinkedList()
polygon.append(points[i])
polygon.append(points[j])
polygon.append(points[k])
# check point_l lies inside triangle (polygon)
res = is_point_inclusion(polygon, points[l])
# if point lies inside, it is non extreme point
if res:
non_extreme_points.append(points[l])
''' for testing: displays content of non_extreme_points '''
# for index in range(len(non_extreme_points)):
# print(non_extreme_points[index]),
# print("\t"),
''' using python 'set' datatype to find extreme point '''
points_set = set(points)
non_extreme_points_set = set(non_extreme_points)
extreme_points_set = points_set - non_extreme_points_set
extreme_points = list(extreme_points_set)
# print(extreme_points)
''' for sorting extreme points '''
# calculate centroid of polygon
centroid = Point(sum([point.x for point in extreme_points])/len(extreme_points),sum([point.y for point in extreme_points])/len(extreme_points))
# print("Centroid of all Points: " + str(centroid))
# sort vertices of polygon in anti-clockwise order
sorted_extreme_points = copy.deepcopy(extreme_points)
sorted_extreme_points.sort(key=lambda p: math.atan2(p.y-centroid.y,p.x-centroid.x))
''' for testing: show points in sorted order '''
# print("\nExtreme points in sorted order")
# for index in range(len(extreme_points)):
# print(sorted_extreme_points[index]),
# print("\n")
return sorted_extreme_points
def get_extreme_edges_based_convex_hull(points):
''' returns sorted list of extreme edges from given set of points
that forms the convex hull
parameter: points - list of points
returns: sorted list of extreme edges
'''
n = len(points)
# list to add non extreme points
extreme_edges = []
'''
for i upto N - 1:
for j != i upto N -2:
for k != i != j upto N - 3:
if point P_k is left or colinear with line(P_i, P_j):
line(P_i, P_j) is extreme edge
else:
line(P_i, P_j) is non-extreme edge
'''
for i in range(n):
for j in range(n):
if j != i:
res = [None] * n
line = LineSegment(points[i], points[j])
for k in range(n):
res[k] = is_left_turn(points[i], points[j], points[k]) or is_colinear(points[i], points[j], points[k])
if set(res) == {True}:
''' for test '''
# print(points[i]),; print("\t"),; print(points[j]),; print("\t"),; print(res)
extreme_edges.append(line)
# print(extreme_edges)
extreme_edges_set = set(extreme_edges)
''' get vertices from extreme edges '''
extreme_edge_vertex = []
for index in range(len(extreme_edges)):
line = extreme_edges[index]
extreme_edge_vertex.append(line.start)
extreme_edge_vertex.append(line.terminal)
''' get unique vertices from extreme edge vertices '''
eev = list(set(extreme_edge_vertex))
# print(eev)
''' for testing: displays content of non_extreme_points '''
# for index in range(len(eev)):
# print(eev[index]),
# print("\t"),
''' for sorting extreme edge vertices '''
centroid = Point(sum([point.x for point in eev])/len(eev),sum([point.y for point in eev])/len(eev))
sorted_eev = copy.deepcopy(eev)
sorted_eev.sort(key=lambda p: math.atan2(p.y-centroid.y,p.x-centroid.x))
''' for testing only '''
# for index in range(len(sorted_eev)):
# print(sorted_eev[index]),
# print("\t"),
''' obtain sorted edges from sorted edge vertices '''
sorted_edge = []
num_sorted_vertices = len(sorted_eev)
for index in range(num_sorted_vertices):
edge = LineSegment(sorted_eev[index], sorted_eev[(index + 1) % num_sorted_vertices])
sorted_edge.append(edge)
''' for testing only: sorted edges '''
# for index in range(len(sorted_edge)):
# print(sorted_edge[index]),
# print("\t"),
return(sorted_edge)
def gift_wrap_convex_hull_linked_list(point_linked_list):
''' Gift Wrap Algorithm implementation using Circular Doubly Linked List
parameter: point_linked_list = Circular Doubly Linked List of sorted
points (in non decreasing order of Y-Coord)
result: Circular Doubly Linked List of Convex Hull Points
'''
# result to return
gift_wrap_linked_list = CircularDoublyLinkedList()
# first point is the point with least Y- coordinate
first_point = point_linked_list.head
    # take first point as reference, and set next point to None
ref_point = first_point
next_point = None
while(True):
# add reference point to result
gift_wrap_linked_list.append(ref_point.data)
# get next point in linked list
next_point = ref_point.next
# set cursor to head of linked list
cursor = point_linked_list.head
# for all node in linked list
while(True):
# if there exist a point counter-clockwise to next point, set that point as next point
if(compute_area(ref_point.data, cursor.data, next_point.data) > 0.0):
next_point = cursor
# increment cursor to next node
cursor = cursor.next
# stop when cursor reach head of linked list again
if(cursor == point_linked_list.head):
break
# set next point as reference point for next iteration
ref_point = next_point
# iterate until we reach head of linked list
if(ref_point == point_linked_list.head):
break
return gift_wrap_linked_list
def graham_scan_convex_hull(points):
''' Graham Scan Algorithm
parameter: points = array of given points
result: Array of Convex Hull Points
'''
# num of points
vertex_num = len(points)
# if number of points are less than 4, then the input set of points
# are the convex hull itself
if vertex_num < 4:
return points
# result variable
convex_hull_graham_scan = []
# get min Y- Coord point
sorted_points_inc_y = copy.deepcopy(points)
sorted_points_inc_y.sort(key=lambda p: p.y)
min_y_coord_point = sorted_points_inc_y[0]
# print(min_y_coord_point)
# sort points in anti-clockwise order wrt min_y_coord_point
sorted_p = copy.deepcopy(points)
sorted_p.sort(key=lambda p: math.atan2(p.y-min_y_coord_point.y,p.x-min_y_coord_point.x))
''' Graham Scan Algorithm begins here '''
# add first three coordinates of sorted points in result
convex_hull_graham_scan.append(sorted_p[0])
convex_hull_graham_scan.append(sorted_p[1])
convex_hull_graham_scan.append(sorted_p[2])
''' these are top of stack and next top of stack, using list (for testing)'''
# print(point_stack[-1])
# print(point_stack[-2])
'''
i = 3
while(i < N):
if left_turn(top(stack), next_top(stack), sorted_point(i)):
stack.push(sorted_point[i])
i++
else:
stack.pop()
'''
index = 3
while(index < vertex_num):
if is_left_turn(convex_hull_graham_scan[-2], convex_hull_graham_scan[-1], sorted_p[index]):
convex_hull_graham_scan.append(sorted_p[index])
index += 1
else:
convex_hull_graham_scan.pop()
return convex_hull_graham_scan
def main():
""" Main Function """
print("CG LAB 4")
print("Brihat Ratna Bajracharya\n19/075\n")
''' reads input file '''
in_file = open(INPUT_FILE, 'r')
''' get number of points '''
print("Enter number of points:"),
points_num = int(in_file.readline())
print(points_num)
''' reads coords of point '''
input_coords = in_file.readline()
input_coords_list = input_coords.split()
# print(input_coords_list)
''' initialize vertex list '''
points = [None] * points_num
''' get coordinates of each point '''
for index in range(points_num):
print(" Enter coordinates of point P{}:".format(index+1)),
input_coords_point = input_coords_list[index].split(',')
points[index] = Point(int(input_coords_point[0]), int(input_coords_point[1]))
print(points[index])
''' FINDING CONVEX HULL BASED ON EXTREME POINTS '''
convex_hull_exp_pt = get_extreme_points_based_convex_hull(points)
print("\nConvex Hull (Extreme Points): ["),
for index in range(len(convex_hull_exp_pt)):
print(convex_hull_exp_pt[index]),
if index != len(convex_hull_exp_pt) - 1:
print(","),
print("]")
''' FINDING CONVEX HULL BASED ON EXTREME EDGES '''
convex_hull_exp_edges = get_extreme_edges_based_convex_hull(points)
print("\nConvex Hull (Extreme Edges): ["),
for index in range(len(convex_hull_exp_edges)):
print(convex_hull_exp_edges[index]),
if index != len(convex_hull_exp_edges) - 1:
print("---"),
print("]")
''' FINDING CONVEX HULL: GIFT WRAP ALGORITHM (USING CIRCULAR DOUBLY LINKED LIST) '''
points_inc_order_of_y_coord = copy.deepcopy(points)
points_inc_order_of_y_coord.sort(key=lambda point: point.y)
point_linked_list = CircularDoublyLinkedList()
for index in range(len(points)):
point_linked_list.append(points_inc_order_of_y_coord[index])
convex_hull_gift_wrap_linked_list = gift_wrap_convex_hull_linked_list(point_linked_list)
convex_hull_gift_wrap_linked_list.display("Convex Hull (Gift Wrap) 2")
''' FINDING CONVEX HULL: GRAHAM SCAN ALGORITHM '''
convex_hull_graham_scan = graham_scan_convex_hull(points)
print("\nConvex Hull (Graham Scan): ["),
for index in range(len(convex_hull_graham_scan)):
print(convex_hull_graham_scan[index]),
if index != len(convex_hull_graham_scan) - 1:
print(","),
print("]")
print("\nDONE.")
if __name__ == '__main__':
main()
\end{spverbatim}
\end{footnotesize}
\end{spacing}
\section*{\textbf{\underline{Output}}}
\begin{spacing}{1}
\begin{footnotesize}
\begin{spverbatim}
$ ./cg_lab_4_convex_hull.py
CG LAB 4
Brihat Ratna Bajracharya
19/075
Enter number of points: 11
Enter coordinates of point P1: (5, 8)
Enter coordinates of point P2: (2, 7)
Enter coordinates of point P3: (7, 7)
Enter coordinates of point P4: (5, 6)
Enter coordinates of point P5: (3, 5)
Enter coordinates of point P6: (6, 5)
Enter coordinates of point P7: (4, 4)
Enter coordinates of point P8: (3, 3)
Enter coordinates of point P9: (2, 2)
Enter coordinates of point P10: (5, 2)
Enter coordinates of point P11: (8, 3)
Convex Hull (Extreme Points): [ (3, 3) , (2, 2) , (5, 2) , (8, 3) , (7, 7) , (5, 8) , (2, 7) ]
Convex Hull (Extreme Edges): [ [(2, 2), (5, 2)] --- [(5, 2), (8, 3)] --- [(8, 3), (7, 7)] --- [(7, 7), (5, 8)] --- [(5, 8), (2, 7)] --- [(2, 7), (2, 2)] ]
Convex Hull (Gift Wrap) 2: [ (2, 2) (5, 2) (8, 3) (7, 7) (5, 8) (2, 7) ] #
Convex Hull (Graham Scan): [ (2, 2) , (5, 2) , (8, 3) , (7, 7) , (5, 8) , (2, 7) ]
DONE.
\end{spverbatim}
\end{footnotesize}
\end{spacing}
\begin{figure}[h!]
\centering
\begin{subfigure}[b]{0.2\linewidth}
\includegraphics[width=\linewidth]{cg_lab_4_output_extreme_points.png}
\caption{Using \\Extreme Points}
\end{subfigure}
\hspace{1in}
\begin{subfigure}[b]{0.2\linewidth}
\includegraphics[width=\linewidth]{cg_lab_4_output_extreme_edges.png}
\caption{Using \\Extreme Edge}
\end{subfigure}
\hspace{1in}
\begin{subfigure}[b]{0.2\linewidth}
\includegraphics[width=\linewidth]{cg_lab_4_output_gift_wrap.png}
\caption{Using \\Gift Wrap/Graham Scan}
\end{subfigure}
\caption{Convex Hull}
\label{fig:convex_hull}
\end{figure}
\vspace{1cm}
\section*{\textbf{\underline{Input File}}\footnote{\url{https://github.com/Brihat9/CG/blob/master/cg_lab_4_input_file}}}
\begin{spacing}{1}
\begin{footnotesize}
\begin{spverbatim}
11
5,8 2,7 7,7 5,6 3,5 6,5 4,4 3,3 2,2 5,2 8,3
\end{spverbatim}
\end{footnotesize}
\end{spacing}
\end{document}
|
{"hexsha": "1e1107250010d2efa97682477b689eb942a80ef6", "size": 15205, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Lab_Reports/CG_LAB_4_Report/cg_lab_4_report.tex", "max_stars_repo_name": "Brihat9/CG", "max_stars_repo_head_hexsha": "e6af5a39729fe7def3d6af81fe636f23f23e2bdb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-15T14:28:50.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-15T14:28:50.000Z", "max_issues_repo_path": "Lab_Reports/CG_LAB_4_Report/cg_lab_4_report.tex", "max_issues_repo_name": "Brihat9/CG", "max_issues_repo_head_hexsha": "e6af5a39729fe7def3d6af81fe636f23f23e2bdb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Lab_Reports/CG_LAB_4_Report/cg_lab_4_report.tex", "max_forks_repo_name": "Brihat9/CG", "max_forks_repo_head_hexsha": "e6af5a39729fe7def3d6af81fe636f23f23e2bdb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4893162393, "max_line_length": 154, "alphanum_fraction": 0.6238079579, "num_tokens": 3990}
|
import pandas as pd
import numpy as np
import os
import re
import html
import pyprind
import pickle
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.utils import shuffle
stopwords = stopwords.words('english')
def tokenizer(text):
text = html.unescape(text)
    text = re.sub(r'http://[a-zA-Z0-9./]+', '', text)
    text = re.sub(r'<[^>]*>', '', text)
    emoticons = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
    text = re.sub(r'[\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')
tokenized = [w for w in text.split() if w not in stopwords]
return tokenized
def data_stream(path):
with open(path,'r') as csv:
next(csv)
for line in csv :
text ,label = line[2:],int(line[0])
yield text,label
# print(next(stream_docs('./tweet_train.csv')))
def mini_batch(doc_stream,size):
docs,y = [],[]
try:
for _ in range(size):
text,label = next(doc_stream)
docs.append(text)
y.append(label)
except StopIteration:
return None, None
return docs,y
vect = HashingVectorizer(decode_error='ignore', n_features=2**21,
preprocessor=None,tokenizer=tokenizer)
clf = SGDClassifier(loss='log', random_state=1, max_iter=1)
pbar=pyprind.ProgBar(1500)
classes = np.array([0,4])
doc_stream = data_stream('./tweet_train.csv')
for _ in range(1500):
X_train,y_train = mini_batch(doc_stream,size = 1000)
if not X_train:
break
X_train = vect.transform(X_train)
clf.partial_fit(X_train, y_train, classes)
pbar.update()
dest = './pkl_objects'
if not os.path.exists(dest):
os.makedirs(dest)
pickle.dump(clf,open(os.path.join(dest, 'tweet140_clf.pkl'), 'wb'),protocol=4)
X_test, y_test = mini_batch(doc_stream,size =5000)
#print(X_test[:5])
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test))
#clf = pickle.load(open(os.path.join('pkl_objects', 'tweet140_clf.pkl'), 'rb'))
def classify(text):
px=vect.transform([text])
labels = {'[4]':'Positive' , '[0]':'Negative'}
print(labels[str(clf.predict(px))])
print(clf.predict_proba(px))
return labels[str(clf.predict(px))]
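# A minimal usage sketch once the model above has been fit:
#
#     classify("I love this!")    # prints the predicted label and class probabilities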
|
{"hexsha": "f4e535eeaf65fcb5b9be022da2370d7877b68c00", "size": 2177, "ext": "py", "lang": "Python", "max_stars_repo_path": "classifier.py", "max_stars_repo_name": "Cranxter/Twitter_Sentiment_Analysis", "max_stars_repo_head_hexsha": "82dfe530579849ad7079b48e5b433b3e28923fb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classifier.py", "max_issues_repo_name": "Cranxter/Twitter_Sentiment_Analysis", "max_issues_repo_head_hexsha": "82dfe530579849ad7079b48e5b433b3e28923fb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classifier.py", "max_forks_repo_name": "Cranxter/Twitter_Sentiment_Analysis", "max_forks_repo_head_hexsha": "82dfe530579849ad7079b48e5b433b3e28923fb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.9724770642, "max_line_length": 81, "alphanum_fraction": 0.6839687644, "include": true, "reason": "import numpy", "num_tokens": 595}
|
/**
* PluginManager.cpp
*
* Note: Some functions in this file are Cocoa dependent
*
* History:
* David Cox on Fri Dec 27 2002 - Created.
 *  Paul Jankunas on Wed Mar 23 2005 - Fixed spacing. Fixed readPlugin
* function so that it doesn't start its search from
* the package directory.
*
* Copyright (c) 2002 MIT. All rights reserved.
*/
#include "PluginServices.h"
#include "ComponentRegistry.h"
#include "VariableRegistry.h"
#include "Plugin.h"
#include "PlatformDependentServices.h"
#include "EventBuffer.h"
#include "LoadingUtilities.h"
#include <boost/filesystem/operations.hpp>
#include <dlfcn.h>
BEGIN_NAMESPACE_MW
bool registries_are_initialized = false;
shared_ptr<ComponentRegistry> ComponentRegistry__;
void initializeServiceRegistries(){
global_variable_registry = shared_ptr<VariableRegistry>(new VariableRegistry(global_outgoing_event_buffer));
registries_are_initialized = true;
}
void readPlugins(boost::filesystem::path dir_path){
using namespace boost::filesystem;
if ( !exists( dir_path ) ){
return;
}
directory_iterator end_itr; // default construction yields past-the-end
for ( directory_iterator itr( dir_path ); itr != end_itr; ++itr ){
string plugin_name = itr->path().filename().string();
string plugin_name_stripped =
plugin_name.substr(0, plugin_name.find_last_of("."));
//cerr << "Loading " << plugin_name_stripped.c_str() << endl;
readPlugin(plugin_name_stripped);
}
}
void readPlugin(string path){
// Typedef for the function pointer.
typedef Plugin* (*GetPluginFunctionPtr)();
GetPluginFunctionPtr getplug = NULL;
    char dynamic_library_path[512]; // TODO: replace the magic size with a named constant
#ifdef __APPLE__
#if TARGET_OS_OSX
sprintf(dynamic_library_path, "%s/%s.bundle/Contents/MacOS/%s",
pluginPath().string().c_str(), path.c_str(), path.c_str());
#elif TARGET_OS_IPHONE
sprintf(dynamic_library_path, "%s/%s.framework/%s",
pluginPath().string().c_str(), path.c_str(), path.c_str());
// On iOS, frameworks and plugins have the same extension and reside in the same
// subdirectory of the app bundle. However, frameworks are loaded automatically
// at launch time, while plugins are loaded manually at run time. Therefore, by
// calling dlopen with mode RTLD_NOLOAD, we can determine if a given bundle
// executable is already loaded and, hence, whether it's a framework or a
// plugin.
if (dlopen(dynamic_library_path, RTLD_LAZY | RTLD_NOLOAD)) {
// This "plugin" is actually a framework, so take no further action.
return;
}
#else
#error Unsupported platform
#endif
#elif defined(__linux__)
sprintf(dynamic_library_path, "%s/%s.so",
pluginPath().string().c_str(), path.c_str());
#endif
mprintf("Loading %s", path.c_str());
void *library_handle = dlopen(dynamic_library_path, RTLD_LAZY);
if(library_handle == NULL){
mwarning(M_PLUGIN_MESSAGE_DOMAIN, "Plugin module (%s) failed to load: %s", path.c_str(), dlerror());
// TODO: throw an error?
return;
}
getplug = (GetPluginFunctionPtr)dlsym(library_handle, "getPlugin");
//dlclose(library_handle);
std::unique_ptr<Plugin> plugin;
if (getplug) {
plugin.reset(getplug());
}
if(!plugin){
mwarning(M_PLUGIN_MESSAGE_DOMAIN,
"Plugin module (%s) failed to produce a valid plugin object",
path.c_str());
// TODO: throw an error?
return;
}
shared_ptr<ComponentRegistry> component_registry =
ComponentRegistry::getSharedRegistry();
// Now we've got a hold of the plugin and we can work with it
plugin->registerComponents(component_registry);
}
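// For reference, a plugin module is expected to export a C-linkage factory
// matching the "getPlugin" symbol looked up above. A minimal sketch
// (MyPlugin is a hypothetical Plugin subclass):
//
//     extern "C" Plugin* getPlugin() {
//         return new MyPlugin();
//     }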
END_NAMESPACE_MW
|
{"hexsha": "1211b2b55e41b590804cd16c459d5a5bafd50b2e", "size": 3814, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "core/Core/PluginServices/PluginServices.cpp", "max_stars_repo_name": "esayui/mworks", "max_stars_repo_head_hexsha": "0522e5afc1e30fdbf1e67cedd196ee50f7924499", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/Core/PluginServices/PluginServices.cpp", "max_issues_repo_name": "esayui/mworks", "max_issues_repo_head_hexsha": "0522e5afc1e30fdbf1e67cedd196ee50f7924499", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/Core/PluginServices/PluginServices.cpp", "max_forks_repo_name": "esayui/mworks", "max_forks_repo_head_hexsha": "0522e5afc1e30fdbf1e67cedd196ee50f7924499", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4388489209, "max_line_length": 110, "alphanum_fraction": 0.6793392764, "num_tokens": 927}
|
function to_ast(::Type{T}) where {T}
io = IOBuffer()
print(io, T)
str = String(take!(io))
ast = Meta.parse(str)
return ast
end
function to_pascal_case(s::Symbol)
str = String(s)
new_str = ""
next_upper = true
for letter in str
if next_upper
new_str *= uppercase(letter)
next_upper = false
elseif letter == '_'
next_upper = true
else
new_str *= letter
end
end
if new_str[end] == 's'
new_str = new_str[1:end-1]
end
return Symbol(new_str)
end
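# e.g. to_pascal_case(:my_structs) == :MyStruct (a trailing 's' is stripped)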
function write_exprs(exprs::Vector, fname::AbstractString)
open(fname, "w") do io
for expr in exprs
str = repr(expr)[3:end-1] # removes :( and )
str = replace(str, "\n " => "\n")
write(io, str)
write(io, "\n\n")
end
end
end
function to_exprs(t, n)
exprs = []
to_expr(t, n, exprs)
return exprs
end
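# A minimal usage sketch (values illustrative): derive struct definitions from
# a nested NamedTuple and write them to a file.
#
#     nt = (a = 1, b = (c = "x", d = 2.0))
#     exprs = to_exprs(typeof(nt), :my_record)
#     write_exprs(exprs, "generated_types.jl")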
function to_expr(::Type{NamedTuple{N,T}}, root_name::Symbol, exprs) where {N,T<:Tuple}
sub_exprs = []
for (n, t) in zip(N, T.types)
push!(sub_exprs, to_field_expr(t, n, exprs))
end
struct_name = to_pascal_case(root_name)
push!(exprs, Expr(:struct, false, struct_name, Expr(:block, sub_exprs...)))
return struct_name
end
function to_expr(::Type{Array{T,N}}, root_name::Symbol, exprs) where {T<:NamedTuple,N}
return to_expr(T, root_name, exprs)
end
function to_expr(t::Type{T}, root_name::Symbol, exprs) where {T}
if T isa Union
return Expr(
:curly,
:Union,
to_expr(t.a, root_name, exprs),
to_expr(t.b, root_name, exprs),
)
else
return to_ast(T)
end
end
# given the type of a field of a struct, return a node for that field's name/type
function to_field_expr(t::Type{NamedTuple{N,T}}, root_name::Symbol, exprs) where {N,T}
to_expr(t, root_name, exprs)
return Expr(:(::), root_name, to_pascal_case(root_name))
end
function to_field_expr(::Type{Array{T,N}}, root_name::Symbol, exprs) where {T,N}
return Expr(:(::), root_name, Expr(:curly, :Array, to_expr(T, root_name, exprs), 1))
end
function to_field_expr(::Type{T}, root_name, exprs) where {T}
Expr(:(::), root_name, to_expr(T, root_name, exprs))
end
str_code = """
struct MyStruct
a::Int
b::OtherType
c::Vector{Int,1}
end
"""
|
{"hexsha": "5f0109b83e00f05bf84b884beb96866374af32ca", "size": 2398, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/write_type.jl", "max_stars_repo_name": "quinnj/JSONTypeProvider.jl", "max_stars_repo_head_hexsha": "783a08f79bb4a4dacc929a4003972d4d505234e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-05T02:15:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-05T02:15:03.000Z", "max_issues_repo_path": "src/write_type.jl", "max_issues_repo_name": "quinnj/JSONTypeProvider.jl", "max_issues_repo_head_hexsha": "783a08f79bb4a4dacc929a4003972d4d505234e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-25T17:17:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-25T17:17:46.000Z", "max_forks_repo_path": "src/write_type.jl", "max_forks_repo_name": "quinnj/JSONTypeProvider.jl", "max_forks_repo_head_hexsha": "783a08f79bb4a4dacc929a4003972d4d505234e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-25T17:10:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-25T17:10:19.000Z", "avg_line_length": 24.9791666667, "max_line_length": 88, "alphanum_fraction": 0.5963302752, "num_tokens": 703}
|
Subroutine hf3mkr(Axyz,Bxyz,Cxyz,alpha,Gxyz,
& RS,GC,ff,R,R0,IJK,Nabc,Lg,Lg3)
c
c $Id$
c
Implicit none
c::passed
integer Nabc, Lg, Lg3
c--> Cartesian Coordinates for centers a, b, c
Double Precision Axyz(3),Bxyz(3),Cxyz(3) ! [input]
c--> Exponents (1:3,*) and ES prefactors (4:4,*)
Double Precision alpha(4,Nabc) ! [input]
c--> Auxiliary Function Integrals & Index
Double Precision R0(Nabc,Lg3) ! [output]
Integer IJK(0:Lg,0:Lg,0:Lg) ! [output]
c--> Scratch Space
Double Precision Gxyz(3,Nabc), GC(Nabc,3)
Double Precision RS(Nabc), ff(2,Nabc), R(Nabc,0:Lg,Lg3)
c::local
double precision PI, PI4
Parameter (PI=3.1415926535898D0,PI4=4.D0/PI)
c
double precision a, b, c, abci
*rak: double precision ab, abi
double precision Ax, Ay, Az, Bx, By, Bz, Cx, Cy, Cz
*rak: Double Precision Px, Py, Pz
double precision GCx, GCy, GCz
double precision alpha_t
*acc_debug: double precision accy
*acc_debug: integer accy_cnt
*acc_debug: logical reached
integer mp, j, m, n
c
c Define the auxiliary function integrals necessary to compute the three
c center nuclear attraction integrals (NAIs). These integrals are scaled
c by an appropriate factor, RS, defined as
c
c / a + b + c \ 1/2
c RS = | ----------- |
c \ PI/4 /
c
c
c******************************************************************************
* call dfill((Nabc*(Lg+1)*Lg3), 0.0d00 , R, 1)
* call dfill((Nabc*Lg3),0.0d00, r0, 1)
c Define the center "P" plus C to get "G" center.
Ax = Axyz(1)
Ay = Axyz(2)
Az = Axyz(3)
Bx = Bxyz(1)
By = Bxyz(2)
Bz = Bxyz(3)
Cx = Cxyz(1)
Cy = Cxyz(2)
Cz = Cxyz(3)
do 00100 mp = 1,Nabc
a = alpha(1,mp)
b = alpha(2,mp)
c = alpha(3,mp)
*rak: ab = a + b
*rak: abi = 1/ab
*rak:
*rak: px = abi*(a*Ax + b*Bx)
*rak: py = abi*(a*Ay + b*By)
*rak: pz = abi*(a*Az + b*Bz)
*rak: abci = 1/(ab+c)
abci = 1/(a+b+c)
*rak: Gxyz(1,mp) = abci*(ab*px + c*Cx)
*rak: Gxyz(2,mp) = abci*(ab*py + c*Cy)
*rak: Gxyz(3,mp) = abci*(ab*pz + c*Cz)
Gxyz(1,mp) = abci*(a*Ax + b*Bx + c*Cx)
Gxyz(2,mp) = abci*(a*Ay + b*By + c*Cy)
Gxyz(3,mp) = abci*(a*Az + b*Bz + c*Cz)
c Define the scaling factor.
RS(mp) = sqrt((a+b+c)*PI4)
00100 continue
c Define factors necessary to compute incomplete gamma function and the
c auxiliary functions.
do 00200 m = 1,Nabc
alpha_t = alpha(1,m) + alpha(2,m) + alpha(3,m)
ff(1,m) = RS(m)
ff(2,m) = -2.D0*alpha_t
GCx = Gxyz(1,m) - Cx
GCy = Gxyz(2,m) - Cy
GCz = Gxyz(3,m) - Cz
R(m,0,1) = alpha_t*(GCx*GCx + GCy*GCy + GCz*GCz)
GC(m,1) = GCx
GC(m,2) = GCy
GC(m,3) = GCz
00200 continue
c Evaluate the incomplete gamma function.
call igamma(R,Nabc,Lg)
*acc_debug: accy = 1.0d-30
*acc_debug: accy_cnt = 0
*acc_debug: reached = .false.
*acc_debug:00001 continue
*acc_debug: call igamma_acc(R,Nabc,Lg,accy,reached)
*acc_debug: if (.not.reached) then
*acc_debug: accy_cnt = accy_cnt + 1
*acc_debug: accy = accy/5.0d00
*acc_debug: write(6,*)' accy reset to ',accy,' count is ',accy_cnt
*acc_debug: goto 00001
*acc_debug: endif
c Define the initial auxiliary functions (i.e., R000j, j=1,Lg).
do 00300 j = 0,Lg
do 00400 m = 1,Nabc
R(m,j,1) = ff(1,m)*R(m,j,1)
ff(1,m) = ff(1,m)*ff(2,m)
00400 continue
00300 continue
c Recursively build the remaining auxiliary functions (i.e., RIJKj, j=0).
call hfmkr(R,IJK,GC,Nabc,Lg,Lg3)
c Transfer to R0 array.
do 00500 n = 1,Lg3
do 00600 m = 1,Nabc
R0(m,n) = R(m,0,n)
00600 continue
00500 continue
end
|
{"hexsha": "d3f7b6006830c525d3f27481899735a31e7ebcd5", "size": 4167, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/NWints/int/hf3mkr.f", "max_stars_repo_name": "dinisAbranches/nwchem", "max_stars_repo_head_hexsha": "21cb07ff634475600ab687882652b823cad8c0cd", "max_stars_repo_licenses": ["ECL-2.0"], "max_stars_count": 317, "max_stars_repo_stars_event_min_datetime": "2017-11-20T21:29:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T11:48:24.000Z", "max_issues_repo_path": "src/NWints/int/hf3mkr.f", "max_issues_repo_name": "dinisAbranches/nwchem", "max_issues_repo_head_hexsha": "21cb07ff634475600ab687882652b823cad8c0cd", "max_issues_repo_licenses": ["ECL-2.0"], "max_issues_count": 356, "max_issues_repo_issues_event_min_datetime": "2017-12-05T01:38:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T02:28:21.000Z", "max_forks_repo_path": "src/NWints/int/hf3mkr.f", "max_forks_repo_name": "dinisAbranches/nwchem", "max_forks_repo_head_hexsha": "21cb07ff634475600ab687882652b823cad8c0cd", "max_forks_repo_licenses": ["ECL-2.0"], "max_forks_count": 135, "max_forks_repo_forks_event_min_datetime": "2017-11-19T18:36:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T02:28:49.000Z", "avg_line_length": 26.2075471698, "max_line_length": 79, "alphanum_fraction": 0.5152387809, "num_tokens": 1469}
|
import argparse
import logging
import sys
import os
import yaml
import numpy as np
from dtld_parsing.calibration import CalibrationData
import cv2
from dtld_parsing.driveu_dataset import DriveuDatabase
import matplotlib.pyplot as plt
from PIL import Image
np.set_printoptions(suppress=True)
# Logging
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: "
"%(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--label_file", default="")
parser.add_argument("--calib_dir", default="")
parser.add_argument("--data_base_dir", default="")
return parser.parse_args()
def main(args):
# Load database
database = DriveuDatabase(args.label_file)
database.open(args.data_base_dir)
# Load calibration
calibration = CalibrationData()
intrinsic_left = calibration.load_intrinsic_matrix(
args.calib_dir + "/intrinsic_left.yml"
)
rectification_left = calibration.load_rectification_matrix(
args.calib_dir + "/rectification_left.yml"
)
projection_left = calibration.load_projection_matrix(
args.calib_dir + "/projection_left.yml"
)
extrinsic = calibration.load_extrinsic_matrix(
args.calib_dir + "/extrinsic.yml"
)
distortion_left = calibration.load_distortion_matrix(
args.calib_dir + "/distortion_left.yml"
)
# logging.info("Intrinsic Matrix:\n\n{}\n".format(intrinsic_left))
# logging.info("Extrinsic Matrix:\n\n{}\n".format(extrinsic))
# logging.info("Projection Matrix:\n\n{}\n".format(projection_left))
# logging.info("Rectification Matrix:\n\n{}\n".format(rectification_left))
# logging.info("Distortion Matrix:\n\n{}\n".format(distortion_left))
output_dir = "DTLD_images"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
out = []
for idx_d, driveu_img in enumerate(database.images):
prefix = driveu_img.file_path.split('/')[-1].split('.')[0]
status, img = driveu_img.get_image()
image = Image.fromarray(img)
annotations = []
for i, o in enumerate(driveu_img.objects):
area = o.width * o.height
class_name = get_class_name(int(str(o.class_id)[-2]))
bbox = {
"class": class_name,
"x_width": o.width,
"y_height": o.height,
"xmin": o.x,
"ymin": o.y,
}
annotations.append(bbox)
        # skip images that have no annotations
        if len(annotations) == 0:
            continue
output_path = "{}/{}.jpeg".format(output_dir, prefix)
image.save(output_path)
entry = {
"annotations": annotations,
"class": "image",
"filename": output_path,
}
out.append(entry)
with open('real_data_annotations_3.yaml', 'a') as outfile:
yaml.dump(out, outfile, default_flow_style=False)
def get_class_name(class_id):
class_name = "Unknown"
if class_id == 1:
class_name = "Red"
elif class_id == 2 or class_id == 3:
class_name = "Yellow"
elif class_id == 4:
class_name = "Green"
return class_name
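# For reference: main() above extracts the second-to-last digit of the DTLD
# class_id and maps it here, e.g. get_class_name(1) -> "Red".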
if __name__ == "__main__":
main(parse_args())
|
{"hexsha": "73ac9d411d113341dabd534a6c7750a7b495ebaf", "size": 3381, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/real_tl_images/SDCN/create_annotations_from_dtld.py", "max_stars_repo_name": "CaoFM/CarND-Capstone", "max_stars_repo_head_hexsha": "fc4d5a14e63d68f8a9946c37438bafe7dda11c70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-15T04:15:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-20T08:09:04.000Z", "max_issues_repo_path": "models/real_tl_images/SDCN/create_annotations_from_dtld.py", "max_issues_repo_name": "CaoFM/CarND-Capstone", "max_issues_repo_head_hexsha": "fc4d5a14e63d68f8a9946c37438bafe7dda11c70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-07-12T23:16:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-27T21:35:04.000Z", "max_forks_repo_path": "models/real_tl_images/SDCN/create_annotations_from_dtld.py", "max_forks_repo_name": "CaoFM/CarND-Capstone", "max_forks_repo_head_hexsha": "fc4d5a14e63d68f8a9946c37438bafe7dda11c70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-12T15:53:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-16T22:17:03.000Z", "avg_line_length": 28.4117647059, "max_line_length": 78, "alphanum_fraction": 0.6329488317, "include": true, "reason": "import numpy", "num_tokens": 783}
|
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
def getNWindowColNames(col,N):
return [(col+str(i)) for i in range(0,N)]
class SlidingWindowTransformer(TransformerMixin):
def __init__(self,NwindowSize=10):
super().__init__()
assert NwindowSize >= 1, "Window Size >= 1 please"
self.NwindowSize = NwindowSize
def _partialTransform(self,X, y=None,col_names=None,force_reshape=False,**fit_params):
        if col_names is not None:
assert self.NwindowSize==len(col_names), "Column Names array should be equal to size of window"
i=0
new_arr = []
X = np.array(X)
        if force_reshape:
X = np.ravel(X)
assert X.ndim==1, "Data should be one dimensional, else pass force_reshape=True"
while(i+self.NwindowSize<=len(X)):
new_arr.append(X[i:i+self.NwindowSize])
i+=1
        if col_names is None:
return np.array(new_arr)
else:
DF = pd.DataFrame(np.array(new_arr))
DF.columns = col_names
return DF
def fit_transform(self,X, y=None,col_names=None,col_namePrefix=None,force_reshape=False,**fit_params):
        if col_namePrefix is not None:
col_names = getNWindowColNames(col_namePrefix,self.NwindowSize)
return self._partialTransform(X,y,col_names,force_reshape,**fit_params)
def inverse_transform(self,X,y=None,col_name=None,**params):
X = np.array(X)
finalArr = []
finalArr.extend(X[0,:].tolist())
finalArr.extend(X[1:,-1].tolist())
        if col_name is None:
return np.array(finalArr)
else:
return pd.DataFrame(data=finalArr,columns=[col_name])
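# A minimal usage sketch (illustrative data):
#
#     t = SlidingWindowTransformer(NwindowSize=3)
#     windows = t.fit_transform(np.arange(6))    # shape (4, 3)
#     restored = t.inverse_transform(windows)    # array([0, 1, 2, 3, 4, 5])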
|
{"hexsha": "69d17675e3f90092f92b0b2fd3b5c0dd314a4b4b", "size": 1752, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scikit_Extensions_For_Stocks/sliding_window_tranformer/_sliding_window_tfrmer.py", "max_stars_repo_name": "uNRealCoder/Scikit-Extensions-Stock", "max_stars_repo_head_hexsha": "0968f2319373b91218589119cb06c561aa315e58", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Scikit_Extensions_For_Stocks/sliding_window_tranformer/_sliding_window_tfrmer.py", "max_issues_repo_name": "uNRealCoder/Scikit-Extensions-Stock", "max_issues_repo_head_hexsha": "0968f2319373b91218589119cb06c561aa315e58", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scikit_Extensions_For_Stocks/sliding_window_tranformer/_sliding_window_tfrmer.py", "max_forks_repo_name": "uNRealCoder/Scikit-Extensions-Stock", "max_forks_repo_head_hexsha": "0968f2319373b91218589119cb06c561aa315e58", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9333333333, "max_line_length": 107, "alphanum_fraction": 0.6289954338, "include": true, "reason": "import numpy", "num_tokens": 416}
|
from .utils import PyKEArgumentHelpFormatter
import time
import urllib.request
import sys
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from . import kepio
from . import kepmsg
from . import kepkey
__all__ = ['keptrim']
def keptrim(infile, column, row, imsize, outfile=None, kepid=None,
overwrite=False, verbose=False, logfile='keptrim.log'):
"""
keptrim -- trim pixels from Target Pixel Files
keptrim will extract a square-shaped series of sub-images from a Target
Pixel File. The simple purpose of this task is to reduce the size of large
data sets such as the superstamps or two-wheel engineering data for the
sake of processing efficiency. Performing a keptrim step speeds up
    calculations such as kepprfphot considerably and provides manual
convenience for tasks such as kepmask.
Parameters
----------
infile : str
Filename for the input Target Pixel File.
column : int
The CCD column number on which to center the output subimage.
row : int
The CCD row number on which to center the output subimage.
imsize : int
The pixel size of the subimage along either the row or column
dimension. The subimage will be square.
outfile : str
Filename for the output Target Pixel File. This product will be written
to the same FITS format as archived light curves.
kepid : None or int
If the target is catalogued within the Kepler Input Catalog (KIC), then
the pixel row and column location will be extracted from the KIC
provided the Kepler ID is provided. The user must be online for this
feature to execute. If provided kepid will override column and row.
overwrite : bool
Overwrite the output file?
verbose : bool
        Option for verbose mode, in which informative messages and warnings
        are printed to the shell and a logfile.
logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
.. code-block:: bash
$ keptrim ktwo251248961-c112_lpd-targ.fits 14 770 --imsize 3
--overwrite --verbose
.. image:: ../_static/images/api/keptrim.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPTRIM -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' column={}'.format(column)
+ ' row={}'.format(row)
+ ' imsize={}'.format(imsize)
+ ' kepid={}'.format(kepid)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPTRIM started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = 'ERROR -- KEPTRIM: {} exists. Use --overwrite'.format(outfile)
kepmsg.err(logfile, errmsg, verbose)
# open input file
instr = pyfits.open(infile, mode='readonly', memmap=True)
cards0 = instr[0].header.cards
cards1 = instr[1].header.cards
cards2 = instr[2].header.cards
# fudge non-compliant FITS keywords with no values
instr = kepkey.emptykeys(instr, infile, logfile, verbose)
# identify the season of observation
try:
season = cards0['SEASON'].value
except:
season = 0
    # retrieve column and row from the KIC; kepid overrides column and row
    if kepid is not None:
        try:
            kic = FOVKepID(str(kepid))
            column = int(kic[98 + season * 5])
            row = int(kic[97 + season * 5])
        except Exception:
            pass
    # force an odd-sized subimage so that it can be centered on (column, row)
    if imsize % 2 == 0:
        imsize += 1
    # convert CCD column and row to image column and row
crpix1p = cards2['CRPIX1P'].value
crpix2p = cards2['CRPIX2P'].value
crval1p = cards2['CRVAL1P'].value
crval2p = cards2['CRVAL2P'].value
cdelt1p = cards2['CDELT1P'].value
cdelt2p = cards2['CDELT2P'].value
imcol = (column - crval1p) * cdelt1p + crpix1p - 1
imrow = (row - crval2p) * cdelt2p + crpix2p - 1
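    # worked example (illustrative values): with column=14, crval1p=10,
    # cdelt1p=1 and crpix1p=1, imcol = (14 - 10) * 1 + 1 - 1 = 4, i.e. the
    # requested CCD column maps to the fifth (0-indexed) image column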
crval1p = column - imsize / 2 + 0.5
crval2p = row - imsize / 2 + 0.5
# check subimage is contained inside the input image
naxis1 = cards2['NAXIS1'].value
naxis2 = cards2['NAXIS2'].value
x1 = int(imcol - imsize // 2 + 0.5)
x2 = x1 + imsize
y1 = int(imrow - imsize // 2 + 0.5)
y2 = y1 + imsize
if x1 < 0 or y1 < 0 or x2 > naxis1 or y2 > naxis2:
errmsg = ('ERROR -- KEPTRIM: Requested pixel area falls outside of '
'the pixel image in file {}. Make the pixel area smaller '
                  'or relocate its center.'.format(infile))
kepmsg.err(logfile, errmsg, verbose)
# time series data
time = instr[1].data.field('TIME')[:]
timecorr = instr[1].data.field('TIMECORR')[:]
cadenceno = instr[1].data.field('CADENCENO')[:]
raw_cnts = instr[1].data.field('RAW_CNTS')[:]
flux = instr[1].data.field('FLUX')[:]
flux_err = instr[1].data.field('FLUX_ERR')[:]
flux_bkg = instr[1].data.field('FLUX_BKG')[:]
flux_bkg_err = instr[1].data.field('FLUX_BKG_ERR')[:]
cosmic_rays = instr[1].data.field('COSMIC_RAYS')[:]
quality = instr[1].data.field('QUALITY')[:]
pos_corr1 = instr[1].data.field('POS_CORR1')[:]
pos_corr2 = instr[1].data.field('POS_CORR2')[:]
# resize time series
raw_cnts = raw_cnts[:, y1:y2, x1:x2]
flux = flux[:, y1:y2, x1:x2]
flux_err = flux_err[:, y1:y2, x1:x2]
flux_bkg = flux_bkg[:, y1:y2, x1:x2]
flux_bkg_err = flux_bkg_err[:, y1:y2, x1:x2]
cosmic_rays = cosmic_rays[:, y1:y2, x1:x2]
# reshape time series images
isize = np.shape(flux)[0]
jsize = np.shape(flux)[1]
ksize = np.shape(flux)[2]
raw_cnts = np.reshape(raw_cnts, (isize, jsize * ksize))
flux = np.reshape(flux, (isize, jsize * ksize))
flux_err = np.reshape(flux_err, (isize, jsize * ksize))
flux_bkg = np.reshape(flux_bkg, (isize, jsize * ksize))
flux_bkg_err = np.reshape(flux_bkg_err, (isize, jsize * ksize))
cosmic_rays = np.reshape(cosmic_rays, (isize, jsize * ksize))
# pixel map data
maskmap = np.array(instr[2].data[y1:y2,x1:x2])
# construct output primary extension
hdu0 = pyfits.PrimaryHDU()
for i in range(len(cards0)):
try:
if cards0[i].keyword not in hdu0.header.keys():
hdu0.header[cards0[i].keyword] = (cards0[i].value, cards0[i].comment)
else:
hdu0.header.cards[cards0[i].keyword].comment = cards0[i].comment
except:
pass
kepkey.history(call, hdu0, outfile, logfile, verbose)
outstr = pyfits.HDUList(hdu0)
# construct output light curve extension
coldim = '(' + str(imsize) + ',' + str(imsize) + ')'
eformat = str(imsize*imsize) + 'E'
jformat = str(imsize*imsize) + 'J'
kformat = str(imsize*imsize) + 'K'
col1 = pyfits.Column(name='TIME', format='D', unit='BJD - 2454833',
array=time)
col2 = pyfits.Column(name='TIMECORR', format='E', unit='d',
array=timecorr)
col3 = pyfits.Column(name='CADENCENO', format='J', array=cadenceno)
col4 = pyfits.Column(name='RAW_CNTS', format=jformat, unit='count',
dim=coldim, array=raw_cnts)
col5 = pyfits.Column(name='FLUX', format=eformat, unit='e-/s', dim=coldim,
array=flux)
col6 = pyfits.Column(name='FLUX_ERR', format=eformat, unit='e-/s',
dim=coldim,array=flux_err)
col7 = pyfits.Column(name='FLUX_BKG', format=eformat, unit='e-/s',
dim=coldim,array=flux_bkg)
col8 = pyfits.Column(name='FLUX_BKG_ERR', format=eformat, unit='e-/s',
dim=coldim, array=flux_bkg_err)
col9 = pyfits.Column(name='COSMIC_RAYS', format=eformat,unit='e-/s',
dim=coldim, array=cosmic_rays)
col10 = pyfits.Column(name='QUALITY', format='J', array=quality)
col11 = pyfits.Column(name='POS_CORR1', format='E', unit='pixel',
array=pos_corr1)
col12 = pyfits.Column(name='POS_CORR2', format='E', unit='pixel',
array=pos_corr2)
cols = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8,
col9, col10, col11, col12])
hdu1 = pyfits.BinTableHDU.from_columns(cols)
for i in range(len(cards1)):
try:
if cards1[i].keyword not in hdu1.header.keys():
hdu1.header[cards1[i].keyword] = (cards1[i].value,
cards1[i].comment)
else:
hdu1.header.cards[cards1[i].keyword].comment = cards1[i].comment
except:
pass
hdu1.header['1CRV4P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV4P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX4'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX4'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV5P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV5P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX5'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX5'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV6P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV6P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX6'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX6'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV7P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV7P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX7'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX7'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV8P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV8P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX8'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX8'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
hdu1.header['1CRV9P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['2CRV9P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu1.header['1CRPX9'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu1.header['2CRPX9'] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
outstr.append(hdu1)
# construct output mask bitmap extension
hdu2 = pyfits.ImageHDU(maskmap)
for i in range(len(cards2)):
try:
if cards2[i].keyword not in hdu2.header.keys():
hdu2.header[cards2[i].keyword] = (cards2[i].value,
cards2[i].comment)
else:
hdu2.header.cards[cards2[i].keyword].comment = cards2[i].comment
except:
pass
hdu2.header['NAXIS1' ] = (imsize, '')
hdu2.header['NAXIS2' ] = (imsize, '')
hdu2.header['CRVAL1P'] = (crval1p,
'[pixel] detector coordinate at reference pixel')
hdu2.header['CRVAL2P'] = (crval2p,
'[pixel] detector coordinate at reference pixel')
hdu2.header['CRPIX1' ] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 1')
hdu2.header['CRPIX2' ] = ((imsize + 1) / 2,
'[pixel] reference pixel along image axis 2')
outstr.append(hdu2)
# write output file
print("Writing output file {}...".format(outfile))
outstr.writeto(outfile,checksum=True)
# close input structure
instr.close()
# end time
kepmsg.clock('KEPTRIM finished at', logfile, verbose)
def FOVKepID(id):
"""KIC retrieval based upon KepID"""
# build mast query
url = ('http://archive.stsci.edu/kepler/kepler_fov/search.php?'
'action=Search&kic_kepler_id={}'.format(id) + '&max_records=100'
'&verb=3&outputformat=CSV')
    # retrieve results from MAST (urlopen yields bytes under Python 3)
    out = ''
    lines = urllib.request.urlopen(url)
    for line in lines:
        line = line.decode('utf-8').strip()
        if (len(line) > 0
            and 'Kepler' not in line
            and 'integer' not in line
            and 'no rows found' not in line):
            out = line.split(',')
return out
def keptrim_main():
import argparse
parser = argparse.ArgumentParser(
description='Trim unwanted pixels from a Target Pixel File',
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input target pixel file',
type=str)
parser.add_argument('column', help='CCD column number of the target',
type=int)
parser.add_argument('row', help='CCD row number of the target', type=int)
parser.add_argument('imsize',
help=('Number of pixels to extract in both row and'
' column dimensions'), type=int)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-keptrim.'),
default=None)
parser.add_argument('--kepid', type=int,
help='Kepler ID number from the Kepler Input Catalog')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='keptrim.log', type=str)
args = parser.parse_args()
keptrim(args.infile, args.column, args.row, args.imsize, args.outfile,
args.kepid, args.overwrite, args.verbose, args.logfile)
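# --- usage sketch (illustrative; mirrors the docstring example above) ---
# The same trim can be run from Python instead of the command line:
#
#     from pyke.keptrim import keptrim
#     keptrim('ktwo251248961-c112_lpd-targ.fits', column=14, row=770, imsize=3,
#             overwrite=True, verbose=True)
#
# which, with outfile=None, writes ktwo251248961-c112_lpd-targ-keptrim.fits.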
|
{"hexsha": "af0c43e33edce50edaa4662296dc07823720be7a", "size": 15381, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyke/keptrim.py", "max_stars_repo_name": "ecalifornica/pyke", "max_stars_repo_head_hexsha": "6a3fcc0513cf012044e4420cc4d17064e582d142", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyke/keptrim.py", "max_issues_repo_name": "ecalifornica/pyke", "max_issues_repo_head_hexsha": "6a3fcc0513cf012044e4420cc4d17064e582d142", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-07-25T19:23:05.000Z", "max_issues_repo_issues_event_max_datetime": "2017-07-25T19:23:05.000Z", "max_forks_repo_path": "pyke/keptrim.py", "max_forks_repo_name": "mirca/PyKE", "max_forks_repo_head_hexsha": "6a3fcc0513cf012044e4420cc4d17064e582d142", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.9636871508, "max_line_length": 85, "alphanum_fraction": 0.5744099863, "include": true, "reason": "import numpy,from astropy", "num_tokens": 4152}
|
# Various chirp preprocessing functions to highlight certain features or
# provide the input that Tensorflow expects.
import numpy as np
def raw_normalize(data): # Minmax normalization
normalized_data = (data - data.min(0)) / data.ptp(0)
return normalized_data
def normalize(data): # z-score normalization
normalized_data = (data - np.mean(data)) / np.std(data)
return normalized_data
def ifreq(data, sampling_freq):
instantaneous_phase = np.unwrap(np.angle(data))
instantaneous_frequency = (np.diff(instantaneous_phase) / (2.0 * np.pi) * sampling_freq)
normalized_instantaneous_frequency = normalize(instantaneous_frequency)
# Append the last value of the ifreq to the array, so that the size of the tensor stays a power of 2.
normalized_instantaneous_frequency = np.append(normalized_instantaneous_frequency, normalized_instantaneous_frequency[-1])
return normalized_instantaneous_frequency
def ifreq_nonorm(data, sampling_freq):
instantaneous_phase = np.unwrap(np.angle(data))
instantaneous_frequency = (np.diff(instantaneous_phase) / (2.0 * np.pi) * sampling_freq)
return instantaneous_frequency
def iphase(data):
instantaneous_phase = np.unwrap(np.angle(data))
normalized_instantaneous_phase = normalize(instantaneous_phase)
return normalized_instantaneous_phase
def iphase_nonorm(data):
instantaneous_phase = np.unwrap(np.angle(data))
return instantaneous_phase
def iphase_nonorm_wrapped(data):
instantaneous_phase = np.angle(data)
return instantaneous_phase
def iamp(data):
instantaneous_amplitude = np.abs(data)
normalized_instantaneous_amplitude = normalize(instantaneous_amplitude)
return normalized_instantaneous_amplitude
def fft(data):
fourier = np.fft.fft(data)
fourier_m = np.abs(fourier)
fourier_mnorm = normalize(fourier_m)
return fourier_mnorm
def _gradient_absolute(chirp, sample_rate):
instantaneous_phase = np.unwrap(np.angle(chirp))
instantaneous_frequency = (np.diff(instantaneous_phase) / (2.0 * np.pi) * sample_rate)
gradient = np.gradient(instantaneous_frequency)
return gradient
def _gradient(chirp, sample_rate=0):
instantaneous_phase = np.unwrap(np.angle(chirp))
instantaneous_frequency = np.diff(instantaneous_phase) # Constant multiplications can be dropped
gradient = np.gradient(instantaneous_frequency)
return gradient
def roll_to_base_old(chirp, sample_rate, sf=7):
g = _gradient(chirp, sample_rate)
to_roll = -np.argmin(g)
return np.roll(chirp, to_roll)
def roll_to_base(chirp, sf=7):
    # Find out the number of samples per chip
    num_chips = 2**sf
    num_samples = len(chirp)
    # Integer division: slice indices and np.roll shifts must be ints
    num_samples_per_chip = num_samples // num_chips
    # Calculate gradient
    g = _gradient(chirp)
    # Decimate gradient according to samples per chip
    n = num_samples_per_chip
    g_decim = [np.mean(g[i:i+n]) for i in range(0, len(g), n)]
    assert(len(g_decim) == num_samples // num_samples_per_chip)
    # Get the minimum gradient chip
    g_chip_min = np.argmin(g_decim)
    #print("G: " + str(min(g_decim)) + " (" + str(g_chip_min) + ")")
    # Roll to base
    to_roll = -(g_chip_min * num_samples_per_chip)
    return np.roll(chirp, to_roll)
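# --- self-test sketch (not part of the original module; parameters are assumed) ---
# Builds a synthetic complex up-chirp and checks that ifreq() reports a rising
# instantaneous frequency; only numpy, already imported above, is needed.
if __name__ == '__main__':
    fs = 125000.0                               # assumed sample rate
    t = np.arange(1024) / fs
    k = (fs / 4) / t[-1]                        # sweep ~0..fs/4 (below Nyquist)
    chirp = np.exp(1j * np.pi * k * t ** 2)     # instantaneous frequency = k * t
    f = ifreq(chirp, fs)
    print(f.shape, bool(f[0] < f[-1]))          # (1024,) True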
|
{"hexsha": "a27efaf20a44f2276599b3c5f9bd8298f2d22b19", "size": 3259, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing.py", "max_stars_repo_name": "rpp0/lora-phy-fingerprinting", "max_stars_repo_head_hexsha": "e53cab65472b60ae66d79a7ce38f30b0929d6178", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2017-06-21T16:01:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T09:57:48.000Z", "max_issues_repo_path": "preprocessing.py", "max_issues_repo_name": "cx869/lora-phy-fingerprinting", "max_issues_repo_head_hexsha": "e53cab65472b60ae66d79a7ce38f30b0929d6178", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-10-15T16:22:45.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-15T16:22:45.000Z", "max_forks_repo_path": "preprocessing.py", "max_forks_repo_name": "cx869/lora-phy-fingerprinting", "max_forks_repo_head_hexsha": "e53cab65472b60ae66d79a7ce38f30b0929d6178", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-06-21T16:02:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-03T09:16:28.000Z", "avg_line_length": 32.2673267327, "max_line_length": 126, "alphanum_fraction": 0.7397974839, "include": true, "reason": "import numpy", "num_tokens": 797}
|
/*
@copyright Louis Dionne 2015
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#include <boost/hana/assert.hpp>
#include <boost/hana/bool.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/ext/std/integral_constant.hpp>
#include <boost/hana/functional.hpp>
#include <boost/hana/integral_constant.hpp>
#include <boost/hana/tuple.hpp>
#include <boost/hana/type.hpp>
#include <type_traits>
#include <vector>
using namespace boost::hana;
int main() {
{
//! [and_]
BOOST_HANA_CONSTANT_CHECK(and_(true_, true_, true_, true_));
BOOST_HANA_CONSTEXPR_CHECK(!and_(true_, false, true_, true_));
//! [and_]
}{
//! [or_]
BOOST_HANA_CONSTANT_CHECK(or_(false_, false_, true_));
BOOST_HANA_CONSTANT_CHECK(!or_(false_, false_, false_));
//! [or_]
}{
//! [if_]
BOOST_HANA_CONSTEXPR_CHECK(if_(true, 1, 2) == 1);
BOOST_HANA_CONSTEXPR_CHECK(if_(false, 1, 2) == 2);
BOOST_HANA_CONSTEXPR_CHECK(
if_(true_,
make<Tuple>('t', 'r', 'u', 'e'),
make<Tuple>('f', 'a', 'l', 's', 'e')
)
==
make<Tuple>('t', 'r', 'u', 'e')
);
//! [if_]
}{
//! [not_]
BOOST_HANA_CONSTANT_CHECK(not_(true_) == false_);
BOOST_HANA_CONSTEXPR_CHECK(not_(false) == true);
//! [not_]
}{
//! [heterogeneous_eval_if]
BOOST_HANA_CONSTEXPR_LAMBDA auto safe_make_unsigned = [](auto t) {
return eval_if(trait<std::is_integral>(t),
[=](auto id) { return id(template_<std::make_unsigned_t>)(t); },
always(t)
);
};
BOOST_HANA_CONSTANT_CHECK(safe_make_unsigned(type<void>) == type<void>);
BOOST_HANA_CONSTANT_CHECK(safe_make_unsigned(type<int>) == type<unsigned int>);
//! [heterogeneous_eval_if]
(void)safe_make_unsigned;
//! [homogeneous_eval_if]
BOOST_HANA_CONSTEXPR_LAMBDA auto safe_divide = [](auto x, auto y) {
return eval_if(y == 0,
[=](auto) { return 0; },
[=](auto id) { return id(x) / y; }
);
};
BOOST_HANA_CONSTEXPR_CHECK(safe_divide(6, 3) == 2);
BOOST_HANA_CONSTEXPR_CHECK(safe_divide(6, 0) == 0);
//! [homogeneous_eval_if]
}{
//! [homogeneous_while]
std::vector<int> ints;
int final_state = while_(_ < 10, 0, [&](int i) {
ints.push_back(i);
return i + 1;
});
// The state is known only at runtime
BOOST_HANA_RUNTIME_CHECK(final_state == 10);
BOOST_HANA_RUNTIME_CHECK(ints == std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
//! [homogeneous_while]
}{
//! [heterogeneous_while]
using namespace literals;
std::vector<int> ints;
auto final_state = while_(_ < 10_c, 0_c, [&](auto i) {
ints.push_back(i);
return i + 1_c;
});
// The state is known at compile-time
BOOST_HANA_CONSTANT_CHECK(final_state == 10_c);
BOOST_HANA_RUNTIME_CHECK(ints == std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
//! [heterogeneous_while]
}{
//! [homogeneous_until]
std::vector<int> ints;
int final_state = until(_ == 10, 0, [&](int i) {
ints.push_back(i);
return i + 1;
});
// The state is known only at runtime
BOOST_HANA_RUNTIME_CHECK(final_state == 10);
BOOST_HANA_RUNTIME_CHECK(ints == std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
//! [homogeneous_until]
}{
//! [heterogeneous_until]
using namespace literals;
std::vector<int> ints;
auto final_state = until(_ == 10_c, 0_c, [&](auto i) {
ints.push_back(i);
return i + 1_c;
});
// The state is known at compile-time
BOOST_HANA_CONSTANT_CHECK(final_state == 10_c);
BOOST_HANA_RUNTIME_CHECK(ints == std::vector<int>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
//! [heterogeneous_until]
}
}
|
{"hexsha": "24519a3156065440f0041eb88874730c44d58a82", "size": 3506, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "example/logical.cpp", "max_stars_repo_name": "josephwinston/hana", "max_stars_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/logical.cpp", "max_issues_repo_name": "josephwinston/hana", "max_issues_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/logical.cpp", "max_forks_repo_name": "josephwinston/hana", "max_forks_repo_head_hexsha": "a8586ec1812e14e43dfd6867209412aa1d254e1a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0657894737, "max_line_length": 81, "alphanum_fraction": 0.6657159156, "num_tokens": 1086}
|
# cython: language_level=3
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2016-2020, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pytest
import dpnp
import numpy as np
ROUNDS = 30        # benchmark rounds per test case
ITERATIONS = 4     # function calls per round
NNUMBERS = 2**26   # number of random samples drawn per call
@pytest.mark.parametrize("function", [dpnp.random.beta, np.random.beta],
ids=["dpnp", "numpy"])
def test_beta(benchmark, function):
result = benchmark.pedantic(target=function, args=(4.0, 5.0, NNUMBERS,),
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("function", [dpnp.random.exponential, np.random.exponential],
ids=["dpnp", "numpy"])
def test_exponential(benchmark, function):
result = benchmark.pedantic(target=function, args=(4.0, NNUMBERS,),
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("function", [dpnp.random.gamma, np.random.gamma],
ids=["dpnp", "numpy"])
def test_gamma(benchmark, function):
result = benchmark.pedantic(target=function, args=(2.0, 4.0, NNUMBERS,),
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("function", [dpnp.random.normal, np.random.normal],
ids=["dpnp", "numpy"])
def test_normal(benchmark, function):
result = benchmark.pedantic(target=function, args=(0.0, 1.0, NNUMBERS,),
rounds=ROUNDS, iterations=ITERATIONS)
@pytest.mark.parametrize("function", [dpnp.random.uniform, np.random.uniform],
ids=["dpnp", "numpy"])
def test_uniform(benchmark, function):
result = benchmark.pedantic(target=function, args=(0.0, 1.0, NNUMBERS,),
rounds=ROUNDS, iterations=ITERATIONS)
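# Run note (assumed invocation; requires the pytest-benchmark plugin used above):
#   pytest benchmarks/pytest_benchmark/test_random.py
# Each parametrized case draws 2**26 samples per call, timed as ITERATIONS=4
# calls per round over ROUNDS=30 rounds via benchmark.pedantic.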
|
{"hexsha": "61f46f95ba417fc44d53e97950ea0250fb4bc3ab", "size": 3286, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmarks/pytest_benchmark/test_random.py", "max_stars_repo_name": "LukichevaPolina/dpnp", "max_stars_repo_head_hexsha": "5f5a679905d237ac7be1cc9ad1075877a9f77e39", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2020-09-08T00:38:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T01:44:10.000Z", "max_issues_repo_path": "benchmarks/pytest_benchmark/test_random.py", "max_issues_repo_name": "LukichevaPolina/dpnp", "max_issues_repo_head_hexsha": "5f5a679905d237ac7be1cc9ad1075877a9f77e39", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 432, "max_issues_repo_issues_event_min_datetime": "2020-09-07T09:48:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T17:50:55.000Z", "max_forks_repo_path": "benchmarks/pytest_benchmark/test_random.py", "max_forks_repo_name": "LukichevaPolina/dpnp", "max_forks_repo_head_hexsha": "5f5a679905d237ac7be1cc9ad1075877a9f77e39", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2020-09-07T10:00:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T13:53:43.000Z", "avg_line_length": 45.0136986301, "max_line_length": 87, "alphanum_fraction": 0.6393791844, "include": true, "reason": "import numpy", "num_tokens": 684}
|
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
import cvmodel
import math
import os
import utils
tf.disable_v2_behavior()
class classifier:
def __init__(self):
self.img_path = './data/images'
self.anno_path = './data/annotations'
self.ft_path = './feature_maps/'
self.model_path = './checkpoint/'
self.model_name = 'segmentation.ckpt-285'
self.model = os.path.join(self.model_path, self.model_name)
# Parameters
self.depth = 7
self.classes = 1
self.img_size = 32
# Placeholders
self.x = tf.placeholder(tf.float32, shape=[None, None, None, self.depth], name='input')
self.y_true = tf.placeholder(tf.float32, shape=[None, None, None, self.classes], name='y_true')
self.rate = tf.placeholder(tf.float32, name='dropout_rate')
self.is_training = tf.placeholder(tf.bool, shape=())
# Build network
self.y01 = cvmodel.build_model(input=self.x,
drop_rate=0,
is_training=False)
# Calculate loss + f1
self.cost_reg, self.f1_vec, self.recall, \
self.precision, self.specificity, self.accuracy = utils.loss(logits=[self.y01],
labels=self.y_true,
classes_weights=[2.])
# Open session and restore model
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.model)
# Load data
self.img_names = utils.load_train(path=self.img_path)
self.anno_names = utils.load_train(path=self.anno_path)
self.imgs_ = utils.get_image_array(self.img_names, self.img_size)
self.annos_ = utils.get_annotation_array(self.anno_names, self.img_size)
n = self.imgs_.shape[0]
print('\nNumber of images:', n)
# Get number of trainable variables
v_nb = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
print('Number of trainable variables:', v_nb)
def predict(self, N, intv, show_avg=True, show_pgr=True):
"""Get prediction metrics to evaluate how pruning influences the model performance
:param N: number of inputs
:param intv: display progression at given interval
:param show_avg: display average metrics
:param show_pgr: display step metrics"""
avg_loss, avg_rec, avg_prec, avg_spec = 0., 0., 0., 0.
avg_f1 = np.zeros((self.annos_.shape[-1],))
for i in range(N):
feed_dict_tr = {self.x: np.expand_dims(self.imgs_[i], axis=0),
self.y_true: np.expand_dims(self.annos_[i], axis=0),
self.rate: 0.,
self.is_training: False}
loss_ = self.sess.run(self.cost_reg, feed_dict=feed_dict_tr)
f1_ = self.sess.run(self.f1_vec, feed_dict=feed_dict_tr)
rec_ = self.sess.run(self.recall, feed_dict=feed_dict_tr)
prec_ = self.sess.run(self.precision, feed_dict=feed_dict_tr)
spec_ = self.sess.run(self.specificity, feed_dict=feed_dict_tr)
avg_loss += loss_ / N
avg_f1 += f1_ / N
avg_rec += rec_ / N
avg_prec += prec_ / N
avg_spec += spec_ / N
if i % intv == 0:
if show_pgr is True:
utils.show_progress('i '+str(i), loss_, utils.array_to_text(f1_, 3), rec_, prec_, spec_, True)
# convert f1 vector to text
avg_f1_txt = utils.array_to_text(avg_f1, 3)
if show_avg is True:
utils.show_progress('Avg. results', avg_loss, avg_f1_txt, avg_rec, avg_prec, avg_spec, True)
return avg_loss, avg_f1
class pruning_system:
    def __init__(self, model):
        # the classifier instance whose graph, session and data we prune against
        self.mdl = model
def getActivations(self, layer, input, softmax=False):
"""Gets the activations at a given layer for a given input image
:param layer: layer name
:param input: input images to the model
:param softmax: condition to apply softmax"""
if softmax is True:
layer = tf.nn.softmax(layer)
return self.mdl.sess.run(layer, feed_dict={self.mdl.x: input,
self.mdl.rate: 0.,
self.mdl.is_training: False})
def plot_features(self, units, clmns, fig_size, img_name, save, show):
"""Plots activations in a grid
:param units: feature maps considered
:param clmns: number of columns
:param fig_size: figure size
:param img_name: output image name
:param save: save results
:param show: show results"""
filters = units.shape[3] # get number of filters used
# Define plotting grid
fig = plt.figure(figsize=(fig_size, fig_size))
n_columns = clmns
n_rows = math.ceil(filters / n_columns) + 1
# define output name
output_name = os.path.join(os.path.abspath(self.mdl.ft_path), img_name)
plt.axis('off')
plt.title(img_name)
for i in range(filters):
fig.add_subplot(n_rows, n_columns, i + 1)
plt.gca().axes.get_yaxis().set_visible(False)
plt.gca().axes.get_xaxis().set_visible(False)
plt.imshow(units[0, :, :, i], interpolation="nearest", cmap="jet")
if show is True:
plt.show()
if save is True:
plt.savefig(output_name)
print(img_name + ' saved.')
def check_layer(self, n_examples, layer_name, activation='Relu', index=':0', save=False, show=True):
"""Check feature maps of given layer
:param n_examples: number of input examples whose feature maps are displayed
:param layer_name: name of the layer considered from the model
:param activation: activation name
:param index: index name
:param save: save results
:param show: show results
"""
# Check layer for given blob inputs
op_name = layer_name
tensor_name = op_name + '/' + activation + index
layer = self.mdl.sess.graph.get_tensor_by_name(tensor_name)
for i in range(n_examples):
units = self.getActivations(layer=layer,
input=np.expand_dims(self.mdl.imgs_[i], axis=0),
softmax=False)
# get image number
nb = self.mdl.img_names[i][0].split('_')[-1].split('.')[0]
img_name = op_name + '_input-' + nb
self.plot_features(units, 10, 20, img_name=img_name, save=save, show=show)
    def get_thresholdIndex(self, x, threshold=0.99):
        """Return the index at which the cumulative sum of the sorted vector x
        reaches the threshold; indices beyond it are candidates for pruning."""
value = 0.
for i in range(len(x)):
value += x[i]
if value >= threshold:
print('Threshold index: ', i)
return i
    def norm(self, x):
        """Shift-and-scale normalization: (x - min(x)) / (sum(x) - min(x))."""
        return (x - min(x)) / (np.sum(x) - min(x))
def pruning(self, layer_names, epsilon=0.005, activation='Relu', index=':0'):
"""Prune filters and check the model accuracy recursively
if change in acc. is > epsilon, then keep the last pruned filter.
:param layer_names: conv layer names (list)
:param epsilon: the maximum accuracy change accepted for the model while pruning
:param activation: activation function considered
:param index: tensor index"""
# get metrics
print('\nInitial prediction metrics:')
_, f1 = self.mdl.predict(N=self.mdl.imgs_.shape[0], intv=25, show_avg=True, show_pgr=False)
count = 0
current_acc = f1[0] # used to monitor accuracy change of prediction while pruning
tokeep_indexes = [] # list filter indices not pruned
while count < len(layer_names):
# (1) get the layer considered from the graph
print('----------------------')
print('Input: {}'.format(layer_names[count]))
tensor_name = layer_names[count] + '/' + activation + index
layer = self.mdl.sess.graph.get_tensor_by_name(tensor_name)
# (2) calculate eigenvector over many input examples N
cnt = 0
vects = []
N = self.mdl.imgs_.shape[0]
print('Computing SVDs and pruning indices...')
for i in range(N):
if i == cnt * int(N / 4):
print(' Processing input...', i)
cnt += 1
# get activation units
                # note: getActivations takes softmax as its third argument
                units = self.getActivations(layer,
                                            np.expand_dims(self.mdl.imgs_[i], axis=0),
                                            softmax=False)
k_sz = units.shape[1] # kernel size
f_sz = units.shape[-1] # filter size
# reshape into a 2D (non-squared matrix)
M = units.reshape((k_sz * k_sz, f_sz)).T
n = M.shape[0]
# calculate singular value decomposition (SVD)
# U columns = singular left vectors
U, Sigma, Vh = np.linalg.svd(M, full_matrices=False, compute_uv=True)
# take first vector
vect = U[:, 0]
# numerate the vector
ind = np.arange(n)
vect_ = np.vstack((ind, vect)).T
# and sort it given its singular values
vect_ = vect_[np.abs(vect_[:, 1]).argsort()][::-1]
# remove negative sign for normalization
vect_n = np.abs(vect_[:, 1])
vect_n = self.norm(vect_n)
vects.append(vect_n)
# take the mean of normalized SV vectors and re-normalize
vects_mn = np.mean(np.array(vects), axis=0)
vects_mn = self.norm(vects_mn)
# (3) get the index of filter beyond which indexes of other filters are pruned
threshIndex = self.get_thresholdIndex(x=vects_mn, threshold=0.99)
pruning_indexes = vect_[threshIndex + 1:, 0].astype(np.uint32)
# (4) Create a mask of 1s and 0s and apply it to the layer's kernel
# get shape of kernel to prune
op_name = 'kernel'
tensor_name = layer_names[count] + '/' + op_name + index
kernel_ = [v for v in tf.global_variables() if v.name == tensor_name][0]
# get a copy of kernel to reset it in the network if needed
kernel_ident = self.mdl.sess.run(kernel_)
            # recursive pruning to reach epsilon
            skip = 0  # counts filters spared because pruning them changed acc. > epsilon
            while True:
                # create the kernel mask (1s) and put 0s where pruned
                mask = np.ones(shape=list(kernel_.shape))  # (h,l,channels,indices)
                mask[:, :, :, pruning_indexes] = 0.
                # apply the mask to prune filters from the layer
                _ = self.mdl.sess.run(tf.assign(kernel_, tf.multiply(kernel_, mask)))
                # (5) get prediction results to check the change in accuracy after pruning
                _, f1 = self.mdl.predict(N=self.mdl.imgs_.shape[0], intv=10, show_avg=False, show_pgr=False)
                change_acc = np.abs(current_acc - f1[0])
                if skip < 1:
                    print('Current | new accuracy: {0:.4f} | {1:.4f} --- '
                          'difference: {2:.4f}'.format(current_acc, f1[0], change_acc))
                # if the accuracy change is more than epsilon, revert and retry
                if change_acc > epsilon:
                    print('    Change in accuracy ({0:.5f}) exceeded. '
                          'Re-iterating the pruning procedure...'.format(change_acc))
                    # reset the kernel, keep the baseline accuracy unchanged, and
                    # spare the most significant filter still marked for pruning
                    _ = self.mdl.sess.run(tf.assign(kernel_, kernel_ident))
                    pruning_indexes = pruning_indexes[1:]
                    skip += 1
                else:
                    # accept the pruned kernel and update the baseline accuracy
                    current_acc = f1[0]
                    print('Final accuracy: {0:.5f}'.format(current_acc))
                    print('Pruned filters: ', sorted(pruning_indexes))
                    break
print('\n')
# get indexes of filters not pruned
tokeep_indexes.append(list(set(ind) - set(pruning_indexes)))
count += 1
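# --- usage sketch (hypothetical; layer names depend on the restored checkpoint) ---
# clf = classifier()
# pruner = pruning_system(clf)
# pruner.check_layer(n_examples=1, layer_name='conv1', save=True, show=False)
# pruner.pruning(layer_names=['conv1', 'conv2'], epsilon=0.005)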
|
{"hexsha": "d3649eaa74f278f9a8c05979b76481abdab7d104", "size": 13238, "ext": "py", "lang": "Python", "max_stars_repo_path": "pruning.py", "max_stars_repo_name": "cjuliani/tf-cnn-pruning", "max_stars_repo_head_hexsha": "50514d1575d3121a9548b435c3e911783508234c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-24T11:19:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-24T11:19:16.000Z", "max_issues_repo_path": "pruning.py", "max_issues_repo_name": "cjuliani/tf-cnn-pruning", "max_issues_repo_head_hexsha": "50514d1575d3121a9548b435c3e911783508234c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pruning.py", "max_forks_repo_name": "cjuliani/tf-cnn-pruning", "max_forks_repo_head_hexsha": "50514d1575d3121a9548b435c3e911783508234c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5723905724, "max_line_length": 115, "alphanum_fraction": 0.5512917359, "include": true, "reason": "import numpy", "num_tokens": 2926}
|
# adapted from https://stackoverflow.com/a/3753428/11126567
import sys
from PIL import Image
import numpy as np
# load the image given on the command line and ensure an alpha channel
im = Image.open(sys.argv[1])
im = im.convert('RGBA')
# the 15 non-black PICO-8 palette colours (colour 0, black, is left as-is)
colors = [
[29,43,83],
[126,37,83],
[0,135,81],
[171,82,54],
[95,87,79],
[194,195,199],
[255,241,232],
[255,0,77],
[255,163,0],
[255,236,39],
[0,228,54],
[41,173,255],
[131,118,156],
[255,119,168],
[255,204,170]
]
data = np.array(im)
# remap each palette colour to a grey level: 16, 32, ..., 240
i = 16
r, g, b, a = data.T
for x in colors:
replacearea = (r == x[0]) & (g == x[1]) & (b == x[2])
data[..., :-1][replacearea.T] = (i, i, i)
i = i + 16
im2 = Image.fromarray(data)
im2.save("tex.png")
|
{"hexsha": "d579831cfdf26b22f5d9d30ecc4b414c961973aa", "size": 675, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/maketex.py", "max_stars_repo_name": "DPS2004/picolove-hd", "max_stars_repo_head_hexsha": "a8b5d927e2cb1d9d7cb62b6abf801614c8187fc5", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/maketex.py", "max_issues_repo_name": "DPS2004/picolove-hd", "max_issues_repo_head_hexsha": "a8b5d927e2cb1d9d7cb62b6abf801614c8187fc5", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/maketex.py", "max_forks_repo_name": "DPS2004/picolove-hd", "max_forks_repo_head_hexsha": "a8b5d927e2cb1d9d7cb62b6abf801614c8187fc5", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.2432432432, "max_line_length": 59, "alphanum_fraction": 0.5288888889, "include": true, "reason": "import numpy", "num_tokens": 265}
|
import numpy
from bfio.bfio import BioReader
from bfio.bfio import BioWriter
from skimage import restoration
from skimage import util
# The number of pixels to be saved at a time must be a multiple of 1024.
TILE_SIZE = 1024
def _rolling_ball(tile, ball_radius: int, light_background: bool):
""" Applies the rolling-ball algorithm to a single tile.
Args:
tile: A tile, usually from an ome.tif file.
ball_radius: The radius of the ball to use for calculating the background.
light_background: Whether the image has a light background.
Returns:
An image with its background subtracted away.
"""
# Get the shape of the original image, so we can reshape the result at the end.
shape = numpy.shape(tile)
# squeeze the image into a 2-d array
tile = numpy.squeeze(tile)
# invert the image if it has a light background
if light_background:
tile = util.invert(tile)
# use the rolling ball algorithm to calculate the background and subtract it from the image.
background = restoration.rolling_ball(tile, radius=ball_radius)
tile = tile - background
# if the image had a light backend, invert the result.
result = util.invert(tile) if light_background else tile
result = numpy.reshape(result, shape)
return result
def _bounds(x, x_max, ball_radius):
    """ Calculates the indices for handling the edges of tiles.
    We pad each tile with 'ball_radius' pixels from the full image along the
    top, bottom, left, and right edges of each tile. Returns the tile's end
    index in the full image, the padded read bounds, and the slice bounds
    for extracting the unpadded tile from the padded result.
    """
    row_max = min(x_max, x + TILE_SIZE)
    pad_left = max(0, x - ball_radius)
    pad_right = min(x_max, row_max + ball_radius)
    tile_left = 0 if x == 0 else ball_radius
    tile_right = min(x_max, tile_left + TILE_SIZE)
    return row_max, pad_left, pad_right, tile_left, tile_right
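# Worked example (illustrative numbers): with TILE_SIZE=1024, x=1024,
# x_max=1500 and ball_radius=50, _bounds yields row_max=1500, pad_left=974,
# pad_right=1500, tile_left=50 and tile_right=min(1500, 50 + 1024)=1074;
# the final slice is clipped by numpy to the padded tile's actual width.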
def rolling_ball(
reader: BioReader,
writer: BioWriter,
ball_radius: int,
light_background: bool,
):
""" Applies the rolling-ball algorithm from skimage to perform background subtraction.
This function processes the image in tiles and, therefore, scales to images of any size.
It writes the resulting image to the given BioWriter object.
Args:
reader: BioReader object from which to read the image.
writer: BioWriter object to which to write the image.
ball_radius: The radius of the ball to use for calculating the background.
This should be greater than the radii of relevant objects in the image.
light_background: Whether the image has a light background.
"""
for z in range(reader.Z):
for y in range(0, reader.Y, TILE_SIZE):
y_max, pad_top, pad_bottom, tile_top, tile_bottom = _bounds(y, reader.Y, ball_radius)
for x in range(0, reader.X, TILE_SIZE):
x_max, pad_left, pad_right, tile_left, tile_right = _bounds(x, reader.X, ball_radius)
tile = reader[pad_top:pad_bottom, pad_left:pad_right, z:z + 1, 0, 0]
result = _rolling_ball(tile, ball_radius, light_background)
writer[y:y_max, x:x_max, z:z + 1, 0, 0] = result[tile_top:tile_bottom, tile_left:tile_right]
return
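# --- usage sketch (hypothetical paths; bfio call signatures may differ by version) ---
# with BioReader('input.ome.tif') as reader, \
#         BioWriter('output.ome.tif', metadata=reader.metadata) as writer:
#     rolling_ball(reader, writer, ball_radius=25, light_background=False)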
|
{"hexsha": "e1789d38bd2da4f1d4c95814acf22913c4f8879f", "size": 3239, "ext": "py", "lang": "Python", "max_stars_repo_path": "transforms/images/polus-rolling-ball-plugin/src/rolling_ball.py", "max_stars_repo_name": "mmvih/polus-plugins", "max_stars_repo_head_hexsha": "c424938e3f35900758f7d74f3dfec2adfb3228fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-11-28T12:56:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T23:30:10.000Z", "max_issues_repo_path": "transforms/images/polus-rolling-ball-plugin/src/rolling_ball.py", "max_issues_repo_name": "mmvih/polus-plugins", "max_issues_repo_head_hexsha": "c424938e3f35900758f7d74f3dfec2adfb3228fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 74, "max_issues_repo_issues_event_min_datetime": "2019-09-23T16:30:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-21T20:49:45.000Z", "max_forks_repo_path": "transforms/images/polus-rolling-ball-plugin/src/rolling_ball.py", "max_forks_repo_name": "mmvih/polus-plugins", "max_forks_repo_head_hexsha": "c424938e3f35900758f7d74f3dfec2adfb3228fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2019-09-04T19:45:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T17:16:07.000Z", "avg_line_length": 36.393258427, "max_line_length": 108, "alphanum_fraction": 0.6853967274, "include": true, "reason": "import numpy", "num_tokens": 766}
|
import inspect
import uuid
from typing import Optional, Generic, TypeVar, List, Iterator, Callable, Tuple
from collections import deque
T = TypeVar('T')
class Node(Generic[T]):
value: T
parent: Optional['NodeType'] = None
children: Optional[List['NodeType']] = None
def __init__(self, value: T = None, parent: Optional['NodeType'] = None, children: Optional[List['NodeType']] = None):
self.value = value
self.children = children or []
self.parent = parent
if parent is not None:
parent.children.append(self)
if children is not None:
for child in children:
child.parent = self
self._uid = str(uuid.uuid4())
def __getitem__(self, item: int):
return self.children[item]
def merge(self, other: 'NodeType'):
self.value = other.value
def path(self):
n = self
path = ''
while n.parent:
index = n.parent.children.index(n)
n = n.parent
path = str(index) + '.' + path if path else str(index)
return '$.' + path if path else '$'
def push_above(self, node: 'NodeType'):
if node.parent:
node.parent.children = [c for c in node.parent.children if c != node] + [self]
self.parent = node.parent
node.parent = self
self.children.append(node)
def push_under(self, node: 'NodeType'):
self.set_children(node.children)
self.parent = node
node.children = [self]
def detach_parent(self) -> Optional['NodeType']:
p = self.parent
if p is not None:
self.parent = None
p.children = [c for c in p.children if c != self]
return p
def detach_children(self) -> List['NodeType']:
detached = []
if self.children:
for c in self.children:
c.parent = None
detached.append(c)
self.children = []
return detached
def detach(self) -> List['NodeType']:
parent = self.parent
index_in_parent = -1
if parent is not None:
self.parent = None
index_in_parent = parent.children.index(self)
parent.children = [c for c in parent.children if c != self]
children = self.detach_children()
if parent is not None:
for c in children:
parent.children.insert(index_in_parent, c)
c.parent = parent
return children
def set_parent(self, parent: 'NodeType'):
if self.parent:
self.parent.children = [c for c in self.parent.children if c != self]
self.parent = parent
if self not in parent.children:
parent.children.append(self)
def set_children(self, children: List['NodeType']):
for child in children:
if child.parent is not None:
child.parent.children = [c for c in child.parent.children if c != child]
child.parent = self
self.children = children
def reorder_children(self, new_indices: List[int]):
self.children = [self.children[i] for i in new_indices]
def intersection(self, other, exclude_self=False) -> Optional['NodeType']:
p1 = self
if exclude_self:
if p1.parent is not None and other.parent is not None:
p1 = p1.parent
other = other.parent
else:
return None
while p1 is not None:
p2 = other
while p2 is not None:
if p1 == p2:
return p1
p2 = p2.parent
p1 = p1.parent
return None
def ancestors(self) -> Iterator['NodeType']:
p = self
while p.parent is not None:
p = p.parent
yield p
def highest(self, must: Optional[Callable[['NodeType'], bool]] = None) -> 'NodeType':
result = None
for a in self.ancestors():
if must and callable(must) and not must(a):
break
result = a
return result
def preorder(self):
return self.descendents('dfs')
def inorder(self):
current = self
s = []
while current or len(s) > 0:
if current is not None:
s.append(current)
current = current.children[0] if current.children else None
elif s:
current = s.pop()
yield current
current = current.children[1] if len(current.children) == 2 else None
def postorder(self):
def peek(s):
if len(s) > 0:
return s[-1]
return None
stack = []
current = self
while True:
while current:
if current.children and len(current.children) == 2:
stack.append(current.children[1])
stack.append(current)
current = current.children[0] if current.children else None
current = stack.pop()
if current.children and len(current.children) == 2 and id(peek(stack)) == id(current.children[1]):
stack.pop()
stack.append(current)
current = current.children[1]
else:
yield current
current = None
if len(stack) <= 0:
break
def descendents(self, strategy='bfs', ghost_children=False):
"""
In order BFS
:return:
"""
q = [self]
while len(q) > 0:
node = q.pop(0 if strategy == 'bfs' else -1)
yield node
if node is not None:
if node.children:
children = node.children if strategy == 'bfs' else node.children[::-1]
for c in children:
q.append(c)
if ghost_children and len(node.children) == 1:
q.append(None)
    def leaves(self) -> Iterator[Tuple[int, 'NodeType']]:
q = [(0, self)]
while len(q) > 0:
lvl, node = q.pop(0)
if node.children:
for c in node.children:
q.append((lvl + 1, c))
else:
yield lvl, node
def head(self, include_self=True) -> Optional['NodeType']:
ancestors = list(self.ancestors())
if ancestors:
return ancestors[-1]
elif include_self:
return self
return None
def bubble_up(self):
head = self.head(False)
if head:
self.detach()
head.set_parent(self)
    def is_leaf(self) -> bool:
        return self.children is None or len(self.children) == 0
NodeType = TypeVar('NodeType', bound=Node)
TreeType = TypeVar('TreeType', bound='Tree')
class Tree(Generic[NodeType]):
root: Optional[NodeType] = None
def __init__(self, root: Optional[NodeType] = None):
self._uid = str(uuid.uuid4())
self.root = root
def apply(self, func: Callable[[NodeType], None]):
for node in self.nodes():
func(node)
def preorder(self):
return self.root.preorder()
def inorder(self):
return self.root.inorder()
def postorder(self):
return self.root.postorder()
def dfs(self, ghost_children=False) -> Iterator[NodeType]:
return self.root.descendents(strategy='dfs', ghost_children=ghost_children)
def bfs(self, ghost_children=False) -> Iterator[NodeType]:
return self.root.descendents(strategy='bfs', ghost_children=ghost_children)
def nodes(self) -> Iterator[NodeType]:
return self.bfs()
def find(self, uid):
for n in self.bfs():
if n._uid == uid:
return n
return None
def node_at(self, path: str) -> NodeType:
parts = path.split('.')
node = None
for part in parts:
if part == '$':
node = self.root
else:
node = node.children[int(part)]
return node
def detach_from_root(self, attach_to: NodeType):
attach_to.push_under(self.root)
attach_to.parent = None
def filter_nodes(self, condition: Optional[Callable[[NodeType], bool]]) -> List[NodeType]:
if not hasattr(self, '_filtered_nodes'):
setattr(self, '_filtered_nodes', {})
condition_source = inspect.getsource(condition).strip() if condition else None
conditional_nodes = getattr(self, '_filtered_nodes')
if condition_source not in conditional_nodes:
nodes = []
for n in self.bfs():
if not condition or condition(n):
nodes.append(n)
conditional_nodes[condition_source] = nodes
return conditional_nodes[condition_source]
# TODO: do this faster
def __len__(self):
return len(list(self.dfs()))
def depth(self) -> int:
d = 0
for leaf_depth, _ in self.root.leaves():
if leaf_depth > d:
d = leaf_depth
return d
def nodes_at_level(self, lvl) -> List[NodeType]:
q = [(0, self.root)]
while len(q) > 0:
current_lvl, node = q.pop(0)
if current_lvl == lvl:
yield node
elif current_lvl < lvl:
if node.children:
for c in node.children:
q.append((current_lvl + 1, c))
def swap_nodes(self, a: NodeType, b: NodeType):
b_parent, a_parent = b.parent, a.parent
b_children, a_children = b.children[:], a.children[:]
if a.parent == b:
a.set_parent(b.parent)
b.set_parent(a)
b.set_children(a_children)
elif b.parent == a:
b.set_parent(a.parent)
a.set_parent(b)
a.set_children(b_children)
else:
b.set_parent(a_parent)
a.set_parent(b_parent)
b.set_children(a_children)
a.set_children(b_children)
def permutations(self, limit: Optional[int] = None) -> List[TreeType]:
from sympy.utilities.iterables import multiset_permutations
import copy
trees = [self]
all_nodes = list(self.dfs())
for n in all_nodes:
new_trees = []
for tree in trees:
if limit and 0 < limit <= len(trees):
return trees
uid = n._uid
current = tree.find(uid)
variations = []
if hasattr(current, 'variations'):
variations = getattr(current, 'variations')()
if current.children and len(current.children) > 1:
for perm_indices in multiset_permutations(list(range(len(current.children)))):
if variations:
for variation in variations:
new_tree = copy.deepcopy(tree)
new_node = new_tree.node_at(current.path())
new_node.reorder_children(perm_indices)
new_node.merge(variation)
new_trees.append(new_tree)
# exclude the natural sort
if perm_indices != list(range(len(perm_indices))):
new_tree = copy.deepcopy(tree)
new_node = new_tree.node_at(current.path())
new_node.reorder_children(perm_indices)
new_trees.append(new_tree)
elif variations:
# TODO: this is ugly, every variation is essentially a split
for variation in variations:
new_tree = copy.deepcopy(tree)
new_node = new_tree.node_at(current.path())
new_node.merge(variation)
new_trees.append(new_tree)
trees += new_trees
return trees
def pretty(self, value_only=True):
nodes = [self.root]
graphic_tree = ''
while len(nodes) > 0:
node = nodes.pop()
padding = getattr(node, 'depth', 0) * ' '
graphic_tree += f"{padding}- {str(node.value) if value_only else str(node)}\n"
if node.children:
for c in node.children[::-1]:
setattr(c, 'depth', getattr(node, 'depth', 0) + 1)
nodes.append(c)
return graphic_tree
@staticmethod
def transform(a: 'Tree', node_transform: Callable[[Node], Node]) -> 'Tree':
b = Tree()
a2b = {}
for a_node in a.dfs():
a_node_id = id(a_node)
b_node = node_transform(a_node)
a2b[a_node_id] = b_node
if a_node.parent is None:
b.root = b_node
else:
b_parent = a2b[id(a_node.parent)]
b_node.set_parent(b_parent)
return b
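# --- usage sketch (illustrative only; exercises the classes defined above) ---
if __name__ == '__main__':
    root = Node('a')
    b = Node('b', parent=root)
    c = Node('c', parent=root)
    Node('d', parent=b)
    tree = Tree(root)
    print(tree.pretty(), end='')              # a, b, d, c with indentation
    print([n.value for n in tree.bfs()])      # ['a', 'b', 'c', 'd']
    print(c.path())                           # $.1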
|
{"hexsha": "60c183e5a4a1593c3e07a677f1a256701afc3d8a", "size": 13199, "ext": "py", "lang": "Python", "max_stars_repo_path": "dqo/tree/__init__.py", "max_stars_repo_name": "danield137/deep_query_optimzation", "max_stars_repo_head_hexsha": "01a25c966338007f15d14dea1b37e388e47bcfe3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dqo/tree/__init__.py", "max_issues_repo_name": "danield137/deep_query_optimzation", "max_issues_repo_head_hexsha": "01a25c966338007f15d14dea1b37e388e47bcfe3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dqo/tree/__init__.py", "max_forks_repo_name": "danield137/deep_query_optimzation", "max_forks_repo_head_hexsha": "01a25c966338007f15d14dea1b37e388e47bcfe3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7668997669, "max_line_length": 122, "alphanum_fraction": 0.5257216456, "include": true, "reason": "from sympy", "num_tokens": 2735}
|
// (C) Copyright Gennadiy Rozental 2005-2008.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org/libs/test for the library home page.
//
// File : $RCSfile$
//
// Version : $Revision$
//
// Description : implements framework API - main driver for the test
// ***************************************************************************
#ifndef BOOST_TEST_FRAMEWORK_IPP_021005GER
#define BOOST_TEST_FRAMEWORK_IPP_021005GER
// Boost.Test
#include <boost/test/framework.hpp>
#include <boost/test/execution_monitor.hpp>
#include <boost/test/debug.hpp>
#include <boost/test/unit_test_suite_impl.hpp>
#include <boost/test/unit_test_log.hpp>
#include <boost/test/unit_test_monitor.hpp>
#include <boost/test/test_observer.hpp>
#include <boost/test/results_collector.hpp>
#include <boost/test/progress_monitor.hpp>
#include <boost/test/results_reporter.hpp>
#include <boost/test/test_tools.hpp>
#include <boost/test/detail/unit_test_parameters.hpp>
#include <boost/test/detail/global_typedef.hpp>
#include <boost/test/utils/foreach.hpp>
// Boost
#include <boost/timer.hpp>
// STL
#include <map>
#include <set>
#include <cstdlib>
#include <ctime>
#ifdef BOOST_NO_STDC_NAMESPACE
namespace std { using ::time; using ::srand; }
#endif
#include <boost/test/detail/suppress_warnings.hpp>
//____________________________________________________________________________//
namespace boost {
namespace unit_test {
// ************************************************************************** //
// ************** test_start calls wrapper ************** //
// ************************************************************************** //
namespace ut_detail {
struct test_start_caller {
test_start_caller( test_observer* to, counter_t tc_amount )
: m_to( to )
, m_tc_amount( tc_amount )
{}
int operator()()
{
m_to->test_start( m_tc_amount );
return 0;
}
private:
// Data members
test_observer* m_to;
counter_t m_tc_amount;
};
//____________________________________________________________________________//
struct test_init_caller {
explicit test_init_caller( init_unit_test_func init_func )
: m_init_func( init_func )
{}
int operator()()
{
#ifdef BOOST_TEST_ALTERNATIVE_INIT_API
if( !(*m_init_func)() )
throw std::runtime_error( "test module initialization failed" );
#else
test_suite* manual_test_units = (*m_init_func)( framework::master_test_suite().argc, framework::master_test_suite().argv );
if( manual_test_units )
framework::master_test_suite().add( manual_test_units );
#endif
return 0;
}
// Data members
init_unit_test_func m_init_func;
};
}
// ************************************************************************** //
// ************** framework ************** //
// ************************************************************************** //
class framework_impl : public test_tree_visitor {
public:
framework_impl()
: m_master_test_suite( 0 )
, m_curr_test_case( INV_TEST_UNIT_ID )
, m_next_test_case_id( MIN_TEST_CASE_ID )
, m_next_test_suite_id( MIN_TEST_SUITE_ID )
, m_is_initialized( false )
, m_test_in_progress( false )
{}
~framework_impl() { clear(); }
void clear()
{
while( !m_test_units.empty() ) {
test_unit_store::value_type const& tu = *m_test_units.begin();
test_unit const* tu_ptr = tu.second;
// the delete will erase this element from map
if( ut_detail::test_id_2_unit_type( tu.second->p_id ) == tut_suite )
delete static_cast<test_suite const*>(tu_ptr);
else
delete static_cast<test_case const*>(tu_ptr);
}
}
void set_tu_id( test_unit& tu, test_unit_id id ) { tu.p_id.value = id; }
// test_tree_visitor interface implementation
void visit( test_case const& tc )
{
if( !tc.check_dependencies() ) {
BOOST_TEST_FOREACH( test_observer*, to, m_observers )
to->test_unit_skipped( tc );
return;
}
BOOST_TEST_FOREACH( test_observer*, to, m_observers )
to->test_unit_start( tc );
boost::timer tc_timer;
test_unit_id bkup = m_curr_test_case;
m_curr_test_case = tc.p_id;
unit_test_monitor_t::error_level run_result = unit_test_monitor.execute_and_translate( tc );
unsigned long elapsed = static_cast<unsigned long>( tc_timer.elapsed() * 1e6 );
if( unit_test_monitor.is_critical_error( run_result ) ) {
BOOST_TEST_FOREACH( test_observer*, to, m_observers )
to->test_aborted();
}
BOOST_TEST_FOREACH( test_observer*, to, m_observers )
to->test_unit_finish( tc, elapsed );
m_curr_test_case = bkup;
if( unit_test_monitor.is_critical_error( run_result ) )
throw test_being_aborted();
}
bool test_suite_start( test_suite const& ts )
{
if( !ts.check_dependencies() ) {
BOOST_TEST_FOREACH( test_observer*, to, m_observers )
to->test_unit_skipped( ts );
return false;
}
BOOST_TEST_FOREACH( test_observer*, to, m_observers )
to->test_unit_start( ts );
return true;
}
void test_suite_finish( test_suite const& ts )
{
BOOST_TEST_FOREACH( test_observer*, to, m_observers )
to->test_unit_finish( ts, 0 );
}
//////////////////////////////////////////////////////////////////
struct priority_order {
bool operator()( test_observer* lhs, test_observer* rhs ) const
{
return (lhs->priority() < rhs->priority()) || ((lhs->priority() == rhs->priority()) && (lhs < rhs));
}
};
typedef std::map<test_unit_id,test_unit*> test_unit_store;
typedef std::set<test_observer*,priority_order> observer_store;
master_test_suite_t* m_master_test_suite;
test_unit_id m_curr_test_case;
test_unit_store m_test_units;
test_unit_id m_next_test_case_id;
test_unit_id m_next_test_suite_id;
bool m_is_initialized;
bool m_test_in_progress;
observer_store m_observers;
};
//____________________________________________________________________________//
namespace {
#if defined(__CYGWIN__)
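// The Cygwin branch below deliberately heap-allocates the framework singleton
// and never frees it, presumably to sidestep static-destruction-order issues
// on that platform; the leak is bounded to a single object per process.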
framework_impl& s_frk_impl() { static framework_impl* the_inst = 0; if(!the_inst) the_inst = new framework_impl; return *the_inst; }
#else
framework_impl& s_frk_impl() { static framework_impl the_inst; return the_inst; }
#endif
} // local namespace
//____________________________________________________________________________//
namespace framework {
void
init( init_unit_test_func init_func, int argc, char* argv[] )
{
runtime_config::init( argc, argv );
// set the log level and format
unit_test_log.set_threshold_level( runtime_config::log_level() );
unit_test_log.set_format( runtime_config::log_format() );
// set the report level and format
results_reporter::set_level( runtime_config::report_level() );
results_reporter::set_format( runtime_config::report_format() );
register_observer( results_collector );
register_observer( unit_test_log );
if( runtime_config::show_progress() )
register_observer( progress_monitor );
if( runtime_config::detect_memory_leaks() > 0 ) {
debug::detect_memory_leaks( true );
debug::break_memory_alloc( runtime_config::detect_memory_leaks() );
}
// init master unit test suite
master_test_suite().argc = argc;
master_test_suite().argv = argv;
try {
boost::execution_monitor em;
ut_detail::test_init_caller tic( init_func );
em.execute( tic );
}
catch( execution_exception const& ex ) {
throw setup_error( ex.what() );
}
s_frk_impl().m_is_initialized = true;
}
//____________________________________________________________________________//
bool
is_initialized()
{
return s_frk_impl().m_is_initialized;
}
//____________________________________________________________________________//
void
register_test_unit( test_case* tc )
{
BOOST_TEST_SETUP_ASSERT( tc->p_id == INV_TEST_UNIT_ID, BOOST_TEST_L( "test case already registered" ) );
test_unit_id new_id = s_frk_impl().m_next_test_case_id;
BOOST_TEST_SETUP_ASSERT( new_id != MAX_TEST_CASE_ID, BOOST_TEST_L( "too many test cases" ) );
typedef framework_impl::test_unit_store::value_type map_value_type;
s_frk_impl().m_test_units.insert( map_value_type( new_id, tc ) );
s_frk_impl().m_next_test_case_id++;
s_frk_impl().set_tu_id( *tc, new_id );
}
//____________________________________________________________________________//
void
register_test_unit( test_suite* ts )
{
BOOST_TEST_SETUP_ASSERT( ts->p_id == INV_TEST_UNIT_ID, BOOST_TEST_L( "test suite already registered" ) );
test_unit_id new_id = s_frk_impl().m_next_test_suite_id;
BOOST_TEST_SETUP_ASSERT( new_id != MAX_TEST_SUITE_ID, BOOST_TEST_L( "too many test suites" ) );
typedef framework_impl::test_unit_store::value_type map_value_type;
s_frk_impl().m_test_units.insert( map_value_type( new_id, ts ) );
s_frk_impl().m_next_test_suite_id++;
s_frk_impl().set_tu_id( *ts, new_id );
}
//____________________________________________________________________________//
void
deregister_test_unit( test_unit* tu )
{
s_frk_impl().m_test_units.erase( tu->p_id );
}
//____________________________________________________________________________//
void
clear()
{
s_frk_impl().clear();
}
//____________________________________________________________________________//
void
register_observer( test_observer& to )
{
s_frk_impl().m_observers.insert( &to );
}
//____________________________________________________________________________//
void
deregister_observer( test_observer& to )
{
s_frk_impl().m_observers.erase( &to );
}
//____________________________________________________________________________//
void
reset_observers()
{
s_frk_impl().m_observers.clear();
}
//____________________________________________________________________________//
master_test_suite_t&
master_test_suite()
{
if( !s_frk_impl().m_master_test_suite )
s_frk_impl().m_master_test_suite = new master_test_suite_t;
return *s_frk_impl().m_master_test_suite;
}
//____________________________________________________________________________//
test_case const&
current_test_case()
{
return get<test_case>( s_frk_impl().m_curr_test_case );
}
//____________________________________________________________________________//
test_unit&
get( test_unit_id id, test_unit_type t )
{
test_unit* res = s_frk_impl().m_test_units[id];
if( (res->p_type & t) == 0 )
throw internal_error( "Invalid test unit type" );
return *res;
}
//____________________________________________________________________________//
void
run( test_unit_id id, bool continue_test )
{
if( id == INV_TEST_UNIT_ID )
id = master_test_suite().p_id;
test_case_counter tcc;
traverse_test_tree( id, tcc );
    BOOST_TEST_SETUP_ASSERT( tcc.p_count != 0, runtime_config::test_to_run().is_empty()
? BOOST_TEST_L( "test tree is empty" )
: BOOST_TEST_L( "no test cases matching filter" ) );
bool call_start_finish = !continue_test || !s_frk_impl().m_test_in_progress;
bool was_in_progress = s_frk_impl().m_test_in_progress;
s_frk_impl().m_test_in_progress = true;
if( call_start_finish ) {
BOOST_TEST_FOREACH( test_observer*, to, s_frk_impl().m_observers ) {
boost::execution_monitor em;
try {
em.execute( ut_detail::test_start_caller( to, tcc.p_count ) );
}
catch( execution_exception const& ex ) {
throw setup_error( ex.what() );
}
}
}
switch( runtime_config::random_seed() ) {
case 0:
break;
case 1: {
unsigned int seed = static_cast<unsigned int>( std::time( 0 ) );
BOOST_TEST_MESSAGE( "Test cases order is shuffled using seed: " << seed );
std::srand( seed );
break;
}
default:
BOOST_TEST_MESSAGE( "Test cases order is shuffled using seed: " << runtime_config::random_seed() );
std::srand( runtime_config::random_seed() );
}
try {
traverse_test_tree( id, s_frk_impl() );
}
catch( test_being_aborted const& ) {
// abort already reported
}
if( call_start_finish ) {
BOOST_TEST_FOREACH( test_observer*, to, s_frk_impl().m_observers )
to->test_finish();
}
s_frk_impl().m_test_in_progress = was_in_progress;
}
//____________________________________________________________________________//
void
run( test_unit const* tu, bool continue_test )
{
run( tu->p_id, continue_test );
}
//____________________________________________________________________________//
void
assertion_result( bool passed )
{
BOOST_TEST_FOREACH( test_observer*, to, s_frk_impl().m_observers )
to->assertion_result( passed );
}
//____________________________________________________________________________//
void
exception_caught( execution_exception const& ex )
{
BOOST_TEST_FOREACH( test_observer*, to, s_frk_impl().m_observers )
to->exception_caught( ex );
}
//____________________________________________________________________________//
void
test_unit_aborted( test_unit const& tu )
{
BOOST_TEST_FOREACH( test_observer*, to, s_frk_impl().m_observers )
to->test_unit_aborted( tu );
}
//____________________________________________________________________________//
} // namespace framework
} // namespace unit_test
} // namespace boost
//____________________________________________________________________________//
#include <boost/test/detail/enable_warnings.hpp>
#endif // BOOST_TEST_FRAMEWORK_IPP_021005GER
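// A hedged sketch of the typical driver flow built on this API (this is
// roughly what the stock console runner does, with error handling elided):
//
//     boost::unit_test::framework::init( &init_unit_test, argc, argv );
//     boost::unit_test::framework::run();                 // master test suite
//     boost::unit_test::results_reporter::make_report();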
|
{"hexsha": "4476251d4084b04d1d79b25f1d4cb70694b07750", "size": 14334, "ext": "ipp", "lang": "C++", "max_stars_repo_path": "boost/test/impl/framework.ipp", "max_stars_repo_name": "jonstewart/boost-svn", "max_stars_repo_head_hexsha": "7f6dc0c0cb807b28072c7bdd3d77bb01ab290c59", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-04-08T10:44:28.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-08T10:44:28.000Z", "max_issues_repo_path": "boost/test/impl/framework.ipp", "max_issues_repo_name": "jonstewart/boost-svn", "max_issues_repo_head_hexsha": "7f6dc0c0cb807b28072c7bdd3d77bb01ab290c59", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boost/test/impl/framework.ipp", "max_forks_repo_name": "jonstewart/boost-svn", "max_forks_repo_head_hexsha": "7f6dc0c0cb807b28072c7bdd3d77bb01ab290c59", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4404761905, "max_line_length": 132, "alphanum_fraction": 0.6687595926, "num_tokens": 3055}
|
import logging
import neo4j
import pandas as pd
from neo4j import GraphDatabase
from dku_neo4j.query_templates import (
LOAD_FROM_CSV_PREFIX,
UNWIND_PREFIX,
BATCH_DELETE_NODES,
DELETE_NODES,
SOURCE_MERGE_STATEMENT,
PROPERTIES_STATEMENT,
CREATE_CONSTRAINT_IF_NOT_EXIST,
create_export_relationship_suffix_query,
)
class Neo4jHandle(object):
DATA = "data"
ROWS = "rows"
SOURCE_IDENTIFIER = "src"
TARGET_IDENTIFIER = "tgt"
RELATIONSHIP_IDENTIFIER = "rel"
def __init__(self, uri, username, password, database=None):
self.uri = uri
self.username = username
self.password = password
self.database = database if database else neo4j.DEFAULT_DATABASE
def __enter__(self):
try:
self.driver = GraphDatabase.driver(self.uri, auth=(self.username, self.password))
        except Exception as e:
            raise Exception("Failed to connect to the Neo4j server. Please check your preset credentials and URI.") from e
return self
def __exit__(self, exception_type, exception_value, traceback):
logging.info("Neo4j plugin - Closing driver ...")
self.driver.close()
def check(self):
try:
query = "MATCH (n) RETURN n LIMIT 1"
self.run(query)
except Exception:
raise ValueError("Could not connect to graph database using the preset")
return
def run(self, query, data=None, log_results=False):
"""Run a cypher query with a session from the driver. If some data is specified, it calls a write_transaction with the data.
Args:
query (str): Cypher query to execute.
data (list of dict, optional): Data used in an unwind query. Defaults to None.
log_results (bool, optional): Log statistics about the query execution. Defaults to False.
"""
with self.driver.session(database=self.database) as session:
if data:
results = session.write_transaction(self.unwind_transaction, query=query, data=data)
else:
results = session.run(query)
if log_results:
logging.info(f"Neo4j plugin - Query results: {results.consume().counters}")
def unwind_transaction(self, tx, query, data):
results = tx.run(query, parameters={self.DATA: data})
return results
def delete_nodes(self, nodes_label, batch_size=1000):
query = BATCH_DELETE_NODES.format(nodes_label=nodes_label, batch_size=batch_size)
logging.info(f"Neo4j plugin - Deleting nodes by batch: {query}")
        try:
            self.run(query, log_results=True)
        except Exception as e:
            # APOC may be missing on the server; fall back to a plain delete.
            if getattr(e, "code", None) == "Neo.ClientError.Procedure.ProcedureNotFound":
                query = DELETE_NODES.format(nodes_label=nodes_label)
                logging.info(f"Neo4j plugin - APOC procedure not found, deleting nodes with: {query}")
                self.run(query, log_results=True)
            else:
                raise
def load_nodes_from_csv(self, df_iterator, columns_list, params, file_handler):
definition = self._schema(params.used_columns)
node_primary_key_statement = self._primary_key_statement(
columns_list, params.node_lookup_key, params.node_id_column
)
properties = self._properties(
columns_list, params.node_properties, self.SOURCE_IDENTIFIER, params.property_names_map
)
for index, df in enumerate(df_iterator):
self._check_no_empty_primary_key(df, mandatory_columns=[params.node_id_column])
local_path = f"dss_neo4j_export_temp_file_{index+1:03}.csv.gz"
import_file_path = file_handler.write(df, local_path)
query = LOAD_FROM_CSV_PREFIX.format(
periodic_commit=params.periodic_commit,
import_file_path=import_file_path,
definition=definition,
)
query += SOURCE_MERGE_STATEMENT.format(
node_label=params.nodes_label, node_primary_key_statement=node_primary_key_statement
)
query += PROPERTIES_STATEMENT.format(properties=properties)
if index == 0:
logging.info(f"Neo4j plugin - Importing nodes into Neo4j: {query}")
else:
logging.info(f"Neo4j plugin - Same query using file: {import_file_path}")
self.run(query, log_results=True)
file_handler.delete(local_path)
def insert_nodes_by_batch(self, df_iterator, columns_list, params):
node_primary_key_statement = self._primary_key_statement(
columns_list, params.node_lookup_key, params.node_id_column, unwind=True
)
properties = self._properties(
columns_list, params.node_properties, self.SOURCE_IDENTIFIER, params.property_names_map, unwind=True
)
query = UNWIND_PREFIX.format(
data=self.DATA,
rows=self.ROWS,
)
query += SOURCE_MERGE_STATEMENT.format(
node_label=params.nodes_label, node_primary_key_statement=node_primary_key_statement
)
query += PROPERTIES_STATEMENT.format(properties=properties)
logging.info(f"Neo4j plugin - Inserting nodes into Neo4j: {query}")
rows_processed = 0
for df in df_iterator:
rows_processed += len(df.index)
data = self._get_cleaned_data(df, mandatory_columns=[params.node_id_column])
self.run(query, data=data, log_results=True)
logging.info(f"Neo4j plugin - Processed rows: {rows_processed}")
def add_unique_constraint_on_relationship_nodes(self, params):
self._add_unique_constraint_if_not_exist(params.source_node_label, params.source_node_lookup_key)
self._add_unique_constraint_if_not_exist(params.target_node_label, params.target_node_lookup_key)
def add_unique_constraint_on_nodes(self, params):
self._add_unique_constraint_if_not_exist(params.nodes_label, params.node_lookup_key)
def _add_unique_constraint_if_not_exist(self, label, property_key):
query = CREATE_CONSTRAINT_IF_NOT_EXIST.format(label=label, property_key=property_key)
logging.info(f"Neo4j plugin - Creating uniqueness constraint on {label}.{property_key}")
self.run(query, log_results=True)
def load_relationships_from_csv(self, df_iterator, columns_list, params, file_handler):
definition = self._schema(params.used_columns)
source_node_primary_key_statement = self._primary_key_statement(
columns_list, params.source_node_lookup_key, params.source_node_id_column
)
target_node_primary_key_statement = self._primary_key_statement(
columns_list, params.target_node_lookup_key, params.target_node_id_column
)
relationship_primary_key_statement = ""
if params.relationship_id_column:
relationship_primary_key_statement = self._primary_key_statement(
columns_list, params.relationship_lookup_key, params.relationship_id_column
)
node_incremented_property = "count" if params.node_count_property else None
edge_incremented_property = "weight" if params.edge_weight_property else None
source_node_properties = self._properties(
columns_list,
params.source_node_properties,
self.SOURCE_IDENTIFIER,
params.property_names_map,
incremented_property=node_incremented_property,
skip_row_if_not_exist=params.skip_row_if_not_source,
)
target_node_properties = self._properties(
columns_list,
params.target_node_properties,
self.TARGET_IDENTIFIER,
params.property_names_map,
incremented_property=node_incremented_property,
skip_row_if_not_exist=params.skip_row_if_not_target,
)
relationship_properties = self._properties(
columns_list,
params.relationship_properties,
self.RELATIONSHIP_IDENTIFIER,
params.property_names_map,
incremented_property=edge_incremented_property,
)
for i, df in enumerate(df_iterator):
self._check_no_empty_primary_key(
df, mandatory_columns=[params.source_node_id_column, params.target_node_id_column]
)
local_path = f"dss_neo4j_export_temp_file_{i+1:03}.csv.gz"
import_file_path = file_handler.write(df, local_path)
query = LOAD_FROM_CSV_PREFIX.format(
periodic_commit=params.periodic_commit,
import_file_path=import_file_path,
definition=definition,
)
query += create_export_relationship_suffix_query(
source_node_label=params.source_node_label,
source_node_primary_key_statement=source_node_primary_key_statement,
source_node_properties=source_node_properties,
target_node_label=params.target_node_label,
target_node_primary_key_statement=target_node_primary_key_statement,
target_node_properties=target_node_properties,
relationships_verb=params.relationships_verb,
relationship_primary_key_statement=relationship_primary_key_statement,
relationship_properties=relationship_properties,
skip_row_if_not_source=params.skip_row_if_not_source,
skip_row_if_not_target=params.skip_row_if_not_target,
)
if i == 0:
logging.info(f"Neo4j plugin - Importing relationships and nodes into Neo4j: {query}")
else:
logging.info(f"Neo4j plugin - Same query using file: {import_file_path}")
self.run(query, log_results=True)
file_handler.delete(local_path)
def insert_relationships_by_batch(self, df_iterator, columns_list, params):
node_incremented_property = "count" if params.node_count_property else None
edge_incremented_property = "weight" if params.edge_weight_property else None
source_node_primary_key_statement = self._primary_key_statement(
columns_list, params.source_node_lookup_key, params.source_node_id_column, unwind=True
)
target_node_primary_key_statement = self._primary_key_statement(
columns_list, params.target_node_lookup_key, params.target_node_id_column, unwind=True
)
relationship_primary_key_statement = ""
mandatory_columns = [params.source_node_id_column, params.target_node_id_column]
if params.relationship_id_column:
relationship_primary_key_statement = self._primary_key_statement(
columns_list, params.relationship_lookup_key, params.relationship_id_column, unwind=True
)
mandatory_columns.append(params.relationship_id_column)
source_node_properties = self._properties(
columns_list,
params.source_node_properties,
self.SOURCE_IDENTIFIER,
params.property_names_map,
incremented_property=node_incremented_property,
unwind=True,
skip_row_if_not_exist=params.skip_row_if_not_source,
)
target_node_properties = self._properties(
columns_list,
params.target_node_properties,
self.TARGET_IDENTIFIER,
params.property_names_map,
incremented_property=node_incremented_property,
unwind=True,
skip_row_if_not_exist=params.skip_row_if_not_target,
)
relationship_properties = self._properties(
columns_list,
params.relationship_properties,
self.RELATIONSHIP_IDENTIFIER,
params.property_names_map,
incremented_property=edge_incremented_property,
unwind=True,
)
query = UNWIND_PREFIX.format(
data=self.DATA,
rows=self.ROWS,
)
query += create_export_relationship_suffix_query(
source_node_label=params.source_node_label,
source_node_primary_key_statement=source_node_primary_key_statement,
source_node_properties=source_node_properties,
target_node_label=params.target_node_label,
target_node_primary_key_statement=target_node_primary_key_statement,
target_node_properties=target_node_properties,
relationships_verb=params.relationships_verb,
relationship_primary_key_statement=relationship_primary_key_statement,
relationship_properties=relationship_properties,
skip_row_if_not_source=params.skip_row_if_not_source,
skip_row_if_not_target=params.skip_row_if_not_target,
)
logging.info(f"Neo4j plugin - Inserting nodes into Neo4j: {query}")
rows_processed = 0
for df in df_iterator:
rows_processed += len(df.index)
data = self._get_cleaned_data(df, mandatory_columns=mandatory_columns)
self.run(query, data=data, log_results=True)
logging.info(f"Neo4j plugin - Processed rows: {rows_processed}")
def _schema(self, columns_list):
return ", ".join([f"line[{index}] AS `{column}`" for index, column in enumerate(columns_list)])
def _properties(
self,
all_columns_list,
properties_list,
identifier,
property_names_map,
incremented_property=None,
unwind=False,
skip_row_if_not_exist=False,
):
type_per_column = {}
for column in all_columns_list:
type_per_column[column["name"]] = column["type"]
properties_strings = []
for colname in properties_list:
if colname in property_names_map:
neo4j_property_name = property_names_map[colname]
else:
neo4j_property_name = colname
property_string = self._property(
colname,
neo4j_property_name,
type_per_column[colname],
identifier,
unwind=unwind,
match_statement=skip_row_if_not_exist,
)
properties_strings.append(property_string)
if incremented_property and not skip_row_if_not_exist:
incremented_property_statement = f"ON CREATE SET {identifier}.{incremented_property} = 1"
incremented_property_statement += (
f"\nON MATCH SET {identifier}.{incremented_property} = {identifier}.{incremented_property} + 1"
)
properties_strings.append(incremented_property_statement)
return "\n".join(properties_strings)
def _primary_key_statement(self, all_columns_list, lookup_key, id_column, unwind=False):
"""Create a merge statement in the form of '{lookup_key: id_column}'"""
id_column_type = next((column["type"] for column in all_columns_list if column["name"] == id_column), None)
typed_value = self._cast_property_type(id_column, id_column_type, unwind)
return f" {{`{lookup_key}`: {typed_value}}}"
def _property(self, colname, prop, coltype, identifier, unwind=False, match_statement=False):
typedValue = self._cast_property_type(colname, coltype, unwind)
set_statement = f"{identifier}.`{prop}` = {typedValue}"
if match_statement:
return f"SET {set_statement}"
else:
return f"ON CREATE SET {set_statement}\nON MATCH SET {set_statement}"
def _cast_property_type(self, colname, coltype, unwind):
if unwind:
colname_reference = f"{self.ROWS}.`{colname}`"
else:
colname_reference = f"`{colname}`"
if coltype in ["int", "bigint", "smallint", "tinyint"]:
typedValue = f"toInteger({colname_reference})"
elif coltype in ["double", "float"]:
typedValue = f"toFloat({colname_reference})"
elif coltype == "boolean":
typedValue = f"toBoolean({colname_reference})"
elif coltype == "date":
typedValue = f"datetime({colname_reference})"
else:
typedValue = colname_reference
return typedValue
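    # A hedged illustration of what the helpers above generate for a
    # hypothetical int column "age" renamed to "customer_age", with
    # identifier "src" and unwind=True:
    #   _cast_property_type -> toInteger(rows.`age`)
    #   _property           -> ON CREATE SET src.`customer_age` = toInteger(rows.`age`)
    #                          ON MATCH SET src.`customer_age` = toInteger(rows.`age`)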
def _get_cleaned_data(self, df, mandatory_columns=None):
"""Make sure primary key columns don't have missing values and remove missing values from other properties columns"""
if mandatory_columns:
self._check_no_empty_primary_key(df, mandatory_columns)
return self._remove_nan_values_from_records(df.to_dict(orient="records"))
def _check_no_empty_primary_key(self, df, mandatory_columns=None):
if df[mandatory_columns].isnull().any().any():
raise ValueError(f"The primary key columns {mandatory_columns} cannot have missing values.")
def _remove_nan_values_from_records(self, data):
return [{key: value for key, value in row.items() if not pd.isnull(value)} for row in data]
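
# A minimal usage sketch (hedged): the URI, credentials, label, and the
# `df_iterator`/`columns_list`/`params` objects below are placeholders that
# follow the interfaces assumed by the methods above.
#
#   with Neo4jHandle("bolt://localhost:7687", "neo4j", "secret") as handle:
#       handle.check()
#       handle.delete_nodes("Customer", batch_size=1000)
#       handle.insert_nodes_by_batch(df_iterator, columns_list, params)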
class ExportParams(object):
def __init__(self):
pass
def set_periodic_commit(self, periodic_commit):
self.periodic_commit = periodic_commit
class NodesExportParams(ExportParams):
def __init__(
self,
nodes_label,
node_id_column,
properties_mode,
node_properties,
property_names_mapping,
property_names_map,
expert_mode=False,
clear_before_run=False,
columns_list=None,
):
self.nodes_label = nodes_label
self.node_id_column = node_id_column
self.properties_mode = properties_mode
self.node_properties = node_properties or []
self.property_names_map = property_names_map or {} if property_names_mapping else {}
self.clear_before_run = clear_before_run if expert_mode else False
if properties_mode == "SELECT_COLUMNS":
if node_id_column in node_properties:
self.node_properties.remove(node_id_column)
else:
self.node_properties = [column["name"] for column in columns_list if column["name"] != self.node_id_column]
if node_id_column in self.property_names_map:
self.node_lookup_key = self.property_names_map[node_id_column]
else:
self.node_lookup_key = node_id_column
self.used_columns = [self.node_id_column] + self.node_properties
def check(self, column_list):
existing_colnames = [column["name"] for column in column_list]
if not self.nodes_label:
raise ValueError("Node label is not specified.")
check_backtick(self.nodes_label, "Node label")
if not self.node_id_column or self.node_id_column not in existing_colnames:
raise ValueError(f"Primary key column '{self.node_id_column}' is invalid.")
if self.properties_mode == "SELECT_COLUMNS":
for colname in self.node_properties:
if colname not in existing_colnames:
raise ValueError(f"Node properties column '{colname}' is invalid.")
check_property_names_map(self.property_names_map, self.used_columns)
class RelationshipsExportParams(ExportParams):
def __init__(
self,
source_node_label,
source_node_id_column,
source_node_properties,
target_node_label,
target_node_id_column,
target_node_properties,
relationships_verb,
relationship_id_column,
relationship_properties,
property_names_mapping,
property_names_map,
expert_mode=False,
clear_before_run=False,
node_count_property=False,
edge_weight_property=False,
skip_row_if_not_source=False,
skip_row_if_not_target=False,
):
self.source_node_label = source_node_label
self.source_node_id_column = source_node_id_column
self.source_node_properties = source_node_properties or []
self.target_node_label = target_node_label
self.target_node_id_column = target_node_id_column
self.target_node_properties = target_node_properties or []
self.relationships_verb = relationships_verb
self.relationship_id_column = relationship_id_column
self.relationship_properties = relationship_properties
self.property_names_map = property_names_map or {} if property_names_mapping else {}
self.clear_before_run = clear_before_run if expert_mode else False
self.node_count_property = node_count_property if expert_mode else False
self.edge_weight_property = edge_weight_property if expert_mode else False
self.skip_row_if_not_source = skip_row_if_not_source if expert_mode else False
self.skip_row_if_not_target = skip_row_if_not_target if expert_mode else False
if source_node_id_column in source_node_properties:
self.source_node_properties.remove(source_node_id_column)
if source_node_id_column in self.property_names_map:
self.source_node_lookup_key = self.property_names_map[source_node_id_column]
else:
self.source_node_lookup_key = source_node_id_column
if target_node_id_column in target_node_properties:
self.target_node_properties.remove(target_node_id_column)
if target_node_id_column in self.property_names_map:
self.target_node_lookup_key = self.property_names_map[target_node_id_column]
else:
self.target_node_lookup_key = target_node_id_column
if relationship_id_column in relationship_properties:
self.relationship_properties.remove(relationship_id_column)
if relationship_id_column in property_names_map:
self.relationship_lookup_key = property_names_map[relationship_id_column]
else:
self.relationship_lookup_key = relationship_id_column
self.used_columns = sorted(
list(
set(
[self.source_node_id_column, self.target_node_id_column]
+ self.source_node_properties
+ self.target_node_properties
+ self.relationship_properties
)
)
)
if self.relationship_id_column:
self.used_columns.append(self.relationship_id_column)
def check(self, column_list):
existing_colnames = [column["name"] for column in column_list]
if not self.source_node_label:
raise ValueError("Source nodes label not specified")
check_backtick(self.source_node_label, "Source node label")
if not self.target_node_label:
raise ValueError("Target nodes label not specified")
check_backtick(self.target_node_label, "Target node label")
if not self.relationships_verb:
raise ValueError("Relationships type not specified")
check_backtick(self.relationships_verb, "Relationships type")
if not self.source_node_id_column or self.source_node_id_column not in existing_colnames:
raise ValueError(
f"Source nodes primary key '{self.source_node_id_column}' is invalid. It is mandatory and must be a valid column"
)
if not self.target_node_id_column or self.target_node_id_column not in existing_colnames:
raise ValueError(
f"Target nodes primary key '{self.target_node_id_column}' is invalid. It is mandatory and must be a valid column"
)
if self.relationship_id_column and self.relationship_id_column not in existing_colnames:
raise ValueError(f"Relationship primary key '{self.relationship_id_column}' is not a valid column")
for colname in self.source_node_properties:
if colname not in existing_colnames:
raise ValueError(f"Source nodes property '{colname}' is invalid.")
for colname in self.target_node_properties:
if colname not in existing_colnames:
raise ValueError(f"Target nodes property '{colname}' is invalid.")
for colname in self.relationship_properties:
if colname not in existing_colnames:
raise ValueError(f"Relationship property '{colname}' is invalid.")
check_property_names_map(self.property_names_map, self.used_columns)
def check_property_names_map(property_names_map, used_columns):
"""Check that all key -> values in the DSS column -> Neo4j name mapping are valid """
if property_names_map:
for dss_column, neo4j_property in property_names_map.items():
if dss_column not in used_columns:
raise ValueError(f"'{dss_column}' is not a valid DSS column name for changing names in Neo4j.")
if not neo4j_property:
raise ValueError(f"Neo4j property for DSS column '{dss_column}' is not specified.")
check_backtick(neo4j_property, "Neo4j property name")
def check_backtick(value, label):
    """Raise an error if the value contains any backticks"""
    if "`" in value:
        raise ValueError(f"{label} '{value}' cannot contain backticks (`). Please remove any backtick.")
|
{"hexsha": "2d9a9a30cd7970f33d7c238cc28ff2aa7221fd02", "size": 25332, "ext": "py", "lang": "Python", "max_stars_repo_path": "python-lib/dku_neo4j/neo4j_handle.py", "max_stars_repo_name": "dataiku/dss-plugin-neo4j", "max_stars_repo_head_hexsha": "f3de1780dfb121eee7dd27518da93cd6b59f1ba7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-23T08:19:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-15T08:14:23.000Z", "max_issues_repo_path": "python-lib/dku_neo4j/neo4j_handle.py", "max_issues_repo_name": "dataiku/dss-plugin-neo4j", "max_issues_repo_head_hexsha": "f3de1780dfb121eee7dd27518da93cd6b59f1ba7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-07-26T09:21:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-15T10:58:27.000Z", "max_forks_repo_path": "python-lib/dku_neo4j/neo4j_handle.py", "max_forks_repo_name": "dataiku/dss-plugin-neo4j", "max_forks_repo_head_hexsha": "f3de1780dfb121eee7dd27518da93cd6b59f1ba7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.914893617, "max_line_length": 132, "alphanum_fraction": 0.67771988, "include": true, "reason": "from numpy", "num_tokens": 4882}
|
Although the body of literature in the area of the proposed research is not
expansive, a number of relevant studies have used statistical methods to
predict forensically relevant categories or quantities of nuclear materials.
\subsection{Special Nuclear Materials Studied}
With regards to broader forensics capabilities, materials from different steps
of the nuclear fuel cycle are being studied. Even though each material has
its own forensics signatures, the process of applying statistical methods to
the analysis of material provenance is similar for each.
For example, on the front end of the fuel cycle, an entity with enrichment
capabilities may have obtained \gls{UOC}. One study performed
statistical analyses on \gls{UOC} from 21 sources (throughout seven countries)
using 30 concentration measurements of various elements, isotopes, and
compounds, e.g., sodium, magnesium, thorium, uranium-234, or halide compounds
\cite{robel_2009}. The goal of classifying the source and the country was
reached 60\% and 85\% of the time, respectively.
%Note: there was a ton of skew in their data and the correction for that was
%weak sauce
On the back end, an organization with reprocessing capabilities might have an
interest in \gls{SNF}. Alternatively, plutonium already separated from
\gls{SNF} may have been intercepted and need to be traced. Another study
addresses this case by performing factor analysis on theoretical separated
plutonium from various sources of \gls{ORIGEN}-simulated \gls{SNF}, based on
their composition at the end of irradiation \cite{nicolaou_pu}. Because all
materials in that study were the same age, five plutonium isotopes
($A = 238$--$242$) sufficed to correctly predict the source of a test sample.
However, taking different times since irradiation and reprocessing into
account requires more isotopic measurements.
\subsection{Statistical Methods Employed}
There are statistical methods studies that focus on the classification of the
reactor type for unknown samples \cite{robel_2009, nicolaou_pu, jones_snf_2014,
nicolaou_2009}. However, this work is focused on burnup prediction. Although
the results for both regression and classification are based on a number of
features that are usually isotopic in nature, it is not clear if the regression
counterparts of these algorithms will perform similarly for this task.
Promising regression work using factor analysis has been published
\cite{nicolaou_2006, nicolaou_2014}. Although factor analysis explicitly
requires the input of domain knowledge, it is a valuable first step towards
understanding how statistical methods can provide insightful models that
predict fuel enrichment and burnup. In the following two cases, the features
included for the analysis are only the uranium and plutonium isotopes remaining
in the \gls{SNF}. Ref. \cite{nicolaou_2006} covers predicting enrichment and
burnup from a range of simulated \gls{SNF} recipes and comparing an `unknown'
sample to the results of the factor analysis. Ref. \cite{nicolaou_2014}
extends that study to real measured samples from the \gls{SFCOMPO} database
\cite{sfcompo}. This work also highlights and addresses a known problem:
reliable discrimination between \gls{SNF} from \glspl{PWR} and \glsreset{BWR}
\glspl{BWR}.
The most closely related work to this study involves not only statistical
methods but an investigation of those methods when faced with information
reduction via random nuclide measurement errors in the training data set
\cite{dayman_feasibility_2013}. Additionally, feature reduction was
investigated by using various nuclide compositions: the top 200 nuclides by
concentration in each vector, fission products only, and a \gls{PCA}-derived
shortened nuclide list. Three methods were compared. First and second, the
nearest neighbor algorithm using both $L_1$ (sum of absolute differences of
Cartesian coordinates) and $L_2$ norms for measuring distance between test data
points classified reactor type and predicted burnup. Third, ridge regression
with an $L_2$ norm for regularization was only applied to burnup prediction. In
both classification and regression cases, using the fission products nuclide
list with both nearest neighbor methods performed the best. All other nuclide
lists quickly devolved to random guesses as nuclide error increased in the
case of reactor prediction, and to more than 100\% error in the case of
burnup prediction.
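For concreteness, the distance metrics and penalty referenced above take their
standard forms: for nuclide-composition vectors $x$ and $x'$ with $p$ features,
\[
  L_1(x, x') = \sum_{i=1}^{p} | x_i - x'_i |, \qquad
  L_2(x, x') = \Big( \sum_{i=1}^{p} (x_i - x'_i)^2 \Big)^{1/2},
\]
and ridge regression minimizes $\| y - X\beta \|_2^2 + \lambda \| \beta \|_2^2$
over the coefficients $\beta$, with $\lambda$ setting the strength of the
$L_2$ regularization.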
For reliable prediction, it seems to be promising to use actinides
\cite{nicolaou_2006, nicolaou_2014} and/or fission products
\cite{dayman_feasibility_2013} for domain knowledge-based feature reduction.
However, this work intends to still investigate statistical methods for
dimensionality reduction, e.g., \gls{PCA}. This could be beneficial in
prediction of burnup or other reactor parameters, or could be useful in other
ways, such as visualization or discovering new correlations among \gls{SNF}
properties as new reactor technologies are deployed.
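As a reminder of the mechanics, \gls{PCA} amounts to retaining the first $k$
principal components of the centered data matrix $X$, i.e., the rank-$k$
truncation $X \approx U_k \Sigma_k V_k^{T}$ of its singular value
decomposition, with $k$ chosen to capture most of the variance in the
measured \gls{SNF} compositions.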
|
{"hexsha": "165b9e4c13e04a341689b6dfef5a6b5e6e5742d8", "size": 4994, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "document/chapters/litrev/stats4nf.tex", "max_stars_repo_name": "opotowsky/prelim", "max_stars_repo_head_hexsha": "100a27fb533beee1c985ad72ae70bdb646b04bab", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "document/chapters/litrev/stats4nf.tex", "max_issues_repo_name": "opotowsky/prelim", "max_issues_repo_head_hexsha": "100a27fb533beee1c985ad72ae70bdb646b04bab", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "document/chapters/litrev/stats4nf.tex", "max_forks_repo_name": "opotowsky/prelim", "max_forks_repo_head_hexsha": "100a27fb533beee1c985ad72ae70bdb646b04bab", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 60.9024390244, "max_line_length": 79, "alphanum_fraction": 0.8155786944, "num_tokens": 1169}
|
import pandas as pd
import numpy as np
from pathlib import Path
from data_params import Data
import matplotlib.pyplot as plt
import matplotlib
# Scan system fonts (presumably to make Arial discoverable for the plots below).
matplotlib.font_manager.findSystemFonts(fontpaths=None, fontext='ttf')
import seaborn as sns
import pycountry
def coef_bar_plot(data, y, filename, fig_size, fig_dpi):
    """Draw a horizontal bar plot of the `coefs` column against `y` and save it to disk."""
    plt.figure(figsize=fig_size)
    plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['font.sans-serif'] = 'Arial'
    coef_plot = sns.barplot(y=y, x="coefs", data=data, palette="Greens_r")
    coef_plot.set(xlabel="")
    coef_plot.set(ylabel="")
    coef_plot.figure.tight_layout()
    fig = coef_plot.get_figure()
    fig.savefig(filename, dpi=fig_dpi)
    plt.close()
    return None
# %% ###########
data_par = Data()
cols_process = data_par.cols_process
cols_output = data_par.cols_output
valid_status = data_par.valid_status
dir_to_saved_data = data_par.dir_to_saved_data
dir_to_query_data = data_par.dir_to_query_data
path_to_training_data = data_par.path_to_training_data
stat_name_select = data_par.stat_name_select
predict = False
stats_desc = pd.read_csv(Path(dir_to_saved_data, "coefs_stats_df_desc.csv"))
stats_loanuse = pd.read_csv(Path(dir_to_saved_data, "coefs_stats_df_loanuse.csv"))
stats_tags = pd.read_csv(Path(dir_to_saved_data, "coefs_stats_df_tags.csv"))
words_desc = pd.read_csv(Path(dir_to_saved_data, "coefs_tfidf_df_desc.csv")).head(10)
words_loanuse = pd.read_csv(Path(dir_to_saved_data, "coefs_tfidf_df_loanuse.csv")).head(10)
words_tags = pd.read_csv(Path(dir_to_saved_data, "coefs_tfidf_df_tags.csv")).head(10)
loan_amount = pd.read_csv(Path(dir_to_saved_data, "coefs_df_loan_amount.csv")).head(10)
funding_days = pd.read_csv(Path(dir_to_saved_data, "coefs_df_funding_days.csv")).head(10)
was_translated = pd.read_csv(Path(dir_to_saved_data, "coefs_df_was_translated.csv")).head(10)
sector_code = pd.read_csv(Path(dir_to_saved_data, "coefs_df_sector_code.csv")).head(10)
sector_code['sector_code_names'] = sector_code['sector_code_names'].str[3:]
country_code = pd.read_csv(Path(dir_to_saved_data, "coefs_df_country_code.csv")).head(10)
country_code['country_code_names'] = country_code['country_code_names'].str[3:]
country_name_list = []
country_code_list = country_code["country_code_names"].values.tolist()
for code in country_code_list:
    # pycountry returns None for unknown alpha-2 codes; fall back to the raw code.
    country = pycountry.countries.get(alpha_2=code)
    country_name_list.append(country.name if country else code)
country_code["country_names"] = country_name_list
original_language = pd.read_csv(Path(dir_to_saved_data, "coefs_df_original_language.csv")).head(10)
original_language['original_language_names'] = original_language['original_language_names'].str[3:]
numerical_vars = pd.DataFrame(
{
"feature_names": [
"loan_amount",
"funding_days",
"was_translated",],
"coefs": [
loan_amount["coefs"][0],
funding_days["coefs"][0],
was_translated["coefs"][0],]})
numerical_vars.sort_values("coefs", inplace=True, ascending=False)
numerical_vars
sns.set_style(
"white",
{
"axes.spines.bottom": False,
"axes.spines.top": False,
"axes.spines.right": False,
"axes.spines.left": False,
"axes.grid": False,},)
fig_dpi = 400
fig_size = (4.5, 2.5)
###########################
coef_bar_plot(stats_desc, "stats_names", "top_features_stats_desc.png", fig_size, fig_dpi)
###########################
coef_bar_plot(stats_loanuse, "stats_names", "top_features_stats_loanuse.png", fig_size, fig_dpi)
###########################
coef_bar_plot(stats_tags, "stats_names", "top_features_stats_tags.png", fig_size, fig_dpi)
###########################
coef_bar_plot(words_desc, "tfidf_names", "top_features_words_desc.png", fig_size, fig_dpi)
###########################
coef_bar_plot(words_loanuse, "tfidf_names", "top_features_words_loanuse.png", fig_size, fig_dpi)
###########################
coef_bar_plot(words_tags, "tfidf_names", "top_features_words_tags.png", fig_size, fig_dpi)
###########################
coef_bar_plot(numerical_vars, "feature_names", "top_features_numerical_vars.png", fig_size, fig_dpi)
###########################
coef_bar_plot(sector_code, "sector_code_names", "top_features_sector_code.png", fig_size, fig_dpi)
###########################
coef_bar_plot(country_code, "country_names", "top_features_country_code.png", fig_size, fig_dpi)
###########################
coef_bar_plot(original_language, "original_language_names", "top_features_original_language.png", fig_size, fig_dpi)
|
{"hexsha": "0a7c59f9bea01886aee184f7e5bc1face490eccb", "size": 4501, "ext": "py", "lang": "Python", "max_stars_repo_path": "top_features.py", "max_stars_repo_name": "far-from-normal/Kiva-Insight", "max_stars_repo_head_hexsha": "dfcebd4d766b007aeaa3ff6a54f4a28141006f2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-24T14:45:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-27T10:57:26.000Z", "max_issues_repo_path": "top_features.py", "max_issues_repo_name": "far-from-normal/Kiva-Insight", "max_issues_repo_head_hexsha": "dfcebd4d766b007aeaa3ff6a54f4a28141006f2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "top_features.py", "max_forks_repo_name": "far-from-normal/Kiva-Insight", "max_forks_repo_head_hexsha": "dfcebd4d766b007aeaa3ff6a54f4a28141006f2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.6759259259, "max_line_length": 116, "alphanum_fraction": 0.7167296156, "include": true, "reason": "import numpy", "num_tokens": 1097}
|
import os
import numpy as np

# The original code concatenated os.getcwd() with a relative path without a
# separator; os.path.join is used here so the paths resolve as written.
data_dir = os.path.join(os.getcwd(), "..", "..", "data")

# allow_pickle=True is needed by recent NumPy versions to load object arrays.
relationsDic = np.load(os.path.join(data_dir, "relationDic.npy"), allow_pickle=True)

# Build an entity lookup (first CSV column -> second).
entityDic = dict()
with open(os.path.join(data_dir, "entity.csv")) as entity_file:
    for line in entity_file:
        triple = line.strip().split(",")
        entityDic[triple[0]] = triple[1]

# Keep only relation types that occur more than 10 times.
relationsSet = set()
for item in relationsDic:
    if int(item[1]) > 10:
        relationsSet.add(item[0])

# Frequent relations stay as graph edges; rare ones become entity properties.
with open(os.path.join(data_dir, "relation1.csv"), "w") as relationFile, \
        open(os.path.join(data_dir, "prop1.txt"), "w") as propFile, \
        open(os.path.join(data_dir, "relation.csv")) as source_file:
    relationFile.write(":START_ID,:END_ID,:TYPE\n")
    for line in source_file:
        triple = line.strip().split(",")
        if triple[2] in relationsSet:
            relationFile.write(line)
        else:
            propFile.write(entityDic[triple[0]] + "\t" +
                           triple[2] + "\t" + entityDic[triple[1]] + "\n")
|
{"hexsha": "1aa531b30080489a97325926edc52ff0aa78caf9", "size": 854, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/dbprocess/cleanRelation.py", "max_stars_repo_name": "AutoKnowledge/AutoKnowledge", "max_stars_repo_head_hexsha": "1a9fce1449d9605dc0289ab13736d073453ed102", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-24T10:22:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-24T10:22:19.000Z", "max_issues_repo_path": "src/dbprocess/cleanRelation.py", "max_issues_repo_name": "AutoKnowledge/AutoKnowledge", "max_issues_repo_head_hexsha": "1a9fce1449d9605dc0289ab13736d073453ed102", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dbprocess/cleanRelation.py", "max_forks_repo_name": "AutoKnowledge/AutoKnowledge", "max_forks_repo_head_hexsha": "1a9fce1449d9605dc0289ab13736d073453ed102", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5833333333, "max_line_length": 66, "alphanum_fraction": 0.6135831382, "include": true, "reason": "import numpy", "num_tokens": 227}
|
"""Docs"""
import imageio
import numpy as np
from scipy.stats import entropy
import cv2
from _cpbd import compute
class Image:
    """Wrap an image file and expose simple quality features (sharpness,
    colorfulness, luminance, and information entropy)."""
    def __init__(self, path: str = "") -> None:
self.path = path
self.image = cv2.imread(self.path)
self.image_gray = cv2.imread(self.path, 0)
self.image_xyz = cv2.cvtColor(self.image, cv2.COLOR_BGR2XYZ)
self.image_hsl = imageio.imread(self.path, pilmode="L")
self.height, self.width = self.image.shape[:2]
    def get_sharpness(self) -> float:
        """Estimate perceptual sharpness via the CPBD metric (the _cpbd module)."""
sharp_value = compute(self.image_hsl)
return round(sharp_value, 4)
    def get_colorfulness(self):
        """Compute the Hasler-Susstrunk colorfulness metric of the image."""
blue, green, red = cv2.split(self.image.astype("float"))
rg = np.absolute(red - green)
yb = np.absolute(0.5 * (red + green) - blue)
rbMean, rbStd = (np.mean(rg), np.std(rg))
ybMean, ybStd = (np.mean(yb), np.std(yb))
stdRoot = np.sqrt((rbStd**2) + (ybStd**2))
meanRoot = np.sqrt((rbMean**2) + (ybMean**2))
return round(stdRoot + (0.3 * meanRoot), 4)
    def get_avg_luminance(self):
        """Average luminance of the dominant brightness band of the Y channel."""
        _, y, _ = cv2.split(self.image_xyz)
        y = y / 255
        # Partition pixels into dark / normal / bright luminance bands.
        dark = y[y < 0.135]
        normal = y[(y >= 0.135) & (y <= 0.615)]
        bright = y[y > 0.615]
        # Average over whichever band dominates the image.
        if dark.size > normal.size and dark.size > bright.size:
            luminance = sum(dark) / len(dark)
        elif normal.size > dark.size and normal.size > bright.size:
            luminance = sum(normal) / len(normal)
        else:
            luminance = sum(bright) / len(bright)
        return round(luminance, 4)
    def get_avg_information(self):
        """Average information entropy of the grayscale intensity histogram."""
histogram = cv2.calcHist([self.image_gray], [0], None, [256], [0, 256])
prob_density = histogram / (self.height * self.width)
info_entropy = float(entropy(prob_density))
aux_var = 0
for i in range(3):
aux_var += (info_entropy**2) * i
avg_infor_entropy = ((1 / 3) * aux_var) ** 0.5
return round(avg_infor_entropy, 4)
    def get_features(self):
        """Return [sharpness, avg luminance, avg information, colorfulness]."""
features = [
self.get_sharpness(),
self.get_avg_luminance(),
self.get_avg_information(),
self.get_colorfulness(),
]
return features
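
# A minimal usage sketch (hedged; "sample.jpg" is a placeholder path):
#
#   img = Image("sample.jpg")
#   sharpness, luminance, information, colorfulness = img.get_features()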
|
{"hexsha": "671c6f5199dad4e33d5012305eec215bb9cced77", "size": 2353, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature_extraction.py", "max_stars_repo_name": "danrimr/Avance", "max_stars_repo_head_hexsha": "05683f4b9897210331bfb75f207f1c6d195b63d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "feature_extraction.py", "max_issues_repo_name": "danrimr/Avance", "max_issues_repo_head_hexsha": "05683f4b9897210331bfb75f207f1c6d195b63d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feature_extraction.py", "max_forks_repo_name": "danrimr/Avance", "max_forks_repo_head_hexsha": "05683f4b9897210331bfb75f207f1c6d195b63d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7848101266, "max_line_length": 79, "alphanum_fraction": 0.5580110497, "include": true, "reason": "import numpy,from scipy", "num_tokens": 645}
|
# %%
import sys
sys.path.append("../../src")
sys.path.append("../")
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
from __init__ import *
import datetime
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from lorenz import Lorenz
from training import TrainModel, get_callbacks
from basic_params import params
from basic_run import generate_data, train, get_hyperparameter_list
params['case'] = 'h2v_evolution'
params['model'] = 'lorenz'
params['input_dim'] = 128
params['tend'] = 60
params['dt'] = 0.001
params['n_ics'] = 50
params['batch_size'] = 500
params['coefficient_initialization'] = 'true'
params['max_epochs'] = 3000
params['loss_weight_x0'] = 0.2
params['loss_weight_sindy_x'] = 0.001
params['loss_weight_sindy_z'] = 0.0001  # was 0.2, which forces the first element to be the same in x and z
params['loss_weight_sindy_regularization'] = 1e-5
params['loss_weight_integral'] = 0.0
params['svd_dim'] = None # try 7
params['scale'] = False # try true
params['widths_ratios'] = [0.5, 0.25]
params['sparse_weighting'] = None
params['normalization'] = [1/40, 1/40, 1/40]
params['data_path'] = '/home/joebakarji/delay-auto/main/examples/data/'
params['loss_weight_layer_l2'] = 0.0
params['loss_weight_layer_l1'] = 0.0
params['use_bias'] = True
params['learning_rate'] = 5e-4
# Generate data
data = generate_data(params)
trainer = TrainModel(data, params)
## Lock parameters
encdec_patience = 10
trainer.params['case'] = 'h2v_evolution'
trainer.params['save_checkpoints'] = True
trainer.params['patience'] = 40
trainer.params['fix_coefs'] = True
trainer.params['trainable_auto'] = True
trainer.savename = trainer.get_name()
print(trainer.savename)
train_data, test_data = trainer.get_data()
trainer.save_params()
print(trainer.params)
# Get model AFTER setting parameters
trainer.model = trainer.get_model()
## Get SVD output
reduced_dim = 3
U, s, VT = np.linalg.svd(data.x.T, full_matrices=False)
v = np.matmul(VT[:reduced_dim, :].T, np.diag(s[:reduced_dim]))
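# v holds each sample's coordinates along the three leading singular directions
# (principal-component scores); below they serve as target latent trajectories
# for pre-training the encoder and decoder.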
# Create directory and file name
os.makedirs(os.path.join(trainer.params['data_path'], trainer.savename), exist_ok=True)
os.makedirs(os.path.join(trainer.params['data_path'], trainer.savename, 'checkpoints'), exist_ok=True)
########################
# ENCODER Checkpoints
checkpoint_path_encoder = os.path.join(trainer.params['data_path'], trainer.savename, 'checkpoints', 'cp-enc-{epoch:04d}.ckpt')
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path_encoder,
verbose=1,
save_weights_only=True,
save_freq=params['save_freq'] * int(trainer.params['tend']/trainer.params['dt']*trainer.params['n_ics']/ \
trainer.params['batch_size'] * trainer.params['train_ratio']))
# ENCODER TRAINING
optimizer = tf.keras.optimizers.Adam(lr=trainer.params['learning_rate'])
trainer.model.encoder.compile(optimizer=optimizer, loss='mse')
history_encoder = trainer.model.encoder.fit(
x=data.x, y=v,
batch_size=trainer.params['batch_size'],
epochs=20,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=encdec_patience, monitor='loss'), cp_callback],
shuffle=True)
########################
# DECODER Checkpoints
checkpoint_path_decoder = os.path.join(trainer.params['data_path'], trainer.savename, 'checkpoints', 'cp-dec-{epoch:04d}.ckpt')
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path_decoder,
verbose=1,
save_weights_only=True,
save_freq=trainer.params['save_freq'] * int(trainer.params['tend']/trainer.params['dt']*trainer.params['n_ics']/ \
trainer.params['batch_size'] * trainer.params['train_ratio']))
# DECODER TRAINING
trainer.model.decoder.compile(optimizer=optimizer, loss='mse')
history_decoder = trainer.model.decoder.fit(
x=v, y=data.x,
batch_size=trainer.params['batch_size'],
epochs=20,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=encdec_patience, monitor='loss'), cp_callback],
shuffle=True)
########################
# FULL MODEL Checkpoints
checkpoint_path = os.path.join(trainer.params['data_path'], trainer.savename, 'checkpoints', 'cp-{epoch:04d}.ckpt')
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
save_freq=trainer.params['save_freq'] * int(trainer.params['tend']/trainer.params['dt']*trainer.params['n_ics']/ \
trainer.params['batch_size'] * trainer.params['train_ratio']))
# Build model and fit
trainer.model.compile(optimizer=optimizer, loss='mse')
callback_list = get_callbacks(trainer.params, trainer.savename)
trainer.history = trainer.model.fit(
x=train_data, y=train_data,
batch_size=trainer.params['batch_size'],
epochs=trainer.params['max_epochs'],
validation_data=(test_data, test_data),
callbacks=callback_list,
shuffle=True)
# Save locked model
prediction = trainer.model.predict(test_data)
trainer.save_results(trainer.model)
|
{"hexsha": "0dca4c00b16a8a43c4ac482259bd3a1e77577e02", "size": 5273, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/testcases/h2z_evolution.py", "max_stars_repo_name": "josephbakarji/deep-delay-autoencoder", "max_stars_repo_head_hexsha": "eed23447f930aac140b0f37b888ccab688aa6294", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-08T20:26:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T21:22:00.000Z", "max_issues_repo_path": "examples/testcases/h2z_evolution.py", "max_issues_repo_name": "josephbakarji/deep-delay-autoencoder", "max_issues_repo_head_hexsha": "eed23447f930aac140b0f37b888ccab688aa6294", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/testcases/h2z_evolution.py", "max_forks_repo_name": "josephbakarji/deep-delay-autoencoder", "max_forks_repo_head_hexsha": "eed23447f930aac140b0f37b888ccab688aa6294", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-28T21:22:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T21:22:23.000Z", "avg_line_length": 35.1533333333, "max_line_length": 130, "alphanum_fraction": 0.6884126683, "include": true, "reason": "import numpy", "num_tokens": 1290}
|
!===============================================================================
! One of Andy Nowacki's Fortran utility modules for dealing with seismic
! anisotropy and other problems.
!
! Andy Nowacki <andy.nowacki@bristol.ac.uk>
!
! See the file LICENCE for licence details.
!===============================================================================
! Module containing spherical geometry helper functions and subroutines.
! Andy Nowacki, University of Bristol
! andy.nowacki@bristol.ac.uk
!
! History:
! 2011-04-12: Added sphere_sample subroutine to return an array of points
! which evenly sample a sphere.
! 2011-07-18: Added routines to find Earth radial direction
! 2011-11-08: Added sph_poly_inout: determines if point is inside or outside
! a set of points (ordered) on a sphere.
!
!===============================================================================
module spherical_geometry
implicit none
! ** size constants
integer, parameter, private :: i4 = selected_int_kind(9) ! long int
integer, parameter, private :: r4 = selected_real_kind(6,37) ! SP
integer, parameter, private :: r8 = selected_real_kind(15,307) ! DP
! ** precision selector
integer, parameter, private :: rs = r8
! ** maths constants and other useful things
real(rs), parameter, private :: pi = 3.141592653589793238462643_rs
real(rs), parameter, private :: pi2 = pi/2._rs
real(rs), parameter, private :: twopi = 2._rs*pi
real(rs), parameter, private :: to_rad = pi/180._rs   ! degrees -> radians
real(rs), parameter, private :: to_deg = 180._rs/pi   ! radians -> degrees
real(rs), parameter, private :: to_km = 111.194926644559_rs  ! degrees -> km at the surface
real(rs), parameter, private :: big_number = 1.e37_rs
real(rs), parameter, private :: angle_tol = 1.e-5_rs
! Random number sampler state
logical, save, private :: rng_initialised = .false.
contains
!------------------------------------------------------------------------------
function delta(lon1_in,lat1_in,lon2_in,lat2_in,degrees)
!------------------------------------------------------------------------------
! delta returns the angular distance between two points on a sphere given
! the lon and lat of each, using the atan2 form of the great-circle distance
! formula, which is numerically robust at both small and antipodal separations
!
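! Example (illustrative): a quarter of the way round the equator:
!    d = delta(0._rs, 0._rs, 90._rs, 0._rs, degrees=.true.)   ! d = 90.0
!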
implicit none
real(rs) :: delta,lat1,lon1,lat2,lon2
real(rs),intent(in) :: lat1_in,lon1_in,lat2_in,lon2_in
logical,optional :: degrees
lat1 = lat1_in; lon1 = lon1_in
lat2 = lat2_in; lon2 = lon2_in
if (present(degrees)) then
if (degrees) then
lat1=lat1*pi/1.8D2 ; lon1=lon1*pi/1.8D2
lat2=lat2*pi/1.8D2 ; lon2=lon2*pi/1.8D2
endif
endif
delta=atan2( sqrt( (cos(lat2)*sin(lon2-lon1))**2 + (cos(lat1)*sin(lat2) - &
sin(lat1)*cos(lat2)*cos(lon2-lon1))**2) , &
sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(lon2-lon1))
if (present(degrees)) then
if (degrees) delta = delta * 1.8D2/pi
endif
return
end function delta
!==============================================================================
!------------------------------------------------------------------------------
! function dist_vincenty(lon1_in,lat1_in,lon2_in,lat2_in,R,&
! a_in,b_in,azi,baz,degrees)
subroutine dist_vincenty(lon1_in,lat1_in,lon2_in,lat2_in,dist,R,a_in,b_in,degrees)
!------------------------------------------------------------------------------
! dist_vincenty uses the Vincenty algorithm to find accurate great-circle
! distances on a flattened sphere (ellipsoid). Input is in radians unless
! degrees=.true. is supplied. Distances are on the surface of the Earth
! unless R, the fractional radius of the points on the Earth ellipsoid,
! is given. Also computes the azimuth and backazimuth, if asked for
! (currently commented out).
! From: http://www.movable-type.co.uk/scripts/latlong-vincenty.html
implicit none
real(rs),intent(in) :: lon1_in,lat1_in,lon2_in,lat2_in
real(rs),intent(in),optional :: a_in,b_in,R
real(rs) :: conversion
logical,intent(in),optional :: degrees
real(rs),intent(out) :: dist
real(rs) :: a,b,f,lon1,lat1,lon2,lat2,L,u1,u2,lambda1,lambda2,&
sin_s,cos_s,s,sin_a,cos_a_sq,cos_2sm,C,u_2,&
big_A,big_B,ds
logical :: isnan
! real,intent(out),optional :: azi,baz
write(*,'(a)') 'subroutine dist_vincenty is not working yet.'
stop
! Convert from degrees if necessary
conversion = 1.d0
if (present(degrees)) then
if (degrees) conversion = pi/1.8d2
endif
! Get the inputs
lon1 = conversion * (lon1_in) ; lat1 = conversion * (lat1_in)
lon2 = conversion * (lon2_in) ; lat2 = conversion * (lat2_in)
! Default ellipsoidal shape is WGS-84: override these if both a and b are supplied
a = 6378.137d0 ; b = 6356.752314245d0
if (present(a_in).and.present(b_in)) then
a = a_in ; b = b_in
endif
! Scale by the fractional radius of the ellipsoid if present
if (present(R)) then
a = a*R ; b = b*R
endif
f = (a-b)/a
! Constants
L = lon2 - lon1
u1 = atan((1-f)*tan(lat1))
u2 = atan((1-f)*tan(lat2))
! Starting guesses: lambda1 is set far from lambda2 so that the
! convergence loop below executes at least once
lambda1 = big_number
lambda2 = L
! Iterate until successive values of lambda agree to within tolerance
do while (abs(lambda2-lambda1) > 1.d-12)
sin_s = sqrt( (cos(u2)*sin(lambda2))**2 + (cos(u1)*sin(u2) - &
sin(u1)*cos(u2)*cos(lambda2))**2 )
if (sin_s==0.d0) then
dist = 0.d0
return
endif
cos_s = sin(u1)*sin(u2) + cos(u1)*cos(u2)*cos(lambda2)
s = atan2(sin_s,cos_s)
sin_a = cos(u1)*cos(u2)*sin(lambda2)/sin_s
cos_a_sq = 1.d0 - sin_a**2
cos_2sm = cos_s - 2.d0*sin(u1)*sin(u2)/(cos_a_sq)
if (isnan(cos_2sm)) cos_2sm = 0.d0
C = (f/16.d0)*(cos_a_sq)*(4.d0+f*(4.d0-3.d0*cos_a_sq))
lambda1 = lambda2
lambda2 = L+(1.d0-C)*f*sin_a*(s+C*sin_s*(cos_2sm+C*cos_s*(-1.d0+2.d0*cos_2sm**2)))
enddo
u_2 = cos_a_sq*(a**2-b**2)/(b**2)
big_A = 1.d0 + (u_2/16384.d0)*(4096.d0+u_2*(-768.d0+u_2*(320.d0-175.d0*u_2)))
big_B = (u_2/1024.d0)*(256.d0+u_2*(-128.d0+u_2*(74.d0-47.d0*u_2)))
ds = big_B*sin_s*(cos_2sm+(big_B/4.d0)*(cos_s*(-1.d0+2.d0*cos_2sm**2)-&
(big_B/6.d0)*cos_2sm*(-3.d0+4.d0*sin_s**2)*(-3.d0+4.d0*cos_2sm**2)))
dist = b*big_A*(s-ds)
! if (present(azi)) &
! azi = atan2(cos(u2)*sin(lambda2), &
! cos(u1)*sin(u2)-sin(u1)*cos(u2)*cos(lambda2))
! if (present(baz)) &
! baz = atan2(cos(u1)*sin(lambda2), &
! -sin(u1)*cos(u2)+cos(u1)*sin(u2)*cos(lambda2))
write(*,*) 'lambda =',lambda2
end subroutine dist_vincenty
!==============================================================================
!------------------------------------------------------------------------------
! function test_dist_vincenty(lon1_in,lat1_in,lon2_in,lat2_in,a_in,b_in,R,degrees)
!------------------------------------------------------------------------------
! implicit none
!
! real(rs),intent(in) :: lon1_in,lon2_in,lat1_in,lat2_in
! real(rs),intent(in),optional :: a_in,b_in,R
! real(rs) :: lon1,lon2,lat1,lat2
! real(rs) :: test_dist_vincenty
! logical,intent(in),optional :: degrees
! real(rs) :: a,b,f,L,u1,u2,lambda1,lambda2,sin_s,cos_s,s,sin_a,&
! cos_a_sq,cos_2sm,C,u_2,big_A,big_B,Delta_s,dist,&
! conversion
! real(rs),parameter :: convergence_limit = 1.d-12 ! To within ~6mm
!
!
! conversion = 1.d0
! if (present(degrees)) then
! if (degrees) conversion = pi/1.8d2
! endif
!
!! Get the inputs
! lon1 = conversion * (lon1_in) ; lat1 = conversion * (lat1_in)
! lon2 = conversion * (lon2_in) ; lat2 = conversion * (lat2_in)
!
!! Default ellipsoidal shape is WGS-84: override these if both a and b are supplied
! a = 6378.137d0 ; b = 6356.752314245d0
! if (present(a_in).and.present(b_in)) then
! a = a_in ; b = b_in
! endif
!
!! Scale by the fractional radius of the ellipsoid if present
! if (present(R)) then
! a = a*R ; b = b*R
! endif
!
!! Constants
! f = (a-b)/a ;
! L = lon2 - lon1 ;
! u1 = atan2((1-f)*tan(lat1),1.d0)
! u2 = atan2((1-f)*tan(lat2),1.d0)
!
!! Starting guess for the iteration variables
! lambda1 = big_number
! lambda2 = L
!
! do while ( abs(lambda2-lambda1) > convergence_limit )
! sin_s = sqrt((cos(u2)*sin(lambda2))**2 + (cos(u1)*sin(u2)-sin(u1)*cos(u2)*cos(lambda2))**2)
! cos_s = sin(u1)*sin(u2) + cos(u1)*cos(u2)*cos(lambda2)
! s = atan2(sin_s,cos_s)
! sin_a = cos(u1)*cos(u2)*sin(lambda2)/sin_s
! cos_a_sq = 1.d0 - sin_a**2
! cos_2sm = cos_s - s*sin(u1)*sin(u2)/cos_a_sq
! if (isnan(cos_2sm)) cos_2sm = 0.d0
! C = (f/16.d0)*cos_a_sq*(4.d0+f*(4.d0-3.d0*cos_a_sq))
! lambda1 = lambda2
! lambda2 = L + (1.d0-C)*f*sin_a*(s+C*sin_a*(cos_2sm+C*cos_s*(-1.d0+2.d0*cos_2sm**2)))
! enddo
!
! u_2 = cos_a_sq*(a**2-b**2)/b**2
! big_A = 1.d0+(u_2/16384.d0)*(4096.d0+u_2*(-768.d0+u_2*(320.d0-175.d0*u_2)))
! big_B = (u_2/1024.d0)*(256.d0+u_2*(-128.d0+u_2*(74.d0-47.d0*u_2)))
! Delta_s = big_B*sin_s*(cos_2sm+(big_B/4.d0)*(cos_s*(-1.d0+2.d0*cos_2sm**2)- &
! (big_B/6.d0)*cos_2sm*(-3.d0+4.d0*sin_s**2)*(-3.d0+4.d0*cos_2sm**2)))
! test_dist_vincenty = b*big_A*(s-Delta_s) ;
!
!
! return
!
! end function test_dist_vincenty
!==============================================================================
!------------------------------------------------------------------------------
subroutine step(lon1_in,lat1_in,az_in,delta_in,lon2,lat2,degrees)
!------------------------------------------------------------------------------
! Computes the endpoint given a starting point lon,lat, azimuth and angular distance
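! Example (illustrative): stepping 90 degrees due east from the origin:
!    call step(0._rs, 0._rs, 90._rs, 90._rs, lon2, lat2, degrees=.true.)
!    ! gives lon2 = 90.0, lat2 = 0.0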
implicit none
real(rs),intent(in) :: lon1_in,lat1_in,az_in,delta_in
real(rs),intent(out) :: lon2,lat2
real(rs) :: lon1,lat1,az,delta
logical,optional,intent(in) :: degrees
logical :: deg
lon1=lon1_in ; lat1=lat1_in ; az=az_in ; delta=delta_in
deg = .false.
if (present(degrees)) deg = degrees
if (deg) then
if (delta > 360.) then
write(*,*)'spherical_geometry: step: Error: distance must be no more than 360 degrees.'
stop
else if (lon1 <-180 .or. lon1 > 180) then
write(*,*)'spherical_geometry: step: Error: longitude must be in range -180 - 180.'
stop
else if (lat1 <-90 .or. lat1 > 90) then
write(*,*)'spherical_geometry: step: Error: latitude must be in range -90 - 90.'
stop
endif
else
if (delta > twopi) then
write(*,*)'spherical_geometry: step: Error: distance must be less than 2pi radians.'
stop
else if (lon1 < -pi .or. lon1 > pi) then
write(*,*)'spherical_geometry: step: Error: longitude must be in range -pi - pi.'
stop
else if (lat1 < -pi/2.d0 .or. lat1 > pi/2.d0) then
write(*,*)'spherical_geometry: step: Error: latitude must be in range -pi/2 - pi/2.'
stop
endif
endif
if (deg) then
! Convert to radians
lon1=lon1*pi/1.8D2 ; lat1=lat1*pi/1.8D2
az=az*pi/1.8D2 ; delta=delta*pi/1.8D2
endif
! Calculate point which is delta degrees/radians from lon1,lat1 along az
lat2 = asin(sin(lat1)*cos(delta) + cos(lat1)*sin(delta)*cos(az))
lon2 = lon1 + atan2(sin(az)*sin(delta)*cos(lat1), &
cos(delta)-sin(lat1)*sin(lat2) )
if (deg) then
! Convert to degrees
lat2=1.8D2*lat2/pi ; lon2=1.8D2*lon2/pi
if(lon2>1.8D2) lon2=lon2-3.6D2 ; if(lon2<-1.8D2) lon2=lon2+3.6D2
end if
return
end subroutine step
!==============================================================================
!-------------------------------------------------------------------------------
subroutine gcp_points(lon1,lat1,lon2,lat2,ptslon,ptslat,npts,ds,n,degrees)
!-------------------------------------------------------------------------------
! Returns arrays of lon and lat points (including the end points)
! along a great circle path between two endpoints. The user must specify
! one of the separation distance, ds (degrees or radians), or the number of
! points (including the end points), n. Geographic coordinates can be in
! degrees or radians, and the points are returned in the same format. The
! arrays must be at least npts long (the number of points actually used is
! optionally returned in npts).
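!
! Example (illustrative): five evenly spaced points along the equator:
!    real(rs) :: lons(5), lats(5)
!    call gcp_points(0._rs, 0._rs, 90._rs, 0._rs, lons, lats, n=5, degrees=.true.)
!    ! lons = 0, 22.5, 45, 67.5, 90 and lats = 0 throughout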
implicit none
real(rs),intent(in) :: lon1,lat1,lon2,lat2
real(rs) :: x1,x2,y1,y2,d,ddelta,azi
real(rs),dimension(:),intent(out) :: ptslon,ptslat
integer,intent(out),optional :: npts
real(rs),intent(in),optional :: ds
integer,intent(in),optional :: n
logical,intent(in),optional :: degrees
real(rs) :: conversion
integer :: i,npoints
! Default to radians; convert from degrees if necessary
conversion = 1._rs
if (present(degrees)) then
if (degrees) conversion = pi/180._rs
endif
x1 = lon1*conversion; x2 = lon2*conversion
y1 = lat1*conversion; y2 = lat2*conversion
! Check one of ds or n is present
if (.not.present(ds) .and. .not.present(n)) then
write(*,'(a)') 'spherical_geometry: gcp_points: Error: one of ds or n must be specified'
stop
endif
! Get distance and azimuth between two points
d = delta(x1,y1,x2,y2,degrees=.false.)
azi = azimuth(x1,y1,x2,y2,degrees=.false.)
! Using a fixed distance ds (same units as coordinates)
if (present(ds)) then
if (.not.present(npts)) then
write(*,'(a)') 'spherical_geometry: gcp_points: Error: must supply npts as well as ds if using constant spacing'
stop
endif
ddelta = ds*conversion
! Check we haven't asked for too much
if (ddelta > d) then
write(*,'(a)') 'spherical_geometry: gcp_points: Error: requested point spacing is larger than distance between points'
stop
endif
! Calculate number of points
npoints = ceiling(d/ddelta) + 1
npts = npoints
! Using a fixed number of points
else
npoints = n
! Have supplied too few points
if (npoints < 2) then
write(*,'(a)') 'spherical_geometry: gcp_points: Error: n must be at least 2'
stop
! Have asked for two points--which are the end points we've supplied!
else if (npoints == 2) then
write(*,'(a)') &
'spherical_geometry: gcp_points: Warning: have asked for only two points on path, so returning end points'
ptslon(1) = x1
ptslat(1) = y1
ptslon(2) = x2
ptslat(2) = y2
ptslon(1:2) = ptslon(1:2) / conversion
ptslat(1:2) = ptslat(1:2) / conversion
return
endif
! Calculate distance between points
ddelta = d/real(npoints-1,rs)   ! npoints points span npoints-1 equal intervals
endif
! Check we have room for all the points
if (size(ptslon) < npoints .or. size(ptslat) < npoints) then
write(*,'(a)') 'spherical_geometry: gcp_points: Error: arrays to hold lon and lat points are not long enough'
stop
endif
! Fill in points
ptslon(1) = x1
ptslat(1) = y1
do i=2,npoints-1
call step(x1,y1,azi,real(i-1,rs)*ddelta,ptslon(i),ptslat(i),degrees=.false.)
enddo
ptslon(npoints) = x2
ptslat(npoints) = y2
! Convert back
ptslon(1:npoints) = ptslon(1:npoints) / conversion
ptslat(1:npoints) = ptslat(1:npoints) / conversion
end subroutine gcp_points
!===============================================================================
!------------------------------------------------------------------------------
function azimuth(lon1,lat1,lon2,lat2,degrees)
! Returns azimuth from point 1 to point 2.
! From: http://www.movable-type.co.uk/scripts/latlong.html
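! Example (illustrative): azimuth(0._rs, 0._rs, 0._rs, 90._rs, degrees=.true.)
! returns 0.0 (due north from the origin towards the pole)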
implicit none
real(rs) :: azimuth,lon1,lat1,lon2,lat2
real(rs) :: rlon1,rlat1,rlon2,rlat2,conversion
logical,optional :: degrees
conversion = 1._rs
if (present(degrees)) then
if (degrees) conversion = pi/180._rs
endif
rlon1 = conversion*lon1 ; rlon2 = conversion*lon2
rlat1 = conversion*lat1 ; rlat2 = conversion*lat2
azimuth = atan2(sin(rlon2-rlon1)*cos(rlat2) , &
cos(rlat1)*sin(rlat2) - sin(rlat1)*cos(rlat2)*cos(rlon2-rlon1) )
if (azimuth < 0) then
azimuth = azimuth+2._rs*pi
endif
azimuth = azimuth / conversion
! write(*,*)'Azimuth',azimuth
return
end function azimuth
!==============================================================================
!------------------------------------------------------------------------------
subroutine geog2cart(phi_in,theta_in,r,x,y,z,degrees)
! Returns the cartesian coordinates from geographical ones
! Theta is latitude, phi is longitude and r is radius
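! Example (illustrative): geog2cart(0._rs, 90._rs, 1._rs, x, y, z, degrees=.true.)
! places the north pole at (x,y,z) = (0,0,1)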
implicit none
real(rs),intent(in) :: theta_in,phi_in,r
real(rs),intent(out) :: x,y,z
real(rs) :: theta,phi,conversion
logical,optional,intent(in) :: degrees
conversion = 1._rs
if (present(degrees)) then
if (degrees) conversion = pi/180._rs
endif
theta = theta_in * conversion
phi = phi_in * conversion
if (theta < -pi/2._rs .or. theta > pi/2._rs) then
write(*,'(a)') 'Latitude must be in range -pi/2 - pi/2 (-90 - 90 deg).'
stop
endif
x = r * sin(pi/2._rs - theta) * cos(phi)
y = r * sin(pi/2._rs - theta) * sin(phi)
z = r * cos(pi/2._rs - theta)
return
end subroutine geog2cart
!==============================================================================
!------------------------------------------------------------------------------
subroutine sph2cart(phi_in,theta_in,r,x,y,z,degrees)
! Returns the cartesian coordinates from spherical ones
! Theta is colatitude, phi is longitude and r is radius
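! Example (illustrative): sph2cart(0._rs, 90._rs, 1._rs, x, y, z, degrees=.true.)
! places the point at (x,y,z) = (1,0,0) on the equator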
implicit none
real(rs),intent(in) :: theta_in,phi_in,r
real(rs),intent(out) :: x,y,z
real(rs) :: theta,phi
logical,optional,intent(in) :: degrees
if (present(degrees)) then
if (degrees) then
theta = theta_in * pi / 1.8d2
phi = phi_in * pi / 1.8d2
else
theta = theta_in
phi = phi_in
endif
else
theta = theta_in
phi = phi_in
endif
if (theta < 0.d0 .or. theta > pi ) then
write(*,'(a)') 'Colatitude must be in range 0--pi (0--180deg).'
stop
endif
x = r * sin(theta) * cos(phi)
y = r * sin(theta) * sin(phi)
z = r * cos(theta)
return
end subroutine sph2cart
!==============================================================================
!------------------------------------------------------------------------------
subroutine cart2geog(x,y,z,theta,phi,r,degrees)
! Returns the geographic coordinates from cartesian ones.
implicit none
real(rs),intent(in) :: x,y,z
real(rs),intent(out) :: theta,phi,r
real(rs) :: t,p,r_temp
logical,optional :: degrees
r_temp = sqrt(x**2 + y**2 + z**2)
t = acos(z/r_temp)
p = atan2(y,x)
r = r_temp
if (present(degrees)) then
if (degrees) then
theta = 90.d0 - t * 1.8d2/pi
phi = p * 1.8d2/pi
else
theta = pi/2.d0 - t
phi = p
endif
else
theta = pi/2.d0 - t
phi = p
endif
return
end subroutine cart2geog
!==============================================================================
!------------------------------------------------------------------------------
subroutine cart2sph(x,y,z,theta,phi,r,degrees)
! Returns the spherical coordinates from cartesian ones.
implicit none
real(rs),intent(in) :: x,y,z
real(rs),intent(out) :: theta,phi,r
real(rs) :: t,p,r_temp
logical,optional :: degrees
r_temp = sqrt(x**2 + y**2 + z**2)
t = acos(z/r_temp)
p = atan2(y,x)
r = r_temp
if (present(degrees)) then
if (degrees) then
theta = t * 1.8d2/pi
phi = p * 1.8d2/pi
else
theta = t
phi = p
endif
else
theta = t
phi = p
endif
return
end subroutine cart2sph
!==============================================================================
!------------------------------------------------------------------------------
function inclination(r_in,lon_in,lat_in,degrees)
! Give the inclination of a vector in cartesian coordinates, given
! the latitude and longitude.
! Inclination is measured away from the Earth radial direction and folded
! into the range 0--90°: 0 for a vertical (radial) ray and 90° for a
! horizontal ray; upward and downward rays give the same value
implicit none
real(rs),intent(in) :: r_in(3),lon_in,lat_in
real(rs) :: inclination
real(rs) :: r(3),lon,lat,radial(3),conversion,dot
logical,intent(in),optional :: degrees
! Convert to radians if necessary
if (present(degrees)) then
if (degrees) conversion = pi/1.8d2
if (.not.degrees) conversion = 1.d0
else
conversion = 1.d0
endif
lon = conversion * lon_in ; lat = conversion * lat_in
! Create the (unit) cartesian vector along the Earth radial direction
radial(1) = cos(lat)*cos(lon)
radial(2) = cos(lat)*sin(lon)
radial(3) = sin(lat)
! Make r into unit vector
r = r_in / sqrt(r_in(1)**2 + r_in(2)**2 + r_in(3)**2)
! Compute the dot product and the inclination
dot = r(1)*radial(1) + r(2)*radial(2) + r(3)*radial(3)
inclination = abs(acos(dot))
if (inclination > pi/2.d0) inclination = pi - inclination
inclination = inclination / conversion
! write(*,*)'Inclination',inclination
return
end function inclination
!==============================================================================
!-------------------------------------------------------------------------------
function xyz2radial(x,y,z)
! Given Cartesian coordinates of convention
! 1 goes through (0,0)
! 2 goes through (90E,0)
! 3 goes through N pole,
! produce the Earth radial direction in Cartesian coordinates
implicit none
real(rs),intent(in) :: x,y,z
real(rs) :: xyz2radial(3), r
r = sqrt(x**2 + y**2 + z**2)
xyz2radial(1) = x / r
xyz2radial(2) = y / r
xyz2radial(3) = z / r
return
end function xyz2radial
!===============================================================================
!-------------------------------------------------------------------------------
function lonlat2radial(lon,lat,degrees)
! Given a longitude and latitude, give the Earth radial direction in the standard
! Cartesian reference system (see e.g. xyz2radial)
! Default is input in radians: override with degrees=.true.
implicit none
real(rs),intent(in) :: lon,lat
real(rs) :: lonlat2radial(3),x,y,z,r
logical,optional,intent(in) :: degrees
logical :: degrees_in
! Check for input in degrees and pass on as appropriate
degrees_in = .false.
if (present(degrees)) degrees_in = degrees
r = 1000._rs ! Dummy radius
call geog2cart(lon, lat, r, x, y, z, degrees=degrees_in)
lonlat2radial = xyz2radial(x,y,z)
return
end function lonlat2radial
!===============================================================================
!-------------------------------------------------------------------------------
subroutine sg_sphere_sample(d,lon_out,lat_out,n_out)
! Evenly sample a sphere given an input distance d between adjacent points.
! Points are in longitude range -180 to 180.
! lon and lat are column vectors which are assigned within the subroutine.
implicit none
real(rs),intent(in) :: d
real(rs),allocatable,intent(out) :: lon_out(:), lat_out(:)
integer,intent(out) :: n_out
integer,parameter :: nmax=50000
real(rs) :: lon(nmax),lat(nmax)
real(rs) :: dlon,dlat,dlon_i,lon_i,lat_i
integer :: i,n,n_i
n = 1
dlat = d
dlon = dlat ! At the equator
lat(n)=90.; lon(n)=0.
lon_i = 0.
lat_i = lat(1) - dlat
do while (lat_i > -90.)
dlon_i = dlon/sin((90.-lat_i)*pi/180.)
n_i = nint(360./dlon_i)
do i=1,n_i
n = n + 1
if (n > nmax) then
write(0,'(a)') 'sg_sphere_sample: number of points greater than nmax.',&
   ' Change compiled limits or increase point spacing d.'
stop
endif
lat(n) = lat_i
lon(n) = lon_i
lon_i = modulo(lon_i + dlon_i, 360.)
enddo
lon_i = modulo(lon_i + dlon_i, 360.)
lat_i = lat_i - dlat
enddo
n = n + 1
lat(n)=-90. ; lon(n) = 0.
if (allocated(lon_out)) then
if (size(lon_out) /= n) then
deallocate(lon_out)
allocate(lon_out(n))
endif
else
allocate(lon_out(n))
endif
if (allocated(lat_out)) then
if (size(lat_out) /= n) then
deallocate(lat_out)
allocate(lat_out(n))
endif
else
allocate(lat_out(n))
endif
lon_out(1:n) = mod(lon(1:n) + 180., 360.) - 180.
lat_out(1:n) = lat(1:n)
n_out = n
return
end subroutine sg_sphere_sample
!===============================================================================
!-------------------------------------------------------------------------------
subroutine sphere_sample(d,lon_out,lat_out,n_out)
! Deprecated synonym for sg_sphere_sample: this wrapper subroutine is provided
! for backwards compatibility.
implicit none
real(rs), intent(in) :: d
real(rs), intent(out), allocatable, dimension(:) :: lon_out, lat_out
integer, intent(out) :: n_out
call sg_sphere_sample(d,lon_out,lat_out,n_out)
end subroutine sphere_sample
!===============================================================================
!-------------------------------------------------------------------------------
function sph_poly_inout(x,y,px,py,degrees)
! Takes in assumed-shape arrays (vectors) for points on a sphere, which must be
! ordered either clockwise or anticlockwise. The function assumes that the first
! and last points are not the same, but a closed polygon (first point repeated
! at the end) is also handled correctly.
! x,y: trial point in lon,lat
! px(:),py(:): polygon vertices in lon,lat
!
! NOTE: This algorithm won't work for sample points on the north or south poles,
! because the azimuths will always be 0 or 180, and hence the total will
! always be zero. This can be alleviated by implementing the algorithm
! described in:
! Schettino (1999). Polygon intersections in spherical
! topology: applications to plate tectonics. Computers & Geosciences, 25
! (1) 61-69. doi:10.1016/S0098-3004(98)00081-8
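!
! Example (illustrative): the origin lies inside a 20 x 20 degree box:
!    inside = sph_poly_inout(0._rs, 0._rs, &
!                (/-10._rs, 10._rs, 10._rs, -10._rs/), &
!                (/-10._rs, -10._rs, 10._rs, 10._rs/), degrees=.true.)   ! .true.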
implicit none
real(rs),intent(in) :: x,y
real(rs),intent(in),dimension(:) :: px,py
logical,intent(in),optional :: degrees
logical :: sph_poly_inout
logical :: deg
real(rs) :: conversion, s, a0, a1, da, tx, ty, tpx0, tpy0, tpx1, tpy1
integer :: i,n
real(rs),parameter :: tol = 1._rs ! Tolerance in *degrees*
! Check for same size arrays
if (size(px) /= size(py)) then
write(0,'(a)') &
'spherical_geometry: sph_poly_inout: Error: polygon coordinate vectors must be same length.'
stop
endif
! Check for degrees/radians
deg = .false.
conversion = 1._rs
if (present(degrees)) then
deg = degrees
if (degrees) conversion = pi/180._rs
endif
tx = conversion*x
ty = conversion*y
! Check for point on vertex
if (any(x == px .and. y == py)) then
write(0,'(a)') 'spherical_geometry: sph_poly_inout: point is on vertex.'
stop
endif
! Check for point on poles
if ((deg .and. (y == 90._rs .or. y == -90._rs)) .or. (.not.deg .and. (y == pi2 .or. y == -pi2))) then
write(0,'(a)') 'spherical_geometry: sph_poly_inout: point is on one of the poles.'
stop
endif
! Loop over sides and calculate sum of angles. If ~360, inside. If ~0, outside
n = size(px)
s = 0.
do i = 1,n-1
tpx0 = conversion*px(i)
tpy0 = conversion*py(i)
tpx1 = conversion*px(i+1)
tpy1 = conversion*py(i+1)
a0 = azimuth(tx,ty,tpx0,tpy0,degrees=.true.)
a1 = azimuth(tx,ty,tpx1,tpy1,degrees=.true.)
da = a1 - a0
do while (da > 180._rs)
da = da - 360._rs
enddo
do while (da < -180._rs)
da = da + 360._rs
enddo
s = s + da
enddo
! Calculate difference between last and first. da == 0 if given a closed set of points.
tpx0 = conversion*px(n)
tpy0 = conversion*py(n)
tpx1 = conversion*px(1)
tpy1 = conversion*py(1)
a0 = azimuth(tx,ty,tpx0,tpy0,degrees=.true.)
a1 = azimuth(tx,ty,tpx1,tpy1,degrees=.true.)
da = a1 - a0
do while (da > 180._rs)
da = da - 360._rs
enddo
do while (da < -180._rs)
da = da + 360._rs
enddo
s = s + da
! write(*,*) s
! Test for in or out
if (360._rs - abs(s) <= tol) then
sph_poly_inout = .true.
else
sph_poly_inout = .false.
endif
return
end function sph_poly_inout
!===============================================================================
!===============================================================================
function sg_torad(a)
!===============================================================================
! Convert from degrees to radians
implicit none
real(rs), intent(in) :: a
real(rs) :: sg_torad
sg_torad = a*to_rad
end function sg_torad
!-------------------------------------------------------------------------------
!===============================================================================
function sg_todeg(a)
!===============================================================================
! Convert from radians to degrees
implicit none
real(rs), intent(in) :: a
real(rs) :: sg_todeg
sg_todeg = a*to_deg
end function sg_todeg
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_project_to_gcp(long,latg,lonp,latp,lon,lat,degrees)
!===============================================================================
! Using the pole to a great circle path about a sphere with coordinates long,
! latg, project a point onto that path (point at lonp,latp)
! Input is assumed to be in radians.
implicit none
real(rs), intent(in) :: long,latg,lonp,latp
real(rs), intent(out) :: lon,lat
logical, optional, intent(in) :: degrees
real(rs), dimension(3) :: g,p,gp,pp
real(rs) :: r
logical :: deg
deg = .false.
if (present(degrees)) deg = degrees
! Convert to vectors
g = sg_lonlat2vec(long,latg,degrees=deg)
p = sg_lonlat2vec(lonp,latp,degrees=deg)
! Check whether the point already lies on the gcp
if (abs(dot_product(g,p)) < angle_tol) then
call cart2geog(p(1),p(2),p(3),lat,lon,r,degrees=deg)
return
endif
! Compute the pole to the gcp containing g and the point
gp = sg_cross_prod(g,p)
! Check for the point and the pole being the same--we can't handle this
if (sqrt(gp(1)**2 + gp(2)**2 + gp(3)**2) < angle_tol) then
write(0,'(a)') 'spherical_geometry: sg_project_to_gcp: Error: point and pole to plane are the same'
stop
endif
gp = gp/sqrt(sum(gp**2)) ! Normalise to unit vector
pp = sg_cross_prod(gp,g)
! Have to swap the sign of the point if the angle between the starting and
! projected point is more than 90 deg. Everything is a unit vector.
if (acos(dot_product(p,pp)) > pi2) pp = -pp
call cart2geog(pp(1),pp(2),pp(3),lat,lon,r,degrees=deg)
end subroutine sg_project_to_gcp
!-------------------------------------------------------------------------------
!===============================================================================
function sg_cross_prod(a,b) result(c)
!===============================================================================
! Compute the cross product for two three-vectors.
implicit none
real(rs), dimension(3), intent(in) :: a,b
real(rs) :: c(3)
c = (/a(2)*b(3) - a(3)*b(2), a(3)*b(1) - a(1)*b(3), a(1)*b(2) - a(2)*b(1)/)
end function sg_cross_prod
!-------------------------------------------------------------------------------
!===============================================================================
function sg_lonlat2vec(lon,lat,degrees) result(v)
!===============================================================================
! Convert a longitude and latitude on a unit sphere to a vector.
implicit none
real(rs), intent(in) :: lon,lat
logical, optional, intent(in) :: degrees
real(rs) :: r,x,y,z,v(3)
logical :: deg
deg = .false.
if (present(degrees)) deg = degrees
r = 1._rs
call geog2cart(lon,lat,r,x,y,z,degrees=deg)
v = (/x, y, z/)
end function sg_lonlat2vec
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_gcp_from_point_azi(lon,lat,azi,lonp,latp,degrees)
!===============================================================================
! Given a point on a unit sphere and an azimuth from that point, return the
! pole to the great circle path created by that point and azimuth
implicit none
real(rs), intent(in) :: lon,lat,azi
real(rs), intent(out) :: lonp,latp
logical, optional, intent(in) :: degrees
logical :: deg
real(rs), dimension(3) :: pstart, pstep, gp
real(rs) :: r,conversion
real(rs), parameter :: step_dist = 45._rs
deg = .false.
if (present(degrees)) deg = degrees
! step_dist is in degrees: convert it to radians unless working in degrees
conversion = to_rad
if (deg) conversion = 1._rs
! Convert to a vector
pstart = sg_lonlat2vec(lon, lat, degrees=deg)
! Find a point along the gcp
call step(lon, lat, azi, step_dist*conversion, lonp, latp, degrees=deg)
pstep = sg_lonlat2vec(lonp,latp,degrees=deg)
! Find the pole to the two points
gp = sg_cross_prod(pstart,pstep)
call cart2geog(gp(1),gp(2),gp(3),latp,lonp,r,degrees=deg)
end subroutine sg_gcp_from_point_azi
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_gcp_from_points(lon1,lat1,lon2,lat2,lonp,latp,degrees)
!===============================================================================
! Given two points on a sphere (geographic coordinates), return the coordinates
! of the pole to the great circle containing the two points.
implicit none
real(rs), intent(in) :: lon1,lat1,lon2,lat2
real(rs), intent(out) :: lonp,latp
logical, optional, intent(in) :: degrees
logical :: deg
real(rs), dimension(3) :: p1,p2,g
real(rs) :: r,conversion
deg = .false.
if (present(degrees)) deg = degrees
conversion = 1._rs
if (deg) conversion = to_rad
! Check the points don't overlap
if (conversion*abs(lon1-lon2) < angle_tol .and. conversion*abs(lat1-lat2) < angle_tol) then
write(0,'(a)') 'spherical_geometry: sg_gcp_from_points: Error: two points overlap'
stop
endif
! Check the points aren't antipodal
if (abs(delta(lon1,lat1,lon2,lat2,degrees=deg) - pi/conversion) < angle_tol) then
write(0,'(a)') 'spherical_geometry: sg_gcp_from_points: Error: two points are antipodal'
stop
endif
! Convert the coordinates to vectors and compute the cross product, which
! gives the pole to the great circle
p1 = sg_lonlat2vec(lon1,lat1,degrees=deg)
p2 = sg_lonlat2vec(lon2,lat2,degrees=deg)
g = sg_cross_prod(p1,p2)
g = g/sqrt(sum(g**2))
call cart2geog(g(1),g(2),g(3),latp,lonp,r,degrees=deg)
end subroutine sg_gcp_from_points
!-------------------------------------------------------------------------------
!===============================================================================
function sg_gcp_to_azimuth(long,latg,lonp,latp,degrees) result(azi)
!===============================================================================
! Calculate the local azimuth of the great circle defined by pole (long,latg)
! at a point (lonp,latp). Default is for input/output in radians; use
! degrees=.true. for azimuth in degrees.
implicit none
real(rs), intent(in) :: long,latg,lonp,latp
real(rs) :: azi
logical, optional, intent(in) :: degrees
real(rs) :: lon,lat,conversion
conversion = 1._rs
if (present(degrees)) then
if (degrees) conversion = to_rad
endif
! Check values
if (latg < -pi2/conversion .or. latg > pi2/conversion .or. &
latp < -pi2/conversion .or. latp > pi2/conversion) then
write(0,'(a)') 'spherical_geometry: sg_gcp_to_azimuth: Error: '// &
'latitude must be in range -pi/2 to pi/2 (-90 to 90 deg)'
stop
endif
! Project the point onto the great circle
call sg_project_to_gcp(long,latg,lonp,latp,lon,lat,degrees=degrees)
! The local azimuth is pi/2 (90 deg) from the azimuth to the pole, given
! in the range 0-pi or 0-360
azi = modulo(azimuth(lon,lat,long,latg,degrees=degrees) + pi2/conversion + &
twopi/conversion, twopi/conversion)
end function sg_gcp_to_azimuth
!-------------------------------------------------------------------------------
!===============================================================================
function sg_angle_180(a) result(angle)
!===============================================================================
! Make an angle be in the range -180 to 180 deg
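! Example: sg_angle_180(270._rs) returns -90.0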
implicit none
real(rs), intent(in) :: a
real(rs) :: angle
angle = modulo(a+180._rs,360._rs) - 180._rs
end function sg_angle_180
!-------------------------------------------------------------------------------
!===============================================================================
function sg_angle_360(a) result(angle)
!===============================================================================
! Make an angle be in the range 0 to 360 deg
implicit none
real(rs), intent(in) :: a
real(rs) :: angle
angle = modulo(a,360._rs)
end function sg_angle_360
!-------------------------------------------------------------------------------
!===============================================================================
function sg_angle_diff(a,b) result(diff)
!===============================================================================
! Provide the difference between two angles, handling cases where they are
! either side of 0/360 or -180/180
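! Example: sg_angle_diff(350._rs, 10._rs) returns 20.0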
implicit none
real(rs), intent(in) :: a,b
real(rs) :: diff
diff = abs(sg_angle_360(b) - sg_angle_360(a))
if (diff > 180._rs) diff = 360._rs - diff
end function sg_angle_diff
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_initialise_rng(s)
!===============================================================================
! Initialise the random number generator. This involves seeding the RNG with
! an array of integers of a minimum length. This is 12 with gfortran 4.6.
! OPTIONAL: Supply an integer to mix in to the seed. This may be useful when
! calling in parallel to make sure the PRNG state is initialised differently.
implicit none
integer, optional, intent(in) :: s
integer :: values(8), n, i
integer, dimension(:), allocatable :: seed
if (.not. rng_initialised) then
call random_seed(size=n) ! Find minimum size of array used to seed
allocate(seed(n))
call date_and_time(values=values)
if (present(s)) values(7) = s*values(7)
seed = values(8) + values(7)*(/ (i-1, i=1,n) /)
call random_seed(put=seed)
deallocate(seed)
rng_initialised = .true.
endif
end subroutine sg_initialise_rng
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_random_points_sph(lon, colat, degrees)
!===============================================================================
! Return a set of points on a sphere randomly distributed, in spherical
! coordinates.
! From: http://mathworld.wolfram.com/SpherePointPicking.html
! (NB: They use (to me) non-standard theta=lon and phi=colat!)
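! (Drawing colat = acos(2u-1) with u uniform on [0,1] makes the density of
! points proportional to sin(colat), i.e. uniform per unit area on the sphere.)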
implicit none
real(rs), intent(out), dimension(:) :: lon, colat
logical, optional, intent(in) :: degrees
logical :: radians
! Check array lengths
if (size(lon) /= size(colat)) then
write(0,'(a)') 'spherical_geometry: sg_random_points_sph: Error: arrays ' &
// 'for lon and colat are not the same length'
stop
endif
radians = .true.
if (present(degrees)) radians = .not.degrees
call sg_initialise_rng
call random_number(colat)
colat = acos(2._rs*colat - 1._rs)
call random_number(lon)
lon = twopi*lon
if (.not.radians) then
lon = to_deg*lon
colat = to_deg*colat
endif
end subroutine sg_random_points_sph
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_random_points_geog(lon, lat, degrees)
!===============================================================================
! Return a set of points on a sphere randomly distributed, in geographical
! coordinates.
implicit none
real(rs), intent(out), dimension(:) :: lon, lat
logical, optional :: degrees
logical :: radians
radians = .true.
if (present(degrees)) radians = .not.degrees
call sg_random_points_sph(lon, lat)
lat = pi2 - lat
if (.not.radians) then
lon = to_deg*lon
lat = to_deg*lat
endif
end subroutine sg_random_points_geog
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_random_point_sph(lon, colat, degrees)
!===============================================================================
implicit none
real(rs), intent(out) :: lon, colat
logical, optional, intent(in) :: degrees
logical :: radians
real(rs), dimension(1) :: alon, acolat
radians = .true.
if (present(degrees)) radians = .not.degrees
call sg_random_points_sph(alon, acolat, degrees=.not.radians)
lon = alon(1)
colat = acolat(1)
end subroutine sg_random_point_sph
!-------------------------------------------------------------------------------
!===============================================================================
subroutine sg_random_point_geog(lon, lat, degrees)
!===============================================================================
implicit none
real(rs), intent(out) :: lon, lat
logical, optional, intent(in) :: degrees
logical :: radians
real(rs), dimension(1) :: alon, alat
radians = .true.
if (present(degrees)) radians = .not.degrees
call sg_random_points_geog(alon, alat, degrees=.not.radians)
lon = alon(1)
lat = alat(1)
end subroutine sg_random_point_geog
!-------------------------------------------------------------------------------
!===============================================================================
function sg_triangle_area(lon1i, lat1i, lon2i, lat2i, lon3i, lat3i, r, degrees, quiet)
!===============================================================================
! Return the area of a spherical triangle defined by the three geographic
! coordinates.
! Default is for input in radians; use degrees=.true. for degrees.
! INPUT:
! {lon,lat}{1,2,3} : Coordinates of corners of triangles [radians by default]
! INPUT (OPTIONAL):
! r : Radius of sphere. Area is for unit sphere by default.
! degrees : Input coordinates are in radians by default; specify
! .true. for degrees input.
! quiet : If .true., do not complain with a warning about collinear
! points.
! OUTPUT : Area of triangle. Will be in units of r, which is 1
! by default.
!
! Note: where two sides of the triangle are similar in length (and the third
! is about half the other two), l'Huilier's formula becomes unstable. To guard
! against that case, we use a different expression; see:
! http://en.wikipedia.org/wiki/Spherical_trigonometry#Area_and_spherical_excess
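!
! Example (illustrative): one octant of the unit sphere has area pi/2:
!    area = sg_triangle_area(0._rs, 0._rs, 90._rs, 0._rs, 0._rs, 90._rs, &
!                            degrees=.true.)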
real(rs), intent(in) :: lon1i, lat1i, lon2i, lat2i, lon3i, lat3i
real(rs), optional, intent(in) :: r
logical, optional, intent(in) :: degrees, quiet
real(rs) :: sg_triangle_area
real(rs) :: lon1, lat1, lon2, lat2, lon3, lat3
real(rs) :: arc1, arc2, r_in, az1, az2, c
real(rs), parameter :: tol = 2._rs*tiny(0._rs)
logical :: silent
silent = .false.
if (present(quiet)) silent = quiet
r_in = 1._rs
if (present(r)) r_in = r
lon1 = lon1i
lat1 = lat1i
lon2 = lon2i
lat2 = lat2i
lon3 = lon3i
lat3 = lat3i
if (present(degrees)) then
if (degrees) then
lon1 = to_rad*lon1i
lat1 = to_rad*lat1i
lon2 = to_rad*lon2i
lat2 = to_rad*lat2i
lon3 = to_rad*lon3i
lat3 = to_rad*lat3i
endif
endif
! Get lengths of adjacent sides to point 3
arc1 = delta(lon2, lat2, lon3, lat3, degrees=.false.)/2._rs
arc2 = delta(lon3, lat3, lon1, lat1, degrees=.false.)/2._rs
! Get the angle c which is subtended at point 3
az1 = azimuth(lon3, lat3, lon1, lat1, degrees=.false.)
az2 = azimuth(lon3, lat3, lon2, lat2, degrees=.false.)
c = abs(az1 - az2)
if (c > pi) c = twopi - c
! If the azimuths to the other two sides are the same or opposite, then
! the points are collinear
if (abs(c) < tol .or. abs(pi - c) < tol) then
sg_triangle_area = 0._rs
if (.not.silent) &
write(0,'(a)') 'sg_triangle_area: Warning: points are collinear'
return
endif
sg_triangle_area = 2._rs*atan(tan(arc1)*tan(arc2)*sin(c) &
/(1._rs + tan(arc1)*tan(arc2)*cos(c)))*r_in**2
end function sg_triangle_area
!-------------------------------------------------------------------------------
!______________________________________________________________________________
end module spherical_geometry
|
{"hexsha": "34d2cb61822fc9fb754008d53bd70908782f3ab4", "size": 47181, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "spherical_geometry.f90", "max_stars_repo_name": "JackWalpole/seismo-fortran-fork", "max_stars_repo_head_hexsha": "18ba57302ba2dbd39028e6ff55deb448d02bd919", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-06-23T05:28:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T03:46:07.000Z", "max_issues_repo_path": "spherical_geometry.f90", "max_issues_repo_name": "JackWalpole/seismo-fortran-fork", "max_issues_repo_head_hexsha": "18ba57302ba2dbd39028e6ff55deb448d02bd919", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-05-17T11:02:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-28T09:36:24.000Z", "max_forks_repo_path": "spherical_geometry.f90", "max_forks_repo_name": "JackWalpole/seismo-fortran-fork", "max_forks_repo_head_hexsha": "18ba57302ba2dbd39028e6ff55deb448d02bd919", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2015-04-15T02:55:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-20T12:20:51.000Z", "avg_line_length": 36.7739672642, "max_line_length": 130, "alphanum_fraction": 0.5236853818, "num_tokens": 12683}
|
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_inv__13_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_g2kAbsAfter Protocol Case Study*}
theory n_g2kAbsAfter_lemma_inv__13_on_rules imports n_g2kAbsAfter_lemma_on_inv__13
begin
section{*All lemmas on causal relation between inv__13*}
lemma lemma_inv__13_on_rules:
assumes b1: "r \<in> rules N" and b2: "(f=inv__13 )"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)\<or>
(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)\<or>
(r=n_n_SendReqS_j1 )\<or>
(r=n_n_SendReqEI_i1 )\<or>
(r=n_n_SendReqES_i1 )\<or>
(r=n_n_RecvReq_i1 )\<or>
(r=n_n_SendInvE_i1 )\<or>
(r=n_n_SendInvS_i1 )\<or>
(r=n_n_SendInvAck_i1 )\<or>
(r=n_n_RecvInvAck_i1 )\<or>
(r=n_n_SendGntS_i1 )\<or>
(r=n_n_SendGntE_i1 )\<or>
(r=n_n_RecvGntS_i1 )\<or>
(r=n_n_RecvGntE_i1 )\<or>
(r=n_n_ASendReqIS_j1 )\<or>
(r=n_n_ASendReqSE_j1 )\<or>
(r=n_n_ASendReqEI_i1 )\<or>
(r=n_n_ASendReqES_i1 )\<or>
(r=n_n_SendReqEE_i1 )\<or>
(r=n_n_ARecvReq_i1 )\<or>
(r=n_n_ASendInvE_i1 )\<or>
(r=n_n_ASendInvS_i1 )\<or>
(r=n_n_ASendInvAck_i1 )\<or>
(r=n_n_ARecvInvAck_i1 )\<or>
(r=n_n_ASendGntS_i1 )\<or>
(r=n_n_ASendGntE_i1 )\<or>
(r=n_n_ARecvGntS_i1 )\<or>
(r=n_n_ARecvGntE_i1 )"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_Store_i1Vsinv__13) done
}
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_AStore_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendReqS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqS_j1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEI_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqES_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_RecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvReq_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvE_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvS_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvAck_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_RecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvInvAck_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntS_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntE_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_RecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntS_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_RecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntE_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendReqIS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqIS_j1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendReqSE_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqSE_j1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqEI_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqES_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_SendReqEE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEE_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ARecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvReq_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvE_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvS_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvAck_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ARecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvInvAck_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntS_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ASendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntE_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ARecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntS_i1Vsinv__13) done
}
moreover {
assume d1: "(r=n_n_ARecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntE_i1Vsinv__13) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
{"author": "lyj238Gmail", "repo": "newParaVerifier", "sha": "5c2d49bf8e6c46c60efa53c98b0ba5c577d59618", "save_path": "github-repos/isabelle/lyj238Gmail-newParaVerifier", "path": "github-repos/isabelle/lyj238Gmail-newParaVerifier/newParaVerifier-5c2d49bf8e6c46c60efa53c98b0ba5c577d59618/examples/n_g2kAbsAfter/n_g2kAbsAfter_lemma_inv__13_on_rules.thy"}
|
function X = redo_scaling(X_scal,param)
% redo scaling (from scaled data to original data)
%
% INPUT
% X_scal: pretreated data matrix (samples x variables)
% param: output data structure from data_pretreatment routine
%
% OUTPUT
% X: data matrix (samples x variables)
%
% version 1.0 - september 2009
% Davide Ballabio
% Milano Chemometrics and QSAR Research Group
% www.disat.unimib.it/chm
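%
% Example (illustrative; assumes param was produced by the data_pretreatment routine):
%    [X_scal,param] = data_pretreatment(X,'auto');
%    X = redo_scaling(X_scal,param);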
a = param.a;
s = param.s;
m = param.m;
M = param.M;
pret_type = param.pret_type;
X = zeros(size(X_scal)); % preallocate the output matrix
if strcmp(pret_type,'cent')
for j=1:size(X_scal,2)
X(:,j) = X_scal(:,j) + a(j);
end
elseif strcmp(pret_type,'scal')
for j=1:size(X_scal,2)
X(:,j) = X_scal(:,j)*s(j);
end
elseif strcmp(pret_type,'auto')
for j=1:size(X_scal,2)
X(:,j) = X_scal(:,j)*s(j) + a(j);
end
elseif strcmp(pret_type,'rang')
for j=1:size(X_scal,2)
X(:,j) = X_scal(:,j)*(M(j) - m(j)) + m(j);
end
else
X = X_scal;
end
|
{"author": "kmansouri", "repo": "OPERA", "sha": "fcbe8024c01f49cd9498187c0ff8c5c45d6dc833", "save_path": "github-repos/MATLAB/kmansouri-OPERA", "path": "github-repos/MATLAB/kmansouri-OPERA/OPERA-fcbe8024c01f49cd9498187c0ff8c5c45d6dc833/OPERA_Source_code/redo_scaling.m"}
|
struct Pnmp1toPlm{T} <: AbstractRotation{T}
rotations::Vector{Givens{T}}
end
function Pnmp1toPlm(::Type{T}, n::Int, m::Int, α::T, β::T, γ::T) where T
G = Vector{Givens{T}}(n)
@inbounds for ℓ = 1:n
c = sqrt((2m+β+γ+2)/(ℓ+2m+β+γ+2)*(2ℓ+2m+α+β+γ+2)/(ℓ+2m+α+β+γ+2))
s = sqrt(ℓ/(ℓ+2m+β+γ+2)*(ℓ+α)/(ℓ+2m+α+β+γ+2))
G[n+1-ℓ] = Givens(ℓ, ℓ+1, c, s)
end
Pnmp1toPlm(G)
end
@inline length(P::Pnmp1toPlm) = length(P.rotations)
@inline getindex(P::Pnmp1toPlm, i::Int) = P.rotations[i]
function LAmul!(C::Pnmp1toPlm, A::AbstractVecOrMat)
@inbounds for i = 1:length(C)
mul!(C.rotations[i], A)
end
A
end
function LAmul!(A::AbstractMatrix, C::Pnmp1toPlm)
@inbounds for i = length(C):-1:1
mul!(A, C.rotations[i])
end
A
end
struct TriRotationPlan{T} <: AbstractRotation{T}
layers::Vector{Pnmp1toPlm{T}}
end
function TriRotationPlan(::Type{T}, n::Int, α::T, β::T, γ::T) where T
layers = Vector{Pnmp1toPlm{T}}(n)
@inbounds for m = 0:n-1
layers[m+1] = Pnmp1toPlm(T, n-m, m, α, β, γ)
end
TriRotationPlan(layers)
end
function LAmul!(P::TriRotationPlan, A::AbstractMatrix)
M, N = size(A)
@inbounds for m = N-1:-1:1
layer = P.layers[m]
for ℓ = (m+1):N
@simd for i = 1:length(layer)
G = layer[i]
a1, a2 = A[G.i1,ℓ], A[G.i2,ℓ]
A[G.i1,ℓ] = G.c*a1 + G.s*a2
A[G.i2,ℓ] = G.c*a2 - G.s*a1
end
end
end
A
end
if VERSION < v"0.7-"
function Base.At_mul_B!(P::TriRotationPlan, A::AbstractMatrix)
M, N = size(A)
@inbounds for m = 1:N-1
layer = P.layers[m]
for ℓ = (m+1):N
@simd for i = length(layer):-1:1
G = layer[i]
a1, a2 = A[G.i1,ℓ], A[G.i2,ℓ]
A[G.i1,ℓ] = G.c*a1 - G.s*a2
A[G.i2,ℓ] = G.s*a1 + G.c*a2
end
end
end
A
end
Base.Ac_mul_B!(P::TriRotationPlan, A::AbstractMatrix) = At_mul_B!(P, A)
else
function LinearAlgebra.lmul!(Pt::Transpose{T,<:TriRotationPlan}, A::AbstractMatrix) where T
P = parent(Pt)
M, N = size(A)
@inbounds for m = 1:N-1
layer = P.layers[m]
for ℓ = (m+1):N
@simd for i = length(layer):-1:1
G = layer[i]
a1, a2 = A[G.i1,ℓ], A[G.i2,ℓ]
A[G.i1,ℓ] = G.c*a1 - G.s*a2
A[G.i2,ℓ] = G.s*a1 + G.c*a2
end
end
end
A
end
LinearAlgebra.lmul!(Pc::Adjoint{T,<:TriRotationPlan}, A::AbstractMatrix) where T =
lmul!(transpose(parent(Pc)), A)
end
struct SlowTriangularHarmonicPlan{T} <: TriangularHarmonicPlan{T}
RP::TriRotationPlan{T}
p::NormalizedLegendreToChebyshevPlan{T}
pinv::ChebyshevToNormalizedLegendrePlan{T}
B::Matrix{T}
end
function SlowTriangularHarmonicPlan(A::Matrix{T}, α, β, γ) where T
@assert β == γ == -half(T)
@assert α == zero(T)
M, N = size(A)
n = N
RP = TriRotationPlan(T, n-1, α, β, γ)
p = plan_normleg2cheb(A)
pinv = plan_cheb2normleg(A)
B = zeros(A)
SlowTriangularHarmonicPlan(RP, p, pinv, B)
end
function LAmul!(Y::Matrix, SP::SlowTriangularHarmonicPlan, X::Matrix)
RP, p, B = SP.RP, SP.p, SP.B
copyto!(B, X)
mul!(RP, B)
M, N = size(X)
for J = 1:N
mul_col_J!!(Y, p, B, J)
end
@inbounds for J = 1:N
nrm = sqrt((2-δ(J-1,0))/π)
@simd for I = 1:M
Y[I,J] *= nrm
end
end
Y
end
if VERSION < v"0.7-"
function Base.At_mul_B!(Y::Matrix, SP::SlowTriangularHarmonicPlan, X::Matrix)
RP, pinv, B = SP.RP, SP.pinv, SP.B
copyto!(B, X)
M, N = size(X)
@inbounds for J = 1:N
nrm = sqrt(π/(2-δ(J-1,0)))
@simd for I = 1:M
B[I,J] *= nrm
end
end
for J = 1:N
mul_col_J!!(Y, pinv, B, J)
end
tri_zero_spurious_modes!(At_mul_B!(RP, Y))
end
Base.Ac_mul_B!(Y::Matrix, SP::SlowTriangularHarmonicPlan, X::Matrix) = At_mul_B!(Y, SP, X)
else
function LinearAlgebra.mul!(Y::Matrix, SPt::Transpose{T,<:SlowTriangularHarmonicPlan}, X::Matrix) where T
SP = parent(SPt)
RP, pinv, B = SP.RP, SP.pinv, SP.B
copyto!(B, X)
M, N = size(X)
@inbounds for J = 1:N
nrm = sqrt(π/(2-δ(J-1,0)))
@simd for I = 1:M
B[I,J] *= nrm
end
end
for J = 1:N
mul_col_J!!(Y, pinv, B, J)
end
    tri_zero_spurious_modes!(lmul!(transpose(RP), Y))
end
LinearAlgebra.mul!(Y::Matrix, SPc::Adjoint{T,<:SlowTriangularHarmonicPlan}, X::Matrix) where T =
mul!(Y, transpose(parent(SPc)), X)
end
|
{"hexsha": "dc647630f5efd7312e10d845d6880a913f6e18f5", "size": 4885, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TriangularHarmonics/slowplan.jl", "max_stars_repo_name": "putianyi889/FastTransforms.jl", "max_stars_repo_head_hexsha": "491716260d0b8de4aa3b9bab15c98a8f8ce59970", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-25T21:42:35.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-25T21:42:35.000Z", "max_issues_repo_path": "src/TriangularHarmonics/slowplan.jl", "max_issues_repo_name": "dlfivefifty/FastTransforms.jl", "max_issues_repo_head_hexsha": "e49710ae578dc14f25dcb49e4edec06373e37819", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TriangularHarmonics/slowplan.jl", "max_forks_repo_name": "dlfivefifty/FastTransforms.jl", "max_forks_repo_head_hexsha": "e49710ae578dc14f25dcb49e4edec06373e37819", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9142857143, "max_line_length": 109, "alphanum_fraction": 0.5132036847, "num_tokens": 1792}
|
import os
import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from src.data.dataset import ImageDataset
from src.losses.loss import PerceptualLoss, ColorLoss
class Trainer():
def __init__(self,device,dataset_dir,outputs_dir, G, D, lambda_L1, lambda_Per, lambda_Col, lambda_Con, lambda_Sty, lambda_HSV, lambda_YUV):
#datloaders
self.train_dataset = ImageDataset(root_dir=dataset_dir+"train",
image_size=256+256)
self.train_dataloader = DataLoader(self.train_dataset,
batch_size=2,
shuffle=True,
num_workers=0)
self.val_dataset = ImageDataset(root_dir=dataset_dir+"val",
image_size=512)
self.val_dataloader = DataLoader(self.val_dataset,batch_size=1,shuffle=True)
self.G = G
self.D = D
#optimizers
self.G_optimizer = optim.Adam(self.G.parameters(), lr=0.0002, betas=(0.5, 0.999))
self.D_optimizer = optim.Adam(self.D.parameters(), lr=0.0002, betas=(0.5, 0.999))
#lambda
self.lambda_L1 = lambda_L1
self.lambda_Per = lambda_Per
self.lambda_Col = lambda_Col
#device
self.device = device
#output_dir
self.output_dir = outputs_dir
#losses
self.BCE_loss = nn.BCELoss().to(self.device)
self.L1_loss = nn.L1Loss().to(self.device)
self.P_Loss = PerceptualLoss(self.device, lambda_Con, lambda_Sty)
self.C_loss = ColorLoss(self.device, lambda_HSV, lambda_YUV)
def train_fn(self):
print(len(self.train_dataloader))
for i,(input_image,target_image) in enumerate(tqdm.tqdm(self.train_dataloader)):
x = input_image.to(self.device)
y = target_image.to(self.device)
#train_discriminator
y_fake = self.G(x)
D_real = self.D(x,y)
D_fake = self.D(x,y_fake.detach())
D_real_loss = self.BCE_loss(D_real,torch.ones_like(D_real))
D_fake_loss = self.BCE_loss(D_fake,torch.zeros_like(D_fake))
#Discriminator loss
D_loss = (D_real_loss + D_fake_loss)/2
self.D.zero_grad()
D_loss.backward()
self.D_optimizer.step()
#train_generator
y_fake = self.G(x)
D_fake = self.D(x,y_fake)
G_fake_loss = self.BCE_loss(D_fake,torch.ones_like(D_fake))
G_L1_loss = self.L1_loss(y_fake,y)
            #Perceptual loss (the target image serves as both content and style reference)
            perceptual_loss = self.P_Loss.find(y,y,y_fake)
#color loss
color_loss = self.C_loss.find(y,y_fake)
#Generator loss
            G_loss = G_fake_loss + self.lambda_L1*G_L1_loss + self.lambda_Per*perceptual_loss + self.lambda_Col*color_loss
self.G.zero_grad()
G_loss.backward()
self.G_optimizer.step()
def save_examples(self,epoch):
print("saving")
for j,(input_images,target_images) in enumerate(self.val_dataloader):
            x = input_images.to(self.device)
fake_images = self.G(x)
self.G.zero_grad()
for i in range(len(fake_images)):
input_image = (np.transpose(input_images[i],(1,2,0))+1)/2
target_image = (np.transpose(target_images[i],(1,2,0))+1)/2
fake_image = np.transpose(np.array(fake_images.detach().cpu())[i],(1,2,0))
fake_image = (fake_image+1)/2
output_image = np.concatenate([input_image,target_image,fake_image],axis=1)
file_name = "image_epoch-"+str(epoch)+"_sample-"+str(i)+".png"
file_path = os.path.join(self.output_dir,file_name)
plt.imsave(file_path,output_image)
break
return
def fit(self, num_epochs):
self.G.to(self.device)
self.D.to(self.device)
self.G.train()
self.D.train()
train_hist = {}
train_hist['D_losses'] = []
train_hist['G_losses'] = []
train_hist['per_epoch_ptimes'] = []
train_hist['total_ptime'] = []
for epoch in range(num_epochs):
print(epoch)
#train
            self.train_fn()
#checkpointing
if epoch % 1 == 0:
torch.save(self.G,"Generator_model_anime_2_sketch.pth")
torch.save(self.D,"Discriminator_model_anime_2_sketch.pth")
#save examples
self.save_examples(epoch)
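# A minimal usage sketch (hypothetical, not part of the original module): G and
# D are assumed to be pix2pix-style generator/discriminator modules defined
# elsewhere in src/, and all lambda weights below are illustrative values.
#
#   trainer = Trainer("cuda:0", "dataset/", "outputs/", G, D,
#                     lambda_L1=100, lambda_Per=1, lambda_Col=1,
#                     lambda_Con=1, lambda_Sty=1, lambda_HSV=1, lambda_YUV=1)
#   trainer.fit(num_epochs=100)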
|
{"hexsha": "472864f24173b897505a0a5bcd4a445cc89d8456", "size": 4919, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/training/trainer.py", "max_stars_repo_name": "shregar1/SKETCH_TO_ANIME", "max_stars_repo_head_hexsha": "7a1eae54c93ea26201e8bc6d6580834d223c15af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/training/trainer.py", "max_issues_repo_name": "shregar1/SKETCH_TO_ANIME", "max_issues_repo_head_hexsha": "7a1eae54c93ea26201e8bc6d6580834d223c15af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-25T05:53:58.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T05:53:58.000Z", "max_forks_repo_path": "src/training/trainer.py", "max_forks_repo_name": "shregar1/SKETCH_TO_ANIME", "max_forks_repo_head_hexsha": "7a1eae54c93ea26201e8bc6d6580834d223c15af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2651515152, "max_line_length": 143, "alphanum_fraction": 0.5604797723, "include": true, "reason": "import numpy", "num_tokens": 1105}
|
# -*- coding: utf-8 -*-
"""
DENSITY MATRIX PROPAGATOR
"""
import numpy
from .dmevolution import DensityMatrixEvolution
class DMPropagator:
def __init__(self, timeaxis, ham):
self.timeaxis = timeaxis
self.ham = ham
self.Odt = self.timeaxis.data[1]-self.timeaxis.data[0]
self.dt = self.Odt
self.Nref = 1
self.Nt = self.timeaxis.length
N = self.ham.data.shape[0]
self.N = N
self.data = numpy.zeros((self.Nt,N,N),dtype=numpy.complex64)
def propagate(self,rhoi):
return self._propagate_short_exp(rhoi,L=4)
def _propagate_short_exp(self,rhoi,L=4):
"""
Short exp integration
"""
pr = DensityMatrixEvolution(self.timeaxis,rhoi)
rho1 = rhoi.data
rho2 = rhoi.data
HH = self.ham.data
indx = 1
for ii in self.timeaxis.data[1:self.Nt]:
for jj in range(0,self.Nref):
for ll in range(1,L+1):
pref = (self.dt/ll)
rho1 = -1j*pref*(numpy.dot(HH,rho1) \
- numpy.dot(rho1,HH) )
rho2 = rho2 + rho1
rho1 = rho2
pr.data[indx,:,:] = rho2
indx += 1
return pr
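# A minimal usage sketch (hypothetical; the time axis and Hamiltonian objects
# are assumed to expose the attributes used above, i.e. .data and .length):
#
#   prop = DMPropagator(timeaxis, ham)
#   rhot = prop.propagate(rho0)   # a DensityMatrixEvolution over the time axis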
|
{"hexsha": "520688a3d2297886ce04c2cbae3d906b10a6605d", "size": 1583, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantarhei/qm/propagators/dmpropagator.py", "max_stars_repo_name": "slamavl/quantarhei", "max_stars_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2016-10-16T13:26:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T11:40:52.000Z", "max_issues_repo_path": "quantarhei/qm/propagators/dmpropagator.py", "max_issues_repo_name": "slamavl/quantarhei", "max_issues_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 61, "max_issues_repo_issues_event_min_datetime": "2016-09-19T10:45:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-10T13:53:06.000Z", "max_forks_repo_path": "quantarhei/qm/propagators/dmpropagator.py", "max_forks_repo_name": "slamavl/quantarhei", "max_forks_repo_head_hexsha": "d822bc2db86152c418e330a9152e7866869776f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2016-08-30T09:09:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T03:16:35.000Z", "avg_line_length": 23.6268656716, "max_line_length": 76, "alphanum_fraction": 0.4226152874, "include": true, "reason": "import numpy", "num_tokens": 364}
|
"""
Scatterplot with marginal ticks
===============================
_thumb: .68, .32
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", color_codes=True)
# Generate a random bivariate dataset
rs = np.random.RandomState(9)
mean = [0, 0]
cov = [(1, 0), (0, 2)]
x, y = rs.multivariate_normal(mean, cov, 100).T
# Use JointGrid directly to draw a custom plot
grid = sns.JointGrid(x, y, space=0, height=6, ratio=50)
grid.plot_joint(plt.scatter, color="g")
grid.plot_marginals(sns.rugplot, height=1, color="g")
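# With ratio=50 the joint axes dominate the figure, so the marginal axes
# collapse into thin strips where the rugplot reads as tick marks.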
|
{"hexsha": "e8389ac2a21a682ceca31bd3e3e3798aace87f6f", "size": 556, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/marginal_ticks.py", "max_stars_repo_name": "berkeleyapplied/seaborn", "max_stars_repo_head_hexsha": "1b087a3a574922220d77b6219af99280905718f8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-26T14:32:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T20:55:18.000Z", "max_issues_repo_path": "examples/marginal_ticks.py", "max_issues_repo_name": "berkeleyapplied/seaborn", "max_issues_repo_head_hexsha": "1b087a3a574922220d77b6219af99280905718f8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-25T15:36:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-25T15:36:38.000Z", "max_forks_repo_path": "examples/marginal_ticks.py", "max_forks_repo_name": "berkeleyapplied/seaborn", "max_forks_repo_head_hexsha": "1b087a3a574922220d77b6219af99280905718f8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-27T05:35:26.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-27T06:39:05.000Z", "avg_line_length": 25.2727272727, "max_line_length": 55, "alphanum_fraction": 0.6744604317, "include": true, "reason": "import numpy", "num_tokens": 165}
|
Require Import
CoRN.stdlib_omissions.List Coq.Numbers.Natural.Peano.NPeano
Coq.QArith.QArith Coq.QArith.Qabs
CoRN.model.totalorder.QposMinMax
CoRN.model.metric2.Qmetric
Coq.Program.Program
CoRN.stdlib_omissions.N
CoRN.stdlib_omissions.Z
CoRN.stdlib_omissions.Q.
Set Automatic Introduction.
Open Scope Q_scope.
Definition Qsum := fold_right Qplus 0.
Definition Σ (n: nat) (f: nat -> Q) := Qsum (map f (enum n)).
(** Properties of Σ: *)
Lemma Σ_sub f g n: Σ n f - Σ n g == Σ n (fun x => f x - g x).
Proof.
unfold Σ. induction n. reflexivity.
simpl. rewrite <- IHn. ring.
Qed.
Lemma Σ_mult n f k: Σ n f * k == Σ n (Qmult k ∘ f).
Proof.
unfold Σ, Basics.compose.
induction n. reflexivity.
intros. simpl. rewrite <- IHn. ring.
Qed.
Lemma Σ_constant x n f: (forall i, (i < n)%nat -> f i == x) -> Σ n f == inject_Z (Z.of_nat n) * x.
Proof with auto.
unfold Σ. induction n. reflexivity.
simpl. intro E.
rewrite IHn...
rewrite E...
rewrite P_of_succ_nat_Zplus.
rewrite Q.Zplus_Qplus.
ring.
Qed.
Lemma Σ_const x n: Σ n (fun _ => x) == inject_Z (Z.of_nat n) * x.
Proof. apply Σ_constant. reflexivity. Qed.
Lemma Σ_S_bound_back f n: Σ (S n) f == Σ n f + f n.
Proof. unfold Σ. simpl. ring. Qed.
Lemma Σ_S_bound_front n f: Σ (S n) f == Σ n (f ∘ S) + f O.
Proof.
unfold Σ, Basics.compose.
induction n; intros; simpl in *. ring.
rewrite IHn. ring.
Qed.
Lemma Σ_S_bound_rev n f: Σ n (f ∘ S) == Σ (S n) f - f O.
Proof. rewrite Σ_S_bound_front. ring. Qed.
Lemma Σ_le f n (b: Q):
(forall x, (x < n)%nat -> f x <= b) -> Σ n f <= inject_Z (Z.of_nat n) * b.
Proof.
induction n. discriminate.
intro.
rewrite Q.S_Qplus.
change (f n + Σ n f <= (inject_Z (Z.of_nat n) + 1) * b)%Q.
assert ((inject_Z (Z.of_nat n) + 1) * b == b + inject_Z (Z.of_nat n) * b)%Q as E. ring.
rewrite E.
apply Qplus_le_compat; auto.
Qed.
Lemma Σ_abs_le f n (b: Q):
(forall x, (x < n)%nat -> Qabs (f x) <= b) -> Qabs (Σ n f) <= inject_Z (Z.of_nat n) * b.
Proof.
induction n.
discriminate.
intros.
rewrite S_Zplus.
rewrite Q.Zplus_Qplus.
change (Qabs (f n + Σ n f) <= (inject_Z (Z.of_nat n) + 1) * b)%Q.
assert ((inject_Z (Z.of_nat n) + 1) * b == b + inject_Z (Z.of_nat n) * b)%Q. ring.
rewrite H0. clear H0.
apply Qle_trans with (Qabs (f n) + Qabs (Σ n f))%Q.
apply Qabs_triangle.
apply Qplus_le_compat; auto.
Qed.
Lemma Σ_wd f g n (H: forall x, (x < n)%nat -> f x == g x):
Σ n f == Σ n g.
Proof with auto with *.
unfold Σ. induction n; simpl...
rewrite IHn... rewrite H...
Qed.
Lemma Σ_plus_bound m n f: Σ (m + n) f == Σ n f + Σ m (f ∘ plus n).
Proof with try reflexivity.
induction m; simpl; intros.
unfold Σ. simpl. ring.
do 2 rewrite Σ_S_bound_back.
rewrite Qplus_assoc.
rewrite <- IHm.
unfold Basics.compose.
replace (f (m + n)%nat) with (f (n + m)%nat)...
rewrite plus_comm...
Qed.
Lemma Σ_mult_bound n m f:
Σ (n * m) f == Σ n (fun i => Σ m (fun j => f (i * m + j)%nat)).
Proof.
induction n; intros.
reflexivity.
unfold Σ in *.
simpl.
rewrite <- IHn.
change (Σ (m + n * m) f == Σ m (fun j => f (n * m + j)%nat) + Σ (n * m) f).
rewrite Σ_plus_bound.
unfold Basics.compose.
ring.
Qed.
Lemma Σ_Qball (f g: nat -> Q) (e: Q) (n: nat):
0 <= e ->
(forall i: nat, (i < n)%nat -> Qabs (f i - g i) <= e / inject_Z (Z.of_nat n)) ->
Qball e (Σ n f) (Σ n g).
Proof with auto.
intros epos H.
apply Qball_Qabs.
destruct n. simpl. exact epos.
rewrite Σ_sub.
setoid_replace e
with (inject_Z (Z.of_nat (S n)) * (/ inject_Z (Z.of_nat (S n)) * e)).
apply Σ_abs_le.
intros ? E.
specialize (H x E).
rewrite Qmult_comm.
assumption.
unfold canonical_names.equiv, stdlib_rationals.Q_eq.
field. discriminate.
Qed.
Lemma Σ_Qball_pos_bounds (f g: nat -> Q) (e: Q) (n: positive):
(forall i: nat, (i < Pos.to_nat n)%nat -> Qball (e * (1#n)) (f i) (g i)) ->
Qball e (Σ (Pos.to_nat n) f) (Σ (Pos.to_nat n) g).
Proof with intuition.
intros.
assert (0 <= e).
{ specialize (H O (Pos2Nat.is_pos n)).
apply (msp_nonneg (msp (Q_as_MetricSpace))) in H.
rewrite <- (Qmult_0_l (1#n)) in H.
apply Qmult_le_r in H. exact H. reflexivity. }
apply Σ_Qball. exact H0. intros.
setoid_replace (e / inject_Z (Z.of_nat (nat_of_P n)))
with (e * (1#n)).
apply Qball_Qabs...
unfold canonical_names.equiv, stdlib_rationals.Q_eq.
rewrite <- Zpos_eq_Z_of_nat_o_nat_of_P...
Qed.
Lemma Qmult_Σ (f: nat -> Q) n (k: nat):
Σ n f * inject_Z (Z.of_nat k) == Σ (k * n) (f ∘ flip Nat.div k).
Proof with auto with *.
unfold Basics.compose.
rewrite mult_comm.
rewrite Σ_mult_bound.
unfold Qdiv.
rewrite Σ_mult.
apply Σ_wd.
intros.
unfold Basics.compose.
rewrite (Σ_constant (f x))...
intros.
unfold flip.
replace ((x * k + i) / k)%nat with x...
apply (Nat.div_unique (x * k + i)%nat k x i)...
Qed.
Lemma Σ_multiply_bound (n:nat) (k: positive) (f: nat -> Q):
Σ n f == Σ (Pos.to_nat k * n) (f ∘ flip Nat.div (Pos.to_nat k)) / inject_Z (Zpos k).
Proof.
rewrite <- Qmult_Σ.
rewrite <- Zpos_eq_Z_of_nat_o_nat_of_P.
field. discriminate.
Qed.
Lemma Qball_hetero_Σ (n m: positive) f g (e:Q):
(forall i: nat, (i < Pos.to_nat (n * m)%positive)%nat ->
Qball (e * (1# (n * m)%positive))
(/ inject_Z (Zpos m) * f (i / Pos.to_nat m)%nat)
(/ inject_Z (Zpos n) * g (i / Pos.to_nat n)%nat)) ->
Qball e (Σ (Pos.to_nat n) f) (Σ (Pos.to_nat m) g).
Proof.
intros.
rewrite (Σ_multiply_bound (Pos.to_nat n) m).
rewrite (Σ_multiply_bound (nat_of_P m) n).
rewrite mult_comm.
rewrite <- nat_of_P_mult_morphism.
unfold Qdiv.
rewrite Σ_mult.
rewrite Σ_mult.
apply Σ_Qball_pos_bounds.
intros.
unfold Basics.compose.
unfold flip.
auto.
Qed.
(** Σ was defined above straightforwardly in terms of ordinary lists and without
any efficiency consideration. In practice, building up a list with enum only to
break it down immediately with Qsum is wasteful, and I strongly doubt Coq
does list deforestation/fusion. Also, summing many Q's quickly yields large
numerators and denominators. Hence, we now define a faster version
which avoids the intermediate lists and uses Qred. The idea is to use
fastΣ in the actual definition of operations, and to immediately rewrite it to Σ
(using the correctness property) when doing proofs about said operations. *)
Section fastΣ.
Fixpoint fast (f: nat -> Q) (left: nat) (sofar: Q): Q :=
match left with
| O => sofar
| S n => fast f n (Qred (sofar + f n))
end.
Definition fastΣ (n: nat) (f: nat -> Q): Q := fast f n 0.
Lemma fastΣ_correct n f: fastΣ n f == Σ n f.
Proof.
intros.
rewrite <- (Qplus_0_r (Σ n f)).
unfold Σ, fastΣ.
generalize 0.
induction n; intros.
simpl. ring.
change (fast f n (Qred (q + f n)) == Qsum (map f (enum (S n))) + q).
rewrite IHn.
rewrite Qred_correct.
simpl.
ring.
Qed.
End fastΣ.
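(* An illustrative workflow sketch (not from the original file): an operation
   would be defined in terms of fastΣ for efficiency, and a proof about it
   would start with [rewrite fastΣ_correct] to switch to Σ and then reuse the
   Σ lemmas proved above. *)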
|
{"author": "coq-community", "repo": "corn", "sha": "cfbf6b297643935f0fe7e22d2b14b462bf7e3095", "save_path": "github-repos/coq/coq-community-corn", "path": "github-repos/coq/coq-community-corn/corn-cfbf6b297643935f0fe7e22d2b14b462bf7e3095/util/Qsums.v"}
|
The Andrew Donnell Tree honors the memory of Andrew Douglas Donnell. It sits near the fire circle on the western end of the arboretum.
|
{"hexsha": "d9d1fd722774da53c6b5bfb9efe355186a9ff7c0", "size": 137, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Andrew_Donnell_Tree.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Andrew_Donnell_Tree.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Andrew_Donnell_Tree.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.25, "max_line_length": 134, "alphanum_fraction": 0.795620438, "num_tokens": 31}
|
submodule (io:plasma_output) plasma_output_nc
use timeutils, only : date_filename
use nc4fortran, only: netcdf_file
implicit none (type, external)
contains
module procedure output_root_stream_mpi_nc4
!! COLLECT OUTPUT FROM WORKERS AND WRITE TO A FILE USING STREAM I/O.
!! STATE VARS ARE EXPECTED TO INCLUDE GHOST CELLS
integer :: lx1,lx2,lx3,lx2all,lx3all,isp
real(wp), dimension(1:size(ns,1)-4,1:size(ns,2)-4,1:size(ns,3)-4) :: v2avg,v3avg
real(wp), dimension(-1:size(Phiall,1)+2,-1:size(Phiall,2)+2,-1:size(Phiall,3)+2,1:lsp) :: nsall,vs1all,Tsall
real(wp), dimension(1:size(Phiall,1),1:size(Phiall,2),1:size(Phiall,3)) :: v2avgall,v3avgall,v1avgall,Tavgall,neall,Teall
real(wp), dimension(1:size(Phiall,1),1:size(Phiall,2),1:size(Phiall,3)) :: J1all,J2all,J3all
character(:), allocatable :: filenamefull
character(*), parameter :: dims4(4) = [character(7) :: 'x1', 'x2', 'x3', 'species'], &
dims3(3) = [character(2) :: 'x1', 'x2', 'x3'], &
dims23(2) = [character(2) :: 'x2', 'x3']
type(netcdf_file) :: hout
!! SYSTEM SIZES
! FIXME: should these be pulled from the grid module???
lx1=size(ns,1)-4
lx2=size(ns,2)-4
lx3=size(ns,3)-4
lx2all=size(Phiall,2)
lx3all=size(Phiall,3)
print *, 'System sizes according to Phiall: ',lx1,lx2all,lx3all
!ONLY AVERAGE DRIFTS PERP TO B NEEDED FOR OUTPUT
v2avg=sum(ns(1:lx1,1:lx2,1:lx3,1:lsp-1)*vs2(1:lx1,1:lx2,1:lx3,1:lsp-1),4)
v2avg=v2avg/ns(1:lx1,1:lx2,1:lx3,lsp) !compute averages for output.
v3avg=sum(ns(1:lx1,1:lx2,1:lx3,1:lsp-1)*vs3(1:lx1,1:lx2,1:lx3,1:lsp-1),4)
v3avg=v3avg/ns(1:lx1,1:lx2,1:lx3,lsp)
!GET THE SUBGRID DATA FROM THE WORKERS
call gather_recv(v2avg,tag%v2,v2avgall)
call gather_recv(v3avg,tag%v3,v3avgall)
call gather_recv(ns,tag%ns,nsall)
call gather_recv(vs1,tag%vs1,vs1all)
call gather_recv(Ts,tag%Ts,Tsall)
!> RADD--- NEED TO ALSO GATHER FULL GRID ELECTRODYNAMICS PARAMETERS FROM WORKERS
call gather_recv(J1,tag%J1,J1all)
call gather_recv(J2,tag%J2,J2all)
call gather_recv(J3,tag%J3,J3all)
!COMPUTE AVERAGE VALUES FOR ION PLASMA PARAMETERS
v1avgall=sum(nsall(1:lx1,1:lx2all,1:lx3all,1:lsp-1)*vs1all(1:lx1,1:lx2all,1:lx3all,1:lsp-1),4)
v1avgall=v1avgall/nsall(1:lx1,1:lx2all,1:lx3all,lsp) !compute averages for output.
Tavgall=sum(nsall(1:lx1,1:lx2all,1:lx3all,1:lsp-1)*Tsall(1:lx1,1:lx2all,1:lx3all,1:lsp-1),4)
Tavgall=Tavgall/nsall(1:lx1,1:lx2all,1:lx3all,lsp) !compute averages for output.
neall=nsall(1:lx1,1:lx2all,1:lx3all,lsp)
Teall=Tsall(1:lx1,1:lx2all,1:lx3all,lsp)
!> FIGURE OUT THE FILENAME
filenamefull = date_filename(outdir,ymd,UTsec) // '.nc'
print *, 'Output file name: ', filenamefull
call hout%initialize(filenamefull, status='new',action='w',comp_lvl=1)
call hout%write('ymd', ymd)
call hout%write('UThour',UTsec/3600._wp)
if (flagswap/=1) then
select case (flagoutput)
case (2) !output ISR-like average parameters
call hout%write('neall', neall(1:lx1,1:lx2all,1:lx3all), dims3)
call hout%write('v1avgall', v1avgall(1:lx1,1:lx2all,1:lx3all), dims3)
!output of ISR-like parameters (ne,Ti,Te,v1,etc.)
call hout%write('Tavgall', Tavgall(1:lx1,1:lx2all,1:lx3all), dims3)
call hout%write('TEall', Teall(1:lx1,1:lx2all,1:lx3all), dims3)
call hout%write('J1all', J1all(1:lx1,1:lx2all,1:lx3all), dims3)
call hout%write('J2all', J2all(1:lx1,1:lx2all,1:lx3all), dims3)
call hout%write('J3all', J3all(1:lx1,1:lx2all,1:lx3all), dims3)
call hout%write('v2avgall', v2avgall(1:lx1,1:lx2all,1:lx3all), dims3)
call hout%write('v3avgall', v3avgall(1:lx1,1:lx2all,1:lx3all), dims3)
case (3) !just electron density
print *, '!!!NOTE: Input file has selected electron density only output, make sure this is what you really want!'
call hout%write('neall', neall(1:lx1,1:lx2all,1:lx3all), dims3)
case default !output everything
print *, '!!!NOTE: Input file has selected full output, large files may result!'
call hout%write('nsall', nsall(1:lx1,1:lx2all,1:lx3all, :), dims4)
call hout%write('vs1all', vs1all(1:lx1,1:lx2all,1:lx3all, :), dims4)
!this is full output of all parameters in 3D
call hout%write('Tsall', Tsall(1:lx1,1:lx2all,1:lx3all, :), dims4)
call hout%write('J1all', J1all(1:lx1, 1:lx2all, 1:lx3all), dims3)
call hout%write('J2all', J2all(1:lx1, 1:lx2all, 1:lx3all), dims3)
call hout%write('J3all', J3all(1:lx1, 1:lx2all, 1:lx3all), dims3)
call hout%write('v2avgall', v2avgall(1:lx1, 1:lx2all, 1:lx3all), dims3)
call hout%write('v3avgall', v3avgall(1:lx1, 1:lx2all, 1:lx3all), dims3)
end select
else
!! 2D simulation
select case (flagoutput)
case (2) !averaged parameters
call hout%write('neall', neall, dims3)
call hout%write('v1avgall', v1avgall, dims3)
call hout%write('Tavgall', Tavgall, dims3)
call hout%write('TEall', Teall, dims3)
call hout%write('J1all', J1all, dims3)
! J3,J2 and V3, V2 are swapped
call hout%write('J2all', J3all, dims3)
call hout%write('J3all', J2all, dims3)
call hout%write('v2avgall', v3avgall, dims3)
call hout%write('v3avgall', v2avgall, dims3)
case (3) !electron density only output
print *, '!!!NOTE: Input file has selected electron density only output, make sure this is what you really want!'
call hout%write('neall', neall, dims3)
case default
print *, '!!!NOTE: Input file has selected full output, large files may result!'
call hout%write('nsall', nsall(1:lx1,1:lx2all,1:lx3all, :), dims4)
call hout%write('vs1all', vs1all(1:lx1,1:lx2all,1:lx3all, :), dims4)
call hout%write('Tsall', Tsall(1:lx1,1:lx2all,1:lx3all, :), dims4)
call hout%write('J1all', J1all, dims3)
!! NOTE: J3,J2 and V3, V2 are swapped in name like this
call hout%write('J2all', J3all, dims3)
call hout%write('J3all', J2all, dims3)
call hout%write('v2avgall', v3avgall, dims3)
call hout%write('v3avgall', v2avgall, dims3)
end select
end if
if (gridflag==1) then
print *, 'Writing topside boundary conditions for inverted-type grid...'
call hout%write('Phiall', Phiall(1,:,:), dims23)
else
print *, 'Writing topside boundary conditions for non-inverted-type grid...'
call hout%write('Phiall', Phiall(lx1,:,:), dims23)
end if
call hout%finalize()
end procedure output_root_stream_mpi_nc4
end submodule plasma_output_nc
|
{"hexsha": "4450b4d156ee870b30cee6aa1187c167c21b32ff", "size": 6367, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/io/plasma_output_ncdf.f90", "max_stars_repo_name": "jklenzing/gemini", "max_stars_repo_head_hexsha": "aed028fbb6f6187ca951e4cd243fdec929514b2a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/io/plasma_output_ncdf.f90", "max_issues_repo_name": "jklenzing/gemini", "max_issues_repo_head_hexsha": "aed028fbb6f6187ca951e4cd243fdec929514b2a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/io/plasma_output_ncdf.f90", "max_forks_repo_name": "jklenzing/gemini", "max_forks_repo_head_hexsha": "aed028fbb6f6187ca951e4cd243fdec929514b2a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.2974683544, "max_line_length": 121, "alphanum_fraction": 0.6899638762, "num_tokens": 2551}
|
Load LFindLoad.
From adtind Require Import goal8.
From lfind Require Import LFind.
Require Import Extraction.
Extract Inductive nat => nat [ "(O)" "S" ].
Extract Inductive list => list [ "Nil" "Cons" ].
Extraction "/home/yousef/lemmafinder/benchmark/_lfind_clam_lf_goal8_drop_Cons_assoc_34_drop_Cons/goal8_lfind_orig.ml" adtind.goal8.drop.
Success.
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal8_drop_Cons_assoc_34_drop_Cons/lfind_ml_generator.v"}
|
import numpy as np
from objects.misc.default_functions import DefaultFunctions
class Problem(DefaultFunctions):
    '''
    Parameters object. Effectively represents each problem that we want to submit to the solver.
    Refer to objects/misc/default_functions.py for the meaning of each parameter.
    '''
def __init__(self):
super().__init__()
# solver parameters
self.N = 100
self.M = 1000
self.K_pol = 3
self.K_cus = 0
self.U = 200
self.u_min, self.u_max = -10, 10
self.optimization_type = 'extensive' # gradient
self.coefficients_computation = 'ols' # ols
# control problem
self.T = 1
# controlled process
self.initial_condition = 0
self.sigma = 2
self.coeff_mu_c = 0
self.coeff_mu_x = 0
self.coeff_mu_u = 1
self.coeff_sigma_c = 0
self.coeff_sigma_x = 0
self.coeff_sigma_u = 1
# reward function
self.coeff_rr_c = 0
self.coeff_rr_x = 2
self.coeff_rr_xx = 0
self.coeff_rr_u = 0
self.coeff_rr_uu = .1
self.coeff_rr_xu = 0
# training points
self.measure_mu = 0, 1
# terminal condition
self.terminal_condition_fnc = lambda x: x ** 2
# gradient descent optimization
self.step_gradient = 1
self.epsilon_gradient = 0.0001
# bayesian regression
self.epsilon_variance = 0.01
self.smoothness = 100
self.variance_growth = 1
self.max_trust = 1
# initialization - ignore
self.custom_basis = np.array([])
self.custom_basis_expectation = np.array([])
self.custom_basis_exp_der = np.array([])
self.init_params()
|
{"hexsha": "950fd85d186081cd6c21211e8f9b654aad5a8aec", "size": 1772, "ext": "py", "lang": "Python", "max_stars_repo_path": "problems/problem.py", "max_stars_repo_name": "alessandrobalata/pyrlmc", "max_stars_repo_head_hexsha": "493d1ad5378823a9bbb032077bea2838db76602c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "problems/problem.py", "max_issues_repo_name": "alessandrobalata/pyrlmc", "max_issues_repo_head_hexsha": "493d1ad5378823a9bbb032077bea2838db76602c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "problems/problem.py", "max_forks_repo_name": "alessandrobalata/pyrlmc", "max_forks_repo_head_hexsha": "493d1ad5378823a9bbb032077bea2838db76602c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8484848485, "max_line_length": 95, "alphanum_fraction": 0.5981941309, "include": true, "reason": "import numpy", "num_tokens": 447}
|
import pytest
from tridesclous import *
import numpy as np
import scipy.signal
import time
import os
import shutil
from tridesclous.dataio import DataIO
from tridesclous.catalogueconstructor import CatalogueConstructor
from tridesclous import Peeler
from tridesclous.peeler_cl import Peeler_OpenCl
from tridesclous.peakdetector import detect_peaks_in_chunk
from tridesclous.tests.testingtools import setup_catalogue
from tridesclous.tests.testingtools import ON_CI_CLOUD
def setup_module():
setup_catalogue('test_peeler', dataset_name='olfactory_bulb')
setup_catalogue('test_peeler2', dataset_name='olfactory_bulb', duration=16.)
def teardown_module():
shutil.rmtree('test_peeler')
shutil.rmtree('test_peeler2')
def open_catalogue_window():
dataio = DataIO(dirname='test_peeler')
catalogueconstructor = CatalogueConstructor(dataio=dataio)
app = pg.mkQApp()
win = CatalogueWindow(catalogueconstructor)
win.show()
app.exec_()
def test_peeler_geometry():
dataio = DataIO(dirname='test_peeler')
catalogue0 = dataio.load_catalogue(chan_grp=0)
# catalogue1 = dataio.load_catalogue(chan_grp=0, name='with_oversampling')
# for catalogue in (catalogue0, catalogue1):
for catalogue in (catalogue0, ):
print()
print('engine=geometrical')
print('inter_sample_oversampling', catalogue['inter_sample_oversampling'])
peeler = Peeler(dataio)
peeler.change_params(engine='geometrical',
catalogue=catalogue,
chunksize=1024)
t1 = time.perf_counter()
peeler.run(progressbar=False)
t2 = time.perf_counter()
print('peeler.run_loop', t2-t1)
spikes = dataio.get_spikes(chan_grp=0).copy()
labels = catalogue['clusters']['cluster_label']
count_by_label = [np.sum(spikes['cluster_label'] == label) for label in labels]
print(labels)
print(count_by_label)
@pytest.mark.skipif(ON_CI_CLOUD, reason='ON_CI_CLOUD')
def test_peeler_geometry_cl():
dataio = DataIO(dirname='test_peeler')
#~ catalogue = dataio.load_catalogue(chan_grp=0)
catalogue0 = dataio.load_catalogue(chan_grp=0)
# catalogue1 = dataio.load_catalogue(chan_grp=0, name='with_oversampling')
# for catalogue in (catalogue0, catalogue1):
for catalogue in (catalogue0, ):
print()
print('engine=geometrical_opencl')
print('inter_sample_oversampling', catalogue['inter_sample_oversampling'])
peeler = Peeler(dataio)
catalogue['clean_peaks_params']['alien_value_threshold'] = None
peeler.change_params(engine='geometrical_opencl',
catalogue=catalogue,
chunksize=1024,
speed_test_mode=True,
)
t1 = time.perf_counter()
peeler.run(progressbar=False)
t2 = time.perf_counter()
print('peeler.run_loop', t2-t1)
spikes = dataio.get_spikes(chan_grp=0).copy()
labels = catalogue['clusters']['cluster_label']
count_by_label = [np.sum(spikes['cluster_label'] == label) for label in labels]
print(labels)
print(count_by_label)
run_times = peeler.get_run_times(chan_grp=0, seg_num=0)
print(run_times)
@pytest.mark.skipif(ON_CI_CLOUD, reason='TOO_OLD')
def test_peeler_empty_catalogue():
"""
This test peeler with empty catalogue.
This is like a peak detector.
Check several chunksize and compare to offline-one-buffer.
THIS TEST IS TOO OLD need to be rewritten
"""
dataio = DataIO(dirname='test_peeler')
#~ print(dataio)
catalogue = dataio.load_catalogue(chan_grp=0)
# empty catalogue for debug peak detection
s = catalogue['centers0'].shape
empty_centers = np.zeros((0, s[1], s[2]), dtype='float32')
catalogue['centers0'] = empty_centers
catalogue['centers1'] = empty_centers
catalogue['centers2'] = empty_centers
catalogue['cluster_labels'] = np.zeros(0, dtype=catalogue['cluster_labels'].dtype)
sig_length = dataio.get_segment_length(0)
chunksizes = [ 101, 174, 512, 1024, 1023, 10000, 150000]
#~ chunksizes = [1024,]
previous_peak = None
for chunksize in chunksizes:
print('**', chunksize, '**')
peeler = Peeler(dataio)
peeler.change_params(engine='geometrical', catalogue=catalogue,chunksize=chunksize,
save_bad_label=True)
t1 = time.perf_counter()
#~ peeler.run(progressbar=False)
#~ peeler.run_offline_loop_one_segment(seg_num=0, progressbar=False)
peeler.run(progressbar=False)
t2 = time.perf_counter()
#~ print('n_side', peeler.n_side, 'n_span', peeler.n_span, 'peak_width', peeler.peak_width)
#~ print('peeler.run_loop', t2-t1)
spikes = dataio.get_spikes(seg_num=0, chan_grp=0)
labeled_spike = spikes[spikes['cluster_label']>=0]
unlabeled_spike = spikes[spikes['cluster_label']<0]
assert labeled_spike.size == 0
print(unlabeled_spike.size)
is_sorted = np.all(np.diff(unlabeled_spike['index'])>=0)
online_peaks = unlabeled_spike['index']
engine = peeler.peeler_engine
i_stop = sig_length-catalogue['signal_preprocessor_params']['pad_width']-engine.extra_size+engine.n_span
sigs = dataio.get_signals_chunk(signal_type='processed', i_stop=i_stop)
offline_peaks = detect_peaks_in_chunk(sigs, engine.n_span, engine.relative_threshold, engine.peak_sign)
print(offline_peaks.size)
offline_peaks = offline_peaks[offline_peaks<=online_peaks[-1]]
assert offline_peaks.size == online_peaks.size
np.testing.assert_array_equal(offline_peaks, online_peaks)
if previous_peak is not None:
last = min(previous_peak[-1], online_peaks[-1])
previous_peak = previous_peak[previous_peak<=last]
online_peaks_cliped = online_peaks[online_peaks<=last]
assert previous_peak.size == online_peaks_cliped.size
np.testing.assert_array_equal(previous_peak, online_peaks_cliped)
previous_peak = online_peaks
@pytest.mark.skipif(ON_CI_CLOUD, reason='To hard for CI')
def test_peeler_several_chunksize():
dataio = DataIO(dirname='test_peeler')
print(dataio)
catalogue = dataio.load_catalogue(chan_grp=0)
all_spikes = []
sig_length = dataio.get_segment_length(0)
chunksizes = [ 174, 512, 1024, 1023, 10000, 150000]
#~ chunksizes = [512, 1024,]
for chunksize in chunksizes:
print('**', chunksize, '**')
peeler = Peeler(dataio)
peeler.change_params(engine='geometrical', catalogue=catalogue,chunksize=chunksize)
t1 = time.perf_counter()
peeler.run(progressbar=False)
t2 = time.perf_counter()
print('extra_size', peeler.peeler_engine.extra_size, 'n_span', peeler.peeler_engine.n_span,
'peak_width', peeler.peeler_engine.peak_width)
print('peeler.run_loop', t2-t1)
# copy is need because the memmap is reset at each loop
spikes = dataio.get_spikes(seg_num=0, chan_grp=0).copy()
all_spikes.append(spikes)
print(spikes.size)
# clip to last spike
last = min([spikes[-1]['index'] for spikes in all_spikes])
for i, chunksize in enumerate(chunksizes):
spikes = all_spikes[i]
all_spikes[i] = spikes[spikes['index']<=last]
    previous_spikes = None
for i, chunksize in enumerate(chunksizes):
print('**', chunksize, '**')
spikes = all_spikes[i]
is_sorted = np.all(np.diff(spikes['index'])>=0)
assert is_sorted
labeled_spike = spikes[spikes['cluster_label']>=0]
unlabeled_spike = spikes[spikes['cluster_label']<0]
print('labeled_spike.size', labeled_spike.size, 'unlabeled_spike.size', unlabeled_spike.size)
print(spikes)
        # TODO: Peeler chunksize influences the number of spikes
        if previous_spikes is not None:
            assert previous_spikes.size == spikes.size
            np.testing.assert_array_equal(previous_spikes['index'], spikes['index'])
            np.testing.assert_array_equal(previous_spikes['cluster_label'], spikes['cluster_label'])
        previous_spikes = spikes
def test_peeler_with_and_without_preprocessor():
if ON_CI_CLOUD:
engines = ['geometrical']
else:
engines = ['geometrical', 'geometrical_opencl']
#~ engines = ['geometrical_opencl']
for engine in engines:
for i in range(2):
#~ for i in [1]:
print()
if i == 0:
print(engine, 'without processing')
dataio = DataIO(dirname='test_peeler')
else:
print(engine, 'with processing')
dataio = DataIO(dirname='test_peeler2')
catalogue = dataio.load_catalogue(chan_grp=0)
peeler = Peeler(dataio)
peeler.change_params(engine=engine, catalogue=catalogue, chunksize=1024)
t1 = time.perf_counter()
peeler.run(progressbar=False)
t2 = time.perf_counter()
print('peeler run_time', t2 - t1)
spikes = dataio.get_spikes(chan_grp=0).copy()
labels = catalogue['clusters']['cluster_label']
count_by_label = [np.sum(spikes['cluster_label'] == label) for label in labels]
print(labels)
print(count_by_label)
def open_PeelerWindow():
dataio = DataIO(dirname='test_peeler')
#~ dataio = DataIO(dirname='test_peeler2')
initial_catalogue = dataio.load_catalogue(chan_grp=0)
app = pg.mkQApp()
win = PeelerWindow(dataio=dataio, catalogue=initial_catalogue)
win.show()
app.exec_()
def test_export_spikes():
dataio = DataIO(dirname='test_peeler')
dataio.export_spikes()
def debug_compare_peeler_engines():
# this do not work because oversampling is not handle
dataio = DataIO(dirname='test_peeler')
print(dataio)
engine_list = [
('geometrical argmin opencl', 'geometrical', {}),
('geometrical_opencl', 'geometrical_opencl', {}),
]
all_spikes = []
for name, engine, kargs in engine_list:
#~ print()
#~ print(name)
# catalogue = dataio.load_catalogue(chan_grp=0, name='with_oversampling')
catalogue = dataio.load_catalogue(chan_grp=0)
peeler = Peeler(dataio)
peeler.change_params(engine=engine, catalogue=catalogue,chunksize=1024, **kargs)
t1 = time.perf_counter()
peeler.run(progressbar=False, duration=None)
t2 = time.perf_counter()
print(name, 'run', t2-t1)
spikes = dataio.get_spikes(chan_grp=0).copy()
#~ print(spikes.size)
all_spikes.append(spikes)
#~ print(dataio.get_spikes(chan_grp=0).size)
print()
#~ all_spikes[0] = all_spikes[0][88+80:88+81+10]
#~ all_spikes[1] = all_spikes[1][88+80:88+81+10]
#~ all_spikes[0] = all_spikes[0][:88+81]
#~ all_spikes[1] = all_spikes[1][:88+81]
labels = catalogue['clusters']['cluster_label']
for i, spikes in enumerate(all_spikes):
name = engine_list[i][0]
print()
print(name)
print(spikes[:10])
print(spikes.size)
count_by_label = [np.sum(spikes['cluster_label'] == label) for label in labels]
print(count_by_label)
#~ assert all_spikes[0].size == spikes.size
#~ assert np.all(all_spikes[0]['index'] == spikes['index'])
#~ assert np.all(all_spikes[0]['cluster_label'] == spikes['cluster_label'])
#~ assert np.all(np.abs(all_spikes[0]['jitter'] - spikes['jitter'])<0.0001)
if __name__ =='__main__':
#~ setup_module()
#~ open_catalogue_window()
#~ test_peeler_geometry()
#~ test_peeler_geometry_cl()
#~ test_peeler_empty_catalogue()
test_peeler_several_chunksize()
#~ test_peeler_with_and_without_preprocessor()
#~ test_export_spikes()
#~ debug_compare_peeler_engines()
#~ open_PeelerWindow()
#~ teardown_module()
|
{"hexsha": "54fd38f372713089032e6340558cd94f3e585799", "size": 12805, "ext": "py", "lang": "Python", "max_stars_repo_path": "tridesclous/tests/test_peeler.py", "max_stars_repo_name": "caniko/tridesclous", "max_stars_repo_head_hexsha": "9f412a42697561e3c7d8e3a35249cd13240239a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2016-01-27T22:27:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T08:49:27.000Z", "max_issues_repo_path": "tridesclous/tests/test_peeler.py", "max_issues_repo_name": "caniko/tridesclous", "max_issues_repo_head_hexsha": "9f412a42697561e3c7d8e3a35249cd13240239a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 87, "max_issues_repo_issues_event_min_datetime": "2015-12-14T08:16:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T14:35:55.000Z", "max_forks_repo_path": "tridesclous/tests/test_peeler.py", "max_forks_repo_name": "caniko/tridesclous", "max_forks_repo_head_hexsha": "9f412a42697561e3c7d8e3a35249cd13240239a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 31, "max_forks_repo_forks_event_min_datetime": "2015-11-10T14:37:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T06:41:19.000Z", "avg_line_length": 32.0927318296, "max_line_length": 112, "alphanum_fraction": 0.6225693089, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3167}
|
import os
import sys
import numpy as np
from tqdm import tqdm
sys.path.insert(0, 'classes')
from DomainSegmentor import *
# TODO make parameters commandline accessible.
eval_dir = 'nhlrc3_set2'
target_class_idx = [8, 9, 10, 11]
include_max = True # Include results using the max of the selected classes.
# Generate path list.
path_list = []
for root, dirs, files in os.walk(eval_dir):
for file in files:
if file.endswith('.pdb'):
path_list.append(os.path.join(root, file))
# Check that classes are correct.
print("Selected Classes:")
for t in target_class_idx:
print(idx_to_class[t])
# Evaluate and extract desired probs.
segmentor = DomainSegmentor()
prob_dict = {}
max_prob_dict = {}
print("Starting Evaluation.")
for pdb_path in tqdm(path_list):
class_probs, res_nums = segmentor.predict(pdb_path, log=True)
    prob_subset = class_probs[target_class_idx]
target_probs = np.mean(prob_subset, axis=1)
prob_dict[os.path.split(pdb_path)[1]] = target_probs
if include_max:
max_prob_subset = np.max(prob_subset, axis=0)
max_target_prob = np.mean(max_prob_subset)
max_prob_dict[os.path.split(pdb_path)[1]] = max_target_prob
# Write the output.
if include_max:
sorted_list = sorted(max_prob_dict.items(), key=lambda x:x[1])[::-1]
out_lines = [x[0] + '\t' + str(x[1]) + '\n' for x in sorted_list]
out_file = open(eval_dir + '_max_' + '-'.join(str(t) for t in target_class_idx) +'.prob', 'w')
out_file.writelines(out_lines)
out_file.close()
print("Wrote: " + eval_dir + '_max_' + '-'.join(str(t) for t in target_class_idx) +'.prob')
for i, class_idx in enumerate(target_class_idx):
temp_dict = {x : prob_dict[x][i] for x in prob_dict}
sorted_list = sorted(temp_dict.items(), key=lambda x:x[1])[::-1]
out_lines = [x[0] + '\t' + str(x[1]) + '\n' for x in sorted_list]
out_file = open(eval_dir + '_' + idx_to_class[class_idx].replace(' ','').lower() + '.prob', 'w')
out_file.writelines(out_lines)
out_file.close()
print("Wrote: " + eval_dir + '_' + idx_to_class[class_idx].replace(' ','').lower() + '.prob')
|
{"hexsha": "ef0c28b471094cd1952442412943c819c1ccdbeb", "size": 2132, "ext": "py", "lang": "Python", "max_stars_repo_path": "custom/evaluate_design_set.py", "max_stars_repo_name": "egurapha/prot_domain_segmentor", "max_stars_repo_head_hexsha": "407ae9f5ff37ae20a32f07dd46b85ef8201659e1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-09-20T02:45:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T02:50:08.000Z", "max_issues_repo_path": "custom/evaluate_design_set.py", "max_issues_repo_name": "egurapha/prot_domain_segmentor", "max_issues_repo_head_hexsha": "407ae9f5ff37ae20a32f07dd46b85ef8201659e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "custom/evaluate_design_set.py", "max_forks_repo_name": "egurapha/prot_domain_segmentor", "max_forks_repo_head_hexsha": "407ae9f5ff37ae20a32f07dd46b85ef8201659e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-26T00:15:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-26T00:15:28.000Z", "avg_line_length": 37.4035087719, "max_line_length": 100, "alphanum_fraction": 0.6749530957, "include": true, "reason": "import numpy", "num_tokens": 582}
|
import io
import logging
import numpy as np
import pandas as pd
import core.artificial_signal_generators as casgen
import core.config as cconfig
import core.dataflow.nodes.test.helpers as cdnth
import core.dataflow.nodes.transformers as cdnt
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestSeriesToSeriesTransformer(hut.TestCase):
def test1(self) -> None:
"""
Test `fit()` call.
"""
data = self._get_data()
config = cconfig.get_config_from_nested_dict(
{
"in_col_group": ("close",),
"out_col_group": ("ret_0",),
"transformer_func": lambda x: x.pct_change(),
}
)
node = cdnt.SeriesToSeriesTransformer("sklearn", **config.to_dict())
df_out = node.fit(data)["df_out"]
df_str = hut.convert_df_to_string(df_out.round(3), index=True, decimals=3)
self.check_string(df_str)
def test2(self) -> None:
"""
Test `predict()` call.
"""
data = self._get_data()
config = cconfig.get_config_from_nested_dict(
{
"in_col_group": ("close",),
"out_col_group": ("ret_0",),
"transformer_func": lambda x: x.pct_change(),
}
)
node = cdnt.SeriesToSeriesTransformer("sklearn", **config.to_dict())
expected, actual = cdnth.get_fit_predict_outputs(data, node)
self.assert_equal(actual, expected)
def _get_data(self) -> pd.DataFrame:
"""
Generate multivariate normal returns.
"""
mn_process = casgen.MultivariateNormalProcess()
mn_process.set_cov_from_inv_wishart_draw(dim=4, seed=342)
realization = mn_process.generate_sample(
{"start": "2000-01-01", "periods": 40, "freq": "B"}, seed=134
)
realization = realization.rename(columns=lambda x: "MN" + str(x))
realization = np.exp(0.1 * realization.cumsum())
volume = pd.DataFrame(
index=realization.index, columns=realization.columns, data=100
)
data = pd.concat([realization, volume], axis=1, keys=["close", "volume"])
return data
class TestFunctionWrapper(hut.TestCase):
def test1(self) -> None:
"""
Test `fit()` call.
"""
data = self._get_df()
def multiply(df: pd.DataFrame, col1: str, col2: str) -> pd.DataFrame:
product = (df[col1] * df[col2]).rename("pv")
return product.to_frame()
config = cconfig.get_config_from_nested_dict(
{
"func": multiply,
"func_kwargs": {
"col1": "close",
"col2": "volume",
},
}
)
node = cdnt.FunctionWrapper("sklearn", **config.to_dict())
actual = node.fit(data)["df_out"]
txt = """
datetime,pv
2016-01-04 09:30:00,1.769e+08
2016-01-04 09:31:00,3.316e+07
2016-01-04 09:32:00,3.999e+07
"""
expected = pd.read_csv(io.StringIO(txt), index_col=0, parse_dates=True)
pd.testing.assert_frame_equal(actual, expected, rtol=1e-2)
@staticmethod
def _get_df() -> pd.DataFrame:
"""
Return a df without NaNs.
"""
txt = """
datetime,close,volume
2016-01-04 09:30:00,94.7,1867590
2016-01-04 09:31:00,94.98,349119
2016-01-04 09:32:00,95.33,419479
"""
df = pd.read_csv(io.StringIO(txt), index_col=0, parse_dates=True)
return df
class TestTwapVwapComputer(hut.TestCase):
def test1(self) -> None:
"""
Test building 5-min TWAP/VWAP bars from 1-min close/volume bars.
"""
data = self._get_data()
config = cconfig.get_config_from_nested_dict(
{
"rule": "5T",
"price_col": "close",
"volume_col": "volume",
}
)
node = cdnt.TwapVwapComputer("twapvwap", **config.to_dict())
df_out = node.fit(data)["df_out"]
df_str = hut.convert_df_to_string(df_out.round(3), index=True, decimals=3)
self.check_string(df_str)
def test2(self) -> None:
"""
Test `predict()` call.
"""
data = self._get_data()
config = cconfig.get_config_from_nested_dict(
{
"rule": "5T",
"price_col": "close",
"volume_col": "volume",
}
)
node = cdnt.TwapVwapComputer("twapvwap", **config.to_dict())
expected, actual = cdnth.get_fit_predict_outputs(data, node)
self.assert_equal(actual, expected)
def _get_data(self) -> pd.DataFrame:
"""
Generate AR(1) returns and Poisson volume.
"""
date_range_kwargs = {
"start": "2001-01-04 09:30:00",
"end": "2001-01-04 10:00:00",
"freq": "T",
}
ar_params = [0.5]
arma_process = casgen.ArmaProcess(ar_params, [])
rets = arma_process.generate_sample(
date_range_kwargs=date_range_kwargs,
scale=1,
burnin=0,
seed=100,
)
prices = np.exp(0.25 * rets.cumsum())
prices.name = "close"
poisson_process = casgen.PoissonProcess(mu=100)
volume = poisson_process.generate_sample(
date_range_kwargs=date_range_kwargs,
seed=100,
)
volume.name = "volume"
df = pd.concat([prices, volume], axis=1)
return df
class TestMultiindexTwapVwapComputer(hut.TestCase):
def test1(self) -> None:
"""
Test building 5-min TWAP/VWAP bars from 1-min close/volume bars.
"""
data = self._get_data()
config = cconfig.get_config_from_nested_dict(
{
"rule": "5T",
"price_col_group": ("close",),
"volume_col_group": ("volume",),
"out_col_group": (),
}
)
node = cdnt.MultiindexTwapVwapComputer("twapvwap", **config.to_dict())
df_out = node.fit(data)["df_out"]
df_str = hut.convert_df_to_string(df_out.round(3), index=True, decimals=3)
self.check_string(df_str)
def test2(self) -> None:
"""
Test `predict()` call.
"""
data = self._get_data()
config = cconfig.get_config_from_nested_dict(
{
"rule": "5T",
"price_col_group": ("close",),
"volume_col_group": ("volume",),
"out_col_group": (),
}
)
node = cdnt.MultiindexTwapVwapComputer("twapvwap", **config.to_dict())
expected, actual = cdnth.get_fit_predict_outputs(data, node)
self.assert_equal(actual, expected)
def _get_data(self) -> pd.DataFrame:
"""
Generate AR(1) returns and Poisson volume.
"""
date_range_kwargs = {
"start": "2001-01-04 09:30:00",
"end": "2001-01-04 10:00:00",
"freq": "T",
}
mn_process = casgen.MultivariateNormalProcess()
mn_process.set_cov_from_inv_wishart_draw(dim=4, seed=402)
rets = mn_process.generate_sample(
date_range_kwargs=date_range_kwargs, seed=343
)
rets = rets.rename(columns=lambda x: "MN" + str(x))
prices = np.exp(0.1 * rets.cumsum())
poisson_process = casgen.PoissonProcess(mu=100)
volume_srs = poisson_process.generate_sample(
date_range_kwargs=date_range_kwargs,
seed=100,
)
volume = pd.DataFrame(index=volume_srs.index, columns=rets.columns)
for col in volume.columns:
volume[col] = volume_srs
df = pd.concat([prices, volume], axis=1, keys=["close", "volume"])
return df
|
{"hexsha": "4312a7bddb78b2bf8d3059b806c9e5d522ed1af6", "size": 7848, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/dataflow/nodes/test/test_transformers.py", "max_stars_repo_name": "ajmal017/amp", "max_stars_repo_head_hexsha": "8de7e3b88be87605ec3bad03c139ac64eb460e5c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/dataflow/nodes/test/test_transformers.py", "max_issues_repo_name": "ajmal017/amp", "max_issues_repo_head_hexsha": "8de7e3b88be87605ec3bad03c139ac64eb460e5c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/dataflow/nodes/test/test_transformers.py", "max_forks_repo_name": "ajmal017/amp", "max_forks_repo_head_hexsha": "8de7e3b88be87605ec3bad03c139ac64eb460e5c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8368200837, "max_line_length": 82, "alphanum_fraction": 0.5575942915, "include": true, "reason": "import numpy", "num_tokens": 1944}
|
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// GameKit
#include <aws/gamekit/core/model/account_info.h>
// Boost
#include <boost/algorithm/string/case_conv.hpp>
std::string GameKit::TruncateAndLower(const std::string& str, const std::regex& pattern)
{
auto rbegin = std::sregex_iterator(str.begin(), str.end(), pattern);
auto rend = std::sregex_iterator();
std::string matchStr;
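    // iterate over every match of the pattern; the last match in the string wins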
for (std::sregex_iterator i = rbegin; i != rend; ++i)
{
std::smatch match = *i;
matchStr = match.str();
}
return boost::algorithm::to_lower_copy(matchStr);
}
GameKit::AccountInfoCopy GameKit::CreateAccountInfoCopy(const GameKit::AccountInfo accountInfo)
{
AccountInfoCopy acctCopy;
acctCopy.environment = ResourceEnvironment(accountInfo.environment),
acctCopy.accountId = accountInfo.accountId;
acctCopy.companyName = accountInfo.companyName;
acctCopy.gameName = accountInfo.gameName;
// using the regex pattern for each field, truncate and convert them to lowercase
acctCopy.accountId = TruncateAndLower(acctCopy.accountId, std::regex("\\d{12}"));
acctCopy.gameName = TruncateAndLower(acctCopy.gameName, std::regex("[a-zA-Z0-9]{1,12}"));
acctCopy.companyName = TruncateAndLower(acctCopy.companyName, std::regex("[a-zA-Z0-9]{3,12}"));
return acctCopy;
}
// Method to compose bootstrap bucket name
std::string GameKit::GetBootstrapBucketName(const GameKit::AccountInfoCopy& accountInfo, const std::string& shortRegionCode)
{
std::string bootstrapBucketName = "do-not-delete-gamekit-";
// Bootstrap bucket names have a maximum 63 characters and has the format:
// do-not-delete-gamekit-<env>-<5_letter_aws_region_code>-<base36_account_id>-<gamename>
bootstrapBucketName.append(accountInfo.environment.GetEnvironmentString())
.append("-")
.append(shortRegionCode)
.append("-")
.append(GameKit::Utils::EncodingUtils::DecimalToBase(accountInfo.accountId, GameKit::Utils::BASE_36))
.append("-")
.append(accountInfo.gameName);
return bootstrapBucketName;
}
|
{"hexsha": "5ad3fa60d5962a5164aad65f24f6ca30f6373c54", "size": 2154, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "aws-gamekit-core/source/aws/gamekit/core/model/account_info.cpp", "max_stars_repo_name": "aws/aws-gamekit", "max_stars_repo_head_hexsha": "7ea5a9067c53a70ed279e1870008cf3a6d8d3b2e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16.0, "max_stars_repo_stars_event_min_datetime": "2022-03-23T18:28:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T19:59:23.000Z", "max_issues_repo_path": "aws-gamekit-core/source/aws/gamekit/core/model/account_info.cpp", "max_issues_repo_name": "aws/aws-gamekit", "max_issues_repo_head_hexsha": "7ea5a9067c53a70ed279e1870008cf3a6d8d3b2e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aws-gamekit-core/source/aws/gamekit/core/model/account_info.cpp", "max_forks_repo_name": "aws/aws-gamekit", "max_forks_repo_head_hexsha": "7ea5a9067c53a70ed279e1870008cf3a6d8d3b2e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-03-28T17:14:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T17:14:15.000Z", "avg_line_length": 39.1636363636, "max_line_length": 124, "alphanum_fraction": 0.7186629526, "num_tokens": 532}
|
module SBML
using SBML_jll, Libdl
using SparseArrays
using Symbolics
using IfElse
using Unitful
include("types.jl")
include("structs.jl")
include("version.jl")
include("converters.jl")
include("math.jl")
include("readsbml.jl")
include("symbolics.jl")
include("utils.jl")
sbml(sym::Symbol) = dlsym(SBML_jll.libsbml_handle, sym)
export readSBML, getS, getLBs, getUBs, getOCs
export set_level_and_version, libsbml_convert, convert_simplify_math
end # module
|
{"hexsha": "def10757deaa7a07ab85bda257b7aa5d5626d5dd", "size": 461, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SBML.jl", "max_stars_repo_name": "paulflang/SBML.jl", "max_stars_repo_head_hexsha": "5ca3e1988bff7009e410fb315f0bc360c7476098", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/SBML.jl", "max_issues_repo_name": "paulflang/SBML.jl", "max_issues_repo_head_hexsha": "5ca3e1988bff7009e410fb315f0bc360c7476098", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/SBML.jl", "max_forks_repo_name": "paulflang/SBML.jl", "max_forks_repo_head_hexsha": "5ca3e1988bff7009e410fb315f0bc360c7476098", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-11T19:18:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-15T19:26:59.000Z", "avg_line_length": 18.44, "max_line_length": 68, "alphanum_fraction": 0.7809110629, "num_tokens": 143}
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy.stats as stats
__all__ = ['lhs']
def lhs(dist, param, nsample):
"""
Latin Hypercube Sampling of any distribution without correlations
after Stein (1987).
Definition
----------
def lhs(dist, param, nsample):
Input
-----
dist
random number generator (list) from scipy.stats such as stats.norm,
stats.beta, etc.
param
tuple of parameters as required for dist
nsample
number of samples per parameter
Output
------
Latin hypercube Sample array of [size(nsample),nsample]
Restrictions
------------
No correlations between parameters possible.
References
----------
Stein, M. 1987. Large Sample Properties of Simulations Using
Latin Hypercube Sampling. Technometrics 29:143-151
Examples
--------
>>> import numpy as np
>>> import scipy.stats as stats
>>> # seed for reproducible results in doctest
>>> np.random.seed(1)
>>> dist = [stats.norm, stats.uniform] # for uniform (min, max-min)
>>> pars = [(50,2),(1,5)]
>>> c = lhs(dist, pars, 20)
>>> from autostring import astr
>>> print(astr(c[0:2,0:4],3,pp=True))
[['52.822' '51.956' '46.710' '50.585']
[' 4.950' ' 2.492' ' 2.078' ' 4.673']]
>>> np.random.seed(1)
>>> dist = [stats.norm]
>>> pars = [(50,2)]
>>> c = lhs(dist, pars, 20)
>>> print(c.shape)
(1, 20)
>>> print(astr(c[0,0:4],3,pp=True))
['51.171' '48.562' '51.683' '50.585']
>>> np.random.seed(1)
>>> dist = stats.norm
>>> pars = (50,2)
>>> c = lhs(dist, pars, 20)
>>> print(c.shape)
(20,)
>>> print(astr(c[0:4],3,pp=True))
['51.171' '48.562' '51.683' '50.585']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python
library, Department of Computational Hydrosystems, Helmholtz Centre for
Environmental Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2021 Matthias Cuntz - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
History
-------
Written, Matthias Cuntz, May 2012
- combination of Matlab routines of Budiman (2003)
and Python routines of Flavio Codeco Coelho (2008)
Modified, Matthias Cuntz, Feb 2013 - ported to Python 3
Matthias Cuntz, Nov 2016 - preserve shape <- nodim
Matthias Cuntz, Sep 2021 - code refactoring
"""
#
# Check input
if not isinstance(dist, (list, tuple)):
nodim = True
dist = [dist]
param = [param]
else:
nodim = False
assert len(dist) == len(param)
ndist = len(dist)
# LHS
ran = np.random.uniform(0., 1., (ndist, nsample))
lhsout = np.empty((ndist, nsample))
for j, d in enumerate(dist):
if not isinstance(d, (stats.rv_discrete, stats.rv_continuous)):
raise TypeError('dist is not a scipy.stats distribution object.')
# force type to float for sage compatibility
pars = tuple([float(k) for k in param[j]])
idx = np.array(np.random.permutation(nsample), dtype=float)
p = (idx+ran[j, :])/float(nsample) # probability of cdf
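        # the random permutation assigns exactly one draw to each probability
        # stratum [k/nsample, (k+1)/nsample) -- the defining LHS property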
lhsout[j, :] = d(*pars).ppf(p) # inverse of cdf
if nodim:
return lhsout[0, :]
else:
return lhsout
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
# import matplotlib.pyplot as plt
# dist = [stats.norm, stats.uniform]
# pars = [(50,2),(1,5)]
# c = lhs(dist, pars, 20000)
# plt.figure()
# plt.hist(c[0,:])
# plt.figure()
# plt.hist(c[1,:])
# dist = [stats.uniform, stats.uniform]
# pars = [(50,2),(1,5)]
# c = lhs(dist, pars, 20000)
# plt.figure()
# plt.plot(c[0,:],c[1,:],'ko',markersize=1.0)
# plt.show()
|
{"hexsha": "ab0d5e4d438943d941fa9ec0d088fde3e55d0ce2", "size": 5134, "ext": "py", "lang": "Python", "max_stars_repo_path": "jams/lhs.py", "max_stars_repo_name": "mcuntz/jams_python", "max_stars_repo_head_hexsha": "41b4504d2f55a77a7876fc6d146e4eb91dd8b2b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-06-03T03:24:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T07:14:00.000Z", "max_issues_repo_path": "jams/lhs.py", "max_issues_repo_name": "mcuntz/jams_python", "max_issues_repo_head_hexsha": "41b4504d2f55a77a7876fc6d146e4eb91dd8b2b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-25T21:56:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-08T14:58:27.000Z", "max_forks_repo_path": "jams/lhs.py", "max_forks_repo_name": "mcuntz/jams_python", "max_forks_repo_head_hexsha": "41b4504d2f55a77a7876fc6d146e4eb91dd8b2b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-10-17T12:04:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T07:45:07.000Z", "avg_line_length": 30.3786982249, "max_line_length": 79, "alphanum_fraction": 0.6155044799, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1371}
|