"""."""
from typing import Callable
import cupy as cp
import dask.array as da
def infer_gib_gpu(metric: Callable) -> bool:
"""Infer greater is better from metric for GPU.
Args:
metric: Score or loss function.
Returns:
```True``` if greater is better.
Raises:
AssertionError: If there is no way to order the predictions.
"""
label = cp.array([0., 1.])
pred = cp.array([0.1, 0.9])
g_val = metric(label, pred)
b_val = metric(label, pred[::-1])
assert g_val != b_val, 'Cannot infer greater is better from metric.' \
' Should be set manually.'
return g_val > b_val
def infer_gib_multiclass_gpu(metric: Callable) -> bool:
"""Infer greater is better from metric for GPU.
Args:
metric: Metric function. It must take two
arguments y_true, y_pred.
Returns:
```True``` if greater is better.
Raises:
AssertionError: If there is no way to order the predictions.
"""
label = cp.array([0., 1., 2.])
pred = cp.array([[0.9, 0.05, 0.05], [0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])
g_val = metric(label, pred)
b_val = metric(label, pred[::-1])
assert g_val != b_val, 'Cannot infer greater is better from metric. ' \
'Should be set manually.'
return g_val > b_val
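# Minimal usage sketch (added illustration, not part of the original module):
# any callable taking (y_true, y_pred) as CuPy arrays works. A mean-squared-error
# loss is lower-is-better, so the helper is expected to return ``False``.
def _example_infer_direction():
mse = lambda y_true, y_pred: float(cp.mean((y_true - y_pred) ** 2))
return infer_gib_gpu(mse)  # False: MSE is a loss, not a score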
|
{"hexsha": "1dcf820292c7efda9d3a9d6adbd30a1dfe12ab4e", "size": 1358, "ext": "py", "lang": "Python", "max_stars_repo_path": "lightautoml/tasks/gpu/utils_gpu.py", "max_stars_repo_name": "Rishat-skoltech/LightAutoML_GPU", "max_stars_repo_head_hexsha": "4a0a524dc097de94b90871e40f2e33159a0e19b5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-21T19:15:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:41:59.000Z", "max_issues_repo_path": "lightautoml/tasks/gpu/utils_gpu.py", "max_issues_repo_name": "Rishat-skoltech/LightAutoML_GPU", "max_issues_repo_head_hexsha": "4a0a524dc097de94b90871e40f2e33159a0e19b5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lightautoml/tasks/gpu/utils_gpu.py", "max_forks_repo_name": "Rishat-skoltech/LightAutoML_GPU", "max_forks_repo_head_hexsha": "4a0a524dc097de94b90871e40f2e33159a0e19b5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4137931034, "max_line_length": 78, "alphanum_fraction": 0.5846833579, "include": true, "reason": "import cupy", "num_tokens": 364}
|
From iris.base_logic Require Export invariants.
From iris.program_logic Require Export weakestpre.
From iris.heap_lang Require Export lang proofmode notation.
From iris.heap_lang.lib Require Export nondet_bool.
From iris_examples.proph Require Import clairvoyant_coin_spec.
(* Clairvoyant coin using (untyped) sequence prophecies. *)
Definition new_coin: val :=
λ: <>, (ref (nondet_bool #()), NewProph).
Definition read_coin : val := λ: "cp", !(Fst "cp").
Definition toss_coin : val :=
λ: "cp",
let: "c" := Fst "cp" in
let: "p" := Snd "cp" in
let: "r" := nondet_bool #() in
"c" <- "r";; resolve_proph: "p" to: "r";; #().
Section proof.
Context `{!heapG Σ}.
Definition prophecy_to_list_bool (vs : list (val * val)) : list bool :=
(λ v, bool_decide (v = #true)) ∘ snd <$> vs.
Definition coin (cp : val) (bs : list bool) : iProp Σ :=
(∃ (c : loc) (p : proph_id) (vs : list (val * val)),
⌜cp = (#c, #p)%V⌝ ∗
⌜bs ≠ []⌝ ∗ ⌜tail bs = prophecy_to_list_bool vs⌝ ∗
proph p vs ∗
from_option (λ b : bool, c ↦ #b) (∃ b : bool, c ↦ #b) (head bs))%I.
Lemma coin_exclusive (cp : val) (bs1 bs2 : list bool) :
coin cp bs1 -∗ coin cp bs2 -∗ False.
Proof.
iIntros "H1 H2".
iDestruct "H1" as (c1 p1 vs1) "(-> & _ & _ & Hp1 & _)".
iDestruct "H2" as (c2 p2 vs2) "(% & _ & _ & Hp2 & _)".
simplify_eq. iApply (proph_exclusive with "Hp1 Hp2").
Qed.
Lemma new_coin_spec : {{{ True }}} new_coin #() {{{ c bs, RET c; coin c bs }}}.
Proof.
iIntros (Φ) "_ HΦ".
wp_lam.
wp_apply wp_new_proph; first done.
iIntros (vs p) "Hp".
wp_apply nondet_bool_spec; first done.
iIntros (b) "_".
wp_alloc c as "Hc".
wp_pair.
iApply ("HΦ" $! (#c, #p)%V (b :: prophecy_to_list_bool vs)).
rewrite /coin; eauto with iFrame.
Qed.
Lemma read_coin_spec cp bs :
{{{ coin cp bs }}}
read_coin cp
{{{ b bs', RET #b; ⌜bs = b :: bs'⌝ ∗ coin cp bs }}}.
Proof.
iIntros (Φ) "Hc HΦ".
iDestruct "Hc" as (c p vs -> ? ?) "[Hp Hb]".
destruct bs as [|b bs]; simplify_eq/=.
wp_lam. wp_load.
iApply "HΦ"; iSplit; first done.
rewrite /coin; eauto 10 with iFrame.
Qed.
Lemma toss_coin_spec cp bs :
{{{ coin cp bs }}}
toss_coin cp
{{{ b bs', RET #(); ⌜bs = b :: bs'⌝ ∗ coin cp bs' }}}.
Proof.
iIntros (Φ) "Hc HΦ".
iDestruct "Hc" as (c p vs -> ? ?) "[Hp Hb]".
destruct bs as [|b bs]; simplify_eq/=.
wp_lam. do 2 (wp_proj; wp_let).
wp_apply nondet_bool_spec; first done.
iIntros (r) "_".
wp_store.
wp_apply (wp_resolve_proph with "[Hp]"); first done.
iIntros (ws) "[-> Hp]".
wp_seq.
iApply "HΦ"; iSplit; first done.
destruct r; rewrite /coin; eauto 10 with iFrame.
Qed.
End proof.
Definition clairvoyant_coin_spec_instance `{!heapG Σ} :
clairvoyant_coin_spec.clairvoyant_coin_spec Σ :=
{| clairvoyant_coin_spec.new_coin_spec := new_coin_spec;
clairvoyant_coin_spec.read_coin_spec := read_coin_spec;
clairvoyant_coin_spec.toss_coin_spec := toss_coin_spec;
clairvoyant_coin_spec.coin_exclusive := coin_exclusive |}.
Typeclasses Opaque coin.
|
{"author": "anemoneflower", "repo": "IRIS-study", "sha": "63cbfee3959659074047682faeed7190b5be53df", "save_path": "github-repos/coq/anemoneflower-IRIS-study", "path": "github-repos/coq/anemoneflower-IRIS-study/IRIS-study-63cbfee3959659074047682faeed7190b5be53df/examples-master/theories/proph/clairvoyant_coin.v"}
|
import StaticArrays: SVector, MVector
import DelayEmbeddings: Dataset
include("induced_invariant_measure.jl")
export InducedRectangularInvariantMeasure, inducedrectangularinvariantmeasure
"""
struct InducedRectangularInvariantMeasure{T} <: AbstractRectangularInvariantMeasure where {T}
points::AbstractArray{T, 2}
ϵF::Union{Int, Float64, Vector{Int}, Vector{Float64}}
ϵF_absolute::Vector{Float64}
ϵj::Union{Int, Float64, Vector{Int}, Vector{Float64}}
ϵj_absolute::Vector{Float64}
bins_ϵF::AbstractArray{Float64, 2}
bins_visited_ϵj::AbstractArray{Float64, 2}
transferoperator_ϵj::TransferOperatorRectangularBinning
measure_ϵj::InvariantDistribution
measure_induced::InvariantDistribution
end
An induced RectangularInvariantMeasure. Created from a set of points by
discretizing the state space into rectangular bins with edge lengths dictated
by the binning scheme `ϵ`.
The invariant measure is first computed at resolution `ϵj`, then induced at the
final resolution `ϵF` by considering the overlap of the bins between resolutions.
## Fields
- **`points`**: The points for which to estimate the invariant measure.
- **`ϵF`**: The binning scheme at the final resolution. The following `ϵ` are
valid, and all are converted to `ϵ::Vector{Float64}` (absolute edge lengths),
because the edge lengths at both resolutions are needed.
1. `ϵ::Int` divides each axis into `ϵ` intervals of the same size.
2. `ϵ::Float` divides each axis into intervals of size `ϵ`.
3. `ϵ::Vector{Int}` divides the i-th axis into `ϵᵢ` intervals of the same size.
4. `ϵ::Vector{Float64}` divides the i-th axis into intervals of size `ϵᵢ`.
- **`ϵF_absolute`**: `ϵF` converted to absolute edge lengths.
- **`ϵj`**: The binning scheme at the resolution from which we induce the measure.
- **`ϵj_absolute`**: `ϵj` converted to absolute edge lengths.
- **`bins_ϵF`**: The origins of all bins covering the hyperrectangle spanned
by a box covering of `points` at resolution `ϵF`.
- **`bins_visited_ϵj`**: As for `bins_ϵF`, but at resolution `ϵj` and only
for nonempty bins.
- **`transferoperator_ϵj`**: The transfer operator from which the invariant
measure is obtained at resolution `ϵj`.
- **`measure_ϵj`**: The invariant measure at resolution `ϵj`, obtained from
`transferoperator_ϵj`.
- **`measure_induced`**: The invariant measure induced at resolution `ϵF`
from resolution `ϵj`.
"""
struct InducedRectangularInvariantMeasure{T} <: AbstractRectangularInvariantMeasure where {T}
points::AbstractArray{T, 2}
ϵF::Union{Int, Float64, Vector{Int}, Vector{Float64}}
ϵF_absolute::Vector{Float64}
ϵj::Union{Int, Float64, Vector{Int}, Vector{Float64}}
ϵj_absolute::Vector{Float64}
bins_ϵF::AbstractArray{Float64, 2}
bins_visited_ϵj::AbstractArray{Float64, 2}
transferoperator_ϵj::TransferOperatorRectangularBinning
measure_ϵj::InvariantDistribution
measure_induced::InvariantDistribution
end
"""
inducedrectangularinvariantmeasure(points, ϵF, ϵⱼ)
Compute the invariant measure over the boxes of a hyperrectangular subdivision
of the hypercube spanned by `points` at resolution `ϵF`, induced by the
invariant measure computed for the same points at resolution `ϵⱼ`. Returns an
`InducedRectangularInvariantMeasure` instance.
"""
function inducedrectangularinvariantmeasure(points::AbstractArray{T, 2}, ϵF, ϵⱼ) where {T}
ϵF_absolute = minima_and_stepsizes(points, ϵF)[2]
ϵⱼ_absolute = minima_and_stepsizes(points, ϵⱼ)[2]
# All possible bins in the final partition given by ϵF
allbins_ϵF = boxorigins(points, ϵF_absolute)
# The unique bins visited by the orbit in the partition given by ϵⱼ
visitedbins_ϵⱼ = unique(assign_coordinate_labels(points, ϵⱼ_absolute), dims = 2)
# The transfer operator in the partition given by ϵⱼ
TOϵⱼ = TransferOperatorEstimatorRectangularBinning(points, ϵⱼ_absolute)
# The measure of the visited bins in the partition given by ϵⱼ
μϵⱼ = invariantmeasure(TOϵⱼ)
# The induced measure at resolution ϵF. This does not filter
# zero entries, so be careful when comparing with the distribution
# obtained using invariantmeasure(::TransferOperatorRectangularBinning)
μϵF = μ_allbins_ϵF_induced_by_binningscheme_ϵⱼ(
points,
ϵF,
allbins_ϵF,
ϵⱼ,
visitedbins_ϵⱼ,
μϵⱼ.dist)
InducedRectangularInvariantMeasure(
points,
ϵF,
ϵF_absolute,
ϵⱼ,
ϵⱼ_absolute,
allbins_ϵF,
visitedbins_ϵⱼ,
TOϵⱼ,
μϵⱼ,
InvariantDistribution(μϵF, findall(μϵF .> 0))
)
end
function inducedrectangularinvariantmeasure(points::Vector{Vector{T}}, ϵF, ϵⱼ) where {T}
inducedrectangularinvariantmeasure(hcat(points...,), ϵF, ϵⱼ)
end
function inducedrectangularinvariantmeasure(points::Vector{SVector{T}}, ϵF, ϵⱼ) where {T}
inducedrectangularinvariantmeasure(Array(hcat(points...,)), ϵF, ϵⱼ)
end
function inducedrectangularinvariantmeasure(points::Vector{MVector{T}}, ϵF, ϵⱼ) where {T}
inducedrectangularinvariantmeasure(Array(hcat(points...,)), ϵF, ϵⱼ)
end
function inducedrectangularinvariantmeasure(points::Dataset, ϵF, ϵⱼ)
inducedrectangularinvariantmeasure(transpose(Matrix(points)), ϵF, ϵⱼ)
end
function summarise(invm::InducedRectangularInvariantMeasure)
ϵF = invm.ϵF
ϵⱼ = invm.ϵj
ϵF_str = " partition resulting from binning scheme ϵF = $ϵF"
ϵⱼ_str = " partition resulting from binning scheme ϵⱼ = $ϵⱼ"
D = size(invm.points, 1)
npts = size(invm.points, 2)
measure_type = typeof(invm)
measure_type_str = "$measure_type"
pts_str = "$npts $D-dimensional points"
measure_type_str*" from "*pts_str*" at"*ϵF_str*" induced by"*ϵⱼ_str
end
Base.show(io::IO, invm::InducedRectangularInvariantMeasure) = println(io, summarise(invm))
|
{"hexsha": "64d5bca7dbdf69186e4b0e11e8611ee8f1a45a19", "size": 5926, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/InvariantMeasures/composite_types/InducedRectangularInvariantMeasure/InducedRectangularInvariantMeasure.jl", "max_stars_repo_name": "JuliaTagBot/PerronFrobenius.jl", "max_stars_repo_head_hexsha": "94e114cce8fcce52ac4f9b529aafa2555d87ee97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/InvariantMeasures/composite_types/InducedRectangularInvariantMeasure/InducedRectangularInvariantMeasure.jl", "max_issues_repo_name": "JuliaTagBot/PerronFrobenius.jl", "max_issues_repo_head_hexsha": "94e114cce8fcce52ac4f9b529aafa2555d87ee97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/InvariantMeasures/composite_types/InducedRectangularInvariantMeasure/InducedRectangularInvariantMeasure.jl", "max_forks_repo_name": "JuliaTagBot/PerronFrobenius.jl", "max_forks_repo_head_hexsha": "94e114cce8fcce52ac4f9b529aafa2555d87ee97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5063291139, "max_line_length": 97, "alphanum_fraction": 0.7298346271, "num_tokens": 1768}
|
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, QNetwork, DeterministicPolicy
class BEARQL(object):
def __init__(self, num_inputs, action_space, args):
self.gamma = args.gamma
self.tau = args.tau
self.device = torch.device("cuda" if args.cuda else "cpu")
self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(self.device)
self.critic_optim = Adam(self.critic.parameters(), weight_decay=1e-2)
self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(self.device)
hard_update(self.critic_target, self.critic)
self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=1e-4)
self.policy_target = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
hard_update(self.policy_target, self.policy)
self.dual_lambda = args.init_dual_lambda
self.dual_step_size = args.dual_step_size
self.cost_epsilon = args.cost_epsilon
self.coefficient_weight = args.coefficient_weight
self.dual_steps = args.dual_steps
self.dirac_policy_num = args.dirac_policy_num
self.m = args.m
self.n = args.n
self.mmd_before_tanh = args.mmd_before_tanh
# used in evaluation
def select_action(self, state):
# sample multiple actions from the policy and greedily maximize Q over them
with torch.no_grad():
state = torch.FloatTensor(state.reshape(1, -1)).repeat(self.dirac_policy_num, 1).to(self.device)
# state = torch.FloatTensor(state.reshape(1, -1)).to(self.device)
x_t, action, _, mean = self.policy.sample(state)
# q1, q2 = self.critic(state, action)
q1, q2, q3 = self.critic(state, action)
ind = (q1+q2+q3).max(0)[1]
return action[ind].cpu().data.numpy().flatten()
# return action.cpu().data.numpy().flatten()
# MMD functions
def compute_gau_kernel(self, x, y, sigma):
batch_size = x.shape[0]
x_size = x.shape[1]
y_size = y.shape[1]
dim = x.shape[2]
tiled_x = x.view(batch_size, x_size, 1, dim).repeat([1, 1, y_size, 1])
tiled_y = y.view(batch_size, 1, y_size, dim).repeat([1, x_size, 1, 1])
return torch.exp(-(tiled_x - tiled_y).pow(2).sum(dim=3) / (2 * sigma))
# MMD functions
def compute_lap_kernel(self, x, y, sigma):
batch_size = x.shape[0]
x_size = x.shape[1]
y_size = y.shape[1]
dim = x.shape[2]
tiled_x = x.view(batch_size, x_size, 1, dim).repeat([1, 1, y_size, 1])
tiled_y = y.view(batch_size, 1, y_size, dim).repeat([1, x_size, 1, 1])
return torch.exp(-torch.abs(tiled_x - tiled_y).sum(dim=3) / sigma)
def compute_mmd(self, x, y, kernel='lap'):
if kernel == 'gau':
x_kernel = self.compute_gau_kernel(x, x, 20)
y_kernel = self.compute_gau_kernel(y, y, 20)
xy_kernel = self.compute_gau_kernel(x, y, 20)
else:
x_kernel = self.compute_lap_kernel(x, x, 10)
y_kernel = self.compute_lap_kernel(y, y, 10)
xy_kernel = self.compute_lap_kernel(x, y, 10)
square_mmd = x_kernel.mean((1, 2)) + y_kernel.mean((1, 2)) - 2 * xy_kernel.mean((1, 2))
return square_mmd
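# Illustrative sanity check (added sketch, not in the original file): the
# squared-MMD estimate should be near zero for two batches drawn from the
# same distribution and clearly positive for different ones. Inputs follow
# compute_mmd's expected (batch, n_samples, dim) layout.
def _mmd_sanity_check(self):
x = torch.randn(8, 16, 3)
y = torch.randn(8, 16, 3)
z = torch.randn(8, 16, 3) + 2.0  # shifted distribution
return self.compute_mmd(x, y).mean() < self.compute_mmd(x, z).mean()  # expect True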
def train(self, prior, memory, batch_size, m=4, n=4):
# Sample replay buffer / batch
state_np, action_np, reward_np, next_state_np, mask_np = memory.sample(batch_size=batch_size)
state_batch = torch.FloatTensor(state_np).to(self.device)
next_state_batch = torch.FloatTensor(next_state_np).to(self.device)
action_batch = torch.FloatTensor(action_np).to(self.device)
reward_batch = torch.FloatTensor(reward_np).to(self.device).unsqueeze(1)
mask_batch = torch.FloatTensor(mask_np).to(self.device).unsqueeze(1)
# Critic Training
with torch.no_grad():
# Duplicate each next state dirac_policy_num times
next_state_rep = torch.FloatTensor(np.repeat(next_state_np, self.dirac_policy_num, axis=0)).to(self.device)
# Soft Clipped Double Q-learning
_, next_state_action, _, _ = self.policy_target.sample(next_state_rep)
target_Q1, target_Q2, target_Q3 = self.critic_target(next_state_rep, next_state_action)
target_cat = torch.cat([target_Q1, target_Q2, target_Q3], 1)
target_Q = 0.75 * target_cat.min(1)[0] + 0.25 * target_cat.max(1)[0]
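# The 0.75/0.25 convex combination of the ensemble min and max above softens
# the usual min-only clipped double-Q target, mitigating its underestimation bias.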
target_Q = target_Q.view(batch_size, -1).max(1)[0].view(-1, 1)
# target_Q1, target_Q2 = self.critic_target(next_state_rep, next_state_action)
# target_Q = 0.75 * torch.min(target_Q1, target_Q2) + 0.25 * torch.max(target_Q1, target_Q2)
# target_Q = target_Q.view(batch_size, -1).max(1)[0].view(-1, 1)
next_q_value = reward_batch + mask_batch * self.gamma * target_Q
qf1, qf2, qf3 = self.critic(state_batch, action_batch) # ensemble of k Q-functions
q_loss = F.mse_loss(qf1, next_q_value) + F.mse_loss(qf2, next_q_value) + F.mse_loss(qf3, next_q_value)
# qf1, qf2 = self.critic(state_batch, action_batch) # ensemble of k Q-functions
# q_loss = F.mse_loss(qf1, next_q_value) + F.mse_loss(qf2, next_q_value)
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
# Train Actor
with torch.no_grad():
state_rep_m = torch.FloatTensor(np.repeat(state_np, m, axis=0)).to(self.device)
state_rep_n = torch.FloatTensor(np.repeat(state_np, n, axis=0)).to(self.device)
prior_x_t, prior_a, _, _ = prior.sample(state_rep_n)
prior_a = prior_a.view(batch_size, n, -1)
prior_x_t = prior_x_t.view(batch_size, n, -1)
for s in range(self.dual_steps):
x_t_rep, a_rep, _, _ = self.policy.sample(state_rep_m)
if self.mmd_before_tanh:
x_t_rep = x_t_rep.view(batch_size, m, -1)
mmd_dist = self.compute_mmd(prior_x_t, x_t_rep)
else:
a_rep = a_rep.view(batch_size, m, -1)
mmd_dist = self.compute_mmd(prior_a, a_rep)
_, pi, _, _ = self.policy.sample(state_batch)
qf1_pi, qf2_pi, qf3_pi = self.critic(state_batch, pi)
qf_cat = torch.cat([qf1_pi, qf2_pi, qf3_pi], 1)
qf_mean = qf_cat.mean(1)
qf_var = qf_cat.var(1)
min_qf_pi = qf_mean - self.coefficient_weight * qf_var.sqrt() # used in BEAR
# qf1_pi, qf2_pi = self.critic(state_batch, pi)
# min_qf_pi = qf1_pi
policy_loss = -(min_qf_pi - self.dual_lambda * (mmd_dist - self.cost_epsilon)).mean()
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
# Dual Lambda Training
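# Plain dual gradient ascent on the Lagrange multiplier: lambda grows while the
# average MMD exceeds the cost threshold epsilon and shrinks otherwise; the clip
# below keeps it within [e^-5, e^10] for numerical stability.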
self.dual_gradients = mmd_dist.mean().item() - self.cost_epsilon
self.dual_lambda += self.dual_step_size * self.dual_gradients
self.dual_lambda = np.clip(self.dual_lambda, np.power(np.e, -5), np.power(np.e, 10))
# Update Target Networks
soft_update(self.critic_target, self.critic, self.tau)
soft_update(self.policy_target, self.policy, self.tau)
return q_loss.item(), policy_loss.item(), self.dual_lambda, mmd_dist.mean().item()
# Save model parameters
def save_model(self, env_name, suffix="", actor_path=None, critic_path=None):
if not os.path.exists('models/'):
os.makedirs('models/')
if actor_path is None:
actor_path = "models/BEAR_actor_{}_{}".format(env_name, suffix)
if critic_path is None:
critic_path = "models/BEAR_critic_{}_{}".format(env_name, suffix)
print('Saving models to {} and {}'.format(actor_path, critic_path))
torch.save(self.policy.state_dict(), actor_path)
torch.save(self.critic.state_dict(), critic_path)
# Load model parameters
def load_model(self, actor_path, critic_path):
print('Loading models from {} and {}'.format(actor_path, critic_path))
if actor_path is not None:
self.policy.load_state_dict(torch.load(actor_path))
if critic_path is not None:
self.critic.load_state_dict(torch.load(critic_path))
|
{"hexsha": "2eceacb798af304c5f3a9ae228c40a1dbd748162", "size": 8637, "ext": "py", "lang": "Python", "max_stars_repo_path": "bear.py", "max_stars_repo_name": "Yuibooo/BEAR", "max_stars_repo_head_hexsha": "d8cf22e3bf0017db0702a6b8b8eb00f22e760991", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-12-28T07:30:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-17T01:38:35.000Z", "max_issues_repo_path": "bear.py", "max_issues_repo_name": "ryanxhr/BEAR", "max_issues_repo_head_hexsha": "d8cf22e3bf0017db0702a6b8b8eb00f22e760991", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bear.py", "max_forks_repo_name": "ryanxhr/BEAR", "max_forks_repo_head_hexsha": "d8cf22e3bf0017db0702a6b8b8eb00f22e760991", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5206185567, "max_line_length": 126, "alphanum_fraction": 0.6371425263, "include": true, "reason": "import numpy", "num_tokens": 2270}
|
"""
Scripts to evaluate models.
@author: Ying Meng (y(dot)meng201011(at)gmail(dot)com)
"""
import numpy as np
import models
from utils.config import *
import os
from utils.csv_headers import IdealModelEvalHeaders as headers
from utils.file import *
from data import normalize
from transformation import transform
from evaluation.compute_accuracy import get_test_accuracies, PERFORM_MODE
PROJECT_DIR = PATH.PROJECT_DIR
ANALYSE_DIR = '{}/evaluation_results/2019-09-06_02-41-18'.format(PROJECT_DIR)
PREDICTION_DIR = '{}/prediction_result'.format(ANALYSE_DIR)
EVALUATION_DIR = '{}/evaluation'.format(ANALYSE_DIR)
attacks = [
'bim_ord2_nbIter100_eps500', # 0
'bim_ord2_nbIter100_eps1000', # 1
'bim_ordinf_nbIter100_eps250', # 2
'bim_ordinf_nbIter100_eps500', # 3
'deepfool_maxIter100', # 4
'deepfool_maxIter10000', # 5
'fgsm_eps100', # 6
'fgsm_eps250', # 7
'fgsm_eps300', # 8
'jsma_theta30_gamma50', # 9
'jsma_theta50_gamma70', # 10
'pgd_eps250_nbIter100_epsIter10', # 11
'pgd_eps500_nbIter100_epsIter10', # 12
'pgd_eps750_nbIter100_epsIter10', # 13
]
def eval_ideal_model(attack):
"""
The test accuracy of the ideal model is the upper bound on the test accuracy
of an ensemble of k weak defenses.
:return:
"""
predictions_file = 'predProb.npy'
label_file = 'labels.npy'
ideal_accuracy = {}
print('ATTACK ({})'.format(attack))
# get the upper bound of ideal test accuracy
print('Computing {}...'.format(headers.TOP_K.value))
upper_bound = get_test_accuracies(attack, predictions_file,
label_file, PERFORM_MODE.TOP)
ideal_accuracy[headers.NUM_OF_WEAK_DEFENSES.value] = list(upper_bound.keys())
ideal_accuracy[headers.TOP_K.value] = list(upper_bound.values())
# get the lower bound of ideal test accuracy
print('Computing {}...'.format(headers.BOTTOM_K.value))
lower_bound = get_test_accuracies(attack, predictions_file,
label_file, PERFORM_MODE.BOTTOM)
ideal_accuracy[headers.BOTTOM_K.value] = list(lower_bound.values())
# get test accuracies of randomly built ideal model
# they will be used to estimate the certainty of test accuracy of an ideal model
rand_acc = []
rand_prefix = 'RandK_R'
for i in range(30):
key = '{}{}'.format(rand_prefix, i)
print('Computing {}...'.format(key))
acc = get_test_accuracies(attack, predictions_file,
label_file, PERFORM_MODE.RANDOM)
rand_acc.append(list(acc.values()))
ideal_accuracy[key] = list(acc.values())
# compute average, upper-bound, and lower-bound of test accuracy certainty
nb_of_rounds, nb_of_samples = np.asarray(rand_acc).shape
average = np.zeros(nb_of_samples)
upperbounds = np.zeros(nb_of_samples)
lowerbounds = np.zeros(nb_of_samples) + np.inf
for i in range(nb_of_samples):
for j in range(nb_of_rounds):
average[i] += rand_acc[j][i]
upperbounds[i] = max(upperbounds[i], rand_acc[j][i])
lowerbounds[i] = min(lowerbounds[i], rand_acc[j][i])
average = np.round(average / nb_of_rounds, 4)
ideal_accuracy[headers.RANDK_AVG.value] = list(average)
ideal_accuracy[headers.RANDK_UPPERBOUND.value] = list(upperbounds)
ideal_accuracy[headers.RANDK_LOWERBOUND.value] = list(lowerbounds)
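# Vectorized equivalent (illustrative, assuming rand_acc is rectangular):
# with acc = np.asarray(rand_acc), the loop above reduces to
#   average = np.round(acc.mean(axis=0), 4)
#   upperbounds = acc.max(axis=0); lowerbounds = acc.min(axis=0)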
ideal_accuracy[headers.GAP.value] = list(np.asarray(ideal_accuracy[headers.TOP_K.value])
- np.asarray(ideal_accuracy[headers.RANDK_AVG.value]))
if MODE.DEBUG:
print(ideal_accuracy)
file_name = 'acc-ideal_model-{}-{}.csv'.format('mnist', attack)
file_path = os.path.join(EVALUATION_DIR, file_name)
dict2csv(ideal_accuracy, file_path, list_as_value=True)
def eval_single_model(model_name, testset_name, labels_name):
"""
Evaluate model on test set
:param model_name:
:param testset_name:
:return:
"""
prefix, dataset, architect, trans_type = model_name.split('-')
X_test = np.load('{}/{}.npy'.format(PATH.ADVERSARIAL_FILE, testset_name))
labels = np.load('{}/{}.npy'.format(PATH.ADVERSARIAL_FILE, labels_name))
if 'composition' in trans_type:
trans_type = TRANSFORMATION.get_transformation_compositions()
print(type(trans_type), trans_type)
# apply transformation(s)
X_test = transform(X_test, trans_type)
# evaluate each of the composition
if 'composition' in trans_type:
for trans in trans_type:
print(type(trans), trans)
m_name = '{}-{}-{}-{}'.format(prefix, dataset, architect, trans)
model = models.load_model(m_name)
print('*** Evaluating ({}) on ({})...'.format(m_name, testset_name))
scores = model.evaluate(X_test, labels, verbose=2)
print(scores)
del model
# evaluate the model
model = models.load_model(model_name)
if (dataset == DATA.cifar_10):
X_test = normalize(X_test)
print('*** Evaluating ({}) on ({})...'.format(model_name, testset_name))
scores = model.evaluate(X_test, labels, verbose=2)
print(scores)
return scores
def main():
"""
Evaluate single model
"""
# model_name = 'model-mnist-cnn-composition'
# testset_name = 'test_AE-mnist-cnn-clean-bim_ordinf_nbIter100_eps250'
# labels_name = 'test_Label-mnist-clean'
# eval_single_model(model_name, testset_name, labels_name)
"""
Evaluate ideal model
"""
attack = attacks[8]
eval_ideal_model(attack)
if __name__ == '__main__':
MODE.debug_off()
# MODE.debug_on()
main()
|
{"hexsha": "a49f561506f9ed61f76dde96038b82045c8673da", "size": 5747, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/evaluate_models.py", "max_stars_repo_name": "nybupt/athena", "max_stars_repo_head_hexsha": "2808f5060831382e603e5dc5ec6a9e9d8901a3b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/evaluate_models.py", "max_issues_repo_name": "nybupt/athena", "max_issues_repo_head_hexsha": "2808f5060831382e603e5dc5ec6a9e9d8901a3b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:32:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:17:17.000Z", "max_forks_repo_path": "scripts/evaluate_models.py", "max_forks_repo_name": "nybupt/athena", "max_forks_repo_head_hexsha": "2808f5060831382e603e5dc5ec6a9e9d8901a3b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-12T12:48:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T12:48:51.000Z", "avg_line_length": 34.4131736527, "max_line_length": 99, "alphanum_fraction": 0.6605185314, "include": true, "reason": "import numpy", "num_tokens": 1472}
|
# this file contains the regularization options
# * these are just standard Tikhonov regularizations of either
# (1) the function ("L2"), or
# (2) gradients of the functions ("H1")
from params import k,x,y,dt
from scipy.fft import ifft2,fft2
import numpy as np
def lap(f):
# negative Laplacian computed via Fourier transform
return ifft2((k**2)*fft2(f)).real
def reg(f,reg_type):
# first variation of regularization functional
if reg_type == 'H1':
R = lap(f) # note: adding the L2 term here seems to help with convergence
elif reg_type == 'L2':
R = f
else:
raise ValueError('unknown reg_type: {}'.format(reg_type)) # avoid an UnboundLocalError on return
return R
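# Illustrative check (added sketch): for a periodic plane wave
# f(x,y) = sin(2*pi*m*x), the negative Laplacian is (2*pi*m)**2 * f, so lap()
# can be sanity-checked against the analytic answer, assuming the grid `k`
# from params holds wavenumber magnitudes matching the scipy.fft conventions:
#   n = 64; xs = np.linspace(0, 1, n, endpoint=False)
#   f = np.tile(np.sin(2*np.pi*3*xs), (n, 1))
#   np.allclose(lap(f), (2*np.pi*3)**2 * f)  # True for a matching k grid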
|
{"hexsha": "ad924d7d35776a715d0b93ce15b3327997e9134c", "size": 611, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/regularizations.py", "max_stars_repo_name": "agstub/subglacial-inversion", "max_stars_repo_head_hexsha": "0f96e59771773187bbe32e5184272fdff59dc3c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/regularizations.py", "max_issues_repo_name": "agstub/subglacial-inversion", "max_issues_repo_head_hexsha": "0f96e59771773187bbe32e5184272fdff59dc3c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-18T20:23:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-04T16:47:11.000Z", "max_forks_repo_path": "code/regularizations.py", "max_forks_repo_name": "agstub/subglacial-inversion", "max_forks_repo_head_hexsha": "0f96e59771773187bbe32e5184272fdff59dc3c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5652173913, "max_line_length": 87, "alphanum_fraction": 0.6759410802, "include": true, "reason": "import numpy,from scipy", "num_tokens": 167}
|
""" Membership inference attack on synthetic data that implements the risk of linkability. """
from pandas import DataFrame
from numpy import ndarray, concatenate, stack, array, round
from os import path
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import ShuffleSplit
from synthetic_data.utils.datagen import convert_df_to_array
from synthetic_data.utils.logging import LOGGER
from .privacy_attack import PrivacyAttack
LABEL_OUT = 0
LABEL_IN = 1
class MIAttackClassifier(PrivacyAttack):
"""" Parent class for membership inference attacks based on shadow modelling
using a sklearn classifier as attack model """
def __init__(self, AttackClassifier, metadata, priorProbabilities, FeatureSet=None):
"""
:param AttackClassifier: Classifier: An object that implements a binary classifier
:param metadata: dict: Attribute metadata describing the data domain of the synthetic target data
:param priorProbabilities: dict: Prior probabilities over the target's membership
:param FeatureSet: FeatureSet: An object that implements a feature extraction strategy for converting a dataset into a feature vector
"""
self.AttackClassifier = AttackClassifier
self.FeatureSet = FeatureSet
self.ImputerCat = SimpleImputer(strategy='most_frequent')
self.ImputerNum = SimpleImputer(strategy='median')
self.metadata = metadata
self.priorProbabilities = priorProbabilities
self.trained = False
self.__name__ = f'{self.AttackClassifier.__class__.__name__}{self.FeatureSet.__class__.__name__}'
def _get_prior_probability(self, secret):
""" Get prior probability of the adversary guessing the target's secret
:param secret: int: Target's true secret. Either LABEL_IN=1 or LABEL_OUT=0
"""
try:
return self.priorProbabilities[secret]
except KeyError:
return 0
def train(self, synA, labels):
""" Train a membership inference attack on a labelled training set
:param synA: list of ndarrays: A list of synthetic datasets
:param labels: list: A list of labels that indicate whether target was in the training data (LABEL_IN=1) or not (LABEL_OUT=0)
"""
if self.FeatureSet is not None:
synA = stack([self.FeatureSet.extract(s) for s in synA])
else:
if isinstance(synA[0], DataFrame):
synA = [self._impute_missing_values(s) for s in synA]
synA = stack([convert_df_to_array(s, self.metadata).flatten() for s in synA])
else:
synA = stack([s.flatten() for s in synA])
if not isinstance(labels, ndarray):
labels = array(labels)
self.AttackClassifier.fit(synA, labels)
LOGGER.debug('Finished training MIA distinguisher')
self.trained = True
del synA, labels
def attack(self, synT):
""" Makes a guess about target's presence in the training set of the model that produced the synthetic input data
:param synT: ndarray or DataFrame: A synthetic dataset
"""
assert self.trained, 'Attack must first be trained before it can predict membership'
if self.FeatureSet is not None:
synT = stack([self.FeatureSet.extract(s) for s in synT])
else:
if isinstance(synT[0], DataFrame):
synT = stack([convert_df_to_array(s, self.metadata).flatten() for s in synT])
else:
synT = stack([s.flatten() for s in synT])
return round(self.AttackClassifier.predict(synT), 0).astype(int).tolist()
def get_probability_of_success(self, synT, secret):
"""Calculate probability that attacker correctly predicts whether target was present in model's training data
:param synT: ndarray or DataFrame: A synthetic dataset
:param secret: int: Target's true secret. Either LABEL_IN=1 or LABEL_OUT=0
"""
assert self.trained, 'Attack must first be trained on some random data before it can predict membership of target data'
if self.FeatureSet is not None:
synT = stack([self.FeatureSet.extract(s) for s in synT])
else:
if isinstance(synT[0], DataFrame):
synT = stack([convert_df_to_array(s, self.metadata).flatten() for s in synT])
else:
synT = stack([s.flatten() for s in synT])
probs = self.AttackClassifier.predict_proba(synT)
return [p[s] for p,s in zip(probs, secret)]
def _impute_missing_values(self, df):
""" Impute missing values in a DataFrame
:param df: DataFrame
"""
cat_cols = list(df.select_dtypes(['object', 'category']))
if len(cat_cols) > 0:
self.ImputerCat.fit(df[cat_cols])
df[cat_cols] = self.ImputerCat.transform(df[cat_cols])
num_cols = list(df.select_dtypes(['int', 'float']))
if len(num_cols) > 0:
self.ImputerNum.fit(df[num_cols])
df[num_cols] = self.ImputerNum.transform(df[num_cols])
return df
class MIAttackClassifierLinearSVC(MIAttackClassifier):
""" Membership inference attack based on shadow modelling using a linear SVClassifier """
def __init__(self, metadata, priorProbabilities, FeatureSet=None):
super().__init__(SVC(kernel='linear', probability=True), metadata, priorProbabilities, FeatureSet)
class MIAttackClassifierSVC(MIAttackClassifier):
""" Membership inference attack based on shadow modelling using a non-linear SVClassifier"""
def __init__(self, metadata, priorProbabilities, FeatureSet=None):
super().__init__(SVC(probability=True), metadata, priorProbabilities, FeatureSet)
class MIAttackClassifierLogReg(MIAttackClassifier):
""" Membership inference attack based on shadow modelling using a LogisticRegression Classifier"""
def __init__(self, metadata, priorProbabilities, FeatureSet=None):
super().__init__(LogisticRegression(), metadata, priorProbabilities, FeatureSet)
class MIAttackClassifierRandomForest(MIAttackClassifier):
""" Membership inference attack based on shadow modelling using a RandomForestClassifier"""
def __init__(self, metadata, priorProbabilities, FeatureSet=None):
super().__init__(RandomForestClassifier(), metadata, priorProbabilities, FeatureSet)
class MIAttackClassifierKNN(MIAttackClassifier):
""" Membership inference attack based on shadow modelling using a KNeighborsClassifier """
def __init__(self, metadata, priorProbabilities, FeatureSet=None):
super().__init__(KNeighborsClassifier(n_neighbors=5), metadata, priorProbabilities, FeatureSet)
class MIAttackClassifierMLP(MIAttackClassifier):
""" Membership inference attack based on shadow modelling using a multi-layer perceptron as classifier"""
def __init__(self, metadata, priorProbabilities, FeatureSet=None):
super().__init__(MLPClassifier((200,), solver='lbfgs'), metadata, priorProbabilities, FeatureSet)
def generate_mia_shadow_data_shufflesplit(GenModel, target, rawA, sizeRaw, sizeSyn, numModels, numCopies):
""" Procedure to train a set of shadow models on multiple training sets sampled from a reference dataset.
:param GenModel: GenerativeModel: An object that implements a generative model training procedure
:param target: ndarray or DataFrame: The target record
:param rawA: ndarray or DataFrame: Attacker's reference dataset of size n_A
:param sizeRaw: int: Size of the target training set
:param sizeSyn: int: Size of the synthetic dataset the adversary will be given access to
:param numModels: int: Number of shadow models to train
:param numCopies: int: Number of synthetic training datasets sampled from each shadow model
:return synA: list of ndarrays or DataFrames: List of synthetic datasets
:return labels: list: List of labels indicating whether target was in or out
"""
assert isinstance(rawA, GenModel.datatype), f"GM expects datatype {GenModel.datatype} but got {type(rawA)}"
assert isinstance(target, type(rawA)), f"Mismatch of datatypes between target record and raw data"
kf = ShuffleSplit(n_splits=numModels, train_size=sizeRaw)
synA, labels = [], []
LOGGER.debug(f'Start training {numModels} shadow models of class {GenModel.__class__.__name__}')
for train_index, _ in kf.split(rawA):
# Sample a new training set from the reference dataset
if isinstance(rawA, DataFrame):
rawAout = rawA.iloc[train_index]
else:
rawAout = rawA[train_index, :]
# Fit GM to raw data without target
GenModel.fit(rawAout)
# Generate synthetic samples from model trained without target and label as out
SynAout = [GenModel.generate_samples(sizeSyn) for _ in range(numCopies)]
synA.extend(SynAout)
labels.extend([LABEL_OUT for _ in range(numCopies)])
# Insert target record into training data
if isinstance(rawA, DataFrame):
rawAin = rawAout.append(target)
else:
if len(target.shape) == 1:
target = target.reshape(1, len(target))
rawAin = concatenate([rawAout, target])
# Fit generative model to raw data including target
GenModel.fit(rawAin)
# Generate synthetic samples from model trained on data including target
synthetic_in = [GenModel.generate_samples(sizeSyn) for _ in range(numCopies)]
synA.extend(synthetic_in)
labels.extend([LABEL_IN for _ in range(numCopies)])
return synA, labels
def generate_mia_shadow_data_allin(GenModel, target, rawA, sizeSyn, numCopies):
""" Generate training data for the MIA from a *single* shadow model trained on the entire reference dataset at once
:param GenModel: GenerativeModel: An object that implements a generative model training procedure
:param target: ndarray or DataFrame: The target record
:param rawA: ndarray or DataFrame: Attacker's reference dataset of size n_A
:param sizeSyn: int: Size of the synthetic dataset the adversary will be given access to
:param numCopies: int: Number of synthetic training datasets sampled from the shadow model
:return synA: list of ndarrays or DataFrames: List of synthetic datasets
:return labels: list: List of labels indicating whether target was in or out
"""
assert isinstance(rawA, GenModel.datatype), f"GM expects datatype {GenModel.datatype} but got {type(rawA)}"
assert isinstance(target, type(rawA)), f"Mismatch of datatypes between target record and raw data"
synA, labels = [], []
LOGGER.debug(f'Start training shadow model of class {GenModel.__class__.__name__} on data of size {len(rawA)}')
# Fit GM to data without target's data
GenModel.fit(rawA)
# Generate synthetic sample for data without target
synAout = [GenModel.generate_samples(sizeSyn) for _ in range(numCopies)]
synA.extend(synAout)
labels.extend([LABEL_OUT for _ in range(numCopies)])
# Insert targets into training data
if isinstance(rawA, DataFrame):
rawAin = rawA.append(target)
else:
if len(target.shape) == 1:
target = target.reshape(1, len(target))
rawAin = concatenate([rawA, target])
# Fit generative model to data including target
GenModel.fit(rawAin)
# Generate synthetic sample for data including target
synAin = [GenModel.generate_samples(sizeSyn) for _ in range(numCopies)]
synA.extend(synAin)
labels.extend([LABEL_IN for _ in range(numCopies)])
return synA, labels
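# End-to-end usage sketch (illustrative; gen_model, target, rawA, metadata and
# priorProbabilities are assumed to be constructed elsewhere in the package):
#   synA, labels = generate_mia_shadow_data_allin(gen_model, target, rawA,
#                                                 sizeSyn=1000, numCopies=10)
#   attack = MIAttackClassifierRandomForest(metadata, priorProbabilities)
#   attack.train(synA, labels)
#   guesses = attack.attack(synT)  # synT: list of synthetic test datasets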
|
{"hexsha": "4b761409c5a1936f777996aaad93ba2fdffbf7e8", "size": 11983, "ext": "py", "lang": "Python", "max_stars_repo_path": "synthetic_data/privacy_attacks/membership_inference.py", "max_stars_repo_name": "kasra-hosseini/synthetic_data_release", "max_stars_repo_head_hexsha": "768fe15cae6a033a17390d8dc2152bb75a083ca2", "max_stars_repo_licenses": ["MIT", "BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "synthetic_data/privacy_attacks/membership_inference.py", "max_issues_repo_name": "kasra-hosseini/synthetic_data_release", "max_issues_repo_head_hexsha": "768fe15cae6a033a17390d8dc2152bb75a083ca2", "max_issues_repo_licenses": ["MIT", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "synthetic_data/privacy_attacks/membership_inference.py", "max_forks_repo_name": "kasra-hosseini/synthetic_data_release", "max_forks_repo_head_hexsha": "768fe15cae6a033a17390d8dc2152bb75a083ca2", "max_forks_repo_licenses": ["MIT", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7964285714, "max_line_length": 142, "alphanum_fraction": 0.7026621046, "include": true, "reason": "from numpy", "num_tokens": 2657}
|
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import visvis as vv
import geovis_notebook_version
def get_view(
dir_voxels,
voxel_number_list = None
):
if not voxel_number_list:
fname_voxels_list = [
os.path.join(dir_voxels, f)
for f in os.listdir(dir_voxels)
if os.path.isfile(os.path.join(dir_voxels, f))
and 'voxel' in f
]
else:
fname_voxels_list = [
os.path.join(dir_voxels, 'voxels{}.npz'.format(num))
for num in voxel_number_list
]
for idx, fn in enumerate(fname_voxels_list):
if idx == 0:
view = geovis_notebook_version.MasonView(fn)
else:
view.add_samples(geovis_notebook_version.MasonView(fn))
return view
def get_mean_layer(view, layer_idx):
# return the mean posterior for one layer (the original assigned the value but
# never returned it, and a stray zero-argument call would have raised a TypeError)
return view.meanlayer(layer_idx)
len_argv = len(sys.argv)
voxel_number_list = None
if len_argv == 1:
print("No voxel directory specified")
sys.exit(1)
if len_argv >= 2:
dir_voxels = sys.argv[1]
print(dir_voxels)
if len_argv == 3:
dir_out = sys.argv[2]
print(dir_out)
if len_argv == 4:
voxel_number_list = eval(sys.argv[3])
print(voxel_number_list)
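# Example invocation (hypothetical paths):
#   python get_mean_posterior.py /path/to/voxels /path/to/out "[0, 1, 2]"
# where the optional third argument is eval'd into a list of voxel file numbers.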
view = get_view(dir_voxels, voxel_number_list)
no_layers = np.shape(view.layers)[0]
for layer_idx in range(no_layers):
layer_mean = view.meanlayer(layer_idx)
fname = 'mean-posterior-layer{}'.format(layer_idx)
out_path = os.path.join(dir_out, fname)
np.save(out_path, layer_mean)
|
{"hexsha": "c7dbb8b32d2f2e74dd7e0154b7cdac33c2236754", "size": 1422, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/get_mean_posterior.py", "max_stars_repo_name": "divad-nhok/obsidian_fork", "max_stars_repo_head_hexsha": "e5bee2b706f78249564f06c88a18be086b17c895", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-08T16:28:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T14:55:59.000Z", "max_issues_repo_path": "scripts/get_mean_posterior.py", "max_issues_repo_name": "divad-nhok/obsidian_fork", "max_issues_repo_head_hexsha": "e5bee2b706f78249564f06c88a18be086b17c895", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-16T00:46:58.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-16T00:46:58.000Z", "max_forks_repo_path": "scripts/get_mean_posterior.py", "max_forks_repo_name": "divad-nhok/obsidian_fork", "max_forks_repo_head_hexsha": "e5bee2b706f78249564f06c88a18be086b17c895", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-02-26T01:03:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T02:31:37.000Z", "avg_line_length": 24.1016949153, "max_line_length": 58, "alphanum_fraction": 0.741209564, "include": true, "reason": "import numpy", "num_tokens": 415}
|
# encoding: utf-8
"""
placemap_viewer.py -- An interactive GUI interface for individual spatial maps
Created by Joe Monaco on 04-30-2008.
Copyright (c) 2008 Columbia University. All rights reserved.
"""
# Library imports
import numpy as N, scipy as S
from matplotlib import cm
# Package imports
from .ratemap import PlaceMap
from .tools.images import array_to_rgba
from .tools.stats import integer_hist
from .tools.bash import CPrint
# Traits imports
from enthought.traits.api import HasTraits, Instance, Trait, TraitError, \
Property, Enum, Int, Float, Range, Delegate
from enthought.traits.ui.api import View, Group, Item, Heading
# Chaco imports
from enthought.chaco.api import ArrayPlotData, Plot, BasePlotContainer, VPlotContainer, copper
from enthought.enable.component_editor import ComponentEditor
class PlaceMapViewer(HasTraits):
"""
Chaco viewer for placemap data
Constructor arguments:
pmap -- PlaceMap (or subclass) object to view
Public methods:
view -- Bring up the Chaco View window for looking at data
"""
# Console output
out = Instance(CPrint)
# Reference to PlaceMap object
PMap = Trait(PlaceMap)
# Stage map traits
stage_map = Instance(Plot)
stage_map_type = Enum('representation', 'coverage', 'field_centers')
sparsity = Delegate('PMap')
num_active = Delegate('PMap')
stage_coverage = Delegate('PMap')
stage_repr = Delegate('PMap')
peak_rate = Delegate('PMap')
# Unit map traits
_unit = Int
unit_map = Instance(Plot)
unit_map_type = Enum('ratemap', 'single', 'fields')
num_fields = Int
coverage = Float
avg_area = Float
avg_diameter = Float
max_rate = Float
# Unit data traits
unit_data_plots = Instance(BasePlotContainer)
unit_bins = Range(low=5, high=50, value=20)
# Field data traits
field_data_plots = Instance(BasePlotContainer)
field_bins = Range(low=5, high=50, value=20)
# Chaco view definition
traits_view = \
View(
Group(
Group(
Item('stage_map_type'),
Item('stage_map', editor=ComponentEditor(), show_label=False),
Group(
Item('sparsity', style='readonly'),
Item('num_active', style='readonly'),
Item('stage_coverage', label='Coverage', style='readonly'),
Item('stage_repr', label='Representation', style='readonly'),
Item('peak_rate', style='readonly'),
label='Stage Coding',
show_border=True),
label='Stage Maps',
orientation='v'),
Group(
Item('unit_map_type'),
Item('unit', style='custom'),
Item('unit_map', editor=ComponentEditor(), show_label=False),
Group(
Item('max_rate', style='readonly'),
Item('num_fields', style='readonly'),
Item('coverage', style='readonly'),
Item('avg_area', label='Mean Field Area', style='readonly'),
Item('avg_diameter', label='Mean Field Diameter', style='readonly'),
label='Place Unit',
show_border=True),
label='Unit Maps',
orientation='v'),
Group(
Heading('Distributions of Single-Unit Properties'),
Item('unit_data_plots', editor=ComponentEditor(), show_label=False),
Item('unit_bins', label='Bins'),
label='Unit Data'),
Group(
Heading('Distributions of Single-Field Properties'),
Item('field_data_plots', editor=ComponentEditor(), show_label=False),
Item('field_bins', label='Bins'),
label='Field Data'),
layout='tabbed'),
title='Placemap Viewer',
resizable=True,
height=800,
width=700,
kind='live',
buttons=['Cancel', 'OK'])
def __init__(self, pmap, **traits):
HasTraits.__init__(self, **traits)
try:
self.PMap = pmap
except TraitError:
self.out('PlaceMap subclass instance required', error=True)
return
self.fdata = self.PMap.get_field_data()
self.udata = self.PMap.get_unit_data()
self.add_trait('unit', Range(low=0, high=self.PMap.num_maps-1))
self._update_unit_values()
self.out('Bringing up place-map visualization...')
self.view()
self.out('Done!')
def view(self):
self.configure_traits()
# Plot creation methods
def _stage_map_default(self):
# RGBA maps
rep_map = array_to_rgba(self.PMap.stage_repr_map, cmap=cm.hot)
cov_map = array_to_rgba(self.PMap.stage_coverage_map, cmap=cm.gray)
# Data sources and plot object
data = ArrayPlotData(fields_x=self.fdata['x'], fields_y=self.fdata['y'],
fields_z=self.fdata['peak'], rep=rep_map, cov=cov_map)
p = Plot(data)
# Plot the field centers
p.plot(('fields_x', 'fields_y', 'fields_z'), name='centers', type='cmap_scatter',
marker='dot', marker_size=5, color_mapper=copper, line_width=1, fill_alpha=0.6)
# Plot the representation and coverage maps
p.img_plot('rep', name='rep', xbounds=(0, self.PMap.W), ybounds=(0, self.PMap.H),
origin='top left')
p.img_plot('cov', name='cov', xbounds=(0, self.PMap.W), ybounds=(0, self.PMap.H),
origin='top left')
# Start with only the representation map visible
p.plots['cov'][0].visible = False
p.plots['centers'][0].visible = False
# Plot tweaks
p.aspect_ratio = 1.0
p.y_axis.title = 'Y (cm)'
p.x_axis.title = 'X (cm)'
p.x_axis.orientation = 'bottom'
p.title = 'Stage Maps'
return p
def _unit_map_default(self):
# Set the initial unit map
data = ArrayPlotData(unit_map=self._get_unit_map_data())
p = Plot(data)
# Plot the map
p.img_plot('unit_map', name='unit', xbounds=(0, self.PMap.W), ybounds=(0, self.PMap.H),
origin='top left')
# Plot tweaks
p.aspect_ratio = 1.0
p.y_axis.title = 'Y (cm)'
p.x_axis.title = 'X (cm)'
p.x_axis.orientation = 'bottom'
p.title = 'Single Unit Maps'
return p
def _unit_data_plots_default(self):
# Plot data and vertical container object
data = ArrayPlotData(**self._get_unit_plots_data())
container = VPlotContainer()
# Add individual distribution plots to container
for key in ('avg_diameter', 'avg_area', 'coverage', 'max_r', 'num_fields'):
p = Plot(data)
p.plot((key+'_bins', key), name=key, type='polygon', edge_width=2,
edge_color='mediumblue', face_color='lightsteelblue')
p.x_axis.title = key
p.y_axis.title = 'count'
p.padding = [50, 30, 20, 40]
if key == 'num_fields':
p.x_axis.tick_interval = 1
container.add(p)
return container
def _field_data_plots_default(self):
# Plot data and vertical container object
data = ArrayPlotData(**self._get_field_plots_data())
container = VPlotContainer()
# Add individual distributions plots to container
for key in ('area', 'diameter', 'average', 'peak'):
p = Plot(data)
p.plot((key+'_bins', key), name=key, type='polygon', edge_width=2,
edge_color='red', face_color='salmon')
p.x_axis.title = key
p.y_axis.title = 'count'
p.padding = [50, 30, 20, 40]
container.add(p)
return container
# Plot update methods
def _update_stage_map(self):
"""Handle switching between different stage maps"""
# Update and equalize bounds for all subplots
self.stage_map.plots['rep'][0].bounds = self.stage_map.bounds
self.stage_map.plots['cov'][0].bounds = self.stage_map.bounds
self.stage_map.plots['centers'][0].bounds = self.stage_map.bounds
# Set visibility flags
if self.stage_map_type == 'representation':
self.stage_map.title = 'Relative Representation'
vis_plots = (True, False, False)
elif self.stage_map_type == 'coverage':
self.stage_map.title = 'Total Stage Coverage'
vis_plots = (False, True, False)
elif self.stage_map_type == 'field_centers':
self.stage_map.title = 'Place Field Centroids'
vis_plots = (False, False, True)
# Toggle plot visibility and redraw
self.stage_map.plots['rep'][0].visible, \
self.stage_map.plots['cov'][0].visible, \
self.stage_map.plots['centers'][0].visible = vis_plots
self.stage_map.request_redraw()
def _update_unit_map(self):
"""Update current image source and title; then redraw the plot"""
self.unit_map.data.set_data('unit_map', self._get_unit_map_data())
self.unit_map.title = '%s of Unit %d'%(self.unit_map_type.capitalize(), self.unit)
self.unit_map.request_redraw()
def _update_unit_values(self):
"""Update the scalar readonly values"""
if self._unit == -1:
self.num_fields = 0
self.coverage = self.avg_area = self.avg_diameter = 0.0
self.max_rate = self.PMap.maxima[self.unit, 2]
else:
self.num_fields = int(self.udata[self._unit]['num_fields'])
self.coverage = float(self.udata[self._unit]['coverage'])
self.avg_area = float(self.udata[self._unit]['avg_area'])
self.avg_diameter = float(self.udata[self._unit]['avg_diameter'])
self.max_rate = float(self.udata[self._unit]['max_r'])
def _get_unit_map_data(self):
"""Helper function to get RGBA array for current unit and map type"""
if self.unit_map_type == 'ratemap':
map_data = array_to_rgba(self.PMap.Map[self.unit], cmap=cm.jet,
norm=False, cmax=self.peak_rate)
elif self.unit_map_type == 'single':
map_data = array_to_rgba(self.PMap.single_maps[self.unit], cmap=cm.hot)
elif self.unit_map_type == 'fields':
map_data = array_to_rgba(self.PMap.coverage_maps[self.unit], cmap=cm.gray)
return map_data
def _get_unit_plots_data(self):
"""Helper function for getting unit data distributions"""
# Integer distribution for number of fields
data = {}
data['num_fields_bins'], data['num_fields'] = integer_hist(self.udata['num_fields'])
# Continuous distributions of other unit statistics
for key in ('avg_area', 'avg_diameter', 'coverage', 'max_r'):
keyb = key + '_bins'
data[key], data[keyb] = S.histogram(self.udata[key], bins=self.unit_bins)
data[keyb] += (data[keyb][1] - data[keyb][0]) / 2
data[keyb] = data[keyb][:-1]
# Add 0-value end-points for polygon display
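# e.g. bins [1, 2, 3] with counts [4, 5, 6] become [1, 1, 2, 3, 3] and
# [0, 4, 5, 6, 0], so the plotted polygon closes at zero height
# (illustrative values, not taken from the data).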
for key in data:
if key[-4:] == 'bins':
data[key] = N.r_[data[key][0], data[key], data[key][-1]]
else:
data[key] = N.r_[0, data[key], 0]
return data
def _get_field_plots_data(self):
"""Helper function for getting field data distributions"""
# Continuous distributions of place field properties
data = {}
for key in ('area', 'diameter', 'average', 'peak'):
keyb = key + '_bins'
data[key], data[keyb] = S.histogram(self.fdata[key], bins=self.field_bins)
data[keyb] += (data[keyb][1] - data[keyb][0]) / 2
data[keyb] = data[keyb][:-1]
# Add 0-value end-points for polygon display
for key in data:
if key[-4:] == 'bins':
data[key] = N.r_[data[key][0], data[key], data[key][-1]]
else:
data[key] = N.r_[0, data[key], 0]
return data
# Map traits notifications
def _unit_bins_changed(self):
"""Update plot data for unit distributions"""
data = self._get_unit_plots_data()
plot_data = self.unit_data_plots.components[0].data
for key in data:
plot_data.set_data(key, data[key])
def _field_bins_changed(self):
data = self._get_field_plots_data()
plot_data = self.field_data_plots.components[0].data
for key in data:
plot_data.set_data(key, data[key])
def _stage_map_type_changed(self):
self._update_stage_map()
def _unit_map_type_changed(self):
self._update_unit_map()
def _unit_changed(self):
"""Update the unit map and scalar values"""
find_unit = (self.udata['unit'] == self.unit).nonzero()[0]
if find_unit.shape[0]:
self._unit = find_unit[0]
else:
self._unit = -1
self._update_unit_map()
self._update_unit_values()
# Output object default
def _out_default(self):
return CPrint(prefix=self.__class__.__name__, color='purple')
|
{"hexsha": "18e97d6a14af7a440f9764db0ac61fdcfc2e75a4", "size": 13874, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/placemap_viewer.py", "max_stars_repo_name": "jdmonaco/grid-remapping-model", "max_stars_repo_head_hexsha": "5794b0666d51be4359fd8d74da93dca8e98402bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-31T13:56:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T18:04:29.000Z", "max_issues_repo_path": "src/placemap_viewer.py", "max_issues_repo_name": "ModelDBRepository/138951", "max_issues_repo_head_hexsha": "f71d4febd29e6e09bb7370022ab85f4a8f1c81b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/placemap_viewer.py", "max_forks_repo_name": "ModelDBRepository/138951", "max_forks_repo_head_hexsha": "f71d4febd29e6e09bb7370022ab85f4a8f1c81b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5989159892, "max_line_length": 95, "alphanum_fraction": 0.570491567, "include": true, "reason": "import numpy", "num_tokens": 3135}
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for Heat Exchanger 1D unit model.
Author: Jaffer Ghouse
"""
import pytest
from pyomo.environ import (ConcreteModel, TerminationCondition,
SolverStatus, value, units as pyunits)
from pyomo.common.config import ConfigBlock
from pyomo.util.check_units import (assert_units_consistent,
assert_units_equivalent)
from idaes.core import (FlowsheetBlock, MaterialBalanceType, EnergyBalanceType,
MomentumBalanceType, useDefault)
from idaes.generic_models.unit_models.heat_exchanger_1D import HeatExchanger1D as HX1D
from idaes.generic_models.unit_models.heat_exchanger_1D import WallConductionType
from idaes.generic_models.unit_models.heat_exchanger import HeatExchangerFlowPattern
from idaes.generic_models.properties.core.generic.generic_property import (
GenericParameterBlock)
from idaes.generic_models.properties.core.examples.BT_PR import \
configuration
from idaes.generic_models.properties.activity_coeff_models.BTX_activity_coeff_VLE \
import BTXParameterBlock
from idaes.generic_models.properties import iapws95
from idaes.generic_models.properties.examples.saponification_thermo import (
SaponificationParameterBlock)
from idaes.core.util.exceptions import ConfigurationError
from idaes.core.util.model_statistics import (degrees_of_freedom,
number_variables,
number_total_constraints,
number_unused_variables)
from idaes.core.util.testing import (PhysicalParameterTestBlock,
initialization_tester)
from idaes.core.util import get_solver
from idaes.core.util import scaling as iscale
# Imports to assemble BT-PR with different units
from idaes.core import LiquidPhase, VaporPhase, Component
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType
from idaes.generic_models.properties.core.phase_equil import SmoothVLE
from idaes.generic_models.properties.core.phase_equil.bubble_dew import \
LogBubbleDew
from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity
import idaes.generic_models.properties.core.pure.RPP4 as RPP
# -----------------------------------------------------------------------------
# Get default solver for testing
solver = get_solver()
# -----------------------------------------------------------------------------
@pytest.mark.unit
def test_config():
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = PhysicalParameterTestBlock()
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties}})
# Check unit config arguments
assert len(m.fs.unit.config) == 8
assert isinstance(m.fs.unit.config.shell_side, ConfigBlock)
assert isinstance(m.fs.unit.config.tube_side, ConfigBlock)
assert m.fs.unit.config.flow_type == HeatExchangerFlowPattern.cocurrent
assert m.fs.unit.config.has_wall_conduction == \
WallConductionType.zero_dimensional
assert m.fs.unit.config.finite_elements == 20
assert m.fs.unit.config.collocation_points == 5
# Check shell side config arguments
assert len(m.fs.unit.config.shell_side) == 11
assert m.fs.unit.config.shell_side.dynamic == useDefault
assert m.fs.unit.config.shell_side.has_holdup == useDefault
assert m.fs.unit.config.shell_side.material_balance_type == \
MaterialBalanceType.useDefault
assert m.fs.unit.config.shell_side.energy_balance_type == \
EnergyBalanceType.useDefault
assert m.fs.unit.config.shell_side.momentum_balance_type == \
MomentumBalanceType.pressureTotal
assert not m.fs.unit.config.shell_side.has_pressure_change
assert not m.fs.unit.config.shell_side.has_phase_equilibrium
assert m.fs.unit.config.shell_side.transformation_method == \
'dae.finite_difference'
assert m.fs.unit.config.shell_side.transformation_scheme == 'BACKWARD'
# Check tube side config arguments
assert len(m.fs.unit.config.tube_side) == 11
assert m.fs.unit.config.tube_side.dynamic == useDefault
assert m.fs.unit.config.tube_side.has_holdup == useDefault
assert m.fs.unit.config.tube_side.material_balance_type == \
MaterialBalanceType.useDefault
assert m.fs.unit.config.tube_side.energy_balance_type == \
EnergyBalanceType.useDefault
assert m.fs.unit.config.tube_side.momentum_balance_type == \
MomentumBalanceType.pressureTotal
assert not m.fs.unit.config.tube_side.has_pressure_change
assert not m.fs.unit.config.tube_side.has_phase_equilibrium
assert m.fs.unit.config.tube_side.transformation_method == \
'dae.finite_difference'
assert m.fs.unit.config.tube_side.transformation_scheme == 'BACKWARD'
@pytest.mark.unit
def test_config_validation():
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = BTXParameterBlock(default={"valid_phase": 'Liq'})
with pytest.raises(ConfigurationError):
m.fs.HX_co_current = HX1D(
default={"shell_side": {"property_package": m.fs.properties,
"transformation_scheme": "BACKWARD"},
"tube_side": {"property_package": m.fs.properties,
"transformation_scheme": "FORWARD"},
"flow_type": HeatExchangerFlowPattern.cocurrent})
with pytest.raises(ConfigurationError):
m.fs.HX_counter_current = HX1D(
default={"shell_side": {"property_package": m.fs.properties,
"transformation_method":
"dae.finite_difference"},
"tube_side": {"property_package": m.fs.properties,
"transformation_method":
"dae.collocation"},
"flow_type": HeatExchangerFlowPattern.countercurrent})
# -----------------------------------------------------------------------------
class TestBTX_cocurrent(object):
@pytest.fixture(scope="class")
def btx(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = BTXParameterBlock(default={"valid_phase": 'Liq'})
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties},
"flow_type": HeatExchangerFlowPattern.cocurrent})
m.fs.unit.d_shell.fix(1.04)
m.fs.unit.d_tube_outer.fix(0.01167)
m.fs.unit.d_tube_inner.fix(0.01067)
m.fs.unit.N_tubes.fix(10)
m.fs.unit.shell_length.fix(4.85)
m.fs.unit.tube_length.fix(4.85)
m.fs.unit.shell_heat_transfer_coefficient.fix(2000)
m.fs.unit.tube_heat_transfer_coefficient.fix(51000)
m.fs.unit.shell_inlet.flow_mol[0].fix(5) # mol/s
m.fs.unit.shell_inlet.temperature[0].fix(365) # K
m.fs.unit.shell_inlet.pressure[0].fix(101325) # Pa
m.fs.unit.shell_inlet.mole_frac_comp[0, "benzene"].fix(0.5)
m.fs.unit.shell_inlet.mole_frac_comp[0, "toluene"].fix(0.5)
m.fs.unit.tube_inlet.flow_mol[0].fix(1) # mol/s
m.fs.unit.tube_inlet.temperature[0].fix(300) # K
m.fs.unit.tube_inlet.pressure[0].fix(101325) # Pa
m.fs.unit.tube_inlet.mole_frac_comp[0, "benzene"].fix(0.5)
m.fs.unit.tube_inlet.mole_frac_comp[0, "toluene"].fix(0.5)
iscale.calculate_scaling_factors(m)
return m
@pytest.mark.unit
@pytest.mark.build
def test_build(self, btx):
assert hasattr(btx.fs.unit, "shell_inlet")
assert len(btx.fs.unit.shell_inlet.vars) == 4
assert hasattr(btx.fs.unit.shell_inlet, "flow_mol")
assert hasattr(btx.fs.unit.shell_inlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.shell_inlet, "temperature")
assert hasattr(btx.fs.unit.shell_inlet, "pressure")
assert hasattr(btx.fs.unit, "tube_inlet")
assert len(btx.fs.unit.tube_inlet.vars) == 4
assert hasattr(btx.fs.unit.tube_inlet, "flow_mol")
assert hasattr(btx.fs.unit.tube_inlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.tube_inlet, "temperature")
assert hasattr(btx.fs.unit.tube_inlet, "pressure")
assert hasattr(btx.fs.unit, "shell_outlet")
assert len(btx.fs.unit.shell_outlet.vars) == 4
assert hasattr(btx.fs.unit.shell_outlet, "flow_mol")
assert hasattr(btx.fs.unit.shell_outlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.shell_outlet, "temperature")
assert hasattr(btx.fs.unit.shell_outlet, "pressure")
assert hasattr(btx.fs.unit, "tube_outlet")
assert len(btx.fs.unit.tube_outlet.vars) == 4
assert hasattr(btx.fs.unit.tube_outlet, "flow_mol")
assert hasattr(btx.fs.unit.tube_outlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.tube_outlet, "temperature")
assert hasattr(btx.fs.unit.tube_outlet, "pressure")
assert hasattr(btx.fs.unit, "shell_area")
assert hasattr(btx.fs.unit, "shell_length")
assert hasattr(btx.fs.unit, "tube_area")
assert hasattr(btx.fs.unit, "tube_length")
assert hasattr(btx.fs.unit, "d_shell")
assert hasattr(btx.fs.unit, "d_tube_outer")
assert hasattr(btx.fs.unit, "d_tube_inner")
assert hasattr(btx.fs.unit, "N_tubes")
assert hasattr(btx.fs.unit, "shell_heat_transfer_coefficient")
assert hasattr(btx.fs.unit, "tube_heat_transfer_coefficient")
assert hasattr(btx.fs.unit, "temperature_wall")
assert hasattr(btx.fs.unit, "shell_heat_transfer_eq")
assert hasattr(btx.fs.unit, "tube_heat_transfer_eq")
assert hasattr(btx.fs.unit, "wall_0D_model")
assert hasattr(btx.fs.unit, "area_calc_tube")
assert hasattr(btx.fs.unit, "area_calc_shell")
assert number_variables(btx) == 869
assert number_total_constraints(btx) == 803
assert number_unused_variables(btx) == 8
@pytest.mark.integration
def test_units(self, btx):
assert_units_equivalent(btx.fs.unit.shell_area, pyunits.m**2)
assert_units_equivalent(btx.fs.unit.shell_length, pyunits.m)
assert_units_equivalent(btx.fs.unit.tube_area, pyunits.m**2)
assert_units_equivalent(btx.fs.unit.tube_length, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_shell, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_tube_outer, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_tube_inner, pyunits.m)
assert_units_equivalent(btx.fs.unit.N_tubes, pyunits.dimensionless)
assert_units_equivalent(
btx.fs.unit.shell_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(
btx.fs.unit.tube_heat_transfer_coefficient,
            pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(btx.fs.unit.temperature_wall, pyunits.K)
assert_units_consistent(btx)
@pytest.mark.unit
def test_dof(self, btx):
assert degrees_of_freedom(btx) == 0
@pytest.mark.component
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
def test_initialize(self, btx):
initialization_tester(btx)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, btx):
results = solver.solve(btx)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, btx):
assert (pytest.approx(5, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.flow_mol[0]))
assert (pytest.approx(322.669, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.temperature[0]))
assert (pytest.approx(101325, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.pressure[0]))
assert (pytest.approx(1, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.flow_mol[0]))
assert (pytest.approx(322.463, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.temperature[0]))
assert (pytest.approx(101325, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.pressure[0]))
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, btx):
assert abs(value(btx.fs.unit.shell_inlet.flow_mol[0] -
btx.fs.unit.shell_outlet.flow_mol[0])) <= 1e-6
assert abs(value(btx.fs.unit.tube_inlet.flow_mol[0] -
btx.fs.unit.tube_outlet.flow_mol[0])) <= 1e-6
shell_side = value(
btx.fs.unit.shell_outlet.flow_mol[0] *
(btx.fs.unit.shell.properties[0, 0].enth_mol_phase['Liq'] -
btx.fs.unit.shell.properties[0, 1].enth_mol_phase['Liq']))
tube_side = value(
btx.fs.unit.tube_outlet.flow_mol[0]*btx.fs.unit.N_tubes *
(btx.fs.unit.tube.properties[0, 1].enth_mol_phase['Liq'] -
btx.fs.unit.tube.properties[0, 0].enth_mol_phase['Liq']))
assert abs(shell_side - tube_side) <= 1e-6
@pytest.mark.ui
@pytest.mark.unit
def test_report(self, btx):
btx.fs.unit.report()
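# The conservation tests above reduce to a simple duty balance. The helper
# below is an illustrative standalone sketch (not part of the IDAES API):
# at steady state the shell duty F*(h_in - h_out) must match the total tube
# duty F*N_tubes*(h_out - h_in).
def _duty_balance_sketch(f_shell, h_shell_in, h_shell_out,
                         f_tube, n_tubes, h_tube_in, h_tube_out,
                         tol=1e-6):
    q_shell = f_shell * (h_shell_in - h_shell_out)  # heat released by shell
    q_tube = f_tube * n_tubes * (h_tube_out - h_tube_in)  # absorbed by tubes
    return abs(q_shell - q_tube) <= tol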
# -----------------------------------------------------------------------------
class TestBTX_countercurrent(object):
@pytest.fixture(scope="class")
def btx(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = BTXParameterBlock(default={"valid_phase": 'Liq'})
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties},
"flow_type": HeatExchangerFlowPattern.countercurrent})
m.fs.unit.d_shell.fix(1.04)
m.fs.unit.d_tube_outer.fix(0.01167)
m.fs.unit.d_tube_inner.fix(0.01067)
m.fs.unit.N_tubes.fix(10)
m.fs.unit.shell_length.fix(4.85)
m.fs.unit.tube_length.fix(4.85)
m.fs.unit.shell_heat_transfer_coefficient.fix(2000)
m.fs.unit.tube_heat_transfer_coefficient.fix(51000)
m.fs.unit.shell_inlet.flow_mol[0].fix(5) # mol/s
m.fs.unit.shell_inlet.temperature[0].fix(365) # K
m.fs.unit.shell_inlet.pressure[0].fix(101325) # Pa
m.fs.unit.shell_inlet.mole_frac_comp[0, "benzene"].fix(0.5)
m.fs.unit.shell_inlet.mole_frac_comp[0, "toluene"].fix(0.5)
m.fs.unit.tube_inlet.flow_mol[0].fix(1) # mol/s
m.fs.unit.tube_inlet.temperature[0].fix(300) # K
m.fs.unit.tube_inlet.pressure[0].fix(101325) # Pa
m.fs.unit.tube_inlet.mole_frac_comp[0, "benzene"].fix(0.5)
m.fs.unit.tube_inlet.mole_frac_comp[0, "toluene"].fix(0.5)
return m
@pytest.mark.unit
@pytest.mark.build
def test_build(self, btx):
assert hasattr(btx.fs.unit, "shell_inlet")
assert len(btx.fs.unit.shell_inlet.vars) == 4
assert hasattr(btx.fs.unit.shell_inlet, "flow_mol")
assert hasattr(btx.fs.unit.shell_inlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.shell_inlet, "temperature")
assert hasattr(btx.fs.unit.shell_inlet, "pressure")
assert hasattr(btx.fs.unit, "tube_inlet")
assert len(btx.fs.unit.tube_inlet.vars) == 4
assert hasattr(btx.fs.unit.tube_inlet, "flow_mol")
assert hasattr(btx.fs.unit.tube_inlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.tube_inlet, "temperature")
assert hasattr(btx.fs.unit.tube_inlet, "pressure")
assert hasattr(btx.fs.unit, "shell_outlet")
assert len(btx.fs.unit.shell_outlet.vars) == 4
assert hasattr(btx.fs.unit.shell_outlet, "flow_mol")
assert hasattr(btx.fs.unit.shell_outlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.shell_outlet, "temperature")
assert hasattr(btx.fs.unit.shell_outlet, "pressure")
assert hasattr(btx.fs.unit, "tube_outlet")
assert len(btx.fs.unit.tube_outlet.vars) == 4
assert hasattr(btx.fs.unit.tube_outlet, "flow_mol")
assert hasattr(btx.fs.unit.tube_outlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.tube_outlet, "temperature")
assert hasattr(btx.fs.unit.tube_outlet, "pressure")
assert hasattr(btx.fs.unit, "shell_area")
assert hasattr(btx.fs.unit, "shell_length")
assert hasattr(btx.fs.unit, "tube_area")
assert hasattr(btx.fs.unit, "tube_length")
assert hasattr(btx.fs.unit, "d_shell")
assert hasattr(btx.fs.unit, "d_tube_outer")
assert hasattr(btx.fs.unit, "d_tube_inner")
assert hasattr(btx.fs.unit, "N_tubes")
assert hasattr(btx.fs.unit, "shell_heat_transfer_coefficient")
assert hasattr(btx.fs.unit, "tube_heat_transfer_coefficient")
assert hasattr(btx.fs.unit, "temperature_wall")
assert hasattr(btx.fs.unit, "shell_heat_transfer_eq")
assert hasattr(btx.fs.unit, "tube_heat_transfer_eq")
assert hasattr(btx.fs.unit, "wall_0D_model")
assert hasattr(btx.fs.unit, "area_calc_tube")
assert hasattr(btx.fs.unit, "area_calc_shell")
assert number_variables(btx) == 869
assert number_total_constraints(btx) == 803
assert number_unused_variables(btx) == 8
@pytest.mark.integration
def test_units(self, btx):
assert_units_equivalent(btx.fs.unit.shell_area, pyunits.m**2)
assert_units_equivalent(btx.fs.unit.shell_length, pyunits.m)
assert_units_equivalent(btx.fs.unit.tube_area, pyunits.m**2)
assert_units_equivalent(btx.fs.unit.tube_length, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_shell, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_tube_outer, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_tube_inner, pyunits.m)
assert_units_equivalent(btx.fs.unit.N_tubes, pyunits.dimensionless)
assert_units_equivalent(
btx.fs.unit.shell_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(
btx.fs.unit.tube_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(btx.fs.unit.temperature_wall, pyunits.K)
assert_units_consistent(btx)
@pytest.mark.unit
def test_dof(self, btx):
assert degrees_of_freedom(btx) == 0
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, btx):
initialization_tester(
btx,
optarg={'tol': 1e-6},
shell_state_args={"flow_mol": 5,
"temperature": 304,
"pressure": 101325},
tube_state_args={"flow_mol": 1,
"temperature": 331.5,
"pressure": 101325})
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, btx):
results = solver.solve(btx)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, btx):
assert (pytest.approx(5, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.flow_mol[0]))
assert (pytest.approx(304.292, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.temperature[0]))
assert (pytest.approx(101325, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.pressure[0]))
assert (pytest.approx(1, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.flow_mol[0]))
assert (pytest.approx(331.435, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.temperature[0]))
assert (pytest.approx(101325, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.pressure[0]))
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, btx):
assert abs(value(btx.fs.unit.shell_inlet.flow_mol[0] -
btx.fs.unit.shell_outlet.flow_mol[0])) <= 1e-6
assert abs(value(btx.fs.unit.tube_inlet.flow_mol[0] -
btx.fs.unit.tube_outlet.flow_mol[0])) <= 1e-6
shell_side = value(
btx.fs.unit.shell_outlet.flow_mol[0] *
(btx.fs.unit.shell.properties[0, 0].enth_mol_phase['Liq'] -
btx.fs.unit.shell.properties[0, 1].enth_mol_phase['Liq']))
tube_side = value(
btx.fs.unit.tube_outlet.flow_mol[0]*btx.fs.unit.N_tubes *
(btx.fs.unit.tube.properties[0, 0].enth_mol_phase['Liq'] -
btx.fs.unit.tube.properties[0, 1].enth_mol_phase['Liq']))
assert abs(shell_side - tube_side) <= 1e-6
@pytest.mark.ui
@pytest.mark.unit
def test_report(self, btx):
btx.fs.unit.report()
# -----------------------------------------------------------------------------
@pytest.mark.iapws
@pytest.mark.skipif(not iapws95.iapws95_available(),
reason="IAPWS not available")
class TestIAPWS_cocurrent(object):
@pytest.fixture(scope="class")
def iapws(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = iapws95.Iapws95ParameterBlock(default={
"phase_presentation": iapws95.PhaseType.LG})
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties},
"flow_type": HeatExchangerFlowPattern.cocurrent})
m.fs.unit.d_shell.fix(1.04)
m.fs.unit.d_tube_outer.fix(0.01167)
m.fs.unit.d_tube_inner.fix(0.01067)
m.fs.unit.N_tubes.fix(10)
m.fs.unit.shell_length.fix(4.85)
m.fs.unit.tube_length.fix(4.85)
m.fs.unit.shell_heat_transfer_coefficient.fix(2000)
m.fs.unit.tube_heat_transfer_coefficient.fix(51000)
m.fs.unit.shell_inlet.flow_mol[0].fix(5)
m.fs.unit.shell_inlet.enth_mol[0].fix(50000)
m.fs.unit.shell_inlet.pressure[0].fix(101325)
m.fs.unit.tube_inlet.flow_mol[0].fix(5)
m.fs.unit.tube_inlet.enth_mol[0].fix(7000)
m.fs.unit.tube_inlet.pressure[0].fix(101325)
return m
@pytest.mark.unit
@pytest.mark.build
def test_build(self, iapws):
assert len(iapws.fs.unit.shell_inlet.vars) == 3
assert hasattr(iapws.fs.unit.shell_inlet, "flow_mol")
assert hasattr(iapws.fs.unit.shell_inlet, "enth_mol")
assert hasattr(iapws.fs.unit.shell_inlet, "pressure")
assert hasattr(iapws.fs.unit, "shell_outlet")
assert len(iapws.fs.unit.shell_outlet.vars) == 3
assert hasattr(iapws.fs.unit.shell_outlet, "flow_mol")
assert hasattr(iapws.fs.unit.shell_outlet, "enth_mol")
assert hasattr(iapws.fs.unit.shell_outlet, "pressure")
assert len(iapws.fs.unit.tube_inlet.vars) == 3
assert hasattr(iapws.fs.unit.tube_inlet, "flow_mol")
assert hasattr(iapws.fs.unit.tube_inlet, "enth_mol")
assert hasattr(iapws.fs.unit.tube_inlet, "pressure")
assert hasattr(iapws.fs.unit, "tube_outlet")
assert len(iapws.fs.unit.tube_outlet.vars) == 3
assert hasattr(iapws.fs.unit.tube_outlet, "flow_mol")
assert hasattr(iapws.fs.unit.tube_outlet, "enth_mol")
assert hasattr(iapws.fs.unit.tube_outlet, "pressure")
assert hasattr(iapws.fs.unit, "shell_area")
assert hasattr(iapws.fs.unit, "shell_length")
assert hasattr(iapws.fs.unit, "tube_area")
assert hasattr(iapws.fs.unit, "tube_length")
assert hasattr(iapws.fs.unit, "d_shell")
assert hasattr(iapws.fs.unit, "d_tube_outer")
assert hasattr(iapws.fs.unit, "d_tube_inner")
assert hasattr(iapws.fs.unit, "N_tubes")
assert hasattr(iapws.fs.unit, "shell_heat_transfer_coefficient")
assert hasattr(iapws.fs.unit, "tube_heat_transfer_coefficient")
assert hasattr(iapws.fs.unit, "temperature_wall")
assert hasattr(iapws.fs.unit, "shell_heat_transfer_eq")
assert hasattr(iapws.fs.unit, "tube_heat_transfer_eq")
assert hasattr(iapws.fs.unit, "wall_0D_model")
assert hasattr(iapws.fs.unit, "area_calc_tube")
assert hasattr(iapws.fs.unit, "area_calc_shell")
assert number_variables(iapws) == 617
assert number_total_constraints(iapws) == 553
assert number_unused_variables(iapws) == 10
@pytest.mark.integration
def test_units(self, iapws):
assert_units_equivalent(iapws.fs.unit.shell_area, pyunits.m**2)
assert_units_equivalent(iapws.fs.unit.shell_length, pyunits.m)
assert_units_equivalent(iapws.fs.unit.tube_area, pyunits.m**2)
assert_units_equivalent(iapws.fs.unit.tube_length, pyunits.m)
assert_units_equivalent(iapws.fs.unit.d_shell, pyunits.m)
assert_units_equivalent(iapws.fs.unit.d_tube_outer, pyunits.m)
assert_units_equivalent(iapws.fs.unit.d_tube_inner, pyunits.m)
assert_units_equivalent(iapws.fs.unit.N_tubes, pyunits.dimensionless)
assert_units_equivalent(
iapws.fs.unit.shell_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(
iapws.fs.unit.tube_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(iapws.fs.unit.temperature_wall, pyunits.K)
assert_units_consistent(iapws)
@pytest.mark.unit
def test_dof(self, iapws):
assert degrees_of_freedom(iapws) == 0
@pytest.mark.initialization
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, iapws):
initialization_tester(iapws)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
    @pytest.mark.component
def test_solve(self, iapws):
results = solver.solve(iapws)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, iapws):
assert pytest.approx(5, abs=1e-5) == \
value(iapws.fs.unit.shell_outlet.flow_mol[0])
assert pytest.approx(5, abs=1e-5) == \
value(iapws.fs.unit.tube_outlet.flow_mol[0])
assert pytest.approx(46298, abs=4e0) == \
value(iapws.fs.unit.shell_outlet.enth_mol[0])
assert pytest.approx(7370, abs=1e0) == \
value(iapws.fs.unit.tube_outlet.enth_mol[0])
assert pytest.approx(101325, abs=1e2) == \
value(iapws.fs.unit.shell_outlet.pressure[0])
assert pytest.approx(101325, abs=1e2) == \
value(iapws.fs.unit.tube_outlet.pressure[0])
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, iapws):
assert abs(value(iapws.fs.unit.shell_inlet.flow_mol[0] -
iapws.fs.unit.shell_outlet.flow_mol[0])) <= 1e-6
assert abs(value(iapws.fs.unit.tube_inlet.flow_mol[0] -
iapws.fs.unit.tube_outlet.flow_mol[0])) <= 1e-6
shell_side = value(
iapws.fs.unit.shell_outlet.flow_mol[0] *
(iapws.fs.unit.shell_inlet.enth_mol[0] -
iapws.fs.unit.shell_outlet.enth_mol[0]))
tube_side = value(
iapws.fs.unit.tube_outlet.flow_mol[0]*iapws.fs.unit.N_tubes *
(iapws.fs.unit.tube_inlet.enth_mol[0] -
iapws.fs.unit.tube_outlet.enth_mol[0]))
assert abs(shell_side + tube_side) <= 1e-6
@pytest.mark.ui
@pytest.mark.unit
def test_report(self, iapws):
iapws.fs.unit.report()
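# Note on signs: the IAPWS conservation checks compute both duties as
# flow * (enth_in - enth_out), so the hot shell duty is positive and the cold
# tube duty is negative; their sum should vanish. The BTX tests instead flip
# the tube-side difference and compare the two duties directly.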
# -----------------------------------------------------------------------------
@pytest.mark.iapws
@pytest.mark.skipif(not iapws95.iapws95_available(),
reason="IAPWS not available")
class TestIAPWS_countercurrent(object):
@pytest.fixture(scope="class")
def iapws(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = iapws95.Iapws95ParameterBlock(default={
"phase_presentation": iapws95.PhaseType.LG})
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties},
"flow_type": HeatExchangerFlowPattern.countercurrent})
m.fs.unit.d_shell.fix(1.04)
m.fs.unit.d_tube_outer.fix(0.01167)
m.fs.unit.d_tube_inner.fix(0.01067)
m.fs.unit.N_tubes.fix(10)
m.fs.unit.shell_length.fix(4.85)
m.fs.unit.tube_length.fix(4.85)
m.fs.unit.shell_heat_transfer_coefficient.fix(2000)
m.fs.unit.tube_heat_transfer_coefficient.fix(51000)
m.fs.unit.shell_inlet.flow_mol[0].fix(5)
m.fs.unit.shell_inlet.enth_mol[0].fix(50000)
m.fs.unit.shell_inlet.pressure[0].fix(101325)
m.fs.unit.tube_inlet.flow_mol[0].fix(5)
m.fs.unit.tube_inlet.enth_mol[0].fix(7000)
m.fs.unit.tube_inlet.pressure[0].fix(101325)
return m
@pytest.mark.unit
@pytest.mark.build
def test_build(self, iapws):
assert len(iapws.fs.unit.shell_inlet.vars) == 3
assert hasattr(iapws.fs.unit.shell_inlet, "flow_mol")
assert hasattr(iapws.fs.unit.shell_inlet, "enth_mol")
assert hasattr(iapws.fs.unit.shell_inlet, "pressure")
assert hasattr(iapws.fs.unit, "shell_outlet")
assert len(iapws.fs.unit.shell_outlet.vars) == 3
assert hasattr(iapws.fs.unit.shell_outlet, "flow_mol")
assert hasattr(iapws.fs.unit.shell_outlet, "enth_mol")
assert hasattr(iapws.fs.unit.shell_outlet, "pressure")
assert len(iapws.fs.unit.tube_inlet.vars) == 3
assert hasattr(iapws.fs.unit.tube_inlet, "flow_mol")
assert hasattr(iapws.fs.unit.tube_inlet, "enth_mol")
assert hasattr(iapws.fs.unit.tube_inlet, "pressure")
assert hasattr(iapws.fs.unit, "tube_outlet")
assert len(iapws.fs.unit.tube_outlet.vars) == 3
assert hasattr(iapws.fs.unit.tube_outlet, "flow_mol")
assert hasattr(iapws.fs.unit.tube_outlet, "enth_mol")
assert hasattr(iapws.fs.unit.tube_outlet, "pressure")
assert hasattr(iapws.fs.unit, "shell_area")
assert hasattr(iapws.fs.unit, "shell_length")
assert hasattr(iapws.fs.unit, "tube_area")
assert hasattr(iapws.fs.unit, "tube_length")
assert hasattr(iapws.fs.unit, "d_shell")
assert hasattr(iapws.fs.unit, "d_tube_outer")
assert hasattr(iapws.fs.unit, "d_tube_inner")
assert hasattr(iapws.fs.unit, "N_tubes")
assert hasattr(iapws.fs.unit, "shell_heat_transfer_coefficient")
assert hasattr(iapws.fs.unit, "tube_heat_transfer_coefficient")
assert hasattr(iapws.fs.unit, "temperature_wall")
assert hasattr(iapws.fs.unit, "shell_heat_transfer_eq")
assert hasattr(iapws.fs.unit, "tube_heat_transfer_eq")
assert hasattr(iapws.fs.unit, "wall_0D_model")
assert hasattr(iapws.fs.unit, "area_calc_tube")
assert hasattr(iapws.fs.unit, "area_calc_shell")
assert number_variables(iapws) == 617
assert number_total_constraints(iapws) == 553
assert number_unused_variables(iapws) == 10
@pytest.mark.integration
def test_units(self, iapws):
assert_units_equivalent(iapws.fs.unit.shell_area, pyunits.m**2)
assert_units_equivalent(iapws.fs.unit.shell_length, pyunits.m)
assert_units_equivalent(iapws.fs.unit.tube_area, pyunits.m**2)
assert_units_equivalent(iapws.fs.unit.tube_length, pyunits.m)
assert_units_equivalent(iapws.fs.unit.d_shell, pyunits.m)
assert_units_equivalent(iapws.fs.unit.d_tube_outer, pyunits.m)
assert_units_equivalent(iapws.fs.unit.d_tube_inner, pyunits.m)
assert_units_equivalent(iapws.fs.unit.N_tubes, pyunits.dimensionless)
assert_units_equivalent(
iapws.fs.unit.shell_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(
iapws.fs.unit.tube_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(iapws.fs.unit.temperature_wall, pyunits.K)
assert_units_consistent(iapws)
@pytest.mark.unit
def test_dof(self, iapws):
assert degrees_of_freedom(iapws) == 0
@pytest.mark.initialization
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, iapws):
initialization_tester(iapws)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, iapws):
results = solver.solve(iapws)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, iapws):
assert pytest.approx(5, abs=1e-5) == \
value(iapws.fs.unit.shell_outlet.flow_mol[0])
assert pytest.approx(5, abs=1e-5) == \
value(iapws.fs.unit.tube_outlet.flow_mol[0])
assert pytest.approx(45359, abs=1e0) == \
value(iapws.fs.unit.shell_outlet.enth_mol[0])
assert pytest.approx(7464, abs=1e0) == \
value(iapws.fs.unit.tube_outlet.enth_mol[0])
assert pytest.approx(101325, abs=1e2) == \
value(iapws.fs.unit.shell_outlet.pressure[0])
assert pytest.approx(101325, abs=1e2) == \
value(iapws.fs.unit.tube_outlet.pressure[0])
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, iapws):
assert abs(value(iapws.fs.unit.shell_inlet.flow_mol[0] -
iapws.fs.unit.shell_outlet.flow_mol[0])) <= 1e-6
assert abs(value(iapws.fs.unit.tube_inlet.flow_mol[0] -
iapws.fs.unit.tube_outlet.flow_mol[0])) <= 1e-6
shell_side = value(
iapws.fs.unit.shell_outlet.flow_mol[0] *
(iapws.fs.unit.shell_inlet.enth_mol[0] -
iapws.fs.unit.shell_outlet.enth_mol[0]))
tube_side = value(
iapws.fs.unit.tube_outlet.flow_mol[0]*iapws.fs.unit.N_tubes *
(iapws.fs.unit.tube_inlet.enth_mol[0] -
iapws.fs.unit.tube_outlet.enth_mol[0]))
assert abs(shell_side + tube_side) <= 1e-6
@pytest.mark.ui
@pytest.mark.unit
def test_report(self, iapws):
iapws.fs.unit.report()
# -----------------------------------------------------------------------------
class TestSaponification_cocurrent(object):
@pytest.fixture(scope="class")
def sapon(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = SaponificationParameterBlock()
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties},
"flow_type": HeatExchangerFlowPattern.cocurrent})
m.fs.unit.d_shell.fix(1.04)
m.fs.unit.d_tube_outer.fix(0.01167)
m.fs.unit.d_tube_inner.fix(0.01067)
m.fs.unit.N_tubes.fix(10)
m.fs.unit.shell_length.fix(4.85)
m.fs.unit.tube_length.fix(4.85)
m.fs.unit.shell_heat_transfer_coefficient.fix(2000)
m.fs.unit.tube_heat_transfer_coefficient.fix(51000)
m.fs.unit.shell_inlet.flow_vol[0].fix(1e-3)
m.fs.unit.shell_inlet.temperature[0].fix(320)
m.fs.unit.shell_inlet.pressure[0].fix(101325)
m.fs.unit.shell_inlet.conc_mol_comp[0, "H2O"].fix(55388.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "NaOH"].fix(100.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "EthylAcetate"].fix(100.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "SodiumAcetate"].fix(0.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "Ethanol"].fix(0.0)
m.fs.unit.tube_inlet.flow_vol[0].fix(1e-3)
m.fs.unit.tube_inlet.temperature[0].fix(300)
m.fs.unit.tube_inlet.pressure[0].fix(101325)
m.fs.unit.tube_inlet.conc_mol_comp[0, "H2O"].fix(55388.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "NaOH"].fix(100.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "EthylAcetate"].fix(100.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "SodiumAcetate"].fix(0.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "Ethanol"].fix(0.0)
return m
@pytest.mark.build
@pytest.mark.unit
def test_build(self, sapon):
assert len(sapon.fs.unit.shell_inlet.vars) == 4
assert hasattr(sapon.fs.unit.shell_inlet, "flow_vol")
assert hasattr(sapon.fs.unit.shell_inlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.shell_inlet, "temperature")
assert hasattr(sapon.fs.unit.shell_inlet, "pressure")
assert len(sapon.fs.unit.shell_outlet.vars) == 4
assert hasattr(sapon.fs.unit.shell_outlet, "flow_vol")
assert hasattr(sapon.fs.unit.shell_outlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.shell_outlet, "temperature")
assert hasattr(sapon.fs.unit.shell_outlet, "pressure")
assert len(sapon.fs.unit.tube_inlet.vars) == 4
assert hasattr(sapon.fs.unit.tube_inlet, "flow_vol")
assert hasattr(sapon.fs.unit.tube_inlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.tube_inlet, "temperature")
assert hasattr(sapon.fs.unit.tube_inlet, "pressure")
assert len(sapon.fs.unit.tube_outlet.vars) == 4
assert hasattr(sapon.fs.unit.tube_outlet, "flow_vol")
assert hasattr(sapon.fs.unit.tube_outlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.tube_outlet, "temperature")
assert hasattr(sapon.fs.unit.tube_outlet, "pressure")
assert hasattr(sapon.fs.unit, "shell_area")
assert hasattr(sapon.fs.unit, "shell_length")
assert hasattr(sapon.fs.unit, "tube_area")
assert hasattr(sapon.fs.unit, "tube_length")
assert hasattr(sapon.fs.unit, "d_shell")
assert hasattr(sapon.fs.unit, "d_tube_outer")
assert hasattr(sapon.fs.unit, "d_tube_inner")
assert hasattr(sapon.fs.unit, "N_tubes")
assert hasattr(sapon.fs.unit, "shell_heat_transfer_coefficient")
assert hasattr(sapon.fs.unit, "tube_heat_transfer_coefficient")
assert hasattr(sapon.fs.unit, "temperature_wall")
assert hasattr(sapon.fs.unit, "shell_heat_transfer_eq")
assert hasattr(sapon.fs.unit, "tube_heat_transfer_eq")
assert hasattr(sapon.fs.unit, "wall_0D_model")
assert hasattr(sapon.fs.unit, "area_calc_tube")
assert hasattr(sapon.fs.unit, "area_calc_shell")
assert number_variables(sapon) == 995
assert number_total_constraints(sapon) == 917
assert number_unused_variables(sapon) == 14
@pytest.mark.integration
def test_units(self, sapon):
assert_units_equivalent(sapon.fs.unit.shell_area, pyunits.m**2)
assert_units_equivalent(sapon.fs.unit.shell_length, pyunits.m)
assert_units_equivalent(sapon.fs.unit.tube_area, pyunits.m**2)
assert_units_equivalent(sapon.fs.unit.tube_length, pyunits.m)
assert_units_equivalent(sapon.fs.unit.d_shell, pyunits.m)
assert_units_equivalent(sapon.fs.unit.d_tube_outer, pyunits.m)
assert_units_equivalent(sapon.fs.unit.d_tube_inner, pyunits.m)
assert_units_equivalent(sapon.fs.unit.N_tubes, pyunits.dimensionless)
assert_units_equivalent(
sapon.fs.unit.shell_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(
sapon.fs.unit.tube_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(sapon.fs.unit.temperature_wall, pyunits.K)
assert_units_consistent(sapon)
@pytest.mark.unit
def test_dof(self, sapon):
assert degrees_of_freedom(sapon) == 0
@pytest.mark.initialization
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, sapon):
initialization_tester(sapon)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, sapon):
results = solver.solve(sapon)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, sapon):
assert pytest.approx(1e-3, abs=1e-6) == \
value(sapon.fs.unit.shell_outlet.flow_vol[0])
assert pytest.approx(1e-3, abs=1e-6) == \
value(sapon.fs.unit.tube_outlet.flow_vol[0])
assert 55388.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "H2O"])
assert 100.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "NaOH"])
assert 100.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "EthylAcetate"])
assert 0.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "SodiumAcetate"])
assert 0.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "Ethanol"])
assert 55388.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "H2O"])
assert 100.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "NaOH"])
assert 100.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "EthylAcetate"])
assert 0.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "SodiumAcetate"])
assert 0.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "Ethanol"])
assert pytest.approx(309.4, abs=1e-1) == \
value(sapon.fs.unit.shell_outlet.temperature[0])
assert pytest.approx(301.1, abs=1e-1) == \
value(sapon.fs.unit.tube_outlet.temperature[0])
assert pytest.approx(101325, abs=1e2) == \
value(sapon.fs.unit.shell_outlet.pressure[0])
assert pytest.approx(101325, abs=1e2) == \
value(sapon.fs.unit.tube_outlet.pressure[0])
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, sapon):
shell_side = value(
sapon.fs.unit.shell_outlet.flow_vol[0] *
sapon.fs.properties.dens_mol*sapon.fs.properties.cp_mol *
(sapon.fs.unit.shell_inlet.temperature[0] -
sapon.fs.unit.shell_outlet.temperature[0]))
tube_side = value(
sapon.fs.unit.tube_outlet.flow_vol[0]*sapon.fs.unit.N_tubes *
sapon.fs.properties.dens_mol*sapon.fs.properties.cp_mol *
(sapon.fs.unit.tube_inlet.temperature[0] -
sapon.fs.unit.tube_outlet.temperature[0]))
assert abs(shell_side + tube_side) <= 1e-6
@pytest.mark.ui
@pytest.mark.unit
def test_report(self, sapon):
sapon.fs.unit.report()
# -----------------------------------------------------------------------------
class TestSaponification_countercurrent(object):
@pytest.fixture(scope="class")
def sapon(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = SaponificationParameterBlock()
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties},
"flow_type": HeatExchangerFlowPattern.countercurrent})
m.fs.unit.d_shell.fix(1.04)
m.fs.unit.d_tube_outer.fix(0.01167)
m.fs.unit.d_tube_inner.fix(0.01067)
m.fs.unit.N_tubes.fix(10)
m.fs.unit.shell_length.fix(4.85)
m.fs.unit.tube_length.fix(4.85)
m.fs.unit.shell_heat_transfer_coefficient.fix(2000)
m.fs.unit.tube_heat_transfer_coefficient.fix(51000)
m.fs.unit.shell_inlet.flow_vol[0].fix(1e-3)
m.fs.unit.shell_inlet.temperature[0].fix(320)
m.fs.unit.shell_inlet.pressure[0].fix(101325)
m.fs.unit.shell_inlet.conc_mol_comp[0, "H2O"].fix(55388.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "NaOH"].fix(100.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "EthylAcetate"].fix(100.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "SodiumAcetate"].fix(0.0)
m.fs.unit.shell_inlet.conc_mol_comp[0, "Ethanol"].fix(0.0)
m.fs.unit.tube_inlet.flow_vol[0].fix(1e-3)
m.fs.unit.tube_inlet.temperature[0].fix(300)
m.fs.unit.tube_inlet.pressure[0].fix(101325)
m.fs.unit.tube_inlet.conc_mol_comp[0, "H2O"].fix(55388.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "NaOH"].fix(100.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "EthylAcetate"].fix(100.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "SodiumAcetate"].fix(0.0)
m.fs.unit.tube_inlet.conc_mol_comp[0, "Ethanol"].fix(0.0)
return m
@pytest.mark.build
@pytest.mark.unit
def test_build(self, sapon):
assert len(sapon.fs.unit.shell_inlet.vars) == 4
assert hasattr(sapon.fs.unit.shell_inlet, "flow_vol")
assert hasattr(sapon.fs.unit.shell_inlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.shell_inlet, "temperature")
assert hasattr(sapon.fs.unit.shell_inlet, "pressure")
assert len(sapon.fs.unit.shell_outlet.vars) == 4
assert hasattr(sapon.fs.unit.shell_outlet, "flow_vol")
assert hasattr(sapon.fs.unit.shell_outlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.shell_outlet, "temperature")
assert hasattr(sapon.fs.unit.shell_outlet, "pressure")
assert len(sapon.fs.unit.tube_inlet.vars) == 4
assert hasattr(sapon.fs.unit.tube_inlet, "flow_vol")
assert hasattr(sapon.fs.unit.tube_inlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.tube_inlet, "temperature")
assert hasattr(sapon.fs.unit.tube_inlet, "pressure")
assert len(sapon.fs.unit.tube_outlet.vars) == 4
assert hasattr(sapon.fs.unit.tube_outlet, "flow_vol")
assert hasattr(sapon.fs.unit.tube_outlet, "conc_mol_comp")
assert hasattr(sapon.fs.unit.tube_outlet, "temperature")
assert hasattr(sapon.fs.unit.tube_outlet, "pressure")
assert hasattr(sapon.fs.unit, "shell_area")
assert hasattr(sapon.fs.unit, "shell_length")
assert hasattr(sapon.fs.unit, "tube_area")
assert hasattr(sapon.fs.unit, "tube_length")
assert hasattr(sapon.fs.unit, "d_shell")
assert hasattr(sapon.fs.unit, "d_tube_outer")
assert hasattr(sapon.fs.unit, "d_tube_inner")
assert hasattr(sapon.fs.unit, "N_tubes")
assert hasattr(sapon.fs.unit, "shell_heat_transfer_coefficient")
assert hasattr(sapon.fs.unit, "tube_heat_transfer_coefficient")
assert hasattr(sapon.fs.unit, "temperature_wall")
assert hasattr(sapon.fs.unit, "shell_heat_transfer_eq")
assert hasattr(sapon.fs.unit, "tube_heat_transfer_eq")
assert hasattr(sapon.fs.unit, "wall_0D_model")
assert hasattr(sapon.fs.unit, "area_calc_tube")
assert hasattr(sapon.fs.unit, "area_calc_shell")
assert number_variables(sapon) == 995
assert number_total_constraints(sapon) == 917
assert number_unused_variables(sapon) == 14
@pytest.mark.integration
def test_units(self, sapon):
assert_units_equivalent(sapon.fs.unit.shell_area, pyunits.m**2)
assert_units_equivalent(sapon.fs.unit.shell_length, pyunits.m)
assert_units_equivalent(sapon.fs.unit.tube_area, pyunits.m**2)
assert_units_equivalent(sapon.fs.unit.tube_length, pyunits.m)
assert_units_equivalent(sapon.fs.unit.d_shell, pyunits.m)
assert_units_equivalent(sapon.fs.unit.d_tube_outer, pyunits.m)
assert_units_equivalent(sapon.fs.unit.d_tube_inner, pyunits.m)
assert_units_equivalent(sapon.fs.unit.N_tubes, pyunits.dimensionless)
assert_units_equivalent(
sapon.fs.unit.shell_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(
sapon.fs.unit.tube_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(sapon.fs.unit.temperature_wall, pyunits.K)
assert_units_consistent(sapon)
@pytest.mark.unit
def test_dof(self, sapon):
assert degrees_of_freedom(sapon) == 0
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_initialize(self, sapon):
initialization_tester(sapon)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solve(self, sapon):
results = solver.solve(sapon)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_solution(self, sapon):
assert pytest.approx(1e-3, abs=1e-6) == \
value(sapon.fs.unit.shell_outlet.flow_vol[0])
assert pytest.approx(1e-3, abs=1e-6) == \
value(sapon.fs.unit.tube_outlet.flow_vol[0])
assert 55388.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "H2O"])
assert 100.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "NaOH"])
assert 100.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "EthylAcetate"])
assert 0.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "SodiumAcetate"])
assert 0.0 == value(
sapon.fs.unit.shell_inlet.conc_mol_comp[0, "Ethanol"])
assert 55388.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "H2O"])
assert 100.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "NaOH"])
assert 100.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "EthylAcetate"])
assert 0.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "SodiumAcetate"])
assert 0.0 == value(
sapon.fs.unit.tube_inlet.conc_mol_comp[0, "Ethanol"])
assert pytest.approx(309.2, abs=1e-1) == \
value(sapon.fs.unit.shell_outlet.temperature[0])
assert pytest.approx(301.1, abs=1e-1) == \
value(sapon.fs.unit.tube_outlet.temperature[0])
assert pytest.approx(101325, abs=1e2) == \
value(sapon.fs.unit.shell_outlet.pressure[0])
assert pytest.approx(101325, abs=1e2) == \
value(sapon.fs.unit.tube_outlet.pressure[0])
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.component
def test_conservation(self, sapon):
shell_side = value(
sapon.fs.unit.shell_outlet.flow_vol[0] *
sapon.fs.properties.dens_mol*sapon.fs.properties.cp_mol *
(sapon.fs.unit.shell_inlet.temperature[0] -
sapon.fs.unit.shell_outlet.temperature[0]))
tube_side = value(
sapon.fs.unit.tube_outlet.flow_vol[0]*sapon.fs.unit.N_tubes *
sapon.fs.properties.dens_mol*sapon.fs.properties.cp_mol *
(sapon.fs.unit.tube_inlet.temperature[0] -
sapon.fs.unit.tube_outlet.temperature[0]))
assert abs(shell_side + tube_side) <= 1e-6
@pytest.mark.ui
@pytest.mark.unit
def test_report(self, sapon):
sapon.fs.unit.report()
# -----------------------------------------------------------------------------
class TestBT_Generic_cocurrent(object):
@pytest.fixture(scope="class")
def btx(self):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
        # As no other example property package defines units, take the
        # generic BT-PR package and change its base units
configuration2 = {
# Specifying components
"components": {
'benzene': {
"type": Component,
"enth_mol_ig_comp": RPP,
"entr_mol_ig_comp": RPP,
"pressure_sat_comp": RPP,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (78.1136E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (48.9e5, pyunits.Pa), # [1]
"temperature_crit": (562.2, pyunits.K), # [1]
"omega": 0.212, # [1]
"cp_mol_ig_comp_coeff": {
'A': (-3.392E1, pyunits.J/pyunits.mol/pyunits.K), # [1]
'B': (4.739E-1, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (-3.017E-4, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (7.130E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
82.9e3, pyunits.J/pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
-269, pyunits.J/pyunits.mol/pyunits.K), # [3]
"pressure_sat_comp_coeff": {'A': (-6.98273, None), # [1]
'B': (1.33213, None),
'C': (-2.62863, None),
'D': (-3.33399, None)}}},
'toluene': {
"type": Component,
"enth_mol_ig_comp": RPP,
"entr_mol_ig_comp": RPP,
"pressure_sat_comp": RPP,
"phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
"parameter_data": {
"mw": (92.1405E-3, pyunits.kg/pyunits.mol), # [1]
"pressure_crit": (41e5, pyunits.Pa), # [1]
"temperature_crit": (591.8, pyunits.K), # [1]
"omega": 0.263, # [1]
"cp_mol_ig_comp_coeff": {
'A': (-2.435E1, pyunits.J/pyunits.mol/pyunits.K), # [1]
'B': (5.125E-1, pyunits.J/pyunits.mol/pyunits.K**2),
'C': (-2.765E-4, pyunits.J/pyunits.mol/pyunits.K**3),
'D': (4.911E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
"enth_mol_form_vap_comp_ref": (
50.1e3, pyunits.J/pyunits.mol), # [3]
"entr_mol_form_vap_comp_ref": (
-321, pyunits.J/pyunits.mol/pyunits.K), # [3]
"pressure_sat_comp_coeff": {'A': (-7.28607, None), # [1]
'B': (1.38091, None),
'C': (-2.83433, None),
'D': (-2.79168, None)}}}},
# Specifying phases
"phases": {'Liq': {"type": LiquidPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}},
'Vap': {"type": VaporPhase,
"equation_of_state": Cubic,
"equation_of_state_options": {
"type": CubicType.PR}}},
# Set base units of measurement
"base_units": {"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.t,
"amount": pyunits.mol,
"temperature": pyunits.degR},
# Specifying state definition
"state_definition": FTPx,
"state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
"temperature": (273.15, 300, 500, pyunits.K),
"pressure": (5e4, 1e5, 1e6, pyunits.Pa)},
"pressure_ref": (101325, pyunits.Pa),
"temperature_ref": (298.15, pyunits.K),
# Defining phase equilibria
"phases_in_equilibrium": [("Vap", "Liq")],
"phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
"bubble_dew_method": LogBubbleDew,
"parameter_data": {"PR_kappa": {("benzene", "benzene"): 0.000,
("benzene", "toluene"): 0.000,
("toluene", "benzene"): 0.000,
("toluene", "toluene"): 0.000}}}
m.fs.properties = GenericParameterBlock(default=configuration)
m.fs.properties2 = GenericParameterBlock(default=configuration2)
m.fs.unit = HX1D(default={
"shell_side": {"property_package": m.fs.properties},
"tube_side": {"property_package": m.fs.properties2},
"flow_type": HeatExchangerFlowPattern.cocurrent})
m.fs.unit.d_shell.fix(1.04)
m.fs.unit.d_tube_outer.fix(0.01167)
m.fs.unit.d_tube_inner.fix(0.01067)
m.fs.unit.N_tubes.fix(10)
m.fs.unit.shell_length.fix(4.85)
m.fs.unit.tube_length.fix(4.85)
m.fs.unit.shell_heat_transfer_coefficient.fix(2000)
m.fs.unit.tube_heat_transfer_coefficient.fix(51000)
m.fs.unit.shell_inlet.flow_mol[0].fix(5) # mol/s
m.fs.unit.shell_inlet.temperature[0].fix(365) # K
m.fs.unit.shell_inlet.pressure[0].fix(101325) # Pa
m.fs.unit.shell_inlet.mole_frac_comp[0, "benzene"].fix(0.5)
m.fs.unit.shell_inlet.mole_frac_comp[0, "toluene"].fix(0.5)
m.fs.unit.tube_inlet.flow_mol[0].fix(1) # mol/s
m.fs.unit.tube_inlet.temperature[0].fix(540) # degR
m.fs.unit.tube_inlet.pressure[0].fix(101.325) # kPa
m.fs.unit.tube_inlet.mole_frac_comp[0, "benzene"].fix(0.5)
m.fs.unit.tube_inlet.mole_frac_comp[0, "toluene"].fix(0.5)
return m
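    # Unit sanity check (sketch): Rankine to Kelvin is a pure 5/9 scaling, so
    # the 540 degR tube feed above equals the 300 K feed used by the other
    # BTX fixtures:
    #   >>> value(pyunits.convert(540 * pyunits.degR, to_units=pyunits.K))
    #   300.0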
@pytest.mark.component
@pytest.mark.build
def test_build(self, btx):
assert hasattr(btx.fs.unit, "shell_inlet")
assert len(btx.fs.unit.shell_inlet.vars) == 4
assert hasattr(btx.fs.unit.shell_inlet, "flow_mol")
assert hasattr(btx.fs.unit.shell_inlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.shell_inlet, "temperature")
assert hasattr(btx.fs.unit.shell_inlet, "pressure")
assert hasattr(btx.fs.unit, "tube_inlet")
assert len(btx.fs.unit.tube_inlet.vars) == 4
assert hasattr(btx.fs.unit.tube_inlet, "flow_mol")
assert hasattr(btx.fs.unit.tube_inlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.tube_inlet, "temperature")
assert hasattr(btx.fs.unit.tube_inlet, "pressure")
assert hasattr(btx.fs.unit, "shell_outlet")
assert len(btx.fs.unit.shell_outlet.vars) == 4
assert hasattr(btx.fs.unit.shell_outlet, "flow_mol")
assert hasattr(btx.fs.unit.shell_outlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.shell_outlet, "temperature")
assert hasattr(btx.fs.unit.shell_outlet, "pressure")
assert hasattr(btx.fs.unit, "tube_outlet")
assert len(btx.fs.unit.tube_outlet.vars) == 4
assert hasattr(btx.fs.unit.tube_outlet, "flow_mol")
assert hasattr(btx.fs.unit.tube_outlet, "mole_frac_comp")
assert hasattr(btx.fs.unit.tube_outlet, "temperature")
assert hasattr(btx.fs.unit.tube_outlet, "pressure")
assert hasattr(btx.fs.unit, "shell_area")
assert hasattr(btx.fs.unit, "shell_length")
assert hasattr(btx.fs.unit, "tube_area")
assert hasattr(btx.fs.unit, "tube_length")
assert hasattr(btx.fs.unit, "d_shell")
assert hasattr(btx.fs.unit, "d_tube_outer")
assert hasattr(btx.fs.unit, "d_tube_inner")
assert hasattr(btx.fs.unit, "N_tubes")
assert hasattr(btx.fs.unit, "shell_heat_transfer_coefficient")
assert hasattr(btx.fs.unit, "tube_heat_transfer_coefficient")
assert hasattr(btx.fs.unit, "temperature_wall")
assert hasattr(btx.fs.unit, "shell_heat_transfer_eq")
assert hasattr(btx.fs.unit, "tube_heat_transfer_eq")
assert hasattr(btx.fs.unit, "wall_0D_model")
assert hasattr(btx.fs.unit, "area_calc_tube")
assert hasattr(btx.fs.unit, "area_calc_shell")
assert number_variables(btx) == 1601
assert number_total_constraints(btx) == 1469
assert number_unused_variables(btx) == 34
@pytest.mark.integration
def test_units(self, btx):
assert_units_equivalent(btx.fs.unit.shell_area, pyunits.m**2)
assert_units_equivalent(btx.fs.unit.shell_length, pyunits.m)
assert_units_equivalent(btx.fs.unit.tube_area, pyunits.m**2)
assert_units_equivalent(btx.fs.unit.tube_length, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_shell, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_tube_outer, pyunits.m)
assert_units_equivalent(btx.fs.unit.d_tube_inner, pyunits.m)
assert_units_equivalent(btx.fs.unit.N_tubes, pyunits.dimensionless)
assert_units_equivalent(
btx.fs.unit.shell_heat_transfer_coefficient,
pyunits.W/pyunits.m**2/pyunits.K)
assert_units_equivalent(
btx.fs.unit.tube_heat_transfer_coefficient,
pyunits.kW/pyunits.m**2/pyunits.degR)
assert_units_equivalent(btx.fs.unit.temperature_wall, pyunits.K)
assert_units_consistent(btx)
@pytest.mark.component
def test_dof(self, btx):
assert degrees_of_freedom(btx) == 0
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.integration
def test_initialize(self, btx):
initialization_tester(btx)
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.integration
def test_solve(self, btx):
results = solver.solve(btx)
# Check for optimal solution
assert results.solver.termination_condition == \
TerminationCondition.optimal
assert results.solver.status == SolverStatus.ok
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.integration
def test_solution(self, btx):
assert (pytest.approx(5, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.flow_mol[0]))
assert (pytest.approx(322.959, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.temperature[0]))
assert (pytest.approx(101325, abs=1e-3) ==
value(btx.fs.unit.shell_outlet.pressure[0]))
assert (pytest.approx(1, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.flow_mol[0]))
assert (pytest.approx(581.126, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.temperature[0]))
assert (pytest.approx(101.325, abs=1e-3) ==
value(btx.fs.unit.tube_outlet.pressure[0]))
@pytest.mark.solver
@pytest.mark.skipif(solver is None, reason="Solver not available")
@pytest.mark.integration
def test_conservation(self, btx):
assert abs(value(btx.fs.unit.shell_inlet.flow_mol[0] -
btx.fs.unit.shell_outlet.flow_mol[0])) <= 1e-6
assert abs(value(btx.fs.unit.tube_inlet.flow_mol[0] -
btx.fs.unit.tube_outlet.flow_mol[0])) <= 1e-6
shell_side = value(
btx.fs.unit.shell_outlet.flow_mol[0] *
(btx.fs.unit.shell.properties[0, 0].enth_mol_phase['Liq'] -
btx.fs.unit.shell.properties[0, 1].enth_mol_phase['Liq']))
tube_side = value(pyunits.convert(
btx.fs.unit.tube_outlet.flow_mol[0]*btx.fs.unit.N_tubes *
(btx.fs.unit.tube.properties[0, 1].enth_mol_phase['Liq'] -
btx.fs.unit.tube.properties[0, 0].enth_mol_phase['Liq']),
to_units=pyunits.W))
assert abs((shell_side - tube_side)/shell_side) <= 1e-4
@pytest.mark.ui
@pytest.mark.component
def test_report(self, btx):
btx.fs.unit.report()
|
{"hexsha": "7148f4e57d7deed7a6f78d01ae6c156060311c66", "size": 67634, "ext": "py", "lang": "Python", "max_stars_repo_path": "idaes/generic_models/unit_models/tests/test_heat_exchanger_1D.py", "max_stars_repo_name": "dangunter/idaes-pse", "max_stars_repo_head_hexsha": "8f63b4ad8000af8a3eb0316a5f61c32e206925d0", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idaes/generic_models/unit_models/tests/test_heat_exchanger_1D.py", "max_issues_repo_name": "dangunter/idaes-pse", "max_issues_repo_head_hexsha": "8f63b4ad8000af8a3eb0316a5f61c32e206925d0", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idaes/generic_models/unit_models/tests/test_heat_exchanger_1D.py", "max_forks_repo_name": "dangunter/idaes-pse", "max_forks_repo_head_hexsha": "8f63b4ad8000af8a3eb0316a5f61c32e206925d0", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3919463087, "max_line_length": 86, "alphanum_fraction": 0.6301712157, "include": true, "reason": "from pyomo", "num_tokens": 17320}
|
/**
* @author : Zhao Chonyyao (cyzhao@zju.edu.cn)
* @date : 2021-04-30
 * @description: embedded mass-spring elasticity problem
* @version : 1.0
*/
#include <memory>
#include <string>
#include <boost/property_tree/ptree.hpp>
#include "Common/error.h"
// TODO: possibly a bad idea for the Problem module to depend on the Model module
#include "Model/fem/elas_energy.h"
#include "Model/fem/mass_matrix.h"
#include "Model/mass_spring/mass_spring_obj.h"
#include "Model/mass_spring/para.h"
#include "Geometry/extract_surface.imp"
#include "Geometry/interpolate.h"
#include "Problem/energy/basic_energy.h"
#include "Io/io.h"
#include "Geometry/extract_surface.imp"
#include "libigl/include/igl/readOBJ.h"
#include "embedded_mass_spring_problem.h"
namespace PhysIKA {
using namespace std;
using namespace Eigen;
using namespace igl;
template <typename T>
using MAT = Eigen::Matrix<T, -1, -1>;
template <typename T>
using VEC = Eigen::Matrix<T, -1, 1>;
template <typename T>
embedded_ms_problem_builder<T>::embedded_ms_problem_builder(const T* x, const boost::property_tree::ptree& para_tree)
{
pt_ = para_tree;
auto blender = para_tree.get_child("blender");
auto simulation_para = para_tree.get_child("simulation_para");
auto common = para_tree.get_child("common");
para::dt = common.get<double>("time_step", 0.01);
para::line_search = simulation_para.get<int>("line_search", true); // todo
para::density = common.get<double>("density", 10);
para::frame = common.get<int>("frame", 100);
para::newton_fastMS = simulation_para.get<string>("newton_fastMS");
para::stiffness = simulation_para.get<double>("stiffness", 8000);
para::gravity = common.get<double>("gravity", 9.8);
para::object_name = blender.get<string>("surf");
para::out_dir_simulator = common.get<string>("out_dir_simulator");
para::simulation_type = simulation_para.get<string>("simulation", "static");
para::weight_line_search =
simulation_para.get<double>("weight_line_search", 1e-5);
para::input_object = common.get<string>("input_object");
para::force_function = simulation_para.get<string>("force_function");
para::intensity = simulation_para.get<double>("intensity");
para::coll_z = simulation_para.get<bool>("coll_z", false);
//TODO: need to check exception
const string filename = common.get<string>("embedded_object", "");
const string filename_coarse = common.get<string>("input_object", "");
if (filename_coarse.empty() || filename.empty())
{
cerr << "no coarse mesh" << __LINE__ << endl;
exit(1);
}
Matrix<T, -1, -1> nods;
MatrixXi cells;
Matrix<T, -1, -1> nods_coarse;
MatrixXi cells_coarse;
if (filename.rfind(".obj") != string::npos)
{
readOBJ(filename.c_str(), nods, cells);
nods.transposeInPlace();
cells.transposeInPlace();
}
else
{
IF_ERR(exit, mesh_read_from_vtk<T, 4>(filename.c_str(), nods, cells));
}
IF_ERR(exit, mesh_read_from_vtk<T, 4>(filename_coarse.c_str(), nods_coarse, cells_coarse));
if (cells.size() == 0)
cells.resize(4, 0);
if (cells_coarse.size() == 0)
cells_coarse.resize(4, 0);
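    // Build barycentric interpolation operators in both directions: the
    // resulting coefficient matrices transfer vertex positions between the
    // embedded (fine) mesh and the coarse simulation mesh, e.g.
    // nods_coarse = nods * fine_to_coarse_coef_ below.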
interp_pts_in_tets<T, 3>(nods, cells, nods_coarse, fine_to_coarse_coef_);
interp_pts_in_tets<T, 3>(nods_coarse, cells_coarse, nods, coarse_to_fine_coef_);
const size_t num_nods = nods_coarse.cols();
if (x != nullptr)
{
nods = Map<const MAT<T>>(x, nods.rows(), nods.cols());
nods_coarse = nods * fine_to_coarse_coef_;
}
REST_ = nods;
cells_ = cells;
fine_verts_num_ = REST_.cols();
//read fixed points
vector<size_t> cons(0);
if (para_tree.find("input_constraint") != para_tree.not_found())
{
const string cons_file_path = common.get<string>("input_constraint");
/* IF_ERR(exit, read_fixed_verts_from_csv(cons_file_path.c_str(), cons));*/
}
cout << "constrint " << cons.size() << " points" << endl;
//calc mass vector
Matrix<T, -1, 1> mass_vec(num_nods);
calc_mass_vector<T>(nods_coarse, cells_coarse, para::density, mass_vec);
// mass_calculator<T, 3, 4, 1, 1, basis_func, quadrature>(nods_coarse, cells_coarse, para::density, mass_vec);
cout << "build energy" << endl;
int ELAS = 0;
int GRAV = 1;
int KIN = 2;
int POS = 3;
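    // With the explicit solver the momentum term is integrated separately
    // (see semi_implicit_ below), so the kinetic-energy slot is skipped and
    // the position-constraint index shifts down by one.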
if (para_tree.get<std::string>("solver_type") == "explicit")
POS = 2;
ebf_.resize(POS + 1);
ebf_[ELAS] = make_shared<MassSpringObj<T>>(para::input_object.c_str(), para::stiffness);
char axis = common.get<char>("grav_axis", 'y') | 0x20;
ebf_[GRAV] = make_shared<gravity_energy<T, 3>>(num_nods, 1, para::gravity, mass_vec, axis);
kinetic_ = make_shared<momentum<T, 3>>(nods_coarse.data(), num_nods, mass_vec, para::dt);
if (para_tree.get<string>("solver_type") == "implicit")
ebf_[KIN] = kinetic_;
ebf_[POS] = make_shared<position_constraint<T, 3>>(nods_coarse.data(), num_nods, simulation_para.get<double>("w_pos", 1e6), cons);
//set constraint
enum constraint_type
{
COLL
};
cbf_.resize(COLL + 1);
collider_ = nullptr;
cbf_[COLL] = collider_;
shared_ptr<Problem<T, 3>> pb = make_shared<Problem<T, 3>>(ebf_[0], nullptr);
auto dat_str = make_shared<dat_str_core<T, 3>>(pb->Nx() / 3, para_tree.get<bool>("hes_is_const", false));
compute_hes_pattern(pb->energy_, dat_str);
ebf_[0]->Hes(nods_coarse.data(), dat_str);
SparseMatrix<T> K = dat_str->get_hes();
embedded_interp_ = make_shared<embedded_interpolate<T>>(nods_coarse, coarse_to_fine_coef_, fine_to_coarse_coef_, K, 5868.03 / 2);
if (para_tree.get<string>("solver_type") == "explicit")
{
Map<Matrix<T, -1, 1>> position(nods_coarse.data(), nods_coarse.size());
semi_implicit_ = make_shared<semi_implicit<T>>(para::dt, mass_vec, position);
}
}
template <typename T>
int embedded_ms_problem_builder<T>::update_problem(const T* x, const T* v)
{
embedded_interp_->update_verts(x, fine_verts_num_);
const Eigen::Matrix<T, -1, -1>& verts = embedded_interp_->get_verts();
IF_ERR(return, kinetic_->update_location_and_velocity(verts.data(), v));
if (collider_ != nullptr)
IF_ERR(return, collider_->update(verts.data()));
return 0;
}
template class embedded_ms_problem_builder<double>;
template class embedded_ms_problem_builder<float>;
} // namespace PhysIKA
|
{"hexsha": "d3aa23c4652dee9b01df6e870c2b971ba798ab50", "size": 6680, "ext": "cc", "lang": "C++", "max_stars_repo_path": "Source/Dynamics/FiniteElementMethod/Source/Problem/integrated_problem/embedded_mass_spring_problem.cc", "max_stars_repo_name": "weikm/sandcarSimulation2", "max_stars_repo_head_hexsha": "fe499d0a3289c0ac1acce69c7dc78d8ce1b2708a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Source/Dynamics/FiniteElementMethod/Source/Problem/integrated_problem/embedded_mass_spring_problem.cc", "max_issues_repo_name": "weikm/sandcarSimulation2", "max_issues_repo_head_hexsha": "fe499d0a3289c0ac1acce69c7dc78d8ce1b2708a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Source/Dynamics/FiniteElementMethod/Source/Problem/integrated_problem/embedded_mass_spring_problem.cc", "max_forks_repo_name": "weikm/sandcarSimulation2", "max_forks_repo_head_hexsha": "fe499d0a3289c0ac1acce69c7dc78d8ce1b2708a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1111111111, "max_line_length": 134, "alphanum_fraction": 0.6511976048, "num_tokens": 1804}
|
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
Copyright (C) 2011 Klaus Spanderen
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<quantlib-dev@lists.sf.net>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
*/
/*! \file exponentialjump1dmesher.cpp
    \brief mesher for an exponential jump process with high
           mean reversion rate and low jump intensity
*/
#include <ql/math/incompletegamma.hpp>
#include <ql/math/integrals/gausslobattointegral.hpp>
#include <ql/math/distributions/gammadistribution.hpp>
#include <ql/methods/finitedifferences/meshers/exponentialjump1dmesher.hpp>
#include <boost/bind.hpp>
namespace QuantLib {
ExponentialJump1dMesher::ExponentialJump1dMesher(
Size steps, Real beta, Real jumpIntensity, Real eta, Real eps)
: Fdm1dMesher(steps),
beta_(beta), jumpIntensity_(jumpIntensity), eta_(eta)
{
QL_REQUIRE(eps > 0.0 && eps < 1.0, "eps > 0.0 and eps < 1.0");
QL_REQUIRE(steps > 1, "minimum number of steps is two");
const Real start = 0.0;
const Real end = 1.0-eps;
const Real dx = (end-start)/(steps-1);
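        // The grid is uniform in probability space and mapped through the
        // inverse CDF of an Exp(eta) distribution, x = -log(1-p)/eta, so
        // mesh points concentrate near zero where the density mass sits.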
for (Size i=0; i < steps; ++i) {
const Real p = start + i*dx;
locations_[i] = -1.0/eta*std::log(1.0-p);
}
for (Size i=0; i < steps-1; ++i) {
dminus_[i+1] = dplus_[i] = locations_[i+1]-locations_[i];
}
dplus_.back() = dminus_.front() = Null<Real>();
}
Real ExponentialJump1dMesher::jumpSizeDensity(Real x, Time t) const {
const Real a = 1.0-jumpIntensity_/beta_;
const Real norm = 1.0-std::exp(-jumpIntensity_*t);
const Real gammaValue
= std::exp(GammaFunction().logValue(1.0-jumpIntensity_/beta_));
return jumpIntensity_*gammaValue/norm
*( incompleteGammaFunction(a, x*std::exp(beta_*t)*eta_)
-incompleteGammaFunction(a, x*eta_))
*std::pow(eta_, jumpIntensity_/beta_)
/(beta_*std::pow(x, a));
}
Real ExponentialJump1dMesher::jumpSizeDensity(Real x) const {
const Real a = 1.0-jumpIntensity_/beta_;
const Real gammaValue
= std::exp(GammaFunction().logValue(jumpIntensity_/beta_));
return std::exp(-x*eta_)*std::pow(x, -a) * std::pow(eta_, 1.0-a)
/ gammaValue;
}
Real ExponentialJump1dMesher::jumpSizeDistribution(Real x, Time t) const {
const Real xmin = std::min(x, 1.0e-100);
return GaussLobattoIntegral(1000000, 1e-12)(
boost::bind(&ExponentialJump1dMesher::jumpSizeDensity, this, _1, t),
xmin, std::max(x, xmin));
}
Real ExponentialJump1dMesher::jumpSizeDistribution(Real x) const {
const Real a = jumpIntensity_/beta_;
const Real xmin = std::min(x, QL_EPSILON);
const Real gammaValue
= std::exp(GammaFunction().logValue(jumpIntensity_/beta_));
const Real lowerEps =
(std::pow(xmin, a)/a - std::pow(xmin, a+1)/(a+1))/gammaValue;
return lowerEps + GaussLobattoIntegral(10000, 1e-12)(
boost::bind(&ExponentialJump1dMesher::jumpSizeDensity, this, _1),
xmin/eta_, std::max(x, xmin/eta_));
}
}
|
{"hexsha": "730957cfc446217af7cf586b81b067c92d72d57a", "size": 3957, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ql/methods/finitedifferences/meshers/exponentialjump1dmesher.cpp", "max_stars_repo_name": "quantosaurosProject/quantLib", "max_stars_repo_head_hexsha": "84b49913d3940cf80d6de8f70185867373f45e8d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ql/methods/finitedifferences/meshers/exponentialjump1dmesher.cpp", "max_issues_repo_name": "quantosaurosProject/quantLib", "max_issues_repo_head_hexsha": "84b49913d3940cf80d6de8f70185867373f45e8d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ql/methods/finitedifferences/meshers/exponentialjump1dmesher.cpp", "max_forks_repo_name": "quantosaurosProject/quantLib", "max_forks_repo_head_hexsha": "84b49913d3940cf80d6de8f70185867373f45e8d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-03-29T05:44:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T05:44:27.000Z", "avg_line_length": 40.3775510204, "max_line_length": 80, "alphanum_fraction": 0.6186504928, "num_tokens": 1063}
|
from logging import getLogger
from typing import List
import cv2
import numpy as np
from mtcnn import MTCNN
from mtcnn.exceptions.invalid_image import InvalidImage
from utils import set_gpu_memory_growth
set_gpu_memory_growth()
ARCFACE_LANDMARK = np.array(
[
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041],
],
dtype=np.float32,
)
class FaceDetectionError(Exception):
"""Error while detecting faces in the image"""
class FaceNotFoundError(Exception):
"""No face is found in the input image"""
class FaceRegionExtractor:
def __init__(self, logger=None, expand_margin=0):
self._logger = logger or getLogger(__name__)
self._detector = MTCNN()
        self._image_size = 112 + expand_margin * 2  # expand on both sides
self._reference_landmarks = ARCFACE_LANDMARK + expand_margin
def extract(self, img: np.ndarray) -> List[np.ndarray]:
try:
faces = self._detector.detect_faces(img)
except InvalidImage as e:
raise FaceDetectionError("error while detecting faces") from e
if not faces:
raise FaceNotFoundError("face not found")
landmarks_for_each_face = [self.get_landmarks(face) for face in faces]
face_imgs = [
self.crop_face_image(img, landmarks)
for landmarks in landmarks_for_each_face
]
return face_imgs
def crop_face_image(self, img: np.ndarray, landmarks: np.ndarray) -> np.ndarray:
matrix, _ = cv2.estimateAffinePartial2D(
landmarks,
self._reference_landmarks,
method=cv2.LMEDS,
confidence=0.999999,
refineIters=100,
)
img_face = cv2.warpAffine(
img, matrix, (self._image_size, self._image_size), borderValue=0
)
return img_face
@staticmethod
def get_landmarks(face: dict) -> np.ndarray:
keypoints: dict = face["keypoints"]
landmarks = np.array(
[
keypoints["left_eye"],
keypoints["right_eye"],
keypoints["nose"],
keypoints["mouth_left"],
keypoints["mouth_right"],
]
)
return landmarks
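

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module). "input.jpg" and the output names are hypothetical; we assume
    # MTCNN expects RGB images, hence the BGR<->RGB conversions around
    # OpenCV's imread/imwrite.
    extractor = FaceRegionExtractor()
    bgr = cv2.imread("input.jpg")
    if bgr is None:
        raise SystemExit("input.jpg not found")
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    for i, aligned in enumerate(extractor.extract(rgb)):
        cv2.imwrite(f"face_{i}.png", cv2.cvtColor(aligned, cv2.COLOR_RGB2BGR))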
|
{"hexsha": "f9b4f6c8781bb94f557f49503b573adad4fcaf18", "size": 2321, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/face_region_extractor.py", "max_stars_repo_name": "mamo3gr/batch_face_cropper", "max_stars_repo_head_hexsha": "c5b16cf4643f714911fab182c12675ed709b765a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/face_region_extractor.py", "max_issues_repo_name": "mamo3gr/batch_face_cropper", "max_issues_repo_head_hexsha": "c5b16cf4643f714911fab182c12675ed709b765a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/face_region_extractor.py", "max_forks_repo_name": "mamo3gr/batch_face_cropper", "max_forks_repo_head_hexsha": "c5b16cf4643f714911fab182c12675ed709b765a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9638554217, "max_line_length": 84, "alphanum_fraction": 0.6135286514, "include": true, "reason": "import numpy", "num_tokens": 541}
|
# Python program for project
import os
import sys
import time
import torch
import argparse
import numpy as np
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import matplotlib
matplotlib.use('Agg')
from torch.autograd import Variable
from torch.utils import data
from ptsemseg.models import get_model
from ptsemseg.loader import get_loader, get_data_path
from ptsemseg.loss import cross_entropy2d
from ptsemseg.metrics import scores
from ptsemseg.utils import AverageMeter
from ptsemseg.loggers import Logger, savefig
from ptsemseg.models.utils import save_checkpoint, adjust_learning_rate
from lr_scheduling import *
sys.path.append("/data5/huangzh/sea-torch/pose/progress")
from progress.bar import Bar as Bar
best_acc = 0
def main(args):
global best_acc
# Setup Dataloader
data_loader = get_loader(args.dataset)
data_path = get_data_path(args.dataset)
train_loader = data_loader(
data_path,
'train',
is_transform=True,
img_size=(args.img_rows, args.img_cols))
val_loader = data_loader(
data_path,
'val',
is_transform=True,
img_size=(args.img_rows, args.img_cols))
n_classes = train_loader.n_classes
trainloader = data.DataLoader(
train_loader, batch_size=args.batch_size, num_workers=4, shuffle=True)
valloader = data.DataLoader(
val_loader, batch_size=args.batch_size, num_workers=4, shuffle=True)
if not os.path.isdir(args.checkpoint):
os.makedirs(args.checkpoint)
title = args.dataset + '-' + args.arch
print("Create model {}-{}".format(args.arch, args.dataset))
model = get_model(args.arch, n_classes, se=args.se)
# optimizer = torch.optim.SGD(
# model.parameters(), lr=args.l_rate, momentum=0.99, weight_decay=5e-4)
optimizer = torch.optim.Adam(model.parameters(), lr=args.l_rate)
#, momentum=args.momentum, weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint['epoch']))
# logger = Logger(
# os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if torch.cuda.is_available():
model.cuda(0)
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(
['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train MIOU', 'Val MIOU'])
cudnn.benchmark = True
print('Total params: %.2f M' % (sum(p.numel() for p in model.parameters()) /
(1024 * 1024)))
if args.evaluate:
print('Evaluation only')
score, class_iou = validate(valloader, model, cross_entropy2d,
n_classes, args.flip)
return
lr = args.l_rate
for epoch in range(args.n_epoch):
lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule,
args.gamma)
print('Epoch: %d | LR: %.8f' % (epoch + 1, lr))
train_loss, train_acc = train(trainloader, model, cross_entropy2d,
optimizer, n_classes, args.flip)
valid_loss, valid_acc = validate(valloader, model, cross_entropy2d,
n_classes, args.flip)
logger.append(
[epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])
is_best = valid_acc > best_acc
best_acc = max(valid_acc, best_acc)
save_checkpoint(
{
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict()
},
is_best,
checkpoint=args.checkpoint)
logger.close()
logger.plot(['Train MIOU', 'Val MIOU'])
savefig(os.path.join(args.checkpoint, 'log.eps'))
def train(trainloader, model, criterion, optimizer, num_classes=3, flip=True):
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
model.train()
end = time.time()
bar = Bar('Processing', max=len(trainloader))
for i, (images, labels) in enumerate(trainloader):
data_time.update(time.time() - end)
if torch.cuda.is_available():
images = Variable(images.cuda(0))
labels = Variable(labels.cuda(0))
else:
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
if flip:
flip_images_var = Variable(
torch.from_numpy(fliplr(images.clone().numpy())).float()
.cuda(0))
flip_outputs_var = model(flip_images_var)
flip_outputs_var = flip_back(flip_outputs_var.data.cpu())
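            # NOTE (comment added for clarity): the flipped prediction is
            # computed but never averaged back into `outputs`, so test-time
            # flip augmentation currently has no effect on the loss or the
            # reported IoU (the same applies in `validate` below).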
# weight = torch.cuda.FloatTensor([0.1, 0.5, 0.4])
weight = None
loss = criterion(outputs, labels, weight)
pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
gt = labels.data.cpu().numpy()
score, class_iou = scores(gt, pred, num_classes)
acc = score['Mean IoU : \t']
losses.update(loss.data[0], images.size(0))
acces.update(acc, images.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
batch=i + 1,
size=len(trainloader),
data=data_time.val,
bt=batch_time.val,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
acc=acces.avg)
bar.next()
bar.finish()
# test_output = model(test_image)
# predicted = train_loader.decode_segmap(test_output[0].cpu().data.numpy().argmax(0))
# target = train_loader.decode_segmap(test_segmap.numpy())
# vis.image(test_image[0].cpu().data.numpy(), opts=dict(title='Input' + str(epoch)))
# vis.image(np.transpose(target, [2,0,1]), opts=dict(title='GT' + str(epoch)))
# vis.image(np.transpose(predicted, [2,0,1]), opts=dict(title='Predicted' + str(epoch)))
# if not os.path.exists('checkpoints'):
# os.makedirs('checkpoints')
# torch.save(model, "checkpoints/{}_{}_{}_{}.pkl".format(args.arch, args.dataset, args.feature_scale, epoch))
return losses.avg, acces.avg
def validate(valloader, model, criterion, num_classes, flip=True):
data_time = AverageMeter()
batch_time = AverageMeter()
losses = AverageMeter()
acces = AverageMeter()
model.eval()
end = time.time()
bar = Bar('Processing', max=len(valloader))
gts, preds = [], []
for i, (images, labels) in enumerate(valloader):
data_time.update(time.time() - end)
if torch.cuda.is_available():
images = Variable(images.cuda(0), volatile=True)
labels = Variable(labels.cuda(0), volatile=True)
else:
images = Variable(images)
labels = Variable(labels)
outputs = model(images)
if flip:
flip_images_var = Variable(
torch.from_numpy(fliplr(images.clone().numpy())).float()
.cuda(0))
flip_outputs_var = model(flip_images_var)
flip_outputs_var = flip_back(flip_outputs_var.data.cpu())
# weight = torch.cuda.FloatTensor([0.1, 0.5, 0.4])
weight = None
loss = criterion(outputs, labels, weight)
pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
gt = labels.data.cpu().numpy()
for gt_, pred_ in zip(gt, pred):
gts.append(gt_)
preds.append(pred_)
score, class_iou = scores(gt, pred, num_classes)
acc = score['Mean IoU : \t']
losses.update(loss.data[0], images.size(0))
acces.update(acc, images.size(0))
batch_time.update(time.time() - end)
end = time.time()
bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
batch=i + 1,
size=len(valloader),
data=data_time.val,
bt=batch_time.val,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
acc=acces.avg)
bar.next()
bar.finish()
score, class_iou = scores(gts, preds, n_class=3)
for k, v in score.items():
        print(k, v)
    for i in range(num_classes):
        print(i, class_iou[i])
return losses.avg, acces.avg
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--resume', type=str, default='', help='the model path')
parser.add_argument(
'-a',
'--arch',
nargs='?',
type=str,
default='segnet',
help='Architecture to use [\'fcn8s, unet, segnet etc\']')
parser.add_argument(
'-d',
'--dataset',
nargs='?',
type=str,
default='ustc',
help='Dataset to use [\'pascal, camvid, ade20k etc\']')
parser.add_argument(
'--img_rows',
nargs='?',
type=int,
default=300,
help='Height of the input image')
parser.add_argument(
'--img_cols',
nargs='?',
type=int,
default=500,
        help='Width of the input image')
parser.add_argument(
'-n',
'--n_epoch',
nargs='?',
type=int,
default=100,
help='# of the epochs')
parser.add_argument(
'-b', '--batch_size', nargs='?', type=int, default=8, help='Batch Size')
parser.add_argument(
'--l_rate', nargs='?', type=float, default=0.001, help='Learning Rate')
parser.add_argument(
'--feature_scale',
nargs='?',
type=int,
default=1,
help='Divider for # of features to use')
parser.add_argument(
'--schedule',
type=int,
nargs='+',
default=[60, 90],
help='Decrease learning rate at these epochs.')
parser.add_argument(
'--gamma',
type=float,
default=0.1,
help='LR is multiplied by gamma on schedule.')
parser.add_argument(
'-f',
'--flip',
dest='flip',
action='store_true',
help='flip the input during validation')
parser.add_argument(
'-c',
'--checkpoint',
default='checkpoints',
type=str,
metavar='PATH',
        help='path to save checkpoint (default: checkpoints)')
parser.add_argument(
'-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--se', action='store_true', help="has SE units or not")
parser.add_argument(
'-i',
'--img_path',
nargs='?',
type=str,
default=None,
help='Path of the input image')
parser.add_argument(
'-o',
'--out_path',
nargs='?',
type=str,
default=None,
help='Path of the output segmap')
args = parser.parse_args()
main(args)
|
{"hexsha": "57640ba25e338a6e0b33ce75d8395d6fba586ecf", "size": 11924, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "hzh8311/project", "max_stars_repo_head_hexsha": "4af81f9156e10738cd1d45d495613575ad2308c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "hzh8311/project", "max_issues_repo_head_hexsha": "4af81f9156e10738cd1d45d495613575ad2308c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "hzh8311/project", "max_forks_repo_head_hexsha": "4af81f9156e10738cd1d45d495613575ad2308c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.402173913, "max_line_length": 153, "alphanum_fraction": 0.5827742368, "include": true, "reason": "import numpy", "num_tokens": 2783}
|
import logging
import datetime
import time
import ray
import cupy
from alpa.collective.const import ENV
from alpa.collective.collective_group import nccl_util
from alpa.collective.collective_group.base_collective_group import BaseGroup
from alpa.collective.const import get_store_name
from alpa.collective.types import (AllReduceOptions, BarrierOptions, Backend,
ReduceOptions, BroadcastOptions,
AllGatherOptions, ReduceScatterOptions,
SendOptions, RecvOptions)
from alpa.collective.collective_group.cuda_stream import get_stream_pool
logger = logging.getLogger(__name__)
class Rendezvous:
"""A rendezvous class for different actor/task processes to meet.
To initialize an NCCL collective communication group, different
actors/tasks spawned in Ray in a collective group needs to meet
each other to synchronize the NCCLUniqueID. This class guarantees
they meet via the NCCLUniqueIDStore, initialized on the rank=0
process.
Args:
        store_key (str): the unique store key, usually a concatenation
of group_name and communicator key. See `get_nccl_communicator`
for more details.
"""
def __init__(self, store_key):
if not store_key:
raise ValueError(
"Invalid store_key. The store_key is a concatenation of "
"'group_name' and the 'communicator_key'. See the "
"docstring of `get_nccl_communicator` for details.")
self._store_key = store_key
self._store_name = None
self._store = None
def meet(self, timeout_s=180):
"""Meet at the named actor store.
Args:
timeout_s (int): timeout in seconds.
Return:
None
"""
if timeout_s <= 0:
raise ValueError("The 'timeout' argument must be positive. "
f"Got '{timeout_s}'.")
self._store_name = get_store_name(self._store_key)
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
try:
logger.debug(f"Trying to meet at the store '{self._store_name}'")
self._store = ray.get_actor(self._store_name)
except ValueError:
logger.debug(f"Failed to meet at the store '{self._store_name}'. "
"Trying again...")
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
logger.debug("Successful rendezvous!")
break
if not self._store:
raise RuntimeError("Unable to meet other processes "
"at the rendezvous store. If you are using "
"P2P communication, please check if tensors "
"are put in the correct GPU. ")
@property
def store(self):
return self._store
def get_nccl_id(self, timeout_s=180):
"""Get the NCCLUniqueID from the store through Ray.
Args:
timeout_s: timeout in seconds.
Return:
uid (str): the NCCLUniqueID if successful.
"""
if not self._store:
raise ValueError("Rendezvous store is not setup.")
uid = None
timeout_delta = datetime.timedelta(seconds=timeout_s)
elapsed = datetime.timedelta(seconds=0)
start_time = datetime.datetime.now()
while elapsed < timeout_delta:
uid = ray.get(self._store.get_id.remote())
if not uid:
time.sleep(1)
elapsed = datetime.datetime.now() - start_time
continue
break
if not uid:
raise RuntimeError("Unable to get the NCCLUniqueID from the store.")
return uid
def get_access_counter(self):
"""Return how many times the NCCLUniqueID has been accessed."""
return ray.get(self._store.get_access_counter.remote())
def destroy_store(self):
"""Delete the named actor."""
ray.kill(self._store)
self._store = None
class NCCLGroup(BaseGroup):
def __init__(self, world_size, rank, group_name):
"""Init an NCCL collective group."""
super().__init__(world_size, rank, group_name)
# communicator and stream cache.
# TODO (Hao): we need a lock here...
self._barrier_tensor = None
self._dev_comm_map = {}
self._dev_streams_map = {}
# record the used GPU IDs.
self._used_gpu_indices = set()
# TODO(Fu): might need an event map
self._dev_event_map = {}
if nccl_util.get_nccl_build_version() < 2000:
raise RuntimeError("NCCL in Ray requires NCCL >= 2.0.")
if nccl_util.get_nccl_runtime_version() < 2704:
logger.warning("NCCL send/recv calls requires NCCL>=2.7.4")
def destroy_group(self):
"""Destroy the group and release NCCL communicators."""
if len(self._dev_comm_map.keys()) > 0:
# TODO(Hao): check this barrier call
# self.barrier()
# Destroy the communicators and streams.
for comm_key, comms in self._dev_comm_map.items():
for c in comms:
c.destroy()
self._dev_comm_map[comm_key] = None
if self.rank == 0:
for comm_key in self._dev_comm_map:
assert not self._dev_comm_map[comm_key]
group_key = self._generate_group_key(comm_key)
self._destroy_store(group_key)
self._barrier_tensor = None
self._dev_comm_map = None
self._dev_streams_map = None
@classmethod
def backend(cls):
return Backend.NCCL
def allreduce(self, tensors, allreduce_options=AllReduceOptions()):
"""AllReduce tensors across the collective group following options.
Args:
tensors (List): the list of tensors to be reduced. Each tensor must
reside on one GPU of the current process.
allreduce_options: allreduce options.
Returns:
None
"""
def collective_fn(input_tensor, output_tensor, comm, stream):
comm.allReduce(
nccl_util.get_tensor_ptr(input_tensor),
nccl_util.get_tensor_ptr(output_tensor),
nccl_util.get_tensor_n_elements(input_tensor),
nccl_util.get_nccl_tensor_dtype(input_tensor),
nccl_util.get_nccl_reduce_op(allreduce_options.reduce_op),
stream.ptr)
self._collective(tensors, tensors, collective_fn)
def barrier(self, barrier_options=BarrierOptions()):
"""Blocks until all processes reach this barrier.
Args:
barrier_options: barrier options.
Returns:
None
"""
# Get the device list.
if self._used_gpu_indices:
devices = list(self._used_gpu_indices)
else:
devices = list(range(nccl_util.get_num_gpus()))
barrier_tensors = [None] * len(devices)
for i, d in enumerate(devices):
with nccl_util.Device(d):
barrier_tensors[i] = cupy.array([1])
self.allreduce(barrier_tensors)
def reduce(self, tensors, reduce_options=ReduceOptions()):
"""Reduce tensors to a destination gpu following options.
Args:
tensors (List): the list of tensors to be reduced, each tensor
must reside on one gpu of the current process.
reduce_options: reduce options.
Returns:
None
"""
root_rank = (len(tensors) * reduce_options.root_rank +
reduce_options.root_tensor)
def collective_fn(input_tensor, output_tensor, comm, stream):
comm.reduce(nccl_util.get_tensor_ptr(input_tensor),
nccl_util.get_tensor_ptr(output_tensor),
nccl_util.get_tensor_n_elements(input_tensor),
nccl_util.get_nccl_tensor_dtype(input_tensor),
nccl_util.get_nccl_reduce_op(reduce_options.reduce_op),
root_rank, stream.ptr)
self._collective(tensors, tensors, collective_fn)
def broadcast_partialgpu(self, tensors, broadcast_options=BroadcastOptions()):
"""Broadcast tensors to all other gpus following options.
        It only involves a subset of the GPUs on this worker.
Args:
tensors (List): tensors to be broadcast or received.
broadcast_options: broadcast options.
Returns:
None
"""
root_rank = 0
def collective_fn(input_tensor, output_tensor, comm, stream):
comm.broadcast(nccl_util.get_tensor_ptr(input_tensor),
nccl_util.get_tensor_ptr(output_tensor),
broadcast_options.n_elements if broadcast_options.n_elements > 0 else
nccl_util.get_tensor_n_elements(input_tensor),
nccl_util.get_nccl_tensor_dtype(input_tensor),
root_rank, stream.ptr)
_check_gpu_tensors(tensors)
key = broadcast_options.comm_key
comms = self._get_nccl_broadcast_communicator(key, broadcast_options.world_size,
broadcast_options.devices_ids,
broadcast_options.devices_global_rank)
streams = self._dev_streams_map[key]
events = self._dev_event_map[key]
self._sync_streams(broadcast_options.devices_ids, events, streams)
nccl_util.groupStart()
for i, tensor in enumerate(tensors):
collective_fn(tensor, tensor, comms[i], streams[i])
nccl_util.groupEnd()
def _get_nccl_broadcast_communicator(self, comm_key, world_size, devices_ids, devices_global_rank, nccl_uid=None):
"""Create or retrieve an NCCL communicator for broadcast from cache.
Here we only use partial devices in a host, so we create this function
besides _get_nccl_collective_communicator.
If the communicator is found in cache, return the communicator. If not,
a communicator and a stream will be created and put in cache.
Args:
comm_key (str): the key to query the communicator cache.
world_size (int): the number of devices in this collective communicator.
devices_ids (List): a list of GPU devices of the current process
that participates into the collective.
devices_global_rank (List): the corresponding global rank for device in devices_ids.
nccl_uid : If it is None, we will create a nccl_uid here.
Returns:
communicator: the NCCL communicator corresponded to the devices.
"""
if not comm_key:
raise RuntimeError("Got empty communicator key.")
for d in devices_ids:
self._used_gpu_indices.add(d)
# TODO(Hao): lock the _dev_comm_map here.
if comm_key in self._dev_comm_map:
return self._dev_comm_map[comm_key]
group_key = self._generate_group_key(comm_key)
if devices_global_rank[0] == 0:
if nccl_uid is None:
nccl_uid = self._generate_nccl_uid(group_key)
else:
if nccl_uid is None:
rendezvous = Rendezvous(group_key)
rendezvous.meet()
nccl_uid = rendezvous.get_nccl_id()
            # Recycle the NCCLUniqueIDStore named actor *proactively* to
            # avoid a named actor leak.
if rendezvous.get_access_counter() == self.world_size:
logger.debug(
"NCCLUniqueID has been broadcasted. The NCCLUniqueIDStore "
"will go out of context and be destroyed.")
rendezvous.destroy_store()
# Now create the communicators
comms = [None] * len(devices_ids)
streams = [None] * len(devices_ids)
events = [None] * len(devices_ids)
nccl_util.groupStart()
for i, (global_rank, device_id) in enumerate(zip(devices_global_rank, devices_ids)):
with nccl_util.Device(device_id):
comms[i] = nccl_util.create_nccl_communicator(world_size, nccl_uid, global_rank)
streams[i] = get_stream_pool(device_id).get_stream()
events[i] = cupy.cuda.Event()
nccl_util.groupEnd()
self._dev_comm_map[comm_key] = comms
self._dev_streams_map[comm_key] = streams
self._dev_event_map[comm_key] = events
return comms
def broadcast(self, tensors, broadcast_options=BroadcastOptions()):
"""Broadcast tensors to all other gpus following options.
Args:
tensors (List): tensors to be broadcast or received.
broadcast_options: broadcast options.
Returns:
None
"""
root_rank = (len(tensors) * broadcast_options.root_rank +
broadcast_options.root_tensor)
def collective_fn(input_tensor, output_tensor, comm, stream):
comm.broadcast(nccl_util.get_tensor_ptr(input_tensor),
nccl_util.get_tensor_ptr(output_tensor),
nccl_util.get_tensor_n_elements(input_tensor),
nccl_util.get_nccl_tensor_dtype(input_tensor),
root_rank, stream.ptr)
self._collective(tensors, tensors, collective_fn)
def allgather(self,
tensor_lists,
tensors,
allgather_options=AllGatherOptions()):
"""Allgather tensors across gpus into a list of tensors.
Args:
tensor_lists (List[List[Tensor]]): allgathered tensors.
tensors: the list of tensors to allgather across the group.
                Each tensor must reside on a GPU of the process.
allgather_options: allgather options.
Returns:
None
"""
def collective_fn(input_tensor, output_tensor, comm, stream):
comm.allGather(nccl_util.get_tensor_ptr(input_tensor),
nccl_util.get_tensor_ptr(output_tensor),
nccl_util.get_tensor_n_elements(input_tensor),
nccl_util.get_nccl_tensor_dtype(input_tensor),
stream.ptr)
_check_inputs_compatibility_for_scatter_gather(tensors, tensor_lists)
output_flattened = [
_flatten_for_scatter_gather(tensor_list, copy=False)
for tensor_list in tensor_lists
]
def postprocess_fn(stream):
# TODO(Hao): designate a copy stream.
for i, tensor_list in enumerate(tensor_lists):
for j, tensor in enumerate(tensor_list):
nccl_util.copy_tensor(tensor, output_flattened[i][j])
self._collective(tensors,
output_flattened,
collective_fn,
postprocess_fn=postprocess_fn)
def reducescatter(self,
tensors,
tensor_lists,
reducescatter_options=ReduceScatterOptions()):
"""Reduce then scatter a list of tensors across the group.
Args:
tensors (List): the output tensors (could be unspecified), each
located on a GPU of the current process.
tensor_lists (List[List]): the list of tensors to be reduced then
scattered.
reducescatter_options: reduce-scatter options.
Returns:
None
"""
def collective_fn(input_tensor, output_tensor, comm, stream):
comm.reduceScatter(
nccl_util.get_tensor_ptr(input_tensor),
nccl_util.get_tensor_ptr(output_tensor),
nccl_util.get_tensor_n_elements(output_tensor),
nccl_util.get_nccl_tensor_dtype(output_tensor),
nccl_util.get_nccl_reduce_op(reducescatter_options.reduce_op),
stream.ptr)
_check_inputs_compatibility_for_scatter_gather(tensors, tensor_lists)
input_flattened = [
_flatten_for_scatter_gather(tensor_list, copy=False)
for tensor_list in tensor_lists
]
def preprocess_fn(stream):
for i, tensor_list in enumerate(tensor_lists):
for j, tensor in enumerate(tensor_list):
nccl_util.copy_tensor(input_flattened[i][j], tensor)
self._collective(input_flattened,
tensors,
collective_fn,
preprocess_fn=preprocess_fn)
def send(self, tensors, send_options=SendOptions()):
"""Send a tensor to a destination gpu in the group.
Args:
tensors (List): the tensor to send.
send_options: send options.
Returns:
None
"""
def p2p_fn(tensor, comm, stream, peer):
comm.send(
nccl_util.get_tensor_ptr(tensor),
send_options.n_elements if send_options.n_elements > 0 else
nccl_util.get_tensor_n_elements(tensor),
nccl_util.get_nccl_tensor_dtype(tensor), peer, stream.ptr)
self._point2point(tensors, p2p_fn, send_options.dst_rank,
send_options.dst_gpu_index)
def recv(self, tensors, recv_options=RecvOptions()):
"""Receive a tensor from a source gpu in the group.
Args:
tensors (List): the received tensor.
recv_options: Receive options.
Returns:
None
"""
def p2p_fn(tensor, comm, stream, peer):
comm.recv(
nccl_util.get_tensor_ptr(tensor),
recv_options.n_elements if recv_options.n_elements > 0 else
nccl_util.get_tensor_n_elements(tensor),
nccl_util.get_nccl_tensor_dtype(tensor), peer, stream.ptr)
self._point2point(tensors, p2p_fn, recv_options.src_rank,
recv_options.src_gpu_index)
def _get_nccl_collective_communicator(self, comm_key, device_list):
"""Create or retrieve an NCCL communicator from cache.
If the communicator is found in cache, return the communicator. If not,
a communicator and a stream will be created and put in cache.
TODO(Hao): this function is not thread-safe now.
Args:
comm_key (str): the key to query the communicator cache.
device_list (List): a list of GPU devices of the current process
that participates into the collective.
Returns:
communicator: the NCCL communicator corresponded to the devices.
"""
if not comm_key:
raise RuntimeError("Got empty communicator key.")
for d in device_list:
self._used_gpu_indices.add(d)
# TODO(Hao): lock the _dev_comm_map here.
if comm_key in self._dev_comm_map:
return self._dev_comm_map[comm_key]
group_key = self._generate_group_key(comm_key)
if self.rank == 0:
nccl_uid = self._generate_nccl_uid(group_key)
else:
rendezvous = Rendezvous(group_key)
rendezvous.meet()
nccl_uid = rendezvous.get_nccl_id()
            # Recycle the NCCLUniqueIDStore named actor *proactively* to
            # avoid a named actor leak.
if rendezvous.get_access_counter() == self.world_size:
logger.debug(
"NCCLUniqueID has been broadcasted. The NCCLUniqueIDStore "
"will go out of context and be destroyed.")
rendezvous.destroy_store()
# Now create the communicators
actual_world_size = len(device_list) * self.world_size
comms = [None] * len(device_list)
streams = [None] * len(device_list)
events = [None] * len(device_list)
nccl_util.groupStart()
for i, device in enumerate(device_list):
actual_rank = self.rank * len(device_list) + i
with nccl_util.Device(device):
comms[i] = nccl_util.create_nccl_communicator(
actual_world_size, nccl_uid, actual_rank)
# request a stream from the pool
# note the device_idx is absolute index.
streams[i] = get_stream_pool(device).get_stream()
# TODO(Fu): double check the parameters
events[i] = cupy.cuda.Event()
nccl_util.groupEnd()
# TODO(Fu): lock
self._dev_comm_map[comm_key] = comms
self._dev_streams_map[comm_key] = streams
self._dev_event_map[comm_key] = events
return comms
@staticmethod
def _sync_streams(device_list, events, streams):
"""Let NCCL streams wait for current streams for every device."""
# TODO(Fu): recordStream besides calling this function?
if ENV.NCCL_USE_MULTISTREAM.val:
for i, device in enumerate(device_list):
with nccl_util.Device(device):
events[i].record(cupy.cuda.get_current_stream())
streams[i].wait_event(events[i])
def _get_nccl_p2p_communicator(self,
comm_key,
my_gpu_idx,
peer_rank,
peer_gpu_idx,
nccl_uid=None):
"""Create or retrieve an NCCL communicator for p2p tasks.
Note(Hao): this function is not thread-safe now.
Args:
comm_key (str): communicator key.
my_gpu_idx (int): the gpu index on the current process.
peer_rank (int): the rank of the destination process.
peer_gpu_idx (int): the gpu index on the peer process.
Returns:
communicator
"""
if not comm_key:
raise RuntimeError("Got empty communicator key.")
# TODO(Hao): lock the _dev_comm_map here.
if comm_key in self._dev_comm_map:
return self._dev_comm_map[comm_key]
        # Note (Hao): This is a bit complex, so I decided to leave a note here.
# Here we need to consider three cases:
# Case 1: src_rank != dst_rank, hence the send and recv happen on
# different process (actors/tasks); each process makes independent
# collective calls and manages corresponding communicators.
# Case 2: src_rank == dst_rank, src_gpu_idx == dst_gpu_idx; for
# this case, we simply throw a RuntimeError;
# Case 3: src_rank == dst_rank, src_gpu_idx != dst_gpu_idx, which
# means the send and recv will be called on the same process. We
# DO NOT support this case for now. We need to properly scope:
# (1) communicators creation, and
# (2) send/recv calls
        # using groupStart() and groupEnd() calls to avoid deadlocks.
if self.rank < peer_rank:
my_p2p_rank = 0
elif self.rank > peer_rank:
my_p2p_rank = 1
else:
raise RuntimeError(
"Send and recv happens on the same process! "
"alpa.collective does not support this case as of now. "
"Alternatively, consider doing GPU to GPU memcpy?")
group_key = self._generate_group_key(comm_key)
if my_p2p_rank == 0:
if nccl_uid is None:
nccl_uid = self._generate_nccl_uid(group_key)
else:
if nccl_uid is None:
rendezvous = Rendezvous(group_key)
rendezvous.meet(timeout_s=3000)
nccl_uid = rendezvous.get_nccl_id()
                # Recycle the NCCLUniqueIDStore named actor *proactively*
                # to avoid a named actor leak.
if rendezvous.get_access_counter() == 2:
logger.debug(
"NCCLUniqueID has been broadcasted. The NCCLUniqueIDStore "
"will go out of context and be destroyed.")
rendezvous.destroy_store()
# create the p2p communicators
with nccl_util.Device(my_gpu_idx):
comm = nccl_util.create_nccl_communicator(2, nccl_uid, my_p2p_rank)
stream = get_stream_pool(my_gpu_idx).get_stream()
event = cupy.cuda.Event()
self._dev_comm_map[comm_key] = [comm]
self._dev_streams_map[comm_key] = [stream]
self._dev_event_map[comm_key] = [event]
return [comm]
def _generate_group_key(self, comm_key):
"""Generate a unique key used to initialize the KV store.
The group key is a concatenation of the communicator key and
the group name, following: [comm_key]@[group_name].
"""
return comm_key + "@" + self.group_name
@staticmethod
def _destroy_store(group_key):
"""Destroy the KV store (Ray named actor).
Args:
group_key (str): the unique key to retrieve the KV store.
Returns:
None
"""
store_name = get_store_name(group_key)
try:
store = ray.get_actor(store_name)
ray.kill(store)
except ValueError:
logger.info(
f"The store with name {store_name} has been destroyed somewhere else.")
@staticmethod
def generate_nccl_uid():
group_uid = nccl_util.get_nccl_unique_id()
return group_uid
@staticmethod
def _generate_nccl_uid(key):
"""Generate an NCCL unique ID for initializing communicators.
The method will also create a KV store using Ray named actor and store
the NCCLUniqueID in the store. The store needs to be garbage collected
when destroying the collective group.
Args:
            key (str): the group key used to name the KV store.
Returns:
NCCLUniqueID (str): NCCL unique ID.
"""
group_uid = nccl_util.get_nccl_unique_id()
store_name = get_store_name(key)
# Avoid a potential circular dependency in ray/actor.py
from alpa.collective.util import NCCLUniqueIDStore # pylint: disable=import-outside-toplevel
store = NCCLUniqueIDStore.options(
name=store_name, lifetime="detached").remote(store_name)
ray.get([store.set_id.remote(group_uid)])
return group_uid
def _collective(self,
input_tensors,
output_tensors,
collective_fn,
preprocess_fn=None,
postprocess_fn=None):
"""A method to encapsulate all collective calls.
Args:
input_tensors: the list of the input tensors.
output_tensors: the list of the output tensors.
collective_fn: the collective function call.
preprocess_fn: preprocess procedures before collective calls.
postprocess_fn: postprocess procedures after collective calls.
Returns:
None
"""
_check_gpu_tensors(input_tensors)
_check_gpu_tensors(output_tensors)
devices = nccl_util.get_tensor_device_list(input_tensors)
key = _get_comm_key_from_devices(devices)
comms = self._get_nccl_collective_communicator(key, devices)
streams = self._dev_streams_map[key]
events = self._dev_event_map[key]
# TODO(Hao): sync streams and events
self._sync_streams(devices, events, streams)
# Make the collective call
if preprocess_fn:
preprocess_fn(streams)
nccl_util.groupStart()
# TODO(Fu): how to recordStreams as there are no library functions
# We also need to make sure input tensors are not freed before their
# usages on ncclStreams finish. This can be achieved by calling
# c10::cuda::CUDACachingAllocator::recordStream, which remembers the
# usage stream (ncclStream), creates an event on the usage stream
# when GC attempts to free the input tensor, and delays GC until that
# event is done.
for i, tensor in enumerate(input_tensors):
collective_fn(tensor, output_tensors[i], comms[i], streams[i])
nccl_util.groupEnd()
if postprocess_fn:
postprocess_fn(streams)
def create_p2p_communicator(self,
my_gpu_idx: int,
peer_rank: int,
peer_gpu_idx: int,
nccl_uid: str = None):
"""A public method to create p2p communicators
Args:
my_gpu_idx (int): the gpu index on self rank.
peer_rank (int): the rank of the peer process.
peer_gpu_idx (int): the index of the gpu on the peer process.
nccl_uid (str, optional): optionally to provide the NCCLUniqueID in advance.
Returns:
None
"""
comm_key = _get_comm_key_send_recv(self.rank, my_gpu_idx, peer_rank,
peer_gpu_idx)
self._get_nccl_p2p_communicator(comm_key, my_gpu_idx, peer_rank,
peer_gpu_idx, nccl_uid)
def _point2point(self, tensors, p2p_fn, peer_rank: int, peer_gpu_idx: int):
"""A method to encapsulate all peer-to-peer calls (i.e., send/recv).
Args:
tensors: the tensor to send or receive.
p2p_fn: the p2p function call.
peer_rank (int): the rank of the peer process.
peer_gpu_idx (int): the index of the gpu on the peer process.
Returns:
None
"""
# check send/recv availability.
if nccl_util.get_nccl_runtime_version() < 2704:
raise RuntimeError("P2p send/recv requires NCCL >= 2.7.4. "
f"Got '{nccl_util.get_nccl_runtime_version()}'.")
_check_gpu_tensors(tensors)
# we currently only support single device to single device send/recv.
assert len(tensors) == 1
my_gpu_idx = nccl_util.get_tensor_device(tensors[0])
comm_key = _get_comm_key_send_recv(self.rank, my_gpu_idx, peer_rank,
peer_gpu_idx)
comms = self._get_nccl_p2p_communicator(comm_key, my_gpu_idx, peer_rank,
peer_gpu_idx)
streams = self._dev_streams_map[comm_key]
events = self._dev_event_map[comm_key]
# TODO(Hao): sync streams and events
self._sync_streams([my_gpu_idx], events, streams)
# We have made sure that self.rank != peer_rank during API check.
peer_p2p_rank = 0 if self.rank > peer_rank else 1
for i, t in enumerate(tensors):
p2p_fn(t, comms[i], streams[i], peer_p2p_rank)
def _flatten_for_scatter_gather(tensor_list, copy=False):
"""Flatten the tensor for gather/scatter operations.
Args:
tensor_list: the list of tensors to be scattered/gathered.
copy: whether the copy the tensors in tensor_list into the buffer.
Returns:
The flattened tensor buffer.
"""
if not tensor_list:
raise RuntimeError("Received an empty list.")
t = tensor_list[0]
# note we need a cupy dtype here.
dtype = nccl_util.get_cupy_tensor_dtype(t)
buffer_shape = [len(tensor_list)] + nccl_util.get_tensor_shape(t)
device = nccl_util.get_tensor_device(t)
with nccl_util.Device(device):
buffer = cupy.empty(buffer_shape, dtype=dtype)
if copy:
for i, tensor in enumerate(tensor_list):
nccl_util.copy_tensor(buffer[i], tensor)
return buffer
def _check_inputs_compatibility_for_scatter_gather(tensors, tensor_lists):
"""Check the compatibility between tensor input and tensor list input."""
if not tensors or not isinstance(tensors, list):
raise RuntimeError(
"The first argument 'tensors' expects a list of tensors.")
if not tensor_lists or not isinstance(tensor_lists, list):
raise RuntimeError("The second argument 'tensor_lists' "
"expects a list of tensor list.")
dtype = nccl_util.get_nccl_tensor_dtype(tensors[0])
shape = nccl_util.get_tensor_shape(tensors[0])
for i, tl in enumerate(tensor_lists):
# check all tensor in `tensors` match.
dt = nccl_util.get_nccl_tensor_dtype(tensors[i])
if dt != dtype:
raise RuntimeError("All tensor operands to scatter/gather must "
f"have the same dtype. Got '{dt}' and '{dtype}'.")
        # Note: typically CCL libraries only require that the operands have
        # the same number of elements; here we are stricter and require an
        # exact shape match.
s = nccl_util.get_tensor_shape(tensors[i])
if s != shape:
raise RuntimeError("All tensor operands to scatter/gather must "
f"have the same shape. Got '{s}' and '{shape}'.")
# check all tensors in `tensor_lists` match.
for t in tl:
# check dtype
dt = nccl_util.get_nccl_tensor_dtype(t)
if dt != dtype:
raise RuntimeError(
"All tensor operands to scatter/gather must "
f"have the same dtype. Got '{dt}' and '{dtype}'.")
s = nccl_util.get_tensor_shape(t)
if s != shape:
raise RuntimeError(
"All tensor operands to scatter/gather must "
f"have the same shape. Got '{s}' and '{shape}'.")
def _check_gpu_tensors(tensors):
"""Check all tensors are distributed on different GPUs."""
if not tensors or not isinstance(tensors, list):
raise RuntimeError("'tensors' must be a nonempty list.")
if len(tensors) > nccl_util.get_num_gpus():
raise RuntimeError("Tensor list cannot be larger than the number"
f"of available GPUs. Got {len(tensors)} > "
f"{nccl_util.get_num_gpus()}.")
t0 = tensors[0]
dt = nccl_util.get_nccl_tensor_dtype(t0)
s = nccl_util.get_tensor_shape(t0)
d = nccl_util.get_tensor_device(t0)
for i, t in enumerate(tensors):
if i == 0:
continue
# We need to check the following:
# (1) tensor is cuda (already checked during API)
# (2) tensor dtype
# (3) tensor shape match
# (4) each tensor is on a different GPU
dtype = nccl_util.get_nccl_tensor_dtype(t)
if dt != dtype:
raise RuntimeError(
f"Tensors must have identical dtypes. Got: '{dtype}'.")
shape = nccl_util.get_tensor_shape(t)
if s != shape:
raise RuntimeError(
f"Tensors must have identical shapes. Got: '{shape}'.")
device = nccl_util.get_tensor_device(t)
if device == d:
raise RuntimeError("Tensor must be on distinct GPUs.")
def _get_comm_key_from_devices(devices):
"""Return a key from a list of devices for collective calls.
For example, if the tensors are on gpus 0, 1, 2, 3,
then the key would be "0,1,2,3".
Args:
devices(list): a list of GPU device indices
Returns:
str: a string represents the key to query the communicator cache.
"""
return ",".join([str(d) for d in devices])
def _get_comm_key_send_recv(my_rank, my_gpu_idx, peer_rank, peer_gpu_idx):
"""Return a key given source and destination ranks for p2p tasks.
The p2p key is in the following form:
[min_rank]_[gpu_index]:[max_rank]_[gpu_index].
Args:
my_rank (int): the rank of the source process.
my_gpu_idx (int): the source gpu index on the process.
peer_rank (int): the rank of the destination process.
peer_gpu_idx (int): the destination gpu index on the process.
Returns:
comm_key (str): a string key to query the communication cache.
"""
if my_rank < peer_rank:
lower_key = str(my_rank) + "_" + str(my_gpu_idx)
higher_key = str(peer_rank) + "_" + str(peer_gpu_idx)
elif my_rank > peer_rank:
lower_key = str(peer_rank) + "_" + str(peer_gpu_idx)
higher_key = str(my_rank) + "_" + str(my_gpu_idx)
else:
raise RuntimeError(
"Send and recv happens on the same process. alpa.collective "
"does not support this case as of now. Alternatively, consider "
"doing GPU to GPU memcpy?")
comm_key = lower_key + ":" + higher_key
return comm_key
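

# Worked example for _get_comm_key_send_recv (derived from the rule above,
# added for illustration): with my_rank=3, my_gpu_idx=0, peer_rank=1,
# peer_gpu_idx=2, both endpoints compute the same key "1_2:3_0",
# independent of which side initiates the call.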
|
{"hexsha": "188789399a575b03352a8fe094b1b7e75174a1de", "size": 37302, "ext": "py", "lang": "Python", "max_stars_repo_path": "alpa/collective/collective_group/nccl_collective_group.py", "max_stars_repo_name": "alpa-projects/alpa", "max_stars_repo_head_hexsha": "2c54de2a8fa8a48c77069f4bad802f4e8fa6d126", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 114, "max_stars_repo_stars_event_min_datetime": "2022-03-02T20:38:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:41:50.000Z", "max_issues_repo_path": "alpa/collective/collective_group/nccl_collective_group.py", "max_issues_repo_name": "alpa-projects/alpa", "max_issues_repo_head_hexsha": "2c54de2a8fa8a48c77069f4bad802f4e8fa6d126", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2022-03-09T22:04:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T17:53:15.000Z", "max_forks_repo_path": "alpa/collective/collective_group/nccl_collective_group.py", "max_forks_repo_name": "alpa-projects/alpa", "max_forks_repo_head_hexsha": "2c54de2a8fa8a48c77069f4bad802f4e8fa6d126", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2022-03-05T12:04:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T03:55:42.000Z", "avg_line_length": 40.1528525296, "max_line_length": 118, "alphanum_fraction": 0.6019784462, "include": true, "reason": "import cupy", "num_tokens": 7790}
|
import Std
namespace LeanSAT
/-- CNF variable
NOTE: Unlike DIMACS, 0 is a valid variable. See `Var.toDIMACS`.
-/
def Var := Nat
deriving Inhabited, DecidableEq, Hashable, Repr, ToString
namespace Var
/-- Allow nat literals `5392` as notation for variables -/
instance : OfNat Var n := ⟨n⟩
end Var
/-- CNF literal -/
inductive Literal
| pos (v : Var) | neg (v : Var)
deriving Inhabited, DecidableEq, Hashable, Repr
namespace Literal
/-- The literal's variable -/
def var : Literal → Var
| pos v => v | neg v => v
/-- True iff the literal is `.pos v` -/
def isPos : Literal → Bool
| pos _ => true | neg _ => false
/-- True iff the literal is `.neg v` -/
def isNeg (l) := not (isPos l)
def not : Literal → Literal
| pos v => neg v
| neg v => pos v
/-- Automatically lift variables to positive literals -/
instance : Coe Var Literal := ⟨.pos⟩
/-- Allow literals to be written as nat constants -/
instance : OfNat Literal n := ⟨show Var from n⟩
instance : ToString Literal where
toString | pos v => s!"{v}"
| neg v => s!"¬{v}"
end Literal
/-- (Partial) assignment to the variables of a formula -/
def Assn := Std.HashMap Var Bool
namespace Assn
@[simp] def hasTrue (v : Var) (a : Assn) : Bool := a.find? v = some true
@[simp] def hasFalse (v : Var) (a : Assn) : Bool := a.find? v = some false
@[simp] def undecided (v : Var) (a : Assn) : Bool := a.find? v = none
def litTrue (l : Literal) (a : Assn) : Bool := a.find? l.var = some l.isPos
def litFalse (l : Literal) (a : Assn) : Bool := a.find? l.var = some l.isNeg
def litUndecided (l : Literal) (a : Assn) : Bool := a.find? l.var = none
@[simp] theorem litTrue_pos : litTrue (.pos v) a = hasTrue v a := rfl
@[simp] theorem litTrue_neg : litTrue (.neg v) a = hasFalse v a := rfl
@[simp] theorem litFalse_pos : litFalse (.pos v) a = hasFalse v a := rfl
@[simp] theorem litFalse_neg : litFalse (.neg v) a = hasTrue v a := rfl
@[simp] theorem litUndecided_pos : litUndecided (.pos v) a = undecided v a := rfl
@[simp] theorem litUndecided_neg : litUndecided (.neg v) a = undecided v a := rfl
def insertLit (l : Literal) (a : Assn) : Assn :=
a.insert l.var l.isPos
def toList (a : Assn) : List Literal :=
Std.HashMap.toList a |>.map (fun (v,pos) => if pos then .pos v else .neg v)
instance : ToString Assn :=
⟨fun assn => assn.toList |>.map toString |> String.intercalate " "⟩
end Assn
/-- CNF clause: just a list of literals -/
structure Clause where
lits : List Literal
deriving Inhabited, DecidableEq, Hashable, Repr
namespace Clause
/-- ⊥ / false clause -/
def empty : Clause := ⟨[]⟩
/-- Check whether any literals in `c` are set true by `a` -/
def eval (a : Assn) (c : Clause) : Bool :=
c.lits.any a.litTrue
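/- Informal example (not from the original file): with an assignment `a` mapping
   variable `1` to `true`, `eval a ⟨[.pos 1, .neg 2]⟩` is `true`, because the
   literal `.pos 1` is already satisfied and `any` needs only one true literal. -/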
@[simp]
theorem eval_nil : eval a ⟨[]⟩ = false
:= by
simp [eval, List.any, List.foldr]
@[simp]
theorem eval_cons : eval a ⟨l::ls⟩ = (a.litTrue l || eval a ⟨ls⟩)
:= by
simp [eval, List.any, List.foldr]
instance : OfNat Clause n := ⟨(⟨[.pos n]⟩)⟩
instance : Coe Literal Clause := ⟨(⟨[·]⟩)⟩
instance : Coe (List Literal) Clause := ⟨(⟨·⟩)⟩
instance : ToString Clause where
toString | ⟨lits⟩ => toString lits
end Clause
/-- CNF formula: a collection of clauses.
This structure is used for formalizing lemmas about sat/unsat
reductions and the likes. -/
structure Formula where
clauses : List Clause
deriving DecidableEq, Repr
namespace Formula
def numVars : Formula → Nat
| ⟨clauses⟩ =>
clauses.filterMap (·.lits.map (β := Nat) Literal.var |>.maximum?)
|>.maximum?.map Nat.succ |>.getD 0
def vars : Formula → List Var
| ⟨clauses⟩ => Id.run do
let mut set := Std.HashMap.empty
for c in clauses do
for l in c.lits do
set := set.insert l.var ()
return set.toList.map (·.1)
/-- ⊤ / true Formula -/
def empty : Formula := ⟨[]⟩
/-- Check whether all clauses in `c` are satisfied by `a` -/
def eval (a : Assn) (c : Formula) : Bool :=
c.clauses.all (·.eval a)
@[simp]
theorem eval_nil : eval a ⟨[]⟩ = true
:= by
simp [eval, List.all, List.foldr]
@[simp]
theorem eval_cons : eval a ⟨c::cs⟩ = (c.eval a && eval a ⟨cs⟩)
:= by
simp [eval, List.all, List.foldr]
/-- Formula `c` is satisfiable if there exists a variable assignment
on which it is satisfied. -/
def satisfiable (c : Formula) := ∃ a, c.eval a = true
/-- Formula `c` is unsatisfiable iff there does not exist a variable
assignment on which it is satisfied. -/
def unsat (c : Formula) := ¬c.satisfiable
instance : Coe Clause Formula := ⟨(⟨[·]⟩)⟩
instance : OfNat Formula n := ⟨Literal.pos n⟩
end Formula
/-! CNF notation -/
namespace Notation
scoped notation:30 a:31 " ∨ " b:30 => Clause.mk (List.append (Clause.lits a) (Clause.lits b))
scoped notation a "∧" b => Formula.mk (List.append (Formula.clauses a) (Formula.clauses b))
scoped notation:max "¬" l:40 => Literal.not l
example : Literal := 5
example : Literal := ¬5
example : Clause := ¬5 ∨ ¬10
example : Formula := (¬5 ∨ ¬10) ∧ 20 ∧ ¬30
end Notation
|
{"author": "JamesGallicchio", "repo": "LeanSAT", "sha": "719470ac796a9149e0f892ccb3dff80c0dd563d3", "save_path": "github-repos/lean/JamesGallicchio-LeanSAT", "path": "github-repos/lean/JamesGallicchio-LeanSAT/LeanSAT-719470ac796a9149e0f892ccb3dff80c0dd563d3/LeanSAT/CNF.lean"}
|
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule Qt5Tools_jll
using Base
using Base: UUID
import JLLWrappers
JLLWrappers.@generate_main_file_header("Qt5Tools")
JLLWrappers.@generate_main_file("Qt5Tools", UUID("a9c6e4b1-b2fb-56d5-96a9-25f276f13840"))
end # module Qt5Tools_jll
|
{"hexsha": "1648c88852b45be0258df731b4cafba050ef8878", "size": 310, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Qt5Tools_jll.jl", "max_stars_repo_name": "JuliaBinaryWrappers/Qt5Tools_jll.jl", "max_stars_repo_head_hexsha": "781c7deec44ae70639bfff64f8207771369fa002", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Qt5Tools_jll.jl", "max_issues_repo_name": "JuliaBinaryWrappers/Qt5Tools_jll.jl", "max_issues_repo_head_hexsha": "781c7deec44ae70639bfff64f8207771369fa002", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Qt5Tools_jll.jl", "max_forks_repo_name": "JuliaBinaryWrappers/Qt5Tools_jll.jl", "max_forks_repo_head_hexsha": "781c7deec44ae70639bfff64f8207771369fa002", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0, "max_line_length": 89, "alphanum_fraction": 0.8096774194, "num_tokens": 106}
|
module emodel
# Uses an eccentric disk
export write_grid, write_model, write_lambda, write_dust, Parameters, Grid
using ..constants
# Write the wavelength sampling file. Only run on setup
function write_lambda(lams::Array{Float64, 1}, basedir::AbstractString)
fcam = open(basedir * "camera_wavelength_micron.inp", "w")
nlam = length(lams)
@printf(fcam, "%d\n", nlam)
for lam in lams
@printf(fcam, "%.9e\n", lam) # [microns]
end
close(fcam)
end
const eqmirror = true # mirror the grid about the z=0 midplane ?
# if we decide to mirror, then ncells = 1/2 of the true value
# Define a grid object which stores all of these variables
# This will not change for the duration of the run
immutable Grid
nr::Int
ntheta::Int
nphi::Int
ncells::Int
# cell edges
Rs::Vector{Float64}
Thetas::Vector{Float64}
Phis::Vector{Float64}
As::Vector{Float64}
# cell centers
rs::Vector{Float64}
thetas::Vector{Float64}
phis::Vector{Float64}
as::Vector{Float64}
a_widths::Vector{Float64}
end
function Grid(nr::Int, ntheta::Int, nphi::Int, r_in::Real, r_out::Real, na::Int, eqmirror::Bool)
# Specify a 2D axisymmetric *separable* grid in spherical coordinates:
# {r, theta, phi}, where theta is angle from zenith, phi is azimuth
# na is the number of eccentric bins that we want to have. Generally, I
# think this should be less than nr.
ncells = nr * ntheta * nphi
r_in = convert(Float64, r_in) * AU # Inner extent of disk
r_out = convert(Float64, r_out) * AU # Outer extent of disk
#Define the cell *walls*
Rs = logspace(log10(r_in), log10(r_out), nr+1) # [cm] logarithmically spaced
if eqmirror
ped = 0.1
#Thetas = linspace(0, pi/2., ntheta+1)
# [rad] Angles are internally defined in radians, not degrees
Thetas = pi/2. - (logspace(log10(ped), log10(pi/2. + ped), ntheta+1) - ped)[end:-1:1]
        #Spaced more closely near the z=0 midplane
else
Thetas = linspace(0, pi, ntheta+1)
# [rad] Angles are internally defined in radians, not degrees
end
Phis = linspace(0, 2pi, nphi + 1) # [rad] cell walls for inactive coordinate
# Define the semi-major axis bin edges such that we fully encapsulate the full range of radii
# Assume 0 <= e <= 0.7
eps = 0.1 * AU # [cm], tiny extra bit of comfort
e = 0.7 # Maximum e we'll tolerate
A_1 = (Rs[1] - eps)/(1 + e)
A_N = (Rs[end] + eps)/(1 - e)
As = logspace(log10(A_1), log10(A_N), na+1)
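    # Sanity check (informal, not in the original source): an orbit with semi-major
    # axis a and eccentricity e spans radii [a(1-e), a(1+e)], so choosing
    # A_1*(1+e) <= Rs[1] and A_N*(1-e) >= Rs[end] guarantees that every grid radius
    # is reachable from some semi-major-axis bin for any e <= 0.7.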
#Define the cell centers as the average between walls
rs = 0.5 * (Rs[1:end-1] + Rs[2:end])
thetas = 0.5 * (Thetas[1:end-1] + Thetas[2:end])
phis = 0.5 * (Phis[1:end-1] + Phis[2:end])
as = 0.5 * (As[1:end-1] + As[2:end])
a_widths = As[2:end] - As[1:end-1]
return Grid(nr, ntheta, nphi, ncells, Rs, Thetas, Phis, As, rs, thetas, phis, as, a_widths)
end
#This function only needs to be run once, upon setup.
function write_grid(basedir::AbstractString, grid::Grid)
#amr_grid.inp
f = open(basedir * "amr_grid.inp", "w")
#Write the header
@printf(f, "%d\n", 1) #iformat
@printf(f, "%d\n", 0) #regular grid (no AMR or Oct-tree)
@printf(f, "%d\n", 100) #spherical coordiantes
@printf(f, "%d\n", 0) #gridinfo (none needed for now)
#incl_r incl_phi incl_z #use this axis?
@printf(f, "%d %d %d \n", 1, 1, 1) # Full 3D grid
#n_r n_phi n_z #number of cells in this dimension
@printf(f, "%d %d %d \n", grid.nr, grid.ntheta, grid.nphi)
# Write out the cell walls
for R in grid.Rs
@printf(f, "%.9e\n", R)
end
for Theta in grid.Thetas
@printf(f, "%.9e\n", Theta)
end
for Phi in grid.Phis
@printf(f, "%.9e\n", Phi)
end
close(f)
end
#Let's try defining a parameters type, an object of which gets passed around.
type Parameters
M_star::Float64 # [M_sun] stellar mass
a_c::Float64 # [AU] characteristic radius
T_10::Float64 # [K] temperature at 10 AU
q::Float64 # temperature gradient exponent
gamma::Float64 # surface temperature gradient exponent
M_CO::Float64 # [M_earth] disk mass of CO
ksi::Float64 # [cm s^{-1}] microturbulence
dpc::Float64 # [pc] distance to system
incl::Float64 # [degrees] inclination 0 deg = face on, 90 = edge on.
PA::Float64 # [degrees] position angle (East of North)
e::Float64 # eccentricity
w::Float64 # [degrees] argument of periastron
    vel::Float64 # [km/s] systemic velocity (positive is redshift/receding)
mu_RA::Float64 # [arcsec] central offset in RA
mu_DEC::Float64 # [arcsec] central offset in DEC
end
# Given an array of N bin edges in radius from the star and an array of radii (xx),
# return an array of indices indicating which of the (N-1) bins each radius falls into.
# Assumes both bin_edges and xx are sorted in increasing order.
function get_bins(bin_edges, xx)
inds = Array(Int64, length(xx))
b::Int64 = 1
nb = length(bin_edges)
for (i,x) in enumerate(xx)
while true
# Greater than the lower bin
if x >= bin_edges[b]
# Smaller than the upper bin
if x < bin_edges[b+1]
inds[i] = b
break
# larger than the upper bin, means we need to try a larger bin index
else
b += 1
if b + 1 > nb
throw(error(@sprintf("x value % s out of bin range.", x)))
end
end
# Smaller than the lower bin, something went wrong?
else
throw(error(@sprintf("x value %s out of bin range.", x)))
end
end
end
return inds
end
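# Hypothetical usage sketch (values chosen for illustration only): with
# bin_edges = [1.0, 2.0, 4.0, 8.0], get_bins(bin_edges, [1.5, 3.0, 7.9])
# returns [1, 2, 3], since each radius falls in the half-open interval
# [bin_edges[b], bin_edges[b+1]).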
# Assume all inputs to these functions are in CGS units and in *cylindrical* coordinates.
# Assume all angles are in *radians*, including argument of periapse
# phi is azimuthal angle, a is semi-major axis, e is eccentricity
function radius{T}(phi::T, a::Float64, e::Float64, w::Float64)
a * (1 - e^2)./(1 + e .* cos(phi - w))
end
radius{T}(phi::T, a::Float64, pars::Parameters) = radius(phi, a, pars.e, pars.w * deg)
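# Consistency check (informal): at periastron (phi = w) this reduces to a*(1-e),
# and at apoastron (phi = w + pi) to a*(1+e), matching the polar-form ellipse
# equation about a focus.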
# Linear density of the ring for the mass-on-a-wire approximation
# m is the total mass of the ring
function lambda(phi::Float64, a::Float64, e::Float64, w::Float64, m::Float64)
m * sqrt(1 - e^2) / (2pi * a * sqrt(1 + 2e * cos(phi - w) + e^2))
end
# Parametric type T allows passing individual Float64 or Vectors.
# Alternate functions accept pars passed around, where pars is in M_star, AU, etc...
function velocity(phi::Float64, a::Float64, e::Float64, w::Float64, M_star::Float64)
v_r = sqrt(G * M_star / a) * e * sin(phi - w) / sqrt(1 - e^2)
v_theta = 0.0
v_phi = sqrt(G * M_star / a) * (1 + e * cos(phi - w)) / sqrt(1 - e^2)
return Float64[v_r, v_theta, v_phi]
end
velocity(phi::Float64, a::Float64, pars::Parameters) = velocity(phi, a, pars.e, pars.w * deg, pars.M_star * M_sun)
function temperature{T}(r::T, T_10::Float64, q::Float64)
T_10 * (r ./ (10. * AU)).^(-q)
end
temperature{T}(r::T, pars::Parameters) = temperature(r, pars.T_10, pars.q)
function Hp{T}(r::T, M_star::Float64, T_10::Float64, q::Float64)
temp = temperature(r, T_10, q)
sqrt(kB * temp .* r.^3./(mu_gas * m_H * G * M_star))
end
Hp{T}(r::T, pars::Parameters) = Hp(r, pars.M_star * M_sun, pars.T_10, pars.q)
# No parametric type for number density, because it is a 2D function.
function n_CO(r::Float64, z::Float64, M_star::Float64, T_10::Float64, q::Float64)
H = Hp(r, M_star, T_10, q)
return 1./ (m_CO * sqrt(2pi) * H) * exp(-0.5 * (z/ H)^2)
end
n_CO(r::Float64, z::Float64, pars::Parameters) = n_CO(r, z, pars.M_star * M_sun, pars.T_10, pars.q)
function n_nobin(r::Float64, phi::Float64, z::Float64, M_star::Float64, a_c::Float64, T_10::Float64, q::Float64, gamma::Float64, M_CO::Float64, e::Float64, w::Float64)
n = n_CO(r, z, M_star, T_10, q)
a = r * (1 + e * cos(phi - w))/(1 - e^2)
Sigma = (2 - gamma) * M_CO / (2pi * a * a_c) * (a/a_c)^(1 - gamma) * exp(- (a/a_c)^(2 - gamma)) * (1 + e * cos(phi - w))/(sqrt(1 + 2 * e * cos(phi - w)) * sqrt(1 - e^2))
return Sigma * n
end
n_nobin(r::Float64, phi::Float64, z::Float64, pars::Parameters) = n_nobin(r, phi, z, pars.M_star * M_sun, pars.a_c * AU, pars.T_10, pars.q, pars.gamma, pars.M_CO * M_earth, pars.e, pars.w * deg)
# Calculate the mass in each ring using a_c and M_co
function ring_mass(as::Vector{Float64}, a_c::Float64, M_CO::Float64, gamma::Float64)
a = as ./ a_c
ms = M_CO .* a.^(-gamma) .* exp(-a.^(2 - gamma)) / sum(a.^(-gamma) .* exp(-a.^(2 - gamma)))
return ms
end
ring_mass(grid, pars) = ring_mass(grid.as, pars.a_c * AU, pars.M_CO * M_earth, pars.gamma)
function rho_dust(r::Float64, z::Float64, pars::Parameters)
nCO = n_CO(r, z, pars) # number of CO molecules per cm^3
# Convert from nCO to nH2
nH2 = nCO / 7.e-5 # number density ratio
# Convert from nH2 (assuming nH2 ~ nGas ) to mGas
mGas = constants.m0 * nH2 # [g]
# Convert from mGas to mDust using Gas/Dust ratio of 100
mDust = mGas * 0.01 # [g]
return mDust
end
# Ksi is microturbulent broadening width in units of km/s. Output of this function
# is in cm/s for RADMC (RADMC manual, eqn 7.12)
function microturbulence(ksi::Float64)
return ksi * 1.e5 # convert from km/s to cm/s
end
microturbulence(pars::Parameters) = microturbulence(pars.ksi)
function write_model(pars::Parameters, basedir::AbstractString, grid::Grid)
# numberdens_co.inp
fdens = open(basedir * "numberdens_co.inp", "w")
@printf(fdens, "%d\n", 1) #iformat
@printf(fdens, "%d\n", grid.ncells)
# gas_velocity.inp
fvel = open(basedir * "gas_velocity.inp", "w")
@printf(fvel, "%d\n", 1) #iformat
@printf(fvel, "%d\n", grid.ncells)
# gas_temperature.inp
ftemp = open(basedir * "gas_temperature.inp", "w")
@printf(ftemp, "%d\n", 1) #iformat
@printf(ftemp, "%d\n", grid.ncells)
# microturbulence.inp
fmicro = open(basedir * "microturbulence.inp", "w")
@printf(fmicro, "%d\n", 1) #iformat
@printf(fmicro, "%d\n", grid.ncells)
# Now, we will need to write the three other files as a function of grid position.
# Therefore we will do *one* loop over these indices, calculate the required value,
# and write it to the appropriate file.
# Calculate the mass in each ring using a_c and M_co
#Looping over the cell centers
for phi in grid.phis
# Calculate the ellipse bin-edges
# R_edges = grid.As .* (1 - pars.e^2)/(1 + pars.e * cos(phi - pars.w * deg))
#
# # println("grid.As", grid.As)
# # println("R_edges", R_edges)
# # println("grid.rs", grid.rs)
#
# # Determine the array of bin indices which correspond to each point in grid.rs
# inds = get_bins(R_edges, grid.rs)
#
# # Calculate all following phi-dependent quantities in vectorized form:
# # m_i, a_i, dr_i, Sigma_CO_i
#
# m_rings = ring_mass(grid, pars)
#
# # Calculate arrays of these values for each radius
# ms = [m_rings[ind] for ind in inds]
# as = [grid.as[ind] for ind in inds]
# das = [grid.a_widths[ind] for ind in inds]
#
# drs = das .* (1 - pars.e^2)/(1 + pars.e * cos(phi - pars.w * deg))
#
# lambdas = ms ./ (2pi .* as) .* (sqrt(1 - pars.e^2) ./ sqrt(1 + 2 * pars.e .* cos(phi - pars.w * deg) + pars.e^2))
#
# Sigmas = lambdas ./ drs
for theta in grid.thetas
for (i,r) in enumerate(grid.rs)
#Convert from spherical to cylindrical coordinates
z = r * cos(theta)
r_cyl = r * sin(theta)
a = r_cyl * (1 + pars.e * cos(phi - pars.w * deg))/(1 - pars.e^2)
#Calculate unscaled density as function of (r_cyl, z)
# n = Sigmas[i] * n_CO(r_cyl, z, pars)
v = velocity(phi, a, pars)
# @printf(fdens, "%.9e\n", n)
@printf(fdens, "%.9e\n", n_nobin(r_cyl, phi, z, pars))
@printf(fvel, "%.9e %.9e %.9e\n", v[1], v[2], v[3])
@printf(ftemp, "%.9e\n", temperature(r_cyl, pars))
@printf(fmicro, "%.9e\n", microturbulence(pars))
end
end
end
close(fdens)
close(fvel)
close(ftemp)
close(fmicro)
end
function write_dust(pars::Parameters, basedir::AbstractString, grid::Grid)
fdens = open(basedir * "dust_density.inp", "w")
@printf(fdens, "%d\n", 1) #iformat
@printf(fdens, "%d\n", grid.ncells)
@printf(fdens, "%d\n", 1) # number of dust species
for phi in grid.phis
for theta in grid.thetas
for r in grid.rs
#Convert from spherical to cylindrical coordinates
z = r * cos(theta)
r_cyl = r * sin(theta)
@printf(fdens, "%.9e\n", rho_dust(r_cyl, z, pars))
end
end
end
close(fdens)
end
end
|
{"hexsha": "c80fb6848192c89d5a1dba49bb4cd5bfd418f89f", "size": 13095, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "attic/emodel.jl", "max_stars_repo_name": "elnjensen/DiskJockey", "max_stars_repo_head_hexsha": "ef618d27c2aff9b0540b0e00035b9a4dbfea1968", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "attic/emodel.jl", "max_issues_repo_name": "elnjensen/DiskJockey", "max_issues_repo_head_hexsha": "ef618d27c2aff9b0540b0e00035b9a4dbfea1968", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "attic/emodel.jl", "max_forks_repo_name": "elnjensen/DiskJockey", "max_forks_repo_head_hexsha": "ef618d27c2aff9b0540b0e00035b9a4dbfea1968", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5514511873, "max_line_length": 194, "alphanum_fraction": 0.6018327606, "num_tokens": 4154}
|
import torch
import torch.nn as nn
from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, ReLU, Sigmoid, Dropout, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
import copy
import numpy as np
from collections import namedtuple
from GraphConvolutionNetwork import GCN, GCNwithIntraAndInterMatrix
from Model import CountMeanOfFeature, CountMeanAndCovOfFeature, CountMeanOfFeatureInCluster
# Support: ['IR_18', 'IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(
channels, channels // reduction, kernel_size=1, padding=0, bias=False)
nn.init.xavier_uniform_(self.fc1.weight.data)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(
channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR(nn.Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(nn.Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
SEModule(depth, 16)
)
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
'''A named tuple describing a ResNet block.'''
def get_block(in_channel, depth, num_units, stride=2):
return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
if num_layers == 18:
blocks = [
get_block(in_channel=64, depth=64, num_units=2),
get_block(in_channel=64, depth=128, num_units=2),
get_block(in_channel=128, depth=256, num_units=2),
get_block(in_channel=256, depth=512, num_units=2)
]
elif num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
    else:
        raise ValueError("num_layers should be 18, 50, 100 or 152, got {}".format(num_layers))
    return blocks
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1 or classname.find('ConvTranspose2d') != -1:
nn.init.kaiming_uniform_(m.weight)
nn.init.zeros_(m.bias)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight, 1.0, 0.02)
nn.init.zeros_(m.bias)
elif classname.find('Linear') != -1:
nn.init.xavier_normal_(m.weight)
nn.init.zeros_(m.bias)
class Backbone(nn.Module):
def __init__(self, numOfLayer, useIntraGCN=True, useInterGCN=True, useRandomMatrix=False, useAllOneMatrix=False, useCov=False, useCluster=False, class_num = 7):
super(Backbone, self).__init__()
self.useIntraGCN = useIntraGCN
self.useInterGCN = useInterGCN
unit_module = bottleneck_IR
        self.input_layer = Sequential(Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3), stride=(1,1), padding=(1,1), bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
blocks = get_blocks(numOfLayer)
self.layer1 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[0]]) #get_block(in_channel=64, depth=64, num_units=3)])
self.layer2 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[1]]) #get_block(in_channel=64, depth=128, num_units=4)])
self.layer3 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[2]]) #get_block(in_channel=128, depth=256, num_units=14)])
self.layer4 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[3]]) #get_block(in_channel=256, depth=512, num_units=3)])
self.output_layer = Sequential(nn.Conv2d(in_channels=512, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1,1)))
cropNet_modules = []
cropNet_blocks = [get_block(in_channel=128, depth=256, num_units=2), get_block(in_channel=256, depth=512, num_units=2)]
for block in cropNet_blocks:
for bottleneck in block:
cropNet_modules.append(
unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
cropNet_modules+=[nn.Conv2d(in_channels=512, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=(1,1)), nn.ReLU()]
self.Crop_Net = nn.ModuleList([ copy.deepcopy(nn.Sequential(*cropNet_modules)) for i in range(5) ])
self.fc = nn.Linear(64 + 320, class_num)
self.fc.apply(init_weights)
self.loc_fc = nn.Linear(320, class_num)
self.loc_fc.apply(init_weights)
self.GAP = nn.AdaptiveAvgPool2d((1,1))
if self.useIntraGCN and self.useInterGCN:
#self.GCN = GCN(64, 128, 64)
self.GCN = GCNwithIntraAndInterMatrix(64, 128, 64, useIntraGCN=useIntraGCN, useInterGCN=useInterGCN, useRandomMatrix=useRandomMatrix, useAllOneMatrix=useAllOneMatrix)
self.SourceMean = (CountMeanAndCovOfFeature(64+320) if useCov else CountMeanOfFeature(64+320)) if not useCluster else CountMeanOfFeatureInCluster(64+320)
self.TargetMean = (CountMeanAndCovOfFeature(64+320) if useCov else CountMeanOfFeature(64+320)) if not useCluster else CountMeanOfFeatureInCluster(64+320)
self.SourceBN = BatchNorm1d(64+320)
self.TargetBN = BatchNorm1d(64+320)
def classify(self, imgs, locations):
featureMap = self.input_layer(imgs)
featureMap1 = self.layer1(featureMap) # Batch * 64 * 56 * 56
featureMap2 = self.layer2(featureMap1) # Batch * 128 * 28 * 28
featureMap3 = self.layer3(featureMap2) # Batch * 256 * 14 * 14
        featureMap4 = self.layer4(featureMap3)     # Batch * 512 * 7 * 7
global_feature = self.output_layer(featureMap4).view(featureMap.size(0), -1) # Batch * 64
loc_feature = self.crop_featureMap(featureMap2, locations) # Batch * 320
feature = torch.cat((global_feature, loc_feature), 1) # Batch * (64+320)
# GCN
if self.useIntraGCN and self.useInterGCN:
if self.training:
feature = self.SourceMean(feature)
feature = torch.cat( ( self.SourceBN(feature), self.TargetBN(self.TargetMean.getSample(feature.detach())) ), 1) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
feature = feature.view(feature.size(0), -1).narrow(1, 0, 64+320) # Batch * (64+320)
loc_feature = feature.narrow(1, 64, 320) # Batch * 320
pred = self.fc(feature) # Batch * class_num
loc_pred = self.loc_fc(loc_feature) # Batch * class_num
return feature, pred, loc_pred
def transfer(self, imgs, locations, domain='Target'):
assert domain in ['Source', 'Target'], 'Parameter domain should be Source or Target.'
featureMap = self.input_layer(imgs)
featureMap1 = self.layer1(featureMap) # Batch * 64 * 56 * 56
featureMap2 = self.layer2(featureMap1) # Batch * 128 * 28 * 28
featureMap3 = self.layer3(featureMap2) # Batch * 256 * 14 * 14
        featureMap4 = self.layer4(featureMap3)     # Batch * 512 * 7 * 7
global_feature = self.output_layer(featureMap4).view(featureMap.size(0), -1) # Batch * 64
loc_feature = self.crop_featureMap(featureMap2, locations) # Batch * 320
feature = torch.cat((global_feature, loc_feature), 1) # Batch * (64+320)
if self.useIntraGCN and self.useInterGCN:
if self.training:
# Compute Feature
SourceFeature = feature.narrow(0, 0, feature.size(0)//2) # Batch/2 * (64+320)
TargetFeature = feature.narrow(0, feature.size(0)//2, feature.size(0)//2) # Batch/2 * (64+320)
SourceFeature = self.SourceMean(SourceFeature) # Batch/2 * (64+320)
TargetFeature = self.TargetMean(TargetFeature) # Batch/2 * (64+320)
SourceFeature = self.SourceBN(SourceFeature) # Batch/2 * (64+320)
TargetFeature = self.TargetBN(TargetFeature) # Batch/2 * (64+320)
# Compute Mean
SourceMean = self.SourceMean.getSample(TargetFeature.detach()) # Batch/2 * (64+320)
TargetMean = self.TargetMean.getSample(SourceFeature.detach()) # Batch/2 * (64+320)
SourceMean = self.SourceBN(SourceMean) # Batch/2 * (64+320)
TargetMean = self.TargetBN(TargetMean) # Batch/2 * (64+320)
# GCN
feature = torch.cat( ( torch.cat((SourceFeature,TargetMean), 1), torch.cat((SourceMean,TargetFeature), 1) ), 0) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
feature = feature.view(feature.size(0), -1) # Batch * (64+320 + 64+320)
feature = torch.cat( (feature.narrow(0, 0, feature.size(0)//2).narrow(1, 0, 64+320), feature.narrow(0, feature.size(0)//2, feature.size(0)//2).narrow(1, 64+320, 64+320) ), 0) # Batch * (64+320)
loc_feature = feature.narrow(1, 64, 320) # Batch * 320
pred = self.fc(feature) # Batch * class_num
loc_pred = self.loc_fc(loc_feature) # Batch * class_num
return feature, pred, loc_pred
# Inference
if domain=='Source':
SourceFeature = feature # Batch * (64+320)
TargetMean = self.TargetMean.getSample(SourceFeature.detach()) # Batch * (64+320)
SourceFeature = self.SourceBN(SourceFeature) # Batch * (64+320)
TargetMean = self.TargetBN(TargetMean) # Batch * (64+320)
feature = torch.cat((SourceFeature,TargetMean), 1) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
elif domain=='Target':
TargetFeature = feature # Batch * (64+320)
SourceMean = self.SourceMean.getSample(TargetFeature.detach()) # Batch * (64+320)
SourceMean = self.SourceBN(SourceMean) # Batch * (64+320)
TargetFeature = self.TargetBN(TargetFeature) # Batch * (64+320)
feature = torch.cat((SourceMean,TargetFeature), 1) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
feature = feature.view(feature.size(0), -1)# Batch * (64+320 + 64+320)
if domain=='Source':
feature = feature.narrow(1, 0, 64+320)# Batch * (64+320)
elif domain=='Target':
feature = feature.narrow(1, 64+320, 64+320) # Batch * (64+320)
loc_feature = feature.narrow(1, 64, 320) # Batch * 320
pred = self.fc(feature) # Batch * class_num
loc_pred = self.loc_fc(loc_feature) # Batch * class_num
return feature, pred, loc_pred
feature = feature.view(feature.size(0), -1).narrow(1, 0, 64+320) # Batch * (64+320)
loc_feature = feature.narrow(1, 64, 320) # Batch * 320
pred = self.fc(feature) # Batch * class_num
loc_pred = self.loc_fc(loc_feature) # Batch * class_num
return feature, pred, loc_pred
def forward(self, imgs, locations, flag=True, domain='Target'):
if flag:
return self.classify(imgs, locations)
return self.transfer(imgs, locations, domain)
def output_num(self):
return 64*6
def get_parameters(self):
if self.useIntraGCN and self.useInterGCN:
parameter_list = [ {"params":self.input_layer.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer1.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer2.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer3.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer4.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.output_layer.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.loc_fc.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.Crop_Net.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.GCN.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.SourceBN.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.TargetBN.parameters(), "lr_mult":10, 'decay_mult':2}]
else:
parameter_list = [ {"params":self.input_layer.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer1.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer2.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer3.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer4.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.output_layer.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.loc_fc.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.Crop_Net.parameters(), "lr_mult":10, 'decay_mult':2}]
return parameter_list
def crop_featureMap(self, featureMap, locations):
batch_size = featureMap.size(0)
map_ch = featureMap.size(1)
map_len = featureMap.size(2)
grid_ch = map_ch
grid_len = 7 # 14, 6, 4
feature_list = []
for i in range(5):
grid_list = []
for j in range(batch_size):
w_min = locations[j,i,0]-int(grid_len/2)
w_max = locations[j,i,0]+int(grid_len/2)
h_min = locations[j,i,1]-int(grid_len/2)
h_max = locations[j,i,1]+int(grid_len/2)
map_w_min = max(0, w_min)
map_w_max = min(map_len-1, w_max)
map_h_min = max(0, h_min)
map_h_max = min(map_len-1, h_max)
grid_w_min = max(0, 0-w_min)
grid_w_max = grid_len + min(0, map_len-1-w_max)
grid_h_min = max(0, 0-h_min)
grid_h_max = grid_len + min(0, map_len-1-h_max)
grid = torch.zeros(grid_ch, grid_len, grid_len)
if featureMap.is_cuda:
grid = grid.cuda()
grid[:, grid_h_min:grid_h_max+1, grid_w_min:grid_w_max+1] = featureMap[j, :, map_h_min:map_h_max+1, map_w_min:map_w_max+1]
grid_list.append(grid)
feature = torch.stack(grid_list, dim=0)
feature_list.append(feature)
        # feature_list: 5 * [ batch_size * channel * grid_len * grid_len ]
output_list = []
for i in range(5):
output = self.Crop_Net[i](feature_list[i])
output = self.GAP(output)
output_list.append(output)
loc_feature = torch.stack(output_list, dim=1) # batch_size * 5 * 64 * 1 * 1
loc_feature = loc_feature.view(batch_size, -1) # batch_size * 320
return loc_feature
class Backbone_old(nn.Module):
def __init__(self, numOfLayer, useIntraGCN=True, useInterGCN=True, useRandomMatrix=False, useAllOneMatrix=False, useCov=False, useCluster=False, class_num = 7):
        super(Backbone_old, self).__init__()
unit_module = bottleneck_IR
self.input_layer = Sequential(Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3), stride=(1,1), padding=(1,1), bias=False),
BatchNorm2d(64),
PReLU(64))
blocks = get_blocks(numOfLayer)
self.layer1 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[0]]) #get_block(in_channel=64, depth=64, num_units=3)])
self.layer2 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[1]]) #get_block(in_channel=64, depth=128, num_units=4)])
self.layer3 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[2]]) #get_block(in_channel=128, depth=256, num_units=14)])
self.layer4 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[3]]) #get_block(in_channel=256, depth=512, num_units=3)])
self.output_layer = Sequential(nn.Conv2d(in_channels=512, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1,1)))
cropNet_modules = []
cropNet_blocks = [get_block(in_channel=128, depth=256, num_units=2), get_block(in_channel=256, depth=512, num_units=2)]
for block in cropNet_blocks:
for bottleneck in block:
cropNet_modules.append(
unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
cropNet_modules+=[nn.Conv2d(in_channels=512, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=(1,1)), nn.ReLU()]
self.Crop_Net = nn.ModuleList([ copy.deepcopy(nn.Sequential(*cropNet_modules)) for i in range(5) ])
self.fc = nn.Linear(64 + 320, class_num)
self.fc.apply(init_weights)
self.loc_fc = nn.Linear(320, class_num)
self.loc_fc.apply(init_weights)
self.GAP = nn.AdaptiveAvgPool2d((1,1))
#self.GCN = GCN(64, 128, 64)
self.GCN = GCNwithIntraAndInterMatrix(64, 128, 64, useIntraGCN=useIntraGCN, useInterGCN=useInterGCN, useRandomMatrix=useRandomMatrix, useAllOneMatrix=useAllOneMatrix)
self.SourceMean = (CountMeanAndCovOfFeature(64+320) if useCov else CountMeanOfFeature(64+320)) if not useCluster else CountMeanOfFeatureInCluster(64+320)
self.TargetMean = (CountMeanAndCovOfFeature(64+320) if useCov else CountMeanOfFeature(64+320)) if not useCluster else CountMeanOfFeatureInCluster(64+320)
self.SourceBN = BatchNorm1d(64+320)
self.TargetBN = BatchNorm1d(64+320)
def classify(self, imgs, locations):
featureMap = self.input_layer(imgs)
featureMap1 = self.layer1(featureMap) # Batch * 64 * 56 * 56
featureMap2 = self.layer2(featureMap1) # Batch * 128 * 28 * 28
featureMap3 = self.layer3(featureMap2) # Batch * 256 * 14 * 14
        featureMap4 = self.layer4(featureMap3)     # Batch * 512 * 7 * 7
global_feature = self.output_layer(featureMap4).view(featureMap.size(0), -1) # Batch * 64
loc_feature = self.crop_featureMap(featureMap2, locations) # Batch * 320
feature = torch.cat((global_feature, loc_feature), 1) # Batch * (64+320)
# GCN
if self.training:
feature = self.SourceMean(feature)
feature = torch.cat( ( self.SourceBN(feature), self.TargetBN(self.TargetMean.getSample(feature.detach())) ), 1) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
feature = feature.view(feature.size(0), -1).narrow(1, 0, 64+320) # Batch * (64+320)
loc_feature = feature.narrow(1, 64, 320) # Batch * 320
pred = self.fc(feature) # Batch * class_num
loc_pred = self.loc_fc(loc_feature) # Batch * class_num
return feature, pred, loc_pred
def transfer(self, imgs, locations, domain='Target'):
assert domain in ['Source', 'Target'], 'Parameter domain should be Source or Target.'
featureMap = self.input_layer(imgs)
featureMap1 = self.layer1(featureMap) # Batch * 64 * 56 * 56
featureMap2 = self.layer2(featureMap1) # Batch * 128 * 28 * 28
featureMap3 = self.layer3(featureMap2) # Batch * 256 * 14 * 14
        featureMap4 = self.layer4(featureMap3)     # Batch * 512 * 7 * 7
global_feature = self.output_layer(featureMap4).view(featureMap.size(0), -1) # Batch * 64
loc_feature = self.crop_featureMap(featureMap2, locations) # Batch * 320
feature = torch.cat((global_feature, loc_feature), 1) # Batch * (64+320)
if self.training:
# Compute Feature
SourceFeature = feature.narrow(0, 0, feature.size(0)//2) # Batch/2 * (64+320)
TargetFeature = feature.narrow(0, feature.size(0)//2, feature.size(0)//2) # Batch/2 * (64+320)
SourceFeature = self.SourceMean(SourceFeature) # Batch/2 * (64+320)
TargetFeature = self.TargetMean(TargetFeature) # Batch/2 * (64+320)
SourceFeature = self.SourceBN(SourceFeature) # Batch/2 * (64+320)
TargetFeature = self.TargetBN(TargetFeature) # Batch/2 * (64+320)
# Compute Mean
SourceMean = self.SourceMean.getSample(TargetFeature.detach()) # Batch/2 * (64+320)
TargetMean = self.TargetMean.getSample(SourceFeature.detach()) # Batch/2 * (64+320)
SourceMean = self.SourceBN(SourceMean) # Batch/2 * (64+320)
TargetMean = self.TargetBN(TargetMean) # Batch/2 * (64+320)
# GCN
feature = torch.cat( ( torch.cat((SourceFeature,TargetMean), 1), torch.cat((SourceMean,TargetFeature), 1) ), 0) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
feature = feature.view(feature.size(0), -1) # Batch * (64+320 + 64+320)
feature = torch.cat( (feature.narrow(0, 0, feature.size(0)//2).narrow(1, 0, 64+320), feature.narrow(0, feature.size(0)//2, feature.size(0)//2).narrow(1, 64+320, 64+320) ), 0) # Batch * (64+320)
loc_feature = feature.narrow(1, 64, 320) # Batch * 320
pred = self.fc(feature) # Batch * class_num
loc_pred = self.loc_fc(loc_feature) # Batch * class_num
return feature, pred, loc_pred
# Inference
if domain=='Source':
SourceFeature = feature # Batch * (64+320)
TargetMean = self.TargetMean.getSample(SourceFeature.detach()) # Batch * (64+320)
SourceFeature = self.SourceBN(SourceFeature) # Batch * (64+320)
TargetMean = self.TargetBN(TargetMean) # Batch * (64+320)
feature = torch.cat((SourceFeature,TargetMean), 1) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
elif domain=='Target':
TargetFeature = feature # Batch * (64+320)
SourceMean = self.SourceMean.getSample(TargetFeature.detach()) # Batch * (64+320)
SourceMean = self.SourceBN(SourceMean) # Batch * (64+320)
TargetFeature = self.TargetBN(TargetFeature) # Batch * (64+320)
feature = torch.cat((SourceMean,TargetFeature), 1) # Batch * (64+320 + 64+320)
feature = self.GCN(feature.view(feature.size(0), 12, -1)) # Batch * 12 * 64
feature = feature.view(feature.size(0), -1)# Batch * (64+320 + 64+320)
if domain=='Source':
feature = feature.narrow(1, 0, 64+320)# Batch * (64+320)
elif domain=='Target':
feature = feature.narrow(1, 64+320, 64+320) # Batch * (64+320)
loc_feature = feature.narrow(1, 64, 320)# Batch * 320
pred = self.fc(feature) # Batch * class_num
loc_pred = self.loc_fc(loc_feature) # Batch * class_num
return feature, pred, loc_pred
def forward(self, imgs, locations, flag=True, domain='Target'):
if flag:
return self.classify(imgs, locations)
return self.transfer(imgs, locations, domain)
def output_num(self):
return 64*6
def get_parameters(self):
parameter_list = [ {"params":self.input_layer.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer1.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer2.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer3.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.layer4.parameters(), "lr_mult":1, 'decay_mult':2}, {"params":self.output_layer.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.loc_fc.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.Crop_Net.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.GCN.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.SourceBN.parameters(), "lr_mult":10, 'decay_mult':2}, {"params":self.TargetBN.parameters(), "lr_mult":10, 'decay_mult':2}]
return parameter_list
def crop_featureMap(self, featureMap, locations):
batch_size = featureMap.size(0)
map_ch = featureMap.size(1)
map_len = featureMap.size(2)
grid_ch = map_ch
grid_len = 7 # 14, 6, 4
feature_list = []
for i in range(5):
grid_list = []
for j in range(batch_size):
w_min = locations[j,i,0]-int(grid_len/2)
w_max = locations[j,i,0]+int(grid_len/2)
h_min = locations[j,i,1]-int(grid_len/2)
h_max = locations[j,i,1]+int(grid_len/2)
map_w_min = max(0, w_min)
map_w_max = min(map_len-1, w_max)
map_h_min = max(0, h_min)
map_h_max = min(map_len-1, h_max)
grid_w_min = max(0, 0-w_min)
grid_w_max = grid_len + min(0, map_len-1-w_max)
grid_h_min = max(0, 0-h_min)
grid_h_max = grid_len + min(0, map_len-1-h_max)
grid = torch.zeros(grid_ch, grid_len, grid_len)
if featureMap.is_cuda:
grid = grid.cuda()
grid[:, grid_h_min:grid_h_max+1, grid_w_min:grid_w_max+1] = featureMap[j, :, map_h_min:map_h_max+1, map_w_min:map_w_max+1]
grid_list.append(grid)
feature = torch.stack(grid_list, dim=0)
feature_list.append(feature)
        # feature_list: 5 * [ batch_size * channel * grid_len * grid_len ]
output_list = []
for i in range(5):
output = self.Crop_Net[i](feature_list[i])
output = self.GAP(output)
output_list.append(output)
loc_feature = torch.stack(output_list, dim=1) # batch_size * 5 * 64 * 1 * 1
loc_feature = loc_feature.view(batch_size, -1) # batch_size * 320
return loc_feature
class Backbone_onlyGlobal(nn.Module):
def __init__(self,numOfLayer):
super(Backbone_onlyGlobal, self).__init__()
unit_module = bottleneck_IR
        self.input_layer = Sequential(Conv2d(in_channels=3, out_channels=64, kernel_size=(3, 3), stride=(1,1), padding=(1,1), bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
blocks = get_blocks(numOfLayer)
self.layer1 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[0]]) #get_block(in_channel=64, depth=64, num_units=3)])
self.layer2 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[1]]) #get_block(in_channel=64, depth=128, num_units=4)])
self.layer3 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[2]]) #get_block(in_channel=128, depth=256, num_units=14)])
self.layer4 = Sequential(*[unit_module(bottleneck.in_channel,bottleneck.depth,bottleneck.stride) for bottleneck in blocks[3]]) #get_block(in_channel=256, depth=512, num_units=3)])
self.output_layer = Sequential(nn.Conv2d(in_channels=512, out_channels=64, kernel_size=(3,3), stride=(1,1), padding=(1,1)),
nn.ReLU(),
nn.AdaptiveAvgPool2d((1,1)))
self.fc = nn.Linear(64, 7)
self.fc.apply(init_weights)
def classify(self, imgs, locations):
featureMap = self.input_layer(imgs)
featureMap1 = self.layer1(featureMap) # Batch * 64 * 56 * 56
featureMap2 = self.layer2(featureMap1) # Batch * 128 * 28 * 28
featureMap3 = self.layer3(featureMap2) # Batch * 256 * 14 * 14
featureMap4 = self.layer4(featureMap3) # Batch * 512 * 7 * 7
feature = self.output_layer(featureMap4).view(featureMap.size(0), -1) # Batch * 64
pred = self.fc(feature) # Batch * 7
loc_pred = None
return feature, pred, loc_pred
def transfer(self, imgs, locations, domain='Target'):
assert domain in ['Source', 'Target'], 'Parameter domain should be Source or Target.'
featureMap = self.input_layer(imgs)
featureMap1 = self.layer1(featureMap) # Batch * 64 * 56 * 56
featureMap2 = self.layer2(featureMap1) # Batch * 128 * 28 * 28
featureMap3 = self.layer3(featureMap2) # Batch * 256 * 14 * 14
featureMap4 = self.layer4(featureMap3) # Batch * 512 * 7 * 7
feature = self.output_layer(featureMap4).view(featureMap.size(0), -1) # Batch * 64
pred = self.fc(feature) # Batch * 7
loc_pred = None
return feature, pred, loc_pred
def forward(self, imgs, locations, flag=True, domain='Target'):
if flag:
return self.classify(imgs, locations)
return self.transfer(imgs, locations, domain)
def output_num(self):
return 64
def get_parameters(self):
parameter_list = [ {"params":self.input_layer.parameters(), "lr_mult":1, 'decay_mult':2}, \
{"params":self.layer1.parameters(), "lr_mult":1, 'decay_mult':2}, \
{"params":self.layer2.parameters(), "lr_mult":1, 'decay_mult':2}, \
{"params":self.layer3.parameters(), "lr_mult":1, 'decay_mult':2}, \
{"params":self.layer4.parameters(), "lr_mult":1, 'decay_mult':2}, \
{"params":self.output_layer.parameters(), "lr_mult":10, 'decay_mult':2}, \
{"params":self.fc.parameters(), "lr_mult":10, 'decay_mult':2}, \
]
return parameter_list
def IR(numOfLayer, useIntraGCN, useInterGCN, useRandomMatrix, useAllOneMatrix, useCov, useCluster, class_num):
"""Constructs a ir-18/ir-50 model."""
model = Backbone(numOfLayer, useIntraGCN, useInterGCN, useRandomMatrix, useAllOneMatrix, useCov, useCluster, class_num)
return model
def IR_onlyGlobal(numOfLayer):
"""Constructs a ir-18/ir-50 model."""
model = Backbone_onlyGlobal(numOfLayer)
return model
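# Hypothetical usage sketch (shapes are assumptions: 112x112 RGB crops with 5
# facial landmarks given on the 28x28 feature map; not part of the original code):
#
#   model = IR(50, useIntraGCN=True, useInterGCN=True, useRandomMatrix=False,
#              useAllOneMatrix=False, useCov=False, useCluster=False, class_num=7)
#   imgs = torch.randn(8, 3, 112, 112)           # batch of face crops
#   locations = torch.randint(4, 24, (8, 5, 2))  # landmark (w, h) on the 28x28 map
#   feature, pred, loc_pred = model(imgs, locations, flag=True)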
|
{"hexsha": "a43e38dd35d6c89c78fab6851824c3b455a2c30b", "size": 34817, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/ResNet.py", "max_stars_repo_name": "meghbhalerao/da-fer", "max_stars_repo_head_hexsha": "058dfb3a99aea93af934de8d5f0ef23cd2a85c2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-07T07:07:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T07:07:41.000Z", "max_issues_repo_path": "training/ResNet.py", "max_issues_repo_name": "meghbhalerao/da-fer", "max_issues_repo_head_hexsha": "058dfb3a99aea93af934de8d5f0ef23cd2a85c2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training/ResNet.py", "max_forks_repo_name": "meghbhalerao/da-fer", "max_forks_repo_head_hexsha": "058dfb3a99aea93af934de8d5f0ef23cd2a85c2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-27T14:55:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-27T14:55:47.000Z", "avg_line_length": 50.6797671033, "max_line_length": 840, "alphanum_fraction": 0.5910618376, "include": true, "reason": "import numpy", "num_tokens": 9053}
|
#========== tuples ==========#
struct TupleVector{T,DT,L} <: AbstractVector{T}
data::DT
TupleVector(tup::DT) where {DT <: Tuple} =
new{mapreduce(typeof, Base.promote_typejoin, tup), DT, length(tup)}(tup)
end
Base.size(v::TupleVector{T,DT,L}) where {T,DT,L} = (L,)
Base.@propagate_inbounds Base.getindex(v::TupleVector, i::Integer) = getindex(v.data, i)
Core.Tuple(v::TupleVector) = v.data
Base.iterate(v::TupleVector) = iterate(v.data)
Base.iterate(v::TupleVector, state) = iterate(v.data, state)
transmute(tup::Tuple, perm) = transmute(TupleVector(tup), perm)
transmute(tup::Tuple, ::Val{perm}) where {perm} = transmute(TupleVector(tup), Val(perm))
transmutedims(tup::Tuple, perm=(2,1)) = collect(transmute(tup, perm))
function Base.showarg(io::IO, v::TupleVector{T,DT}, toplevel) where {T,DT}
if all(==(T), DT.parameters)
toplevel || print(io, "::")
print(io, "TupleVector{", T, "}")
else
print(io, "TupleVector(::", DT, ")")
toplevel && print(io, " with eltype ", T)
end
end
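# Informal example (assuming the transmute semantics defined elsewhere in this
# package): transmutedims((1, 2, 3)) with the default perm=(2,1) should
# materialize the tuple as a 1x3 matrix, analogous to permutedims on a vector.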
#========== dropdims ==========#
# Especially when dropping a trivial dimension, we don't want to produce
# reshape(TransmutedDimsArray(::Array ...
Base.dropdims(A::TransmutedDimsArray; dims) = _dropdims(A, dims...)
_dropdims(A) = A
function _dropdims(A::TransmutedDimsArray{T,N,P}, d::Int, dims...) where {T,N,P}
if P[d]==0
perm = ntuple(n -> n<d ? P[n] : P[n+1], N-1)
newdims = map(n -> n<d ? n : n-1, dims)
_dropdims(transmute(A.parent, perm), newdims...)
else
perm = ntuple(N-1) do n
Pn = n<d ? P[n] : P[n+1]
Pn<d ? Pn : Pn-1
end
newdims = map(n -> n<d ? n : n-1, dims)
_dropdims(transmute(dropdims(A.parent; dims=P[d]), perm), newdims...)
end
end
#=
@btime dropdims($(transmute(rand(3,3), (2,0,1))), dims=2)
@code_warntype dropdims(transmute(rand(3,3), (2,0,1)), dims=2)
@btime dropdims($(transmute(rand(3,1,3), (3,2,1))), dims=2)
@code_warntype dropdims(transmute(rand(3,1,3), (3,2,1)), dims=2)
=#
#========== reshape ==========#
function Base.vec(A::TransmutedDimsArray{T,N,P}) where {T,N,P}
if increasing_or_zero(P) # the case which transmute() will avoid creating
vec(A.parent)
else
reshape(A, length(A))
end
end
#========== transpose, etc ==========#
Base.transpose(A::TransmutedDimsArray{<:Number, 1}) = transmute(A, Val((2,1)))
Base.transpose(A::TransmutedDimsArray{<:Number, 2}) = transmute(A, Val((2,1)))
Base.adjoint(A::TransmutedDimsArray{<:Real, 1}) = transmute(A, Val((2,1)))
Base.adjoint(A::TransmutedDimsArray{<:Real, 2}) = transmute(A, Val((2,1)))
Base.PermutedDimsArray(A::TransmutedDimsArray, perm) = transmute(A, perm)
#========== reductions ==========#
# Same strategy as in https://github.com/JuliaLang/julia/pull/39513
function Base.mapreducedim!(f, op, B::AbstractArray, A::TransmutedDimsArray{T,N,P,Q}) where {T,N,P,Q}
if unique_or_zero(Val(P))
# any dense transmutation
Base.mapreducedim!(f, op, transmute(B, Q), parent(A)) # using Val(Q) changes nothing
else
# default next step
Base._mapreducedim!(f, op, B, A)
end
B
end
if VERSION > v"1.6-"
Base._mapreduce_dim(f, op, init::Base._InitialValue, A::TransmutedDimsArray, dims::Colon) =
_mapreduce_scalar(f, op, init, A, dims)
else
Base._mapreduce_dim(f, op, init::NamedTuple{()}, A::TransmutedDimsArray, dims::Colon) =
_mapreduce_scalar(f, op, init, A, dims)
end
@inline function _mapreduce_scalar(f, op, init, A::TransmutedDimsArray{T,N,P}, dims::Colon) where {T,N,P}
if dims === Colon() && f === identity && op === Base.add_sum
# safe & easy
Base._mapreduce_dim(f, op, init, parent(A), dims)
elseif unique_or_zero(Val(P))
# any dense transmutation
Base._mapreduce_dim(f, op, init, parent(A), dims)
elseif op === Base.add_sum && iszero(f(zero(T)))
# like sum(::Diagonal)
Base._mapreduce_dim(f, op, init, parent(A), dims)
else
# default next step
Base._mapreduce(f, op, IndexStyle(A), A)
end
end
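# Informal note on the branches above (not in the original source): when the
# reduction is a plain sum, or more generally whenever iszero(f(zero(T))), the
# structural zeros introduced by repeated or zero entries in P contribute
# nothing under add_sum, so reducing parent(A) directly yields the same scalar.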
#========== copyto! ==========#
function Base.copyto!(dst::AbstractArray, src::TransmutedDimsArray)
if axes(dst) == axes(src)
copy!(dst, src)
elseif length(dst) == length(src)
copy!(reshape(dst, axes(src)), src) # could save a reshape when increasing_or_zero(P)
elseif length(dst) < length(src)
throw(BoundsError(dst, lastindex(src)))
else
throw(BoundsError(src, lastindex(dst)))
end
dst
end
# @propagate_inbounds
function Base.copy!(dst::AbstractArray, src::TransmutedDimsArray{T,N,P,Q}) where {T,N,P,Q}
    @boundscheck axes(dst) == axes(src) || throw(ArgumentError("arrays must have the same axes for copy! (consider using copyto!)"))
if increasing_or_zero(P) # just a reshape
copyto!(dst, parent(src))
else
if unique_or_zero(P)
_densecopy_permuted!(dst, parent(src), Val(P))
# this is happy to reshape... should it be limited to
else
fill!(dst, zero(T)) # Diagonal-like
_copy_into!(dst, parent(src), Val(P))
end
end
dst
end
# For Arrays, this dispatches to use Strided.jl version. Second best:
@generated function _densecopy_permuted!(dst::DenseArray, src::AbstractArray, val::Val{P}) where {P}
Pminus = filter(!=(0), collect(P))
if 0 in P
SB = [:(axes(src,$p)) for p in Pminus]
Bex = :(reshape(dst, ($(SB...),)))
else
Bex = :dst
end
if sort(Pminus) == 1:ndims(src)
Aex = :src
perm = Tuple(Pminus)
else
SA = [:(axes(src,$d)) for d in 1:ndims(src) if d in Pminus]
Aex = :(reshape(src, ($(SA...),)))
perm = Tuple(sortperm(Pminus))
end
:(permutedims!($Bex, $Aex, $perm); nothing)
end
# Fallback option:
_densecopy_permuted!(dst::AbstractArray, src::AbstractArray, val::Val) =
_copy_into!(dst, src, val)
function _copy_into!(dst::AbstractArray, parent::AbstractArray, ::Val{P}) where {P}
@inbounds @simd for I in CartesianIndices(parent)
J = CartesianIndex(map(p -> p==0 ? 1 : I[p], P))
dst[J] = parent[I]
end
nothing
end
#========== view ==========#
function Base.view(A::TransmutedDimsArray{T,N,P,Q}, inds::Vararg{Union{Int,Colon},N}) where {T,N,P,Q}
if _is_simple(inds, P)
parent_inds = genperm_zero(inds, Q, missing)
view(parent(A), parent_inds...)
else
view(A, Base.to_indices(A, inds)...)
end
end
# Only allow exactly one colon, and require that P is nonzero at the colon's position
@inline function _is_simple(inds::Tuple, P::NTuple{N,Int}) where {N}
sum(map(i -> Int(i isa Colon), inds)) == 1 || return false
n = sum(ntuple(d -> inds[d] isa Colon ? d : 0, N))
return P[n] != 0
end
# @btime TransmuteDims._is_simple((1,2,:,3), (1,0,2,3)) # 0.041 ns
# @inline function _is_simple(inds, P::Tuple)
# count(i -> i isa Colon, inds) == 1 || return false
# n = findfirst(i -> i isa Colon, inds)
# return P[n] != 0
# end
# @btime _is_simple((1,2,:,3), (1,0,2,3)) # 58.365 ns
#========== the end. ==========#
|
{"hexsha": "d42146393e52b651f5802332880100d75597fe94", "size": 7155, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/base.jl", "max_stars_repo_name": "mcabbott/TransmuteDims.jl", "max_stars_repo_head_hexsha": "bcb0bf4dbed353a80562e016532620971662ba67", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-02-26T17:32:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-11T17:37:09.000Z", "max_issues_repo_path": "src/base.jl", "max_issues_repo_name": "mcabbott/TransmuteDims.jl", "max_issues_repo_head_hexsha": "bcb0bf4dbed353a80562e016532620971662ba67", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2019-09-27T11:51:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-08T16:54:14.000Z", "max_forks_repo_path": "src/base.jl", "max_forks_repo_name": "mcabbott/TransmuteDims.jl", "max_forks_repo_head_hexsha": "bcb0bf4dbed353a80562e016532620971662ba67", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:25:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:25:22.000Z", "avg_line_length": 33.125, "max_line_length": 131, "alphanum_fraction": 0.6050314465, "num_tokens": 2212}
|
import numpy as np
import scipy.sparse
import pycuda.gpuarray as gpuarray
from . import cusparse as cs
class MatrixVectorProduct:
"""Perform GPU-based, sparse matrix-vector products."""
def __init__(self, matrix: scipy.sparse.csr_matrix) -> None:
self.m = matrix.shape[0]
self.n = matrix.shape[1]
self.nnz = matrix.nnz
self.csrValA = gpuarray.to_gpu(matrix.data.astype(np.float64))
self.csrRowPtrA = gpuarray.to_gpu(matrix.indptr)
self.csrColIndA = gpuarray.to_gpu(matrix.indices)
self.handle = cs.cusparseCreate()
self.descr = cs.cusparseCreateMatDescr()
def __del__(self) -> None:
if hasattr(self, 'descr'):
cs.cusparseDestroyMatDescr(self.descr)
self.descr = None
if hasattr(self, 'handle'):
cs.cusparseDestroy(self.handle)
self.handle = None
def product(self, x: gpuarray.GPUArray) -> gpuarray.GPUArray:
"""Multiply sparse matrix by dense vector."""
        y = gpuarray.empty(self.m, dtype=x.dtype)  # output has length m, the row count
op = cs.cusparseOperation.CUSPARSE_OPERATION_NON_TRANSPOSE
cs.cusparseDcsrmv(self.handle, op, self.m, self.n, self.nnz, 1.0,
self.descr, self.csrValA, self.csrRowPtrA,
self.csrColIndA, x, 0.0, y)
return y
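# Hypothetical usage sketch (assumes a CUDA context is already active, e.g. via
# `import pycuda.autoinit`; names and sizes chosen for illustration only):
#
#   import pycuda.autoinit  # noqa: F401
#   A = scipy.sparse.random(1000, 1000, density=1e-2, format='csr', dtype=np.float64)
#   mvp = MatrixVectorProduct(A)
#   x = gpuarray.to_gpu(np.random.rand(1000))
#   y = mvp.product(x)  # should match A @ x.get() up to floating-point error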
|
{"hexsha": "74e33975ff8df1b3ed08da53b01061b70d5e1bba", "size": 1329, "ext": "py", "lang": "Python", "max_stars_repo_path": "diffusion_maps/matrix_vector_product.py", "max_stars_repo_name": "felix11/diffusion-maps", "max_stars_repo_head_hexsha": "7f909ac5bdfeafb8e5b69a93cfa7731a315538f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "diffusion_maps/matrix_vector_product.py", "max_issues_repo_name": "felix11/diffusion-maps", "max_issues_repo_head_hexsha": "7f909ac5bdfeafb8e5b69a93cfa7731a315538f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "diffusion_maps/matrix_vector_product.py", "max_forks_repo_name": "felix11/diffusion-maps", "max_forks_repo_head_hexsha": "7f909ac5bdfeafb8e5b69a93cfa7731a315538f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9736842105, "max_line_length": 73, "alphanum_fraction": 0.6297968397, "include": true, "reason": "import numpy,import scipy,import pycuda", "num_tokens": 344}
|
# coding: utf-8
import os
import sys
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
import logging
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
logger = logging.getLogger(__file__)
class Embeddings():
def __init__(self):
self.model = None
def load_model(self, method='word2vec', model_path=''):
        '''Load embedding model including word2vec/fasttext/glove/bert/sentence_transformers.
Input:
method: string. Including "word2vec"/"fasttext"/"glove"/"bert".
model_path: string. Path of model.
Output:
model: model object.
'''
self.method = method
self.model_path = model_path
if model_path == '':
self.model = None
return None
logger.info('Load embedding model...')
if method in ['word2vec','glove']:
from gensim.models import KeyedVectors
if model_path[-4:]=='.txt':
self.model = KeyedVectors.load_word2vec_format(model_path,binary=False).wv
elif model_path[-4:] =='.bin':
self.model = KeyedVectors.load_word2vec_format(model_path,binary=True).wv
else:
self.model = KeyedVectors.load(model_path,mmap='r').wv
return self.model
elif method == 'fasttext':
from gensim.models.wrappers import FastText
self.model = FastText.load_fasttext_format(model_path).wv
return self.model
elif method == 'bert':
from transformers import BertTokenizer, BertModel
self.tokenizer = BertTokenizer.from_pretrained(model_path)
self.model = BertModel.from_pretrained(model_path)
return self.model
elif method == "sentence_transformers":
# https://github.com/UKPLab/sentence-transformers
from sentence_transformers import SentenceTransformer
self.model = SentenceTransformer(model_path)
return self.model
else:
self.model = None
return None
def bow(self, corpus, ngram_range=(1,1), min_df=1):
'''Get BOW (bag of words) embeddings.
Input:
corpus: list of preprocessed strings.
ngram_range: tuple. (min_ngram, max_ngram) means min_ngram<=ngram<=max_ngram
            min_df: int. Minimum frequency of a word.
Output:
embeddings: array of shape [n_sample, dim]
'''
        vectorizer = CountVectorizer(ngram_range=ngram_range, min_df=min_df, token_pattern='(?u)\\b\\w+\\b')
        # The default token_pattern r'(?u)\b\w\w+\b' only keeps words of at least 2 characters, which is not suitable for Chinese, so it is overridden here
X = vectorizer.fit_transform(corpus)
#print(vectorizer.get_feature_names())
embeddings = X.toarray()
return embeddings
def tfidf(self, corpus, ngram_range=(1,1), min_df=1):
'''Get TFIDF embeddings.
Input:
corpus: list of preprocessed strings.
ngram_range: tuple. (min_ngram, max_ngram) means min_ngram<=ngram<=max_ngram
            min_df: int. Minimum frequency of a word.
Output:
embeddings: array of shape [n_sample, dim]
'''
transformer = TfidfTransformer(use_idf=True, smooth_idf=True)
counts = self.bow(corpus, ngram_range, min_df)
X = transformer.fit_transform(counts)
embeddings = X.toarray()
return embeddings
def lda(self, corpus, ngram_range=(1,1), min_df=1, dim=5, random_state=0):
'''Get LDA embeddings.
Input:
corpus: list of preprocessed strings.
ngram_range: tuple. (min_ngram, max_ngram) means min_ngram<=ngram<=max_ngram
            min_df: int. Minimum frequency of a word.
            dim: int. Dimension of embedding.
random_state: int.
Output:
embeddings: array of shape [n_sample, dim]
'''
transformer=LatentDirichletAllocation(n_components=dim, random_state=random_state)
# transform corpus to bow format
counts = self.bow(corpus, ngram_range, min_df)
# get lda embeddings
embeddings = transformer.fit_transform(counts)
return embeddings
def lsa(self, corpus, ngram_range=(1,1), min_df=1, n_iter=5, dim=5, base_embeddings='tfidf'):
'''Get LSA embeddings.
Input:
corpus: list of preprocessed strings.
ngram_range: tuple. (min_ngram, max_ngram) means min_ngram<=ngram<=max_ngram
            min_df: int. Minimum frequency of a word.
            n_iter: int. Number of iterations.
            dim: int. Dimension of embedding.
base_embeddings: string. "tfidf" or "bow"
Output:
embeddings: array of shape [n_sample, dim]
'''
# get base embeddings
if base_embeddings=='tfidf':
X = self.tfidf(corpus, ngram_range, min_df)
else:
X = self.bow(corpus, ngram_range, min_df)
# get LSA embeddings
transformer = TruncatedSVD(n_components=dim,algorithm='randomized',n_iter=n_iter)
embeddings = transformer.fit_transform(X)
return embeddings
def pca(self, corpus, ngram_range=(1,1), min_df=1, dim=5, base_embeddings='tfidf'):
'''Get PCA embeddings.
Input:
corpus: list of preprocessed strings.
ngram_range: tuple. (min_ngram, max_ngram) means min_ngram<=ngram<=max_ngram
            min_df: int. Minimum frequency of a word.
            dim: int. Dimension of embedding.
base_embeddings: string. "tfidf" or "bow"
Output:
embeddings: array of shape [n_sample, dim]
'''
# get base embeddings
if base_embeddings=='tfidf':
X = self.tfidf(corpus, ngram_range, min_df)
else:
X = self.bow(corpus, ngram_range, min_df)
# get PCA embeddings
transformer = PCA(n_components=dim, svd_solver='auto')
embeddings = transformer.fit_transform(X)
return embeddings
def word2vec(self, corpus, method='word2vec', model_path=''):
'''Get Word2Vec embeddings.
Input:
corpus: list of preprocessed strings.
method: string. "word2vec"/"glove"/"fasttext"
model_path: string. Path of model.
Output:
embeddings: array of shape [n_sample, dim]
'''
# load model
if self.model is None and model_path!='':
self.load_model(method, model_path)
embeddings = []
# drop tokens which not in vocab
for text in corpus:
tokens = text.split(' ')
tokens = [token for token in tokens if token in self.model.vocab]
#logger.info(', '.join(tokens))
if len(tokens)==0:
embedding = self.model['unk'].tolist()
else:
embedding = np.mean(self.model[tokens],axis=0).tolist()
embeddings.append(embedding)
embeddings = np.array(embeddings)
return embeddings
def bert(self, corpus, model_path='', mode='cls'):
'''Get BERT embeddings.
Input:
corpus: list of preprocessed strings.
model_path: string. Path of model.
mode: string. "cls"/"mean". "cls" mode: get the embedding of the first
token of a sentence; "mean" mode: get the average embedding of all tokens of
a sentence except for the first [CLS] and the last [SEP] tokens.
Output:
embeddings: array of shape [n_sample, dim]
'''
import torch
# load model
if self.model is None and model_path!='':
self.load_model('bert',model_path)
embeddings = []
for text in corpus:
# tokenize and encode
input_ids = torch.tensor(self.tokenizer.encode(text)).unsqueeze(0) # Batch size 1
# get embedding
outputs = self.model(input_ids)
embedding = outputs[0].detach().numpy() # The last hidden-state is the first element of the output tuple
if mode=='cls':
embedding = embedding[0][0]
elif mode=='mean':
embedding = np.mean(embedding[0],axis=0)
embeddings.append(embedding)
embeddings = np.array(embeddings)
return embeddings
def sentence_transformers(self, corpus, model_path=''):
'''Get Sentence-Transformers embeddings.
Reference: https://github.com/UKPLab/sentence-transformers
Input:
corpus: list of preprocessed strings.
model_path: string. Path of model.
Output:
embeddings: array of shape [n_sample, dim]
'''
# load model
if self.model is None and model_path!='':
self.load_model('sentence_transformers',model_path)
embeddings = self.model.encode(corpus)
return embeddings
def get_embeddings(self, corpus, ngram_range=(1,1), min_df=1, dim=5, method='tfidf', model_path=''):
'''Get embeddings according to params.
Input:
corpus: list of preprocessed strings.
ngram_range: tuple. (min_ngram, max_ngram) means min_ngram<=ngram<=max_ngram
            min_df: int. Minimum frequency of a word.
            dim: int. Dimension of embedding.
            method: string. One of "bow"/"tfidf"/"lda"/"lsa"/"pca"/"word2vec"/
            "glove"/"fasttext"/"bert"/"sentence_transformers"
Output:
embeddings: array of shape [n_sample, dim]
'''
self.method = method
if self.method == 'bow':
return self.bow(corpus, ngram_range=ngram_range, min_df=min_df)
elif self.method == 'tfidf':
return self.tfidf(corpus, ngram_range=ngram_range, min_df=min_df)
elif self.method == 'lda':
return self.lda(corpus, ngram_range=ngram_range, min_df=min_df, dim=dim)
elif self.method == 'lsa':
return self.lsa(corpus, ngram_range=ngram_range, min_df=min_df, dim=dim)
elif self.method == 'pca':
return self.pca(corpus, ngram_range=ngram_range, min_df=min_df, dim=dim)
elif self.method in ['word2vec','glove','fasttext']:
return self.word2vec(corpus, method=method, model_path=model_path)
elif self.method == 'bert':
return self.bert(corpus, model_path=model_path)
elif self.method == 'sentence_transformers':
return self.sentence_transformers(corpus, model_path=model_path)
if __name__ == '__main__':
corpus_en = ['This is the first document.',
'This is the second second document.',
'And the third one!',
'Is this the first document?']
corpus_zh = ["一项研究发现,在某些法国和荷兰的奶酪中存在不同程度的K2。",
"但其含量多少取决于奶酪品种、成熟时间、脂肪含量和奶酪的产地等。",
"但通常来讲,高脂肪和陈年奶酪的K2含量较高",
"此外,维生素K还是一种脂溶性维生素,就是说当它与富含健康脂肪的食物!"]
# text preprocess
from preprocess import Preprocess
tp_en = Preprocess('en')
tp_zh = Preprocess('zh')
corpus_en = tp_en.preprocess(corpus_en)
print(corpus_en)
corpus_zh = tp_zh.preprocess(corpus_zh)
print(corpus_zh)
'''
# bow
emb = Embeddings()
bow_en = emb.bow(corpus_en)
print(bow_en)
bow_zh = emb.bow(corpus_zh)
print(bow_zh)
# tfidf
emb = Embeddings()
tfidf_en = emb.tfidf(corpus_en)
print(tfidf_en)
tfidf_zh = emb.tfidf(corpus_zh)
print(tfidf_zh)
# lda
emb = Embeddings()
lda_en = emb.lda(corpus_en,dim=2)
print(lda_en)
lda_zh = emb.lda(corpus_zh,dim=2)
print(lda_zh)
# pca
emb = Embeddings()
pca_en = emb.pca(corpus_en,dim=2,base_embeddings='bow')
print(pca_en)
pca_zh = emb.pca(corpus_zh,dim=2)
print(pca_zh)
# lsa
emb = Embeddings()
lsa_en = emb.lsa(corpus_en,dim=2,base_embeddings='bow')
print(lsa_en)
lsa_zh = emb.lsa(corpus_zh,dim=2)
print(lsa_zh)
# word2vec
emb = Embeddings()
emb.load_model(method='word2vec',model_path="../w2v_models/Tencent_AILab_ChineseEmbedding/Tencent_AILab_ChineseEmbedding")
word2vec_zh = emb.word2vec(corpus_zh)
print(word2vec_zh)
# glove
# transform glove model to word2vec format
from utils import transformGlove
source_model_path = os.path.join(os.path.dirname(__file__),"../w2v_models/GloVe/en/glove.42B.300d.txt")
target_model = os.path.join(os.path.dirname(__file__),"../w2v_models/GloVe/en/glove.42B.300d.w2v")
#transformGlove(source_model_path,target_model,binary=True)
target_model_path = target_model+'.bin'
# get embeddings
emb = Embeddings()
emb.load_model(method='word2vec',model_path=target_model_path)
glove_en = emb.word2vec(corpus_en)
print(glove_en)
# fasttext
model_path = os.path.join(os.path.dirname(__file__),"../w2v_models/FastText/en/cc.en.300.bin")
emb = Embeddings()
emb.load_model(method='fasttext', model_path=model_path)
ft_en = emb.word2vec(corpus_en)
print(ft_en.shape)
emb = Embeddings()
model_path = os.path.join(os.path.dirname(__file__),"../../bert-base-chinese")
emb.load_model(method='bert', model_path=model_path)
bert_zh = emb.bert(corpus_zh)
print(bert_zh.shape)
'''
emb = Embeddings()
bow_en = emb.bow(corpus_en)
print(bow_en)
bow_zh = emb.bow(corpus_zh)
print(bow_zh)
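    # Hedged addition (not part of the original demo): the same corpus through
    # the unified get_embeddings dispatcher; tfidf needs no external model files.
    emb = Embeddings()
    tfidf_en = emb.get_embeddings(corpus_en, method='tfidf')
    print(tfidf_en.shape)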
|
{"hexsha": "3142f1c6340432098a6f50e4850cdf9a6dcaf701", "size": 13906, "ext": "py", "lang": "Python", "max_stars_repo_path": "textgo/embeddings.py", "max_stars_repo_name": "Lipairui/textgo", "max_stars_repo_head_hexsha": "e6156663e7e8040c40f6a2bfac393bdfa0bfdaba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2020-07-14T14:46:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T18:18:55.000Z", "max_issues_repo_path": "textgo/embeddings.py", "max_issues_repo_name": "Lipairui/textgo", "max_issues_repo_head_hexsha": "e6156663e7e8040c40f6a2bfac393bdfa0bfdaba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-08-13T11:09:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-03T08:15:48.000Z", "max_forks_repo_path": "textgo/embeddings.py", "max_forks_repo_name": "Lipairui/textgo", "max_forks_repo_head_hexsha": "e6156663e7e8040c40f6a2bfac393bdfa0bfdaba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-06T07:35:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-06T07:35:16.000Z", "avg_line_length": 40.3072463768, "max_line_length": 144, "alphanum_fraction": 0.6138357544, "include": true, "reason": "import numpy", "num_tokens": 3564}
|
#!/usr/bin/env python
from io import BytesIO
import datetime
import cgi
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from pyiem.util import get_dbconn, ssw
def make_plot(form):
"""Make the make_plot"""
year = int(form.getfirst("year", 2013))
varname = form.getfirst("varname", "AGR1")[:10]
pgconn = get_dbconn("sustainablecorn")
cursor = pgconn.cursor()
cursor.execute(
"""
SELECT date(updated) as d,
sum(case when value not in ('.') then 1 else 0 end),
count(*) from agronomic_data WHERE year = %s
and varname = %s GROUP by d ORDER by d ASC
""",
(year, varname),
)
x = []
y = []
total = 0
for i, row in enumerate(cursor):
if i == 0:
x.append(row[0] - datetime.timedelta(days=1))
y.append(0)
x.append(row[0])
y.append(y[-1] + row[1])
total += row[2]
xticks = []
xticklabels = []
now = x[0]
while now < x[-1]:
if now.day == 1:
fmt = "%b\n%Y" if (len(xticks) == 0 or now.month == 1) else "%b"
xticks.append(now)
xticklabels.append(now.strftime(fmt))
now += datetime.timedelta(days=1)
(fig, ax) = plt.subplots(1, 1)
ax.plot(x, np.array(y) / float(total) * 100.0)
ax.set_ylim(0, 100)
ax.set_yticks([0, 25, 50, 75, 100])
ax.set_ylabel("Percentage [%]")
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_title("CSCAP %s Upload Progress for %s" % (varname, year))
ax.grid(True)
return fig
def main():
"""Make a plot please"""
form = cgi.FieldStorage()
fig = make_plot(form)
ssw("Content-type: image/png\n\n")
ram = BytesIO()
fig.savefig(ram, format="png", dpi=100)
ram.seek(0)
res = ram.read()
ssw(res)
if __name__ == "__main__":
# Go Main
main()
|
{"hexsha": "5e1f6b667667d9a8a733067d2cbf201f85d68463", "size": 1898, "ext": "py", "lang": "Python", "max_stars_repo_path": "htdocs/admin/varprogress.py", "max_stars_repo_name": "isudatateam/datateam", "max_stars_repo_head_hexsha": "eb8e1dad6c05cb1b236689862fe87c56b25ea6fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-05-20T04:51:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T18:55:27.000Z", "max_issues_repo_path": "htdocs/admin/varprogress.py", "max_issues_repo_name": "isudatateam/datateam", "max_issues_repo_head_hexsha": "eb8e1dad6c05cb1b236689862fe87c56b25ea6fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 275, "max_issues_repo_issues_event_min_datetime": "2017-03-09T20:31:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T22:43:47.000Z", "max_forks_repo_path": "htdocs/admin/varprogress.py", "max_forks_repo_name": "isudatateam/datateam", "max_forks_repo_head_hexsha": "eb8e1dad6c05cb1b236689862fe87c56b25ea6fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-01T15:03:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T13:46:58.000Z", "avg_line_length": 23.725, "max_line_length": 76, "alphanum_fraction": 0.5727081138, "include": true, "reason": "import numpy", "num_tokens": 557}
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 18:13:00 2018
@author: Nicholas Fong
"""
# import the necessary packages
from sklearn.model_selection import train_test_split
from pyimagesearch.nn.conv import FongNet
from pyimagesearch.preprocessing import ImageToArrayPreprocessor
from pyimagesearch.preprocessing import AspectAwarePreprocessor
from pyimagesearch.datasets import SimpleDatasetLoader
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras.optimizers import SGD
from keras.optimizers import Adadelta
from keras.models import load_model
from imutils import paths
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
## construct the argument parse and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-w", "--weights", required=True,
# help="path to best model weights file")
#args = vars(ap.parse_args())
args = {'dataset': '../datasets/menClassTestImages', 'weights': 'weights/cnn_best_weights_day_7.hdf5'}
# grab the list of images that we'll be describing, then extract
# the class label names from the image paths
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
#classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
#classNames = [str(x) for x in np.unique(classNames)]
# initialize the image preprocessors
aap = AspectAwarePreprocessor(128, 128)
iap = ImageToArrayPreprocessor()
# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0
print("[INFO] loading pre-trained network...")
model = load_model(args["weights"])
# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(data, batch_size=32)
print(np.round(predictions, 1))
|
{"hexsha": "b7c71e0ddd5a2084f4ca530e0fb8577a6b5a6057", "size": 2000, "ext": "py", "lang": "Python", "max_stars_repo_path": "COEN 345 - Computer Vision II/COEN345Project/CNNTester.py", "max_stars_repo_name": "nicholasmfong/oldHomework", "max_stars_repo_head_hexsha": "82f10998a7f05c0db79647818e40924c38484484", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-05T17:45:05.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-05T17:45:05.000Z", "max_issues_repo_path": "COEN 345 - Computer Vision II/COEN345Project/CNNTester.py", "max_issues_repo_name": "nicholasmfong/oldHomework", "max_issues_repo_head_hexsha": "82f10998a7f05c0db79647818e40924c38484484", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "COEN 345 - Computer Vision II/COEN345Project/CNNTester.py", "max_forks_repo_name": "nicholasmfong/oldHomework", "max_forks_repo_head_hexsha": "82f10998a7f05c0db79647818e40924c38484484", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3333333333, "max_line_length": 103, "alphanum_fraction": 0.7525, "include": true, "reason": "import numpy", "num_tokens": 459}
|
###############################################################################
############################# IMPORTS ###############################
###############################################################################
import TSC as simul
import numpy as np
import math
import pandas as pd
import random as rd
import sys
import os
import shutil
import matplotlib.pyplot as plt
import csv
#PRINCIPLE
#at each iteration, random mutations are applied (indel or inversion)
#during these mutations, the files holding the positions of the genes, the
#coding and non-coding sections, etc. (found in 'evol') are read as dataframes,
#which get modified by the mutation
#the files are then regenerated in the 'evol2' folder
#the fitness of the new values is computed
#if it is better than before the mutation, the mutated version becomes the
#new version of the genome (the files of evol2 are copied into evol)
#even if the fitness is worse, the update can still happen with some probability
#KNOWN ISSUES:
#the mutation functions work well BUT:
#the probability of keeping a lower fitness is systematically 1
#the first line of tousgenesidentiques gets dropped during the simulation --> errors
#the fitness computation needs double-checking
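#Hedged illustration (an addition, not part of the original script): the
#accept/reject rule used in _main below, isolated as a standalone helper so it
#can be sanity-checked on its own; the name accept_mutation is hypothetical.
#q plays the role of a temperature: for new_fit <= old_fit, exp(df/q) lies in (0, 1].
def accept_mutation(new_fit, old_fit, q=0.00005):
    if new_fit > old_fit:
        return True
    return rd.uniform(0, 1) < math.exp((new_fit - old_fit) / q)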
###############################################################################
############################# VARIABLES ###############################
###############################################################################
#params.ini has been modified to work on the "evol" folder, which is a copy of
#"tousgenesidentiques", so that the same genome keeps evolving.
output_dir="output/"
input_file = "params.ini"
input_folder = "evol/"
#All parameters are arbitrary (to be re-evaluated)
#evolution parameter (weights the probability of keeping a fitness lower than the current one)
q = 0.00005
#probability of an indel (inversion probability = 1-pid)
pid = 0.9
#program unit (number of nucleotides)
unit = 60
#time between saves
savetime = 50
#total genome size
genome_size = 30000
#amount of iterations
n_iter = 1000
#gff head text
def gff_head() :
global genome_size
return "##gff-version 3\n"+ \
"#!gff-spec-version 1.20\n"+ \
"#!processor NCBI annotwriter\n"+ \
"##sequence-region\ttousgenesidentiques\t1\t"+str(genome_size)+"\n" +\
"tousgenesidentiques\tRefSeq\tregion\t1\t"+str(genome_size)+"\t.\t+\t.\tID=id0;Name=tousgenesidentiques"
#issue: genome_size is not always up to date (especially after a single indel)
###############################################################################
############################# FUNCTIONS ###############################
###############################################################################
#copies files from evol2 into evol
def Evol2ToEvol():
files = ['evol2/prot.dat', 'evol2/TSS.dat', 'evol2/TTS.dat', 'evol2/tousgenesidentiques.gff']
for f in files:
shutil.copy(f, 'evol/')
#used to create folder
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
#used to add a line at the beginning of a file
def line_prepender(filename, line):
with open(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(line.rstrip('\r\n') + '\n' + content)
#gets result of transcription
def get_expression():
tr_info = pd.read_csv(output_dir+"all_tr_info.csv", sep='\t')
target = pd.read_csv("environment.dat", sep = "\t", header = None)
target.columns = ["TU", "tr_rate"]
return tr_info, target
#calculate fitness
def fitness():
#get observed expression and target expression rates
tr_info, target = get_expression()
#only keep columns of interest
f_cible = target["tr_rate"]
f_obs = tr_info["number of transcripts generated"].values
#normalize observed expression rate
f_obs = f_obs/f_obs.sum(axis=0,keepdims=1)
#number of genes
nb_genes = len(f_cible)
#calculate fitness
fitness = 0
for i in range(nb_genes):
fitness += math.fabs(math.log(f_obs[i]/f_cible[i]))
fitness = math.exp(-fitness)
return fitness
#select random position and check that it's not problematic (in a gene)
#works for now, check again after inversions etc
def select_mut_pos(tss, tts, prot, tousgenesid, safety_mes):
pos = rd.randint(1, genome_size)
trunc = pos%unit
pos -= trunc
#checks if position is correct
correct=True
for i in range(tousgenesid.shape[0]):
start = tousgenesid.at[i, "start"]
end = tousgenesid.at[i, "end"]
if start > end:
a = end
end = start
start = a
if pos > start - safety_mes and pos < end + safety_mes:
correct = False
break
if correct == False:
pos = select_mut_pos(tss, tts, prot, tousgenesid, safety_mes)
return pos
#read all files from "evol" folder
def read_from_folder():
tousgenesidentiques = pd.read_csv(input_folder+"tousgenesidentiques.gff", sep="\t", skiprows = 4, header = 0,
names = ["seqname", "source", "feature", "start", "end",
"score", "strand", "frame", "attribute"])
prot = pd.read_csv(input_folder+"prot.dat", sep="\t", header = 0)
TTS = pd.read_csv(input_folder+"TTS.dat", sep="\t", header = 0)
TSS = pd.read_csv(input_folder+"TSS.dat", sep="\t", header = 0)
return tousgenesidentiques, prot, TTS, TSS
#writes files for cell after mutation to 'evol2'
def write_to_folder(tousgenesidentiques, TSS, TTS, prot):
global genome_size
tousgenesidentiques.to_csv("evol2/tousgenesidentiques.gff", sep="\t", index = False, header = False)
line_prepender("evol2/tousgenesidentiques.gff", gff_head())
TSS.to_csv("evol2/TSS.dat", sep="\t", index = False)
TTS.to_csv("evol2/TTS.dat", sep="\t", index = False)
prot.to_csv("evol2/prot.dat", sep="\t", index = False)
def indel():
global genome_size
#chooses an insertion or a deletion
    mut_type = np.random.choice(["insertion", "deletion"], p=[0.5, 0.5])
print (mut_type)
#reads files
tousgenesidentiques, prot, TTS, TSS = read_from_folder()
#pick a random position
pos = select_mut_pos(TSS, TTS, prot, tousgenesidentiques, unit)
print (pos)
#define wether we will delete or add 60 nucleotides
if mut_type == "insertion" : change = unit
elif mut_type == "deletion" : change = -1*unit
else :
print ("error : incorrect mutation type")
sys.exit()
genome_size += change
for index, row in tousgenesidentiques.iterrows():
if min(row["start"], row["end"]) > pos:
tousgenesidentiques.at[index, "start"] += change
tousgenesidentiques.at[index, "end"] += change
for index, row in TSS.iterrows():
if row["TSS_pos"] > pos:
TSS.at[index, "TSS_pos"] += change
for index, row in TTS.iterrows():
if row["TTS_pos"] > pos:
TTS.at[index, "TTS_pos"] += change
for index, row in prot.iterrows():
if row["prot_pos"] > pos:
prot.at[index, "prot_pos"] += change
print ("genome size : ", genome_size)
#write new files to folder
write_to_folder(tousgenesidentiques, TSS, TTS, prot)
#works for the most part
def inversion():
print ("inversion")
    #position explanation:
    #1 (barrier 1) -- 1001 (TSS) -- 2000 (TTS) -- 3000 (end) -- 3001 (barrier 2)
#read all files from "evol" folder
tousgenesidentiques, prot, TTS, TSS = read_from_folder()
#pick start and end positions for inversions
inv_start = select_mut_pos(TSS, TTS, prot, tousgenesidentiques, unit)
inv_end = select_mut_pos(TSS, TTS, prot, tousgenesidentiques, unit)
print (inv_start, inv_end)
if inv_start > inv_end:
a = inv_end
inv_end = inv_start
inv_start = a
#do the coordinates inversions
for index, row in tousgenesidentiques.iterrows():
if min(row["start"], row["end"]) > inv_start and max(row["start"], row["end"]) < inv_end:
tousgenesidentiques.at[index, "start"] = inv_end - (row["start"] - inv_start)
tousgenesidentiques.at[index, "end"] = inv_start + (inv_end - row["end"])
if row["strand"] == "+" : tousgenesidentiques.at[index, "strand"] = "-"
elif row["strand"] == "-" : tousgenesidentiques.at[index, "strand"] = "+"
else :
print("strand problem in tousgenesidentiques !")
sys.exit()
for index, row in TSS.iterrows():
if row["TSS_pos"] > inv_start and row["TSS_pos"] < inv_end:
TSS.at[index, "TSS_pos"] = inv_end - (row["TSS_pos"] - inv_start)
if row["TUorient"] == "+" : TSS.at[index, "TUorient"] = "-"
elif row["TUorient"] == "-" : TSS.at[index, "TUorient"] = "+"
for index, row in TTS.iterrows():
if row["TTS_pos"] > inv_start and row["TTS_pos"] < inv_end:
TTS.at[index, "TTS_pos"] = inv_end - (row["TTS_pos"] - inv_start)
if row["TUorient"] == "+" : TTS.at[index, "TUorient"] = "-"
elif row["TUorient"] == "-" : TTS.at[index, "TUorient"] = "+"
for index, row in prot.iterrows():
if row["prot_pos"] > inv_start and row["prot_pos"] < inv_end:
prot.at[index, "prot_pos"] = inv_end - (row["prot_pos"] - inv_start)
print ("genome size : ", genome_size)
#copy results to evol2
write_to_folder(tousgenesidentiques, TSS, TTS, prot)
###############################################################################
############################# MAIN ###############################
###############################################################################
def _main():
fitness_list=[0]
mut_list = []
#iterate
for i in (range (n_iter)):
print("\n\nITERATION : ", i)
#select random mutation
        mut_type = np.random.choice([True, False], p=[pid, 1-pid])
if mut_type :
indel()
mut_list.append("indel")
else :
inversion()
mut_list.append("inv")
#transcribe new genome and calculate fitness
simul.start_transcribing("params_evol2.ini", output_dir)
fit = fitness()
#if fitness is better, keep new genome
if fit > fitness_list[-1]:
print ("new_fitness : ", fit)
fitness_list.append(fit)
Evol2ToEvol()
#otherwise, only keep it with probabilty
else:
df = fit-fitness_list[-1]
prob = math.exp(df/q)
print ("prob", prob, "df : ", df, "q : ", q, "fitness_list : ", fitness_list)
if rd.uniform(0,1) < prob:
fitness_list.append(fit)
Evol2ToEvol()
                #copy evol2 into evol
else:
fitness_list.append(fitness_list[-1])
if i%savetime == 0:
with open('fitness_q_'+str(q)+'_pid_'+str(pid)+'.csv','w') as f:
                writer = csv.writer(f)
                writer.writerows(zip(fitness_list, mut_list))
    #the plot does not work yet
plt.plot(fitness_list)
plt.show()
if __name__ == "__main__":
_main()
|
{"hexsha": "d96b980b916e67e4ea532c1e8e2306bb15c71f1d", "size": 11599, "ext": "py", "lang": "Python", "max_stars_repo_path": "evolution.py", "max_stars_repo_name": "yvancluet/projet_sam_meyer", "max_stars_repo_head_hexsha": "803a97b3dacec588b870eb26779c919be783b5dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evolution.py", "max_issues_repo_name": "yvancluet/projet_sam_meyer", "max_issues_repo_head_hexsha": "803a97b3dacec588b870eb26779c919be783b5dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evolution.py", "max_forks_repo_name": "yvancluet/projet_sam_meyer", "max_forks_repo_head_hexsha": "803a97b3dacec588b870eb26779c919be783b5dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.936746988, "max_line_length": 113, "alphanum_fraction": 0.5721182861, "include": true, "reason": "import numpy", "num_tokens": 2968}
|
#!/usr/bin/env python3
# coding: utf-8
import os
import sys
sys.path.append('../..')
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import cartopy.crs as ccrs
from deepsphere.data import LabeledDatasetWithNoise, LabeledDataset
datapath = "../../data/ghcn-daily/processed/" # "/mnt/nas/LTS2/datasets/ghcn-daily/processed/"
rawpath = "../../data/ghcn-daily/raw/" # "/mnt/nas/LTS2/datasets/ghcn-daily/raw/"
# newdatapath = "../../data/ghcn-daily/processed/"
def get_stations(datapath, years, rawpath=None):
    filename = 'stations_{:4d}-{:4d}.npz'.format(years[0], years[-1])
    if not os.path.isfile(datapath+filename):
        # no cached station file, so the raw GHCN listing is required
        if rawpath is None:
            print('Problem occurred: no cached station file and no rawpath given')
            raise ValueError()
id_ghcn, lat, lon, elev, name = [], [], [], [], []
with open(rawpath+'ghcnd-stations.txt', 'r') as f:
for line in f:
iid, ilat, ilon, ielev, iname = line[0:11], line[12:20], line[21:30], line[31:37], line[41:71]
assert (not iid.isspace()) and (not ilat.isspace()) and (not ilon.isspace()) \
and (not ielev.isspace()) and (not iname.isspace())
id_ghcn.append(iid.strip())
lat.append(float(ilat.strip()))
lon.append(float(ilon.strip()))
elev.append(float(ielev.strip()))
name.append(iname.strip())
id_ghcn, lat, lon, elev, name = np.array(id_ghcn), np.array(lat), np.array(lon), np.array(elev), np.array(name)
id_ghcn_relevant = set([])
for yearIdx,year in enumerate(years):
filename2 = rawpath+'{:4}.csv.gz'.format(year)
print('- pre-parsing : {}'.format(filename2))
df = pd.read_csv(filename2, names=['id_ghcn', 'date', 'type', 'value', '?0', '?1', '?2', '?3'], \
nrows=None, usecols=[0,1,2,3])
id_ghcn_relevant |= set(df["id_ghcn"].values)
# second, find identifiers both in id_ghcn and id_ghcn_relevant
id_ghcn_relevant = set(id_ghcn) & id_ghcn_relevant
# third, keep only relevant station data
keep = [id in id_ghcn_relevant for id in id_ghcn]
id_ghcn, lat, lon, elev, name = id_ghcn[keep], lat[keep], lon[keep], elev[keep], name[keep]
# free up some memory
del id_ghcn_relevant, keep
np.savez_compressed(datapath+filename, id_ghcn=id_ghcn, lat=lat, lon=lon, elev=elev, name=name, years=years)
else:
station_file = np.load(datapath+filename)
id_ghcn, lat, lon, elev, name = station_file['id_ghcn'], station_file['lat'], station_file['lon'], station_file['elev'], station_file['name']
del station_file
n_stations = id_ghcn.shape[0]
print('{} weather stations identified.'.format(n_stations))
# a dictionary mapping GHCN ids to local ids (rows in id array)
ghcn_to_local = dict(zip(id_ghcn, np.arange(n_stations)))
return n_stations, ghcn_to_local, lat, lon, elev, name
def get_data(datapath, years, feature_names, ghcn_to_local, rawpath=None):
filenames = []
datas = []
n_years = len(years)
for feature_name in feature_names:
filenames.append('data_{:4d}-{:4d}_{}.npz'.format(years[0], years[-1], feature_name))
print('- Checking if file {} exists..'.format(filenames[-1]))
# only recompute if necessary
if not os.path.isfile(datapath+filenames[-1]):
            print('- The file is not there. Parsing everything from raw. This will take a while.')
            os.makedirs(datapath, exist_ok=True)
            # number of stations is needed below to size the data array
            n_stations = len(ghcn_to_local)
# Load the station measurements into a year-list of dataframes
df_years = []
for yearIdx,year in enumerate(years):
filename_year = rawpath+'{:4}.csv.gz'.format(year)
print(' - parsing *{}*'.format(filename_year))
df = pd.read_csv(filename_year, names=['id_ghcn', 'date', 'type', 'value', 'MF', 'qualityF', 'source', '?0'], \
nrows=None, usecols=[0,1,2,3,5])
# create a new column with the id_local
id_local = [ghcn_to_local.get(id_g) for id_g in df["id_ghcn"].values]
id_local = [-1 if v is None else v for v in id_local]
id_local = np.array(id_local).astype(np.int)
df = df.assign(id_local=pd.Series(id_local, index=df.index).values)
# remove measurement of stations with unknown id_local
df = df[df.id_local != -1]
# replace measurements with bad quality flag
#df.value[~df.qualityF.isna()] = np.nan
df = df[df.qualityF.isna()]
df = df.drop('qualityF', axis=1)
df_years.append(df)
del df, id_local
print('done!')
# Construct one array per feature and save it to disk
# indicate for which days we have measurements (this also deals with months of different lengths)
valid_days = np.zeros((n_years, 12, 31), dtype=np.bool)
            for name in feature_names:
print(' - Looking at {}'.format(name))
data = np.zeros((n_stations, n_years, 12, 31), dtype=np.float) * np.nan
for yearIdx,year in enumerate(years):
df = df_years[yearIdx]
idf = df.loc[df.type.str.contains(name)]
print(' - year {}'.format(year))
# remove measurement of stations with unknown id_local
idf = idf[idf.id_local != -1]
for monthIdx,month in enumerate(range(1,12+1)):
for dayIdx,day in enumerate(range(1,31+1)):
date = int('{:4d}{:02d}{:02d}'.format(year,month,day))
jdf = idf.loc[idf['date'] == date]
# sort data according to the id_local
jdf.set_index('id_local', inplace=True)
jdf = jdf.sort_index()
index = jdf.index.values
                            if name == 'WT' or name == 'WV':
                                values = jdf.type.str.extract(r'(\d+)').values.astype(int)
                                values = values[:,0]
else:
values = jdf['value'].values.astype(np.float)
if len(index) != 0:
data[index,yearIdx,monthIdx,dayIdx] = values
valid_days[yearIdx,monthIdx,dayIdx] = True
print(' - saving to disk')
np.savez_compressed(datapath+'data_{:4d}-{:4d}_{}.npz'.format(years[0], years[-1], name), data=data, valid_days=valid_days)
del index, values, df, idf, jdf
else:
print('- Loading data from disk..')
data_file = np.load(datapath+filenames[-1])
data, valid_days = data_file['data'], data_file['valid_days']
n_stations = data.shape[0]
print('- {} stations loaded.'.format(n_stations))
data = data.reshape((n_stations, n_years*12*31))
if feature_name == 'TMIN' or feature_name == 'TMAX' or feature_name == 'PRCP':
data = data.astype(np.float)
data /= 10
datas.append(data)
valid_days = np.squeeze(valid_days.reshape(n_years*12*31)).astype(np.bool)
full_data = np.stack(datas, axis=2)
full_data = full_data[:, valid_days, :]
n_days = full_data.shape[1]
return full_data, n_days, valid_days
def clean_nodes(data, feat, lon, lat, superset=False, neighbor=10, figs=False, **kwargs):
"""
    data: full data array from GHCN
    feat: list or tuple containing indices of the first and last feature to keep
    lon, lat: positions of the weather stations
    superset: keep only the minimal set if False, else nodes having at least 75% of measurements
    neighbor: number of neighbors in the knn-graph
    figs: whether to draw the figures
"""
sl = slice(*feat)
dataset = data.transpose((1, 0, 2))
keepToo = ~np.isnan(dataset[:,:,sl]).any(axis=0)
keepSuper = ((~np.isnan(dataset[:,:,sl])).sum(axis=0)>0.75*dataset.shape[0])
keepToo = keepToo.all(axis=1)
keepSuper = keepSuper.all(axis=1)
dataset = dataset[:, keepToo, sl]
print("number of stations in min set: {}\nnumber of stations in super set: {}".format(keepToo.sum(), keepSuper.sum()))
keep = keepSuper if superset else keepToo
if keep.sum()==0:
print("no nodes for the current configuration")
return [None]*3
graph = sphereGraph(lon[keep], lat[keep], neighbor, **kwargs)
graph.compute_laplacian("combinatorial")
if figs:
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
ax.set_global()
ax.coastlines()
plt.plot(lon[keep], lat[keep], 'or', marker='o', markerfacecolor='r', markersize=2)
fig2 = plt.figure(figsize=(20,20))
axes = fig2.add_subplot(111, projection='3d')
graph.plot(vertex_size=10, edges=True, ax=axes)
return dataset, keep, graph
def dataset_temp(datas, lon=None, lat=None, alt=None, w_days=None, add_feat=True, ratio=0.7):
n_days = datas.shape[0]
limit = int(ratio*n_days)
mean = datas.mean(axis=(0,1))[0]
std = datas.std(axis=(0,1))[0]
x_train = (datas[:limit,:,0] - mean) / std
labels_train = datas[:limit,:,1]
x_val = (datas[limit:,:,0] - mean) / std
labels_val = datas[limit:,:,1]
if add_feat:
# location of stations
coords_v = np.stack([lon, lat], axis=-1)
coords_v = (coords_v-coords_v.mean(axis=0))/coords_v.std(axis=0)
# altitude of stations
alt_v = alt
alt_v = (alt_v-alt_v.mean())/alt_v.std()
x_train = np.dstack([x_train, np.repeat(coords_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(w_days[:limit, np.newaxis], x_train.shape[1], axis=1)])
x_val = np.dstack([x_val, np.repeat(coords_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(w_days[limit:, np.newaxis], x_val.shape[1], axis=1)])
training = LabeledDataset(x_train, labels_train)
validation = LabeledDataset(x_val, labels_val)
return training, validation
def dataset_prec(datas, lon=None, lat=None, alt=None, w_days=None, add_feat=True, ratio=0.7):
n_days = datas.shape[0]
limit = int(ratio*n_days)
mean = datas.mean(axis=(0,1))[1:3]
std = datas.std(axis=(0,1))[1:3]
x_train = (datas[:limit,:,1:3] - mean) / std
labels_train = datas[:limit,:,0]
x_val = (datas[limit:,:,1:3] - mean) / std
labels_val = datas[limit:,:,0]
if add_feat:
# location of stations
coords_v = np.stack([lon, lat], axis=-1)
coords_v = (coords_v-coords_v.mean(axis=0))/coords_v.std(axis=0)
# altitude of stations
alt_v = alt
alt_v = (alt_v-alt_v.mean())/alt_v.std()
x_train = np.dstack([x_train, np.repeat(coords_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(w_days[:limit, np.newaxis], x_train.shape[1], axis=1)])
x_val = np.dstack([x_val, np.repeat(coords_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(w_days[limit:, np.newaxis], x_val.shape[1], axis=1)])
training = LabeledDataset(x_train, labels_train)
validation = LabeledDataset(x_val, labels_val)
return training, validation
def dataset_reg(datas, lon=None, lat=None, alt=None, w_days=None, add_feat=False, days_pred=5, ratio=0.7):
n_days, n_stations, n_feature= datas.shape
limit = int(ratio*(n_days-days_pred))
dataset_x = np.vstack([np.roll(datas, -i, axis=0) for i in range(days_pred)])
dataset_x = dataset_x.reshape(days_pred, n_days, n_stations, n_feature).transpose((1,2,3,0))
# days_x = np.vstack([np.roll(w_days, -i, axis=0) for i in range(days_pred)])
# days_x = days_x.reshape(days_pred, n_days).transpose()
x_train = dataset_x[:limit,:,:,:].transpose(0, 2, 1, 3).reshape(-1, n_stations, days_pred)
labels_train = datas[days_pred:limit+days_pred,:,:].transpose(0,2,1).reshape(-1, n_stations)
x_val = dataset_x[limit:n_days-days_pred,:,:,:].transpose(0, 2, 1, 3).reshape(-1, n_stations, days_pred)
labels_val = datas[days_pred+limit:,:,:].transpose(0,2,1).reshape(-1, n_stations)
if add_feat:
# location of stations
coords_v = np.stack([lon, lat], axis=-1)
coords_v = (coords_v-coords_v.mean(axis=0))/coords_v.std(axis=0)
# altitude of stations
alt_v = alt
alt_v = (alt_v-alt_v.mean())/alt_v.std()
x_train = np.dstack([x_train,
# np.broadcast_to(month_x[:n_days-days_pred,np.newaxis, :], x_train.shape),
np.repeat(coords_v[np.newaxis,:], x_train.shape[0],axis=0),
np.repeat(alt_v[np.newaxis,:], x_train.shape[0],axis=0),
np.tile(np.repeat(w_days[:limit, np.newaxis], x_train.shape[1],axis=1), (2,1))])
# np.broadcast_to(days_x[:n_days-days_pred,np.newaxis, :], x_train.shape)])
x_val = np.dstack([x_val,
# np.broadcast_to(month_x[:n_days-days_pred,np.newaxis, :], x_val.shape),
np.repeat(coords_v[np.newaxis,:], x_val.shape[0],axis=0),
np.repeat(alt_v[np.newaxis,:], x_val.shape[0],axis=0),
np.tile(np.repeat(w_days[limit:n_days-days_pred, np.newaxis], x_val.shape[1],axis=1), (2,1))])
# np.broadcast_to(days_x[:n_days-days_pred,np.newaxis, :], x_val.shape)])
training = LabeledDataset(x_train, labels_train)
validation = LabeledDataset(x_val, labels_val)
return training, validation
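# Hedged illustration (an addition, not part of the original module): the
# rolling-window construction used in dataset_reg, on a toy array, to make the
# axis gymnastics above easier to verify; _demo_rolling_windows is hypothetical.
def _demo_rolling_windows(days_pred=2):
    x = np.arange(4)
    # np.roll(x, -i) shifts left by i; stacking gives one window per row:
    # [[0, 1], [1, 2], [2, 3], [3, 0]] -- the wrapped last rows are exactly
    # what the `limit` slicing in dataset_reg discards.
    return np.stack([np.roll(x, -i) for i in range(days_pred)], axis=-1)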
def dataset_snow(datas, lon=None, lat=None, alt=None, w_days=None, add_feat=True, ratio=0.7):
n_days = datas.shape[0]
limit = int(ratio*n_days)
mean = datas.mean(axis=(0,1))[:3]
std = datas.std(axis=(0,1))[:3]
x_train = (datas[:limit,:,:3] - mean) / std
labels_train = datas[:limit,:,3]
x_val = (datas[limit:,:,:3] - mean) / std
labels_val = datas[limit:,:,3]
if add_feat:
# location of stations
coords_v = np.stack([lon, lat], axis=-1)
coords_v = (coords_v-coords_v.mean(axis=0))/coords_v.std(axis=0)
# altitude of stations
alt_v = alt
alt_v = (alt_v-alt_v.mean())/alt_v.std()
x_train = np.dstack([x_train, np.repeat(coords_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(w_days[:limit, np.newaxis], x_train.shape[1], axis=1)])
x_val = np.dstack([x_val, np.repeat(coords_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(w_days[limit:, np.newaxis], x_val.shape[1], axis=1)])
training = LabeledDataset(x_train, labels_train)
validation = LabeledDataset(x_val, labels_val)
return training, validation
def dataset_global(datas, lon=None, lat=None, alt=None, w_days=None, add_feat=True, ratio=0.7):
n_days = datas.shape[0]
limit = int(ratio*n_days)
mean = datas.mean(axis=(0,1))[0]
std = datas.std(axis=(0,1))[0]
x_train = np.atleast_3d((datas[:limit,:,0] - mean) / std)
labels_train = w_days[:limit]
x_val = np.atleast_3d((datas[limit:,:,0] - mean) / std)
labels_val = w_days[limit:]
if add_feat:
# location of stations
coords_v = np.stack([lon, lat], axis=-1)
coords_v = (coords_v-coords_v.mean(axis=0))/coords_v.std(axis=0)
# altitude of stations
alt_v = alt
alt_v = (alt_v-alt_v.mean())/alt_v.std()
x_train = np.dstack([x_train, np.repeat(coords_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_train.shape[0], axis=0),
np.repeat(w_days[:limit, np.newaxis], x_train.shape[1], axis=1)])
x_val = np.dstack([x_val, np.repeat(coords_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(alt_v[np.newaxis,:], x_val.shape[0], axis=0),
np.repeat(w_days[limit:, np.newaxis], x_val.shape[1], axis=1)])
training = LabeledDataset(x_train, labels_train)
validation = LabeledDataset(x_val, labels_val)
return training, validation
from pygsp.graphs import NNGraph
class sphereGraph(NNGraph):
def __init__(self, phi, theta, neighbors, rad=True, epsilon=False, **kwargs):
if not rad:
theta, phi = np.deg2rad(theta), np.deg2rad(phi)
theta -= np.pi/2
ct = np.cos(theta).flatten()
st = np.sin(theta).flatten()
cp = np.cos(phi).flatten()
sp = np.sin(phi).flatten()
x = st * cp
y = st * sp
z = ct
self.coords = np.vstack([x, y, z]).T
nntype = 'radius' if epsilon else 'knn'
plotting = {"limits": np.array([-1, 1, -1, 1, -1, 1])*0.5}
self.n_vertices = len(self.coords)
super(sphereGraph, self).__init__(self.coords, k=neighbors, NNtype=nntype, center=False, rescale=False,
plotting=plotting, **kwargs)
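# Hedged usage sketch (an addition, not part of the original module): build a
# small k-NN sphere graph from station coordinates given in degrees; the six
# points and k=2 are arbitrary toy values.
if __name__ == '__main__':
    lon = np.array([0., 45., 90., 135., 180., 225.])
    lat = np.array([0., 30., -30., 60., -60., 15.])
    g = sphereGraph(lon, lat, neighbors=2, rad=False)
    print('{} vertices, {} edges'.format(g.N, g.Ne))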
|
{"hexsha": "4855c831f3698f718dbcbf5a009a88e20077f319", "size": 17839, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/ghcn/GHCN_preprocessing.py", "max_stars_repo_name": "deepsphere/deepsphere_v2_code", "max_stars_repo_head_hexsha": "83c42ad3ec89c8a45f81b2001392d51f7bd34716", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-02-19T09:15:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-05T13:19:10.000Z", "max_issues_repo_path": "experiments/ghcn/GHCN_preprocessing.py", "max_issues_repo_name": "deepsphere/deepsphere_v2_code", "max_issues_repo_head_hexsha": "83c42ad3ec89c8a45f81b2001392d51f7bd34716", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-19T10:08:12.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-19T10:08:21.000Z", "max_forks_repo_path": "experiments/ghcn/GHCN_preprocessing.py", "max_forks_repo_name": "deepsphere/deepsphere_v2_code", "max_forks_repo_head_hexsha": "83c42ad3ec89c8a45f81b2001392d51f7bd34716", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-16T10:18:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-05T13:19:12.000Z", "avg_line_length": 42.7793764988, "max_line_length": 149, "alphanum_fraction": 0.579572846, "include": true, "reason": "import numpy", "num_tokens": 4746}
|
__id__ = "$Id: Geometry.py 51 2007-04-25 20:43:07Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 51 $"
__date__ = "$Date: 2007-04-25 14:43:07 -0600 (Wed, 25 Apr 2007) $"
import scipy
import Errors
class Geometry(object):
"""
Geometry is a class to hold information about the geometry of the problem.
"""
def __init__(self, bins, range):
"""
        bins: An int (or tuple of ints); each number is how many spatial bins
        there are in each dimension (up to 3)
range: A list of [min, max] pairs; the limits of the spatial geometry in
each dimension.
"""
try:
self.dimension = len(bins)
except TypeError:
self.dimension = 1
if self.dimension != 1:
            raise Errors.GeometryError(
                "Geometry currently only supports 1-D geometry")
elif self.dimension != len(range):
raise Errors.GeometryError(
"Bins and Range must have same degree")
else:
self.bins = bins
self.range = range
self.edges = scipy.zeros(self.bins+1)
self.centers = scipy.zeros(self.bins) # Bin centers
width = self.max - self.min
for i in xrange(self.bins+1):
edge = self.min + i*(width/float(self.bins))
self.edges[i] = edge
for i in xrange(len(self.centers)):
self.centers[i] = self.edges[i] + (self.edges[i+1] - self.edges[i])/2.0
def __repr__(self):
"""
"""
return "bins: %s, range: %s" %(self.bins, self.range)
def _getMinX(self):
return min(self.range[0])
def _getMaxX(self):
return max(self.range[0])
min = property(fget=_getMinX)
max = property(fget=_getMaxX)
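# Hedged usage sketch (an addition, not part of the original module): a 1-D
# geometry with 4 bins on [0, 2].
if __name__ == '__main__':
    g = Geometry(4, [[0.0, 2.0]])
    print g            # bins: 4, range: [[0.0, 2.0]]
    print g.edges      # approximately [ 0.   0.5  1.   1.5  2. ]
    print g.centers    # approximately [ 0.25  0.75  1.25  1.75]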
|
{"hexsha": "9943a954b6c98669a7f2d794d8606fb4a934d9b6", "size": 1826, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/branches/Pre-Prospectus/python/SourceFiles/Geometry.py", "max_stars_repo_name": "jlconlin/PhDThesis", "max_stars_repo_head_hexsha": "8e704613721a800ce1c59576e94f40fa6f7cd986", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Code/branches/Pre-Prospectus/python/SourceFiles/Geometry.py", "max_issues_repo_name": "jlconlin/PhDThesis", "max_issues_repo_head_hexsha": "8e704613721a800ce1c59576e94f40fa6f7cd986", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/branches/Pre-Prospectus/python/SourceFiles/Geometry.py", "max_forks_repo_name": "jlconlin/PhDThesis", "max_forks_repo_head_hexsha": "8e704613721a800ce1c59576e94f40fa6f7cd986", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9344262295, "max_line_length": 87, "alphanum_fraction": 0.552026287, "include": true, "reason": "import scipy", "num_tokens": 468}
|
import numpy as np
import datetime
import MySQLdb  # needed by the rule* functions below
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn import linear_model
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import make_scorer
from sklearn import datasets
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
#from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.svm import SVC
from sklearn import tree
from sklearn import svm
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
def saveres(filename, uipair, label):
recPairs = ''
posenum = 0
sum = 0
for p in label:
if p == 1:
recPairs += str(int(uipair[sum][0])) + ',' + str(int(uipair[sum][1])) + '\n' #uid,iid
posenum += 1
sum += 1
neganum = sum - posenum
percent = (float)(posenum) / neganum
print 'predict num of pos samples:' + str(posenum) + " ,num of neg examples:" + str(
neganum) + ",percent of both:" + str(percent)
fout = open(filename, "w")
fout.write("user_id,item_id\n" + recPairs)
fout.close()
#Rule 1: was the item added to the cart yesterday?
def rule1(uipairs, lastday, feat=[], label=[]):
conn = MySQLdb.connect(host=sqlhost, user=sqluser, passwd=sqlpswd, db=sqldb, charset='utf8', port=3306)
cur = conn.cursor()
sql = "select DISTINCT user_id,item_id from train_user_target_item_new where behavior_type = 3 and date(time) = '" + lastday + "'"
cur.execute(sql)
results = cur.fetchall()
ui_list = uipairs.tolist()
    if len(label) != 0:
        new_uipairs = []
        new_feat = []
        new_label = []
        for row in results:
            uipair = str(row[0]) + ',' + str(row[1])
            if uipair in ui_list:
                i = ui_list.index(uipair)
                new_feat.append(feat[i])
                new_uipairs.append(uipairs[i])
                new_label.append(label[i])
        posnum = len([a for a in new_label if a == 1])  # positive samples
        negnum = len(new_label) - posnum
        print 'after rule1 clean,sample posenum is ' + str(posnum) + ' and negnum is ' + str(negnum)
        return np.array(new_uipairs), np.array(new_feat), np.array(new_label)
#Rule 2: (user, item) pairs recently added to favorites
def rule2(uipairs, beginday, endday, feat, label):
conn = MySQLdb.connect(host=sqlhost, user=sqluser, passwd=sqlpswd, db=sqldb, charset='utf8', port=3306)
cur = conn.cursor()
sql = "select DISTINCT user_id,item_id from train_user_target_item_new where behavior_type = 3 and date(time) >= '" + beginday + "' and date(time) <= '" + endday + "'"
cur.execute(sql)
results = cur.fetchall()
    ui_list = uipairs.tolist()
    new_uipairs = []
    new_feat = []
    new_label = []
    for row in results:
        uipair = str(row[0]) + ',' + str(row[1])
        if uipair in ui_list:
            i = ui_list.index(uipair)
            new_feat.append(feat[i])
            new_uipairs.append(uipairs[i])
            new_label.append(label[i])
    posnum = len([a for a in new_label if a == 1])  # positive samples
    negnum = len(new_label) - posnum
    print 'after rule2 clean,sample posenum is ' + str(posnum) + ' and negnum is ' + str(negnum)
    return np.array(new_uipairs), np.array(new_feat), np.array(new_label)
#Rule 3: has the item already been purchased?
def rule3(uipairs, lastday, feat=0, label=0):
conn = MySQLdb.connect(host=sqlhost, user=sqluser, passwd=sqlpswd, db=sqldb, charset='utf8', port=3306)
cur = conn.cursor()
sql = "select DISTINCT user_id,item_id from train_user_target_item_new where behavior_type = 4 and date(time) = '" + lastday + "'"
cur.execute(sql)
results = cur.fetchall()
rule2 = []
for row in results:
uipair = str(row[0]) + ',' + str(row[1])
rule2.append(uipair)
    #iterate in reverse so np.delete does not invalidate later indices
    for i in range(len(uipairs) - 1, -1, -1):
        uipair = str(int(uipairs[i][0])) + ',' + str(int(uipairs[i][1]))
        if uipair in rule2:
            uipairs = np.delete(uipairs, i, 0)
            label = np.delete(label, i, 0)
            feat = np.delete(feat, i, 0)
    posnum = len([a for a in label if a == 1])  # positive samples
    negnum = len(label) - posnum
    print 'after rule3 clean,sample posenum is ' + str(posnum) + ' and negnum is ' + str(negnum)
    return uipairs, feat, label
#Rule 4: no purchase behavior, abnormal click behavior
def rule4(uipairs, feat=0, label=0):
conn = MySQLdb.connect(host=sqlhost, user=sqluser, passwd=sqlpswd, db=sqldb, charset='utf8', port=3306)
cur = conn.cursor()
sql = 'select DISTINCT user_id from train_user_target_item_new where behavior_type = 4'
cur.execute(sql)
results = cur.fetchall()
rule3 = set()
for row in results:
rule3.add(row[0])
    #iterate in reverse so np.delete does not invalidate later indices
    for i in range(len(uipairs) - 1, -1, -1):
        user_id = int(uipairs[i].split(',')[0])
        if user_id not in rule3:
            uipairs = np.delete(uipairs, i, 0)
            label = np.delete(label, i, 0)
            feat = np.delete(feat, i, 0)
    posnum = len([a for a in label if a == 1])  # positive samples
    negnum = len(label) - posnum
    print 'after rule4 clean,sample posenum is ' + str(posnum) + ' and negnum is ' + str(negnum)
    return uipairs, feat, label
def rulepool4train(uipairs, lastday, feat, label):
#rule1(uipairs,lastday,feat,label)
#rule2(uipairs,lastday,feat,label)
#rule3(uipairs,lastday,feat,label)
    posnum = len([a for a in label if a == 1])  # positive samples
negnum = len(label) - posnum
print 'after rule pool clean,sample posenum is ' + str(posnum) + ' and negnum is ' + str(negnum)
return uipairs, feat, label
#rule pool
def rulepool4result(uipairs, lastday, probs, threshold):
    cleanedpairs = []
    cleaningpairs = []
    i = 0
    for prob in probs:
        if prob < threshold:
            cleaningpairs.append(uipairs[i])
        else:
            cleanedpairs.append(uipairs[i])
        i += 1
    #the low-confidence pairs could be re-checked against the rules here; the
    #rule functions expect (uipairs, lastday, feat, label) arrays, so the calls
    #stay disabled for now, as in rulepool4train
    #rule1(cleaningpairs, lastday)
    #rule2(cleaningpairs, lastday)
    #rule3(cleaningpairs, lastday)
    cleanedpairs.extend(cleaningpairs)
    return cleanedpairs
def prepare4test(file4feat):
matrix_file = open(file4feat)
matrix = np.loadtxt(matrix_file, skiprows=1, delimiter=',')
row, column = matrix.shape
train_ui = matrix[:, 0:2]
train_x = matrix[:, 2:column]
matrix_file.close()
print "load " + file4feat + " completed"
return train_ui, train_x
def prepare4train(file4feat):
matrix_file = open(file4feat)
matrix = np.loadtxt(matrix_file, skiprows=1, delimiter=',')
row, column = matrix.shape
train_ui = matrix[:, 0:2]
train_x = matrix[:, 2:column - 1]
train_y = matrix[:, -1]
matrix_file.close()
print "load " + file4feat + " completed"
return train_ui, train_x, train_y
def train(file4feat):
uipair, feat, label = prepare4train(file4feat)
print(label)
    feat = preprocessing.scale(feat)
    #shuffle features and labels together so the rows stay aligned
    perm = np.random.permutation(len(feat))
    feat = feat[perm]
    label = label[perm]
estimators = {}
#estimators['LR'] = linear_model.LogisticRegression(class_weight={1: 25, 0: 1}) #class_weight = {1:1.5,0:1}
estimators['RF'] = RandomForestClassifier()
#estimators['GBDT'] = GradientBoostingClassifier()
#estimators['bayes'] = GaussianNB()
#estimators['tree'] = tree.DecisionTreeClassifier()
#estimators['SVM'] = svm.SVC()
tuned_parameters = {}
#tuned_parameters['LR'] = [{'C': [1,0.1,0.001],'penalty':['l2','l1'],'class_weight':[{1:25,0:1}, 'auto']}]
#tuned_parameters['RF'] = [{'n_estimators': [10,100], 'max_depth': [1,3,5,7,9]}]
#tuned_parameters['GBDT'] = [{'n_estimators': [10,100], 'max_depth': [1,3,5,7,9]}]
#tuned_parameters['bayes'] = [{'alpha': [1,0,10]}]
#tuned_parameters['tree'] = [{'max_depth':[1,10,100,1000,None]}]
#tuned_parameters['SVM'] = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],'C': [1, 10, 100, 1000]},{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
starttime = datetime.datetime.now()
for k in estimators.keys():
print("# Tuning hyper-parameters for %s" % k)
estimators[k].fit(feat, label)
#label_pred = estimators[k].predict(feat)
return estimators
def predict(estimators, file4feat):
    uipair, feat = prepare4test(file4feat)
    feat = preprocessing.scale(feat)
    for k in estimators.keys():
        label_pred = estimators[k].predict(feat)
        #rulepool4result needs per-pair probabilities and the last day of the
        #window; neither is available in this scope, so the call is disabled
        #rulepool4result(uipair, lastday, probs, 0.6)
        saveres('predict_result' + k + '.csv', uipair, label_pred)
        num = len([a for a in label_pred if a == 1])
        print 'num of pos samples:' + str(num)
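#Hedged sketch (an addition): one way to obtain the per-pair probabilities that
#rulepool4result expects; assumes the estimator implements predict_proba
#(RandomForestClassifier does) and that a lastday string is supplied by the
#caller. predict_with_rules is a hypothetical helper, not part of the original.
def predict_with_rules(estimators, file4feat, lastday, threshold=0.6):
    uipair, feat = prepare4test(file4feat)
    feat = preprocessing.scale(feat)
    for k in estimators.keys():
        probs = estimators[k].predict_proba(feat)[:, 1]  # P(label == 1)
        pairs = [str(int(u)) + ',' + str(int(i)) for u, i in uipair]
        kept = rulepool4result(pairs, lastday, probs, threshold)
        print 'kept ' + str(len(kept)) + ' pairs after rule cleaning'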
trainData = "/home/laboratory/github/data/train/features.txt.sample"
testData = "/home/laboratory/github/data/test/features.txt"
estimators = train(trainData)
predict(estimators,testData)
|
{"hexsha": "ac14eca7a99d6a4767afc6b92f071df1d15ca2ba", "size": 8752, "ext": "py", "lang": "Python", "max_stars_repo_path": "searchEngine/recommand/model.py", "max_stars_repo_name": "Og192/homeWork", "max_stars_repo_head_hexsha": "b64b6a67699816f46fd0129a9cff31a27175d711", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-12-13T13:29:56.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-14T00:34:12.000Z", "max_issues_repo_path": "searchEngine/recommand/model.py", "max_issues_repo_name": "Og192/homeWork", "max_issues_repo_head_hexsha": "b64b6a67699816f46fd0129a9cff31a27175d711", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "searchEngine/recommand/model.py", "max_forks_repo_name": "Og192/homeWork", "max_forks_repo_head_hexsha": "b64b6a67699816f46fd0129a9cff31a27175d711", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-12-31T12:37:10.000Z", "max_forks_repo_forks_event_max_datetime": "2016-12-31T12:37:10.000Z", "avg_line_length": 35.5772357724, "max_line_length": 171, "alphanum_fraction": 0.6404250457, "include": true, "reason": "import numpy", "num_tokens": 2598}
|
# -*- coding: utf-8 -*-
"""Human 3D pose and 2D projection generators."""
import theano as th
import theano.tensor as tt
import theano.tensor.slinalg as sla
from bvh import theano_renderer
from dgm.utils import (
partition, generator_decorator, multi_output_generator_decorator)
@generator_decorator
def bone_lengths_generator(u, consts):
"""Generate skeleton bone lengths from log-normal model."""
return tt.exp(consts['log_lengths_mean'] +
u.dot(consts['log_lengths_covar_chol']))
def joint_angles_cos_sin_vae_decoder(h, layers, n_joint_angle):
h = layers[0]['nonlinearity'](
h.dot(layers[0]['weights']) + layers[0]['biases'])
# intermediate layers with skip-connections
for layer in layers[1:-1]:
h = layer['nonlinearity'](
h.dot(layer['weights']) + layer['biases']) + h
h = layers[-1]['nonlinearity'](
h.dot(layers[-1]['weights']) + layers[-1]['biases'])
return h[:, :n_joint_angle * 2], tt.exp(0.5 * h[:, n_joint_angle * 2:])
@generator_decorator
def joint_angles_generator(u, consts):
"""Generate joint angles from VAE decoder model."""
h, n = partition(u, [consts['n_joint_angle_latent'],
consts['n_joint_angle'] * 2])
ang_cos_sin_mean, ang_cos_sin_std = joint_angles_cos_sin_vae_decoder(
h, consts['joint_angles_vae_decoder_layers'], consts['n_joint_angle'])
ang_cos_sin = ang_cos_sin_mean + ang_cos_sin_std * n
return tt.arctan2(ang_cos_sin[:, consts['n_joint_angle']:],
ang_cos_sin[:, :consts['n_joint_angle']])
def camera_generator(u, consts):
"""Generate camera parameters from (log-)normal model."""
cam_foc = tt.ones_like(u[:, 0]) * consts['cam_foc']
cam_pos = tt.concatenate([
consts['cam_pos_x_mean'] + consts['cam_pos_x_std'] * u[:, 0:1],
consts['cam_pos_y_mean'] + consts['cam_pos_y_std'] * u[:, 1:2],
tt.exp(consts['log_cam_pos_z_mean'] +
consts['log_cam_pos_z_std'] * u[:, 2:3])
], 1)
cam_ang = tt.ones_like(u[:, :3]) * consts['cam_ang']
return cam_foc, cam_pos, cam_ang
def joint_3d_pos_generator(u, consts):
"""Generate 3D joint positions.
Generates bone lengths and joint angles from respective models then uses
skeleton definition to convert to 3D joint positions in global coordinate
system.
"""
input_sizes = [consts['n_bone_length_input'],
consts['n_joint_angle_input']]
u_len, u_ang = partition(u, input_sizes)
bone_lengths = bone_lengths_generator(u_len, consts)
joint_angles = joint_angles_generator(u_ang, consts)
return tt.stack(theano_renderer.joint_positions_batch(
consts['skeleton'], joint_angles, consts['fixed_joint_angles'],
lengths=bone_lengths, lengths_map=consts['bone_lengths_map'],
skip=consts['joints_to_skip']), 2)
def root_position_generator(u, consts):
return u * consts['root_pos_std'] + consts['root_pos_mean']
def joint_3d_pos_var_root_generator(u, consts):
"""Generate 3D joint positions with variable root position.
Generates bone lengths and joint angles from respective models then uses
skeleton definition to convert to 3D joint positions in global coordinate
system.
"""
input_sizes = [consts['n_bone_length_input'],
consts['n_joint_angle_input'], 3]
u_len, u_ang, u_pos = partition(u, input_sizes)
bone_lengths = bone_lengths_generator(u_len, consts)
joint_angles = joint_angles_generator(u_ang, consts)
root_position = root_position_generator(u_pos, consts)
joint_pos_3d = tt.stack(theano_renderer.joint_positions_batch(
consts['skeleton'], joint_angles, consts['fixed_joint_angles'],
lengths=bone_lengths, lengths_map=consts['bone_lengths_map'],
skip=consts['joints_to_skip']), 2)
joint_pos_3d = tt.inc_subtensor(joint_pos_3d[:, :3, :], root_position[:, :, None])
return joint_pos_3d
@generator_decorator
def monocular_2d_proj_var_root_generator(u, consts):
"""Generate monocular 2D joint position projections with variable root position.
Generates bone lengths and joint angles from respective models then uses
skeleton definition to convert to 3D joint positions in global coordinate
system, before projecting to 2D image coordinates using a single generated
camera model.
"""
input_sizes = [consts['n_bone_length_input'] +
consts['n_joint_angle_input'] + 3,
consts['n_camera_input']]
u_ske, u_cam = partition(u, input_sizes)
joint_pos_3d = joint_3d_pos_var_root_generator(u_ske, consts)
cam_foc, cam_pos, cam_ang = camera_generator(u_cam, consts)
camera_matrix = theano_renderer.camera_matrix_batch(
cam_foc, cam_pos, cam_ang)
joint_pos_2d_hom = tt.batched_dot(camera_matrix, joint_pos_3d)
joint_pos_2d = (joint_pos_2d_hom[:, :2] /
joint_pos_2d_hom[:, 2][:, None, :])
return joint_pos_2d
@generator_decorator
def monocular_2d_proj_generator(u, consts):
"""Generate monocular 2D joint position projections.
Generates bone lengths and joint angles from respective models then uses
skeleton definition to convert to 3D joint positions in global coordinate
system, before projecting to 2D image coordinates using a single generated
camera model.
"""
input_sizes = [consts['n_bone_length_input'] +
consts['n_joint_angle_input'],
consts['n_camera_input']]
u_ske, u_cam = partition(u, input_sizes)
joint_pos_3d = joint_3d_pos_generator(u_ske, consts)
cam_foc, cam_pos, cam_ang = camera_generator(u_cam, consts)
camera_matrix = theano_renderer.camera_matrix_batch(
cam_foc, cam_pos, cam_ang)
joint_pos_2d_hom = tt.batched_dot(camera_matrix, joint_pos_3d)
joint_pos_2d = (joint_pos_2d_hom[:, :2] /
joint_pos_2d_hom[:, 2][:, None, :])
return joint_pos_2d
@generator_decorator
def noisy_monocular_2d_proj_generator(u, consts):
"""Generate noisy monocular 2D joint position projections.
Generates bone lengths and joint angles from respective models then uses
skeleton definition to convert to 3D joint positions in global coordinate
system, before projecting to 2D image coordinates using a single generated
camera model and adding Gaussian observation noise to projections.
"""
input_sizes = [consts['n_bone_length_input'] +
consts['n_joint_angle_input'] +
consts['n_camera_input'],
consts['n_joint'] * 2]
u_pos, u_noi = partition(u, input_sizes)
return (monocular_2d_proj_generator(u_pos, consts) +
consts['output_noise_std'] * u_noi)
@generator_decorator
def binocular_2d_proj_generator(u, consts):
"""Generate binocular 2D joint position projections.
Generates bone lengths and joint angles from respective models then uses
skeleton definition to convert to 3D joint positions in global coordinate
system, before projecting to two sets of 2D image coordinates using two
offset generated camera models.
"""
n_batch = u.shape[0]
input_sizes = [consts['n_bone_length_input'] +
consts['n_joint_angle_input'],
consts['n_camera_input']]
u_ske, u_cam, = partition(u, input_sizes)
joint_pos_3d = joint_3d_pos_generator(u_ske, consts)
cam_foc, cam_pos, cam_ang = camera_generator(u_cam, consts)
cam_mtx_1 = theano_renderer.camera_matrix_batch(
cam_foc, cam_pos + consts['cam_pos_offset'],
cam_ang + consts['cam_ang_offset'])
cam_mtx_2 = theano_renderer.camera_matrix_batch(
cam_foc, cam_pos - consts['cam_pos_offset'],
cam_ang - consts['cam_ang_offset'])
joint_pos_2d_hom_1 = tt.batched_dot(cam_mtx_1, joint_pos_3d)
joint_pos_2d_1 = (joint_pos_2d_hom_1[:, :2] /
joint_pos_2d_hom_1[:, 2][:, None, :])
joint_pos_2d_hom_2 = tt.batched_dot(cam_mtx_2, joint_pos_3d)
joint_pos_2d_2 = (joint_pos_2d_hom_2[:, :2] /
joint_pos_2d_hom_2[:, 2][:, None, :])
return tt.concatenate(
[joint_pos_2d_1.reshape((n_batch, -1)),
joint_pos_2d_2.reshape((n_batch, -1))], 1)
@generator_decorator
def noisy_binocular_2d_proj_generator(u, consts):
"""Generate noisy binocular 2D joint position projections.
Generates bone lengths and joint angles from respective models then uses
skeleton definition to convert to 3D joint positions in global coordinate
system, before projecting to two sets of 2D image coordinates using two
offset generated camera models and adding Gaussian observation noise to
projections.
"""
input_sizes = [consts['n_bone_length_input'] +
consts['n_joint_angle_input'] +
consts['n_camera_input'],
consts['n_joint'] * 4]
u_pos, u_noi = partition(u, input_sizes)
return (binocular_2d_proj_generator(u_pos, consts) +
consts['output_noise_std'] * u_noi)
def inputs_to_state(u, consts):
input_sizes = [consts['n_bone_length_input'],
consts['n_joint_angle_latent'],
consts['n_joint_angle'] * 2,
consts['n_camera_input']]
u_len, joint_ang_latent, joint_ang_noise, u_cam = partition(u, input_sizes)
log_bone_lengths = (consts['log_lengths_mean'] +
u_len.dot(consts['log_lengths_covar_chol']))
joint_ang_cos_sin_mean, joint_ang_cos_sin_log_var = (
consts['joint_angles_cos_sin_vae'].x_gvn_z(joint_ang_latent))
joint_ang_cos_sin = tt.squeeze(
joint_ang_cos_sin_mean +
tt.exp(0.5 * joint_ang_cos_sin_log_var) * joint_ang_noise)
cam_pos_x = consts['cam_pos_x_mean'] + consts['cam_pos_x_std'] * u_cam[0:1]
cam_pos_y = consts['cam_pos_y_mean'] + consts['cam_pos_y_std'] * u_cam[1:2]
log_cam_pos_z = (consts['log_cam_pos_z_mean'] +
consts['log_cam_pos_z_std'] * u_cam[2:3])
return tt.concatenate([
log_bone_lengths, joint_ang_latent, joint_ang_cos_sin,
cam_pos_x, cam_pos_y, log_cam_pos_z], 0)
def joint_3d_pos_generator_hier(state, consts):
state_partition = [
consts['n_bone_length_input'],
consts['n_joint_angle_latent'],
consts['n_joint_angle'] * 2,
]
log_bone_lengths, joint_ang_latent, joint_ang_cos_sin = (
partition(state, state_partition))
joint_angles = tt.arctan2(joint_ang_cos_sin.T[consts['n_joint_angle']:].T,
joint_ang_cos_sin.T[:consts['n_joint_angle']].T)
bone_lengths = tt.exp(log_bone_lengths)
return tt.squeeze(tt.stack(theano_renderer.joint_positions_batch(
consts['skeleton'], joint_angles, consts['fixed_joint_angles'],
lengths=bone_lengths, lengths_map=consts['bone_lengths_map'],
skip=consts['joints_to_skip']), 2))
def energy_func_hier_monocular(state, y_data, consts):
state_partition = [
consts['n_bone_length_input'],
consts['n_joint_angle_latent'],
consts['n_joint_angle'] * 2,
1, 1, 1
]
(log_bone_lengths, joint_ang_latent,
joint_ang_cos_sin, cam_pos_x,
cam_pos_y, log_cam_pos_z) = partition(state, state_partition)
ang_cos_sin_mean, ang_cos_sin_std = joint_angles_cos_sin_vae_decoder(
joint_ang_latent, consts['joint_angles_vae_decoder_layers'],
consts['n_joint_angle'])
joint_angles = tt.arctan2(joint_ang_cos_sin[consts['n_joint_angle']:],
joint_ang_cos_sin[:consts['n_joint_angle']])
bone_lengths = tt.exp(log_bone_lengths)
joint_pos_3d = tt.stack(theano_renderer.joint_positions(
consts['skeleton'], joint_angles, consts['fixed_joint_angles'],
lengths=bone_lengths, lengths_map=consts['bone_lengths_map'],
skip=consts['joints_to_skip']), 1)
cam_foc = tt.exp(consts['cam_foc'])
cam_pos = tt.concatenate([cam_pos_x, cam_pos_y, tt.exp(log_cam_pos_z)])
cam_ang = consts['cam_ang']
cam_mtx = theano_renderer.camera_matrix(cam_foc, cam_pos, cam_ang)
joint_pos_2d_hom = cam_mtx.dot(joint_pos_3d)
joint_pos_2d = joint_pos_2d_hom[:2] / joint_pos_2d_hom[2]
y_model = joint_pos_2d.flatten()
log_lengths_minus_mean = log_bone_lengths - consts['log_lengths_mean']
return 0.5 * (
(((y_data - y_model) / consts['output_noise_std'])**2).sum() +
(((joint_ang_cos_sin - ang_cos_sin_mean) / ang_cos_sin_std)**2).sum() +
joint_ang_latent.dot(joint_ang_latent) +
log_lengths_minus_mean.dot(sla.solve_upper_triangular(
consts['log_lengths_covar_chol'],
sla.solve_lower_triangular(
consts['log_lengths_covar_chol'].T,
log_lengths_minus_mean)
)) +
((cam_pos_x - consts['cam_pos_x_mean']) / consts['cam_pos_x_std'])**2 +
((cam_pos_y - consts['cam_pos_y_mean']) / consts['cam_pos_y_std'])**2 +
((log_cam_pos_z - consts['log_cam_pos_z_mean']) /
consts['log_cam_pos_z_std'])**2
)[0]
def energy_func_hier_binocular(state, y_data, consts):
state_partition = [
consts['n_bone_length_input'],
consts['n_joint_angle_latent'],
consts['n_joint_angle'] * 2,
1, 1, 1
]
(log_bone_lengths, joint_ang_latent,
joint_ang_cos_sin, cam_pos_x,
cam_pos_y, log_cam_pos_z) = partition(state, state_partition)
ang_cos_sin_mean, ang_cos_sin_std = joint_angles_cos_sin_vae_decoder(
joint_ang_latent[None, :], consts['joint_angles_vae_decoder_layers'],
consts['n_joint_angle'])
joint_angles = tt.arctan2(joint_ang_cos_sin[consts['n_joint_angle']:],
joint_ang_cos_sin[:consts['n_joint_angle']])
bone_lengths = tt.exp(log_bone_lengths)
joint_pos_3d = tt.stack(theano_renderer.joint_positions(
consts['skeleton'], joint_angles, consts['fixed_joint_angles'],
lengths=bone_lengths, lengths_map=consts['bone_lengths_map'],
skip=consts['joints_to_skip']), 1)
cam_foc = consts['cam_foc']
cam_pos = tt.concatenate([cam_pos_x, cam_pos_y, tt.exp(log_cam_pos_z)])
cam_ang = consts['cam_ang']
cam_mtx_1 = theano_renderer.camera_matrix(
cam_foc, cam_pos + consts['cam_pos_offset'],
cam_ang + consts['cam_ang_offset'])
cam_mtx_2 = theano_renderer.camera_matrix(
cam_foc, cam_pos - consts['cam_pos_offset'],
cam_ang - consts['cam_ang_offset'])
joint_pos_2d_hom_1 = tt.dot(cam_mtx_1, joint_pos_3d)
joint_pos_2d_1 = joint_pos_2d_hom_1[:2] / joint_pos_2d_hom_1[2]
joint_pos_2d_hom_2 = tt.dot(cam_mtx_2, joint_pos_3d)
joint_pos_2d_2 = joint_pos_2d_hom_2[:2] / joint_pos_2d_hom_2[2]
y_model = tt.concatenate([joint_pos_2d_1.flatten(),
joint_pos_2d_2.flatten()], 0)
log_lengths_minus_mean = log_bone_lengths - consts['log_lengths_mean']
return 0.5 * (
(y_data - y_model).dot(y_data - y_model) /
consts['output_noise_std']**2 +
(((joint_ang_cos_sin - ang_cos_sin_mean) / ang_cos_sin_std)**2).sum() +
joint_ang_latent.dot(joint_ang_latent) +
log_lengths_minus_mean.dot(sla.solve_upper_triangular(
consts['log_lengths_covar_chol'],
sla.solve_lower_triangular(
consts['log_lengths_covar_chol'].T,
log_lengths_minus_mean)
)) +
((cam_pos_x - consts['cam_pos_x_mean']) / consts['cam_pos_x_std'])**2 +
((cam_pos_y - consts['cam_pos_y_mean']) / consts['cam_pos_y_std'])**2 +
((log_cam_pos_z - consts['log_cam_pos_z_mean']) /
consts['log_cam_pos_z_std'])**2
)[0]
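if __name__ == '__main__':
    # Hedged smoke test, not part of the original module: compiles the
    # (undecorated) camera generator with placeholder hyperparameters.
    # All constant values below are illustrative, not calibrated.
    import numpy as np
    consts = {
        'cam_foc': 1.,
        'cam_pos_x_mean': 0., 'cam_pos_x_std': 1.,
        'cam_pos_y_mean': 0., 'cam_pos_y_std': 1.,
        'log_cam_pos_z_mean': 0., 'log_cam_pos_z_std': 0.5,
        'cam_ang': np.zeros(3),
    }
    u = tt.matrix('u')
    foc, pos, ang = camera_generator(u, consts)
    sample = th.function([u], [foc, pos, ang])
    outs = sample(np.random.randn(2, 3).astype(th.config.floatX))
    print([o.shape for o in outs])  # expect (2,), (2, 3), (2, 3)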
|
{"hexsha": "a6ef024abb3b49efec40b2bd7ad1a36a369a84d0", "size": 15760, "ext": "py", "lang": "Python", "max_stars_repo_path": "dgm/pose.py", "max_stars_repo_name": "matt-graham/differentiable-generative-models", "max_stars_repo_head_hexsha": "6b450e7a846a416138cb5383a0c574f5cb945843", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-03-15T16:41:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-26T05:23:52.000Z", "max_issues_repo_path": "dgm/pose.py", "max_issues_repo_name": "matt-graham/differentiable-generative-models", "max_issues_repo_head_hexsha": "6b450e7a846a416138cb5383a0c574f5cb945843", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dgm/pose.py", "max_forks_repo_name": "matt-graham/differentiable-generative-models", "max_forks_repo_head_hexsha": "6b450e7a846a416138cb5383a0c574f5cb945843", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.9002849003, "max_line_length": 86, "alphanum_fraction": 0.6828680203, "include": true, "reason": "import theano", "num_tokens": 4005}
|
/*===========================================================================
This library is released under the MIT license. See FSBAllocator.html
for further information and documentation.
Copyright (c) 2008-2011 Juha Nieminen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
=============================================================================*/
#ifndef INCLUDE_FSBALLOCATOR_HH
#define INCLUDE_FSBALLOCATOR_HH
#include <new>
#include <cassert>
#include <vector>
#include <limits> // for std::numeric_limits used in max_size()
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_STD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_BOOST
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OPENMP
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_PTHREAD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC
#define FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
//#include <mutex>
//typedef std::mutex FSBAllocator_Mutex;
#include <atomic>
class FSBAllocator_Mutex
{
public:
#ifdef _MSC_VER
FSBAllocator_Mutex() {
lck.clear();
}
#endif
inline void lock()
{
while (lck.test_and_set(std::memory_order_acquire))
{
}
}
inline void unlock()
{
lck.clear(std::memory_order_release);
}
private:
#ifdef _MSC_VER
std::atomic_flag lck;
#else
std::atomic_flag lck = ATOMIC_FLAG_INIT;
#endif
};
#endif
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_BOOST
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_STD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OPENMP
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_PTHREAD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC
#define FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
#include <boost/thread.hpp>
typedef boost::mutex FSBAllocator_Mutex;
#endif
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OPENMP
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_STD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_BOOST
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_PTHREAD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC
#define FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
#include <omp.h>
class FSBAllocator_Mutex
{
omp_lock_t mutex;
public:
FSBAllocator_Mutex() { omp_init_lock(&mutex); }
~FSBAllocator_Mutex() { omp_destroy_lock(&mutex); }
void lock() { omp_set_lock(&mutex); }
void unlock() { omp_unset_lock(&mutex); }
};
#endif
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_PTHREAD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_STD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_BOOST
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OPENMP
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC
#define FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
#include <pthread.h>
class FSBAllocator_Mutex
{
pthread_mutex_t mutex;
public:
FSBAllocator_Mutex() { pthread_mutex_init(&mutex, NULL); }
~FSBAllocator_Mutex() { pthread_mutex_destroy(&mutex); }
void lock() { pthread_mutex_lock(&mutex); }
void unlock() { pthread_mutex_unlock(&mutex); }
};
#endif
#if defined(FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC) || defined(FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC_WITH_SCHED)
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_STD
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_BOOST
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OPENMP
#undef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_PTHREAD
#define FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC_WITH_SCHED
#include <sched.h>
#endif
class FSBAllocator_Mutex
{
volatile int lockFlag;
public:
FSBAllocator_Mutex(): lockFlag(0) {}
void lock()
{
while(!__sync_bool_compare_and_swap(&lockFlag, 0, 1))
{
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_GCC_WITH_SCHED
sched_yield();
#endif
}
}
void unlock() { lockFlag = 0; }
};
#endif
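/* Hedged note (not part of the original header): locking is opt-in.
   Define exactly one FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_* macro
   before this header is included, e.g.

       g++ -DFSBALLOCATOR_USE_THREAD_SAFE_LOCKING_STD main.cpp

   With no macro defined the allocators are single-threaded. */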
template<unsigned ElemSize>
class FSBAllocator_ElemAllocator
{
typedef std::size_t Data_t;
static const Data_t BlockElements = 512;
static const Data_t DSize = sizeof(Data_t);
static const Data_t ElemSizeInDSize = (ElemSize + (DSize-1)) / DSize;
static const Data_t UnitSizeInDSize = ElemSizeInDSize + 1;
static const Data_t BlockSize = BlockElements*UnitSizeInDSize;
class MemBlock
{
Data_t* block;
Data_t firstFreeUnitIndex, allocatedElementsAmount, endIndex;
public:
MemBlock():
block(0),
firstFreeUnitIndex(Data_t(-1)),
allocatedElementsAmount(0)
{}
bool isFull() const
{
return allocatedElementsAmount == BlockElements;
}
void clear()
{
delete[] block;
block = 0;
firstFreeUnitIndex = Data_t(-1);
}
void* allocate(Data_t vectorIndex)
{
if(firstFreeUnitIndex == Data_t(-1))
{
if(!block)
{
block = new Data_t[BlockSize];
if(!block) return 0;
endIndex = 0;
}
Data_t* retval = block + endIndex;
endIndex += UnitSizeInDSize;
retval[ElemSizeInDSize] = vectorIndex;
++allocatedElementsAmount;
return retval;
}
else
{
Data_t* retval = block + firstFreeUnitIndex;
firstFreeUnitIndex = *retval;
++allocatedElementsAmount;
return retval;
}
}
void deallocate(Data_t* ptr)
{
*ptr = firstFreeUnitIndex;
firstFreeUnitIndex = ptr - block;
if(--allocatedElementsAmount == 0)
clear();
}
};
struct BlocksVector
{
std::vector<MemBlock> data;
BlocksVector() { data.reserve(1024); }
~BlocksVector()
{
for(std::size_t i = 0; i < data.size(); ++i)
data[i].clear();
}
};
static BlocksVector blocksVector;
static std::vector<Data_t> blocksWithFree;
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
static FSBAllocator_Mutex mutex;
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_BOOST
struct Lock: boost::mutex::scoped_lock
{
Lock(): boost::mutex::scoped_lock(mutex) {}
};
#else
struct Lock
{
Lock() { mutex.lock(); }
~Lock() { mutex.unlock(); }
};
#endif
#endif
public:
static void* allocate()
{
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
Lock lock;
#endif
if(blocksWithFree.empty())
{
blocksWithFree.push_back(blocksVector.data.size());
blocksVector.data.push_back(MemBlock());
}
const Data_t index = blocksWithFree.back();
MemBlock& block = blocksVector.data[index];
void* retval = block.allocate(index);
if(block.isFull())
blocksWithFree.pop_back();
return retval;
}
static void deallocate(void* ptr)
{
if(!ptr) return;
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
Lock lock;
#endif
Data_t* unitPtr = (Data_t*)ptr;
const Data_t blockIndex = unitPtr[ElemSizeInDSize];
MemBlock& block = blocksVector.data[blockIndex];
if(block.isFull())
blocksWithFree.push_back(blockIndex);
block.deallocate(unitPtr);
}
};
template<unsigned ElemSize>
typename FSBAllocator_ElemAllocator<ElemSize>::BlocksVector
FSBAllocator_ElemAllocator<ElemSize>::blocksVector;
template<unsigned ElemSize>
std::vector<typename FSBAllocator_ElemAllocator<ElemSize>::Data_t>
FSBAllocator_ElemAllocator<ElemSize>::blocksWithFree;
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
template<unsigned ElemSize>
FSBAllocator_Mutex FSBAllocator_ElemAllocator<ElemSize>::mutex;
#endif
template<unsigned ElemSize>
class FSBAllocator2_ElemAllocator
{
static const std::size_t BlockElements = 1024;
static const std::size_t DSize = sizeof(std::size_t);
static const std::size_t ElemSizeInDSize = (ElemSize + (DSize-1)) / DSize;
static const std::size_t BlockSize = BlockElements*ElemSizeInDSize;
struct Blocks
{
std::vector<std::size_t*> ptrs;
Blocks()
{
ptrs.reserve(256);
ptrs.push_back(new std::size_t[BlockSize]);
}
~Blocks()
{
for(std::size_t i = 0; i < ptrs.size(); ++i)
delete[] ptrs[i];
}
};
static Blocks blocks;
static std::size_t headIndex;
static std::size_t* freeList;
static std::size_t allocatedElementsAmount;
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
static FSBAllocator_Mutex mutex;
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_BOOST
struct Lock: boost::mutex::scoped_lock
{
Lock(): boost::mutex::scoped_lock(mutex) {}
};
#else
struct Lock
{
Lock() { mutex.lock(); }
~Lock() { mutex.unlock(); }
};
#endif
#endif
static void freeAll()
{
for(std::size_t i = 1; i < blocks.ptrs.size(); ++i)
delete[] blocks.ptrs[i];
blocks.ptrs.resize(1);
headIndex = 0;
freeList = 0;
}
public:
static void* allocate()
{
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
Lock lock;
#endif
++allocatedElementsAmount;
if(freeList)
{
std::size_t* retval = freeList;
freeList = reinterpret_cast<std::size_t*>(*freeList);
return retval;
}
if(headIndex == BlockSize)
{
blocks.ptrs.push_back(new std::size_t[BlockSize]);
headIndex = 0;
}
std::size_t* retval = &(blocks.ptrs.back()[headIndex]);
headIndex += ElemSizeInDSize;
return retval;
}
static void deallocate(void* ptr)
{
if(ptr)
{
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
Lock lock;
#endif
std::size_t* sPtr = (std::size_t*)ptr;
*sPtr = reinterpret_cast<std::size_t>(freeList);
freeList = sPtr;
if(--allocatedElementsAmount == 0)
freeAll();
}
}
static void cleanSweep(std::size_t unusedValue = std::size_t(-1))
{
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
Lock lock;
#endif
while(freeList)
{
std::size_t* current = freeList;
freeList = reinterpret_cast<std::size_t*>(*freeList);
*current = unusedValue;
}
for(std::size_t i = headIndex; i < BlockSize; i += ElemSizeInDSize)
blocks.ptrs.back()[i] = unusedValue;
for(std::size_t blockInd = 1; blockInd < blocks.ptrs.size();)
{
std::size_t* block = blocks.ptrs[blockInd];
std::size_t freeAmount = 0;
for(std::size_t i = 0; i < BlockSize; i += ElemSizeInDSize)
if(block[i] == unusedValue)
++freeAmount;
if(freeAmount == BlockElements)
{
delete[] block;
blocks.ptrs[blockInd] = blocks.ptrs.back();
blocks.ptrs.pop_back();
}
else ++blockInd;
}
const std::size_t* lastBlock = blocks.ptrs.back();
for(headIndex = BlockSize; headIndex > 0; headIndex -= ElemSizeInDSize)
if(lastBlock[headIndex-ElemSizeInDSize] != unusedValue)
break;
const std::size_t lastBlockIndex = blocks.ptrs.size() - 1;
for(std::size_t blockInd = 0; blockInd <= lastBlockIndex; ++blockInd)
{
std::size_t* block = blocks.ptrs[blockInd];
for(std::size_t i = 0; i < BlockSize; i += ElemSizeInDSize)
{
if(blockInd == lastBlockIndex && i == headIndex)
break;
if(block[i] == unusedValue)
deallocate(block + i);
}
}
}
};
template<unsigned ElemSize>
typename FSBAllocator2_ElemAllocator<ElemSize>::Blocks
FSBAllocator2_ElemAllocator<ElemSize>::blocks;
template<unsigned ElemSize>
std::size_t FSBAllocator2_ElemAllocator<ElemSize>::headIndex = 0;
template<unsigned ElemSize>
std::size_t* FSBAllocator2_ElemAllocator<ElemSize>::freeList = 0;
template<unsigned ElemSize>
std::size_t FSBAllocator2_ElemAllocator<ElemSize>::allocatedElementsAmount = 0;
#ifdef FSBALLOCATOR_USE_THREAD_SAFE_LOCKING_OBJECT
template<unsigned ElemSize>
FSBAllocator_Mutex FSBAllocator2_ElemAllocator<ElemSize>::mutex;
#endif
template<typename Ty>
class FSBAllocator
{
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef Ty *pointer;
typedef const Ty *const_pointer;
typedef Ty& reference;
typedef const Ty& const_reference;
typedef Ty value_type;
pointer address(reference val) const { return &val; }
const_pointer address(const_reference val) const { return &val; }
template<class Other>
struct rebind
{
typedef FSBAllocator<Other> other;
};
FSBAllocator() throw() {}
template<class Other>
FSBAllocator(const FSBAllocator<Other>&) throw() {}
template<class Other>
FSBAllocator& operator=(const FSBAllocator<Other>&) { return *this; }
pointer allocate(size_type count, const void* = 0)
{
assert(count == 1);
return static_cast<pointer>
(FSBAllocator_ElemAllocator<sizeof(Ty)>::allocate());
}
void deallocate(pointer ptr, size_type)
{
FSBAllocator_ElemAllocator<sizeof(Ty)>::deallocate(ptr);
}
void construct(pointer ptr, const Ty& val)
{
new ((void *)ptr) Ty(val);
}
void destroy(pointer ptr)
{
ptr->Ty::~Ty();
}
size_type max_size() const throw() {
//return 1;
return std::numeric_limits<size_type>::max() / sizeof(Ty);
}
};
template<typename Ty>
class FSBAllocator2
{
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef Ty *pointer;
typedef const Ty *const_pointer;
typedef Ty& reference;
typedef const Ty& const_reference;
typedef Ty value_type;
pointer address(reference val) const { return &val; }
const_pointer address(const_reference val) const { return &val; }
template<class Other>
struct rebind
{
typedef FSBAllocator2<Other> other;
};
FSBAllocator2() throw() {}
template<class Other>
FSBAllocator2(const FSBAllocator2<Other>&) throw() {}
template<class Other>
FSBAllocator2& operator=(const FSBAllocator2<Other>&) { return *this; }
pointer allocate(size_type count, const void* = 0)
{
assert(count == 1);
return static_cast<pointer>
(FSBAllocator2_ElemAllocator<sizeof(Ty)>::allocate());
}
void deallocate(pointer ptr, size_type)
{
FSBAllocator2_ElemAllocator<sizeof(Ty)>::deallocate(ptr);
}
void construct(pointer ptr, const Ty& val)
{
new ((void *)ptr) Ty(val);
}
void destroy(pointer ptr)
{
ptr->Ty::~Ty();
}
size_type max_size() const throw() {
// return 1;
return std::numeric_limits<size_type>::max() / sizeof(Ty);
}
void cleanSweep(std::size_t unusedValue = std::size_t(-1))
{
FSBAllocator2_ElemAllocator<sizeof(Ty)>::cleanSweep(unusedValue);
}
};
typedef FSBAllocator2<std::size_t> FSBRefCountAllocator;
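/* Hedged usage sketch (not part of the original header): both
   allocators assert count == 1 in allocate(), so they suit node-based
   containers that allocate one element at a time:

       #include <list>
       #include <set>
       std::list<int, FSBAllocator<int> > intList;
       std::set<int, std::less<int>, FSBAllocator2<int> > intSet;
*/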
#endif
|
{"hexsha": "5dd4c508b5230fe94d0295cd1d51890b7f308b82", "size": 16317, "ext": "hh", "lang": "C++", "max_stars_repo_path": "FSBAllocator/FSBAllocator.hh", "max_stars_repo_name": "r-lyeh/malloc-survey", "max_stars_repo_head_hexsha": "6da5aca6aa2720d64bff709c111a5d8a5fa7a1be", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 16.0, "max_stars_repo_stars_event_min_datetime": "2015-06-26T20:58:23.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-05T09:46:45.000Z", "max_issues_repo_path": "FSBAllocator/FSBAllocator.hh", "max_issues_repo_name": "r-lyeh/test-allocators", "max_issues_repo_head_hexsha": "6da5aca6aa2720d64bff709c111a5d8a5fa7a1be", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2015-07-22T22:48:56.000Z", "max_issues_repo_issues_event_max_datetime": "2015-07-22T22:48:56.000Z", "max_forks_repo_path": "FSBAllocator/FSBAllocator.hh", "max_forks_repo_name": "r-lyeh/test-allocators", "max_forks_repo_head_hexsha": "6da5aca6aa2720d64bff709c111a5d8a5fa7a1be", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2015-09-26T17:40:20.000Z", "max_forks_repo_forks_event_max_datetime": "2016-06-23T17:15:23.000Z", "avg_line_length": 26.7931034483, "max_line_length": 117, "alphanum_fraction": 0.6514065085, "num_tokens": 3899}
|
'''
Improved methods for finding nearest neighbours,
as well as some other tweaks to `.given` to better suit me.
'''
import pandas as pd
import numpy as np
from opt_nn.given import haversine, slow, make_data
def h_distance(p1, p2):
'''
Return haversine distance between two points.
(This wraps the given.haversine() function,
allowing us to more intuitively feed in the
two points (with `lng` and `lat` attributes)
that we are interested in finding the distance for.
'''
return haversine(p1.lng, p1.lat, p2.lng, p2.lat)
def compare_solutions(sol1, sol2=slow, df=None):
'''
Compare nearest neighbour indices returned by two solutions.
'''
if df is None:
df = make_data(100)
# need to copy df to compare answers as `slow()` modifies inplace
df1 = df.copy()
df2 = df.copy()
# use solution to generate answers
a1 = sol1(df1)
a2 = sol2(df2)
# compare equality of neighbour indices
return {'First': a1.neighbour_index,
'Second': a2.neighbour_index,
'Agreed': (a1.neighbour_index == a2.neighbour_index)}
class Distances():
'''
Make sure we never calculate a distance twice.
'''
def __init__(self, points_df, metric=h_distance):
'''
Initialize with dataframe of points.
'''
self.metric = metric
self.points = points_df
self.n = len(points_df)
self.table = np.zeros((self.n, self.n), float)
def lookup(self, i, j):
'''
Lookup distance between points i and j,
as indexed in the points dataframe.
'''
# take advantage of symmetry
if i > j:
i, j = j, i
# if points are the same point, no need to look
if i == j:
return 0
# if we have found the answer before, no need to again
elif self.table[i,j]:
return self.table[i,j]
# otherwise calculate, save, and return
else:
self.table[i,j] = self.metric(
self.points.iloc[i],
self.points.iloc[j])
return self.table[i,j]
def find_all(self):
'''Find all distances.'''
for i in range(self.n):
for j in range(i, self.n):
self.lookup(i,j)
def find_nn(self):
'''
Find all nearest neighbours and return index
and distance.
'''
self.find_all()
# fill in empty half of matrix
self.table = np.maximum(self.table.T, self.table)
# prevent zero distance between same point looking like minimum
self.table = np.where(self.table > 0, self.table, np.inf)
nn_df = pd.DataFrame([
np.min(self.table, axis=0),
np.argmin(self.table, axis=0)
]).T
nn_df.columns = ['distance_km', 'neighbour_index']
return nn_df
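# Hedged sanity check, not part of the original module: the memoised
# lookup is symmetric because (i, j) and (j, i) hit the same cached cell.
#
#     d = Distances(make_data(10))
#     assert d.lookup(2, 5) == d.lookup(5, 2)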
def less_slow(df):
'''
The simplest idea is to improve slow function
by not calculating d(x,y) if we know d(y,x)
since haversine metric is symmetric.
So we start the `j` loop from `i`, and also compare
whether the distance is the nearest for j.
'''
# Loop over each point in the dataframe
for i in range(len(df)):
# compare it to each other point in the dataframe
# (ADDITION:) that we have not yet compared it to
for j in range(i, len(df)):
# Calculate the distance
distance = haversine(df.loc[i, "lng"], df.loc[i, "lat"],
df.loc[j, "lng"], df.loc[j, "lat"])
# If the distance is 0 then it's the same point
if distance == 0:
continue
# If there is no distance set then this is the closest so far
if df.loc[i, "distance_km"] is None:
df.loc[i, "distance_km"] = distance
df.loc[i, "neighbour_index"] = j
# if this distance is closer than the previous best then
# lets use this one
elif df.loc[i, "distance_km"] > distance:
df.loc[i, "distance_km"] = distance
df.loc[i, "neighbour_index"] = j
# (ADDITION: We also need to do the symmetric bit)
if df.loc[j, "distance_km"] is None:
df.loc[j, "distance_km"] = distance
df.loc[j, "neighbour_index"] = i
# if this distance is closer than the previous best then
# lets use this one
elif df.loc[j, "distance_km"] > distance:
df.loc[j, "distance_km"] = distance
df.loc[j, "neighbour_index"] = i
# (end ADDITION)
return df
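if __name__ == '__main__':
    # Hedged smoke test, not part of the original module: the
    # symmetric-loop version should agree with the given slow baseline
    # on a small random dataset.
    result = compare_solutions(less_slow, slow, make_data(50))
    print(result['Agreed'].all())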
|
{"hexsha": "da1b4775ac3de8f6e17abd721cd288466a9130ac", "size": 4708, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/opt_nn/improved.py", "max_stars_repo_name": "peterprescott/optimize-nn", "max_stars_repo_head_hexsha": "643bbebef8c0846567a360f31172e50ae9a67186", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/opt_nn/improved.py", "max_issues_repo_name": "peterprescott/optimize-nn", "max_issues_repo_head_hexsha": "643bbebef8c0846567a360f31172e50ae9a67186", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-10-09T23:52:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-12T04:46:20.000Z", "max_forks_repo_path": "src/opt_nn/improved.py", "max_forks_repo_name": "peterprescott/optimize-nn", "max_forks_repo_head_hexsha": "643bbebef8c0846567a360f31172e50ae9a67186", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.425, "max_line_length": 73, "alphanum_fraction": 0.5645709431, "include": true, "reason": "import numpy", "num_tokens": 1110}
|
import numpy as np
import torch
def objective_function(
config,
model_objective,
model_cost,
task_feature_objective,
task_feature_cost,
x_mean_objective,
x_std_objective,
x_mean_cost,
x_std_cost,
y_mean_objective=None,
y_std_objective=None,
y_mean_cost=None,
y_std_cost=None,
log_objective=False,
with_noise=True,
):
Ht = np.repeat(task_feature_objective[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_objective) / x_std_objective).float()
output = model_objective.forward(x_norm).data.numpy()
mean = output[:, 0]
log_variance = output[:, 1]
if y_mean_objective is not None or y_std_objective is not None:
mean = mean * y_std_objective + y_mean_objective
        # undo output standardisation: the variance scales by std**2,
        # so the log-variance shifts additively by log(std**2)
        log_variance += np.log(y_std_objective**2)
feval = mean
if with_noise:
feval += np.random.randn() * np.sqrt(np.exp(log_variance))
if log_objective:
feval = np.exp(feval)
Ht = np.repeat(task_feature_cost[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_cost) / x_std_cost).float()
output = model_cost.forward(x_norm).data.numpy()
log_mean = output[:, 0]
log_log_variance = output[:, 1]
if y_mean_cost is not None or y_std_cost is not None:
log_mean = log_mean * y_std_cost + y_mean_cost
        # undo standardisation of the log-cost: the log-variance shifts
        # additively by log(std**2)
        log_log_variance += np.log(y_std_cost**2)
log_cost = log_mean
if with_noise:
log_cost += np.random.randn() * np.sqrt(np.exp(log_log_variance))
return feval[:, None], np.exp(log_cost)[:, None]
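if __name__ == "__main__":
    # Hedged smoke test, not part of the original module: exercises
    # objective_function with identity normalisation and a dummy
    # network emitting a (mean, log-variance) pair per row. The
    # linear layer is a stand-in for the trained surrogate models.
    import torch.nn as nn
    n_config, d_config, d_task = 4, 2, 3
    dummy = nn.Linear(d_config + d_task, 2)
    config = np.random.rand(n_config, d_config)
    task = np.zeros(d_task)
    zeros = np.zeros(d_config + d_task)
    ones = np.ones(d_config + d_task)
    feval, cost = objective_function(
        config, dummy, dummy, task, task,
        zeros, ones, zeros, ones, with_noise=False)
    print(feval.shape, cost.shape)  # expect (4, 1) (4, 1)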
|
{"hexsha": "1b69410cf31f15ee48d4b75dd61c77ddd7db9296", "size": 1655, "ext": "py", "lang": "Python", "max_stars_repo_path": "emukit/examples/profet/meta_benchmarks/meta_surrogates.py", "max_stars_repo_name": "EmuKit/Emukit", "max_stars_repo_head_hexsha": "2df951e42c82400192220eb18af428f3eb764f6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 272, "max_stars_repo_stars_event_min_datetime": "2018-09-18T11:56:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-10T22:21:25.000Z", "max_issues_repo_path": "emukit/examples/profet/meta_benchmarks/meta_surrogates.py", "max_issues_repo_name": "EmuKit/Emukit", "max_issues_repo_head_hexsha": "2df951e42c82400192220eb18af428f3eb764f6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 278, "max_issues_repo_issues_event_min_datetime": "2018-09-19T15:38:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-14T13:45:24.000Z", "max_forks_repo_path": "emukit/examples/profet/meta_benchmarks/meta_surrogates.py", "max_forks_repo_name": "EmuKit/Emukit", "max_forks_repo_head_hexsha": "2df951e42c82400192220eb18af428f3eb764f6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 88, "max_forks_repo_forks_event_min_datetime": "2018-09-18T11:56:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-23T13:33:55.000Z", "avg_line_length": 29.0350877193, "max_line_length": 79, "alphanum_fraction": 0.6682779456, "include": true, "reason": "import numpy", "num_tokens": 447}
|
! https://github.com/JuliaLang/julia/blob/master/test/perf/micro/perf.f90
module perf
use, intrinsic :: iso_fortran_env, only : REAL64,INT64, stderr=>error_unit
implicit none
contains
real(real64) function sysclock2ms(t)
! Convert a number of clock ticks, as returned by system_clock called
! with integer(int64) arguments, to milliseconds
integer(int64), intent(in) :: t
integer(int64) :: rate
real(real64) :: r
call system_clock(count_rate=rate)
r = 1000.d0 / rate
sysclock2ms = t * r
end function sysclock2ms
subroutine init_random_seed()
integer :: i, n, clock
integer, allocatable :: seed(:)
call random_seed(size=n)
allocate(seed(n))
call system_clock(count=clock)
seed = clock + 37 * [ (i - 1, i = 1, n) ]
call random_seed(put=seed)
end subroutine
subroutine assert(cond)
logical, intent(in) :: cond
if (.not. cond) error stop 'assertion failed, halting test'
end subroutine assert
End Module perf
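! Hedged usage sketch, not part of the original file: a small driver
! showing how sysclock2ms converts raw system_clock ticks.
program demo_perf
  use perf
  implicit none
  integer(int64) :: t0, t1
  call init_random_seed()
  call system_clock(t0)
  call system_clock(t1)
  print '(a,f0.6,a)', 'elapsed: ', sysclock2ms(t1 - t0), ' ms'
  call assert(t1 >= t0)
end program demo_perf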
|
{"hexsha": "7d9749a6e3109e586e4284ce092dd99049a1e2a5", "size": 949, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "perf.f90", "max_stars_repo_name": "scivision/zakharov", "max_stars_repo_head_hexsha": "3dadd53d29daf6ff8df6bf5d935557627e160448", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-12-13T18:04:20.000Z", "max_stars_repo_stars_event_max_datetime": "2016-12-13T18:04:20.000Z", "max_issues_repo_path": "perf.f90", "max_issues_repo_name": "scienceopen/zakharov", "max_issues_repo_head_hexsha": "3dadd53d29daf6ff8df6bf5d935557627e160448", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "perf.f90", "max_forks_repo_name": "scienceopen/zakharov", "max_forks_repo_head_hexsha": "3dadd53d29daf6ff8df6bf5d935557627e160448", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-22T12:27:29.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-22T12:27:29.000Z", "avg_line_length": 24.3333333333, "max_line_length": 76, "alphanum_fraction": 0.7186512118, "num_tokens": 277}
|
\documentclass[10pt]{article}
\usepackage{fullpage}
\usepackage{url}
\pagestyle{empty}
% Customize section headings
%\usepackage{sectsty}
%\sectionfont{\rmfamily\mdseries\Large}
%\subsectionfont{\rmfamily\bfseries\normalsize}
% Don't indent paragraphs.
\setlength\parindent{0em}
\setlength\parskip{0.5em}
% Make lists without bullets
\newenvironment{itemize*}{
\begin{list}{}
{
\setlength{\itemsep}{5pt}
\setlength{\parsep}{0pt}
\setlength{\topsep}{0pt}
\setlength{\leftmargin}{0em}
}
} {
\end{list}
}
\begin{document}
{\Large Benjamin R. Hillman} \\
PO Box 5800 \\
Mail Stop 0734 \\
Albuquerque, NM 87185-0734 \\
\url{bhillma@sandia.gov} // (425) 218-8086
\section*{Education}
\begin{itemize*}
\item Ph.D., Atmospheric Sciences,
University of Washington, Seattle, WA,
June 2016
\item M.S., Atmospheric Sciences,
University of Washington, Seattle, WA,
2012
\item B.S., Physics and Mathematics \textit{Cum Laude},
Western Washington University, Bellingham, WA,
2008
\item A.S.,
Shoreline Community College, Seattle, WA,
2005
\end{itemize*}
\section*{Research Experience}
\begin{itemize*}
\item \textbf{Postdoctoral Appointee},
Department of Atmospheric Science,
Sandia National Laboratories, Albuquerque, NM,
Summer 2016--present
\begin{itemize}
\item Improving understanding of Arctic cloud processes and model biases through high resolution atmospheric modeling and observations.
\item Development and analysis of cutting-edge techniques for improved simulation in global climate models, including the use and development of super-parameterization and regionally-fined meshes.
\end{itemize}
\item \textbf{Graduate Research Associate},
Department of Atmospheric Sciences,
University of Washington, Seattle, WA,
Fall 2008--Spring 2016
\begin{itemize}
\item Evaluating cloud properties in atmospheric models against satellite remote sensing retrievals using satellite instrument simulators to account for limitations and uncertainties in retrievals.
\item Quantification of uncertainties and inherent biases in the satellite simulator framework due to representations of unresolved scales.
\item Development and implementation of an improved parameterization of unresolved cloud properties for use in satellite simulators.
\end{itemize}
\item \textbf{Research Associate},
Department of Chemistry,
Western Washington University, Bellingham, WA,
Summer 2008
\begin{itemize}
\item Modeling growth of thin semiconductor films using a deposition, diffusion, aggregation model.
\end{itemize}
\end{itemize*}
\section*{Technical Skills}
\begin{itemize}
\item Development and analysis of a range of global climate models, including the GFDL global atmosphere model (AM2), the NCAR Community Earth System Model (CESM), the Super-Parameterized Community Atmosphere Model (SP-CAM), and the DOE Accelerated Climate Model for Energy (ACME)
\item Expertise in the use of satellite instrument simulators for model evaluation
\item Development of analysis tools for end-user applications, including incorporation of new diagnostics into the NCAR Atmosphere Model Working Group (AMWG) diagnostics package
\item Experience with a range of programming and analysis languages including Fortran (77 and 90), C, Python, Matlab, NCL, and UNIX shell scripting
\item Analysis of geospatial datasets using the netCDF operators (NCO)
\item Using git and github for software version control and project management
\item Working in high-performance computing environments
\end{itemize}
\section*{Teaching Experience}
\begin{itemize*}
\item
Teaching Assistant,
Atmospheric Radiative Transfer (ATM S 341),
University of Washington, Seattle, WA,
Spring 2014
\item
Teaching Assistant,
Introduction to Weather (ATM S 101),
University of Washington, Seattle, WA,
Winter 2010
\item
Teaching Assistant,
Department of Physics and Astronomy,
Western Washington University, Bellingham, WA,
Winter 2006--Spring 2008
\end{itemize*}
\section*{Field Experience}
\begin{itemize*}
\item Storm Peak Lab Cloud Property Validation Experiment (STORMVEx)
Steamboat Springs, CO,
Winter 2011
\end{itemize*}
\section*{Honors}
\begin{itemize*}
\item 2011 NCAR Advanced Study Program Graduate Visitor
\item 2008 Dr. James and Joann Albers memorial scholarship
\item 2007 Dr. James and Joann Albers memorial scholarship
\end{itemize*}
\section*{Publications}
\begin{itemize*}
\item Hillman, B. R., R. Marchand, T. P. Ackerman, G. G. Mace and S. Benson, 2015: Assessing the accuracy of MISR and MISR-simulated cloud top heights using CloudSat and CALIPSO-retrieved hydrometeor profiles (in review).
\item Hillman, B. R., R. Marchand, and T. P. Ackerman, 2015: Errors in simulated satellite cloud diagnostics from global climate models due to unresolved cloud structure and variability (in prep).
\item Hillman, B. R., R. Marchand, and T. P. Ackerman, A. Bodas-Salcedo, J. Cole, J.-C. Golaz, J. E. Kay, 2015: Comparing cloud biases in CMIP5: insights using MISR and ISCCP observations and satellite simulators (in prep).
\item Hillman, B. R., 2016:
Reducing errors in simulated satellite views of clouds from
large-scale models.
Ph.D. dissertation, University of Washington.
\item Hillman, B. R., 2012:
Evaluating clouds in global climate models using instrument simulators.
M.S. thesis, University of Washington.
\item Kay, J. E., B. R. Hillman, S. A. Klein, Y. Zhang, B. Medeiros,
R. Pincus, A. Gettelman, B. Eaton, J. Boyle, R. Marchand,
and T. P. Ackerman,
2012:
Exposing global cloud biases in the Community Atmosphere Model (CAM) using
satellite observations and their corresponding instrument simulators.
J. Climate, 25, 5190–5207, doi:10.1175/JCLI-D-11-00469.1.
\end{itemize*}
\section*{Selected Presentations}
\begin{itemize*}
\item Hillman, B. R., R. Marchand, T. P. Ackerman,
2014:
Comparison of MISR and MISR-simulated cloud top heights using CloudSat and CALIPSO profiles.
MISR Science Team Meeting,
Pasadena, CA.
\item Hillman, B. R., R. Marchand, T. P. Ackerman, A. Bodas-Salcedo, J. Cole, J.-C. Golaz, J. E. Kay,
2012:
Comparing cloud biases in CMIP5: insights using MISR and ISCCP observations and satellite simulators.
American Geophysical Union Fall Meeting,
San Francisco, CA.
\item Hillman, B. R., R. Marchand, T. P. Ackerman, A. Bodas-Salcedo, J. Cole, J.-C. Golaz, J. E. Kay,
2012:
An intercomparison of clouds and radiation in CMIP5 models
using MISR and ISCCP simulators.
1st Pan-Global Atmosphere Systems Studies (GASS) Conference,
Boulder, CO.
\item Hillman, B. R., J. E. Kay, S. A. Klein, Y. Zhang, B. Medeiros,
R. Pincus, A. Gettelman, B. Eaton, J. Boyle, R. Marchand, and T. P. Ackerman,
2011:
Evaluating clouds in climate models using satellite simulators:
from mean state to feedbacks.
MISR Data Users Symposium,
Pasadena, CA.
\item Hillman, B. R., J. E. Kay, S. A. Klein, Y. Zhang, B. Medeiros,
R. Pincus, A. Gettelman, B. Eaton, J. Boyle, R. Marchand, and T. P. Ackerman,
2011:
Evaluating clouds in climate models using satellite simulators:
from mean state to feedbacks.
American Geophysical Union Fall Meeting,
San Francisco, CA.
\item Hillman, B.,
2011:
Use of satellite instrument simulators in the evaluation of climate models.
University of Washington Department of Atmospheric Sciences
Physics and Chemistry Seminar,
Seattle, WA.
\item Hillman, B., J. Kay, and T. Ackerman, 2011:
Evaluating clouds in the Community Atmosphere Model using COSP.
Poster presentation, CESM Annual Workshop,
Breckenridge, CO.
\item Hillman, B., R. Marchand, and T. Ackerman,
2010:
Evaluation of Clouds in Climate Models Using Instrument Simulators.
Western Washington University Physics Department Invited Colloquium,
Bellingham, WA.
\item Hillman, B., R. Marchand, and T. Ackerman,
2010:
Evaluation of Low Clouds in the NCAR CAM3 and GFDL AM2 Using MISR Joint
Histograms.
American Geophysical Union Fall Meeting,
San Francisco, CA.
\item Hillman, B., R. Marchand, and T. Ackerman,
2010:
Evaluation of Low Clouds in the NCAR CAM3 and GFDL AM2 Using MISR Joint
Histograms.
MISR Data Users Symposium,
Pasadena, CA.
\item Hillman, B., T. Ackerman, and R. Marchand,
2009:
Evaluating global climate models using a MISR simulator.
Presentation,
MISR Data Users Science Symposium,
Pasadena, CA.
\end{itemize*}
\end{document}
|
{"hexsha": "756bebe7f413c731f335c2edcb8d8ed57e6ccdd8", "size": 8716, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "cv.tex", "max_stars_repo_name": "brhillman/cv", "max_stars_repo_head_hexsha": "eab8dbe8b9c8c7bd5281ab27946939fb975149cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cv.tex", "max_issues_repo_name": "brhillman/cv", "max_issues_repo_head_hexsha": "eab8dbe8b9c8c7bd5281ab27946939fb975149cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cv.tex", "max_forks_repo_name": "brhillman/cv", "max_forks_repo_head_hexsha": "eab8dbe8b9c8c7bd5281ab27946939fb975149cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5689655172, "max_line_length": 280, "alphanum_fraction": 0.7273978889, "num_tokens": 2387}
|
#' Gets TVKs for a query
#'
#' Given a search term this function returns taxon information, including pTVKs,
#' for the first 25 taxa that match that search on the NBN.
#'
#' @export
#' @param query A query string. This can range from latin binomials to partial english names.
#' @param species_only Logical, if \code{TRUE} pTVKs of species are returned (i.e.
#' sub-species and aggregates are removed). Defaults to \code{TRUE}
#' @param rec_only Logical, if \code{TRUE} pTVKs of recommended names are returned (i.e.
#' synonyms are removed). Defaults to \code{FALSE}. Remember, the pTVK of a synonym is a
#' taxa with 'recommended' name status.
#' @param top Logical, if \code{TRUE} only the top answer is returned. This is what the
#' gateway thinks you are most likely to be after but may not always be right, use with
#' care!
#' @param ... Further named parameters passed on to \code{\link[httr]{GET}}
#' @return A dataframe containing information on each taxa entry that matches the query
#' string in rows. ptaxonVersionKey (preferred taxon version key) should be used when
#' searching for records.
#' @author Tom August, CEH \email{tomaug@@ceh.ac.uk}
#' @seealso \code{\link{getGroupSpeciesTVKs}}, \code{\link{getOccurrences}}
#' @examples \dontrun{
#' t <- getTVKQuery('blue tit')
#' }
#'
getTVKQuery<-function(query = NULL, species_only = TRUE, rec_only = FALSE, top = FALSE, ...){
    if(is.null(query)) stop('query string must not be null')
d_master <- NULL
for(q in query){
q <- tolower(gsub(' ','%20', q))
json <- runnbnurl(service = "query",
query = q,
... = ...)
json<-json$results
if (length(json) > 0) {
## find the unique names that are used in occ
n <- unique(unlist(c(sapply(json, names))))
## dimension a matrix for the required number of rows and cols
d <- matrix(nrow=length(json), ncol=length(n),
dimnames=list(seq(1:length(json)),n))
## now we can go through the list and insert
## the values into the correct cells of the matrix
## This should be quick because the matrix is pre-allocated
for (i in 1:length(json)) {
for (j in 1:length(json[[i]])) {
k <- grep(names(json[[i]][j]),n)
d[i,k] <- json[[i]][[j]]
}
}
## cooerce the matrix to a data.frame
d <- as.data.frame(d, stringsAsFactors = FALSE)
# On reflection it is probably best to return everything # keep only the columns I need
#d <- d[colnames(d) %in% c('searchMatchTitle','rank','nameStatus','ptaxonVersionKey')]
## Keep only species if desired
if(species_only) d <- d[d$rank == 'Species',]
## Keep only recommended if desired
if(rec_only) d <- d[d$nameStatus == 'Recommended',]
## Keep top only
if(top) d <- d[1,]
if(is.null(d_master)){d_master <- d}else{d_master<-merge(d_master,d,all=T)}
}
}
return(d_master)
}
|
{"hexsha": "eb60a46357e4db349cd3f3a4eb1b499b442361b0", "size": 3287, "ext": "r", "lang": "R", "max_stars_repo_path": "R/getTVKQuery.r", "max_stars_repo_name": "AugustT/rnbn", "max_stars_repo_head_hexsha": "ab068f1a30071849e5813e22c090b3c70ae0f676", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/getTVKQuery.r", "max_issues_repo_name": "AugustT/rnbn", "max_issues_repo_head_hexsha": "ab068f1a30071849e5813e22c090b3c70ae0f676", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/getTVKQuery.r", "max_forks_repo_name": "AugustT/rnbn", "max_forks_repo_head_hexsha": "ab068f1a30071849e5813e22c090b3c70ae0f676", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.141025641, "max_line_length": 110, "alphanum_fraction": 0.5752966231, "num_tokens": 800}
|
Require Export GeoCoq.Tarski_dev.Definitions.
Require Export GeoCoq.Tactics.finish.
Ltac prolong A B x C D :=
assert (sg:= segment_construction A B C D);
ex_and sg x.
Section T1_1.
Context `{Tn:Tarski_neutral_dimensionless}.
Lemma cong_reflexivity : forall A B,
Cong A B A B.
Proof.
intros.
apply (cong_inner_transitivity B A A B); apply cong_pseudo_reflexivity.
Qed.
Lemma cong_symmetry : forall A B C D : Tpoint,
Cong A B C D -> Cong C D A B.
Proof.
intros.
eapply cong_inner_transitivity.
apply H.
apply cong_reflexivity.
Qed.
Lemma cong_transitivity : forall A B C D E F : Tpoint,
Cong A B C D -> Cong C D E F -> Cong A B E F.
Proof.
intros.
eapply cong_inner_transitivity; eauto using cong_symmetry.
Qed.
Lemma cong_left_commutativity : forall A B C D,
Cong A B C D -> Cong B A C D.
Proof.
intros.
eapply cong_inner_transitivity.
apply cong_symmetry.
apply cong_pseudo_reflexivity.
assumption.
Qed.
Lemma cong_right_commutativity : forall A B C D,
Cong A B C D -> Cong A B D C.
Proof.
intros.
apply cong_symmetry.
apply cong_symmetry in H.
apply cong_left_commutativity.
assumption.
Qed.
Lemma cong_3421 : forall A B C D,
Cong A B C D -> Cong C D B A.
Proof.
auto using cong_symmetry, cong_right_commutativity.
Qed.
Lemma cong_4312 : forall A B C D,
Cong A B C D -> Cong D C A B.
Proof.
auto using cong_symmetry, cong_right_commutativity.
Qed.
Lemma cong_4321 : forall A B C D,
Cong A B C D -> Cong D C B A.
Proof.
auto using cong_symmetry, cong_right_commutativity.
Qed.
Lemma cong_trivial_identity : forall A B : Tpoint,
Cong A A B B.
Proof.
intros.
prolong A B E A A.
eapply cong_inner_transitivity.
apply H0.
assert(B=E).
eapply cong_identity.
apply H0.
subst.
apply cong_reflexivity.
Qed.
Lemma cong_reverse_identity : forall A C D,
Cong A A C D -> C=D.
Proof.
intros.
apply cong_symmetry in H.
eapply cong_identity.
apply H.
Qed.
Lemma cong_commutativity : forall A B C D,
Cong A B C D -> Cong B A D C.
Proof.
intros.
apply cong_left_commutativity.
apply cong_right_commutativity.
assumption.
Qed.
End T1_1.
Hint Resolve cong_commutativity cong_3421 cong_4312 cong_4321 cong_trivial_identity
cong_left_commutativity cong_right_commutativity
cong_transitivity cong_symmetry cong_reflexivity : cong.
Ltac Cong := auto 4 with cong.
Ltac eCong := eauto with cong.
Section T1_2.
Context `{Tn:Tarski_neutral_dimensionless}.
(* We pre-compute some trivial lemmas to have more efficient automatic proofs. *)
Lemma not_cong_2134 : forall A B C D, ~ Cong A B C D -> ~ Cong B A C D.
Proof.
auto with cong.
Qed.
Lemma not_cong_1243 : forall A B C D, ~ Cong A B C D -> ~ Cong A B D C.
Proof.
auto with cong.
Qed.
Lemma not_cong_2143 : forall A B C D, ~ Cong A B C D -> ~ Cong B A D C.
Proof.
auto with cong.
Qed.
Lemma not_cong_3412 : forall A B C D, ~ Cong A B C D -> ~ Cong C D A B.
Proof.
auto with cong.
Qed.
Lemma not_cong_4312 : forall A B C D, ~ Cong A B C D -> ~ Cong D C A B.
Proof.
auto with cong.
Qed.
Lemma not_cong_3421 : forall A B C D, ~ Cong A B C D -> ~ Cong C D B A.
Proof.
auto with cong.
Qed.
Lemma not_cong_4321 : forall A B C D, ~ Cong A B C D -> ~ Cong D C B A.
Proof.
auto with cong.
Qed.
End T1_2.
Hint Resolve not_cong_2134 not_cong_1243 not_cong_2143
not_cong_3412 not_cong_4312 not_cong_3421 not_cong_4321 : cong.
Section T1_3.
Context `{Tn:Tarski_neutral_dimensionless}.
Lemma five_segment_with_def : forall A B C D A' B' C' D',
OFSC A B C D A' B' C' D' -> A<>B -> Cong C D C' D'.
Proof.
unfold OFSC.
intros;spliter.
apply (five_segment A A' B B'); assumption.
Qed.
Lemma cong_diff : forall A B C D : Tpoint,
A <> B -> Cong A B C D -> C <> D.
Proof.
intros.
intro.
subst.
apply H.
eauto using cong_identity.
Qed.
Lemma cong_diff_2 : forall A B C D ,
B <> A -> Cong A B C D -> C <> D.
Proof.
intros.
intro;subst.
apply H.
symmetry.
eauto using cong_identity, cong_symmetry.
Qed.
Lemma cong_diff_3 : forall A B C D ,
C <> D -> Cong A B C D -> A <> B.
Proof.
intros.
intro;subst.
apply H.
eauto using cong_identity, cong_symmetry.
Qed.
Lemma cong_diff_4 : forall A B C D ,
D <> C -> Cong A B C D -> A <> B.
Proof.
intros.
intro;subst.
apply H.
symmetry.
eauto using cong_identity, cong_symmetry.
Qed.
Lemma cong_3_sym : forall A B C A' B' C',
Cong_3 A B C A' B' C' -> Cong_3 A' B' C' A B C.
Proof.
unfold Cong_3.
intuition.
Qed.
Lemma cong_3_swap : forall A B C A' B' C',
Cong_3 A B C A' B' C' -> Cong_3 B A C B' A' C'.
Proof.
unfold Cong_3.
intuition.
Qed.
Lemma cong_3_swap_2 : forall A B C A' B' C',
Cong_3 A B C A' B' C' -> Cong_3 A C B A' C' B'.
Proof.
unfold Cong_3.
intuition.
Qed.
Lemma cong3_transitivity : forall A0 B0 C0 A1 B1 C1 A2 B2 C2,
Cong_3 A0 B0 C0 A1 B1 C1 -> Cong_3 A1 B1 C1 A2 B2 C2 -> Cong_3 A0 B0 C0 A2 B2 C2.
Proof.
unfold Cong_3.
intros.
spliter.
repeat split; eapply cong_transitivity; eCong.
Qed.
End T1_3.
Hint Resolve cong_3_sym : cong.
Hint Resolve cong_3_swap cong_3_swap_2 cong3_transitivity : cong3.
Hint Unfold Cong_3 : cong3.
Section T1_4.
Context `{TnEQD:Tarski_neutral_dimensionless_with_decidable_point_equality}.
Lemma eq_dec_points : forall A B : Tpoint, A=B \/ ~ A=B.
Proof. exact point_equality_decidability. Qed.
Lemma distinct : forall P Q R : Tpoint, P <> Q -> (R <> P \/ R <> Q).
Proof.
intros.
induction (eq_dec_points R P).
subst R.
right.
assumption.
left.
assumption.
Qed.
Lemma l2_11 : forall A B C A' B' C',
Bet A B C -> Bet A' B' C' -> Cong A B A' B' -> Cong B C B' C' -> Cong A C A' C'.
Proof.
intros.
induction (eq_dec_points A B).
subst B.
assert (A' = B') by
(apply (cong_identity A' B' A); Cong).
subst; Cong.
apply cong_commutativity; apply (five_segment A A' B B' C C' A A'); Cong.
Qed.
Lemma bet_cong3 : forall A B C A' B', Bet A B C -> Cong A B A' B' -> exists C', Cong_3 A B C A' B' C'.
Proof.
intros.
assert (exists x, Bet A' B' x /\ Cong B' x B C) by (apply segment_construction).
ex_and H1 x.
assert (Cong A C A' x).
eapply l2_11.
apply H.
apply H1.
assumption.
Cong.
exists x;unfold Cong_3; repeat split;Cong.
Qed.
Lemma construction_uniqueness : forall Q A B C X Y,
Q <> A -> Bet Q A X -> Cong A X B C -> Bet Q A Y -> Cong A Y B C -> X=Y.
Proof.
intros.
assert (Cong A X A Y) by (apply cong_transitivity with B C; Cong).
assert (Cong Q X Q Y) by (apply (l2_11 Q A X Q A Y);Cong).
assert(OFSC Q A X Y Q A X X) by (unfold OFSC;repeat split;Cong).
apply five_segment_with_def in H6; try assumption.
apply cong_identity with X; Cong.
Qed.
Lemma Cong_cases :
forall A B C D,
Cong A B C D \/ Cong A B D C \/ Cong B A C D \/ Cong B A D C \/
Cong C D A B \/ Cong C D B A \/ Cong D C A B \/ Cong D C B A ->
Cong A B C D.
Proof.
intros.
decompose [or] H;clear H; Cong.
Qed.
Lemma Cong_perm :
forall A B C D,
Cong A B C D ->
Cong A B C D /\ Cong A B D C /\ Cong B A C D /\ Cong B A D C /\
Cong C D A B /\ Cong C D B A /\ Cong D C A B /\ Cong D C B A.
Proof.
intros.
repeat split; Cong.
Qed.
End T1_4.
|
{"author": "princeton-vl", "repo": "CoqGym", "sha": "0c03a6fba3a3ea7e2aecedc1c624ff3885f7267e", "save_path": "github-repos/coq/princeton-vl-CoqGym", "path": "github-repos/coq/princeton-vl-CoqGym/CoqGym-0c03a6fba3a3ea7e2aecedc1c624ff3885f7267e/coq_projects/GeoCoq/Tarski_dev/Ch02_cong.v"}
|
'''
IKI Bangladesh (MIOASI): S1b Tidy netCDF metadata
In some instances, it is useful to run this script independently of the
other data-processing scripts.
Author: HS
Created: 19/7/19
'''
import argparse
import datetime as dt
import glob
import iris
import numpy as np
import os
import sys
import time
from cf_units import Unit
from ciid_tools.rim_remove import rim_remove
import dataprocessing as dp
from user_vars import EVENTS, NAMES, RES, SCRATCH, HCNC
# Parse other processing options from the command line
parser = argparse.ArgumentParser(description='Specify file options')
parser.add_argument('-r', '--resolution', choices=RES, default='4p4',
help='Specify pp file resolution')
parser.add_argument('-e', '--event', choices=list(EVENTS.keys()), default='Sidr',
help='Specify event (storm name)')
parser.add_argument('-t', '--tidync', action='store_true',
                    help='Optionally run the tidy-netcdf function to tidy additional netCDF metadata')
pargs = parser.parse_args()
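# Example invocation (illustrative; paths and arguments are hypothetical):
#   python s2b_tidy_netcdf_fpens.py --resolution 4p4 --event Sidr --tidync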
# Define constants
DATADIR = SCRATCH
OUTDIR = SCRATCH
RIM_WIDTH = 13
# Set custom global attributes specific to this dataset
ATTRS = {'summary': 'Tropical cyclone footprint ensemble over Bangladesh',
'title': 'Downscaled Tropical Cyclone footprint ensemble over Bangladesh',
'date_created': time.strftime('%Y%m%dT%H:%M:%S'),
'contact': 'enquiries@metoffice.gov.uk',
'Conventions': 'CF-1.7', # Name of the conventions followed by the dataset.
'comment': 'Supported by the International Climate Initiative (IKI) and the Federal Ministry for the '
                    'Environment, Nature Conservation and Nuclear Safety, based on a decision of the German '
'Bundestag',
'data_type': 'grid',
'spatial_resolution': pargs.resolution.replace('p', '.') + 'km',
'history': '(1.0) Initial release',
'keywords': 'Bangladesh, ensemble, footprint, Met Office',
'product_version': 'v1.0',
'project': 'Oasis Platform for Climate and Catastrophe Risk Assessment – Asia',
'references': '', # References that describe the data or methods used to produce it
'source': 'Copernicus Climate Change Service Information (C3S) ECMWF ERA5 / Met Office UM RA2T CON',
# Method of production of the original data.
'standard_name_vocabulary': 'NetCDF Climate and Forecast (CF) Standard Names version 51',
'type': 'float',
'licence': 'Creative Commons Attribution 4.0 International (CC BY 4.0)'
}
dp.tidy_netcdf(HCNC, fglob='fpens*.nc', global_attributes=ATTRS)
|
{"hexsha": "d22ade478784cca76916f269f17591cdbc57f4be", "size": 2647, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/s2b_tidy_netcdf_fpens.py", "max_stars_repo_name": "MetOffice/IKI-Oasis-Bangladesh", "max_stars_repo_head_hexsha": "a280be8a151b395c0117e700a259b37948faa3f2", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/s2b_tidy_netcdf_fpens.py", "max_issues_repo_name": "MetOffice/IKI-Oasis-Bangladesh", "max_issues_repo_head_hexsha": "a280be8a151b395c0117e700a259b37948faa3f2", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/s2b_tidy_netcdf_fpens.py", "max_forks_repo_name": "MetOffice/IKI-Oasis-Bangladesh", "max_forks_repo_head_hexsha": "a280be8a151b395c0117e700a259b37948faa3f2", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-11T06:11:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-11T06:11:05.000Z", "avg_line_length": 41.359375, "max_line_length": 111, "alphanum_fraction": 0.6826596147, "include": true, "reason": "import numpy", "num_tokens": 617}
|
"""
Data types used for pysc2 environment
We don't use the data types provided in acme's file "types.py" because they are less expressive than the customized classes below
"""
import numpy as np
from typing import List
from pysc2.lib import actions
class Space:
"""
Holds information about any generic space
    In essence, it is a simplification of the gym.spaces module into a single endpoint
"""
def __init__(self, shape=(), dtype=np.int32, domain=(0, 1), categorical=False, name=None):
self.name = name
self.shape, self.dtype = shape, dtype
self.categorical = categorical
(self.minimum, self.maximum) = domain
def is_discrete(self) -> bool:
"""
Space is considered discrete if its values are only ints
"""
return np.issubdtype(self.dtype, np.integer)
def is_continuous(self) -> bool:
"""
Space is considered continuous if its values can be floats
"""
return np.issubdtype(self.dtype, np.floating)
def is_spatial(self) -> bool:
"""
        Space is considered spatial if it has a multi-dimensional shape, e.g. HxWxC
"""
return len(self.shape) > 1 or type(self.maximum) in [list, tuple]
def size(self) -> int:
"""
Number of labels if categorical
Number of intervals if discrete (can have multiple in one space)
        Number of mean and log std. dev. parameters if continuous
Meant to be used to determine size of logit outputs in models
"""
if self.is_discrete() and self.categorical:
if self.is_spatial():
return self.maximum
return self.maximum - self.minimum
sz = 1
if len(self.shape) == 1:
sz = self.shape[0]
return sz
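    # For example (illustrative): Space(domain=(0, 9), categorical=True)
    # reports size() == 9, while Space(shape=(3,), dtype=np.float32)
    # reports size() == 3.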
def sample(self, n=1):
"""
        Sample from this space. Useful for a random agent, for example.
"""
if self.is_discrete():
return np.random.randint(self.minimum, self.maximum+1, (n, ) + self.shape)
if self.is_continuous():
return np.random.uniform(self.minimum, self.maximum+1e-10, (n, ) + self.shape)
def __repr__(self):
mid = str(self.shape)
if self.categorical:
mid += ", cat: " + str(self.maximum)
return "Space(%s, %s, %s)" % (self.name, mid, str(self.dtype).strip("<class>' "))
class SC2Space(Space):
def __init__(self, shape, name, spatial_feats=None, spatial_dims=None):
"""
Example of one space with spatial features:
name: screen{player_relative, selected, visibility_map, unit_hit_points_ratio, unit_density}
"""
if spatial_feats:
name += "{%s}" % ", ".join(spatial_feats)
self.spatial_feats, self.spatial_dims = spatial_feats, spatial_dims
super().__init__(shape, name=name)
class SC2FuncIdSpace(Space):
def __init__(self, func_ids, args):
super().__init__(domain=(0,len(func_ids)), dtype=np.int64, categorical=True, name="function_id")
self.args_mask = []
for fn_id in func_ids:
fn_id_args = [
arg_type.name for arg_type in actions.FUNCTIONS[fn_id].args]
self.args_mask.append([arg in fn_id_args for arg in args])
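# Minimal usage sketch (added for illustration; not part of the original module).
if __name__ == "__main__":
    fn_space = Space(dtype=np.int64, domain=(0, 9), categorical=True, name="function_id")
    screen = Space(shape=(64, 64), dtype=np.float32, domain=(0.0, 1.0), name="screen")
    print(fn_space)                  # e.g. Space(function_id, (), cat: 9, numpy.int64)
    print(fn_space.sample(4).shape)  # (4,)
    print(screen.is_continuous(), screen.is_spatial())  # True, True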
|
{"hexsha": "ef82f0c08b4460f1956c7ad9e6309485c1e29e96", "size": 3272, "ext": "py", "lang": "Python", "max_stars_repo_path": "acme/sc2_types.py", "max_stars_repo_name": "MEDCOMP/SC2_ACME", "max_stars_repo_head_hexsha": "511f5c4388ad4b8ef157e46678cc22bb0a199ad4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "acme/sc2_types.py", "max_issues_repo_name": "MEDCOMP/SC2_ACME", "max_issues_repo_head_hexsha": "511f5c4388ad4b8ef157e46678cc22bb0a199ad4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "acme/sc2_types.py", "max_forks_repo_name": "MEDCOMP/SC2_ACME", "max_forks_repo_head_hexsha": "511f5c4388ad4b8ef157e46678cc22bb0a199ad4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.387755102, "max_line_length": 122, "alphanum_fraction": 0.6179706601, "include": true, "reason": "import numpy", "num_tokens": 757}
|
import numpy as np
class LaneEKF():
def __init__(self, Q_u, Q_z, R_lane_frame):
"""
EKF that is based upon tracking a lane.
Reference paper with original implementation:
Petrich et al, "Map-based long term motion prediction for vehicles in traffic environments", ITSC 2013.
https://doi.org/10.1109/ITSC.2013.6728549
The system has the following states, inputs, and measurements:
States: [x, y, theta, v], kinematic state
Inputs: [u_acc, u_curv], acceleration + curvature
Measurements: [x_{ALP}, y_{ALP}, theta_{ALP}] where ALP is the active lane point.
"""
self.nz = 4 # state dimension
self.nu = 2 # input dimension
self.nm = 3 # measurement dimension
self.z = np.zeros(self.nz) # mean
self.P = np.eye(self.nz) # covariance
        # State covariance due to unmodeled effects.
self.update_Q_z(Q_z)
# Input covariance, i.e.
# Q_u = diag(sigma^2_{acc}, sigma^2_{curv}).
self.update_Q_u(Q_u)
# Measurement covariance in the lane-aligned frame, i.e.
# R_lane_frame = diag(sigma^2_s, sigma^2_{ey}, sigma^2_{epsi}).
assert R_lane_frame.shape == (self.nm, self.nm)
assert np.allclose(R_lane_frame, R_lane_frame.T)
assert np.linalg.det(R_lane_frame) > 0.
self.R_lane_frame = R_lane_frame
def update_Q_z(self, Q_z):
# This is used to deal with issues of integration + unmodeled dynamics.
assert Q_z.shape == (self.nz, self.nz)
assert np.allclose(Q_z, Q_z.T)
assert np.linalg.det(Q_z) > 0.
self.Q_z = Q_z
def update_Q_u(self, Q_u):
# In theory, this should just be called once by the constructor.
        # When fitting this parameter, we may want to try a different
        # candidate value, which is what this function is useful for.
assert Q_u.shape == (self.nu, self.nu)
assert np.allclose(Q_u, Q_u.T)
assert np.linalg.det(Q_u) > 0.
self.Q_u = Q_u
def time_update(self, u, dt):
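        # Added note: standard EKF prediction. The mean is propagated through
        # the nonlinear dynamics and the covariance through its linearization:
        #   P <- A P A^T + B Q_u B^T + Q_z.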
A = self._dynamics_state_jacobian(self.z, u, dt)
B = self._dynamics_input_jacobian(self.z, u, dt)
z_next = self._dynamics_model(self.z, u, dt)
self.z = z_next
self.P = A @ self.P @ A.T + B @ self.Q_u @ B.T + self.Q_z
return self.z, self.P, A, B
def measurement_update(self, lane_localizer):
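        # Added note: standard EKF correction against the active lane point.
        # The lane-frame measurement noise is rotated into the global frame
        # before forming the residual covariance S = H P H^T + R and the
        # Kalman gain K = P H^T S^(-1).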
x = self.z[0]
y = self.z[1]
lane_pose, rot_local_to_global = lane_localizer.get_lane_measurement(x, y)
H = np.zeros((self.nm, self.nz))
H[:, :self.nm] = np.eye(self.nm)
residual = lane_pose - H @ self.z
residual[2] = self._bound_angle_within_pi(residual[2])
R_lane_global = np.copy(self.R_lane_frame)
R_lane_global[:2, :2] = rot_local_to_global @ R_lane_global[:2, :2] @ rot_local_to_global.T
res_covar = H @ self.P @ H.T + R_lane_global
K = self.P @ H.T @ np.linalg.pinv(res_covar)
self.z = self.z + K @ residual
self.z[2] = self._bound_angle_within_pi(self.z[2])
self.P = (np.eye(self.nz) - K @ H) @ self.P
return self.z, self.P, residual, res_covar
def _reset(self, z_init, P_init):
assert z_init.shape == (self.nz,)
assert P_init.shape == (self.nz, self.nz)
self.z = z_init
self.P = P_init
@staticmethod
def _bound_angle_within_pi(angle):
""" Given an angle, adjusts it to lie within a +/- PI range """
return (angle + np.pi) % (2 * np.pi) - np.pi # https://stackoverflow.com/questions/15927755/opposite-of-numpy-unwrap
@staticmethod
def _dynamics_model(z, u, dt):
u_acc, u_curv = u
x, y, th, v = z
xn = x + dt*(v * np.cos(th))
yn = y + dt*(v * np.sin(th))
thn = th + dt*(v*u_curv)
vn = v + dt*(u_acc)
vn = max(0, vn)
thn = LaneEKF._bound_angle_within_pi(thn)
return np.array([xn, yn, thn, vn])
@staticmethod
def _dynamics_state_jacobian(z, u, dt):
u_acc, u_curv = u
x, y, th, v = z
A = np.eye(4) + dt * \
np.array([[0, 0, -v*np.sin(th), np.cos(th)],
[0, 0, v*np.cos(th), np.sin(th)],
[0, 0, 0, u_curv],
[0, 0, 0, 0]])
return A
@staticmethod
def _dynamics_input_jacobian(z, u, dt):
u_acc, u_curv = u
x, y, th, v = z
B = np.array([[ 0, 0],
[ 0, 0],
[ 0, v*dt],
[dt, 0]])
return B
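# Illustrative usage (hypothetical values; a lane_localizer object must
# provide get_lane_measurement(x, y) -> (lane_pose, rot_local_to_global)):
#   ekf = LaneEKF(Q_u=np.diag([0.5, 0.01]), Q_z=1e-3 * np.eye(4),
#                 R_lane_frame=np.diag([0.1, 0.05, 0.01]))
#   ekf._reset(np.array([0., 0., 0., 5.]), np.eye(4))
#   z, P, A, B = ekf.time_update(u=np.array([0.2, 0.0]), dt=0.1)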
if __name__ == '__main__':
nz, nu = 4, 2
z = np.array([10., 5., 0.25, 8.0])
u = np.array([1.2, -0.2])
dt = 0.1
eps = 1e-4
# TEST STATE JACOBIAN
A = LaneEKF._dynamics_state_jacobian(z, u, dt)
A_num_jac = np.zeros((nz, nz))
for i in range(nz):
z_plus = z + eps * np.array([int(ind==i) for ind in range(nz)])
z_minus = z - eps * np.array([int(ind==i) for ind in range(nz)])
f_plus = LaneEKF._dynamics_model(z_plus, u, dt)
f_minus = LaneEKF._dynamics_model(z_minus, u, dt)
diff = f_plus - f_minus
diff[2] = LaneEKF._bound_angle_within_pi(diff[2])
A_num_jac[:, i] = diff/(2.*eps)
print("STATE JACOBIAN")
print(A)
print(f"STATE JACOBIAN ERROR: {np.linalg.norm(A - A_num_jac, ord=np.inf)}")
# TEST INPUT JACOBIAN
B = LaneEKF._dynamics_input_jacobian(z, u, dt)
B_num_jac = np.zeros((nz, nu))
for i in range(nu):
u_plus = u + eps * np.array([int(ind==i) for ind in range(nu)])
u_minus = u - eps * np.array([int(ind==i) for ind in range(nu)])
f_plus = LaneEKF._dynamics_model(z, u_plus, dt)
f_minus = LaneEKF._dynamics_model(z, u_minus, dt)
diff = f_plus - f_minus
diff[2] = LaneEKF._bound_angle_within_pi(diff[2])
B_num_jac[:, i] = diff/(2.*eps)
print("INPUT JACOBIAN")
print(B)
print(f"INPUT JACOBIAN ERROR: {np.linalg.norm(B - B_num_jac, ord=np.inf)}")
|
{"hexsha": "d941c7d405dc9043a072e077b3c9d931f0479f79", "size": 6168, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/models/lane_utils/lane_ekf.py", "max_stars_repo_name": "govvijaycal/confidence_aware_predictions", "max_stars_repo_head_hexsha": "c5fea8aac271dc792eedc00a689c02fcd658edec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-08-28T11:09:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T14:38:06.000Z", "max_issues_repo_path": "scripts/models/lane_utils/lane_ekf.py", "max_issues_repo_name": "govvijaycal/confidence_aware_predictions", "max_issues_repo_head_hexsha": "c5fea8aac271dc792eedc00a689c02fcd658edec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-10T09:03:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T14:36:40.000Z", "max_forks_repo_path": "scripts/models/lane_utils/lane_ekf.py", "max_forks_repo_name": "govvijaycal/confidence_aware_predictions", "max_forks_repo_head_hexsha": "c5fea8aac271dc792eedc00a689c02fcd658edec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-30T06:00:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T14:40:20.000Z", "avg_line_length": 34.4581005587, "max_line_length": 124, "alphanum_fraction": 0.5614461738, "include": true, "reason": "import numpy", "num_tokens": 1855}
|
BLOCK DATA DT_BLKD43
C***********************************************************************
C *
C Created on 10 december 1991 by Alfredo Ferrari & Paola Sala *
C Infn - Milan *
C *
C Last change on 18-dec-2012 by S.Roesler *
C *
C              This is the original common block REAC of HADRIN           *
C *
C***********************************************************************
IMPLICIT DOUBLE PRECISION (A-H,O-Z)
SAVE
C
INCLUDE 'inc/hnreac'
DIMENSION UMOpi(92) , UMOkc(68) , UMOp(39) , UMOn(63) , UMOk0(34)
& , PLApi(92) , PLAkc(68) , PLAp(39) , PLAn(63) ,
& PLAk0(34) , SPIkp1(315) , SPIkpu(278) , SPIkpv(372) ,
& SPIkpw(278) , SPIkpx(372) , SPIkp4(315) , SPIkp5(187) ,
& SPIkp6(289) , SKMpel(102) , SPIkp7(289) , SKMnel(68) ,
& SPIkp8(187) , SPIkp9(143) , SPIkp0(169) , SPKpv(143) ,
& SAPpel(105) , SPIkpe(399) , SAPnel(84) , SPIkpz(273) ,
& SANpel(84) , SPIkpf(273) , SPKp15(187) , SPKp16(272) ,
& NRKpi(164) , NRKkc(132) , NRKp(70) , NRKn(116) ,
& NRKk0(54) , NUReln(60)
C
DIMENSION NRKlin(532)
EQUIVALENCE (NRK(1,1),NRKlin(1))
EQUIVALENCE (UMO(1),UMOpi(1))
EQUIVALENCE (UMO(93),UMOkc(1))
EQUIVALENCE (UMO(161),UMOp(1))
EQUIVALENCE (UMO(200),UMOn(1))
EQUIVALENCE (UMO(263),UMOk0(1))
EQUIVALENCE (PLAbf(1),PLApi(1))
EQUIVALENCE (PLAbf(93),PLAkc(1))
EQUIVALENCE (PLAbf(161),PLAp(1))
EQUIVALENCE (PLAbf(200),PLAn(1))
EQUIVALENCE (PLAbf(263),PLAk0(1))
EQUIVALENCE (WK(1),SPIkp1(1))
EQUIVALENCE (WK(316),SPIkpu(1))
EQUIVALENCE (WK(594),SPIkpv(1))
EQUIVALENCE (WK(966),SPIkpw(1))
EQUIVALENCE (WK(1244),SPIkpx(1))
EQUIVALENCE (WK(1616),SPIkp4(1))
EQUIVALENCE (WK(1931),SPIkp5(1))
EQUIVALENCE (WK(2118),SPIkp6(1))
EQUIVALENCE (WK(2407),SKMpel(1))
EQUIVALENCE (WK(2509),SPIkp7(1))
EQUIVALENCE (WK(2798),SKMnel(1))
EQUIVALENCE (WK(2866),SPIkp8(1))
EQUIVALENCE (WK(3053),SPIkp9(1))
EQUIVALENCE (WK(3196),SPIkp0(1))
EQUIVALENCE (WK(3365),SPKpv(1))
EQUIVALENCE (WK(3508),SAPpel(1))
EQUIVALENCE (WK(3613),SPIkpe(1))
EQUIVALENCE (WK(4012),SAPnel(1))
EQUIVALENCE (WK(4096),SPIkpz(1))
EQUIVALENCE (WK(4369),SANpel(1))
EQUIVALENCE (WK(4453),SPIkpf(1))
EQUIVALENCE (WK(4726),SPKp15(1))
EQUIVALENCE (WK(4913),SPKp16(1))
EQUIVALENCE (NRK(1,1),NRKlin(1))
EQUIVALENCE (NRKlin(1),NRKpi(1))
EQUIVALENCE (NRKlin(165),NRKkc(1))
EQUIVALENCE (NRKlin(297),NRKp(1))
EQUIVALENCE (NRKlin(367),NRKn(1))
EQUIVALENCE (NRKlin(483),NRKk0(1))
EQUIVALENCE (NURe(1,1),NUReln(1))
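C Added note: the EQUIVALENCE statements above overlay the named slices
C (UMOpi, UMOkc, ..., NUReln) onto contiguous regions of the common
C arrays UMO, PLAbf, WK, NRK and NURe, so each DATA statement below
C initialises the corresponding segment of those shared tables.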
C
DATA PLApi/0.D0 , .3D0 , .5D0 , .6D0 , .7D0 , .8D0 , .9D0 ,
& .95D0 , 1.D0 , 1.15D0 , 1.3D0 , 1.5D0 , 1.6D0 , 1.8D0 ,
& 2.D0 , 2.3D0 , 2.5D0 , 2.8D0 , 3.D0 , 3.5D0 , 4.D0 , 0.D0 ,
& .285D0 , .4D0 , .45D0 , .5D0 , .6D0 , .7D0 , .75D0 , .8D0 ,
& .85D0 , .9D0 , 1.D0 , 1.15D0 , 1.3D0 , 1.5D0 , 1.6D0 ,
& 1.8D0 , 2.D0 , 2.3D0 , 2.5D0 , 2.8D0 , 3.D0 , 3.5D0 , 4.D0 ,
& 4.5D0 , 0.D0 , .285D0 , .4D0 , .45D0 , .5D0 , .6D0 , .7D0 ,
& .75D0 , .8D0 , .85D0 , .9D0 , 1.D0 , 1.15D0 , 1.3D0 , 1.5D0 ,
& 1.6D0 , 1.8D0 , 2.D0 , 2.3D0 , 2.5D0 , 2.8D0 , 3.D0 , 3.5D0 ,
& 4.D0 , 4.5D0 , 0.D0 , .3D0 , .5D0 , .6D0 , .7D0 , .8D0 ,
& .9D0 , .95D0 , 1.D0 , 1.15D0 , 1.3D0 , 1.5D0 , 1.6D0 ,
& 1.8D0 , 2.D0 , 2.3D0 , 2.5D0 , 2.8D0 , 3.D0 , 3.5D0 , 4.D0/
DATA PLAkc/0.D0 , .58D0 , .8D0 , 1.01D0 , 1.23D0 , 1.45D0 ,
& 1.68D0 , 1.94D0 , 2.18D0 , 2.42D0 , 2.68D0 , 2.96D0 ,
& 3.24D0 , 3.51D0 , 3.84D0 , 4.16D0 , 4.49D0 , 0.D0 , .58D0 ,
& .8D0 , 1.01D0 , 1.23D0 , 1.45D0 , 1.68D0 , 1.94D0 , 2.18D0 ,
& 2.42D0 , 2.68D0 , 2.96D0 , 3.24D0 , 3.51D0 , 3.84D0 ,
& 4.16D0 , 4.49D0 , 0.D0 , .58D0 , .8D0 , 1.01D0 , 1.23D0 ,
& 1.45D0 , 1.68D0 , 1.94D0 , 2.18D0 , 2.42D0 , 2.68D0 ,
& 2.96D0 , 3.24D0 , 3.51D0 , 3.84D0 , 4.16D0 , 4.49D0 , 0.D0 ,
& .58D0 , .8D0 , 1.01D0 , 1.23D0 , 1.45D0 , 1.68D0 , 1.94D0 ,
& 2.18D0 , 2.42D0 , 2.68D0 , 2.96D0 , 3.24D0 , 3.51D0 ,
& 3.84D0 , 4.16D0 , 4.49D0/
DATA PLAk0/0.D0 , .58D0 , .8D0 , 1.01D0 , 1.23D0 , 1.45D0 ,
& 1.68D0 , 1.94D0 , 2.18D0 , 2.42D0 , 2.68D0 , 2.96D0 ,
& 3.24D0 , 3.51D0 , 3.84D0 , 4.16D0 , 4.49D0 , 0.D0 , .58D0 ,
& .8D0 , 1.01D0 , 1.23D0 , 1.45D0 , 1.68D0 , 1.94D0 , 2.18D0 ,
& 2.42D0 , 2.68D0 , 2.96D0 , 3.24D0 , 3.51D0 , 3.84D0 ,
& 4.16D0 , 4.49D0/
C pp pn np nn *
DATA PLAp/0.D0 , 1.06D0 , 1.34D0 , 1.63D0 , 1.92D0 , 2.2D0 ,
& 2.5D0 , 2.8D0 , 3.1D0 , 3.43D0 , 3.75D0 , 4.07D0 , 4.43D0 ,
& 0.D0 , 1.06D0 , 1.34D0 , 1.63D0 , 1.92D0 , 2.2D0 , 2.5D0 ,
& 2.8D0 , 3.1D0 , 3.43D0 , 3.75D0 , 4.07D0 , 4.43D0 , 0.D0 ,
& 1.06D0 , 1.34D0 , 1.63D0 , 1.92D0 , 2.2D0 , 2.5D0 , 2.8D0 ,
& 3.1D0 , 3.43D0 , 3.75D0 , 4.07D0 , 4.43D0/
C app apn anp ann *
DATA PLAn/0.D0 , 1.D-3 , .1D0 , .2D0 , .3D0 , .4D0 , .5D0 , .6D0 ,
& .74D0 , 1.06D0 , 1.34D0 , 1.63D0 , 1.92D0 , 2.2D0 , 2.5D0 ,
& 2.8D0 , 3.1D0 , 3.43D0 , 3.75D0 , 4.07D0 , 4.43D0 , 0.D0 ,
& 1.D-3 , .1D0 , .2D0 , .3D0 , .4D0 , .5D0 , .6D0 , .74D0 ,
& 1.06D0 , 1.34D0 , 1.63D0 , 1.92D0 , 2.2D0 , 2.5D0 , 2.8D0 ,
& 3.1D0 , 3.43D0 , 3.75D0 , 4.07D0 , 4.43D0 , 0.D0 , 1.D-3 ,
& .1D0 , .2D0 , .3D0 , .4D0 , .5D0 , .6D0 , .74D0 , 1.06D0 ,
& 1.34D0 , 1.63D0 , 1.92D0 , 2.2D0 , 2.5D0 , 2.8D0 , 3.1D0 ,
& 3.43D0 , 3.75D0 , 4.07D0 , 4.43D0/
DATA SIIn/296*0.D0/
DATA UMOpi/1.08D0 , 1.233D0 , 1.302D0 , 1.369D0 , 1.496D0 ,
& 1.557D0 , 1.615D0 , 1.6435D0 , 1.672D0 , 1.753D0 , 1.831D0 ,
& 1.930D0 , 1.978D0 , 2.071D0 , 2.159D0 , 2.286D0 , 2.366D0 ,
& 2.482D0 , 2.56D0 , 2.735D0 , 2.90D0 , 1.08D0 , 1.222D0 ,
& 1.302D0 , 1.3365D0 , 1.369D0 , 1.434D0 , 1.496D0 , 1.527D0 ,
& 1.557D0 , 1.586D0 , 1.615D0 , 1.672D0 , 1.753D0 , 1.831D0 ,
& 1.930D0 , 1.978D0 , 2.071D0 , 2.159D0 , 2.286D0 , 2.366D0 ,
& 2.482D0 , 2.560D0 , 2.735D0 , 2.90D0 , 3.06D0 , 1.08D0 ,
& 1.222D0 , 1.302D0 , 1.3365D0 , 1.369D0 , 1.434D0 , 1.496D0 ,
& 1.527D0 , 1.557D0 , 1.586D0 , 1.615D0 , 1.672D0 , 1.753D0 ,
& 1.831D0 , 1.930D0 , 1.978D0 , 2.071D0 , 2.159D0 , 2.286D0 ,
& 2.366D0 , 2.482D0 , 2.560D0 , 2.735D0 , 2.90D0 , 3.06D0 ,
& 1.08D0 , 1.233D0 , 1.302D0 , 1.369D0 , 1.496D0 , 1.557D0 ,
& 1.615D0 , 1.6435D0 , 1.672D0 , 1.753D0 , 1.831D0 , 1.930D0 ,
& 1.978D0 , 2.071D0 , 2.159D0 , 2.286D0 , 2.366D0 , 2.482D0 ,
& 2.56D0 , 2.735D0 , 2.90D0/
DATA UMOkc/1.44D0 , 1.598D0 , 1.7D0 , 1.8D0 , 1.9D0 , 2.0D0 ,
& 2.1D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 , 2.7D0 ,
& 2.8D0 , 2.9D0 , 3.0D0 , 3.1D0 , 1.44D0 , 1.598D0 , 1.7D0 ,
& 1.8D0 , 1.9D0 , 2.0D0 , 2.1D0 , 2.2D0 , 2.3D0 , 2.4D0 ,
& 2.5D0 , 2.6D0 , 2.7D0 , 2.8D0 , 2.9D0 , 3.0D0 , 3.1D0 ,
& 1.44D0 , 1.598D0 , 1.7D0 , 1.8D0 , 1.9D0 , 2.0D0 , 2.1D0 ,
& 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 , 2.7D0 , 2.8D0 ,
& 2.9D0 , 3.0D0 , 3.1D0 , 1.44D0 , 1.598D0 , 1.7D0 , 1.8D0 ,
& 1.9D0 , 2.0D0 , 2.1D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 ,
& 2.6D0 , 2.7D0 , 2.8D0 , 2.9D0 , 3.0D0 , 3.1D0/
DATA UMOk0/1.44D0 , 1.598D0 , 1.7D0 , 1.8D0 , 1.9D0 , 2.0D0 ,
& 2.1D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 , 2.7D0 ,
& 2.8D0 , 2.9D0 , 3.0D0 , 3.1D0 , 1.44D0 , 1.598D0 , 1.7D0 ,
& 1.8D0 , 1.9D0 , 2.0D0 , 2.1D0 , 2.2D0 , 2.3D0 , 2.4D0 ,
& 2.5D0 , 2.6D0 , 2.7D0 , 2.8D0 , 2.9D0 , 3.0D0 , 3.1D0/
C pp pn np nn *
DATA UMOp/1.88D0 , 2.102D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 ,
& 2.6D0 , 2.7D0 , 2.8D0 , 2.9D0 , 3.D0 , 3.1D0 , 3.2D0 ,
& 1.88D0 , 2.102D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 ,
& 2.7D0 , 2.8D0 , 2.9D0 , 3.D0 , 3.1D0 , 3.2D0 , 1.88D0 ,
& 2.102D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 , 2.7D0 ,
& 2.8D0 , 2.9D0 , 3.D0 , 3.1D0 , 3.2D0/
C app apn anp ann *
DATA UMOn/1.877D0 , 1.87701D0 , 1.879D0 , 1.887D0 , 1.9D0 ,
& 1.917D0 , 1.938D0 , 1.962D0 , 2.D0 , 2.102D0 , 2.2D0 ,
& 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 , 2.7D0 , 2.8D0 , 2.9D0 ,
& 3.D0 , 3.1D0 , 3.2D0 , 1.877D0 , 1.87701D0 , 1.879D0 ,
& 1.887D0 , 1.9D0 , 1.917D0 , 1.938D0 , 1.962D0 , 2.D0 ,
& 2.102D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 , 2.7D0 ,
& 2.8D0 , 2.9D0 , 3.D0 , 3.1D0 , 3.2D0 , 1.877D0 , 1.87701D0 ,
& 1.879D0 , 1.887D0 , 1.9D0 , 1.917D0 , 1.938D0 , 1.962D0 ,
& 2.D0 , 2.102D0 , 2.2D0 , 2.3D0 , 2.4D0 , 2.5D0 , 2.6D0 ,
& 2.7D0 , 2.8D0 , 2.9D0 , 3.D0 , 3.1D0 , 3.2D0/
DATA NRKpi/13 , 1 , 15 , 21 , 81 , 0 , 13 , 54 , 23 , 53 , 13 ,
& 63 , 13 , 58 , 23 , 57 , 13 , 65 , 1 , 32 , 53 , 31 , 54 ,
& 32 , 53 , 33 , 53 , 35 , 63 , 32 , 13 , 8 , 23 , 1 , 17 ,
& 15 , 21 , 24 , 22 , 15 , 82 , 0 , 61 , 0 , 13 , 55 , 23 ,
& 54 , 14 , 53 , 13 , 64 , 23 , 63 , 13 , 59 , 23 , 58 , 14 ,
& 57 , 13 , 66 , 23 , 65 , 1 , 31 , 8 , 32 , 1 , 33 , 1 , 35 ,
& 54 , 31 , 55 , 32 , 54 , 33 , 53 , 34 , 54 , 35 , 14 , 1 ,
& 23 , 8 , 17 , 24 , 20 , 15 , 22 , 24 , 83 , 0 , 62 , 0 , 14 ,
& 54 , 23 , 55 , 13 , 56 , 14 , 63 , 23 , 64 , 14 , 58 , 23 ,
& 59 , 13 , 60 , 14 , 65 , 23 , 66 , 8 , 31 , 1 , 34 , 8 , 33 ,
& 8 , 35 , 55 , 31 , 54 , 34 , 55 , 33 , 56 , 32 , 55 , 35 ,
& 14 , 8 , 24 , 20 , 84 , 0 , 14 , 55 , 23 , 56 , 14 , 64 ,
& 14 , 59 , 23 , 60 , 14 , 66 , 8 , 34 , 56 , 31 , 55 , 34 ,
& 56 , 33 , 56 , 35 , 64 , 34/
DATA NRKkc/15 , 1 , 89 , 0 , 24 , 53 , 15 , 54 , 1 , 36 , 1 , 40 ,
& 1 , 44 , 36 , 63 , 15 , 63 , 45 , 53 , 44 , 54 , 15 , 8 ,
& 24 , 1 , 91 , 0 , 24 , 54 , 15 , 55 , 8 , 36 , 1 , 37 , 8 ,
& 40 , 1 , 41 , 8 , 44 , 1 , 45 , 36 , 64 , 37 , 63 , 15 , 64 ,
& 24 , 63 , 45 , 54 , 44 , 55 , 16 , 1 , 25 , 8 , 17 , 23 ,
& 21 , 14 , 20 , 13 , 22 , 23 , 90 , 0 , 38 , 1 , 39 , 8 , 16 ,
& 54 , 25 , 55 , 1 , 42 , 8 , 43 , 16 , 63 , 25 , 64 , 39 ,
& 64 , 38 , 63 , 46 , 54 , 47 , 55 , 8 , 47 , 1 , 46 , 52 , 0 ,
& 51 , 0 , 16 , 8 , 17 , 14 , 20 , 23 , 22 , 14 , 92 , 0 , 8 ,
& 38 , 16 , 55 , 25 , 56 , 8 , 42 , 16 , 64 , 38 , 64 , 46 ,
& 55 , 47 , 56 , 8 , 46 , 94 , 0/
C *
C k0 p k0 n ak0 p ak0 n *
C *
C KN reaction channels corrected as pointed out by A.Ferrari 18/12/12
C & 37, 64, 24, 64, 44, 56, 45, 55, 25, 1, 17, 13, 22, 13, 21, 23,
DATA NRKk0/24 , 8 , 106 , 0 , 15 , 56 , 24 , 55 , 37 , 8 , 41 ,
& 8 , 45 , 8 , 37 , 64 , 24 , 64 , 44 , 56 , 45 , 55 , 25 , 1 ,
& 17 , 13 , 21 , 23 , 22 , 13 , 107 , 0 , 39 , 1 , 25 , 54 ,
& 16 , 53 , 43 , 1 , 25 , 63 , 39 , 63 , 47 , 54 , 46 , 53 ,
& 47 , 1 , 103 , 0 , 93 , 0/
C pp pn np nn *
C nn reaction channels corrected as pointed out by A.Ferrari 18/12/12
C & 8, 55, 1, 56, 8, 64, 8, 59, 1, 60, 2*55, 54, 56, 64, 55, 63, 56 /
DATA NRKp/1 , 1 , 85 , 0 , 8 , 53 , 1 , 54 , 1 , 63 , 8 , 57 , 1 ,
& 58 , 2*54 , 53 , 55 , 63 , 54 , 64 , 53 , 1 , 8 , 86 , 0 ,
& 8 , 54 , 1 , 55 , 8 , 63 , 1 , 64 , 8 , 58 , 1 , 59 , 64 ,
& 54 , 63 , 55 , 54 , 55 , 53 , 56 , 77 , 0 , 2*8 , 95 , 0 ,
& 1 , 56 , 8 , 55 , 8 , 64 , 1 , 60 , 8 , 59 , 2*55 , 54 , 56 ,
& 64 , 55 , 63 , 56/
C app apn anp ann *
DATA NRKn/1 , 2 , 17 , 18 , 15 , 16 , 8 , 9 , 13 , 14 , 99 , 0 ,
& 87 , 0 , 1 , 68 , 8 , 69 , 2 , 54 , 9 , 55 , 102 , 0 , 2 ,
& 63 , 9 , 64 , 1 , 75 , 8 , 76 , 53 , 67 , 54 , 68 , 55 , 69 ,
& 56 , 70 , 63 , 68 , 64 , 69 , 75 , 54 , 76 , 55 , 2 , 8 ,
& 18 , 20 , 16 , 24 , 14 , 23 , 101 , 0 , 88 , 0 , 2 , 55 , 9 ,
& 56 , 1 , 67 , 8 , 68 , 2 , 64 , 8 , 75 , 2 , 59 , 8 , 72 ,
& 68 , 55 , 67 , 54 , 69 , 56 , 1 , 9 , 18 , 21 , 15 , 25 ,
& 13 , 23 , 100 , 0 , 96 , 0 , 2 , 53 , 9 , 54 , 1 , 69 , 8 ,
& 70 , 1 , 76 , 9 , 63 , 1 , 73 , 9 , 58 , 55 , 70 , 53 , 68 ,
& 54 , 69/
DATA SPIkp1/0.D0 , 300.D0 , 40.D0 , 20.D0 , 13.D0 , 8.5D0 , 8.D0 ,
& 9.5D0 , 12.D0 , 14.D0 , 15.5D0 , 20.D0 , 17.D0 , 13.D0 ,
& 10.D0 , 9.D0 , 8.5D0 , 8.D0 , 7.8D0 , 7.3D0 , 6.7D0 ,
& 9*0.D0 , .23D0 , .35D0 , .7D0 , .52D0 , .4D0 , .3D0 , .2D0 ,
& .15D0 , .13D0 , .11D0 , .09D0 , .07D0 , 0.D0 , .033D0 ,
& .8D0 , 1.35D0 , 1.35D0 , .5D0 , 15*0.D0 , 3*0.D0 , .00D0 ,
& 0.80D0 , 2.2D0 , 3.6D0 , 4.6D0 , 4.7D0 , 3.5D0 , 2.4D0 ,
& 1.8D0 , 1.4D0 , .75D0 , .47D0 , .25D0 , .13D0 , .08D0 ,
& 6*0.D0 , 0.D0 , 1.2D0 , 3.3D0 , 5.4D0 , 6.9D0 , 7.3D0 ,
& 5.3D0 , 3.6D0 , 2.7D0 , 2.2D0 , 1.1D0 , .73D0 , .4D0 ,
& .22D0 , .12D0 , 9*0.D0 , .0D0 , 0.D0 , 2.0D0 , 4.4D0 ,
& 6.8D0 , 9.9D0 , 7.9D0 , 6.0D0 , 3.8D0 , 2.5D0 , 2.D0 ,
& 1.4D0 , 1.D0 , .6D0 , .35D0 , 10*0.D0 , .25D0 , .55D0 ,
& .75D0 , 1.25D0 , 1.9D0 , 2.D0 , 1.8D0 , 1.5D0 , 1.25D0 ,
& 1.D0 , .8D0 , 6*0.D0 , 4*0.D0 , .4D0 , .85D0 , 1.1D0 ,
& 1.85D0 , 2.8D0 , 3.D0 , 2.7D0 , 2.2D0 , 1.85D0 , 1.5D0 ,
& 1.2D0 , 6*0.D0 , 6*0.D0 , .5D0 , 1.2D0 , 1.7D0 , 3.4D0 ,
& 5.2D0 , 6.4D0 , 6.1D0 , 5.6D0 , 5.2D0 , 6*0.D0 , 2*0.D0 ,
& .0D0 , 1.D0 , 3.3D0 , 5.2D0 , 4.45D0 , 3.6D0 , 2.75D0 ,
& 1.9D0 , 1.65D0 , 1.3D0 , .95D0 , .6D0 , .45D0 , 6*0.D0 ,
& 3*0.D0 , .0D0 , .45D0 , 1.4D0 , 1.5D0 , 1.1D0 , .85D0 ,
& .5D0 , .3D0 , .2D0 , .15D0 , 8*0.D0 , 5*0.D0 , .0D0 , .0D0 ,
& .6D0 , .8D0 , .95D0 , .8D0 , .7D0 , .6D0 , .5D0 , .4D0 ,
& 6*0.D0 , 5*0.D0 , .0D0 , .00D0 , .85D0 , 1.2D0 , 1.4D0 ,
& 1.2D0 , 1.05D0 , .9D0 , .7D0 , .55D0 , 6*0.D0 , 5*0.D0 ,
& .0D0 , .00D0 , 1.D0 , 1.5D0 , 3.5D0 , 4.15D0 , 3.7D0 ,
& 2.7D0 , 2.3D0 , 1.75D0 , 6*0.D0 , 10*0.D0 , .5D0 , 2.0D0 ,
& 3.3D0 , 5.4D0 , 7.D0/
DATA SPIkpu/0.D0 , 25.D0 , 13.D0 , 11.D0 , 10.5D0 , 14.D0 ,
& 20.D0 , 20.D0 , 16.D0 , 14.D0 , 19.D0 , 28.D0 , 17.5D0 ,
& 13.5D0 , 12.D0 , 10.5D0 , 10.D0 , 10.D0 , 9.5D0 , 9.D0 ,
& 8.D0 , 7.5D0 , 7.D0 , 6.5D0 , 6.D0 , 0.D0 , 48.D0 , 19.D0 ,
& 15.D0 , 11.5D0 , 10.D0 , 8.D0 , 6.5D0 , 5.5D0 , 4.8D0 ,
& 4.2D0 , 7.5D0 , 3.4D0 , 2.5D0 , 2.5D0 , 2.1D0 , 1.4D0 ,
& 1.D0 , .8D0 , .6D0 , .46D0 , .3D0 , .2D0 , .15D0 , .13D0 ,
& 11*0.D0 , .95D0 , .65D0 , .48D0 , .35D0 , .2D0 , .18D0 ,
& .17D0 , .16D0 , .15D0 , .1D0 , .09D0 , .065D0 , .05D0 ,
& .04D0 , 12*0.D0 , .2D0 , .25D0 , .25D0 , .2D0 , .1D0 ,
& .08D0 , .06D0 , .045D0 , .03D0 , .02D0 , .01D0 , .005D0 ,
& .003D0 , 12*0.D0 , .3D0 , .24D0 , .18D0 , .15D0 , .13D0 ,
& .12D0 , .11D0 , .1D0 , .09D0 , .08D0 , .05D0 , .04D0 ,
& .03D0 , 0.D0 , 0.16D0 , .7D0 , 1.3D0 , 3.1D0 , 4.5D0 , 2.D0 ,
& 18*0.D0 , 3*.0D0 , 0.D0 , 0.D0 , 4.0D0 , 11.D0 , 11.4D0 ,
& 10.3D0 , 7.5D0 , 6.8D0 , 4.75D0 , 2.5D0 , 1.5D0 , .9D0 ,
& .55D0 , .35D0 , 13*0.D0 , .1D0 , .34D0 , .5D0 , .8D0 ,
& 1.1D0 , 2.25D0 , 3.3D0 , 2.3D0 , 1.6D0 , .95D0 , .45D0 ,
& .28D0 , .15D0 , 10*0.D0 , 2*0.D0 , .17D0 , .64D0 , 1.D0 ,
& 1.5D0 , 2.1D0 , 4.25D0 , 6.2D0 , 4.4D0 , 3.D0 , 1.8D0 ,
& .9D0 , .53D0 , .28D0 , 10*0.D0 , 2*0.D0 , .25D0 , .82D0 ,
& 1.3D0 , 1.9D0 , 2.8D0 , 5.5D0 , 8.D0 , 5.7D0 , 3.9D0 ,
& 2.35D0 , 1.15D0 , .69D0 , .37D0 , 10*0.D0 , 7*0.D0 , .0D0 ,
& .34D0 , 1.5D0 , 3.47D0 , 5.87D0 , 6.23D0 , 4.27D0 , 2.6D0 ,
& 1.D0 , .6D0 , .3D0 , .15D0 , 6*0.D0/
C
DATA SPIkpv/7*0.D0 , .00D0 , .16D0 , .75D0 , 1.73D0 , 2.93D0 ,
& 3.12D0 , 2.13D0 , 1.3D0 , .5D0 , .3D0 , .15D0 , .08D0 ,
& 6*0.D0 , 10*0.D0 , .2D0 , .6D0 , .92D0 , 2.4D0 , 4.9D0 ,
& 6.25D0 , 5.25D0 , 3.5D0 , 2.15D0 , 1.4D0 , 1.D0 , .7D0 ,
& 13*0.D0 , .13D0 , .4D0 , .62D0 , 1.6D0 , 3.27D0 , 4.17D0 ,
& 3.5D0 , 2.33D0 , 1.43D0 , .93D0 , .66D0 , .47D0 , 13*0.D0 ,
& .07D0 , .2D0 , .31D0 , .8D0 , 1.63D0 , 2.08D0 , 1.75D0 ,
& 1.17D0 , .72D0 , .47D0 , .34D0 , .23D0 , 17*0.D0 , .33D0 ,
& 1.D0 , 1.8D0 , 2.67D0 , 5.33D0 , 6.D0 , 5.53D0 , 5.D0 ,
& 17*0.D0 , .17D0 , .5D0 , .9D0 , 1.83D0 , 2.67D0 , 3.0D0 ,
& 2.77D0 , 2.5D0 , 3*0.D0 , 3*0.D0 , 1.D0 , 3.3D0 , 2.8D0 ,
& 2.5D0 , 2.3D0 , 1.8D0 , 1.5D0 , 1.1D0 , .8D0 , .7D0 , .55D0 ,
& .3D0 , 10*0.D0 , 9*0.D0 , .1D0 , .4D0 , 1.D0 , 1.4D0 ,
& 2.2D0 , 2.5D0 , 2.2D0 , 1.65D0 , 1.35D0 , 1.1D0 , .8D0 ,
& .6D0 , .4D0 , 12*0.D0 , .15D0 , .6D0 , 1.5D0 , 2.1D0 ,
& 3.3D0 , 3.8D0 , 3.3D0 , 2.45D0 , 2.05D0 , 1.65D0 , 1.2D0 ,
& .9D0 , .6D0 , 3*0.D0 , 9*0.D0 , .10D0 , .2D0 , .5D0 , .7D0 ,
& 1.3D0 , 1.55D0 , 1.9D0 , 1.8D0 , 1.55D0 , 1.35D0 , 1.15D0 ,
& .95D0 , .7D0 , 13*0.D0 , .2D0 , .5D0 , .7D0 , 1.3D0 ,
& 1.55D0 , 1.9D0 , 1.8D0 , 1.55D0 , 1.35D0 , 1.15D0 , .95D0 ,
& .7D0 , 17*0.D0 , .2D0 , .5D0 , .85D0 , 2.D0 , 2.15D0 ,
& 2.05D0 , 1.75D0 , 1.D0 , 17*0.D0 , .13D0 , .33D0 , .57D0 ,
& 1.33D0 , 1.43D0 , 1.36D0 , 1.17D0 , .67D0 , 17*0.D0 , .07D0 ,
& .17D0 , .28D0 , .67D0 , .72D0 , .69D0 , .58D0 , .33D0 ,
& 17*0.D0 , .4D0 , .7D0 , 1.D0 , 1.6D0 , 1.8D0 , 2.3D0 ,
& 1.9D0 , 1.7D0/
DATA SPIkpw/0.D0 , 25.D0 , 13.D0 , 11.D0 , 10.5D0 , 14.D0 ,
& 2*20.D0 , 16.D0 , 14.D0 , 19.D0 , 28.D0 , 17.5D0 , 13.5D0 ,
& 12.D0 , 10.5D0 , 2*10.D0 , 9.5D0 , 9.D0 , 8.D0 , 7.5D0 ,
& 7.D0 , 6.5D0 , 6.D0 , 0.D0 , 48.D0 , 19.D0 , 15.D0 , 11.5D0 ,
& 10.D0 , 8.D0 , 6.5D0 , 5.5D0 , 4.8D0 , 4.2D0 , 7.5D0 ,
& 3.4D0 , 2*2.5D0 , 2.1D0 , 1.4D0 , 1.D0 , .8D0 , .6D0 ,
& .46D0 , .3D0 , .2D0 , .15D0 , .13D0 , 11*0.D0 , .95D0 ,
& .65D0 , .48D0 , .35D0 , .2D0 , .18D0 , .17D0 , .16D0 ,
& .15D0 , .1D0 , .09D0 , .065D0 , .05D0 , .04D0 , 12*0.D0 ,
& .2D0 , 2*.25D0 , .2D0 , .1D0 , .08D0 , .06D0 , .045D0 ,
& .03D0 , .02D0 , .01D0 , .005D0 , .003D0 , 12*0.D0 , .3D0 ,
& .24D0 , .18D0 , .15D0 , .13D0 , .12D0 , .11D0 , .1D0 ,
& .09D0 , .08D0 , .05D0 , .04D0 , .03D0 , 0.D0 , 0.16D0 ,
& .7D0 , 1.3D0 , 3.1D0 , 4.5D0 , 2.D0 , 23*0.D0 , 4.0D0 ,
& 11.D0 , 11.4D0 , 10.3D0 , 7.5D0 , 6.8D0 , 4.75D0 , 2.5D0 ,
& 1.5D0 , .9D0 , .55D0 , .35D0 , 13*0.D0 , .1D0 , .34D0 ,
& .5D0 , .8D0 , 1.1D0 , 2.25D0 , 3.3D0 , 2.3D0 , 1.6D0 ,
& .95D0 , .45D0 , .28D0 , .15D0 , 12*0.D0 , .17D0 , .64D0 ,
& 1.D0 , 1.5D0 , 2.1D0 , 4.25D0 , 6.2D0 , 4.4D0 , 3.D0 ,
& 1.8D0 , .9D0 , .53D0 , .28D0 , 12*0.D0 , .25D0 , .82D0 ,
& 1.3D0 , 1.9D0 , 2.8D0 , 5.5D0 , 8.D0 , 5.7D0 , 3.9D0 ,
& 2.35D0 , 1.15D0 , .69D0 , .37D0 , 18*0.D0 , .34D0 , 1.5D0 ,
& 3.47D0 , 5.87D0 , 6.23D0 , 4.27D0 , 2.6D0 , 1.D0 , .6D0 ,
& .3D0 , .15D0 , 6*0.D0/
C
DATA SPIkpx/8*0.D0 , .16D0 , .75D0 , 1.73D0 , 2.93D0 , 3.12D0 ,
& 2.13D0 , 1.3D0 , .5D0 , .3D0 , .15D0 , .08D0 , 16*0.D0 ,
& .2D0 , .6D0 , .92D0 , 2.4D0 , 4.9D0 , 6.25D0 , 5.25D0 ,
& 3.5D0 , 2.15D0 , 1.4D0 , 1.D0 , .7D0 , 13*0.D0 , .13D0 ,
& .4D0 , .62D0 , 1.6D0 , 3.27D0 , 4.17D0 , 3.5D0 , 2.33D0 ,
& 1.43D0 , .93D0 , .66D0 , .47D0 , 13*0.D0 , .07D0 , .2D0 ,
& .31D0 , .8D0 , 1.63D0 , 2.08D0 , 1.75D0 , 1.17D0 , .72D0 ,
& .47D0 , .34D0 , .23D0 , 17*0.D0 , .33D0 , 1.D0 , 1.8D0 ,
& 2.67D0 , 5.33D0 , 6.D0 , 5.53D0 , 5.D0 , 17*0.D0 , .17D0 ,
& .5D0 , .9D0 , 1.83D0 , 2.67D0 , 3.0D0 , 2.77D0 , 2.5D0 ,
& 6*0.D0 , 1.D0 , 3.3D0 , 2.8D0 , 2.5D0 , 2.3D0 , 1.8D0 ,
& 1.5D0 , 1.1D0 , .8D0 , .7D0 , .55D0 , .3D0 , 19*0.D0 , .1D0 ,
& .4D0 , 1.D0 , 1.4D0 , 2.2D0 , 2.5D0 , 2.2D0 , 1.65D0 ,
& 1.35D0 , 1.1D0 , .8D0 , .6D0 , .4D0 , 12*0.D0 , .15D0 ,
& .6D0 , 1.5D0 , 2.1D0 , 3.3D0 , 3.8D0 , 3.3D0 , 2.45D0 ,
& 2.05D0 , 1.65D0 , 1.2D0 , .9D0 , .6D0 , 12*0.D0 , .10D0 ,
& .2D0 , .5D0 , .7D0 , 1.3D0 , 1.55D0 , 1.9D0 , 1.8D0 ,
& 1.55D0 , 1.35D0 , 1.15D0 , .95D0 , .7D0 , 13*0.D0 , .2D0 ,
& .5D0 , .7D0 , 1.3D0 , 1.55D0 , 1.9D0 , 1.8D0 , 1.55D0 ,
& 1.35D0 , 1.15D0 , .95D0 , .7D0 , 17*0.D0 , .2D0 , .5D0 ,
& .85D0 , 2.D0 , 2.15D0 , 2.05D0 , 1.75D0 , 1.D0 , 17*0.D0 ,
& .13D0 , .33D0 , .57D0 , 1.33D0 , 1.43D0 , 1.36D0 , 1.17D0 ,
& .67D0 , 17*0.D0 , .07D0 , .17D0 , .28D0 , .67D0 , .72D0 ,
& .69D0 , .58D0 , .33D0 , 17*0.D0 , .4D0 , .7D0 , 1.D0 ,
& 1.6D0 , 1.8D0 , 2.3D0 , 1.9D0 , 1.7D0/
DATA SPIkp4/0.D0 , 300.D0 , 40.D0 , 20.D0 , 13.D0 , 8.5D0 , 8.D0 ,
& 9.5D0 , 12.D0 , 14.D0 , 15.5D0 , 20.D0 , 17.D0 , 13.D0 ,
& 10.D0 , 9.D0 , 8.5D0 , 8.D0 , 7.8D0 , 7.3D0 , 6.7D0 ,
& 9*0.D0 , .23D0 , .35D0 , .7D0 , .52D0 , .4D0 , .3D0 , .2D0 ,
& .15D0 , .13D0 , .11D0 , .09D0 , .07D0 , 0.D0 , .033D0 ,
& .8D0 , 2*1.35D0 , .5D0 , 19*0.D0 , 0.8D0 , 2.2D0 , 3.6D0 ,
& 4.6D0 , 4.7D0 , 3.5D0 , 2.4D0 , 1.8D0 , 1.4D0 , .75D0 ,
& .47D0 , .25D0 , .13D0 , .08D0 , 7*0.D0 , 1.2D0 , 3.3D0 ,
& 5.4D0 , 6.9D0 , 7.3D0 , 5.3D0 , 3.6D0 , 2.7D0 , 2.2D0 ,
& 1.1D0 , .73D0 , .4D0 , .22D0 , .12D0 , 11*0.D0 , 2.0D0 ,
& 4.4D0 , 6.8D0 , 9.9D0 , 7.9D0 , 6.0D0 , 3.8D0 , 2.5D0 ,
& 2.D0 , 1.4D0 , 1.D0 , .6D0 , .35D0 , 10*0.D0 , .25D0 ,
& .55D0 , .75D0 , 1.25D0 , 1.9D0 , 2.D0 , 1.8D0 , 1.5D0 ,
& 1.25D0 , 1.D0 , .8D0 , 10*0.D0 , .4D0 , .85D0 , 1.1D0 ,
& 1.85D0 , 2.8D0 , 3.D0 , 2.7D0 , 2.2D0 , 1.85D0 , 1.5D0 ,
& 1.2D0 , 12*0.D0 , .5D0 , 1.2D0 , 1.7D0 , 3.4D0 , 5.2D0 ,
& 6.4D0 , 6.1D0 , 5.6D0 , 5.2D0 , 9*0.D0 , 1.D0 , 3.3D0 ,
& 5.2D0 , 4.45D0 , 3.6D0 , 2.75D0 , 1.9D0 , 1.65D0 , 1.3D0 ,
& .95D0 , .6D0 , .45D0 , 10*0.D0 , .45D0 , 1.4D0 , 1.5D0 ,
& 1.1D0 , .85D0 , .5D0 , .3D0 , .2D0 , .15D0 , 15*0.D0 , .6D0 ,
& .8D0 , .95D0 , .8D0 , .7D0 , .6D0 , .5D0 , .4D0 , 13*0.D0 ,
& .85D0 , 1.2D0 , 1.4D0 , 1.2D0 , 1.05D0 , .9D0 , .7D0 ,
& .55D0 , 13*0.D0 , 1.D0 , 1.5D0 , 3.5D0 , 4.15D0 , 3.7D0 ,
& 2.7D0 , 2.3D0 , 1.75D0 , 16*0.D0 , .5D0 , 2.0D0 , 3.3D0 ,
& 5.4D0 , 7.D0/
DATA SPIkp5/0.D0 , 20.D0 , 14.D0 , 12.D0 , 11.5D0 , 10.D0 , 8.D0 ,
& 7.D0 , 6.D0 , 5.5D0 , 5.3D0 , 5.D0 , 4.5D0 , 4.4D0 , 3.8D0 ,
& 3.D0 , 2.8D0 , 0.D0 , .5D0 , 1.15D0 , 2.D0 , 1.3D0 , .8D0 ,
& .45D0 , 13*0.D0 , 0.9D0 , 2.5D0 , 3.D0 , 2.5D0 , 2.3D0 ,
& 2.D0 , 1.7D0 , 1.5D0 , 1.2D0 , .9D0 , .6D0 , .45D0 , .21D0 ,
& .2D0 , 3*0.D0 , .9D0 , 2.5D0 , 3.D0 , 2.5D0 , 2.3D0 , 2.D0 ,
& 1.7D0 , 1.5D0 , 1.2D0 , .9D0 , .6D0 , .45D0 , .21D0 , .2D0 ,
& 4*0.D0 , 1.D0 , 2.1D0 , 2.6D0 , 2.3D0 , 2.1D0 , 1.8D0 ,
& 1.7D0 , 1.4D0 , 1.2D0 , 1.05D0 , .9D0 , .66D0 , .5D0 ,
& 7*0.D0 , .3D0 , 2*1.D0 , .9D0 , .7D0 , .4D0 , .3D0 , .2D0 ,
& 11*0.D0 , .1D0 , 1.D0 , 2.2D0 , 3.5D0 , 4.2D0 , 4.55D0 ,
& 4.85D0 , 4.9D0 , 10*0.D0 , .2D0 , .7D0 , 1.6D0 , 2.5D0 ,
& 2.2D0 , 1.71D0 , 1.6D0 , 6*0.D0 , 1.4D0 , 3.8D0 , 5.D0 ,
& 4.7D0 , 4.4D0 , 4.D0 , 3.5D0 , 2.85D0 , 2.35D0 , 2.01D0 ,
& 1.8D0 , 12*0.D0 , .1D0 , .8D0 , 2.05D0 , 3.31D0 , 3.5D0 ,
& 12*0.D0 , .034D0 , .2D0 , .75D0 , 1.04D0 , 1.24D0/
DATA SPIkp6/0.D0 , 6.D0 , 11.D0 , 13.D0 , 6.D0 , 5.D0 , 3.D0 ,
& 2.2D0 , 1.5D0 , 1.2D0 , 1.D0 , .7D0 , .6D0 , .5D0 , .45D0 ,
& .35D0 , .3D0 , 0.D0 , 6.D0 , 11.D0 , 13.D0 , 6.D0 , 5.D0 ,
& 3.D0 , 2.2D0 , 1.5D0 , 1.2D0 , 1.D0 , .7D0 , .6D0 , .5D0 ,
& .45D0 , .35D0 , .3D0 , 0.D0 , .5D0 , 1.3D0 , 2.8D0 , 2.3D0 ,
& 1.6D0 , .9D0 , 13*0.D0 , 0.9D0 , 2.5D0 , 3.D0 , 2.5D0 ,
& 2.3D0 , 2.D0 , 1.7D0 , 1.5D0 , 1.2D0 , .9D0 , .6D0 , .45D0 ,
& .21D0 , .2D0 , 3*0.D0 , 0.9D0 , 2.5D0 , 3.D0 , 2.5D0 ,
& 2.3D0 , 2.D0 , 1.7D0 , 1.5D0 , 1.2D0 , .9D0 , .6D0 , .45D0 ,
& .21D0 , .2D0 , 4*0.D0 , 1.D0 , 2.1D0 , 2.6D0 , 2.3D0 , 2.D0 ,
& 1.8D0 , 1.7D0 , 1.4D0 , 1.2D0 , 1.15D0 , .9D0 , .66D0 ,
& .5D0 , 4*0.D0 , 1.D0 , 2.1D0 , 2.6D0 , 2.3D0 , 2.1D0 ,
& 1.8D0 , 1.7D0 , 1.4D0 , 1.2D0 , 1.15D0 , .9D0 , .66D0 ,
& .5D0 , 7*0.D0 , .3D0 , 2*1.D0 , .9D0 , .7D0 , .4D0 , .35D0 ,
& .2D0 , 9*0.D0 , .3D0 , 2*1.D0 , .9D0 , .7D0 , .4D0 , .35D0 ,
& .2D0 , 11*0.D0 , .1D0 , 1.D0 , 2.4D0 , 3.5D0 , 4.25D0 ,
& 4.55D0 , 4.85D0 , 4.9D0 , 9*0.D0 , .1D0 , 1.D0 , 2.4D0 ,
& 3.5D0 , 4.25D0 , 4.55D0 , 4.85D0 , 4.9D0 , 10*0.D0 , .2D0 ,
& .7D0 , 1.6D0 , 2.5D0 , 2.2D0 , 1.71D0 , 1.6D0 , 10*0.D0 ,
& .2D0 , .7D0 , 1.6D0 , 2.5D0 , 2.2D0 , 1.71D0 , 1.6D0 ,
& 6*0.D0 , 1.4D0 , 3.8D0 , 5.D0 , 4.7D0 , 4.4D0 , 4.D0 ,
& 3.5D0 , 2.85D0 , 2.35D0 , 2.01D0 , 1.8D0 , 6*0.D0 , 1.4D0 ,
& 3.8D0 , 5.D0 , 4.7D0 , 4.4D0 , 4.D0 , 3.5D0 , 2.85D0 ,
& 2.35D0 , 2.01D0 , 1.8D0 , 12*0.D0 , .1D0 , .8D0 , 2.05D0 ,
& 3.31D0 , 3.5D0 , 12*0.D0 , .034D0 , .2D0 , .75D0 , 1.04D0 ,
& 1.24D0/
DATA SKMpel/0.D0 , 35.D0 , 22.D0 , 25.D0 , 17.D0 , 9.D0 , 9.5D0 ,
& 8.D0 , 7.D0 , 6.5D0 , 6.1D0 , 5.D0 , 4.8D0 , 4.6D0 , 4.45D0 ,
& 4.3D0 , 4.2D0 , 0.D0 , 8.D0 , 3.5D0 , 8.D0 , 3.D0 , 1.9D0 ,
& 1.7D0 , 1.D0 , .9D0 , .8D0 , .75D0 , .5D0 , .42D0 , .38D0 ,
& .34D0 , .25D0 , .2D0 , 0.D0 , 3.D0 , 3.2D0 , 3.5D0 , 1.5D0 ,
& 1.4D0 , 1.1D0 , .6D0 , .5D0 , .35D0 , .28D0 , .25D0 , .18D0 ,
& .12D0 , .1D0 , .08D0 , .04D0 , 0.D0 , 8.5D0 , 2.4D0 , 1.7D0 ,
& 1.3D0 , 1.3D0 , 1.1D0 , .5D0 , .4D0 , .4D0 , .35D0 , .3D0 ,
& .28D0 , .2D0 , .16D0 , .13D0 , .11D0 , 0.D0 , 7.D0 , 4.8D0 ,
& 1.4D0 , 1.9D0 , .9D0 , .4D0 , .2D0 , .13D0 , .1D0 , .08D0 ,
& .06D0 , .04D0 , .02D0 , .015D0 , .01D0 , .01D0 , 0.D0 ,
& 5.5D0 , 1.D0 , .8D0 , .75D0 , .32D0 , .2D0 , .1D0 , .09D0 ,
& .08D0 , .065D0 , .05D0 , .04D0 , .022D0 , .017D0 , 2*.01D0/
DATA SPIkp7/0.D0 , .56D0 , 1.46D0 , 3.16D0 , 2.01D0 , 1.28D0 ,
& .74D0 , 14*0.D0 , 1.13D0 , 2.61D0 , 2.91D0 , 2.58D0 ,
& 2.35D0 , 2.02D0 , 1.91D0 , 1.57D0 , 1.35D0 , 1.29D0 ,
& 1.01D0 , .74D0 , .65D0 , 4*0.D0 , 1.13D0 , 2.61D0 , 2.91D0 ,
& 2.58D0 , 2.35D0 , 2.02D0 , 1.91D0 , 1.57D0 , 1.35D0 ,
& 1.29D0 , 1.01D0 , .74D0 , .65D0 , 3*0.D0 , 1.0D0 , 3.03D0 ,
& 3.36D0 , 2.8D0 , 2.58D0 , 2.24D0 , 1.91D0 , 1.68D0 , 1.35D0 ,
& 1.01D0 , .67D0 , .5D0 , .24D0 , .23D0 , 3*0.D0 , 1.0D0 ,
& 3.03D0 , 3.36D0 , 2.8D0 , 2.58D0 , 2.24D0 , 1.91D0 , 1.68D0 ,
& 1.35D0 , 1.01D0 , .67D0 , .5D0 , .24D0 , .23D0 , 7*0.D0 ,
& .34D0 , 1.12D0 , 1.12D0 , 1.01D0 , .78D0 , .45D0 , .39D0 ,
& .22D0 , .07D0 , 0.D0 , 7*0.D0 , .34D0 , 1.12D0 , 1.12D0 ,
& 1.01D0 , .78D0 , .45D0 , .39D0 , .22D0 , .07D0 , 0.D0 ,
& 6*0.D0 , 1.71D0 , 4.26D0 , 5.6D0 , 5.57D0 , 4.93D0 , 4.48D0 ,
& 3.92D0 , 3.19D0 , 2.63D0 , 2.25D0 , 2.D0 , 6*0.D0 , 1.71D0 ,
& 4.26D0 , 5.6D0 , 5.57D0 , 4.93D0 , 4.48D0 , 3.92D0 , 3.19D0 ,
& 2.63D0 , 2.25D0 , 2.D0 , 10*0.D0 , .22D0 , .8D0 , .75D0 ,
& 1.D0 , 1.3D0 , 1.5D0 , 1.3D0 , 10*0.D0 , .22D0 , .8D0 ,
& .75D0 , 1.D0 , 1.3D0 , 1.5D0 , 1.3D0 , 13*0.D0 , .1D0 ,
& .3D0 , .7D0 , 1.D0 , 13*0.D0 , .1D0 , .3D0 , .7D0 , 1.D0 ,
& 9*0.D0 , .11D0 , 1.72D0 , 2.69D0 , 3.92D0 , 4.76D0 , 5.10D0 ,
& 5.44D0 , 5.3D0 , 9*0.D0 , .11D0 , 1.72D0 , 2.69D0 , 3.92D0 ,
& 4.76D0 , 5.1D0 , 5.44D0 , 5.3D0 , 5*0.D0 , 9.2D0 , 4.7D0 ,
& 1.9D0 , 10*0.D0 , 2.5D0 , 15.D0 , 21.5D0 , 15.3D0 , 3.D0 ,
& 1.5D0 , 10*0.D0/
C**** k- n data *
DATA SKMnel/0.D0 , 4.D0 , 9.5D0 , 20.D0 , 13.D0 , 9.5D0 , 6.D0 ,
& 4.4D0 , 3.D0 , 2.4D0 , 2.D0 , 1.4D0 , 1.2D0 , 1.D0 , .9D0 ,
& .7D0 , .6D0 , 0.D0 , 4.5D0 , 6.D0 , 5.D0 , 2.5D0 , 2.D0 ,
& 1.7D0 , 2.1D0 , 1.9D0 , .9D0 , .5D0 , .3D0 , .24D0 , .2D0 ,
& .18D0 , .1D0 , .09D0 , 0.D0 , 1.8D0 , 2.D0 , 1.1D0 , .9D0 ,
& .5D0 , .5D0 , .4D0 , .4D0 , .2D0 , .1D0 , .06D0 , .05D0 ,
& .04D0 , .03D0 , .02D0 , .02D0 , 0.D0 , 1.5D0 , 2.D0 , .9D0 ,
& 1.1D0 , .4D0 , .6D0 , .7D0 , .65D0 , .3D0 , .17D0 , .1D0 ,
& .08D0 , .07D0 , .06D0 , .04D0 , .03D0/
DATA SPIkp8/0.D0 , .56D0 , 1.29D0 , 2.26D0 , 1.01D0 , .64D0 ,
& .37D0 , 14*0.D0 , 1.13D0 , 2.61D0 , 2.91D0 , 2.58D0 ,
& 2.35D0 , 2.02D0 , 1.91D0 , 1.57D0 , 1.35D0 , 1.29D0 ,
& 1.01D0 , .74D0 , .65D0 , 3*0.D0 , 1.D0 , 3.03D0 , 3.36D0 ,
& 2.8D0 , 2.58D0 , 2.24D0 , 1.91D0 , 1.68D0 , 1.35D0 , 1.01D0 ,
& .67D0 , .5D0 , .24D0 , .23D0 , 3*0.D0 , 1.D0 , 3.03D0 ,
& 3.36D0 , 2.8D0 , 2.58D0 , 2.24D0 , 1.91D0 , 1.68D0 , 1.35D0 ,
& 1.01D0 , .67D0 , .5D0 , .24D0 , .23D0 , 7*0.D0 , .34D0 ,
& 1.12D0 , 1.12D0 , 1.01D0 , .78D0 , .45D0 , .39D0 , .22D0 ,
& .07D0 , 0.D0 , 6*0.D0 , 1.71D0 , 4.26D0 , 5.6D0 , 5.57D0 ,
& 4.93D0 , 4.48D0 , 3.92D0 , 3.19D0 , 2.63D0 , 2.25D0 , 2.D0 ,
& 10*0.D0 , .22D0 , .8D0 , .75D0 , 1.D0 , 1.3D0 , 1.5D0 ,
& 1.3D0 , 13*0.D0 , .1D0 , .3D0 , .7D0 , 1.D0 , 13*0.D0 ,
& .1D0 , .3D0 , .7D0 , 1.D0 , 9*0.D0 , .11D0 , 1.72D0 ,
& 2.69D0 , 3.92D0 , 4.76D0 , 5.10D0 , 5.44D0 , 5.3D0 , 4*0.D0 ,
& 0.00D0 , 9.2D0 , 4.7D0 , 1.9D0 , 9*0.D0/
C**** p p data *
DATA SPIkp9/0.D0 , 24.D0 , 25.D0 , 27.D0 , 23.D0 , 21.D0 , 20.D0 ,
& 19.D0 , 17.D0 , 15.5D0 , 14.D0 , 13.5D0 , 13.D0 , 0.D0 ,
& 3.6D0 , 1.7D0 , 10*0.D0 , .0D0 , 0.D0 , 8.7D0 , 17.7D0 ,
& 18.8D0 , 15.9D0 , 11.7D0 , 8.D0 , 6.D0 , 5.3D0 , 4.5D0 ,
& 3.9D0 , 3.5D0 , .0D0 , .0D0 , 2.8D0 , 5.8D0 , 6.2D0 , 5.1D0 ,
& 3.8D0 , 2.7D0 , 2.1D0 , 1.8D0 , 1.5D0 , 1.3D0 , 1.1D0 ,
& 5*0.D0 , 4.6D0 , 10.2D0 , 15.1D0 , 16.9D0 , 16.5D0 , 11.D0 ,
& 5.5D0 , 3.5D0 , 10*0.D0 , 4.3D0 , 7.6D0 , 9.D0 , 10*0.D0 ,
& 1.7D0 , 2.6D0 , 3.D0 , 6*0.D0 , .3D0 , .6D0 , 1.D0 , 1.6D0 ,
& 1.3D0 , .8D0 , .6D0 , 6*0.D0 , .7D0 , 1.2D0 , 1.8D0 , 2.5D0 ,
& 1.8D0 , 1.3D0 , 1.2D0 , 10*0.D0 , .6D0 , 1.4D0 , 1.7D0 ,
& 10*0.D0 , 1.9D0 , 4.1D0 , 5.2D0/
C**** p n data *
DATA SPIkp0/0.D0 , 24.D0 , 25.D0 , 27.D0 , 23.D0 , 21.D0 , 20.D0 ,
& 19.D0 , 17.D0 , 15.5D0 , 14.D0 , 13.5D0 , 13.D0 , 0.D0 ,
& 1.8D0 , .2D0 , 12*0.D0 , 3.2D0 , 6.05D0 , 9.9D0 , 5.1D0 ,
& 3.8D0 , 2.7D0 , 1.9D0 , 1.5D0 , 1.4D0 , 1.3D0 , 1.1D0 ,
& 2*.0D0 , 3.2D0 , 6.05D0 , 9.9D0 , 5.1D0 , 3.8D0 , 2.7D0 ,
& 1.9D0 , 1.5D0 , 1.4D0 , 1.3D0 , 1.1D0 , 5*0.D0 , 4.6D0 ,
& 10.2D0 , 15.1D0 , 16.4D0 , 15.2D0 , 11.D0 , 5.4D0 , 3.5D0 ,
& 5*0.D0 , 4.6D0 , 10.2D0 , 15.1D0 , 16.4D0 , 15.2D0 , 11.D0 ,
& 5.4D0 , 3.5D0 , 10*0.D0 , .7D0 , 5.1D0 , 8.D0 , 10*0.D0 ,
& .7D0 , 5.1D0 , 8.D0 , 10*.0D0 , .3D0 , 2.8D0 , 4.7D0 ,
& 10*.0D0 , .3D0 , 2.8D0 , 4.7D0 , 7*0.D0 , 1.2D0 , 2.5D0 ,
& 3.5D0 , 6.D0 , 5.3D0 , 2.9D0 , 7*0.D0 , 1.7D0 , 3.6D0 ,
& 5.4D0 , 9.D0 , 7.6D0 , 4.2D0 , 5*0.D0 , 7.7D0 , 6.1D0 ,
& 2.9D0 , 5*0.D0/
C nn - data *
C *
DATA SPKpv/0.D0 , 24.D0 , 25.D0 , 27.D0 , 23.D0 , 21.D0 , 20.D0 ,
& 19.D0 , 17.D0 , 15.5D0 , 14.D0 , 13.5D0 , 13.D0 , 0.D0 ,
& 3.6D0 , 1.7D0 , 12*0.D0 , 8.7D0 , 17.7D0 , 18.8D0 , 15.9D0 ,
& 11.7D0 , 8.D0 , 6.D0 , 5.3D0 , 4.5D0 , 3.9D0 , 3.5D0 , .0D0 ,
& .0D0 , 2.8D0 , 5.8D0 , 6.2D0 , 5.1D0 , 3.8D0 , 2.7D0 ,
& 2.1D0 , 1.8D0 , 1.5D0 , 1.3D0 , 1.1D0 , 5*0.D0 , 4.6D0 ,
& 10.2D0 , 15.1D0 , 16.9D0 , 16.5D0 , 11.D0 , 5.5D0 , 3.5D0 ,
& 10*0.D0 , 4.3D0 , 7.6D0 , 9.D0 , 10*0.D0 , 1.7D0 , 2.6D0 ,
& 3.D0 , 6*0.D0 , .3D0 , .6D0 , 1.D0 , 1.6D0 , 1.3D0 , .8D0 ,
& .6D0 , 6*0.D0 , .7D0 , 1.2D0 , 1.8D0 , 2.5D0 , 1.8D0 ,
& 1.3D0 , 1.2D0 , 10*0.D0 , .6D0 , 1.4D0 , 1.7D0 , 10*0.D0 ,
& 1.9D0 , 4.1D0 , 5.2D0/
C*************** ap - p - data *
DATA SAPpel/0.D0 , 176.D0 , 160.D0 , 105.D0 , 75.D0 , 68.D0 ,
& 65.D0 , 50.D0 , 50.D0 , 43.D0 , 42.D0 , 40.5D0 , 35.D0 ,
& 30.D0 , 28.D0 , 25.D0 , 22.D0 , 21.D0 , 20.D0 , 18.D0 ,
& 17.D0 , 11*0.D0 , .05D0 , .15D0 , .18D0 , .2D0 , .2D0 ,
& .3D0 , .4D0 , .6D0 , .7D0 , .85D0 , 0.D0 , 1.D0 , .9D0 ,
& .46D0 , .3D0 , .23D0 , .18D0 , .16D0 , .14D0 , .1D0 , .08D0 ,
& .05D0 , .02D0 , .015D0 , 4*.011D0 , 3*.005D0 , 0.D0 , 55.D0 ,
& 50.D0 , 25.D0 , 15.D0 , 15.D0 , 14.D0 , 12.D0 , 10.D0 ,
& 7.D0 , 6.D0 , 4.D0 , 3.3D0 , 2.8D0 , 2.4D0 , 2.D0 , 1.8D0 ,
& 1.55D0 , 1.3D0 , .95D0 , .75D0 , 0.D0 , 3.3D0 , 3.D0 ,
& 1.5D0 , 1.D0 , .7D0 , .4D0 , .35D0 , .4D0 , .25D0 , .18D0 ,
& .08D0 , .04D0 , .03D0 , .023D0 , .016D0 , .014D0 , .01D0 ,
& .008D0 , .006D0 , .005D0/
DATA SPIkpe/0.D0 , 215.D0 , 193.D0 , 170.D0 , 148.D0 , 113.D0 ,
& 97.D0 , 84.D0 , 78.D0 , 68.D0 , 64.D0 , 61.D0 , 46.D0 ,
& 36.D0 , 31.3D0 , 28.5D0 , 25.7D0 , 22.6D0 , 21.4D0 , 20.7D0 ,
& 19.9D0 , 9*0.D0 , 2.D0 , 2.5D0 , .2D0 , 19*0.D0 , .3D0 ,
& 1.4D0 , 2.2D0 , 1.2D0 , 1.1D0 , 1.D0 , .8D0 , .6D0 , .5D0 ,
& .4D0 , .3D0 , 10*0.D0 , .3D0 , 1.4D0 , 2.2D0 , 1.2D0 ,
& 1.1D0 , 1.D0 , .8D0 , .6D0 , .5D0 , .4D0 , .3D0 , 10*0.D0 ,
& .3D0 , 1.4D0 , 2.2D0 , 1.2D0 , 1.1D0 , 1.D0 , .8D0 , .6D0 ,
& .5D0 , .4D0 , .3D0 , 10*0.D0 , .3D0 , 1.4D0 , 2.2D0 , 1.2D0 ,
& 1.1D0 , 1.D0 , .8D0 , .6D0 , .5D0 , .4D0 , .3D0 , 9*0.D0 ,
& .6D0 , 2.5D0 , 5.D0 , 5.2D0 , 5.1D0 , 5.4D0 , 5.8D0 , 2.8D0 ,
& 2.1D0 , 1.8D0 , 1.6D0 , 1.2D0 , 13*0.D0 , 1.3D0 , 1.5D0 ,
& 2.D0 , 2.5D0 , 2.5D0 , 2.3D0 , 1.8D0 , 1.4D0 , 13*0.D0 ,
& 1.3D0 , 1.5D0 , 2.D0 , 2.5D0 , 2.5D0 , 2.3D0 , 1.8D0 ,
& 1.4D0 , 13*0.D0 , 1.3D0 , 1.5D0 , 2.D0 , 2.5D0 , 2.5D0 ,
& 2.3D0 , 1.8D0 , 1.4D0 , 13*0.D0 , 1.3D0 , 1.5D0 , 2.D0 ,
& 2.5D0 , 2.5D0 , 2.3D0 , 1.8D0 , 1.4D0 , 14*0.D0 , .2D0 ,
& .5D0 , 1.1D0 , 1.6D0 , 1.4D0 , 1.1D0 , .9D0 , 14*0.D0 ,
& .2D0 , .5D0 , 1.1D0 , 1.6D0 , 1.4D0 , 1.1D0 , .9D0 ,
& 14*0.D0 , .2D0 , .5D0 , 1.1D0 , 1.6D0 , 1.4D0 , 1.1D0 ,
& .9D0 , 14*0.D0 , .2D0 , .5D0 , 1.1D0 , 1.6D0 , 1.4D0 ,
& 1.1D0 , .9D0 , 17*0.D0 , .3D0 , 1.6D0 , 2.6D0 , 3.6D0 ,
& 17*0.D0 , .3D0 , 1.6D0 , 2.6D0 , 3.6D0 , 17*0.D0 , .3D0 ,
& 1.6D0 , 2.6D0 , 3.6D0 , 17*0.D0 , .3D0 , 1.6D0 , 2.6D0 ,
& 3.6D0/
C*************** ap - n - data *
DATA SAPnel/0.D0 , 176.D0 , 160.D0 , 105.D0 , 75.D0 , 68.D0 ,
& 65.D0 , 50.D0 , 50.D0 , 43.D0 , 42.D0 , 40.5D0 , 35.D0 ,
& 30.D0 , 28.D0 , 25.D0 , 22.D0 , 21.D0 , 20.D0 , 18.D0 ,
& 17.D0 , 11*0.D0 , .05D0 , .15D0 , .18D0 , .2D0 , .2D0 ,
& .3D0 , .4D0 , .6D0 , .7D0 , .85D0 , 0.D0 , 1.D0 , .9D0 ,
& .46D0 , .3D0 , .23D0 , .18D0 , .16D0 , .14D0 , .1D0 , .08D0 ,
& .05D0 , .02D0 , .015D0 , 4*.011D0 , 3*.005D0 , 0.D0 , 3.3D0 ,
& 3.D0 , 1.5D0 , 1.D0 , .7D0 , .4D0 , .35D0 , .4D0 , .25D0 ,
& .18D0 , .08D0 , .04D0 , .03D0 , .023D0 , .016D0 , .014D0 ,
& .01D0 , .008D0 , .006D0 , .005D0/
DATA SPIkpz/0.D0 , 215.D0 , 193.D0 , 170.D0 , 148.D0 , 113.D0 ,
& 97.D0 , 84.D0 , 78.D0 , 68.D0 , 64.D0 , 61.D0 , 46.D0 ,
& 36.D0 , 31.3D0 , 28.5D0 , 25.7D0 , 22.6D0 , 21.4D0 , 20.7D0 ,
& 19.9D0 , 9*0.D0 , 2.4D0 , .2D0 , 20*0.D0 , 1.8D0 , 2.8D0 ,
& 3.6D0 , 2.3D0 , 1.8D0 , 1.5D0 , 1.3D0 , 1.D0 , .7D0 , .5D0 ,
& .3D0 , 10*0.D0 , 1.8D0 , 2.8D0 , 3.6D0 , 2.3D0 , 1.8D0 ,
& 1.5D0 , 1.3D0 , 1.D0 , .7D0 , .5D0 , .3D0 , 10*0.D0 , 1.8D0 ,
& 2.8D0 , 3.6D0 , 2.3D0 , 1.8D0 , 1.5D0 , 1.3D0 , 1.D0 , .7D0 ,
& .5D0 , .3D0 , 10*0.D0 , 1.8D0 , 2.8D0 , 3.6D0 , 2.3D0 ,
& 1.8D0 , 1.5D0 , 1.3D0 , 1.D0 , .7D0 , .5D0 , .3D0 , 13*0.D0 ,
& 5.2D0 , 8.7D0 , 11.4D0 , 14.D0 , 11.9D0 , 7.6D0 , 6.D0 ,
& 5.D0 , 13*0.D0 , 5.2D0 , 8.7D0 , 11.4D0 , 14.D0 , 11.9D0 ,
& 7.6D0 , 6.D0 , 5.D0 , 18*0.D0 , 1.D0 , 4.9D0 , 8.5D0 ,
& 18*0.D0 , 1.D0 , 4.9D0 , 8.5D0 , 15*0.D0 , 1.9D0 , 2.3D0 ,
& 4.D0 , 6.5D0 , 5.2D0 , 3.4D0 , 15*0.D0 , 1.9D0 , 2.3D0 ,
& 4.D0 , 6.5D0 , 5.2D0 , 3.4D0 , 15*0.D0 , 1.9D0 , 2.3D0 ,
& 4.D0 , 6.5D0 , 5.2D0 , 3.4D0/
C *
C *
C*************** an - p - data *
C *
DATA SANpel/0.D0 , 176.D0 , 160.D0 , 105.D0 , 75.D0 , 68.D0 ,
& 65.D0 , 50.D0 , 50.D0 , 43.D0 , 42.D0 , 40.5D0 , 35.D0 ,
& 30.D0 , 28.D0 , 25.D0 , 22.D0 , 21.D0 , 20.D0 , 18.D0 ,
& 17.D0 , 11*0.D0 , .05D0 , .15D0 , .18D0 , .2D0 , .2D0 ,
& .3D0 , .4D0 , .6D0 , .7D0 , .85D0 , 0.D0 , 1.D0 , .9D0 ,
& .46D0 , .3D0 , .23D0 , .18D0 , .16D0 , .14D0 , .1D0 , .08D0 ,
& .05D0 , .02D0 , .015D0 , 4*.011D0 , 3*.005D0 , 0.D0 , 3.3D0 ,
& 3.D0 , 1.5D0 , 1.D0 , .7D0 , .4D0 , .35D0 , .4D0 , .25D0 ,
& .18D0 , .08D0 , .04D0 , .03D0 , .023D0 , .016D0 , .014D0 ,
& .01D0 , .008D0 , .006D0 , .005D0/
DATA SPIkpf/0.D0 , 215.D0 , 193.D0 , 170.D0 , 148.D0 , 113.D0 ,
& 97.D0 , 84.D0 , 78.D0 , 68.D0 , 64.D0 , 61.D0 , 46.D0 ,
& 36.D0 , 31.3D0 , 28.5D0 , 25.7D0 , 22.6D0 , 21.4D0 , 20.7D0 ,
& 19.9D0 , 9*0.D0 , 2.4D0 , .2D0 , 20*0.D0 , 1.8D0 , 2.8D0 ,
& 3.6D0 , 2.3D0 , 1.8D0 , 1.5D0 , 1.3D0 , 1.D0 , .7D0 , .5D0 ,
& .3D0 , 10*0.D0 , 1.8D0 , 2.8D0 , 3.6D0 , 2.3D0 , 1.8D0 ,
& 1.5D0 , 1.3D0 , 1.D0 , .7D0 , .5D0 , .3D0 , 10*0.D0 , 1.8D0 ,
& 2.8D0 , 3.6D0 , 2.3D0 , 1.8D0 , 1.5D0 , 1.3D0 , 1.D0 , .7D0 ,
& .5D0 , .3D0 , 10*0.D0 , 1.8D0 , 2.8D0 , 3.6D0 , 2.3D0 ,
& 1.8D0 , 1.5D0 , 1.3D0 , 1.D0 , .7D0 , .5D0 , .3D0 , 13*0.D0 ,
& 5.2D0 , 8.7D0 , 11.4D0 , 14.D0 , 11.9D0 , 7.6D0 , 6.D0 ,
& 5.D0 , 13*0.D0 , 5.2D0 , 8.7D0 , 11.4D0 , 14.D0 , 11.9D0 ,
& 7.6D0 , 6.D0 , 5.D0 , 18*0.D0 , 1.D0 , 4.9D0 , 8.5D0 ,
& 18*0.D0 , 1.D0 , 4.9D0 , 8.5D0 , 15*0.D0 , 1.9D0 , 2.3D0 ,
& 4.D0 , 6.5D0 , 5.2D0 , 3.4D0 , 15*0.D0 , 1.9D0 , 2.3D0 ,
& 4.D0 , 6.5D0 , 5.2D0 , 3.4D0 , 15*0.D0 , 1.9D0 , 2.3D0 ,
& 4.D0 , 6.5D0 , 5.2D0 , 3.4D0/
DATA SPKp15/0.D0 , 20.D0 , 14.D0 , 12.D0 , 11.5D0 , 10.D0 , 8.D0 ,
& 7.D0 , 6.D0 , 5.5D0 , 5.3D0 , 5.D0 , 4.5D0 , 4.4D0 , 3.8D0 ,
& 3.D0 , 2.8D0 , 0.D0 , .5D0 , 1.15D0 , 2.D0 , 1.3D0 , .8D0 ,
& .45D0 , 10*0.D0 , 3*0.D0 , 0.9D0 , 2.5D0 , 3.D0 , 2.5D0 ,
& 2.3D0 , 2.D0 , 1.7D0 , 1.5D0 , 1.2D0 , .9D0 , .6D0 , .45D0 ,
& .21D0 , .2D0 , 3*0.D0 , 0.9D0 , 2.5D0 , 3.D0 , 2.5D0 ,
& 2.3D0 , 2.D0 , 1.7D0 , 1.5D0 , 1.2D0 , .9D0 , .6D0 , .45D0 ,
& .21D0 , .2D0 , 4*0.D0 , 1.D0 , 2.1D0 , 2.6D0 , 2.3D0 ,
& 2.1D0 , 1.8D0 , 1.7D0 , 1.4D0 , 1.2D0 , 1.05D0 , .9D0 ,
& .66D0 , .5D0 , 7*0.D0 , .3D0 , 1.D0 , 1.D0 , .9D0 , .7D0 ,
& .4D0 , .30D0 , .2D0 , 11*0.D0 , .1D0 , 1.D0 , 2.2D0 , 3.5D0 ,
& 4.20D0 , 4.55D0 , 4.85D0 , 4.9D0 , 10*0.D0 , .2D0 , .7D0 ,
& 1.6D0 , 2.5D0 , 2.2D0 , 1.71D0 , 1.6D0 , 6*0.D0 , 1.4D0 ,
& 3.8D0 , 5.D0 , 4.7D0 , 4.4D0 , 4.D0 , 3.5D0 , 2.85D0 ,
& 2.35D0 , 2.01D0 , 1.8D0 , 12*0.D0 , .1D0 , .8D0 , 2.05D0 ,
& 3.31D0 , 3.5D0 , 12*0.D0 , .034D0 , .20D0 , .75D0 , 1.04D0 ,
& 1.24D0/
DATA SPKp16/0.D0 , 4.D0 , 9.5D0 , 20.D0 , 13.D0 , 9.5D0 , 6.D0 ,
& 4.4D0 , 3.D0 , 2.4D0 , 2.D0 , 1.4D0 , 1.2D0 , 1.D0 , .9D0 ,
& .7D0 , .6D0 , 0.D0 , 4.5D0 , 6.D0 , 5.D0 , 2.5D0 , 2.D0 ,
& 1.7D0 , 2.1D0 , 1.9D0 , .9D0 , .5D0 , .3D0 , .24D0 , .2D0 ,
& .18D0 , .1D0 , .09D0 , 0.D0 , 1.8D0 , 2.D0 , 1.1D0 , .9D0 ,
& .5D0 , .5D0 , .4D0 , .4D0 , .2D0 , .1D0 , .06D0 , .05D0 ,
& .04D0 , .03D0 , .02D0 , .02D0 , 0.D0 , 1.5D0 , 2.D0 , .9D0 ,
& 1.1D0 , .4D0 , .6D0 , .7D0 , .65D0 , .3D0 , .17D0 , .1D0 ,
& .08D0 , .07D0 , .06D0 , .04D0 , .03D0 , 0.D0 , .56D0 ,
& 1.29D0 , 2.26D0 , 1.01D0 , .64D0 , .37D0 , 14*0.D0 , 1.13D0 ,
& 2.61D0 , 2.91D0 , 2.58D0 , 2.35D0 , 2.02D0 , 1.91D0 ,
& 1.57D0 , 1.35D0 , 1.29D0 , 1.01D0 , .74D0 , .65D0 , 3*0.D0 ,
& 1.0D0 , 3.03D0 , 3.36D0 , 2.8D0 , 2.58D0 , 2.24D0 , 1.91D0 ,
& 1.68D0 , 1.35D0 , 1.01D0 , .67D0 , .5D0 , .24D0 , .23D0 ,
& 3*0.D0 , 1.0D0 , 3.03D0 , 3.36D0 , 2.8D0 , 2.58D0 , 2.24D0 ,
& 1.91D0 , 1.68D0 , 1.35D0 , 1.01D0 , .67D0 , .5D0 , .24D0 ,
& .23D0 , 7*0.D0 , .34D0 , 1.12D0 , 1.12D0 , 1.01D0 , .78D0 ,
& .45D0 , .39D0 , .22D0 , .07D0 , 7*0.D0 , 1.71D0 , 4.26D0 ,
& 5.6D0 , 5.57D0 , 4.93D0 , 4.48D0 , 3.92D0 , 3.19D0 , 2.63D0 ,
& 2.25D0 , 2.D0 , 10*0.D0 , .22D0 , .8D0 , .75D0 , 1.D0 ,
& 1.3D0 , 1.5D0 , 1.3D0 , 13*0.D0 , .1D0 , .3D0 , .7D0 , 1.D0 ,
& 13*0.D0 , .1D0 , .3D0 , .7D0 , 1.D0 , 9*0.D0 , .11D0 ,
& 1.72D0 , 2.69D0 , 3.92D0 , 4.76D0 , 5.10D0 , 5.44D0 , 5.3D0 ,
& 5*0.D0 , 9.2D0 , 4.7D0 , 1.9D0 , 9*0.D0 , .0D0 , 2.5D0 ,
& 15.D0 , 21.5D0 , 15.3D0 , 3.D0 , 1.5D0 , 10*0.D0/
DATA NUReln/9 , 12 , 5*0 , 10 , 14 , 3*0 , 1 , 3 , 5 , 7 , 6*0 ,
& 2 , 6 , 16 , 5*0 , 10 , 13 , 5*0 , 11 , 12 , 3*0 , 2 , 4 ,
& 6 , 8 , 6*0 , 3 , 15 , 7 , 5*0/
C= end*block.blkdt3 *
END BLOCK DATA
|
{"hexsha": "e8753acdf3eb5fa2b5fdf8c7f68b00e215737f3f", "size": 43940, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/dpmjet/DT_BLKD43.f", "max_stars_repo_name": "pzhristov/DPMJET", "max_stars_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-15T01:59:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T08:39:13.000Z", "max_issues_repo_path": "src/dpmjet/DT_BLKD43.f", "max_issues_repo_name": "pzhristov/DPMJET", "max_issues_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-15T09:53:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T20:52:28.000Z", "max_forks_repo_path": "src/dpmjet/DT_BLKD43.f", "max_forks_repo_name": "pzhristov/DPMJET", "max_forks_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-05T02:44:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T20:49:05.000Z", "avg_line_length": 65.1928783383, "max_line_length": 73, "alphanum_fraction": 0.4055075102, "num_tokens": 27569}
|
import warnings
import numpy as np
from matplotlib import pyplot as plt
from qupulse.pulses import SequencePT
from qupulse.pulses.plotting import (PlottingNotPossibleException, plot, render)
from qupulse.pulses.sequencing import Sequencer as Sequencing
from qupulse.serialization import Serializer, DictBackend
from qtt.instrument_drivers.virtualAwg.templates import DataTypes, Templates
# from qtt.instrument_drivers.virtualAwg.serializer import StringBackend
class Sequencer:
""" Conversion factor from seconds to nano-seconds."""
__sec_to_ns = 1e9
@staticmethod
def make_sawtooth_wave(amplitude, period, width=0.95, repetitions=1, name='sawtooth'):
""" Creates a sawtooth waveform of the type qupulse template.
Args:
amplitude (float): The peak-to-peak voltage of the waveform.
width (float): The width of the rising ramp as a proportion of the total cycle.
period (float): The period of the waveform in seconds.
repetitions (int): The number of oscillations in the sequence.
name (str): The name of the returned sequence.
Returns:
            Dict: 'name', 'wave' and 'type' keys holding the sequence name,
                the qupulse SequencePT and the data type, respectively.
"""
if width <= 0 or width >= 1:
raise ValueError('Invalid argument value (0 < width < 1)!')
input_variables = {'period': period*Sequencer.__sec_to_ns, 'amplitude': amplitude/2.0,
'width': width}
sequence_data = (Templates.sawtooth(name), input_variables)
return {'name': name, 'wave': SequencePT(*((sequence_data,)*repetitions)),
'type': DataTypes.QU_PULSE}
@staticmethod
def make_square_wave(amplitude, period, repetitions=1, name='pulse'):
""" Creates a block waveforms of the type qupulse template.
Args:
amplitude (float): The peak-to-peak voltage of the waveform.
period (float): The period of the waveform in seconds.
repetitions (int): The number of oscillations in the sequence.
name (str): The name of the returned sequence.
Returns:
Dict: *NAME*, *TYPE*, *WAVE* keys containing values; sequence name,
sequence data type and the actual qupulse sequencePT respectively.
"""
input_variables = {'period': period*Sequencer.__sec_to_ns, 'amplitude': amplitude/2.0}
sequence_data = (Templates.square(name), input_variables)
return {'name': name, 'wave': SequencePT(*(sequence_data,)*repetitions),
'type': DataTypes.QU_PULSE}
@staticmethod
def make_marker(period, uptime=0.2, offset=0.0, repetitions=1, name='marker'):
""" Creates a marker block waveforms of the type qupulse template.
Args:
period (float): The period of the waveform in seconds.
uptime (float): The marker up period in seconds.
offset (float): The marker delay in seconds.
repetitions (int): The number of oscillations in the sequence.
name (str): The name of the returned sequence.
Returns:
Dict: *NAME*, *TYPE*, *WAVE* keys containing values; sequence name,
sequence data type and the actual qupulse sequencePT respectively.
"""
if uptime <= 0 or offset < 0:
raise ValueError('Invalid argument value (uptime <= 0 or offset < 0)!')
if uptime + offset > period:
raise ValueError('Invalid argument value (uptime + offset > period)!')
input_variables = {'period': period * Sequencer.__sec_to_ns,
'uptime': uptime * Sequencer.__sec_to_ns,
'offset': offset * Sequencer.__sec_to_ns}
sequence_data = (Templates.marker(name), input_variables)
return {'name': name, 'wave': SequencePT(*((sequence_data,)*repetitions)),
'type': DataTypes.QU_PULSE, 'uptime': uptime, 'offset': offset}
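    # Hedged example of the timing checks above (arbitrary values): the
    # marker must fit inside one period, i.e. uptime + offset <= period.
    #     marker = Sequencer.make_marker(period=1e-6, uptime=2e-7, offset=1e-7)
    #     Sequencer.make_marker(period=1e-6, uptime=9e-7, offset=2e-7)  # raises ValueError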
@staticmethod
def __qupulse_template_to_array(sequence, sampling_rate):
""" Renders a qupulse sequence as array with voltages.
Args:
            sequence (dict): a waveform is a dictionary with a "type" value
                given by the used pulse library. The "wave" value should
                contain the actual wave-object.
sampling_rate (float): The number of samples per second.
Returns:
voltages (np.array): The array with voltages generated from the template.
"""
sequencer = Sequencing()
template = sequence['wave']
channels = template.defined_channels
sequencer.push(template, dict(), channel_mapping={ch: ch for ch in channels},
window_mapping={w: w for w in template.measurement_names})
instructions = sequencer.build()
if not sequencer.has_finished():
raise PlottingNotPossibleException(template)
(_, voltages, measurements) = render(instructions, sampling_rate / Sequencer.__sec_to_ns)
return voltages[next(iter(voltages))]
@staticmethod
def __raw_data_to_array(sequence, sampling_rate):
""" Renders a raw sequence as array with voltages.
Args:
            sequence (dict): a waveform is a dictionary with a "type" value
                given by the used pulse library. The "wave" value should
                contain the actual wave-object.
sampling_rate (float): The number of samples per second.
Returns:
voltages (np.array): The array with voltages generated from the template.
"""
return sequence['wave']
@staticmethod
def get_data(sequence, sampling_rate):
""" This function returns the raw array data given a sequence.
        A sequence can hold different types of data dependent on the
used pulse library. Currently only raw array data and qupulse
can be used.
Args:
            sequence (dict): a waveform is a dictionary with a "type" value
                given by the used pulse library. The "wave" value should
                contain the actual wave-object.
sampling_rate (float): a sample rate of the awg in samples per sec.
Returns:
A numpy.ndarray with the corresponding sampled voltages.
"""
data_type = sequence['type']
switch = {DataTypes.RAW_DATA: Sequencer.__raw_data_to_array,
DataTypes.QU_PULSE: Sequencer.__qupulse_template_to_array}
to_array_function = switch[data_type]
return to_array_function(sequence, sampling_rate)
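    # The ``switch`` dict above is a plain dispatch table keyed by the
    # sequence 'type'. Consumption sketch (values arbitrary):
    #     seq = Sequencer.make_square_wave(amplitude=1.0, period=1e-6)
    #     voltages = Sequencer.get_data(seq, sampling_rate=1e9)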
@staticmethod
def __raw_data_plot(sequence, sampling_rate, axes):
""" Plots a raw data sequence.
Args:
sequence (dict): a waveform dictionary with "type" value
given by the used pulse library. The "wave" value should contain
the actual wave-object.
sampling_rate (float): a sample rate of the awg in samples per sec.
axes: matplotlib Axes object the pulse will be drawn into if provided.
"""
if axes is None:
figure = plt.figure()
axes = figure.add_subplot(111)
raw_data = Sequencer.__raw_data_to_array(sequence, sampling_rate)
sample_count = len(raw_data)
total_time = (sample_count - 1)/(sampling_rate * Sequencer.__sec_to_ns)
times = np.linspace(0, total_time, num=sample_count, dtype=float)
axes.step(times, raw_data, where='post')
axes.get_figure().show()
@staticmethod
def __qupulse_template_plot(sequence, sampling_rate, axes):
""" Plots a qupulse sequence.
Args:
sequence (dict): a waveform dictionary with "type" value
given by the used pulse library. The "wave" value should contain
the actual wave-object.
sampling_rate (float): a sample rate of the awg in samples per sec.
axes: matplotlib Axes object the pulse will be drawn into if provided.
"""
ns_sample_rate = sampling_rate / Sequencer.__sec_to_ns
plot(sequence['wave'], sample_rate=ns_sample_rate, axes=axes, show=False)
@staticmethod
def plot(sequence, sampling_rate, axes=None):
""" Creates a plot for viewing the sequence.
Args:
sequence (dict): a waveform dictionary with "type" value
given by the used pulse library. The "wave" value should contain
the actual wave-object.
sampling_rate (float): a sample rate of the awg in samples per sec.
axes: matplotlib Axes object the pulse will be drawn into if provided.
"""
data_type = sequence['type']
switch = {DataTypes.RAW_DATA: Sequencer.__raw_data_plot,
DataTypes.QU_PULSE: Sequencer.__qupulse_template_plot}
plot_function = switch[data_type]
plot_function(sequence, sampling_rate, axes)
@staticmethod
def __raw_data_serialize(sequence):
""" Converts a raw data sequence into a JSON string.
Args:
            sequence: The wave object of a sequence created using the sequencer (its 'wave' entry).
Returns:
Str: A JSON string with the sequence data.
"""
pass
@staticmethod
def __qupulse_serialize(sequence):
""" Converts a qupulse sequence into a JSON string.
Args:
            sequence: The wave object of a sequence created using the sequencer (its 'wave' entry).
Returns:
Str: A JSON string with the sequence data.
"""
backend = DictBackend()
serializer = Serializer(backend)
return serializer.serialize(sequence, overwrite=True)
@staticmethod
def serialize(sequence):
""" Converts a sequence into a JSON string.
Args:
sequence (dict): A sequence created using the sequencer.
Returns:
Str: A JSON string with the sequence data.
"""
data_type = sequence['type']
switch = {DataTypes.RAW_DATA: Sequencer.__raw_data_serialize,
DataTypes.QU_PULSE: Sequencer.__qupulse_serialize}
serialize_function = switch[data_type]
return serialize_function(sequence['wave'])
@staticmethod
def deserialize(json_string):
""" Convert a JSON string into a sequencer object.
Args:
            json_string: The JSON data containing the sequencer object.
Returns:
Dict: *NAME*, *TYPE*, *WAVE* keys containing values; sequence name,
sequence data type and the actual qupulse sequencePT respectively.
"""
backend = DictBackend()
serializer = Serializer(backend)
return serializer.deserialize(json_string)
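# Minimal serialize/deserialize round-trip sketch. The helper below is
# illustrative only and not part of the original module API:
def _example_serialize_roundtrip():
    """Serialize a qupulse-based sequence and load it back."""
    sequence = Sequencer.make_sawtooth_wave(amplitude=1.5, period=1e-6)
    json_string = Sequencer.serialize(sequence)
    return Sequencer.deserialize(json_string)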
# UNITTESTS #
def test_qupulse_sawtooth_HasCorrectProperties():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning, message="qupulse")
epsilon = 1e-14
period = 1e-3
amplitude = 1.5
sampling_rate = 1e9
sequence = Sequencer.make_sawtooth_wave(amplitude, period)
raw_data = Sequencer.get_data(sequence, sampling_rate)
assert len(raw_data) == sampling_rate*period + 1
assert np.abs(np.min(raw_data) + amplitude/2) <= epsilon
assert np.abs(np.max(raw_data) - amplitude/2) <= epsilon
def test_raw_wave_HasCorrectProperties():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning, message="qupulse")
period = 1e-3
sampling_rate = 1e9
name = 'test_raw_data'
sequence = {'name': name, 'wave': [0]*int(period*sampling_rate+1),
'type': DataTypes.RAW_DATA}
raw_data = Sequencer.get_data(sequence, sampling_rate)
assert len(raw_data) == sampling_rate*period+1
assert np.min(raw_data) == 0
def test_serializer():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning, message="qupulse")
period = 1e-6
amplitude = 1.5
sawtooth = Sequencer.make_sawtooth_wave(amplitude, period)
Sequencer.serialize(sawtooth)
|
{"hexsha": "777606bf0f81865fc01303965b351e63bd8b08fd", "size": 12229, "ext": "py", "lang": "Python", "max_stars_repo_path": "qtt/instrument_drivers/virtualAwg/sequencer.py", "max_stars_repo_name": "dpfranke/qtt", "max_stars_repo_head_hexsha": "f60e812fe8b329e67f7b38d02eef552daf08d7c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qtt/instrument_drivers/virtualAwg/sequencer.py", "max_issues_repo_name": "dpfranke/qtt", "max_issues_repo_head_hexsha": "f60e812fe8b329e67f7b38d02eef552daf08d7c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qtt/instrument_drivers/virtualAwg/sequencer.py", "max_forks_repo_name": "dpfranke/qtt", "max_forks_repo_head_hexsha": "f60e812fe8b329e67f7b38d02eef552daf08d7c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4542372881, "max_line_length": 97, "alphanum_fraction": 0.6407719356, "include": true, "reason": "import numpy", "num_tokens": 2602}
|
"""
Authors: Bardiaux Benjamin
Institut Pasteur, Paris
IBPC, Paris
Copyright (C) 2005 Michael Habeck,
Wolfgang Rieping and Benjamin Bardiaux
No warranty implied or expressed.
All rights reserved.
$Author: bardiaux $
$Revision: 1.1.1.1 $
$Date: 2010/03/23 15:27:24 $
"""
from aria.ariabase import *
from aria.Settings import Settings
from aria.xmlutils import XMLElement, XMLBasePickler
import aria.TypeChecking as TCheck
from aria.Chain import TYPE_NONPOLYMER
import numpy
from time import clock
from aria.AriaPeak import TextPickler
from aria.AriaPeak import ASSIGNMENT_TYPE_DICT, NA, \
HEADER_PROJECT, HEADER_ASSIGNMENT_TYPE, \
HEADER_SEQUENCE_SEPARATION, HEADER_RESTRAINT_DEFINITION, \
HEADER_RESTRAINT_ACTIVE
HEADER_SEQUENCE_SEPARATION = \
"""
# sep: sequence separation s: I: s == 0 (intra-residual)
# Q: s == 1 (sequential)
# S: 2 <= s <= 3 (short)
# M: 4 <= s <= 5 (medium)
# L: s > 5 (long)
# i: inter-monomer
"""[1:-1]
HEADER_DICT = {'project': HEADER_PROJECT,
'assignment_type': HEADER_ASSIGNMENT_TYPE,
'sequence_separation': HEADER_SEQUENCE_SEPARATION,
'restraint_definition': HEADER_RESTRAINT_DEFINITION,
'restraint_active': HEADER_RESTRAINT_ACTIVE}
HEADER_ABBREVIATIONS = \
("""
#
# Abbreviations:
#
%(restraint_definition)s
%(restraint_active)s
#
#
%(assignment_type)s
#
""" % HEADER_DICT)[1:-1]
HEADER_ALL = \
"""
#
# List of distance restraints.
#
# Created by Aria 2.3, %(creation_date)s
#
%(project)s
#
# Restraints used during calculation: %(n_active)d
# Violated: %(n_violated)d
#
%(abbreviations)s
%(sequence_separation)s
#
# n_c: The number of contributions. (see noe_restraints.assignments for
# explicit list of contributions).
#
# net_res: Network-anchoring score per residue.
#
# net_ato: Network-anchoring score per atom.
#
"""[1:]
class NetworkScoreTextPickler(TextPickler):
def encode_common(self, ap):
distance_format = '%.2f'
number = '%d' % ap.getId()
rp = ap.getReferencePeak()
x = rp.getNumber()
try:
ref_peak_number = '%d' % x
except:
ref_peak_number = NA
x = rp.getSpectrum().getName()
try:
ref_peak_spectrum = str(x)
except:
ref_peak_spectrum = NA
x = ap.isActive()
if x:
active = YES
else:
active = NO
at = rp.getAssignmentType()
assignment_type = ASSIGNMENT_TYPE_DICT[at]
# BARDIAUX
net = ap._network
net_res = '%.2f' % ap._network['residue']
net_ato = '%.2f' % ap._network['atom']
values = ref_peak_spectrum, ref_peak_number, number, \
active, net_res, net_ato, assignment_type
return list(values)
def encode(self, ap):
values = self.encode_common(ap)
## contributions
contributions = ap.getContributions()
## take only active contributions
contributions = ap.getActiveContributions()
if len(contributions) == 1:
## get sequence separation
            ## in case of multiple spin-pairs,
            ## we just take the first one, since all
            ## involve the same two residues
atom1, atom2 = contributions[0].getSpinPairs()[0].getAtoms()
            if atom1.getSegid() != atom2.getSegid():
# we have an inter
values.append('1') # n_c
values.append('i')
return values
seq_pos1 = atom1.getResidue().getNumber()
seq_pos2 = atom2.getResidue().getNumber()
seq_sep = abs(seq_pos1 - seq_pos2)
## intra-residue
if seq_sep == 0:
descr = 'I'
## sequential
elif seq_sep == 1:
descr = 'Q'
## TODO: are these the correct values?
## short range
elif seq_sep <= 3:
descr = 'S'
## medium range
elif seq_sep <= 5:
descr = 'M'
else:
descr = 'L'
values.append('1') # n_c
values.append(descr)
## multiple contributions
else:
values.append(str(len(contributions)))
values.append('-') # sep
return values
def dumps(self, ap):
return '\n'.join(self.encode(ap))
class NetworkAnchoringTextPickler(TextPickler):
HEADER_COMMON = ['ref_spec', 'ref_no', 'id', 'active', 'net_res', 'net_ato', 'a_type']
COLUMNS = {'all' : HEADER_COMMON + ['n_c', 'sep'],}
HEADER = {'all' : HEADER_ALL,}
def __init__(self, settings):
#check_type(settings, 'AriaPeakListTextPicklerSettings')
TextPickler.__init__(self, settings = settings)
def get_column_header(self, _type):
"""
_type is 'ambig' or 'unambig'
"""
if not _type in ('ambig', 'unambig', 'all'):
s = 'Header for peak-type "%s" not known.' % _type
self.error(TypeError, s)
return list(self.COLUMNS[_type])
def encode(self, peak_list, header):
pickler = NetworkScoreTextPickler()
all = map(pickler.encode, peak_list)
## add header
if not len(all):
return header
        if len(header) != len(all[0]):
s = 'Number of columns must match header-length.'
self.error(Exception, s)
header[0] = '# ' + header[0]
## show additional information
active = [p for p in peak_list if p.isActive()]
n_violated = len([p for p in active if p.analysis.isViolated()])
d = self._compile_header_dict()
d['n_violated'] = n_violated
d['n_active'] = len(active)
d['abbreviations'] = HEADER_ABBREVIATIONS
text = self.format_output(all, header = header)
## add \n
text = [line + '\n' for line in text]
## make string
text = ''.join(text)
return text, d
def _write(self, s, filename, gzip = 0):
import os
if s is None:
import aria.tools as tools
tools.touch(filename)
return
if gzip:
from aria.tools import gzip_open as open_func
else:
open_func = open
filename = os.path.expanduser(filename)
f = open_func(filename, 'w')
f.write(s)
f.close()
def _compile_header_dict(self):
from aria.Singleton import ProjectSingleton
import time
from copy import copy
project = ProjectSingleton()
project_settings = project.getSettings()
infra = project.getInfrastructure()
run_path = infra.get_run_path()
d = {'date': project_settings['date'],
'project': project_settings['name'],
'run': project_settings['run'],
'author': project_settings['author'],
'working_directory': run_path}
x = copy(HEADER_DICT)
x['project'] %= d
        x['creation_date'] = time.ctime()
return x
def dump_network(self, peak_list, filename, gzip = 0):
if peak_list:
header = self.get_column_header('all')
text, d = self.encode(peak_list, header)
d.update(self._compile_header_dict())
header = (self.HEADER['all'] % d)[1:]
s = header + text
# s = header.replace('\n\n','\n') + text
else:
s = None
return self._write(s, filename, gzip)
class NetworkPsPickler:
def __init__(self, network):
self.peaks = network.peaks
self.p_id = network._protons_id
self.net_res = network.residue_score
self.mol = network.molecule
self.it_n = network.iteration.getNumber()
def get_matrix(self):
# since we just support symmetric dimer
n_chains = len(self.mol.get_chains())
#max_res = len([r for c in self.mol.get_chains() for r in c.getResidues()])
max_res = [c.getResidues()[-1].getNumber() for c in self.mol.get_chains() \
if c.getType() != TYPE_NONPOLYMER]
from aria.Singleton import ProjectSingleton
from aria.DataContainer import DATA_SYMMETRY
project = ProjectSingleton()
sym_settings = project.getData(DATA_SYMMETRY)[0]
if n_chains < 2 or (n_chains > 1 and sym_settings['symmetry_type'] not in ["C2","C3","D2","C5"]):
# monomeric prot or hetero dimer
matrix = numpy.zeros((max_res[0]+1, max_res[0]+1), numpy.float)
for k, r_net in self.net_res.items():
r1, r2 = map(lambda a: a.getNumber(), k)
matrix[r1,r2] = r_net
matrix[r2,r1] = r_net
return matrix, None
else:
# homo-dimer
matrix_a = numpy.zeros((max_res[0]+1, max_res[0]+1), numpy.float)
matrix_r = numpy.zeros((max_res[0]+1, max_res[1]+1), numpy.float)
for k, r_net in self.net_res.items():
r1, r2 = map(lambda a: a.getNumber(), k)
s1, s2 = map(lambda a: a.getChain().getSegid(), k)
                if s1 != s2:
matrix_r[r1,r2] = r_net
matrix_r[r2,r1] = r_net
else:
matrix_a[r1,r2] = r_net
matrix_a[r2,r1] = r_net
return matrix_a, matrix_r
def plot_matrix(self):
# mask zero-values
from matplotlib import rcParams
from numpy import ma
rcParams['numerix'] = 'numpy'
pylab = self.pylab
msg = ""
matrix_a, matrix_r = self.get_matrix()
first_res = [c.getResidues()[0].getNumber() for c in self.mol.get_chains() if c.getType() != TYPE_NONPOLYMER]
max_res = [c.getResidues()[-1].getNumber() for c in self.mol.get_chains() if c.getType() != TYPE_NONPOLYMER]
if matrix_r is not None:
ax1 = pylab.subplot(2,1,1)
#matrix = matrix_r[1:,1:]
matrix = matrix_r[first_res[0]:,first_res[1]:]
X = ma.array(matrix, mask = numpy.equal(matrix, 0.))
xyticks = (first_res[0], max_res[0], first_res[1], max_res[1])
kw = {'origin':'lower',
'interpolation':'nearest',
'aspect' : 'equal',
'extent' : xyticks}
pylab.imshow(X, cmap=pylab.cm.Reds, **kw)
pylab.grid()
pylab.colorbar(orientation = 'vertical')
pylab.ylabel("Residue Number (Inter-molecular)")
#pylab.setp( ax1.get_xticklabels(), visible=False)
pylab.subplot(212)#, sharex=ax1)
#pos = pylab.axes([0.85, 0.1, 0.04, 0.8])
#pylab.colorbar(cax = pos)#, orientation = 'horizontal')
msg = " (Intra-molecular)"
matrix = matrix_a[first_res[0]:,first_res[0]:]
#matrix = matrix_a[1:,1:]
X = ma.array(matrix, mask = numpy.equal(matrix, 0.))
xyticks = (first_res[0], max_res[0], first_res[0], max_res[0])
kw = {'origin':'lower',
'interpolation':'nearest',
'aspect' : 'equal',
'extent' : xyticks}
pylab.imshow(X, cmap=pylab.cm.Reds, **kw)
if len(msg):
orientation = 'vertical'
else:
orientation = 'horizontal'
pylab.colorbar(orientation = orientation)
pylab.grid()
pylab.xlabel("Residue Number")
pylab.ylabel("Residue Number" + msg)
def plot_profile(self, type, n):
pylab = self.pylab
if type not in ['residue', 'atom']:
return
colors = {'residue' : 'b',
'atom' : 'r'}
scores = [p._network[type] for p in self.peaks]
nbins = int(max(scores))
#nbins = 1 + int(numpy.log(len(scores))/numpy.log(2))
nbins = int(1.0 + 3.3 * numpy.log(len(scores)))
pylab.subplot(2, 1, n)
pylab.hist(scores, bins = nbins +1, facecolor = colors[type])
pylab.xlabel("Network Anchoring score per %s" % type)
pylab.ylabel("Number of Peaks")
def plot(self, path):
try:
import matplotlib
matplotlib.use('PS', warn=False)
except:
return
import matplotlib.pylab as pylab
self.pylab = pylab
pylab.figure(num=1, figsize=(8,11))
pylab.clf()
pylab.figtext(0.3,0.95, 'Network Anchoring for iteration %s' % str(self.it_n))
pylab.figtext(0.3,0.90, 'Network Anchoring scores distribution')
self.plot_profile('residue', 1)
self.plot_profile('atom', 2)
pylab.subplots_adjust(top = 0.85)
pylab.figure(num=2, figsize=(8,11))
pylab.clf()
pylab.figtext(0.3,0.95, 'Residue-wise Network Anchoring scores for iteration %d' % self.it_n)
self.plot_matrix()
pylab.figure(1)
pylab.savefig(path +'_dist.ps', papertype='a4', dpi = 72)
pylab.figure(2)
pylab.savefig(path + '_2D.ps', papertype='a4', dpi = 72)
class NetworkSettings(Settings):
def create(self):
from aria.Settings import NonNegativeFloat
from aria.Settings import YesNoChoice
d = {}
# public settings
descr = "Network anchoring removes restraints which are not surrounded by a network of active restraints."
d['enabled'] = YesNoChoice(description = descr)
descr = "High network-anchoring score per residue for a peak to be active."
d['high_residue_threshold'] = NonNegativeFloat(description = descr)
descr = """Minimal network-anchoring score per residue for a peak to be active. (In combination with \"min_atom_threshold\")"""
d['min_residue_threshold'] = NonNegativeFloat(description = descr)
descr = """Minimal network-anchoring score per atoms for a peak to be active. (In combination with \"min_residue_threshold\")"""
d['min_atom_threshold'] = NonNegativeFloat(description = descr)
# private
descr = "Maximal distance for covalent inter-proton distance."
d['distance_max'] = NonNegativeFloat(description = descr)
descr = "Maximal network anchoring score for covalent distance."
d['v_max'] = NonNegativeFloat(description = descr)
descr = "Minimal network anchoring score for intraresidual/sequential distance."
d['v_min'] = NonNegativeFloat(description = descr)
return d
def create_default_values(self):
d = {}
d['enabled'] = NO
d['high_residue_threshold'] = 4.
d['min_residue_threshold'] = 1.0
d['min_atom_threshold'] = 0.25
d['distance_max'] = 5.5
d['v_max'] = 1.0
d['v_min'] = 0.1
return d
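# Illustrative configuration sketch (construction details depend on the
# aria.Settings base class; the default values are those defined in
# create_default_values above):
#     settings = NetworkSettings()
#     settings['high_residue_threshold']  # 4.0
#     settings['min_residue_threshold']   # 1.0
#     settings['min_atom_threshold']      # 0.25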
class CovalentConstraint:
def __init__(self, id, atom1, atom2, distance):
self.atom1 = atom1
self.atom2 = atom2
self.distance = distance
self.id = id
def getId(self):
return self.id
def getScore(self):
return 0.
def getAtoms(self):
return (self.atom1, self.atom2)
def getDistance(self):
return self.distance
def __str__(self):
s = "CovalentConstraint(id=%d, atoms=%s, d=%5.3f)" % (self.id, self.getAtoms(), self.distance)
return s
class NetworkAnchoring(AriaBaseClass):
def __init__(self, settings):
TCheck.check_type(settings, 'NetworkSettings')
AriaBaseClass.__init__(self)
self.setSettings(settings)
self.anchoring = None
self.peaks = None
self.getSettings()['v_min'] = 0.1
self.getSettings()['v_max'] = 1.0
self.getSettings()['distance_max'] = 5.5
def setup(self):
"""
Setup some lists and matrices.
"""
from sets import Set
if self.anchoring is not None:
            # if we already have a network, just recreate self._c_id with copied contributions
self.message('Retrieving Network ...')
self._c_id = {}
self._c_id[-1] = [] # covalent
for p in self.peaks:
for c in p.getContributions():
for sp in c.getSpinPairs():
sid = sp.getId() + 1
self._c_id.setdefault(sid, Set())
self._c_id[sid].add(c)
self.addDistanceRestraints()
return 1
# if we run network_anchoring for 1st time, create all list and spinpair matrices
self.message('Initializing ...')
if not self.peaks:
return 0
# list with all protons
if self._is_noesy_only:
self._protons_id = [a for c in self.molecule.get_chains() for r in c.getResidues() \
for a in r.getAtoms() if a.isProton()]
else:
self._protons_id = [a for c in self.molecule.get_chains() for r in c.getResidues() \
for a in r.getAtoms() if a.isProton() or a.getType() in ['N','C']]
self._protons_id.sort(lambda a,b: cmp(a.getId(), b.getId()))
# dict with protons id as key, and indices in self._protons_id as values
self._protons_num = {}
for a in range(0, len(self._protons_id)):
self._protons_num[self._protons_id[a].getId()] = a
# list with protons residues number
# add chain levels to residues numbering
self._residues_num = {}
for c in self.molecule.get_chains():
cid = c.getSegid()
self._residues_num[cid] = [a.getResidue().getNumber() for a in self._protons_id]# if a.getSegid() == cid]
# dict with residues number as key and list of protons ids as values
self._residues_id = {}
for c in self.molecule.get_chains():
cid = c.getSegid()
self._residues_id[cid] = {}
for a in range(0, len(self._protons_id)):
r, cid = self._protons_id[a].getResidue().getNumber(), self._protons_id[a].getSegid()
self._residues_id[cid].setdefault(r, [])
self._residues_id[cid][r].append(a)
# dict with SpinPair.getId() + 1 as key and Set of contributions as values
self._c_id = {}
self._c_id[-1] = []
# dict with SpinPair.getId() + 1 as key and spinpair as values
self.spinpairs = {}
for p in self.peaks:
for c in p.getContributions():
for sp in c.getSpinPairs():
sid = sp.getId() + 1
self._c_id.setdefault(sid, Set())
self._c_id[sid].add(c)
if not self.spinpairs.has_key(sid):
self.spinpairs[sid] = sp
# add additional distance restraints
self.addDistanceRestraints()
        # matrix to hold whether 2 protons are connected with spinpair(1), covalent(2) or not connected(0)
self._sp = numpy.zeros((len(self._protons_id), len(self._protons_id)))
# matrix to store the id of the spinpair connecting 2 atoms
self._sp_id = numpy.zeros((len(self._protons_id), len(self._protons_id)))
# matrix to store covalent score of a spinpair
self._sp_cov_scores = numpy.zeros((len(self._protons_id), len(self._protons_id)))
# matrix to store sum of contributions volumes of each spinpair
self._sp_sum_scores = numpy.zeros(len(self.spinpairs.keys()) , numpy.float)
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
self._sp[a][b] = 1
self._sp[b][a] = 1
self._sp_id[a][b] = spid
self._sp_id[b][a] = spid
self.addCovalentConstraints()
self.addStructureRestraints()
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
cov_score = self._get_covalent_score(a, b)
self._sp_cov_scores[a][b] = cov_score
self._sp_cov_scores[b][a] = cov_score
return 1
def setDefaultNetworkScores(self, s):
for p in self.peaks:
contribs = p.getContributions()
n = len(contribs)
[c.setNetworkScore(s/n) for c in contribs]
# use additional distance restraints
def addDistanceRestraints(self):
"""
        Distance constraints
"""
# get list of DistanceRestraints valid for NA
restraints = []
restraint_list = self.iteration.getDistanceRestraints()
for l, r in restraint_list.items():
if l.getListSource()['add_to_network'] == YES:
restraints += r
if not restraints:
return
from sets import Set
for r in restraints:
for c in r.getContributions():
for sp in c.getSpinPairs():
sid = sp.getId() + 1
self._c_id.setdefault(sid, Set())
self._c_id[sid].add(c)
if not self.spinpairs.has_key(sid):
self.spinpairs[sid] = sp
def addStructureRestraints(self):
        check = {}
        n = 0  # counter for structure-based restraints added below
vmax = self.getSettings()['v_max']
for c in self.molecule.get_chains():
residues = c.getResidues()
atoms = [a for r in residues for a in r.getAtoms() if a.isProton() and a.getName() in ['HA', 'H']]
for i in range(0, len(atoms)-1):
for j in range(i+1, len(atoms)):
a, b = atoms[i], atoms[j]
id = (min(a.getId(),b.getId()), max(a.getId(),b.getId()))
if not check.has_key(id):
check[id] = 1
                        res1 = int(a.getResidue().getNumber())
str1 = a.getResidue().getStructure()
t1 = a.getName()
res2 = int(b.getResidue().getNumber())
str2 = b.getResidue().getStructure()
t2 = b.getName()
if str1 == "" or str2 == "":
continue
sep = abs(res1 - res2)
if sep > 4:
continue
both_H = str1 == str2 and str1[0] == 'H'
both_B = str1 == str2 and str1[0] == 'B'
                        # skip unless both atoms share the same secondary-structure type
                        if not (both_B or both_H):
                            continue
HA_HN = (t1 == 'HA' and t2 == 'H') or \
(t1 == 'H' and t2 == 'HA')
HN_HN = (t1 == t2) and (t1 == 'H')
# check if valid constraints in SS
d = 0
# Sheets, dHA,HN(i,i+1)
if both_B and HA_HN and sep == 1:
d = 1
if both_H:
if HA_HN and sep <= 4:
d = 1
if HN_HN and sep <= 2:
d = 1
if d:
##cc = CovalentConstraint(n, a, b, d)
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
if self._sp_id[a][b] == 0:
self._sp_id[a][b] = -1
if self._sp_id[b][a] == 0:
self._sp_id[b][a] = -1
self._sp[a][b] = 2
self._sp[b][a] = 2
self._sp_cov_scores[a][b] = vmax
self._sp_cov_scores[b][a] = vmax
n+= 1
def addCovalentConstraints(self):
"""
        Covalent constraints
"""
dmax = self.getSettings()['distance_max']
vmax = self.getSettings()['v_max']
from aria.CovalentDistances import CovalentDistances
cd = CovalentDistances()
check = {}
n = 0
for c in self.molecule.get_chains():
residues = c.getResidues()
for r in range(len(residues)-1):
atoms = residues[r].getAtoms() + residues[r+1].getAtoms()
# NOESY
atoms = [a for a in atoms if a.isProton()]
for i in range(0, len(atoms)-1):
for j in range(i+1, len(atoms)):
aa, bb = atoms[i], atoms[j]
id = (min(aa.getId(),bb.getId()), max(aa.getId(),bb.getId()))
if not check.has_key(id):
check[id] = 1
d = cd.areConnected(aa, bb)
if d:
cc = CovalentConstraint(n, aa, bb, d)
a, b = self._protons_num[aa.getId()], self._protons_num[bb.getId()]
if self._sp_id[a][b] == 0:
self._sp_id[a][b] = -1
if self._sp_id[b][a] == 0:
self._sp_id[b][a] = -1
self._sp[a][b] = 2
self._sp[b][a] = 2
self._sp_cov_scores[a][b] = vmax
self._sp_cov_scores[b][a] = vmax
# valid also for hetero atom
if self._is_noesy_only:
continue
ah, bh = aa.getHeteroAtom(), bb.getHeteroAtom()
if ah and bh and (ah.getType() in ['N','C'] and bh.getType() in ['N','C']) :
ai, bi = self._protons_num[ah.getId()], self._protons_num[bh.getId()]
if self._sp_id[ai][bi] == 0:
self._sp_id[ai][bi] = -1
if self._sp_id[bi][ai] == 0:
self._sp_id[bi][ai] = -1
self._sp[ai][bi] = 2
self._sp[bi][ai] = 2
self._sp_cov_scores[ai][bi] = vmax
self._sp_cov_scores[bi][ai] = vmax
n+= 1
## # cov_score
## for spid, sp in self.spinpairs.items():
## a, b = sp.getAtoms()
## d = cd.areConnected(a, b)
## if d:
## map(lambda c: (c.setCovalentScore(1.)), self._c_id[spid])
def create_network(self):
"""
create the network itself
        dictionary: key = spid, value = Set of gammas
"""
if self.anchoring is not None:
return
self.message('Creating network ...')
from sets import Set
self.anchoring = {}
#t1 = clock()
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
sa, sb = a.getSegid(), b.getSegid()
a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
# dim0
r = self._residues_num[sa][a]
res_bound = []
for i in range(r-1, r+2):
if self._residues_id[sa].has_key(i):
res_bound += self._residues_id[sa][i]
x = numpy.take(self._sp, res_bound, axis = 0)
both_0 = x[:,a] * x[:,b]
x0 = [res_bound[i] for i in numpy.flatnonzero(both_0)]
# dim1
r = self._residues_num[sb][b]
res_bound = []
for i in range(r-1, r+2):
if self._residues_id[sb].has_key(i):
res_bound += self._residues_id[sb][i]
x = numpy.take(self._sp, res_bound, axis = 1)
both_1 = x[a,:] * x[b,:]
x1 = [res_bound[i] for i in numpy.flatnonzero(both_1)]
x12 = Set(x0).union(x1)
self.anchoring[spid] = x12
self.message("Done.")
def _get_covalent_score(self, id_a, id_b):
"""
        Score according to the covalent structure (a, b are two atoms):
            S = { Vmax  if covalent constraint
                { Vmin  if intraresidual/sequential connectivity
                { 0     if long-range connectivity
"""
# argument : contribution ? => then get max distance from contribution's spinpairs (use ISPA Model)
# a spin pairs ?
# 2 atoms
vmin = self.getSettings()['v_min']
vmax = self.getSettings()['v_max']
if self._sp[id_a][id_b] == 2:
covalent_score = vmax
else:
if self._isSequential(id_a, id_b):
covalent_score = vmin
else:
covalent_score = 0.
return covalent_score
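    # Worked example of the rule above with the default settings
    # (v_max = 1.0, v_min = 0.1): a covalently connected pair scores 1.0,
    # an intraresidual or sequential pair scores 0.1, and any long-range
    # pair scores 0.0.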
def _heaviside(self, x):
if x < 0:
return 0.
elif x == 0:
return .5
else:
return 1.
def _isSequential(self, id_a, id_b):
sa, sb = self._protons_id[id_a].getSegid(), self._protons_id[id_b].getSegid()
        if sa != sb:
return 0
else:
return abs(self._residues_num[sa][id_a] - self._residues_num[sb][id_b]) <= 1
def _sumContribScore(self):
self._sp_sum_scores = {}
for spid, contribs in self._c_id.items():
s = numpy.sum([c.getScore()/len(c.getSpinPairs()) for c in contribs])
self._sp_sum_scores[spid] = s
def updateContributionsNetworkScores(self):
"""
        calculate the network score for each contribution and update it
"""
contribs_scores = {}
#t = clock()
self._sumContribScore()
#t = clock()
v_min = self.getSettings()['v_min']
for k, gammas in self.anchoring.items():
sp = self.spinpairs[k]
score = 0.
a, b = sp.getAtoms()
id_a = self._protons_num[a.getId()]
id_b = self._protons_num[b.getId()]
gammas = list(gammas)
# a-g
#g_scores_a = numpy.take(self._sp_sum_scores, numpy.take( self._sp_id[id_a,:], gammas))
g_scores_a = [self._sp_sum_scores[x] for x in numpy.take( self._sp_id[id_a,:], gammas)]
cov_scores_a = numpy.take(self._sp_cov_scores[id_a,:], gammas)
nus_a = numpy.where(numpy.greater(g_scores_a, cov_scores_a), g_scores_a, cov_scores_a)
nus_a *= numpy.greater(nus_a - v_min, 0)
# b-g
#g_scores_b = numpy.take(self._sp_sum_scores, numpy.take( self._sp_id[id_b,:], gammas))
g_scores_b = [self._sp_sum_scores[x] for x in numpy.take( self._sp_id[id_b,:], gammas)]
cov_scores_b = numpy.take(self._sp_cov_scores[id_b,:], gammas)
nus_b = numpy.where(numpy.greater(g_scores_b, cov_scores_b), g_scores_b, cov_scores_b)
nus_b *= numpy.greater(nus_b - v_min, 0)
score = numpy.sum(numpy.sqrt(nus_a * nus_b))
contribs = self._c_id[k]
for c in contribs:
contribs_scores.setdefault(c, [])
contribs_scores[c].append(score)
for c, ss in contribs_scores.items():
c.setNetworkScore(numpy.sum(ss)/len(ss))#/len(ss)
for p in self.peaks:
contribs = p.getContributions()
scores = numpy.array([c.getNetworkScore() for c in contribs])
#covalent = numpy.array([c.getCovalentScore() for c in contribs])
#covalent = numpy.greater(covalent, 1.)
#zero_scores_covalent = numpy.equal(scores, 0) * covalent
#scores = numpy.where(zero_scores_covalent, 1., scores)
sum_scores = numpy.sum(scores)
if sum_scores > 0.:
scores /= sum_scores
map(lambda c,s : (c.setNetworkScore(s)), contribs, scores)
#self.message("Done %5.3f" % (clock() -t))
def updateContributionsScores(self):
"""
        calculate the score of each contribution and update it
"""
for p in self.peaks:
contribs = p.getContributions()
#mask = [c.isInter() for c in contribs]
scores = numpy.array([c.getNetworkScore() * c.getWeight() for c in contribs])
#numpy.putmask(scores, mask, scores * 1.5)
sum_scores = numpy.sum(scores)
if sum_scores > 0.:
scores /= sum_scores
map(lambda c,s : (c.setScore(s)), contribs, scores)
#self.message("Done %5.3f" % (clock() -t))
def dump_text(self):
settings = None
peak_list = self.peaks
itn = self.iteration.getNumber()
infra = self.project.getInfrastructure()
import os
from aria.Protocol import REPORT_NOE_RESTRAINTS
path = infra.get_iteration_path(itn)
filename = os.path.join(path, REPORT_NOE_RESTRAINTS + '.network')
pickler = NetworkAnchoringTextPickler(settings)
pickler.dump_network(peak_list, filename, gzip = 0)
self.message('Network-Anchoring scores (text) written (%s).' % filename)
def dump_ps(self):
itn = self.iteration.getNumber()
infra = self.project.getInfrastructure()
import os
from aria.Protocol import REPORT_NOE_RESTRAINTS
path = infra.get_iteration_path(itn)
path = os.path.join(path, 'graphics/network')
np = NetworkPsPickler(self)
try:
np.plot(path)
except Exception, msg:
import aria.tools as tools
self.warning(tools.last_traceback())
msg = 'Error during creation of %s.network.' % REPORT_NOE_RESTRAINTS
self.warning(msg)
def _dump_scores(self, old_weights):
## save scores
s = ""
n = 0
for p in self.peaks:
pnetscores = self.getPeakNetScores(p)
for c in p.getContributions():
s += "NETWORK : I %4d %5d OW %5.3f W %5.3f N %5.3f S %5.3f Nres %5.3f Nat %5.3f\n" \
%(p.getId(), c.getId(), old_weights[n], c.getWeight(), c.getNetworkScore(), \
c.getScore(), pnetscores['residue'], pnetscores['atom'])
n += 1
itn = self.iteration.getNumber()
infra = self.project.getInfrastructure()
import os
path = os.path.join(infra.get_iteration_path(itn), "scores.dat")
f = open(path, 'w')
f.write(s)
f.close()
s = ''
for k, v in self.residue_score.items():
s += "%d %d %.4f\n" % (k[0],k[1], v)
path = os.path.join(infra.get_iteration_path(itn), "res_scores.dat")
f = open(path, 'w')
f.write(s)
f.close()
def getPeakNetScores(self, p):
score = {'residue' : 0.,
'atom' : 0.}
for c in p.getContributions():
res = [0,1]
for a in res:
res[a] = c.getSpinSystems()[a].getAtoms()[0].getResidue()
score['residue'] += self.getResNetScore(res) * c.getScore()
score['atom'] += c.getNetworkScore()/len(c.getSpinPairs()) * c.getScore()
return score
def getResNetScore(self, residues):
residues.sort(lambda a,b: cmp(a.getNumber(), b.getNumber()))
key = tuple(residues)
#r1, r2 = residues[0].getNumber(), residues[1].getNumber()
#key = (min((r1, r2)), max((r1, r2)))
return self.residue_score[key]
def analyze(self):
"""
        Analyse contribution scores and remove invalid ones
"""
self.message('Analyzing ...')
self.result = {}
result = {}
# compute net score per residue pairs
self.residue_score = {}
for spid, sp in self.spinpairs.items():
a, b = sp.getAtoms()
r1, r2 = a.getResidue(), b.getResidue()
#sa, sb = a.getSegid(), b.getSegid()
#a, b = self._protons_num[a.getId()], self._protons_num[b.getId()]
#r1, r2 = self._residues_num[sa][a], self._residues_num[sb][b]
key = [r1, r2]
key.sort(lambda a,b: cmp(a.getNumber(), b.getNumber()))
#key = (min((r1, r2)), max((r1, r2)))
key = tuple(key)
self.residue_score.setdefault(key, 0.)
sc = max([c.getNetworkScore() for c in self._c_id[spid]])
self.residue_score[key] += sc
contribs = [c for p in self.peaks for c in p.getContributions()]
scores = [c.getScore() for c in contribs]
total = len(contribs)
#eliminated = [c for c in contribs if c.getScore() <= 0.]
eliminated = numpy.sum(numpy.less_equal(scores, 0.))
self.result['total'] = total
self.result['eliminated'] = eliminated
self.result['ratio'] = self.result['eliminated']*100./float(total)
## SET SCORE as Weight
old_weights = [c.getWeight() for c in contribs]
## save scores
#self._dump_scores(old_weights)
[c.setWeight(c.getScore()) for c in contribs]
####################################################
        # FILTER PEAKS according to Nres and Natom
        # First rule : <Nres>p >= Nhigh
        # OR
        # Second rule : <Nres>p >= Nres_min AND <Natom>p >= Natom_min
#Nhigh = 4.
#Nres_min = 1.
#Nat_min = 0.25
s = self.getSettings()
for p in self.peaks:
res_score = self.getPeakNetScores(p)
p._network = res_score
if p.getReferencePeak().isReliable():
continue
## if not p.isAmbiguous() and p.getActiveContributions() and p.getActiveContributions()[0].isInter():
## continue
if not (res_score['residue'] >= s['high_residue_threshold'] or
(res_score['residue'] >= s['min_residue_threshold'] and res_score['atom'] >= s['min_atom_threshold'])):
p.isActive(0)
def update_scores(self):
self.setDefaultNetworkScores(1.)
#[ c.setScore(c.getNetworkScore() * c.getWeight()) for p in self.peaks for c in p.getContributions()]
self.updateContributionsScores()
n = 0
while n < 3:
self.message("Round %d ..." % n)
t = clock()
self._round = n
self.updateContributionsNetworkScores()
self.updateContributionsScores()
self.debug('Time: %ss' % str(clock() - t))
n += 1
def run(self, iteration):
"""
run network anchoring.
"""
self.iteration = iteration
self.peaks = iteration.getPeakList()
restraints = []
restraint_list = self.iteration.getDistanceRestraints()
for l, r in restraint_list.items():
if l.getListSource()['filter_contributions'] == YES and \
l.getListSource()['run_network_anchoring'] == YES :
restraints += r
self.peaks += restraints
self._is_noesy_only = 1
# check if we have non H-H pairs
for p in self.peaks:
contributions = p.getActiveContributions()
if not contributions:
continue
atom1, atom2 = contributions[0].getSpinPairs()[0].getAtoms()
            if not (atom1.isProton() and atom2.isProton()):
self._is_noesy_only = 0
break
from aria.Singleton import ProjectSingleton
self.project = ProjectSingleton()
self.molecule = self.project.getMolecule()
# 1) initalize
done = self.setup()
if not done:
s = 'Aborting. No valid peaks or restraints.'
self.warning(s)
return
# 2') create network
t1 = clock()
self.create_network()
self.debug('Time: %ss' % str(clock() - t1))
# 2) assign network scores to contributions
self.update_scores()
# 4) Analysis
t1 = clock()
self.analyze()
s = 'Done. %(eliminated)d/%(total)d (%(ratio)5.2f %%) assignment possibilities removed.\n'
self.message(s % self.result)
self.debug('Time: %ss' % str(clock() - t1))
# 5) logs
self.dump_text()
self.dump_ps()
#self.halt()
class NetworkXMLPickler(XMLBasePickler):
def _xml_state(self, x):
e = XMLElement()
e.enabled = x['enabled']
e.high_residue_threshold = x['high_residue_threshold']
e.min_residue_threshold = x['min_residue_threshold']
e.min_atom_threshold = x['min_atom_threshold']
return e
def load_from_element(self, e):
s = NetworkSettings()
s['enabled'] = str(e.enabled)
s['high_residue_threshold'] = float(e.high_residue_threshold)
s['min_residue_threshold'] = float(e.min_residue_threshold)
s['min_atom_threshold'] = float(e.min_atom_threshold)
return s
NetworkSettings._xml_state = NetworkXMLPickler()._xml_state
## TEST
if __name__ == '__main__':
molecule_file = '~/devel/aria2.2_release/test/run3/data/sequence/hrdc.xml'
ariapeaks_file='~/devel/aria2.2_release/test/run3/structures/it0/noe_restraints.pickle'
project_file = '~/devel/aria2.2_release/test/werner2.xml'
# read molecule
import aria.AriaXML as AriaXML
pickler = AriaXML.AriaXMLPickler()
molecule = pickler.load(molecule_file)
# read pickled ariapeak list
from aria.tools import Load
aria_peaks = Load(ariapeaks_file)
project = pickler.load(project_file)
project.ccpn_data_sources = ()
project.read_molecule()
ns = project.getProtocol().getSettings()['iteration_settings'][0]['network_anchoring_settings']
N = NetworkAnchoring(ns)
class it:
def __init__(self, peaks, n):
self.peaks = peaks
self.n = n
def getPeakList(self):
return self.peaks
def getNumber(self):
return self.n
N.run(it(aria_peaks, 0))
#N.dump()
|
{"hexsha": "2c55a44f1708355490f6623e534cfe988d374906", "size": 45032, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/mytools/ARIA/src/py/aria/Network.py", "max_stars_repo_name": "fmareuil/Galaxy_test_pasteur", "max_stars_repo_head_hexsha": "6f84fb0fc52e3e7dd358623b5da5354c66e16a5f", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/mytools/ARIA/src/py/aria/Network.py", "max_issues_repo_name": "fmareuil/Galaxy_test_pasteur", "max_issues_repo_head_hexsha": "6f84fb0fc52e3e7dd358623b5da5354c66e16a5f", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/mytools/ARIA/src/py/aria/Network.py", "max_forks_repo_name": "fmareuil/Galaxy_test_pasteur", "max_forks_repo_head_hexsha": "6f84fb0fc52e3e7dd358623b5da5354c66e16a5f", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4475997295, "max_line_length": 144, "alphanum_fraction": 0.5046411441, "include": true, "reason": "import numpy,from numpy", "num_tokens": 10692}
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from Bio import SeqIO
import os
from PIL import Image
import subprocess
import pandas as pd
import numpy as np
from torch import optim
from torchvision import models, transforms
from WK_NetArch import wk_tools as wkt
from WK_NetArch import alexnet_features, resnet101_features, vgg16_features
from scripts.utils import run_DAFS, run_RNAfold, make_8bit, get_matrix
import argparse
__doc__ = """
image_preprocessing - a scripts for image feature construction
===========================================================================
**image_preprocessing** is a Python script that provides function to process image and construct image feature.
Main Functions
--------------
Here are just a few of the things that **image_preprocessing** does well:
- Generate grayscale image.
- Construct image feature.
Main Program Functions
----------------------
"""
def extract_single(tag, features_dir, files_list):
if tag == 'alexnet':
alexnet = models.alexnet(pretrained=True)
model = alexnet_features.EncoderCNN(alexnet)
elif tag == 'resnet101':
        resnet101 = models.resnet101(pretrained=True)
model = resnet101_features.EncoderCNN(resnet101)
elif tag == 'vgg16':
vgg16 = models.vgg16(pretrained=True)
model = vgg16_features.EncoderCNN(vgg16)
wkt.extract_features(model, tag, features_dir, files_list)
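# Illustrative call, mirroring the loop in wk_main below (paths arbitrary):
#     files_list = wkt.get_image('../features/image/')
#     extract_single('alexnet', '../features/image_features/', files_list)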
def make_image(dataset, itr1, outpath):
for j in range(itr1 + 1, len(dataset)):
len_pair1 = len(dataset[itr1][2])
len_pair2 = len(dataset[j][2])
path_to_pairFasta = "./pair" + str(itr1) + "," + str(j) + ".fa"
pairFa = ""
for k in [dataset[itr1], dataset[j]]:
pairFa += ">" + k[0] + "\n" + k[2] + "\n"
with open(path_to_pairFasta, 'w') as f:
f.write(pairFa)
pair1, pair2 = run_DAFS(path_to_pairFasta)
ss1, ss2 = run_RNAfold(path_to_pairFasta)
pair1 = make_8bit(pair1, ss1)
pair2 = make_8bit(pair2, ss2)
subprocess.call(["rm", path_to_pairFasta])
dp1 = './' + str(dataset[itr1][0]) + '_dp.ps'
dp2 = './' + str(dataset[j][0]) + '_dp.ps'
mat1, mat2 = get_matrix(dp1, len(pair1))
image_mat = [mat2]
mat1, mat2 = get_matrix(dp2, len(pair2))
image_mat = image_mat + [mat2]
if itr1 == 0:
image1 = Image.fromarray(image_mat[0]).convert('L')
image2 = Image.fromarray(image_mat[1]).convert('L')
re_box = (0, 0, len_pair2, len_pair2) if len_pair1 > len_pair2 else (0, 0, len_pair1, len_pair1)
re_image = image2.crop(re_box) if len_pair1 > len_pair2 else image1.crop(re_box)
if len_pair1 > len_pair2:
image2 = re_image
else:
image1 = re_image
rx = 256
ry = rx
re_size = (rx, ry)
if image1.size > re_size:
image1 = image1.resize(re_size, Image.ANTIALIAS)
else:
image1 = image1.resize(re_size, Image.BICUBIC)
if image2.size > re_size:
image2 = image2.resize(re_size, Image.ANTIALIAS)
else:
image2 = image2.resize(re_size, Image.BICUBIC)
image1 = image1.convert('RGB')
image2 = image2.convert('RGB')
image1.save(outpath + str(dataset[itr1][0]) + '.png')
image2.save(outpath + str(dataset[j][0]) + '.png')
def wk_main(infile, data_dir, features_dir):
dataset = []
wkt.check_path(features_dir)
files_list = wkt.get_image(data_dir)
wkn_tags = ['alexnet', 'resnet101', 'vgg16']
for record in SeqIO.parse(infile, "fasta"):
id_part = record.id
id_parts = id_part.split(",")
        # make_image expects the sequence string at index 2 of each entry
        dataset = dataset + [[id_parts[0], int(id_parts[1]), str(record.seq)]]
make_image(dataset, 0, data_dir)
os.system('rm *.ps')
# use_gpu = torch.cuda.is_available()
for tag in wkn_tags:
extract_single(tag, features_dir, files_list)
if __name__ == '__main__':
data_dir = '../features/image/'
features_dir = '../features/image_features/'
infile = '../data/sequence_all.fa'
parser = argparse.ArgumentParser(description='image_preprocessing:')
parser.add_argument('--infile', '-i',
default='../data/sequence_all.fa',
help='Fasta files containing ncRNA sequences')
parser.add_argument('--data', '-d',
default='../features/image/',
help='The paths of images')
parser.add_argument('--outpath', '-o',
default='../features/image_features/',
help='Output paths of features')
args = parser.parse_args()
    wk_main(args.infile, args.data, args.outpath)
|
{"hexsha": "0258f06be66c108cfb32145bd7a905cb9b2f1209", "size": 4839, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/image_preprocessing.py", "max_stars_repo_name": "oAzv/GCFM", "max_stars_repo_head_hexsha": "5dc584f0722b90b99614616c9b210d9e086f8ff3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/image_preprocessing.py", "max_issues_repo_name": "oAzv/GCFM", "max_issues_repo_head_hexsha": "5dc584f0722b90b99614616c9b210d9e086f8ff3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/image_preprocessing.py", "max_forks_repo_name": "oAzv/GCFM", "max_forks_repo_head_hexsha": "5dc584f0722b90b99614616c9b210d9e086f8ff3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-19T02:55:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-19T02:55:02.000Z", "avg_line_length": 32.0463576159, "max_line_length": 111, "alphanum_fraction": 0.5945443273, "include": true, "reason": "import numpy", "num_tokens": 1212}
|
[STATEMENT]
lemma distinct_member_remove1 [simp]:
"list_distinct xs \<Longrightarrow> list_member (list_remove1 x xs) = (list_member xs)(x := False)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. list_distinct xs \<Longrightarrow> list_member (list_remove1 x xs) = (list_member xs)(x := False)
[PROOF STEP]
by(auto simp add: equal_eq List.member_def[abs_def] fun_eq_iff)
|
{"llama_tokens": 136, "file": "Containers_DList_Set", "length": 1}
|
[STATEMENT]
lemma not_is_Done_conv_Pause: "\<not> is_Done r \<longleftrightarrow> (\<exists>out c. r = Pause out c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<not> Resumption.resumption.is_Done r) = (\<exists>out c. r = Resumption.resumption.Pause out c)
[PROOF STEP]
by(cases r) auto
|
{"llama_tokens": 115, "file": "CryptHOL_Resumption", "length": 1}
|
[STATEMENT]
lemma rank_1_proj_col_carrier:
assumes "i < dim_col A"
shows "rank_1_proj (Matrix.col A i) \<in> carrier_mat (dim_row A) (dim_row A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. rank_1_proj (Matrix.col A i) \<in> carrier_mat (dim_row A) (dim_row A)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. rank_1_proj (Matrix.col A i) \<in> carrier_mat (dim_row A) (dim_row A)
[PROOF STEP]
have "dim_vec (Matrix.col A i) = dim_row A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dim_vec (Matrix.col A i) = dim_row A
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
dim_vec (Matrix.col A i) = dim_row A
goal (1 subgoal):
1. rank_1_proj (Matrix.col A i) \<in> carrier_mat (dim_row A) (dim_row A)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
dim_vec (Matrix.col A i) = dim_row A
goal (1 subgoal):
1. rank_1_proj (Matrix.col A i) \<in> carrier_mat (dim_row A) (dim_row A)
[PROOF STEP]
by (metis rank_1_proj_carrier)
[PROOF STATE]
proof (state)
this:
rank_1_proj (Matrix.col A i) \<in> carrier_mat (dim_row A) (dim_row A)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 514, "file": "Commuting_Hermitian_Spectral_Theory_Complements", "length": 6}
|
"""Concept analysis functionality.
For details on the workflow of a concept analysis see
:py:meth:`ConceptAnalysis.analysis`.
In short:
:Input: All of
- The *concept* (defined via concept data)
- The *main model*
- The *layers* to analyse and compare
:Output: All of
- The *layer* hosting the best embedding,
- The *best embedding*,
- The *quality metric values* for the best embedding
"""
# Copyright (c) 2020 Continental Automotive GmbH
import enum
import logging
import os
from typing import Tuple, Dict, Any, Sequence, Callable, List, Optional, Union
import numpy as np
import pandas as pd
import torch
from hybrid_learning.datasets import data_visualization as datavis
from . import visualization as vis
from .concepts import ConceptTypes, Concept, SegmentationConcept2D
from .embeddings import ConceptEmbedding
# For type hints:
from .models import ConceptDetectionModel2D, ConceptDetection2DTrainTestHandle
LOGGER = logging.getLogger(__name__)
class EmbeddingReduction(enum.Enum):
"""Aggregator callables to get the mean from a list of embeddings."""
MEAN_NORMALIZED_DIST = (ConceptEmbedding.mean,)
"""Embedding with distance function the mean of those of the
normed representations"""
MEAN_DIST = (ConceptEmbedding.mean_by_distance,)
"""Embedding with distance the mean of the distance functions"""
MEAN_ANGLE = (ConceptEmbedding.mean_by_angle,)
"""Embedding with distance function the mean of the distance functions
weighted by cosine distance of the normal vectors"""
DEFAULT = MEAN_NORMALIZED_DIST
"""The default instance to be used."""
def __init__(self,
func: Callable[[Sequence[ConceptEmbedding]],
ConceptEmbedding]):
"""The init routine for enum members makes function available as
instance fields.
It is automatically called for all defined enum instances.
"""
self.function: Callable[[Sequence[ConceptEmbedding]],
ConceptEmbedding] = func
"""Actual function that reduces a list of embeddings to a new one.
.. note::
The function is manually saved as attribute during ``__init__``
due to the following issue:
Enums currently do not support functions as values, as explained in
`this
<https://stackoverflow.com/questions/40338652>`_ and
`this discussion
<https://mail.python.org/pipermail/python-ideas/2017-April/045435.html>`_.
The chosen workaround follows
`this suggestion <https://stackoverflow.com/a/30311492>`_
*(though the code is not used)*.
"""
def __call__(self,
embeddings: Sequence[ConceptEmbedding]) -> ConceptEmbedding:
"""Call aggregation function behind the instance on the embeddings."""
return self.function(embeddings)
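# Illustrative use of the enum's __call__ (a sketch; ``embeddings`` stands
# for any sequence of ConceptEmbedding instances obtained elsewhere):
#     mean_emb = EmbeddingReduction.DEFAULT(embeddings)
#     angle_weighted_emb = EmbeddingReduction.MEAN_ANGLE(embeddings)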
class ConceptAnalysis:
r"""Handle for conducting a concept embedding analysis.
Saves the analysis settings and can run a complete analysis.
The core methods are:
- :py:meth:`analysis`: plain analysis (collect
:math:`\text{cross_val_runs}\cdot\text{num_val_splits}`
      embeddings for each layer in :py:attr:`layer_infos`)
- :py:meth:`best_embedding`: aggregate embeddings of an analysis per layer,
then choose best one
- :py:meth:`best_embedding_with_logging`: combination of the latter two
with automatic logging and result saving
"""
def __init__(self,
concept: Concept,
model: torch.nn.Module,
layer_infos: Union[Dict[str, Dict[str, Any]],
Sequence[str]] = None,
cross_val_runs: int = 1,
num_val_splits: int = 5,
emb_reduction: EmbeddingReduction = EmbeddingReduction.DEFAULT,
show_train_progress_bars: bool = True,
concept_model_args: Dict[str, Any] = None,
train_val_args: Dict[str, Any] = None,
):
"""Init.
:param concept: concept to find the embedding of
:param model: the DNN
:param layer_infos: information about the layers in which to look for
the best concept embedding; it may be given either as sequence of
layer IDs or as dict where the indices are the layer keys in
the model's :py:meth:`torch.nn.Module.named_modules` dict;
used keys:
- kernel_size: fixed kernel size to use for this layer
(overrides value from ``concept_model_args``)
- lr: learning rate to use
:param num_val_splits: the number of validation splits to use for
each cross-validation run
:param cross_val_runs: for a layer, several concept models are
trained in different runs; the runs differ by model initialization,
and the validation data split;
``cross_val_runs`` is the number of cross-validation runs,
i.e. collections of runs with num_val_splits distinct validation
sets each
:param emb_reduction: aggregation function to reduce list of
embeddings to one
:param show_train_progress_bars: whether to show the training
progress bars of the models
:param concept_model_args: dict with arguments for the concept model
initialization
:param train_val_args: any further arguments to initialize the concept
model handle
"""
if not concept.type == ConceptTypes.SEGMENTATION:
raise NotImplementedError(
("Analysis only available for segmentation concepts,"
"but concept was of type {}").format(concept.type))
self.concept: Concept = concept
"""The concept to find the embedding for."""
self.model: torch.nn.Module = model
"""The model in which to find the embedding."""
self.layer_infos: Dict[str, Dict[str, Any]] = layer_infos \
if isinstance(layer_infos, dict) \
else {l_id: {} for l_id in layer_infos}
"""Information about the layers in which to look for the best concept
embedding; the indices are the layer keys in the model's
:py:meth:`torch.nn.Module.named_modules` dict"""
self.cross_val_runs: int = cross_val_runs
"""The number of cross-validation runs to conduct for each layer.
A cross-validation run consists of :py:attr:`num_val_splits` training
runs with distinct validation sets. The resulting embeddings of all
runs of all cross-validation runs are then used to obtain the layer's
best concept embedding."""
self.num_val_splits: int = num_val_splits
"""The number of validation splits per cross-validation run."""
self.emb_reduction: EmbeddingReduction = emb_reduction
"""Aggregation function to reduce a list of embeddings from several
runs to one."""
self.show_train_progress_bars: bool = show_train_progress_bars
"""Whether to show the training progress bars of the models"""
self.train_val_args: Dict[str, Any] = train_val_args \
if train_val_args is not None else {}
"""Any training and evaluation arguments for the concept model
initialization."""
self.concept_model_args: Dict[str, Any] = concept_model_args \
if concept_model_args is not None else {}
"""Any arguments for initializing a new concept model."""
@property
def settings(self) -> Dict[str, Any]:
"""Settings dict to reproduce instance."""
return dict(
concept=self.concept,
model=self.model,
layer_infos=self.layer_infos,
cross_val_runs=self.cross_val_runs,
num_val_splits=self.num_val_splits,
emb_reduction=self.emb_reduction,
show_train_progress_bars=self.show_train_progress_bars,
train_val_args=self.train_val_args,
concept_model_args=self.concept_model_args
)
def __repr__(self):
setts = self.settings
# handle dict attribute representation
for k, val in setts.items():
if isinstance(val, dict) and len(val) > 0:
setts[k] = '{\n' + ',\n'.join(
["\t{!s}:\t{!s}".format(sub_k, sub_v)
for sub_k, sub_v in val.items()]) + '\n}'
return (str(self.__class__.__name__) + '(\n' +
',\n'.join(
["{!s} =\t{!s}".format(k, v) for k, v in setts.items()])
+ '\n)')
def best_embedding(self,
analysis_results: Dict[
str, Dict[int, Tuple[ConceptEmbedding, pd.Series]]]
= None) -> ConceptEmbedding:
"""Conduct an analysis and from results derive the best embedding.
:param analysis_results: optionally the results of a previously run
analysis; defaults to running a new analysis via :py:meth:`analysis`
:return: the determined best embedding of all layers analysed
"""
analysis_results = analysis_results or self.analysis()
best_embs_stds_stats: Dict[
str, Tuple[ConceptEmbedding, Tuple, pd.Series]] = {}
for layer_id, results_per_run in analysis_results.items():
best_embs_stds_stats[layer_id] = \
self.embedding_reduction(results_per_run)
best_layer_id = self.best_layer_from_stats(best_embs_stds_stats)
LOGGER.info("Concept %s final layer: %s", self.concept.name,
best_layer_id)
best_embedding, _, _ = best_embs_stds_stats[best_layer_id]
return best_embedding
def analysis(self) -> Dict[str,
Dict[int, Tuple[ConceptEmbedding, pd.Series]]]:
"""Conduct a concept embedding analysis.
For each layer in :py:attr:`layer_infos`:
- train :py:attr:`cross_val_runs` x :py:attr:`num_val_splits`
concept models,
- collect their evaluation results,
- convert them to embeddings.
:return: a dictionary of
            ``{layer_id: {run: (embedding,
            pandas.Series with {pre_: metric_val})}}``
"""
results_per_layer: Dict[
str, Dict[int, Tuple[ConceptEmbedding, pd.Series]]] = {}
for layer_id in self.layer_infos:
results_per_run: Dict[int, Tuple[ConceptEmbedding, pd.Series]] = \
self.analysis_for_layer(layer_id)
results_per_layer[layer_id] = results_per_run
return results_per_layer
@classmethod
def analysis_results_to_pandas(cls, analysis_results):
"""Provide :py:class:`pandas.DataFrame` multi-indexed by layer and
run w/ info for each run.
The information for each run is the one obtained by
:py:meth:`emb_info_to_pandas`.
        :param analysis_results: analysis results in the form as produced by
:py:meth:`analysis`
:returns: a :py:class:`pandas.DataFrame` with run result information
multi-indexed by ``(layer, run)``
"""
return pd.DataFrame({(layer_id, run): cls.emb_info_to_pandas(emb, stats)
for layer_id, runs in analysis_results.items()
for run, (emb, stats) in runs.items()
}).transpose()
@classmethod
def best_emb_infos_to_pandas(cls,
results: Dict[str, Tuple[
ConceptEmbedding,
Tuple[np.ndarray, float, float],
pd.Series]]) -> pd.DataFrame:
"""Provide :py:class:`pandas.DataFrame` indexed by layer ID wt/ info
about embeddings.
The format of results must be a dictionary indexed by the layer ID
and with values as provided by :py:meth:`embedding_reduction`
"""
return pd.DataFrame({layer_id: cls.emb_info_to_pandas(emb, stats, var)
for layer_id, (emb, var, stats) in results.items()
}).transpose()
@classmethod
def save_best_emb_results(
cls,
results: Dict[str, Tuple[ConceptEmbedding,
Tuple[np.ndarray, float, float],
pd.Series]],
folder_path: str):
"""Save results of embedding reduction.
The format of results must be a dict with layer IDs as keys and
values as provided by :py:meth:`embedding_reduction`.
"""
info = cls.best_emb_infos_to_pandas(results)
info['embedding'] = None
for layer in info.index:
emb: ConceptEmbedding = results[layer][0]
emb_fn = "{} best.npz".format(layer)
# Save and note in the info frame:
emb.save(os.path.join(folder_path, emb_fn))
info.loc[layer, 'embedding'] = emb_fn
info.to_csv(os.path.join(folder_path, "best_emb_stats.csv"))
@classmethod
def save_analysis_results(cls,
results: Dict[str, Dict[
int, Tuple[ConceptEmbedding, pd.Series]]],
folder_path: str):
"""Save analysis results.
The format is one retrievable by :py:meth:`load_analysis_results`.
The results are saved in the following files within ``folder_path``
- ``<layer> <run>.npz``: npz file with embedding resulting from
``<run>`` on ``<layer>``; can be loaded to an embedding using
:py:meth:`hybrid_learning.concepts.embeddings.ConceptEmbedding.load`
        - ``stats.csv``: CSV file holding a :py:class:`pandas.DataFrame` with
          each row holding the statistics of one embedding;
additional columns are ``'layer'``, ``'run'``, and ``'embedding'``,
where the ``'embedding'`` column holds the path to the npz-saved
          embedding corresponding to the row, relative to the location of
``stats.csv``
:param results: results dictionary in the format returned by
:py:meth:`analysis`
:param folder_path: the root folder to save files under;
must not yet exist
"""
info = cls.analysis_results_to_pandas(results)
info['embedding'] = None
for layer, run in info.index:
emb: ConceptEmbedding = results[layer][run][0]
emb_fn = "{} {}.npz".format(layer, run)
# Save and note in the info frame:
emb.save(os.path.join(folder_path, emb_fn))
info.loc[(layer, run), 'embedding'] = emb_fn
info.to_csv(os.path.join(folder_path, "stats.csv"))
@staticmethod
def load_analysis_results(folder_path: str
) -> Dict[str, Dict[int, Tuple[ConceptEmbedding,
pd.Series]]]:
"""Load analysis results previously saved.
The saving format is assumed to be that of
:py:meth:`save_analysis_results`."""
if not os.path.isdir(folder_path):
raise ValueError("Folder {} does not exist!".format(folder_path))
stats_frame = pd.read_csv(os.path.join(folder_path, "stats.csv"))
assert all([col in stats_frame.columns
for col in ("layer", "run", "embedding")])
        stats_frame = stats_frame.set_index(['layer', 'run'])
layers = stats_frame.index.get_level_values('layer').unique()
runs = stats_frame.index.get_level_values('run').unique()
analysis_results = {layer: {run: None for run in runs}
for layer in layers}
for layer in layers:
for run in runs:
row = stats_frame.loc[(layer, run)]
emb = ConceptEmbedding.load(
os.path.join(folder_path, row['embedding']))
                stat = row.drop('embedding')
analysis_results[layer][run] = (emb, stat)
return analysis_results
def analysis_for_layer(self, layer_id: str
) -> Dict[int, Tuple[ConceptEmbedding, pd.Series]]:
"""Get a concept embedding of the given concept in the given layer.
:param layer_id: ID of the layer to find embedding in; key in
:py:attr:`layer_infos`
        :return: a dict indexed by run with a tuple per run of the resulting
            embedding and its evaluation results
"""
c_model = self.concept_model_for_layer(layer_id)
c_handle: ConceptDetection2DTrainTestHandle = \
self.concept_model_handle(c_model)
if 'lr' in self.layer_infos[layer_id]:
c_handle.optimizer.lr = self.layer_infos[layer_id]['lr']
stats_per_run = {}
for cross_val_run in range(self.cross_val_runs):
states, _, _ = zip(*c_handle.cross_validate(
num_splits=self.num_val_splits,
run_info_templ=("{}, cv {}/{}, ".format(
layer_id, cross_val_run + 1, self.cross_val_runs) +
"run {run}/{runs}"),
show_progress_bars=self.show_train_progress_bars))
for split, state_dict in enumerate(states):
c_model.load_state_dict(state_dict)
embedding = c_model.to_embedding()
metrics: pd.Series = self.evaluate_embedding(embedding)
# storing & logging
run = split + cross_val_run * self.num_val_splits
stats_per_run[run] = (embedding, metrics)
context = "Concept {}, layer {}, run {}".format(
self.concept.name, layer_id, run)
LOGGER.info("%s:\n%s", context,
self.emb_info_to_string(embedding, metrics))
return stats_per_run
def concept_model_handle(self,
c_model: ConceptDetectionModel2D = None,
emb: ConceptEmbedding = None,
layer_id: str = None
) -> ConceptDetection2DTrainTestHandle:
"""Train and eval handle for the given concept model.
The concept model to handle can either be specified directly or is
created from an embedding or from a given ``layer_id``.
:param c_model: the concept model to provide a handle for
:param emb: if ``c_model`` is not given, it is initialized using
:py:meth:`concept_model_from_embedding` on ``emb``
:param layer_id: if c_model and emb is not given, it is initialized
using :py:meth:`concept_model_for_layer` on ``layer_id``
:return: a handle for the specified or created concept model
"""
if c_model is None:
if emb is not None:
c_model = self.concept_model_from_embedding(emb)
elif layer_id is not None:
c_model = self.concept_model_for_layer(layer_id)
else:
raise ValueError("Either c_model, emb, or layer_id must "
"be given.")
return ConceptDetection2DTrainTestHandle(c_model, **self.train_val_args)
def concept_model_for_layer(self, layer_id):
"""Return a concept model for the given layer ID.
:param layer_id: ID of the layer the concept model should be attached
to; key in :py:attr:`layer_infos`
:returns: concept model for :py:attr:`concept` attached to given
layer in :py:attr:`model`
"""
c_model: ConceptDetectionModel2D = ConceptDetectionModel2D(
concept=SegmentationConcept2D.new(self.concept),
model=self.model, layer_id=layer_id,
**{'kernel_size': self.layer_infos[layer_id].get('kernel_size',
None),
**self.concept_model_args}
)
return c_model
@staticmethod
def concept_model_from_embedding(embedding: ConceptEmbedding
) -> ConceptDetectionModel2D:
"""Get concept model from embedding for training and eval."""
return ConceptDetectionModel2D.from_embedding(embedding)
@staticmethod
def emb_info_to_string(
emb: ConceptEmbedding, stats: pd.Series = None,
std_dev: Tuple[np.ndarray, float, float] = None) -> str:
"""Printable quick info about the given embedding with stats
(and standard deviation)."""
info: pd.Series = ConceptAnalysis.emb_info_to_pandas(emb, stats,
std_dev=std_dev)
# Formatting
float_format: str = "{: < 14.6f}"
exp_format: str = "{: < 14.6e}"
for idx in [i for i in info.index if "std" in i]:
info[idx] = exp_format.format(info[idx])
return info.to_string(float_format=float_format.format)
@staticmethod
def emb_info_to_pandas(emb: ConceptEmbedding, stats: pd.Series = None,
std_dev: Tuple[np.ndarray, float, float] = None
) -> pd.Series:
"""Quick info about embedding with stats (and standard dev)
as :py:class:`pandas.Series`."""
stats_info = stats if stats is not None else {}
emb_info = {"normal vec len": np.linalg.norm(emb.normal_vec),
"support factor": float(emb.support_factor),
"scaling factor": float(emb.scaling_factor)}
std_info = {"std dev normal vec (len)": np.linalg.norm(std_dev[0]),
"std dev support factor": std_dev[1],
"std dev scaling factor": std_dev[2]} \
if std_dev is not None else {}
return pd.Series({**stats_info, **emb_info, **std_info})
def evaluate_embedding(self, embedding: ConceptEmbedding):
"""Evaluate the embedding on its concept test data."""
# Value check:
if not embedding.concept.type == ConceptTypes.SEGMENTATION:
raise NotImplementedError(
("Routine currently only available for segmentation concepts,"
"but concept was of type {}").format(embedding.concept.type))
# Evaluation:
with torch.no_grad():
eval_model = self.concept_model_from_embedding(embedding)
stats: pd.Series = self.concept_model_handle(eval_model).evaluate()
return stats
def embedding_reduction(
self,
results_per_run: Dict[int, Tuple[ConceptEmbedding, pd.Series]]
) -> Tuple[ConceptEmbedding, Tuple[np.ndarray, float, float], pd.Series]:
"""Aggregate the embeddings collected in ``results_per_run``
to a best one.
This is a wrapper with standard deviation and stats collection and
logging around a call to :py:func:`emb_reduction`.
:param results_per_run: dictionary indexed by different runs to
obtain a concept embedding in the same setup
(layer, concept, etc.); values are tuples of:
- result embedding
- metrics results on the concept test set for that embedding
:return: a tuple of
- an aggregated ("mean") embedding for the concept and the layer,
- the standard deviation values of the normal vectors,
- the stats for the chosen "mean" embedding
"""
if len(results_per_run) == 0:
raise ValueError("Empty results dict")
layer_id: str = \
results_per_run[list(results_per_run.keys())[0]][0].layer_id
embeddings = [e for e, _ in results_per_run.values()]
best_embedding = self.emb_reduction(embeddings)
# Variance and stats collection:
std_dev: Tuple[np.ndarray, float, float] = \
ConceptEmbedding.std_deviation(embeddings)
stats: pd.Series = self.evaluate_embedding(best_embedding)
# Some logging:
LOGGER.info("Concept %s, layer %s:\n%s",
self.concept.name, layer_id,
self.emb_info_to_string(best_embedding, stats,
std_dev=std_dev))
return best_embedding, std_dev, stats
@staticmethod
def best_layer_from_stats(
results_per_layer: Dict[
str, Tuple[ConceptEmbedding, Tuple, pd.Series]]) -> str:
"""From the embedding quality results per layer, select the best layer.
For segmentation concepts, select by set IoU.
:param results_per_layer: tuple of
- the best concept embedding of the layer,
- the standard deviation results,
- the metric results when evaluated on its concept
:return: layer ID with best stats
"""
# DataFrame with layer-wise metric results
# (col: layer_id, idx: metric_name)
test_set_iou_key = ConceptDetection2DTrainTestHandle.test_("set_iou")
stats = pd.DataFrame({l_id: info[-1]
for l_id, info in results_per_layer.items()})
if test_set_iou_key not in stats.index:
raise KeyError(
("KPI key {} not in stats keys {}; Wrong concept type used?"
" (currently only segmentation concepts allowed)"
).format(test_set_iou_key, stats.index))
best_layer_id = stats.loc[test_set_iou_key].idxmax()
return str(best_layer_id)
def train_data_infos(self) -> pd.DataFrame:
"""Provide a DataFrame with some information on how each layer."""
layer_infos = {}
for layer_id in self.layer_infos:
c_model = self.concept_model_for_layer(layer_id)
c_handle = self.concept_model_handle(c_model)
layer_infos[layer_id] = {
'kernel_size': c_model.kernel_size,
'prop_neg_px': datavis.neg_pixel_prop(c_handle.data.train)}
return pd.DataFrame(layer_infos).transpose()
def best_embedding_with_logging(
self,
concept_exp_root: str,
logger: logging.Logger = None,
file_logging_formatter: logging.Formatter = None,
log_file: str = 'log.txt',
img_fp_templ: Optional[str] = "{}.png"
) -> ConceptEmbedding:
# TODO: properly separate saving & loading from analysis
"""Conduct an analysis, collect mean and best embeddings,
and save and log all results.
.. rubric:: Saved results
- the embedding of each layer and run as .npz file;
for format see
:py:meth:`hybrid_learning.concepts.embeddings.ConceptEmbedding.save`;
load with
:py:meth:`hybrid_learning.concepts.embeddings.ConceptEmbedding.load`
- the aggregated (best) embedding for each layer as .npz file
(see above)
- the final best embedding amongst all layers as .npz file
(chosen from above best embeddings)
- statistics of the runs for each layer incl. evaluation results and
infos on final embedding obtained by each run;
for format see :py:meth:`save_analysis_results`;
load with :py:meth:`ConceptAnalysis.load_analysis_results`
- statistics for the aggregated (best) embeddings;
for format see :py:meth:`ConceptAnalysis.save_best_emb_results`;
.. rubric:: Saved visualizations
- visualization of the training data
- visualization of the final best embedding on some test data samples
- visualization of the best embedding and each embedding in its layer
for comparison (the best embedding is a kind of mean of the embeddings
from its layer)
- visualization of the aggregated embeddings of each layer
for comparison
:param concept_exp_root: the root directory in which to save results
for this part
:param logger: the logger to use for file logging; defaults to the
module level logger; for the analysis, the logging level is set
to :py:const:`logging.INFO`
:param file_logging_formatter: if given, the formatter for the file
logging
:param log_file: the path to the logfile to use relative to
``concept_exp_root``
:param img_fp_templ: template for the path of image files relative to
``concept_exp_root``; must include one ``'{}'`` formatting variable
:return: the found best embedding for that part
"""
os.makedirs(concept_exp_root, exist_ok=True)
save_imgs: bool = img_fp_templ is not None
if save_imgs and ('{}' not in img_fp_templ
or img_fp_templ.count('{}') > 1):
raise ValueError("Invalid img_fp_templ {}; ".format(img_fp_templ) +
"must contain exactly one occurrence of '{}'")
save_as: Callable[[str], str] = lambda desc: os.path.join(
concept_exp_root,
img_fp_templ.format(desc))
# region Logging setup
if logger is None:
logger = logging.getLogger(__name__)
orig_logging_level: int = logger.level
logger.setLevel(logging.INFO)
part_log_file_handler = logging.FileHandler(
os.path.join(concept_exp_root, log_file))
part_log_file_handler.setLevel(logging.INFO)
if file_logging_formatter is not None:
part_log_file_handler.setFormatter(file_logging_formatter)
logger.addHandler(part_log_file_handler)
# endregion
# Some logging friendly settings of pandas
with pd.option_context('display.max_rows', None,
'display.max_columns', None,
'display.expand_frame_repr', False):
# region Settings info
logger.info("Starting concept %s", self.concept.name)
# Concept
logger.info("Concept data:\n%s", self.concept.data.info)
logger.info("Mean proportion of negative pixels orig data: %f",
datavis.neg_pixel_prop(self.concept.test_data))
# Analysis settings
logger.info("Analysis settings:\n%s", str(self))
logger.info("Layer-wise training data properties:\n%s",
self.train_data_infos())
if save_imgs:
datavis.visualize_mask_transforms(
{layer_id: self.concept_model_handle(
layer_id=layer_id).data.train
for layer_id in self.layer_infos},
save_as=save_as("vis_train_data_transforms"))
# endregion
# Analysis:
analysis_results = self.analysis()
self.save_analysis_results(analysis_results, concept_exp_root)
logger.info("Embedding results per run:\n%s",
self.analysis_results_to_pandas(analysis_results))
# Best embedding selection:
best_embs_results: Dict[str,
Tuple[ConceptEmbedding, Tuple, pd.Series]] \
= {layer_id: self.embedding_reduction(results_per_run)
for layer_id, results_per_run in analysis_results.items()}
self.save_best_emb_results(best_embs_results, concept_exp_root)
best_emb_infos = self.best_emb_infos_to_pandas(best_embs_results)
logger.info("Best embeddings per layer:\n%s", best_emb_infos)
# The very best embedding:
# Save it twice to find it more easily and store in best_embs
best_layer_id = self.best_layer_from_stats(best_embs_results)
best_layer_embs: List[ConceptEmbedding] = \
[e for e, stats in analysis_results[best_layer_id].values()]
best_embedding: ConceptEmbedding = \
best_embs_results[best_layer_id][0]
best_embedding.save(os.path.join(concept_exp_root, "best.npz"))
logger.info("Best embedding:\n%s",
best_emb_infos.loc[best_layer_id])
# pair-wise cosines with last row and column the best_embedding:
pairwise_cos = vis.pairwise_cosines(
embs=best_layer_embs + [best_embedding],
keys=list(range(len(best_layer_embs))) + ['best_emb'])
pairwise_cos.to_csv(
os.path.join(concept_exp_root, 'pairwise_cosines.csv'))
logger.info(
"Mean cosine dist of best_embedding to other embeddings:\n%s",
pairwise_cos.iloc[:-1, -1].mean())
logger.info(
'Pair-wise cosine dist between normal vectors of runs in '
'best layer:\n%s',
pairwise_cos)
# visualizations:
if save_imgs:
vis.visualize_concept_model(
self.concept_model_handle(emb=best_embedding),
save_as=save_as("vis_best_embedding"))
vis.visualize_concept_models(
{**{"best": self.concept_model_handle(emb=best_embedding)},
**{"emb {}".format(i): self.concept_model_handle(emb=e)
for i, e in enumerate(best_layer_embs)}},
save_as=save_as("vis_best_layer_embeddings"))
vis.visualize_concept_models(
{layer_id: self.concept_model_handle(emb=e)
for layer_id, (e, _, _) in best_embs_results.items()},
save_as=save_as("vis_best_embeddings"))
# Close part specific logging
logger.removeHandler(part_log_file_handler)
logger.setLevel(orig_logging_level)
return best_embedding
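# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Assumes a trained
# `model`, a prepared segmentation `concept`, and that the layer IDs below
# exist in ``model.named_modules()`` -- all hypothetical names:
#
#     analysis = ConceptAnalysis(
#         concept=concept, model=model,
#         layer_infos=["features.5", "features.7"],
#         num_val_splits=3, cross_val_runs=2)
#     best_emb = analysis.best_embedding_with_logging(
#         concept_exp_root="experiments/my_concept")
# ---------------------------------------------------------------------------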
|
{"hexsha": "494fe9286103a4d7df55d0e6632641008027ad69", "size": 33980, "ext": "py", "lang": "Python", "max_stars_repo_path": "hybrid_learning/concepts/analysis.py", "max_stars_repo_name": "continental/hybrid_learning", "max_stars_repo_head_hexsha": "37b9fc83d7b14902dfe92e0c45071c150bcf3779", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-19T12:47:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T11:26:00.000Z", "max_issues_repo_path": "hybrid_learning/concepts/analysis.py", "max_issues_repo_name": "continental/hybrid_learning", "max_issues_repo_head_hexsha": "37b9fc83d7b14902dfe92e0c45071c150bcf3779", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hybrid_learning/concepts/analysis.py", "max_forks_repo_name": "continental/hybrid_learning", "max_forks_repo_head_hexsha": "37b9fc83d7b14902dfe92e0c45071c150bcf3779", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-02T10:50:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T10:50:53.000Z", "avg_line_length": 46.231292517, "max_line_length": 86, "alphanum_fraction": 0.6073278399, "include": true, "reason": "import numpy", "num_tokens": 6980}
|
%% POS conversion
posfile = dir('*pos');
pos = importdata(posfile.name);
% determine if timestamps are first or last column
[~, b] = min(nanstd(diff(pos))); % find the column with smallest variability..
behav.timestamps = pos(:,b);
pos(:,b) = []; % remove timestamps from pos mat
if size(pos,2) > 5 % if optitrack
columns = [7 9 8 3 5 4 6 10 1 2];
pos = pos(:,columns);
behav.position.x = pos(:,2);
behav.position.y = pos(:,3);
behav.position.z = pos(:,4);
behav.orientation.rx = pos(:,5);
behav.orientation.ry = pos(:,6);
behav.orientation.rz = pos(:,7);
behav.orientation.rw = pos(:,8);
behav.timestamps = pos(:,1);
behav.errorPerMarker = pos(:,9);
behav.frameCount = pos(:,10);
elseif size(pos,2) < 5 % if LED tracking
    % average the two LED coordinates per frame (note the dim argument)
    behav.position.x = nanmean(pos(:,[2 4]),2);
    behav.position.y = nanmean(pos(:,[1 3]),2);
    % LED difference vector; indices follow the column layout after the
    % timestamp column was removed above
    dx = pos(:,2) - pos(:,4);
    dy = pos(:,1) - pos(:,3);
    angOffset = 0; % assumed zero offset; adjust for your rig
    ang = atan2(dy,dx)-angOffset;
    ang = mod(ang,2*pi);
    behav.orientation.z = ang;
    warning('come up with a better head dir calculation...')
end
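% --- hedged sketch: head direction from the optitrack quaternion ---
% For the optitrack branch above, yaw could be derived from the stored
% quaternion components (rx, ry, rz, rw); this assumes a Z-up convention,
% which may not match your rig:
%   qx = behav.orientation.rx; qy = behav.orientation.ry;
%   qz = behav.orientation.rz; qw = behav.orientation.rw;
%   yaw = atan2(2*(qw.*qz + qx.*qy), 1 - 2*(qy.^2 + qz.^2));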
%% LFP conversion
%% SPIKE conversion
% times
spktimes.timestamps = []; % TODO: fill with per-unit spike times
save('spktimes.cellinfo.mat','spktimes')
% waveforms
waveforms = []; % TODO: fill with per-unit mean waveforms
save('waveforms.cellinfo.mat','waveforms')
% features
features = []; % TODO: fill with per-unit clustering features
save('features.cellinfo.mat','features')
% metadata
metadata = []; % TODO: fill with session metadata
save('metadata.cellinfo.mat','metadata')
%% METADATA conversion
%% EVENT conversion
%% move old files to FMAT_format folder
|
{"author": "buzsakilab", "repo": "buzcode", "sha": "2d700a38b3c2a860ad1333be90f14d7a37a72815", "save_path": "github-repos/MATLAB/buzsakilab-buzcode", "path": "github-repos/MATLAB/buzsakilab-buzcode/buzcode-2d700a38b3c2a860ad1333be90f14d7a37a72815/utilities/fileConversions/convertFMAT2Matlab.m"}
|
import numpy as np
from numpy.random import seed
from keras.optimizers import Adam
from keras.models import Sequential
from keras.initializers import TruncatedNormal
from keras.layers import Conv1D, Dense, Flatten, Dropout, MaxPool1D
from keras.callbacks import ModelCheckpoint
from keras.utils import plot_model
import keras.backend as K
import select_data as sd
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
'''
See paper: Sensors 2018, 18(4), 1055; https://doi.org/10.3390/s18041055
"Divide and Conquer-Based 1D CNN Human Activity Recognition Using Test Data Sharpening"
by Heeryon Cho & Sang Min Yoon
This code learns a DOWN position activity classification model using LOWER body sensor data.
'''
X_train, y_train, X_valid, y_valid, X_test, y_test = sd.load_data("lower", "down")
n_classes = 2
# Generates one-hot encoding of the activity labels
y_train_oh = np.eye(n_classes)[y_train]
y_test_oh = np.eye(n_classes)[y_test]
y_valid_oh = np.eye(n_classes)[y_valid]
# Fit 1D CNN
k_init = TruncatedNormal(mean=0.0, stddev=0.001, seed=2017)
seed(2017)
model = Sequential()
model.add(Conv1D(100, 3, input_shape=(156, 1), activation='relu', kernel_initializer=k_init))
model.add(MaxPool1D(3, strides=1))
model.add(Conv1D(500, 3, activation='relu', kernel_initializer=k_init))
model.add(Flatten())
model.add(Dense(2, activation='softmax', kernel_initializer='uniform'))
model.add(Dropout(0.33))
adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss='mean_squared_error', optimizer=adam, metrics=['accuracy'])
# Summarize layers
print(model.summary())
model_dir = 'model/'
if not os.path.exists(model_dir):
os.makedirs(model_dir)
fig_dir = 'fig/'
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
fpath = model_dir + 'weights.{epoch:02d}-{val_acc:.2f}.hdf5'
cp_cb = ModelCheckpoint(fpath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Comment out to disable learning /// Uncomment below to allow learning:
#model.fit(np.expand_dims(X_train, axis=2), y_train_oh,
# validation_data=(np.expand_dims(X_valid, axis=2), y_valid_oh),
# batch_size=32, epochs=5, verbose=2, callbacks=[cp_cb])
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Save 1D CNN model image
if not os.path.exists('fig/model_lower_down.png'):
model_file = 'fig/model_lower_down.png'
plot_model(model, to_file=model_file)
del model
K.clear_session()
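# Hedged sketch (not part of the original script): evaluating a saved
# checkpoint. Assumes training above was enabled and ModelCheckpoint wrote a
# full model file; the file name below is hypothetical and depends on the
# epoch/val_acc actually reached.
#   from keras.models import load_model
#   best = load_model('model/weights.05-0.90.hdf5')
#   loss, acc = best.evaluate(np.expand_dims(X_test, axis=2), y_test_oh)
#   print("test accuracy:", acc)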
'''
/usr/bin/python2.7 /home/hcilab/Documents/OSS/sensors2018cnnhar/opp/learn_lower_down.py
/home/hcilab/.local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Using TensorFlow backend.
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv1d_1 (Conv1D) (None, 154, 100) 400
_________________________________________________________________
max_pooling1d_1 (MaxPooling1 (None, 152, 100) 0
_________________________________________________________________
conv1d_2 (Conv1D) (None, 150, 500) 150500
_________________________________________________________________
flatten_1 (Flatten) (None, 75000) 0
_________________________________________________________________
dense_1 (Dense) (None, 2) 150002
_________________________________________________________________
dropout_1 (Dropout) (None, 2) 0
=================================================================
Total params: 300,902
Trainable params: 300,902
Non-trainable params: 0
_________________________________________________________________
None
Process finished with exit code 0
'''
|
{"hexsha": "fe52b87d46cb419909746f92bd4e9bb37496b071", "size": 4257, "ext": "py", "lang": "Python", "max_stars_repo_path": "opp/learn_lower_down.py", "max_stars_repo_name": "heeryoncho/sensors2018cnnhar", "max_stars_repo_head_hexsha": "2c0ae84b83a95bd5b5ab13df0fb3f5e8529df91f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-09-25T07:55:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-08T15:01:56.000Z", "max_issues_repo_path": "opp/learn_lower_down.py", "max_issues_repo_name": "heeryoncho/sensors2018cnnhar", "max_issues_repo_head_hexsha": "2c0ae84b83a95bd5b5ab13df0fb3f5e8529df91f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opp/learn_lower_down.py", "max_forks_repo_name": "heeryoncho/sensors2018cnnhar", "max_forks_repo_head_hexsha": "2c0ae84b83a95bd5b5ab13df0fb3f5e8529df91f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-12-12T16:40:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-29T01:24:07.000Z", "avg_line_length": 38.7, "max_line_length": 247, "alphanum_fraction": 0.6925064599, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1017}
|
import os
import time
from os.path import isfile, join
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from background_subtraction import bs_godec, get_godec_frame, postprocess_img
from file_utils import (create_folder_if_absent, get_all_files, get_frame,
get_frame_GREY, get_frame_RGB, normalize_frame)
from naive_presence_detection import get_init_heatmap_plot
from visualizer import (init_comparison_plot, init_heatmap,
update_comparison_plot, update_heatmap)
def optical_flow_lk(files, track_length=10, detect_interval=5):
print("Performing Lucas-Kanade Optical Flow")
plot = get_init_heatmap_plot()
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 4,
qualityLevel = 0.2,
minDistance = 6,
blockSize = 4 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (3,3),
maxLevel = 3,
criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
# Take first frame and find corners in it
first_frame_gray = get_frame_GREY(files[0])
# TODO: instead of using good features to track, possibly just use contour points directly
prevPts = cv.goodFeaturesToTrack(first_frame_gray, mask = None, **feature_params)
color = np.random.randint(0,255,(100,3))
counter = 1
prevImg = first_frame_gray
    while counter < len(files):
        frame = get_frame_GREY(files[counter])
        nextImg = frame.copy()
        update_heatmap(get_frame(files[counter]), plot)
        nextPts, status, err = cv.calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, None, **lk_params)
        if nextPts is None:
            # tracking lost all points: re-detect features, then track again
            print("Target not moving")
            prevPts = cv.goodFeaturesToTrack(frame, mask = None, **feature_params)
            nextPts, status, err = cv.calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, None, **lk_params)
        displacement = nextPts - prevPts
        if (abs(displacement) > 3).any():
            print(displacement)
            plt.xlabel("Displacement: {}".format(displacement))
        else:
            plt.xlabel("Displacement in x/y lower than 3")
        # Select good points: each element of status is set to 1 if the flow
        # for the corresponding feature has been found, otherwise 0.
        good_new = nextPts[status==1]
        good_old = prevPts[status==1]
        # Now update the previous frame and previous points
        prevImg = nextImg.copy()
        prevPts = good_new.reshape(-1,1,2)
        counter += 1
def optical_flow_dense(files):
# Perform Godec first on all frames
M, LS, L, S, width, height = bs_godec(files)
first_frame = get_frame(files[0])
# frames to be compared is after godec and postprocessing
godec_frame, probability = get_godec_frame(M, L, S, width, height, 0)
img, centroids = postprocess_img(godec_frame, all_images=False)
prev_gray = img
    ims = init_comparison_plot(first_frame, ["Original", "Thresholded", "Flow"], 1, 3)
test = cv.cvtColor(first_frame.astype("uint8"), cv.COLOR_GRAY2BGR)
hsv_mask = np.zeros_like(test)
hsv_mask[...,1] = 255
window_name = "Dense Optical Flow"
counter = 1
while counter < len(files):
print(counter)
godec_frame, probability = get_godec_frame(M, L, S, width, height, counter)
img, centroids = postprocess_img(godec_frame, all_images=False)
next_gray = img
flow = cv.calcOpticalFlowFarneback(prev_gray,next_gray,
None,
pyr_scale = 0.5,
levels = 5,
winsize = 11,
iterations = 5,
poly_n = 5,
poly_sigma = 1.1,
flags = 0)
magnitude, angle = cv.cartToPolar(flow[...,0], flow[...,1])
hsv_mask[...,0] = angle*180/np.pi/2 # Set image hue according to the optical flow direction
hsv_mask[...,2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX) # Set image value according to the optical flow magnitude (normalized)
# plotting of grayscale flowmap and data heatmap
update_comparison_plot(ims, [get_frame(files[counter]), next_gray, hsv_mask])
plt.title("Max Magnitude :" + str(np.amax(magnitude)) + "\nMax Angle:" + str(np.amax(angle)))
create_folder_if_absent("optical_flow_pics")
plt.savefig("optical_flow_pics/{}.png".format(counter))
prev_gray = next_gray
k = cv.waitKey(30) & 0xff
counter += 1
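# --- Hedged usage sketch (not part of the original module) ---
# Assumes `get_all_files` returns the recording's frame files in order and
# that the folder below exists (both are assumptions):
#
#   files = get_all_files("data/some_recording")
#   optical_flow_dense(files)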
|
{"hexsha": "eba7442fab27cd73ac90219a992578e75bfad6cb", "size": 4860, "ext": "py", "lang": "Python", "max_stars_repo_path": "MLX90640/optical_flow.py", "max_stars_repo_name": "Nekostone/activity-levels-monitoring", "max_stars_repo_head_hexsha": "9197924586425f3f881846742d05c48a242169ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MLX90640/optical_flow.py", "max_issues_repo_name": "Nekostone/activity-levels-monitoring", "max_issues_repo_head_hexsha": "9197924586425f3f881846742d05c48a242169ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-07-21T13:42:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:44:32.000Z", "max_forks_repo_path": "MLX90640/optical_flow.py", "max_forks_repo_name": "Nekostone/activity-levels-monitoring", "max_forks_repo_head_hexsha": "9197924586425f3f881846742d05c48a242169ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-05T12:11:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-05T12:11:27.000Z", "avg_line_length": 45.0, "max_line_length": 150, "alphanum_fraction": 0.6121399177, "include": true, "reason": "import numpy", "num_tokens": 1162}
|
import random
sample_len = 1000
class GetDataset():
def __init__(self, sub_dirs, useful_train_dirs, useful_img_dirs_train, \
useful_val_dirs, useful_img_dirs_val):
self.sub_dirs = sub_dirs
self.useful_train_dirs = useful_train_dirs
self.useful_img_dirs_train = useful_img_dirs_train
self.useful_val_dirs = useful_val_dirs
self.useful_img_dirs_val = useful_img_dirs_val
def get_train_data(self):
data_list = []
for sub_dir in self.sub_dirs:
            if sub_dir.basename() in self.useful_train_dirs:  # four sequences are ultimately used for training
# print(f'{sub_dir.basename()}')
img_dir_path = sub_dir / 'hdImgs'
annotation_dir_path = sub_dir / 'hdPose3d_stage1_coco19'
img_dirs = img_dir_path.dirs()
                annotation_files = annotation_dir_path.files()  # no subfolders here; these are all annotation files
# sample_annotation_files = random.sample(annotation_files, 6000)
for img_dir in img_dirs:
if img_dir.basename() in self.useful_img_dirs_train:
                        # imgs = img_dir.files()  # len(imgs) == 16716, the whole dataset
                        cali_file_path = sub_dir / ('calibration_' + sub_dir.basename() + '.json')  # path of the calibration file
for idx in range(len(annotation_files)):
basename = annotation_files[idx].basename()
                            if basename.endswith('.json'):  # guard: reading non-json entries raised errors
                                anno_num = basename.split('.')[0].split('_')[1]  # only the numeric part of the file name is needed
                                img_path = img_dir.split('panoptic-toolbox/')[-1] / (img_dir.basename() + '_' + anno_num + '.jpg')
                                data_list.append((img_path, annotation_files[idx], cali_file_path,
                                                  img_dir.basename()))  # img_dir.basename() --> used to fetch the matching camera parameters
# if len(data_list) >= sample_len:
# print('sample ..')
# # random.shuffle(data_list)
# # data_list = random.sample(data_list,sample_len)
# else:
# print(f'{useful_train_dirs}: {len(data_list)} ')
random.shuffle(data_list)
        print(f'{self.useful_train_dirs}: {len(data_list)}')
return data_list
def get_val_data(self):
data_list = []
for sub_dir in self.sub_dirs:
if sub_dir.basename() in self.useful_val_dirs:
img_dir_path = sub_dir / 'hdImgs'
annotation_dir_path = sub_dir / 'hdPose3d_stage1_coco19'
img_dirs = img_dir_path.dirs()
                annotation_files = annotation_dir_path.files()  # no subfolders here; these are all annotation files
for img_dir in img_dirs:
if img_dir.basename() in self.useful_img_dirs_val:
                        cali_file_path = sub_dir / ('calibration_' + sub_dir.basename() + '.json')  # path of the calibration file
for idx in range(len(annotation_files)):
basename = annotation_files[idx].basename()
                            if basename.endswith('.json'):  # guard: reading non-json entries raised errors
                                anno_num = basename.split('.')[0].split('_')[1]  # only the numeric part of the file name is needed
                                img_path = img_dir.split('panoptic-toolbox/')[-1] / (img_dir.basename() + '_' + anno_num + '.jpg')
                                data_list.append((img_path, annotation_files[idx], cali_file_path,
                                                  img_dir.basename()))  # img_dir.basename() --> used to fetch the matching camera parameters
return data_list
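# Each returned entry is a 4-tuple; a sketch with hypothetical values:
#   ('160906_pizza1/hdImgs/00_09/00_09_00000123.jpg',  # image path
#    <path to the matching pose annotation json>,      # annotation file
#    <path to calibration_160906_pizza1.json>,         # calibration file
#    '00_09')                                          # camera id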
if __name__ == '__main__':
import sys
sys.path.append('/home/xuchengjun/ZXin/smap')
from path import Path
from IPython import embed
from lib.utils.tools import read_json
from lib.preprocess.project import reproject
import json
import numpy as np
import os
import time
dataset_path = Path('/media/xuchengjun/datasets/panoptic-toolbox')
sub_dirs = dataset_path.dirs()
useful_train_dirs = ['160906_pizza1'] # '170221_haggling_b1', '160906_pizza1','160422_ultimatum1' 161029_sports1
    useful_val_dirs = ['160422_ultimatum1'] # '170407_haggling_a1' lacks intrinsics for cameras 00_16 and 00_30
useful_img_dirs_train = ['00_09'] # ,'00_01','00_02','00_03','00_04','00_05','00_06','00_07','00_08','00_09',
useful_img_dirs_val = ['00_30']
print(useful_img_dirs_val)
# embed()
get_data = GetDataset(sub_dirs, useful_train_dirs, useful_img_dirs_train, useful_val_dirs, useful_img_dirs_val)
# train_data_list = get_data.get_train_data()
train_data_list = get_data.get_val_data()
"""
    total 29 videos --> useful 426099 imgs & non-useful 41950 imgs; the original SMAP paper also used only about 160k images
"""
    # this variant saves each frame's annotation as a separate file in one folder
min_width = 1000
min_height = 1000
human = 0
no_human = 0
s_time = time.time()
output_root_path = Path('/media/xuchengjun/datasets/CMU/train/160422_ultimatum1/30')
if not output_root_path.exists():
os.makedirs(output_root_path)
print(f'creating gt json path --> {output_root_path}')
for idx in range(len(train_data_list)):
img_path , anno_path = train_data_list[idx][0] , train_data_list[idx][1]
cali_path , cam_id = train_data_list[idx][2] , train_data_list[idx][3]
anno_file = read_json(anno_path)
cali_file = read_json(cali_path)
if anno_file == None:
print(train_data_list[idx])
cam_id = str(cam_id)
lnum , rnum = int(cam_id.split('_')[0]) , int(cam_id.split('_')[1])
cam_coors , pixel_coors , skel_with_conf , cam, resolution = reproject(anno_file,cali_file,(lnum,rnum))
if len(cam_coors) < 1:
no_human += 1
continue
tmp = str(img_path).split('/')
img_anno_name = tmp[-4] + '--' + tmp[-2] + "--" + tmp[-1].split('.')[0].split('_')[-1]
# output_json_root = dataset_path / f'{tmp[-4]}' / 'json_file' #/media/xuchengjun/datasets/CMU/170407_haggling_a1/json_file
# json_sub_dirs = output_json_root / f'{tmp[-2]}'
# if not json_sub_dirs.exists():
# os.makedirs(json_sub_dirs)
# output_json_path = json_sub_dirs / f'{img_anno_name}.json'
output_json_path = output_root_path / f'{img_anno_name}.json'
output_json = dict()
bodys = list()
for i in range(len(cam_coors)):
body_new = np.zeros((15,11))
            for jtype in range(15): # exactly the first 15 joints
body_new[jtype][0] = pixel_coors[i][0][jtype] # x (pixel)
body_new[jtype][1] = pixel_coors[i][1][jtype] # y (pixel)
body_new[jtype][2] = pixel_coors[i][2][jtype] # Z (cam)
# if skel_with_conf[i][3][jtype] >= 0.2:
body_new[jtype][3] = 2
body_new[jtype][4] = cam_coors[i][0][jtype] # X (cam)
body_new[jtype][5] = cam_coors[i][1][jtype] # Y (cam)
body_new[jtype][6] = cam_coors[i][2][jtype] # Z (cam)
body_new[jtype][7] = cam[0, 0] # fx
body_new[jtype][8] = cam[1, 1] # fy
body_new[jtype][9] = cam[0, 2] # cx
body_new[jtype][10] = cam[1, 2] # cy
bodys.append(body_new.tolist())
output_json['dataset'] = 'CMU'
output_json['img_paths'] = img_path.split('toolbox/')[-1]
output_json['img_width'] = resolution[0]
output_json['img_height'] = resolution[1]
output_json['image_id'] = img_path.split('/')[-1].split('.')[-2]
output_json['cam_id'] = cam_id
output_json['bodys'] = bodys
output_json["isValidation"] = 1
min_width = min(min_width, resolution[0])
min_height = min(min_height, resolution[1])
with open(output_json_path, 'w') as f:
json.dump(output_json, f)
print('working .. {} / {}'.format(human, len(train_data_list)))
if human > 2000:
break
human += 1
e_time = time.time()
print(f'min_width: {min_width} \t min_height: {min_height}')
print(f'done .. total_useful: {human}, no_human: {no_human}')
print(f'using time --> {(e_time - s_time) / 3600}')
# -------------------------------------------------------------------------------------------------------
    # here, everything is saved into a single file instead
# output_json_file = Path('/media/xuchengjun/datasets/CMU/CMU.json')
# count = 1
# no_human = 0
# s_time = time.time()
# output_json = dict()
# output_json['root'] = []
# min_width = 1000
# min_height = 1000
# for idx in range(len(train_data_list)):
# img_path, anno_path = train_data_list[idx][0], train_data_list[idx][1]
# cali_path, cam_id = train_data_list[idx][2], train_data_list[idx][3]
# anno_file = read_json(anno_path)
# cali_file = read_json(cali_path)
# cam_id_str = str(cam_id)
# lnum, rnum = int(cam_id_str.split('_')[0]), int(cam_id_str.split('_')[1])
# # cam_coors, pixel_coors --> list:[array, array, ...]
# cam_coors, pixel_coors, skel_with_conf, cam, resolution = reproject(anno_file, cali_file, (lnum, rnum))
# if len(cam_coors) < 1: # not include human
# no_human += 1
# continue
# bodys = list()
# for i in range(len(cam_coors)):
# body_new = np.zeros((15,11))
    #         for jtype in range(15): # exactly the first 15 joints
# body_new[jtype][0] = pixel_coors[i][0][jtype] # x (pixel)
# body_new[jtype][1] = pixel_coors[i][1][jtype] # y (pixel)
# body_new[jtype][2] = pixel_coors[i][2][jtype] # Z (cam)
# # if skel_with_conf[i][3][jtype] >= 0.2:
# body_new[jtype][3] = 2
# body_new[jtype][4] = cam_coors[i][0][jtype] # X (cam)
# body_new[jtype][5] = cam_coors[i][1][jtype] # Y (cam)
# body_new[jtype][6] = cam_coors[i][2][jtype] # Z (cam)
# body_new[jtype][7] = cam[0, 0] # fx
# body_new[jtype][8] = cam[1, 1] # fy
# body_new[jtype][9] = cam[0, 2] # cx
# body_new[jtype][10] = cam[1, 2] # cy
# bodys.append(body_new.tolist())
# this_pic = dict()
# this_pic['dataset'] = 'CMU'
# this_pic['img_paths'] = img_path
# this_pic['img_width'] = resolution[0]
# this_pic['img_height'] = resolution[1]
# this_pic['image_id'] = img_path.split('/')[-1].split('.')[-2]
# this_pic['cam_id'] = cam_id
# this_pic['bodys'] = bodys
# this_pic["isValidation"] = 0
# output_json["root"].append(this_pic)
# min_width = min(min_width, resolution[0])
# min_height = min(min_height, resolution[1])
# print(f'working .. {count} / {len(train_data_list)}')
# count += 1
# with open(output_json_file, 'w') as f:
# json.dump(output_json, f)
# e_time = time.time()
# print(f'min_width: {min_width} \t min_height: {min_height}')
# print(f'done .. total_useful: {count}, no_human: {no_human}')
# ------------------------------------------------------------------------------------------------------------------
|
{"hexsha": "3f7ead9b38c5301d893f60ba12d73373aa4c6f75", "size": 11373, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/preprocess/gen_CMU_anno.py", "max_stars_repo_name": "ZXin0305/hri", "max_stars_repo_head_hexsha": "b91d89158fc2d05ca4d3ea3ba4a7b9f69b0221a2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/preprocess/gen_CMU_anno.py", "max_issues_repo_name": "ZXin0305/hri", "max_issues_repo_head_hexsha": "b91d89158fc2d05ca4d3ea3ba4a7b9f69b0221a2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/preprocess/gen_CMU_anno.py", "max_forks_repo_name": "ZXin0305/hri", "max_forks_repo_head_hexsha": "b91d89158fc2d05ca4d3ea3ba4a7b9f69b0221a2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.6, "max_line_length": 140, "alphanum_fraction": 0.5533280577, "include": true, "reason": "import numpy", "num_tokens": 3182}
|
# blackbox_function.py
"""Volume 2: Optimization Packages I (scipy.optimize). Auxiliary File."""
import numpy as np
from scipy import linalg as la
def blackbox(y_free):
"""
Finds the length of a curve approximated piece-wise by a set of points.
Accepts:
y_free (1xn ndarray): the non-endpoint y-values of the curve.
Returns:
total_length (float): the length of the approximated curve.
"""
# Initialize local constants.
m = len(y_free) + 2 # Number points: free variables, origin, and endpoint.
a, b = 40, 30 # Coordinates of endpoint.
# Generate the evenly-spaced x-values of the curve.
x = np.linspace(0,a,m)
# Pad the free variables with the fixed endpoint values, 0 and b.
y = np.hstack((0,y_free, b))
# Calculate and return the line integral of the approximated curve.
partial_norms = []
for i,item in enumerate(y[:-1]):
partial_norms.append(la.norm(np.array([x[i+1]-x[i],y[i+1] - item])))
return np.sum(partial_norms)
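if __name__ == "__main__":
    # Hedged usage sketch (not part of the original lab file): minimize the
    # curve length over the free y-values. The optimum should approach the
    # straight segment from (0, 0) to (40, 30), whose length is 50.
    from scipy import optimize as opt
    y0 = 30 * np.random.random(18)  # 18 free points => m = 20 total points
    result = opt.minimize(blackbox, y0, method="BFGS")
    print("minimal curve length:", result.fun)  # should be close to 50.0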
|
{"hexsha": "191396d79e679e2574f1a31a0429d9ace3a77865", "size": 1026, "ext": "py", "lang": "Python", "max_stars_repo_path": "Vol2B/scipyoptimize/blackbox_function.py", "max_stars_repo_name": "joshualy/numerical_computing", "max_stars_repo_head_hexsha": "9f474e36fe85ae663bd20e2f2d06265d1f095173", "max_stars_repo_licenses": ["CC-BY-3.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Vol2B/scipyoptimize/blackbox_function.py", "max_issues_repo_name": "joshualy/numerical_computing", "max_issues_repo_head_hexsha": "9f474e36fe85ae663bd20e2f2d06265d1f095173", "max_issues_repo_licenses": ["CC-BY-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Vol2B/scipyoptimize/blackbox_function.py", "max_forks_repo_name": "joshualy/numerical_computing", "max_forks_repo_head_hexsha": "9f474e36fe85ae663bd20e2f2d06265d1f095173", "max_forks_repo_licenses": ["CC-BY-3.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-11-05T14:45:03.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-05T14:45:03.000Z", "avg_line_length": 33.0967741935, "max_line_length": 78, "alphanum_fraction": 0.6608187135, "include": true, "reason": "import numpy,from scipy", "num_tokens": 263}
|
# coding=utf-8
import uuid
import os
import cv2
import numpy as np
is_cut = False
class CutPlateNumber:
def __init__(self):
self.is_cut = False
def preprocess(self,gray, iterations):
        # Gaussian smoothing
        gaussian = cv2.GaussianBlur(gray, (3, 3), 0, 0, cv2.BORDER_DEFAULT)
        # median filtering
        median = cv2.medianBlur(gaussian, 5)
        # Sobel operator, gradient in the X direction
        sobel = cv2.Sobel(median, cv2.CV_8U, 1, 0, ksize=3)
        # binarization
        ret, binary = cv2.threshold(sobel, 170, 255, cv2.THRESH_BINARY)
        # structuring elements for the dilation and erosion operations
        element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
        element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 7))
        # dilate once to make the contours stand out
        dilation = cv2.dilate(binary, element2, iterations=1)
        # erode once to remove fine details
        erosion = cv2.erode(dilation, element1, iterations=1)
        # dilate again to make the contours more distinct
        dilation2 = cv2.dilate(erosion, element2, iterations=iterations)
        return dilation2
def findPlateNumberRegion(self,img):
        region = []
        # find contours
        binary, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # filter out regions with small areas
        for i in range(len(contours)):
            cnt = contours[i]
            # compute the area of this contour
            area = cv2.contourArea(cnt)
            # discard contours with small areas
            if (area < 2000):
                continue
            # find the minimum-area rectangle; it may be rotated
            rect = cv2.minAreaRect(cnt)
            # box holds the coordinates of the four corners
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # compute height and width
            height = abs(box[0][1] - box[2][1])
            width = abs(box[0][0] - box[2][0])
            # a plate's width/height ratio normally lies between 2.7 and 5
            ratio = float(width) / float(height)
            if (ratio > 5 or ratio < 2):
                continue
            region.append(box)
        return region
def detect(self,img, iterations, is_infer=False):
        # convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # morphological preprocessing
        dilation = self.preprocess(gray, iterations)
        # locate the plate region
        region = self.findPlateNumberRegion(dilation)
        if len(region) > 0:
            # plate detection with 6 dilation iterations succeeded: crop and save
            box = region[0]
            ys = [box[0, 1], box[1, 1], box[2, 1], box[3, 1]]
            xs = [box[0, 0], box[1, 0], box[2, 0], box[3, 0]]
            ys_sorted_index = np.argsort(ys)
            xs_sorted_index = np.argsort(xs)
            x1 = box[xs_sorted_index[0], 0]
            x2 = box[xs_sorted_index[3], 0]
            y1 = box[ys_sorted_index[0], 1]
            y2 = box[ys_sorted_index[3], 1]
            img_plate = img[y1:y2, x1:x2]
            if is_infer:
                # for inference images, use a fixed file name
                cv2.imwrite('../images/infer.jpg', img_plate)
            else:
                # for training images, crop into the temporary data folder
                # to await the next processing step
                cv2.imwrite('../data/data_temp/%s.jpg' % self.img_name, img_plate)
        else:
            if self.is_cut:
                pass
            else:
                self.is_cut = True
                # if 6 dilation iterations failed to find the plate,
                # retry once with 3 iterations
                self.detect(img, 3)
def strat_crop(self,imagePath, is_infer=False,name=None):
self.is_cut = False
if not is_infer:
self.img_name = name.split('.')[0]
        # start cropping
        img = cv2.imread(imagePath)
        # default to 6 dilation iterations
self.detect(img=img, iterations=6, is_infer=is_infer)
if __name__ == '__main__':
cutPlateNumber = CutPlateNumber()
img_path = '../images/src_temp/'
    # gather the paths of all source images
imgs = os.listdir(img_path)
for img in imgs:
cutPlateNumber.strat_crop(img_path + img, False,img)
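    # Hedged example (hypothetical path): crop a single image for inference
    # instead; detect() then writes the result to ../images/infer.jpg:
    # cutPlateNumber.strat_crop('../images/car.jpg', is_infer=True)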
|
{"hexsha": "e625660b1f932bb6afe53a3c8b02d5c78b0b9778", "size": 3664, "ext": "py", "lang": "Python", "max_stars_repo_path": "note7/code/CutPlateNumber.py", "max_stars_repo_name": "fluffyrita/LearnPaddle", "max_stars_repo_head_hexsha": "45a2b56f12264616dd2903c8a7c822dbf3721133", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 367, "max_stars_repo_stars_event_min_datetime": "2018-01-26T01:56:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T12:04:20.000Z", "max_issues_repo_path": "note7/code/CutPlateNumber.py", "max_issues_repo_name": "lrq-Alice/LearnPaddle", "max_issues_repo_head_hexsha": "c4500904615149115535b66a67d3e5d06f8435c4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2018-03-19T12:32:45.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-26T07:08:07.000Z", "max_forks_repo_path": "note7/code/CutPlateNumber.py", "max_forks_repo_name": "lrq-Alice/LearnPaddle", "max_forks_repo_head_hexsha": "c4500904615149115535b66a67d3e5d06f8435c4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 137, "max_forks_repo_forks_event_min_datetime": "2018-01-26T01:56:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-01T13:06:56.000Z", "avg_line_length": 30.7899159664, "max_line_length": 99, "alphanum_fraction": 0.5458515284, "include": true, "reason": "import numpy", "num_tokens": 1267}
|
import os
import argparse
from os.path import join
import numpy
import json
import shutil
def main():
"""
Creating Test Split for evaluating the attack
"""
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('--data_dir', '-data', type=str,
default="/data2/DeepFakeDataset/manipulated_sequences/") # dir containing face2face etc
p.add_argument('--dest_dir', '-dest', type=str,
default="/data2/DeepFakeDataset/manipulated_test_sequences/")
args = p.parse_args()
data_dir_path = args.data_dir
dest_dir_path = args.dest_dir
with open("test_split.json") as f:
test_pair_list = json.loads(f.read())
test_file_name_list = [ "{}_{}.mp4".format(pair[0], pair[1]) for pair in test_pair_list ]
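    # test_split.json is expected to hold pairs of video ids, e.g.
    # [["000", "003"], ["004", "982"], ...] (hypothetical ids), which map to
    # file names such as 000_003.mp4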
fake_methods = ["Deepfakes", "Face2Face", "FaceSwap", "NeuralTextures"]
compression_levels = ["c23", "c40", "raw"]
for fake_method in fake_methods:
for compression_level in compression_levels:
input_folder_path = join(data_dir_path, fake_method, compression_level, "videos")
if not os.path.isdir(input_folder_path):
print ("Did not find input directory:", input_folder_path)
continue
print("Copying", fake_method, compression_level)
# create destination directory if it does not exist
dest_folder_path = join(dest_dir_path, fake_method, compression_level, "videos")
if not os.path.isdir(dest_folder_path):
os.makedirs(dest_folder_path)
for input_fn in test_file_name_list:
source_file = join(input_folder_path, input_fn)
if os.path.exists(source_file):
dest_file = join(dest_folder_path, input_fn)
shutil.copyfile(source_file, dest_file)
if __name__ == '__main__':
main()
|
{"hexsha": "610e74c3444196cfb029d1ae0084b643e6e3bf81", "size": 1959, "ext": "py", "lang": "Python", "max_stars_repo_path": "create_test_data.py", "max_stars_repo_name": "paarthneekhara/AdversarialDeepFakes", "max_stars_repo_head_hexsha": "0454c6eb528beb8e5d6ca9ee378d7d6e6e085f96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-07-26T12:18:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T02:28:44.000Z", "max_issues_repo_path": "create_test_data.py", "max_issues_repo_name": "paarthneekhara/AdversarialDeepFakes", "max_issues_repo_head_hexsha": "0454c6eb528beb8e5d6ca9ee378d7d6e6e085f96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-09-08T03:23:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:58:13.000Z", "max_forks_repo_path": "create_test_data.py", "max_forks_repo_name": "paarthneekhara/AdversarialDeepFakes", "max_forks_repo_head_hexsha": "0454c6eb528beb8e5d6ca9ee378d7d6e6e085f96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-10T00:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T08:07:52.000Z", "avg_line_length": 36.2777777778, "max_line_length": 95, "alphanum_fraction": 0.6457376212, "include": true, "reason": "import numpy", "num_tokens": 420}
|
\filetitle{datcmp}{Compare two IRIS serial date numbers}{dates/datcmp}
\paragraph{Syntax}\label{syntax}
\begin{verbatim}
Flag = datcmp(Dat1,Dat2)
\end{verbatim}
\paragraph{Input arguments}\label{input-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{Dat1}, \texttt{Dat2} {[} numeric {]} - IRIS serial date
numbers or vectors.
\end{itemize}
\paragraph{Output arguments}\label{output-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{Flag} {[} \texttt{true} \textbar{} \texttt{false} {]} - True
for numbers that represent the same date.
\end{itemize}
\paragraph{Description}\label{description}
The two date vectors must either be the same length, or one of them
must be scalar.
Use this function instead of the plain comparison operator, \texttt{==},
to compare dates. The plain comparison can sometimes give false results
because of round-off errors.
\paragraph{Example}\label{example}
\begin{verbatim}
d1 = qq(2010,1);
d2 = qq(2009,1):qq(2010,4);
datcmp(d1,d2)
ans =
0 0 0 0 1 0 0 0
\end{verbatim}
|
{"hexsha": "a2b79b9354efb94df6323685da07ec290bd4930c", "size": 1109, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "-help/dates/datcmp.tex", "max_stars_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_stars_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-06T13:38:38.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-06T13:38:38.000Z", "max_issues_repo_path": "-help/dates/datcmp.tex", "max_issues_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_issues_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-03-28T08:13:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-02T10:40:25.000Z", "max_forks_repo_path": "-help/dates/datcmp.tex", "max_forks_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_forks_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-17T07:06:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T07:06:39.000Z", "avg_line_length": 22.6326530612, "max_line_length": 74, "alphanum_fraction": 0.7159603246, "num_tokens": 353}
|
module WrongHidingInLHS where
f : Set -> Set
f {x} = x
|
{"hexsha": "d12e87376a5600992d99bd9e5c071cbd1b99cdd1", "size": 58, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Fail/WrongHidingInLHS.agda", "max_stars_repo_name": "shlevy/agda", "max_stars_repo_head_hexsha": "ed8ac6f4062ea8a20fa0f62d5db82d4e68278338", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1989, "max_stars_repo_stars_event_min_datetime": "2015-01-09T23:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:20:48.000Z", "max_issues_repo_path": "test/Fail/WrongHidingInLHS.agda", "max_issues_repo_name": "shlevy/agda", "max_issues_repo_head_hexsha": "ed8ac6f4062ea8a20fa0f62d5db82d4e68278338", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4066, "max_issues_repo_issues_event_min_datetime": "2015-01-10T11:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:14:49.000Z", "max_forks_repo_path": "test/Fail/WrongHidingInLHS.agda", "max_forks_repo_name": "Agda-zh/agda", "max_forks_repo_head_hexsha": "231d6ad8e77b67ff8c4b1cb35a6c31ccd988c3e9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 371, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:04:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T19:00:30.000Z", "avg_line_length": 8.2857142857, "max_line_length": 29, "alphanum_fraction": 0.6379310345, "num_tokens": 23}
|
using Test
using Logging
# using Revise
using LarSurf
# Logging.configure(level==Logging.Debug)
# include("../src/LarSurf.jl")
# include("../src/block.jl")
@testset "Block basic function Tests" begin
data3d = LarSurf.random_image([7, 7, 7], [1,2,2], [3, 4, 5], 2)
@test maximum(data3d) > 2
@test minimum(data3d) < 1
end
@testset "Tetris" begin
segmentation = LarSurf.tetris_brick()
@test minimum(segmentation) == 0
@test maximum(segmentation) == 1
end
@testset "data234" begin
segmentation = LarSurf.data234()
@test minimum(segmentation) == 0
@test maximum(segmentation) == 1
end
@testset "half sphere generation" begin
segmentation = LarSurf.generate_truncated_sphere(10, [20,20,20])
@test minimum(segmentation) == 0
@test maximum(segmentation) == 1
segmentation = LarSurf.generate_truncated_sphere(10)
@test minimum(segmentation) == 0
@test maximum(segmentation) == 1
end
|
{"hexsha": "f76c5f73ffe2766fcae1a29dafa004ee9b851bc9", "size": 942, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/datasets_test.jl", "max_stars_repo_name": "mjirik/LarSurf.jl", "max_stars_repo_head_hexsha": "de2eaec62dfe8c63e7d621bc973aa01d8de019c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-17T22:56:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-04T09:50:42.000Z", "max_issues_repo_path": "test/datasets_test.jl", "max_issues_repo_name": "mjirik/lario3d.jl", "max_issues_repo_head_hexsha": "de2eaec62dfe8c63e7d621bc973aa01d8de019c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-16T15:47:22.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-18T17:43:46.000Z", "max_forks_repo_path": "test/datasets_test.jl", "max_forks_repo_name": "mjirik/lario3d.jl", "max_forks_repo_head_hexsha": "de2eaec62dfe8c63e7d621bc973aa01d8de019c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-05T15:01:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-05T15:01:47.000Z", "avg_line_length": 24.1538461538, "max_line_length": 68, "alphanum_fraction": 0.686836518, "num_tokens": 291}
|
# Import pyVPLM packages
from pyvplm.core.definition import PositiveParameter, PositiveParameterSet
from pyvplm.addon import variablepowerlaw as vpl
from pyvplm.addon import pixdoe as doe
from pint import UnitRegistry
import save_load as sl
import pi_format as pif
import csv_export as csv
import constraint_format as csf
import round_minmax as rmm
import constant_pi as cpi
import number_of_coeff as noc
import dependency_plot as dpp
import save_plots as spl
import save_py_func as spf
# Import external libs
import copy
import os
import pandas as pd
from pandas.plotting import scatter_matrix
import plotly.graph_objects as go
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as plb
import webbrowser
import ipyfilechooser as ipf
import time
from datetime import datetime
import ipywidgets as widgets
import ipyvuetify as v
from IPython.display import display, clear_output
import warnings
import seaborn as sns
from win32gui import GetWindowRect, GetForegroundWindow
# ------------Constants------------------------
from text_list import TEXT_LIST as TL
FORBIDDEN_CHARACTERS = [' ', '|', '*', '/', '-', '+', ',', "#", "!", "$", "£", "%", "^", "&", "?", ";", "ù", "é",
"@", "¤", "µ", "è", "°", "\\", '"', "'"]
FORBIDDEN_CHARACTERS_DESC = ['|', '"', "'", "#"]
FORBIDDEN_PARAMS = ['I', 'gamma', 'beta', 're', 'ln', 'log', 'sqrt', 'arg']
DOE_MULTIPLIER = 10
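# Note: the default wished DOE size proposed to the user is DOE_MULTIPLIER
# times the maximum number of model terms (see nb_of_terms/mo_to_size below).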
# ------------Global variables-----------------
WORKDIR = os.path.abspath(os.getcwd())
OUTPUTS = 0
PHYSICAL_PARAMS = None
OLD_PHYSICAL_PARAMS = None
CHOSEN_PI_SET = None
PI_SETS = [None, None, []]
CHOSEN_PI_LIST = []
PI_LISTS = [[], [], []]
DOE_PI_LIST = []
DOE = []
TOTAL_DOE = pd.DataFrame()
FIG = plt.Figure()
AX = FIG.add_subplot(111)
RESULT_DF = pd.DataFrame()
OLD_RESULT = pd.DataFrame()
OLD_PI_SET = []
RESULT_PI = np.array([])
DEPENDENCY_CHECK_STATE = []
OLD_DEPENDENCY_CHECK_STATE = []
REGRESSION_PI_LIST = []
MODELS = {}
REGRESSIONS = []
PI0_PI_LIST = []
"""
This is the code for GUI widgets and their associated functions. The first part contains all functions,
the second part (~line 2600) contains the widgets. These two parts are subdivided by tab name.
"""
# -----------Functions--------------------------------------------------------------------------------------------------
# Fist Physical Parameters Tab, some Buckingham tab and all Toolbar functions as well as some general helper functions
def check_name(name):
"""
Parameters
----------
name String in name TextField
Returns Boolean : True if the name is valid
-------
"""
if name == '':
name_entry.error_messages = TL[0]
return False
for for_char in FORBIDDEN_CHARACTERS:
if for_char in name:
name_entry.error_messages = f"{TL[1]}: {for_char}"
return False
for for_param in FORBIDDEN_PARAMS:
if name == for_param:
name_entry.error_messages = f"{TL[51]}: {for_param}"
return False
for item in sheet.items:
        if item['name'] == name or item['name'].lower() == name.lower():
name_entry.error_messages = TL[2]
return False
return True
def check_desc(desc):
"""
Parameters
----------
desc String in description TextField
Returns Boolean : True if the description is valid
-------
"""
for for_char in FORBIDDEN_CHARACTERS_DESC:
if for_char in desc:
desc_entry.error_messages = f"{TL[3]} : {for_char}"
return False
return True
def check_unit(unit):
"""
Parameters
----------
unit String in unit TextField
Returns Boolean : True if the unit is recognized by pint
-------
"""
if unit == '':
unit_entry.error_messages = TL[4]
return False
base_registry = UnitRegistry()
try:
if unit not in base_registry:
contains_upper = False
for u in unit:
if u.isupper():
contains_upper = True
break
if contains_upper:
unit_entry.error_messages = "Unit not recognized, try in lowercase"
else:
unit_entry.error_messages = TL[5]
return False
except Exception:
unit_entry.error_messages = "Invalid characters"
return False
return True
def check_bounds():
"""
Returns Boolean : True if the bounds in the lower bound and upper bound TextFields are valid
-------
"""
lb = lb_entry.v_model
ub = ub_entry.v_model
lbool = lb is None or lb == ""
ubool = ub is None or ub == ""
if ubool:
ub_entry.error_messages = TL[6]
return False
err_mess = TL[7]
if lbool:
try:
float(ub)
return True
except ValueError:
ub_entry.error_messages = err_mess
return False
else:
brk = False
try:
ub = float(ub)
except ValueError:
ub_entry.error_messages = err_mess
brk = True
try:
lb = float(lb)
except ValueError:
lb_entry.error_messages = err_mess
brk = True
if brk:
return False
if 0 < lb < ub:
return True
else:
neg = False
err_mess = TL[8]
if lb <= 0:
neg = True
lb_entry.error_messages = err_mess
if ub <= 0:
neg = True
ub_entry.error_messages = err_mess
if neg:
return False
else:
err_mess = TL[9]
lb_entry.error_messages = err_mess
ub_entry.error_messages = err_mess
return False
def add_item(widget, event, data):
"""
    Returns Adds the parameter specified by the user to the sheet DataTable; if one of the attributes is
    invalid, shows the user an error under the TextField
-------
"""
name_entry.error_messages = ''
desc_entry.error_messages = ''
unit_entry.error_messages = ''
lb_entry.error_messages = ''
ub_entry.error_messages = ''
if check_name(name_entry.v_model) and check_desc(desc_entry.v_model) and check_unit(
unit_entry.v_model) and check_bounds():
name = name_entry.v_model
description = desc_entry.v_model
unit = unit_entry.v_model
lb = lb_entry.v_model
if lb:
lower_bound = float(lb_entry.v_model)
else:
lower_bound = None
name = name.upper()
upper_bound = float(ub_entry.v_model)
name_entry.v_model = ''
desc_entry.v_model = ''
unit_entry.v_model = ''
lb_entry.v_model = None
ub_entry.v_model = None
sheet.items = sheet.items + [{"name": name,
"description": description,
"unit": unit,
"lower bound": lower_bound,
"upper bound": upper_bound,
"in/out": "Input"}]
def order_items():
"""
    Leaves output physical parameters at the end of the set (lowest priority to be chosen as repetitive parameters)
Returns ordered physical parameters
-------
"""
data = sheet.items
inputs = []
outputs = []
for item in data:
if item["in/out"] == TL[10]:
outputs.append(item)
else:
inputs.append(item)
return inputs + outputs
def gen_parameter_set():
"""
Returns Generates a PositiveParameterSet from the physical parameters in the sheet DataTable, if there are none,
returns None
-------
"""
data = order_items()
if len(data) > 0:
first = True
param_set = {}
for item in data:
if item['lower bound'] is None or item['lower bound'] == "":
bounds = [item['upper bound']]
item['name'] = item['name'].upper()
else:
bounds = [item['lower bound'], item['upper bound']]
param = PositiveParameter(item['name'], bounds, item['unit'], item['description'])
param_set[item['name']] = param
if first:
param_set = PositiveParameterSet(param)
first = False
return param_set
return None
def get_outputs():
"""
Returns int : The number of output parameters specified
-------
"""
global OUTPUTS
n = 0
for item in sheet.items:
if item['in/out'] == TL[10]:
n += 1
OUTPUTS = n
def buckingham():
"""
Returns Shows the set in buck_area and modifies current_set
-------
"""
global PHYSICAL_PARAMS, PI_LISTS, PI_SETS
if PHYSICAL_PARAMS is not None:
# noinspection PyTypeChecker
PI_SETS[0], PI_LISTS[0] = vpl.buckingham_theorem(PHYSICAL_PARAMS, True)
pi_set_str = str(PI_SETS[0])
formatted_pi_set = pif.format_pi_set(pi_set_str)
buck_area.v_model = formatted_pi_set
if force_area.v_model is None or force_area.v_model == "":
force_area.v_model = formatted_pi_set
if check1.v_model:
global CHOSEN_PI_SET, CHOSEN_PI_LIST
CHOSEN_PI_SET = PI_SETS[0]
CHOSEN_PI_LIST = PI_LISTS[0]
update_current_set()
if PI_LISTS[0]:
return True
return False
def force_buckingham(widget, event, data):
"""
Parameters
----------
widget force_buck_btn : button to check pi set
Returns Enables selection of the specified pi set if it is valid
-------
"""
widget.disabled = True
widget.loading = True
if force_buck_btn.children == [TL[11]]:
param_set = gen_parameter_set()
global OUTPUTS
out_n = OUTPUTS
try:
global PI_LISTS
PI_LISTS[1] = pif.format_force_area(force_area.v_model)
global PI_SETS
PI_SETS[1] = vpl.force_buckingham(param_set, *PI_LISTS[1])
if pif.check_outputs(PI_LISTS[1], param_set, out_n):
raise ValueError(TL[12])
force_area.error_messages = ""
force_area.success_messages = TL[13]
check2.disabled = False
force_area.readonly = True
force_area.clearable = False
if ' | ' in force_area.v_model:
force_area.v_model = force_area.v_model.replace(' | ', '\n')
force_area.background_color = "grey lighten-3"
force_eq.disabled = True
force_eq.v_model = ""
force_eq.background_color = "grey lighten-3"
add_pi_btn.disabled = True
force_copy_btn.disabled = True
force_buck_btn.children = [TL[14]]
except Exception as e:
force_area.success_messages = ""
force_area.error_messages = TL[15] + str(e)
else:
force_area.success_messages = ""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if check2.v_model:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
check2.disabled = True
check2.v_model = False
force_area.readonly = False
force_area.clearable = True
force_area.background_color = "white"
force_eq.disabled = False
force_eq.background_color = "white"
add_pi_btn.disabled = False
if auto_buck_table.v_model:
force_copy_btn.disabled = False
force_area.messages = ""
force_buck_btn.children = [TL[11]]
widget.loading = False
widget.disabled = False
def automatic_buckingham(widget, event, data):
"""
Parameters
----------
widget auto_buck_btn : button to perform automatic Buckingham analysis
Returns Fills auto_buck_table with the resulting pi sets
-------
"""
widget.disabled = True
widget.loading = True
param_set = gen_parameter_set()
combinator_pi_set, alternative_set_dict = vpl.automatic_buckingham(param_set, True)
global PI_SETS, PI_LISTS, PHYSICAL_PARAMS, OUTPUTS
for n in combinator_pi_set:
PI_SETS[2].append(combinator_pi_set[n][0])
PI_LISTS[2].append(list(combinator_pi_set[n][1]))
items = []
i = 0
j = 1
del_index = []
for exp in alternative_set_dict:
if not pif.check_outputs(PI_LISTS[2][i], PHYSICAL_PARAMS, OUTPUTS):
items.append({"pi set number": j, "expressions": exp})
j += 1
else:
del_index.append(i)
i += 1
del_index.reverse()
for i in del_index:
PI_SETS[2].pop(i)
PI_LISTS[2].pop(i)
auto_buck_table.items = items
if force_buck_btn.children == [TL[11]]:
force_copy_btn.disabled = False
check3.disabled = False
widget.loading = False
widget.disabled = False
def force_copy(widget, event, data):
"""
Returns Copies the selected pi set from auto_buck_table or buck area to force_area
-------
"""
l = len(auto_buck_table.items)
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number']:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
for i in range(0, l):
if auto_buck_table.items[i]['pi set number'] == pi_set_nb:
force_area.v_model = pif.format_auto_pi_set(auto_buck_table.v_model[0]['expressions'])
break
elif check1.v_model:
force_area.v_model = buck_area.v_model
def check1_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the pi set in buck_area
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check2.v_model = False
check3.v_model = False
CHOSEN_PI_SET = PI_SETS[0]
CHOSEN_PI_LIST = PI_LISTS[0]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def check2_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the pi set in force_area
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check1.v_model = False
check3.v_model = False
CHOSEN_PI_SET = PI_SETS[1]
CHOSEN_PI_LIST = PI_LISTS[1]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def check3_change(widget, event, data):
"""
Parameters
----------
event Boolean : state of the checkbox
Returns Modifies current_set with the selected pi set in auto_buck_table
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if data:
check1.v_model = False
check2.v_model = False
l = len(auto_buck_table.items)
if auto_buck_table.v_model:
if auto_buck_table.v_model[0]['pi set number'] is None:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
else:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
CHOSEN_PI_SET = PI_SETS[2][pi_set_nb - 1]
CHOSEN_PI_LIST = PI_LISTS[2][pi_set_nb - 1]
for i in range(0, l):
if auto_buck_table.items[i]['pi set number'] == pi_set_nb:
update_current_set()
break
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def select_auto_pi_set(widget, event, data):
"""
Parameters
----------
data dict: Contains the pi set number of the selected pi set in the automatic buckingham data table
Returns Modifies current set accordingly
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST
if check3.v_model:
if data['value']:
pi_set_nb = data['item']['pi set number']
CHOSEN_PI_SET = PI_SETS[2][pi_set_nb - 1]
CHOSEN_PI_LIST = PI_LISTS[2][pi_set_nb - 1]
update_current_set()
else:
CHOSEN_PI_SET = None
CHOSEN_PI_LIST = []
update_current_set()
def pi_set_html(pi_set, math=True):
"""
Parameters
----------
pi_set: Pi set in a string form (with " | " separators between pi numbers)
math: display expression as Latex math (default True)
Returns A list of v.HTML widgets that are to be used as children of a v.CardText
-------
"""
if not math:
pi_set = pi_set.replace("**", "°°")
pi_set = pi_set.replace("*", " * ")
pi_set = pi_set.replace("°°", "**")
spt_pi_set = pi_set.split("| ")
card_text_children = []
for pi in spt_pi_set:
card_text_children.append(v.Html(tag='div', children=[pi]))
return card_text_children
else:
pi_set = pi_set.replace("**", "^{")
spt_pi_set = pi_set.split("| ")
for i in range(len(spt_pi_set)):
pi_expr = spt_pi_set[i]
pi_expr = pi_expr.replace(f"pi", f"\pi_", 1)
pi = list(pi_expr)
open_bracket = False
for j in range(len(pi)):
if pi[j] == "{":
open_bracket = True
if pi[j] == "*" and open_bracket:
pi[j] = "}"
open_bracket = False
pi_expr = "".join(pi)
pi_expr = pi_expr.replace("}", "}\\ \cdot \\ ")
pi_expr = pi_expr.replace("*", "\\ \cdot \\ ")
if open_bracket:
pi_expr += "}"
pi_expr = pi_expr.replace("=", "\\ = \\")
spt_pi_set[i] = pi_expr
card_text_children = []
str_latex = r"$"
for pi in spt_pi_set:
str_latex += pi + r"\\"
card_text_children.append(widgets.HTMLMath(str_latex + "$"))
return card_text_children
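# Illustrative trace of the math branch of pi_set_html (assuming the " | "
# separated format produced by pif.pi_list_to_str):
#   pi_set_html("pi1 = x**2*y | pi2 = z/x")
# yields a single HTMLMath widget whose source is roughly
#   $\pi_1 \ = \ x^{2}\ \cdot \ y \\\pi_2 \ = \ z/x\\$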
def update_current_set():
"""
Returns Shows the current selected pi set to the user in current_set Card
-------
"""
global CHOSEN_PI_LIST
out_set = pif.pi_list_to_str(CHOSEN_PI_LIST)
if out_set:
current_set.children[0].children = [TL[52]]
current_set.color = "green lighten-3"
else:
current_set.children[0].children = [TL[53]]
current_set.color = "grey lighten-3"
current_set.children[1].children = pi_set_html(out_set)
def del_item(widget, event, data):
"""
Returns Deletes the selected parameter from the sheet data table
-------
"""
if sheet.v_model:
item_name = sheet.v_model[0]['name']
        for i in range(len(sheet.items)):
            if sheet.items[i]['name'] == item_name:
                # slicing handles the last item too (sheet.items[i + 1:] is then empty)
                sheet.items = sheet.items[0:i] + sheet.items[i + 1:]
                break
def del_all(widget, event, data):
"""
Returns Deletes all parameters from the sheet data table
-------
"""
sheet.items = []
def up_item(widget, event, data):
"""
Returns Moves up the selected parameter in the sheet data table
-------
"""
l = len(sheet.items)
if l >= 2 and sheet.v_model:
item_name = sheet.v_model[0]['name']
        for i in range(1, l):
            if sheet.items[i]['name'] == item_name:
                # slicing handles the last item too (sheet.items[i + 1:] is then empty)
                sheet.items = sheet.items[0:i - 1] + [sheet.items[i]] + [sheet.items[i - 1]] + sheet.items[i + 1:]
                break
def down_item(widget, event, data):
"""
Returns Moves down the selected parameter in the sheet data table
-------
"""
l = len(sheet.items)
if l >= 2 and sheet.v_model:
item_name = sheet.v_model[0]['name']
        for i in range(0, l - 1):
            if sheet.items[i]['name'] == item_name:
                # slicing handles the second-to-last item too (sheet.items[i + 2:] is then empty)
                sheet.items = sheet.items[0:i] + [sheet.items[i + 1]] + [sheet.items[i]] + sheet.items[i + 2:]
                break
def set_as_out(widget, event, data):
"""
Returns Sets the selected parameter as output in the sheet data table
-------
"""
l = len(sheet.items)
if l > 0 and sheet.v_model:
item_name = sheet.v_model[0]['name']
        for i in range(0, l):
            if sheet.items[i]['name'] == item_name:
                if sheet.items[i]['in/out'] == 'Input' and (sheet.items[i]['lower bound'] is None
                                                            or sheet.items[i]['lower bound'] == ""):
                    # constants (defined by an upper bound only) cannot be set as outputs
                    const_alert.value = True
                else:
                    toggled = dict(sheet.items[i])
                    toggled['in/out'] = 'Output' if toggled['in/out'] == 'Input' else 'Input'
                    sheet.items = sheet.items[0:i] + [toggled] + sheet.items[i + 1:]
                break
def error_end(widget, event, data):
"""
Parameters
----------
widget Current widget
Returns Hides the error messages on the current widget
-------
"""
widget.error_messages = ""
def pint_link(widget, event, data):
"""
Returns Opens browser to a page with all pint base units
-------
"""
webbrowser.open_new(r"https://raw.githubusercontent.com/hgrecco/pint/master/pint/default_en.txt")
def new_log(log, success: bool):
"""
Parameters
----------
log The string to be shown if the logs field
success If true, the log will be displayed in green (in red if False)
Returns Replaces previous log with current log in the logs field
-------
"""
if success:
logs_card.class_ = logs_card.class_ + "; green--text"
logs_card.children = [v.Html(tag='div', children=[log], class_="text-left py-2 px-2")]
else:
logs_card.class_ = logs_card.class_ + "; red--text"
logs_card.children = [v.Html(tag='div', children=[log], class_="text-left py-2 px-2")]
def choose_dir(widget, event, data):
"""
Returns Opens the dialog_dir dialog box and initializes it
-------
"""
global WORKDIR
dialog_dir.children[0].children[1].children = ["Current work directory: " + WORKDIR]
dialog_dir.v_model = True
def hide_dir(chooser):
"""
Returns Effectively changes the current work directory (WORKDIR) and closes the dialog_dir dialog box
-------
"""
global WORKDIR
old_workdir = WORKDIR
spl.add_temp(old_workdir)
WORKDIR = fc_dir.selected
spl.move_temp(old_workdir, WORKDIR)
dialog_dir.v_model = False
new_log(f"Work directory: {WORKDIR}", True)
dir_btn.color = "green"
time.sleep(0.5)
dir_btn.color = "default"
def save(widget, event, data):
"""
Parameters
----------
widget The save button int the toolbar
Returns Creates a new pyVPLM save in the work directory with a default name containing date and time
-------
"""
    widget.disabled = True
    widget.loading = True
    global WORKDIR
    now = datetime.now()
    dt_string = now.strftime("%d-%m-%y_%H-%M-%S")
    file_path = WORKDIR + "\\pyVPLM_" + dt_string + ".txt"
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number'] is not None:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
else:
pi_set_nb = 0
force_state = force_buck_btn.children == [TL[11]]
tab2_state = [check1.v_model, check2.v_model, check3.v_model, force_state, pi_set_nb]
result = [[header["text"] for header in result_data.headers], result_data.items]
doe_params = [select_DOE.v_model, select_log.v_model, anticipated_mo_entry.v_model]
reg_state = [select_pi0.v_model, select_reg_criteria.v_model, model_order_entry.v_model, select_reg_type.v_model,
nb_terms_slider.v_model]
sl.save(file_path, sheet.items, buck_area.v_model, force_area.v_model, auto_buck_table.items, tab2_state,
PHYSICAL_PARAMS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST, phy_const_area.v_model,
pi_const_area.v_model, doe_params, DOE, result, threshold_slider.v_model, DEPENDENCY_CHECK_STATE,
REGRESSION_PI_LIST, reg_state, MODELS)
    widget.loading = False
    widget.disabled = False
new_log(f"Saved at: {file_path}", True)
widget.color = "green"
time.sleep(0.5)
widget.color = "default"
def save_as(widget, event, data):
"""
Returns Shows the save dialog
-------
"""
global WORKDIR
dialog.children[0].children[1].children = ["Current work directory: " + WORKDIR]
dialog.v_model = True
def hide_save_as(widget, event, data):
"""
Parameters
----------
widget The OK button in the save dialog
Returns Saves a .txt file with all current user input to the specified path and hides the save dialog
-------
"""
global WORKDIR
save_as_tf.error_messages = ""
if save_as_tf.v_model.strip():
file_path = WORKDIR + "\\" + save_as_tf.v_model + ".txt"
widget.disabled = True
widget.loading = True
if auto_buck_table.v_model and auto_buck_table.v_model[0]['pi set number'] is not None:
pi_set_nb = auto_buck_table.v_model[0]['pi set number']
else:
pi_set_nb = 0
force_state = force_buck_btn.children == [TL[11]]
tab2_state = [check1.v_model, check2.v_model, check3.v_model, force_state, pi_set_nb]
result = [[header["text"] for header in result_data.headers], result_data.items]
doe_params = [select_DOE.v_model, select_log.v_model, anticipated_mo_entry.v_model]
reg_state = [select_pi0.v_model, select_reg_criteria.v_model, model_order_entry.v_model,
select_reg_type.v_model, nb_terms_slider.v_model]
sl.save(file_path, sheet.items, buck_area.v_model, force_area.v_model, auto_buck_table.items, tab2_state,
PHYSICAL_PARAMS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST, phy_const_area.v_model,
pi_const_area.v_model, doe_params, DOE, result, threshold_slider.v_model, DEPENDENCY_CHECK_STATE,
REGRESSION_PI_LIST, reg_state, MODELS)
dialog.v_model = False
widget.disabled = False
widget.loading = False
new_log(f"Saved at: {file_path}", True)
save_as_btn.color = "green"
time.sleep(0.5)
save_as_btn.color = "default"
else:
        save_as_tf.error_messages = "Please specify a file name"
def save_plots(widget, event, data):
"""
Parameters
----------
widget The save all plots button from the toolbar
    Returns Saves all the plots from the temp directory to the work directory with default names containing date and time
-------
"""
try:
spl.save_all_plots(WORKDIR)
new_log(f"All plots saved at: {WORKDIR}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
except FileNotFoundError:
new_log(f"No plots to save", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
def load(widget, event, data):
"""
Returns Shows the load dialog
-------
"""
global WORKDIR
fc_load.default_path = WORKDIR
dialog2.v_model = True
def hide_ld(chooser):
"""
Parameters
----------
    chooser The file chooser in the load dialog
Returns Loads a .txt file and modifies the state of all widgets accordingly, hides the load dialog
-------
"""
file_path = fc_load.selected
if file_path:
global OLD_PHYSICAL_PARAMS, PHYSICAL_PARAMS, OUTPUTS, PI_SETS, CHOSEN_PI_SET, PI_LISTS, CHOSEN_PI_LIST,\
RESULT_DF, RESULT_PI, DEPENDENCY_CHECK_STATE, REGRESSION_PI_LIST, MODELS
try:
load_tuple = sl.load(file_path)
except FileNotFoundError:
fc_load.reset()
dialog2.v_model = False
new_log(f"Failed to load, file does not exist", False)
load_btn.color = "red"
time.sleep(0.5)
load_btn.color = "default"
return -1
if len(load_tuple) != 20:
fc_load.reset()
dialog2.v_model = False
new_log(f"Failed to load, invalid file", False)
load_btn.color = "red"
time.sleep(0.5)
load_btn.color = "default"
return -1
dialog2.v_model = False
fc_load.reset()
load_btn.color = "green"
new_log(f"Loaded: {file_path}", True)
sheet.items = load_tuple[0]
buck_area.v_model = load_tuple[1]
force_area.v_model = load_tuple[2]
auto_buck_table.items = load_tuple[3]
tab2_state = load_tuple[4]
PHYSICAL_PARAMS = load_tuple[5]
OLD_PHYSICAL_PARAMS = load_tuple[5]
OUTPUTS = load_tuple[6]
PI_SETS = load_tuple[7]
CHOSEN_PI_SET = load_tuple[8]
PI_LISTS = load_tuple[9]
CHOSEN_PI_LIST = load_tuple[10]
update_current_set()
check1.v_model = tab2_state[0]
check2.v_model = tab2_state[1]
check3.v_model = tab2_state[2]
if tab2_state[3]:
force_area.error_messages = ""
force_area.success_messages = ""
check2.disabled = True
check2.v_model = False
force_area.readonly = False
force_area.clearable = True
force_area.background_color = "white"
force_eq.disabled = False
force_eq.background_color = "white"
add_pi_btn.disabled = False
if auto_buck_table.v_model:
force_copy_btn.disabled = False
force_buck_btn.children = [TL[11]]
else:
force_area.error_messages = ""
force_area.success_messages = TL[18]
check2.disabled = False
force_area.readonly = True
force_area.clearable = False
force_area.background_color = "grey lighten-3"
force_eq.disabled = True
force_eq.v_model = ""
force_eq.background_color = "grey lighten-3"
add_pi_btn.disabled = True
force_copy_btn.disabled = True
force_buck_btn.children = [TL[14]]
if tab2_state[4] == 0:
check3.disabled = True
else:
check3.disabled = False
setattr(auto_buck_table, 'v_model', [auto_buck_table.items[tab2_state[4] - 1]])
anticipated_mo_entry.v_model = load_tuple[12][2]
change_tab_3()
phy_const_area.v_model = load_tuple[11][0]
pi_const_area.v_model = load_tuple[11][1]
select_DOE.v_model = load_tuple[12][0]
select_log.v_model = load_tuple[12][1]
does = load_tuple[13]
if does:
doeX, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active = does
reduced_parameter_set, reduced_pi_set = PHYSICAL_PARAMS, CHOSEN_PI_SET
for out in list(PHYSICAL_PARAMS.dictionary.keys())[-OUTPUTS:]:
reduced_parameter_set, reduced_pi_set = vpl.reduce_parameter_set(reduced_parameter_set,
reduced_pi_set,
elected_output=out)
init_doe_plots(doeX, reduced_parameter_set, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active,
reduced_pi_set)
if len(doe_box.children) == 3:
doe_box.children = list(doe_box.children) + [exp_panel_doe]
result_headers, result_items = load_tuple[14]
result_data.headers = csv.format_headers(result_headers)
result_data.items = result_items
if result_items:
RESULT_DF = pd.DataFrame(result_items)
func_x_to_pi = vpl.declare_func_x_to_pi(PHYSICAL_PARAMS, CHOSEN_PI_SET)
ordered_columns = []
for key in PHYSICAL_PARAMS.dictionary:
ordered_columns.append(f"{key} [{PHYSICAL_PARAMS.dictionary[key].defined_units}]")
re_ordered_result = RESULT_DF[ordered_columns]
RESULT_PI = func_x_to_pi(re_ordered_result.to_numpy(dtype=float))
threshold_slider.v_model = load_tuple[15]
DEPENDENCY_CHECK_STATE = load_tuple[16]
REGRESSION_PI_LIST = load_tuple[17]
reg_state = load_tuple[18]
if reg_state:
select_pi0.v_model = reg_state[0]
select_reg_criteria.v_model = reg_state[1]
model_order_entry.v_model = int(reg_state[2])
select_reg_type.v_model = reg_state[3]
MODELS = load_tuple[19]
if MODELS:
regression_models(models_btn, 0, 0, slider_state=int(reg_state[4]))
if tabs.v_model == 5:
change_tab_5()
if tabs.v_model == 6:
change_tab_6()
time.sleep(0.5)
load_btn.color = "default"
else:
dialog2.v_model = False
# --------- Buckingham Tab Functions -----------------------------------------------------------------------------------
def add_pi(widget, event, data):
"""
Returns Adds the pi number specified in force_eq to force_area
-------
"""
index = pif.get_pi_index(force_area.v_model)
if force_eq.v_model is None or force_eq.v_model == "":
force_eq.error_messages = TL[21]
else:
exp = pif.format_input(force_eq.v_model, index)
if force_area.v_model is not None:
force_area.v_model += exp + "\n"
else:
force_area.v_model = exp + "\n"
force_eq.v_model = ""
def tab2_reload():
"""
Returns Reloads Buckingham Theorem Tab
-------
"""
global CHOSEN_PI_SET, CHOSEN_PI_LIST, PI_SETS, PI_LISTS
CHOSEN_PI_SET = None
PI_SETS = [None, None, []]
CHOSEN_PI_LIST = []
PI_LISTS = [[], [], []]
update_current_set()
buck_area.v_model = ""
check1.v_model = True
force_buck_btn.disabled = False
force_buck_btn.children = [TL[11]]
force_eq.v_model = ""
force_eq.error_messages = ""
force_area.v_model = ""
force_area.success_messages = ""
force_area.error_messages = ""
force_area.readonly = False
force_area.clearable = True
add_pi_btn.disabled = False
force_copy_btn.disabled = False
check2.disabled = True
check2.v_model = False
auto_buck_btn.disabled = False
auto_buck_table.items = []
check3.disabled = True
check3.v_model = False
def tab2_disable():
"""
Returns Disables Buckingham Theorem Tab
-------
"""
force_buck_btn.disabled = True
auto_buck_btn.disabled = True
check1.disabled = True
check1.v_model = False
def tab2_enable():
"""
Returns Enables Buckingham Theorem Tab
-------
"""
force_buck_btn.disabled = False
auto_buck_btn.disabled = False
check1.disabled = False
# -----DOE Tab functions------------------------------------------------------------------------------------------------
def add_phy_const(widget, event, data):
"""
Returns Adds a physical constraint from the text field to the text area
-------
"""
phy_const_entry.error_messages = ""
if phy_const_entry.v_model is None or phy_const_entry.v_model == "":
phy_const_entry.error_messages = TL[21]
else:
exp = phy_const_entry.v_model
if phy_const_area.v_model is not None:
phy_const_area.v_model += exp + "\n"
else:
phy_const_area.v_model = exp + "\n"
phy_const_entry.v_model = ""
def add_pi_const(widget, event, data):
"""
Returns Adds a pi constraint from the text field to the text area
-------
"""
pi_const_entry.error_messages = ""
if pi_const_entry.v_model is None or pi_const_entry.v_model == "":
pi_const_entry.error_messages = TL[21]
else:
exp = pi_const_entry.v_model
if pi_const_area.v_model is not None:
pi_const_area.v_model += exp + "\n"
else:
pi_const_area.v_model = exp + "\n"
pi_const_entry.v_model = ""
def nb_of_terms():
"""
Returns The maximum number of terms for the given model order and the amount of input pi numbers
-------
"""
n = int(anticipated_mo_entry.v_model)
p = len(CHOSEN_PI_LIST) - OUTPUTS
return noc.coefficient_nb(n, p, approx=(p >= 2*n and n > 10))
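# Worked example (assuming noc.coefficient_nb counts the terms of a full
# polynomial of order n in p variables, i.e. C(n + p, n)): a 2nd-order model
# with 4 input pi numbers has C(6, 2) = 15 terms, so mo_to_size() below
# proposes a default wished size of DOE_MULTIPLIER * 15 = 150 points.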
def mo_to_size(widget, event, data):
"""
Parameters
----------
widget Anticipated model order field
Returns Sets the default wished size to 10x max number of terms
-------
"""
nb_terms = nb_of_terms()
wished_size_entry.v_model = DOE_MULTIPLIER * nb_terms
model_order_entry.v_model = widget.v_model
widget.messages = ""
wished_size_entry.messages = ""
def check_size(widget, event, data):
"""
Returns Checks if the wished size is not too low or too high compared to the default wished size and shows warnings
-------
"""
expected = DOE_MULTIPLIER * nb_of_terms()
if int(wished_size_entry.v_model) > int(2*expected) or\
int(0.5 * expected) > int(wished_size_entry.v_model) >= int(expected/DOE_MULTIPLIER):
wished_size_entry.messages = "Warning: size not advised for model order"
anticipated_mo_entry.messages = "Warning: size not advised for model order"
elif int(wished_size_entry.v_model) < int(expected/DOE_MULTIPLIER):
wished_size_entry.messages = "Warning: size too low for model order, model computation will fail"
anticipated_mo_entry.messages = "Warning: size too low for model order, model computation will fail"
else:
wished_size_entry.messages = ""
anticipated_mo_entry.messages = ""
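# Continuing the nb_of_terms() example (15 terms, expected size 150): sizes
# above 300 or in [15, 75) trigger the advisory warning, and sizes below 15
# trigger the "model computation will fail" warning.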
def gen_doe(widget, event, data):
"""
Returns Displays the generate DOE dialog box and initializes it
-------
"""
global WORKDIR
dialog3.v_model = True
dialog3.children[0].children[1].children = ["Current work directory: " + WORKDIR]
now = datetime.now()
dt_string = now.strftime("%d-%m-%y_%H-%M-%S")
doe_tf.v_model = "pyVPLM_" + dt_string
def customize_2d_plot(widget, event, data):
"""
Parameters
----------
widget The current range slider or one of the two selection fields (for the axis)
    Returns Redraws the customizable 2D plot using only the points allowed by the range sliders
-------
"""
global AX, TOTAL_DOE
widget.loading = True
new_df = TOTAL_DOE
i = 0
for col in new_df:
[col_min, col_max] = range_sliders.children[2*i + 1].v_model
new_df = new_df[(new_df[col] >= col_min) & (new_df[col] <= col_max)]
i += 1
with customizable_2d_plot_output:
clear_output(wait=True)
AX.clear()
AX.set_xlabel(select_2d_x.v_model)
AX.set_ylabel(select_2d_y.v_model)
AX.plot(new_df[select_2d_x.v_model], new_df[select_2d_y.v_model], 'o')
display(AX.figure)
widget.loading = False
def init_doe_plots(doeX, parameter_set, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active, pi_set, log=True):
"""
Parameters
----------
doeX numpy array with the DOE of physical parameters
parameter_set PositiveParameterSet with all input physical parameters
doePi numpy array with the DOE of pi numbers (elected points)
doePi_all numpy array with the DOE of pi numbers (all points)
doePi_nearest numpy array with the DOE of pi numbers (3 nearest from objective points)
doePi_all_obj numpy array with the DOE of pi numbers (all objective points)
doePI_active numpy array with the DOE of pi numbers (active objective points)
pi_set PositiveParameterSet with all input pi numbers
log Toggles display in log space for all plots
Returns Initializes all DOE plots
-------
"""
spl.add_temp(WORKDIR)
_, _, ww, _ = GetWindowRect(GetForegroundWindow())
error = False
if log:
doeX = np.log10(doeX)
doePi = np.log10(doePi)
doePi_all = np.log10(doePi_all)
doePi_nearest = np.log10(doePi_nearest)
doePi_all_obj = np.log10(doePi_all_obj)
doePI_active = np.log10(doePI_active)
columns = []
constants = []
for key in parameter_set.dictionary:
if log:
column_name = f"log10({key}) [{parameter_set.dictionary[key].defined_units}]"
else:
column_name = f"{key} [{parameter_set.dictionary[key].defined_units}]"
columns.append(column_name)
if len(parameter_set.dictionary[key].defined_bounds) == 0:
constants.append(column_name)
df = pd.DataFrame(data=doeX, columns=columns)
df = df.drop(labels=constants, axis=1)
phy_scatter_matrix_output.clear_output()
with phy_scatter_matrix_output:
try:
plt.rcParams['axes.labelsize'] = 14
sm1 = scatter_matrix(df, figsize=(30*ww/1928, 30*ww/1928), alpha=0.9, diagonal="kde")
for i in range(np.shape(sm1)[0]):
for j in range(np.shape(sm1)[1]):
if i < j:
sm1[i, j].set_visible(False)
elif i == j:
x_ = sm1[i, j].lines[0].get_xdata()
y_ = sm1[i, j].lines[0].get_ydata()
                        sm1[i, j].fill_between(x_, y_, alpha=0.54)  # little easter egg
try:
plt.savefig(WORKDIR + "\\temp\\phy_scatter_matrix.pdf")
except Exception:
new_log("Failed to save phy_scatter_matrix.pdf in \\temp", False)
plt.show()
except ValueError:
error = True
columns_2 = []
for key in pi_set.dictionary:
if log:
columns_2.append("log10(" + key + ")")
else:
columns_2.append(key)
df_2 = pd.DataFrame(data=doePi, columns=columns_2)
constant_pi = cpi.get_constant_pi(df_2)
df_2 = df_2.drop(labels=constant_pi, axis=1)
df_2_1 = pd.DataFrame(data=doePi_all, columns=columns_2)
df_2_1 = df_2_1.drop(labels=constant_pi, axis=1)
df_2_2 = pd.DataFrame(data=doePi_nearest, columns=columns_2)
df_2_2 = df_2_2.drop(labels=constant_pi, axis=1)
df_2_3 = pd.DataFrame(data=doePI_active, columns=columns_2)
df_2_3 = df_2_3.drop(labels=constant_pi, axis=1)
df_2_4 = pd.DataFrame(data=doePi_all_obj, columns=columns_2)
df_2_4 = df_2_4.drop(labels=constant_pi, axis=1)
df_2_f = pd.concat([df_2, df_2_1, df_2_2, df_2_3, df_2_4], ignore_index=True)
lab0 = "Elected (Feas.)"
lab1 = "All (Feas.)"
lab2 = "3 Nearest (Feas.)"
lab3 = "Active (Obj.)"
lab4 = "All (Obj.)"
df_2_f["DOE points"] = [lab0]*len(df_2) + [lab1]*len(df_2_1) + [lab2]*len(df_2_2) + [lab3]*len(df_2_3) + \
[lab4]*len(df_2_4)
pi_scatter_matrix_output.clear_output()
with pi_scatter_matrix_output:
        palette = {lab0: "blue", lab1: "green", lab2: "cyan", lab3: "red", lab4: "black"}
        try:
            ax = sns.pairplot(df_2_f, hue="DOE points", hue_order=[lab3, lab0, lab2, lab1, lab4], palette=palette,
                              corner=True, height=7*ww/1928, markers=["D", ".", ".", "s", "."], diag_kind="kde",
                              diag_kws={'bw_method': 0.3})
            plt.savefig(WORKDIR + "\\temp\\pi_scatter_matrix.pdf")
            plb.setp(ax.legend.get_texts(), fontsize='22')  # for legend text
            plb.setp(ax.legend.get_title(), fontsize='32')  # for legend title
            plt.show()
        except ValueError:
            error = True
if error:
raise ValueError
df_3 = pd.concat([df, df_2], axis=1)
df_3_col_list = list(df_3.columns)
select_2d_x.items = df_3_col_list
select_2d_x.v_model = df_3_col_list[0]
select_2d_y.items = df_3_col_list
select_2d_y.v_model = df_3_col_list[1]
range_slider_card.width = ww * 600 / 1928
range_slider_card.height = ww * 580 / 1928
range_sliders.children = []
for col_name in df_3_col_list:
col_min = rmm.round_min(df_3[col_name].min())
col_max = rmm.round_max(df_3[col_name].max())
act_min = col_min - 0.1*abs(col_min)
act_max = col_max + 0.1*abs(col_max)
step = round((act_max - act_min) / 100, 2)
range_sliders.children = range_sliders.children + [v.Subheader(children=[col_name], class_="justify-center"),
v.RangeSlider(min=act_min,
max=act_max,
v_model=[act_min, act_max],
step=step,
thumb_label="always")]
for i in range(len(range_sliders.children)):
if i % 2 == 1:
range_sliders.children[i].on_event("change", customize_2d_plot)
customizable_2d_plot_output.clear_output()
with customizable_2d_plot_output:
global AX
AX.clear()
AX.set_xlabel(select_2d_x.v_model)
AX.set_ylabel(select_2d_y.v_model)
AX.plot(df_3[select_2d_x.v_model], df_3[select_2d_y.v_model], 'o')
FIG.set(size_inches=(10*ww/1928, 10*ww/1928))
FIG.savefig(WORKDIR + "\\temp\\customizable_2D_plot.pdf")
display(AX.figure)
fig = go.FigureWidget()
fig.layout.width = ww - 300 if ww - 300 > 200 else 200
fig.layout.height = 800
parcoords_labels = {}
if len(df_3.columns) < 5:
for col_name in df_3.columns:
parcoords_labels[col_name] = col_name
else:
for col_name in df_3.columns:
parcoords_labels[col_name] = col_name.split("[")[0]
fig.add_parcoords(dimensions=[{'label': parcoords_labels[n], 'values': df_3[n]} for n in df_3.columns],
line=dict(color=df_3[df_3.columns[len(df_3.columns) - 1]],
colorscale='Bluered_r', showscale=False)
)
parallel_plot_box.children = [fig]
fig.write_image(WORKDIR + "\\temp\\parallel_plot.pdf")
global TOTAL_DOE
TOTAL_DOE = df_3
# noinspection PyTypeChecker,PyUnresolvedReferences
def hide_doe(widget, event, data):
"""
Parameters
----------
widget Ok button in the DOE dialog box
    Returns Hides the dialog box and generates the DOE, calls init_doe_plots to initialize the plots
-------
"""
widget.disabled = True
widget.loading = True
gen_DOE_btn.disabled = True
gen_DOE_btn.loading = True
doe_tf.error_messages = ""
if doe_tf.v_model.strip():
phy_save_btn.disabled = True
pi_save_btn.disabled = True
cus_save_btn.disabled = True
para_save_btn.disabled = True
file_name = WORKDIR + "\\" + doe_tf.v_model + ".csv"
valid_input = True
if PHYSICAL_PARAMS is None or CHOSEN_PI_SET is None:
valid_input = False
if file_name and valid_input:
reduced_parameter_set, reduced_pi_set = PHYSICAL_PARAMS, CHOSEN_PI_SET
out_headers = []
for out in list(PHYSICAL_PARAMS.dictionary.keys())[-OUTPUTS:]:
reduced_parameter_set, reduced_pi_set = vpl.reduce_parameter_set(reduced_parameter_set,
reduced_pi_set,
elected_output=out)
out_headers.append(out + " [" + PHYSICAL_PARAMS.dictionary[out].defined_units + "]")
func_x_to_pi = vpl.declare_func_x_to_pi(reduced_parameter_set, reduced_pi_set)
try:
parameter_constraints = vpl.declare_constraints(reduced_parameter_set,
csf.str_to_constraint_set(phy_const_area.v_model))
except (ValueError, SyntaxError):
phy_const_area.error_messages = "Invalid constraints"
widget.disabled = False
widget.loading = False
gen_DOE_btn.disabled = False
gen_DOE_btn.loading = False
dialog3.v_model = False
return -1
try:
pi_constraints = vpl.declare_constraints(reduced_pi_set,
csf.str_to_constraint_set(pi_const_area.v_model))
except (ValueError, SyntaxError):
pi_const_area.error_messages = "Invalid constraints"
widget.disabled = False
widget.loading = False
dialog3.v_model = False
gen_DOE_btn.disabled = False
gen_DOE_btn.loading = False
return -1
relative_points_vector = []
for i in range(len(relative_nb_points_sliders.children)):
if i % 2 == 1:
slider = relative_nb_points_sliders.children[i]
relative_points_vector.append(slider.v_model)
out_tuple = doe.create_const_doe(reduced_parameter_set, reduced_pi_set, func_x_to_pi,
parameters_constraints=parameter_constraints,
pi_constraints=pi_constraints,
whished_size=int(wished_size_entry.v_model),
test_mode=True,
log_space=(select_log.v_model == "Log"),
relative_points=relative_points_vector)
doeX, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active = out_tuple
global DOE
DOE = [doeX, doePi, doePi_all, doePi_nearest, doePi_all_obj, doePI_active]
csv.generate_csv(doeX, file_name, reduced_parameter_set, out_headers)
if len(doe_box.children) == 3:
doe_box.children = list(doe_box.children) + [exp_panel_doe]
widget.disabled = False
widget.loading = False
gen_DOE_btn.disabled = False
gen_DOE_btn.loading = False
dialog3.v_model = False
try:
init_doe_plots(doeX, reduced_parameter_set, doePi, doePi_all, doePi_nearest, doePi_all_obj,
doePI_active, reduced_pi_set, log=(select_log.v_model == "Log"))
phy_save_btn.disabled = False
pi_save_btn.disabled = False
cus_save_btn.disabled = False
para_save_btn.disabled = False
except ValueError:
if len(doe_box.children) > 3:
doe_box.children = list(doe_box.children[:-1])
phy_const_area.error_messages = "Constraints are too restrictive"
pi_const_area.error_messages = "Constraints are too restrictive"
else:
doe_tf.error_messages = "Please specify a file name"
widget.disabled = False
widget.loading = False
gen_DOE_btn.disabled = False
gen_DOE_btn.loading = False
def save_phy(widget, event, data):
"""
Parameters
----------
widget Save phy button
Returns Saves the physical parameters scatter matrix in the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "phy_scatter_matrix.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
def save_pi(widget, event, data):
"""
Parameters
----------
    widget Save pi button
Returns Saves the pi scatter matrix in the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "pi_scatter_matrix.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
def save_cus(widget, event, data):
"""
Parameters
----------
    widget Save cus button
Returns Saves the customizable 2D plot in the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "customizable_2D_plot.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
def save_para(widget, event, data):
"""
Parameters
----------
    widget Save para button
Returns Saves the parallel coordinates plot in the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "parallel_plot.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
def tab3_reload():
"""
Returns Reloads the DOE tab
-------
"""
doe_alert_cont.children = []
input_pi.children[1].children = [""]
output_pi.children[1].children = [""]
relative_nb_points_sliders.children = []
phy_const_entry.v_model = ""
phy_const_area.v_model = ""
pi_const_entry.v_model = ""
pi_const_area.v_model = ""
select_DOE.v_model = "Full Fact"
anticipated_mo_entry.v_model = 1
wished_size_entry.v_model = 40
def tab3_disable():
"""
Returns Disables the DOE tab
-------
"""
phy_const_entry.disabled = True
phy_const_btn.disabled = True
phy_const_area.disabled = True
pi_const_entry.disabled = True
pi_const_btn.disabled = True
pi_const_area.disabled = True
select_DOE.disabled = True
anticipated_mo_entry.disabled = True
wished_size_entry.disabled = True
gen_DOE_btn.disabled = True
def tab3_enable():
"""
Returns Enables the DOE tab
-------
"""
phy_const_entry.disabled = False
phy_const_btn.disabled = False
phy_const_area.disabled = False
pi_const_entry.disabled = False
pi_const_btn.disabled = False
pi_const_area.disabled = False
select_DOE.disabled = False
anticipated_mo_entry.disabled = False
wished_size_entry.disabled = False
gen_DOE_btn.disabled = False
# -----Result import Tab functions--------------------------------------------------------------------------------------
def gen_empty_csv(widget, event, data):
"""
Returns Displays and initializes the generate empty csv dialog box
-------
"""
global WORKDIR
dialog5.v_model = True
dialog5.children[0].children[1].children = ["Current work directory: " + WORKDIR]
now = datetime.now()
dt_string = now.strftime("%d-%m-%y_%H-%M-%S")
empty_tf.v_model = "pyVPLM_empty_" + dt_string
def hide_empty_csv(widget, event, data):
"""
Parameters
----------
widget The OK button in the generated empty csv dialog box
Returns Generates a .csv file with only valid headers in the current work directory
-------
"""
widget.disabled = True
widget.loading = True
empty_csv_btn.disabled = True
empty_csv_btn.loading = True
empty_tf.error_messages = ""
global PHYSICAL_PARAMS, RESULT_DF, RESULT_PI, CHOSEN_PI_SET, WORKDIR
path = WORKDIR + "\\" + empty_tf.v_model + ".csv"
if empty_tf.v_model.strip():
try:
reduced_parameter_set = copy.deepcopy(PHYSICAL_PARAMS)
out_headers = []
for out in list(PHYSICAL_PARAMS.dictionary.keys())[-OUTPUTS:]:
del reduced_parameter_set.dictionary[out]
out_headers.append(out + " [" + PHYSICAL_PARAMS.dictionary[out].defined_units + "]")
csv.generate_csv(np.array([]), path, reduced_parameter_set, out_headers)
        except Exception as e:
            result_alert.children = [str(e)]
            result_alert_cont.children = [result_alert]
            widget.disabled = False
            widget.loading = False
            empty_csv_btn.loading = False
            empty_csv_btn.disabled = False
            dialog5.v_model = False
            return -1
        widget.disabled = False
        widget.loading = False
        empty_csv_btn.loading = False
        empty_csv_btn.disabled = False
        dialog5.v_model = False
        empty_csv_btn.children = ["Empty csv generated"]
        empty_csv_btn.color = "grey lighten-2"
        time.sleep(1.5)
        empty_csv_btn.children = ["Generate empty csv"]
        empty_csv_btn.color = "grey lighten-4"
else:
empty_tf.error_messages = "Please specify a file name"
widget.disabled = False
widget.loading = False
empty_csv_btn.loading = False
empty_csv_btn.disabled = False
def result_import(widget, event, data):
"""
Returns Displays and initializes the result import dialog box
-------
"""
dialog4.v_model = True
fc_res.default_path = WORKDIR
def hide_res_import():
"""
Returns Imports the chosen .csv file to memory and displays it in the result import table
-------
"""
result_btn.disabled = True
result_btn.loading = True
global PHYSICAL_PARAMS, RESULT_DF, RESULT_PI, CHOSEN_PI_SET, DEPENDENCY_CHECK_STATE
path = fc_res.selected
if path:
try:
headers, items, RESULT_DF = csv.read_csv(path, PHYSICAL_PARAMS, round_=True)
result_data.headers = headers
result_data.items = items
result_alert_cont.children = []
if CHOSEN_PI_SET:
func_x_to_pi = vpl.declare_func_x_to_pi(PHYSICAL_PARAMS, CHOSEN_PI_SET)
ordered_columns = []
for key in PHYSICAL_PARAMS.dictionary:
ordered_columns.append(f"{key} [{PHYSICAL_PARAMS.dictionary[key].defined_units}]")
re_ordered_result = RESULT_DF[ordered_columns]
RESULT_PI = func_x_to_pi(re_ordered_result.to_numpy(dtype=float))
DEPENDENCY_CHECK_STATE = []
select_pi0.v_model = ""
except Exception as e:
result_alert.children = [str(e)]
result_alert_cont.children = [result_alert]
result_btn.disabled = False
result_btn.loading = False
dialog4.v_model = False
# -----Dependency Tab functions-----------------------------------------------------------------------------------------
def input_output_lists():
"""
    Returns The names of the input pi and output pi numbers based on CHOSEN_PI_LIST and OUTPUTS (number of outputs)
-------
"""
global OUTPUTS, CHOSEN_PI_LIST, PHYSICAL_PARAMS, DOE_PI_LIST
if CHOSEN_PI_LIST:
if OUTPUTS == 0:
raise ValueError("No output pi")
else:
output_index = pif.output_pi_index(CHOSEN_PI_LIST, PHYSICAL_PARAMS, OUTPUTS)
output_list = [CHOSEN_PI_LIST[i] for i in output_index]
input_list = CHOSEN_PI_LIST.copy()
input_index = [i for i in range(0, len(CHOSEN_PI_LIST))]
for i in range(len(output_list)):
input_list.remove(output_list[i])
for ind in output_index:
input_index.remove(ind)
input_pi_names = []
output_pi_names = []
for index in input_index:
input_pi_names.append("pi" + str(index + 1))
for index in output_index:
output_pi_names.append("pi" + str(index + 1))
return input_pi_names, output_pi_names
else:
raise ValueError("No chosen pi set")
def dependency_check(widget, event, data):
"""
Parameters
----------
widget Any checkbox in the dependency analysis tab
data True if the checkbox is checked, False otherwise
    Returns Modifies the current pi set via REGRESSION_PI_LIST and DEPENDENCY_CHECK_STATE
-------
"""
global REGRESSION_PI_LIST, DEPENDENCY_CHECK_STATE
    index = int(widget.label[2:]) - 1  # labels are assumed to be "piN"; [-1] would break past pi9
if data:
REGRESSION_PI_LIST[index] = CHOSEN_PI_LIST[index]
DEPENDENCY_CHECK_STATE[index] = True
else:
REGRESSION_PI_LIST[index] = None
DEPENDENCY_CHECK_STATE[index] = False
dependency_set.color = "green lighten-3"
dependency_set.children[0].children = ["Current pi set:"]
dependency_set.children[1].children = pi_set_html(pif.pi_list_to_str(REGRESSION_PI_LIST))
toggle_dependency_check()
def update_dependency_check():
"""
Returns Updates current chosen pi set based on DEPENDENCY_CHECK_STATE
-------
"""
global REGRESSION_PI_LIST, DEPENDENCY_CHECK_STATE
for index in range(len(DEPENDENCY_CHECK_STATE)):
if DEPENDENCY_CHECK_STATE[index]:
REGRESSION_PI_LIST[index] = CHOSEN_PI_LIST[index]
else:
REGRESSION_PI_LIST[index] = None
dependency_set.color = "green lighten-3"
dependency_set.children[0].children = ["Current pi set:"]
dependency_set.children[1].children = pi_set_html(pif.pi_list_to_str(REGRESSION_PI_LIST))
toggle_dependency_check()
def toggle_dependency_check():
"""
Returns Disables the last checked checkbox for the last input pi and the last output pi (ensures that
there is at least one input and one output before regression)
-------
"""
piN, pi0 = input_output_lists()
inp_index = []
for pi_n in piN:
inp_index.append(int(pi_n.replace("pi", "")) - 1)
out_index = []
for pi_n in pi0:
out_index.append(int(pi_n.replace("pi", "")) - 1)
nb_inputs = 0
for i in inp_index:
if DEPENDENCY_CHECK_STATE[i]:
nb_inputs += 1
nb_outputs = 0
for i in out_index:
if DEPENDENCY_CHECK_STATE[i]:
nb_outputs += 1
if nb_inputs == 1:
for i in inp_index:
if DEPENDENCY_CHECK_STATE[i]:
dependency_checkboxes.children[i].disabled = True
else:
for i in inp_index:
dependency_checkboxes.children[i].disabled = False
if nb_outputs == 1:
for i in out_index:
if DEPENDENCY_CHECK_STATE[i]:
dependency_checkboxes.children[i].disabled = True
else:
for i in out_index:
dependency_checkboxes.children[i].disabled = False
def update_dependency_plots(widget, event, data):
"""
Parameters
----------
    widget Any of the checkboxes in the dependency analysis tab
Returns Updates both sensitivity and dependency analysis plots depending on which checkboxes are unchecked (removes
plots associated with removed pi numbers)
-------
"""
widget.disabled = True
widget.loading = True
sen_save_btn.disabled = True
dep_save_btn.disabled = True
piN, pi0 = input_output_lists()
for i in range(len(DEPENDENCY_CHECK_STATE)):
if not DEPENDENCY_CHECK_STATE[i]:
pi_n = f"pi{i + 1}"
if pi_n in piN:
piN.remove(pi_n)
elif pi_n in pi0:
pi0.remove(pi_n)
sensitivity_output.clear_output(wait=True)
with sensitivity_output:
dpp.pi_sensitivity_plot(CHOSEN_PI_SET, RESULT_PI, WORKDIR, pi0=pi0, piN=piN, latex=True)
dependency_output.clear_output(wait=True)
with dependency_output:
dpp.pi_dependency_plot(CHOSEN_PI_SET, RESULT_PI, WORKDIR, x_list=piN, y_list=piN, latex=True,
threshold=threshold_slider.v_model)
widget.disabled = False
widget.loading = False
sen_save_btn.disabled = False
dep_save_btn.disabled = False
def change_threshold(widget, event, data):
"""
Parameters
----------
widget The R^2 threshold slider
Returns Updates dependency analysis plots accordingly (new R^2 threshold)
-------
"""
widget.loading = True
dep_save_btn.disabled = True
piN, _ = input_output_lists()
for i in range(len(DEPENDENCY_CHECK_STATE)):
if not DEPENDENCY_CHECK_STATE[i]:
pi_n = f"pi{i + 1}"
if pi_n in piN:
piN.remove(pi_n)
dependency_output.clear_output(wait=True)
threshold = widget.v_model
if threshold == 0:
threshold = 0.001
if threshold == 1:
threshold = 0.999
with dependency_output:
dpp.pi_dependency_plot(CHOSEN_PI_SET, RESULT_PI, WORKDIR, x_list=piN, y_list=piN, latex=True,
threshold=threshold)
widget.loading = False
dep_save_btn.disabled = False
def save_sen(widget, event, data):
"""
Parameters
----------
widget Save sen button
Returns Saves the sensitivity analysis plot in the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "sensitivity_plot.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
def save_dep(widget, event, data):
"""
Parameters
----------
    widget Save dep button
Returns Saves the dependency analysis plot in the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "dependency_plot.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
# -----Regression Tab functions-----------------------------------------------------------------------------------------
def tab6_enable():
"""
Returns Enables the regression tab
-------
"""
select_pi0.disabled = False
model_order_entry.disabled = False
select_reg_criteria.disabled = False
select_reg_type.disabled = False
models_btn.disabled = False
nb_terms_slider.disabled = False
def tab6_disable():
"""
Returns Disables the regression tab
-------
"""
select_pi0.disabled = True
model_order_entry.disabled = True
select_reg_criteria.disabled = True
select_reg_type.disabled = True
models_btn.disabled = True
nb_terms_slider.disabled = True
def slider_tick_labels(max_nb):
"""
Parameters
----------
max_nb Max number of terms for the current model order
Returns Removes some ticks from the model terms slider if there are too many ticks (prevents overlapping)
-------
"""
tick_labels = list(range(1, max_nb + 1))
if max_nb > 50:
visible_labels = [1]
for i in range(1, 11):
visible_labels.append(round(i*(max_nb - 1)/10 + 1))
for i in range(0, max_nb):
if i + 1 not in visible_labels:
tick_labels[i] = ""
return tick_labels
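# e.g. slider_tick_labels(100) keeps roughly every tenth label visible
# (1, 11, 21, ..., 100) and blanks the rest to avoid overlap; with 50 ticks
# or fewer, every label is kept.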
def regression_models(widget, event, data, slider_state=1):
"""
Parameters
----------
widget The show models button
slider_state State of the model terms slider
Returns Generates all regression models, stores them in memory and displays all the regression tab plots
-------
"""
spl.add_temp(WORKDIR)
widget.disabled = True
widget.loading = True
models_save_btn.disabled = True
nb_terms_slider.disabled = True
model_order_entry.error_messages = ""
model_order = int(float(model_order_entry.v_model))
if model_order < 1:
model_order_entry.error_messages = "Model order should be >= 1"
widget.disabled = False
widget.loading = False
nb_terms_slider.disabled = False
return -1
global MODELS, RESULT_PI, REGRESSIONS, PI0_PI_LIST
modified_result_pi = RESULT_PI
selected_pi0 = int(select_pi0.v_model[2:])
max_pi_nb = np.shape(RESULT_PI)[1]
list_to_del = []
_, output_pi_names = input_output_lists()
for pi_name in output_pi_names:
index = int(pi_name[2:]) - 1
if index != selected_pi0 - 1:
list_to_del.append(index)
if DEPENDENCY_CHECK_STATE:
for i in range(len(DEPENDENCY_CHECK_STATE)):
if not DEPENDENCY_CHECK_STATE[i] and i not in list_to_del:
list_to_del.append(i)
modified_result_pi = np.delete(modified_result_pi, list_to_del, 1)
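# modified_result_pi now keeps only the selected output pi plus the input pi that
# survived the dependency-tab checkboxes; column indices shift accordingly, which is
# what the eff_pi0 bookkeeping below compensates for.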
actual_pi0 = selected_pi0 - 1
for i in list_to_del:
if i < selected_pi0 - 1:
actual_pi0 -= 1
eff_pi0 = actual_pi0 + 1
pi_list = []
for i in range(0, max_pi_nb):
if i not in list_to_del:
pi_list.append(f"pi{i + 1}")
PI0_PI_LIST = [eff_pi0 - 1, pi_list]
criteria = select_reg_criteria.v_model
if criteria == "max(error)":
choice = 1
elif criteria == "avg(error magnitude)":
choice = 2
elif criteria == "avg(error)":
choice = 3
else:
choice = 4
log_space = select_reg_type.v_model == "Power Law"
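# Power-law models are fitted in log space; polynomial models stay in linear space.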
if not RESULT_DF.empty:
models_output.clear_output(wait=True)
_, _, ww, _ = GetWindowRect(GetForegroundWindow())
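# Current window width from win32 (Windows-only); used below to scale the figure size.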
with models_output:
warnings.filterwarnings("ignore")
plt.rc("text", usetex=True)
plt.rc("font", family="serif")
MODELS, axs, fig = vpl.regression_models(modified_result_pi, elected_pi0=select_pi0.v_model,
order=model_order, test_mode=True, plots=True,
force_choice=choice, ymax_axis=1000, removed_pi=list_to_del,
eff_pi0=eff_pi0, skip_cross_validation=True, return_axes=True,
fig_size=((29/(1 - delta) * (ww/1928 - delta)),
12*ww/1928),
log_space=log_space)
max_nb_terms = len(MODELS.keys()) - 4
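# MODELS is assumed to hold the four error-metric entries ("max |e|", "ave. |e|",
# "ave. e", "sigma e") alongside one entry per candidate model size, hence the -4.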
fig_width, _ = fig.get_size_inches()*fig.dpi
omicron = 0.001 * max_nb_terms
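# Small horizontal offset, proportional to the term count, that keeps the error
# annotations clear of the vertical marker line.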
if slider_state >= max_nb_terms:
slider_state = 1
for i in range(len(axs)):
ax = axs[i]
err_train = "NaN"
err_test = "NaN"
if i == 0:
err_train = round(MODELS["max |e|"][0][slider_state], 1)
err_test = round(MODELS["max |e|"][1][slider_state], 1)
elif i == 1:
err_train = round(MODELS["ave. |e|"][0][slider_state], 1)
err_test = round(MODELS["ave. |e|"][1][slider_state], 1)
elif i == 2:
err_train = round(MODELS["ave. e"][0][slider_state], 1)
err_test = round(MODELS["ave. e"][1][slider_state], 1)
elif i == 3:
err_train = round(MODELS["sigma e"][0][slider_state], 1)
err_test = round(MODELS["sigma e"][1][slider_state], 1)
y_max = ax.get_ylim()[1]
if slider_state == 0:
ax.axvline(1 + omicron, color="blue")
ax.text(1 + 10*omicron, 3*y_max/8, str(err_train), fontdict={"fontsize": "large",
"fontfamily": "sans-serif"})
if err_test != 0:
ax.text(1 + 10*omicron, 5*y_max/8, str(err_test), color="red",
fontdict={"fontsize": "large", "fontfamily": "sans-serif"})
else:
ax.axvline(slider_state + 1, color="blue")
ax.text(slider_state + 1 + 10*omicron, 3*y_max/8, str(err_train),
fontdict={"fontsize": "large", "fontfamily": "serif"})
if err_test != 0:
ax.text(slider_state + 1 + 10*omicron, 5*y_max/8, str(err_test), color="red",
fontdict={"fontsize": "large", "fontfamily": "sans-serif"})
fig.savefig(os.path.join(WORKDIR, "temp", "regression_models_plot.pdf"))
plt.show()
REGRESSIONS = []
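# Cache one entry per model size: plain-text expression, LaTeX form, and the
# measured vs. regressed outputs used later by the comparison plots.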
for i in range(max_nb_terms):
expression, expression_latex, y, y_reg = vpl.perform_regression(modified_result_pi, MODELS,
chosen_model=i + 1,
latex=True, pi_list=pi_list,
max_pi_nb=max_pi_nb,
removed_pi=list_to_del,
eff_pi0=eff_pi0, test_mode=True,
no_plots=True)
REGRESSIONS.append({"expr": expression, "expr_latex": expression_latex, "Y": y, "Y_reg": y_reg})
nb_terms_slider.tick_labels = slider_tick_labels(max_nb_terms)
nb_terms_slider.max = max_nb_terms - 1
if len(regression_cont.children) == 3:
regression_cont.children = regression_cont.children + [models_output_col, nb_terms_slider_row,
save_reg_cont, regression_output]
nb_terms_slider.v_model = slider_state
perform_regression(nb_terms_slider, "change", 0, from_reg_models=True)
nb_terms_slider.disabled = False
models_save_btn.disabled = False
widget.disabled = False
widget.loading = False
def perform_regression(widget, event, data, from_reg_models=False):
"""
Parameters
----------
widget model terms slider
from_reg_models True if the function is called from regression_models()
Returns Calculates the actual regression for each model (if from_reg_models=True), and updates all plots in the
regression tab according to model terms slider state
-------
"""
widget.disabled = True
widget.loading = True
models_save_btn.disabled = True
reg_save_btn.disabled = True
global REGRESSIONS, PI0_PI_LIST, MODELS
chosen_model = nb_terms_slider.v_model
if not from_reg_models:
models_output.clear_output(wait=True)
with models_output:
axs, fig = dpp.regression_models_plot(MODELS, WORKDIR)
with plt.rc_context({"text.usetex": False, "font.family": "sans-serif"}):
max_nb_terms = len(MODELS.keys()) - 4
omicron = 0.001 * max_nb_terms
if chosen_model >= max_nb_terms:
chosen_model = 1
for i in range(len(axs)):
err_train = "NaN"
err_test = "NaN"
if i == 0:
err_train = round(MODELS["max |e|"][0][chosen_model], 1)
err_test = round(MODELS["max |e|"][1][chosen_model], 1)
elif i == 1:
err_train = round(MODELS["ave. |e|"][0][chosen_model], 1)
err_test = round(MODELS["ave. |e|"][1][chosen_model], 1)
elif i == 2:
err_train = round(MODELS["ave. e"][0][chosen_model], 1)
err_test = round(MODELS["ave. e"][1][chosen_model], 1)
elif i == 3:
err_train = round(MODELS["sigma e"][0][chosen_model], 1)
err_test = round(MODELS["sigma e"][1][chosen_model], 1)
y_max = axs[i].get_ylim()[1]
if chosen_model == 0:
axs[i].axvline(1 + omicron, color="blue")
axs[i].text(1 + 10*omicron, 3 * y_max / 8, str(err_train), fontdict={"fontsize": "large",
"fontfamily": "sans-serif"})
if err_test != 0:
axs[i].text(1 + 10*omicron, 5 * y_max / 8, str(err_test), color="red",
fontdict={"fontsize": "large", "fontfamily": "sans-serif"})
else:
axs[i].axvline(chosen_model + 1, color="blue")
axs[i].text(chosen_model + 1 + 10*omicron, 3 * y_max / 8, str(err_train),
fontdict={"fontsize": "large", "fontfamily": "sans-serif"})
if err_test != 0:
axs[i].text(chosen_model + 1 + 10*omicron, 5 * y_max / 8, str(err_test), color="red",
fontdict={"fontsize": "large", "fontfamily": "sans-serif"})
fig.savefig(os.path.join(WORKDIR, "temp", "regression_models_plot.pdf"))
plt.show()
regression_output.clear_output(wait=True)
with regression_output:
dic = REGRESSIONS[chosen_model]
dpp.perform_regression_plot(dic["expr"], dic["Y"], dic["Y_reg"], PI0_PI_LIST[0],
PI0_PI_LIST[1], WORKDIR)
plt.show()
if len(regression_cont.children) == 7:
regression_cont.children = regression_cont.children[:-2] + [expression_col] + regression_cont.children[-2:]
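# Render the model expression with MathJax: "pi1", "pi2", ... become "\pi_1", "\pi_2", ...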
math_widget = widgets.HTMLMath(dic["expr_latex"].replace("pi", "\\pi_"))
expression_cont.children = [math_widget]
models_save_btn.disabled = False
reg_save_btn.disabled = False
widget.disabled = False
widget.loading = False
def save_models(widget, event, data):
"""
Parameters
----------
widget Save models button
Returns Saves the first plot of the regression tab to the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "regression_models_plot.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
def save_reg(widget, event, data):
"""
Parameters
----------
widget Save models button
Returns Saves the second plot of the regression tab to the current work directory
-------
"""
try:
new_name = spl.save_single_plot(WORKDIR, "perform_regression_plot.pdf")
except FileNotFoundError:
new_log(f"Failed to save (plot may already be saved)", False)
widget.color = "red"
time.sleep(1)
widget.color = "default"
else:
new_log(f"Saved plot as: {new_name}", True)
widget.color = "green"
time.sleep(1)
widget.color = "default"
def save_py_func(widget, event, data):
"""
Parameters
----------
widget the save .py function button
Returns Saves the current model as a python function in a .py file created in the current work directory
-------
"""
widget.disabled = True
widget.loading = True
chosen_model = nb_terms_slider.v_model
global REGRESSIONS, CHOSEN_PI_SET, PHYSICAL_PARAMS
model = REGRESSIONS[chosen_model]["expr"]
input_pi_names, _ = input_output_lists()
f_name = spf.save_py_func(model, input_pi_names, WORKDIR, CHOSEN_PI_SET, PHYSICAL_PARAMS)
new_log(f"Saved python file as: {f_name}", True)
widget.disabled = False
widget.loading = False
python_func_btn.children[0].color = "green"
time.sleep(1)
python_func_btn.children[0].color = "default"
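# Minimal usage sketch for the file generated by save_py_func (illustrative only:
# the actual module and function names are chosen by spf.save_py_func):
#   from regression_model import pi_model   # hypothetical names
#   y_pi0 = pi_model(pi1=2.0, pi2=0.5)      # returns the regressed output pi value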
# -----All Tabs functions-----------------------------------------------------------------------------------------------
def change_tab_2():
"""
Function called when the user switches to the Buckingham theorem tab
-------
"""
global OLD_PHYSICAL_PARAMS, PHYSICAL_PARAMS, OUTPUTS
valid_param_set = True
if len(sheet.items) == 0:
OUTPUTS = 0
PHYSICAL_PARAMS = None
if len(list(vbox2.children)) == 2:
vbox2.children = [buck_error] + list(vbox2.children)
if len(list(vbox2.children)) == 3:
vbox2.children = [buck_error] + list(vbox2.children)[1:]
tab2_reload()
tab2_disable()
if len(sheet.items) != 0:
old_outputs = OUTPUTS
get_outputs()
if PHYSICAL_PARAMS is None:
PHYSICAL_PARAMS = gen_parameter_set()
if len(list(vbox2.children)) == 3:
vbox2.children = list(vbox2.children)[1:]
tab2_reload()
valid_param_set = buckingham()
else:
PHYSICAL_PARAMS = gen_parameter_set()
if OLD_PHYSICAL_PARAMS == PHYSICAL_PARAMS and OUTPUTS == old_outputs:
if len(list(vbox2.children)) == 3:
vbox2.children = list(vbox2.children)[1:]
elif OLD_PHYSICAL_PARAMS:
if len(list(vbox2.children)) == 2:
vbox2.children = [buck_warn] + list(vbox2.children)
if len(list(vbox2.children)) == 3:
vbox2.children = [buck_warn] + list(vbox2.children)[1:]
tab2_reload()
valid_param_set = buckingham()
else:
tab2_reload()
valid_param_set = buckingham()
if not valid_param_set:
buck_area.v_model = "/!\ Cannot generate pi set out of the given parameters /!\ "
tab2_disable()
else:
tab2_enable()
OLD_PHYSICAL_PARAMS = PHYSICAL_PARAMS
def change_tab_3():
"""
Function called when the user switches to the DOE tab
-------
"""
global OUTPUTS, CHOSEN_PI_LIST, PHYSICAL_PARAMS, DOE_PI_LIST
if CHOSEN_PI_LIST:
if OUTPUTS == 0:
tab3_reload()
tab3_disable()
doe_alert_cont.children = [no_output_error]
elif OUTPUTS == len(CHOSEN_PI_LIST):
tab3_reload()
tab3_disable()
doe_alert_cont.children = [no_input_error]
else:
if DOE_PI_LIST and DOE_PI_LIST != CHOSEN_PI_LIST:
doe_alert_cont.children = [change_pi_set_warning]
DOE_PI_LIST = CHOSEN_PI_LIST
tab3_enable()
output_index = pif.output_pi_index(CHOSEN_PI_LIST, PHYSICAL_PARAMS, OUTPUTS)
output_list = [CHOSEN_PI_LIST[i] for i in output_index]
output_pi.children[1].children = pi_set_html(pif.pi_sub_list_to_str(output_list, output_index))
input_index = [i for i in range(0, len(CHOSEN_PI_LIST))]
input_list = CHOSEN_PI_LIST.copy()
for i in range(len(output_list)):
input_list.remove(output_list[i])
for ind in output_index:
input_index.remove(ind)
input_pi.children[1].children = pi_set_html(pif.pi_sub_list_to_str(input_list, input_index))
relative_nb_points_sliders.children = []
for inp_pi in input_index:
relative_nb_points_sliders.children = relative_nb_points_sliders.children + [v.Subheader(
children=[f"pi{inp_pi + 1}"], class_="justify-center"),
v.Slider(thumb_label="always", min=1, max=10, v_model=1)]
wished_size_entry.v_model = DOE_MULTIPLIER * nb_of_terms()
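# Default wished size scales with the maximum number of model terms, via
# DOE_MULTIPLIER (the "10x the maximum number of terms" rule quoted in the tutorial).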
elif not DOE_PI_LIST and CHOSEN_PI_LIST:
doe_alert_cont.children = []
DOE_PI_LIST = CHOSEN_PI_LIST
tab3_enable()
output_index = pif.output_pi_index(CHOSEN_PI_LIST, PHYSICAL_PARAMS, OUTPUTS)
output_list = [CHOSEN_PI_LIST[i] for i in output_index]
output_pi.children[1].children = pi_set_html(pif.pi_sub_list_to_str(output_list, output_index))
input_index = [i for i in range(0, len(CHOSEN_PI_LIST))]
input_list = CHOSEN_PI_LIST.copy()
for i in range(len(output_list)):
input_list.remove(output_list[i])
for ind in output_index:
input_index.remove(ind)
input_pi.children[1].children = pi_set_html(pif.pi_sub_list_to_str(input_list, input_index))
relative_nb_points_sliders.children = []
for inp_pi in input_index:
relative_nb_points_sliders.children = relative_nb_points_sliders.children + [v.Subheader(
children=[f"pi{inp_pi + 1}"], class_="justify-center"),
v.Slider(thumb_label="always", min=1, max=10, v_model=1)]
wished_size_entry.v_model = DOE_MULTIPLIER * nb_of_terms()
else:
doe_alert_cont.children = []
DOE_PI_LIST = CHOSEN_PI_LIST
else:
tab3_reload()
tab3_disable()
doe_alert_cont.children = [no_pi_set_error]
def change_tab_4():
"""
Function called when the user switches to the Result import tab
-------
"""
global PHYSICAL_PARAMS
PHYSICAL_PARAMS = gen_parameter_set()
get_outputs()
if PHYSICAL_PARAMS is None:
result_btn.disabled = True
empty_csv_btn.disabled = True
result_alert_cont.children = [result_warning]
else:
result_btn.disabled = False
empty_csv_btn.disabled = False
result_alert_cont.children = []
def change_tab_5():
"""
Function called when the user switches to the Dependency analysis tab
-------
"""
global RESULT_DF, RESULT_PI, CHOSEN_PI_SET, PHYSICAL_PARAMS, OLD_RESULT, OLD_PI_SET, REGRESSION_PI_LIST, \
DEPENDENCY_CHECK_STATE
dependency_alert_cont.children = []
if CHOSEN_PI_SET is not None:
sen_save_btn.disabled = True
dep_save_btn.disabled = True
update_dependency_plots_btn.loading = True
nochange1 = CHOSEN_PI_SET == OLD_PI_SET
nochange2 = RESULT_DF.equals(OLD_RESULT)
if not RESULT_DF.empty and (not nochange2 or not nochange1):
if not OLD_RESULT.empty and not nochange2:
dependency_alert_cont.children = [dependency_change_alert]
if not nochange1 and OLD_PI_SET:
dependency_alert_cont.children = [dependency_change_alert_2]
threshold_slider.disabled = True
update_dependency_plots_btn.disabled = True
REGRESSION_PI_LIST = CHOSEN_PI_LIST.copy()
exp_panel_dependency.v_model = [0]
exp_panel_dependency.disabled = False
pi_removal_card.disabled = False
sensitivity_output.clear_output()
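# Re-order the result columns to the parameter-set order expected by func_x_to_pi,
# then map every physical-space sample into pi space.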
func_x_to_pi = vpl.declare_func_x_to_pi(PHYSICAL_PARAMS, CHOSEN_PI_SET)
ordered_columns = []
for key in PHYSICAL_PARAMS.dictionary:
ordered_columns.append(f"{key} [{PHYSICAL_PARAMS.dictionary[key].defined_units}]")
re_ordered_result = RESULT_DF[ordered_columns]
RESULT_PI = func_x_to_pi(re_ordered_result.to_numpy(dtype=float))
checkboxes = []
if DEPENDENCY_CHECK_STATE:
update_dependency_plots(update_dependency_plots_btn, 0, 0)
for i in range(len(CHOSEN_PI_LIST)):
checkboxes.append(v.Checkbox(v_model=DEPENDENCY_CHECK_STATE[i], label=f"pi{i + 1}", class_="mx-2",
disabled=False))
checkboxes[i].on_event("change", dependency_check)
else:
with sensitivity_output:
try:
piN, pi0 = input_output_lists()
dpp.pi_sensitivity_plot(CHOSEN_PI_SET, RESULT_PI, WORKDIR, pi0=pi0, piN=piN, latex=True)
except ValueError as e:
print(e)
dependency_output.clear_output()
with dependency_output:
dpp.pi_dependency_plot(CHOSEN_PI_SET, RESULT_PI, WORKDIR, x_list=piN, y_list=piN, latex=True)
dependency_set.children[1].children = pi_set_html(pif.pi_list_to_str(REGRESSION_PI_LIST))
for i in range(len(CHOSEN_PI_LIST)):
checkboxes.append(v.Checkbox(v_model=True, label=f"pi{i + 1}", class_="mx-2", disabled=False))
checkboxes[i].on_event("change", dependency_check)
DEPENDENCY_CHECK_STATE.append(True)
dependency_checkboxes.children = checkboxes
update_dependency_check()
threshold_slider.disabled = False
update_dependency_plots_btn.disabled = False
elif not RESULT_DF.empty and nochange2:
dependency_alert_cont.children = []
exp_panel_dependency.v_model = [0]
exp_panel_dependency.disabled = False
pi_removal_card.disabled = False
else:
dependency_alert_cont.children = [dependency_result_alert]
exp_panel_dependency.v_model = []
exp_panel_dependency.disabled = True
pi_removal_card.disabled = True
sen_save_btn.disabled = False
dep_save_btn.disabled = False
update_dependency_plots_btn.loading = False
else:
dependency_alert_cont.children = [dependency_pi_set_alert]
exp_panel_dependency.v_model = []
exp_panel_dependency.disabled = True
pi_removal_card.disabled = True
OLD_RESULT = RESULT_DF
OLD_PI_SET = CHOSEN_PI_SET
def change_tab_6():
"""
Function called when the user switches to the Regression tab
-------
"""
tab6_enable()
reg_alert_cont.children = []
global DEPENDENCY_CHECK_STATE, OLD_DEPENDENCY_CHECK_STATE, REGRESSION_PI_LIST, CHOSEN_PI_LIST, CHOSEN_PI_SET
if CHOSEN_PI_SET is not None:
if not RESULT_DF.empty:
if not REGRESSION_PI_LIST and CHOSEN_PI_LIST:
REGRESSION_PI_LIST = CHOSEN_PI_LIST
dependency_set.color = "green lighten-3"
dependency_set.children[0].children = ["Current pi set:"]
dependency_set.children[1].children = pi_set_html(pif.pi_list_to_str(REGRESSION_PI_LIST))
_, pi0 = input_output_lists()
out_index = []
for pi_n in pi0:
out_index.append(int(pi_n.replace("pi", "")) - 1)
items = []
if DEPENDENCY_CHECK_STATE and DEPENDENCY_CHECK_STATE != OLD_DEPENDENCY_CHECK_STATE:
for i in out_index:
if DEPENDENCY_CHECK_STATE[i]:
items.append(f"pi{i + 1}")
select_pi0.items = items
if len(select_pi0.items) > 0:
select_pi0.v_model = select_pi0.items[0]
else:
for i in out_index:
items.append(f"pi{i + 1}")
select_pi0.items = items
if len(select_pi0.items) > 0 and select_pi0.v_model == "":
select_pi0.v_model = select_pi0.items[0]
select_reg_criteria.v_model = "max(error)"
model_order_entry.v_model = anticipated_mo_entry.v_model
else:
reg_alert_cont.children = [reg_no_result_error]
tab6_disable()
OLD_DEPENDENCY_CHECK_STATE = DEPENDENCY_CHECK_STATE
else:
reg_alert_cont.children = [reg_no_pi_set_error]
tab6_disable()
def change_tab(widget, event, data):
"""
Parameters
----------
data The index of the tab being switched to
Returns Executes the proper change_tab function when the user changes tab
-------
"""
# if you change the number of tabs /!\ change line 1047 and 1049: if tabs.v_model == 4:
if data == 2:
change_tab_2()
if data == 3:
change_tab_3()
if data == 4:
change_tab_4()
if data == 5:
change_tab_5()
if data == 6:
change_tab_6()
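# change_tab is expected to be bound to the main tabs widget elsewhere in this file,
# e.g. tabs.on_event("change", change_tab) (assumed binding; not shown in this section).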
# -----------Tutorial Tab-----------------------------------------------------------------------------------------------
tutorial_box = v.Card(children=[v.Row(children=[v.Img(src="Images/logo_ICA.png", max_height=200, max_width=200,
class_="mx-5 my-2"),
v.Img(src="Images/logo_supaero.png", heigth=120, max_width="250px",
contain=False, class_="mx-5 my-2"),
v.Img(src="Images/logo_insa.png", height=120, max_width="500px",
contain=False, class_="mx-5 my-2")],
justify="space-around", align="center"),
v.CardTitle(children=["What is pyVPLM ?"], class_='display-2'),
widgets.HTML("""<head><style type="text/css">
<!--
.tab { margin-right: 40px; margin-left: 20px; font-size: 15px; line-height: 1.5}
-->
<!--
.dash { margin-right: 50px; margin-left: 20px; font-size: 15px; line-height: 1.5}
-->
</style></head>
<div class="tab"> <p>pyVPLM is a program that is developed to help scientist and
engineers to construct <b>power-law and/or polynomial regression models</b> on different
type of data such as finite-element simulation results, manufacturer data-sheets...<br>
It integrates various functionalities such as:</p><p class="dash">- <b>Model parameters
reduction</b> based on Buckingham Theorem dimensional analysis and Pint package with
derived functions.
</p><p class="dash">- <b>Sensitivity and dependency analysis</b> on dimensionless
parameters and limited experiments to simplify further model expressions.
</p><p class="dash">- Construction of <b>optimized experimental design</b> on
feasible-physical variables leading to full-factorial design within dimensionless space.
Those DOEs are the inputs of parametrized finite-element models.
</p><p class="dash">- <b>Regression models construction</b> with increasing complexity
(terms sorted based on their impact) and validation based on relative error repartition
analysis.</p> </div>"""),
v.CardTitle(children=["Before you start"], class_='display-2'),
widgets.HTML("""<div class="tab"> <p>Before using pyVPLM, it is recommended to create a
folder to store all saved files (pyvplm saves, plots, .py models), and set that folder
as <b>work directory</b> (see "The toolbar"). The recommended resolution is <b>1920*1080
</b> and it is advised to use the app on <b>full screen</b>.</p><p> If you need to
generate a model, the mandatory tabs are: Physical parameters, Buckingham theorem,
Result import and Regression (the other tabs are marked as "optional" in this
tutorial). On the other hand, if you just need to generate a DOE, you only need the
Physical parameters, Buckingham theorem and DOE tabs. </p> </div>"""),
v.CardTitle(children=["How to use pyVPLM ?"], class_='display-2'),
v.CardTitle(children=["The toolbar"], class_='display-1'),
v.Img(src="Images/Toolbar_with_desc.PNG", max_width=1500, class_="mb-2"),
widgets.HTML("""
<div class="tab"> <p>The <b>toolbar</b> is always at the top of the screen.
It contains 5 buttons and a log text field:</p><p class="dash"> - The <b>Work directory
selector</b> should be the first button you use, it will allow you to change the default
work directory to one of your choosing. All the files saved by
pyVPLM will go to the work directory and it will be the default
directory to load files from. Just click on select when you are
in the desired directory and you will have changed the work
directory.</p><p class="dash"> - The <b>Save as</b> button will allow you to save your
progress with a specific name. pyVPLM save files are .txt files and
contain all the user inputs as well as the DOE, the imported
result and the regression models calculated but NOT the plots.
You don't need to write .txt at the end of the file name.</p><p class="dash">
- The <b>Save</b> button will allow you to make a quick save in one click.
</p><p class="dash">- The <b>Save all plots</b> button will save all the pots that have
been generated but not yet saved by other means. The saved plots will
appear in your current work directory and their name will contain
the precise time at which they have been saved.</p><p class="dash">
- The <b>Load</b> button will allow you to load any pyVPLM save.</p><p class="dash">
- Finally, every time you perform an action related to saving,
loading or changing work directory, a message will appear in the
<b>logs</b> text field. It will be green if the operation is a success
and red otherwise.</p> </div>"""),
v.CardTitle(children=["The Physical Parameters Tab"], class_="display-1 mb-3"),
v.Img(src="Images/phy_param_tab(1).png", max_width=1700),
widgets.HTML("""<div class="tab"><p>The <b>Physical parameters tab</b> is where you
define your physical variables (or constants). To define a new variable fill the text
fields with the necessary information:</p><p class="dash"> - The <b>name</b> is the
unique identifier of your parameter, it has to be different from all the already
existing names. It can be multiple characters long but it cannot contain a space or
some special characters (like ?, # or $) and cannot be some special mathematical keyword
(like ln, sqrt or gamma). If you end up defining a constant, all characters in its name
will be transformed to uppercase when you add the parameter.</p><p class="dash">
- The <b>description</b> is just a longer version of the name, will only influence the
comments in a generated python model. Some special characters are still forbidden
(#, |, ...) but spaces are allowed. </p><p class="dash"> - <b>Unit</b> is the physical
unit for the value of your parameter. It has to be understood by the pint unit registry.
It is advised to use S.I units, prefixes are handled by Pint but it is also advised to
keep your units lower case when it makes sense. If you have any trouble with unit
definition, the question mark in the unit text field will link you to the pint registry
txt page. If you need to define a dimensionless parameter, just fill the unit field with
"m/m" or any other unit divided by itself.</p><p class="dash"> - <b>Bounds</b> are the
limit values for your parameter. Both bounds must be <b>strictly positive</b>. If you
want to define a constant, just fill the upper bound with the constant's value.
Scientific notation syntax is supported (ex: 1.1e4 for 11000).</p>
<p>Once you have filled the text fields you can either press the <b>Add parameter</b> button
or simply press enter (if you are still 'in' any of the text fields). The new
parameter will appear in the table below the previous ones. To modify your
parameter set, select a parameter with the checkboxes and then use the buttons on the
left of the table:</p><p class="dash"> - The <b>up and down arrows</b> will allow you to
move up and down the selected parameter. This will change its priority to be chosen as
a repetitive parameter in the pi set that you will generate in the next tab. This means
that it will be more likely to appear in multiple pi numbers compared to the other
parameters. This hierarchy only applies to input parameters as output parameters will
always have the lowest priority to be repetitive.</p><p class="dash"> - The <b>delete
</b> button will simply delete the selected parameter.</p><p class="dash"> - The
<b>toggle input/output</b> will allow you to define a parameter as being an output
(or revert it back to an input), it is necessary to have at least one output
(and one input) to use the rest of the program. Constants cannot be set to output.
</p><p class="dash"> - The <b>delete all</b> button will delete all parameters in the
table regardless of their selected status.</p><p> When all your parameters are defined
and you have at least one output, you can proceed to the next tab.</p></div>"""),
v.CardTitle(children=["The Buckingham Theorem Tab"], class_='display-1'),
v.Img(src="Images/buck_tab(1).png", max_width=1700, class_="mb-3"),
widgets.HTML("""<div class="tab"><p>The <b>Buckingham Theorem tab</b> is where you
define your dimensionless pi set. The tab is divided in three panels: </p>
<p class="dash">- The <b>simple Buckingham</b> panel will have a pi set generated as
soon as you get on the tab. This set will take into account the repetitive parameter
hierarchy defined in the previous tab. </p><p class="dash">- The <b>manual Buckingham
</b> panel allows you to define your own pi set in the <b>Forced pi numbers</b> area.
The syntax is pi{N} = {expression in python form}, e.g. pi1 = x*y**-1. You can also define a new
pi number with the <b>add pi</b> button, in which case you only need to write the expression in
python form. If you want to copy a pi set from the other panels, just select the pi set
you want to copy with the checkboxes and press the copy button. Once the forced pi
number area is filled with your pi set just click on <b>check pi set</b> and if your pi
set is valid you will be able to select it with the panel's checkbox. The area will
become read-only but if you want to modify the set just click on <b>modify pi set</b>.
</p><p class="dash"> - The <b>automatic Buckingham</b> panel allows you to generate all
possible pi sets (with minimal integer exponents) via the automatic Buckingham button.
This will generate a table with checkboxes. To select one of these pi sets, you need to
check both the checkbox in the table and the panel checkbox in the bottom left. The
syntax of the pi sets (with | separators) in the automatic Buckingham table is supported
by the manual Buckingham area. </p><p> You can see at all times the <b>currently selected
pi set</b> in mathematical form at the bottom of the tab. Once you have selected a pi
set, you can proceed to the next tabs. </p></div>"""),
v.CardTitle(children=["The DOE Tab (Optional)"], class_='display-1'),
v.Img(src="Images/DOE_tab_1.png", max_width=1700, class_="mb-3"),
widgets.HTML("""<div class="tab"><p>The <b>DOE tab</b> (Design Of Experiment) allows you
to generate a DOE adapted to your input pi set. At the top of the tab, you will see
which pi numbers have been kept as input pi and which are considered output pi and
therefore are not part of a DOE. A pi number is considered to be an output if at least
one of its physical parameters is an output, otherwise it's an input. The tab also
contains 3 panels:</p><p class="dash"> - The <b>physical parameter constraints</b> panel
allows you to define constraints that will be applied to the DOE in the physical space.
The syntax is: {python expression} {> or <} {another python expression} and to define
multiple constraints, just put a line break between them. Example: x**2 - 1 < y + z.
Only physical parameters are accepted as variables. If your constraints are too
restrictive, an error message will be shown when you generate the DOE.
</p><p class="dash"> - The <b>pi constraints</b> panel has the same purpose as the
previous one but for pi numbers. The syntax is the same but you must only work with the
input pi numbers (written as pi1, pi2 ...). Example: pi1*3 - pi2**-1 > 0
</p><p class="dash"> - The <b>Relative number of points</b> panel allows you to generate
more or fewer points along a particular axis in pi space relative to others. For
example, if one slider is set to 10 and the others to 1, the DOE generator will try to
generate 10 times more points along that particular pi's axis than the others'
(does not work as of 22/12/2021).</p>
<p> Below these panels, you can set the type of DOE (<b>Full-Fact or Latin</b> -> only
Full-Fact available as of 22/12/2021), choose if you want your DOE to be optimized for
<b>log space</b> (recommended for Power Law regression) or for <b>linear space</b>
(recommended for polynomial regression) and the <b>wished size</b> (the number of points
the DOE generator will try to create). In addition, to have an idea of a recommended
size, if you know the model order that you want, you can type it in <b>anticipated model
order</b>. This will set the wished size to 10 times the maximum number of terms for the
given model order (which depends both on the number of input pi numbers and the model
order). If the wished size is too low for regression to work later, a warning will appear.
That means that there would be more terms than points, in which case a regression makes
no sense. Once you set your parameters, just click on <b>generate DOE</b>, in the
dialog box that appears, you can change the name of .csv and then click <b>OK</b>. The
file will appear in your current <b>work directory</b> and 4 plots will be created:
</p><p class="dash"> - The <b>Scatter plot matrix (Physical parameters)</b> shows the
actual DOE points in multiple 2D plots in the physical space. The diagonal contains the
1D distribution of these points for a single parameter. Keep in mind that for each 2D
plot, there may be multiple points "on top" of each other as these plots are all
projections. </p><p class="dash"> - The <b>Scatter plot matrix (Pi numbers)</b> is
roughly the same as the previous one in pi space. The main difference being that points
shown are not only the <b>elected</b> DOE points (dark blue) but also all the
<b>objective</b> points (black), all the <b>feasible</b> points (green), the <b>3
feasible points nearest</b> to an objective point (cyan) and the <b>active</b> objective
points (red); these are objective points that have been "reached" because at least one
feasible point was close enough to it. The diagonal shows again the 1D distribution of
points along a specific pi axis.</p><p class="dash"> - The <b>2D Customizable plot</b>
allows you to see a 2D projection of the DOE with any axis of your choosing and add as
many constraints to it as you like with the range sliders on the right.
</p><p class="dash"> - The <b>Parallel plot</b> shows the DOE points as lines going
through all physical parameters and pi numbers at a height representing the point's
value. By clicking and holding on any axis, you can create a constraint on which lines
are shown. It is possible to do this on multiple axes at once and you can also change the
axis order by dragging a specific axis. The color of the line is based on where it ends
up on the last pi axis.</p><p> You can save any of these plots at any time using the
save button at the top right of each plot. They will appear in your work directory
and will bear a name containing the type of plot as well as the date and time of save.
</p></div>"""),
v.CardTitle(children=["The Result Import Tab"], class_='display-1'),
v.Img(src="Images/result_import_tab.png", max_width=1700, class_="mb-3"),
widgets.HTML("""<div class="tab"><p>The <b>Result import tab</b> is where you can import
a .csv file containing experimental results. The csv headers must have the same names as
your physical parameter names. The syntax for headers is: {name} [{unit}], where the unit
has to be written as a full word, as in the pint registry (m -> meter).
To help you with these strict headers, it is highly advised to use the <b>generate
empty csv</b> button which will generate a .csv with valid headers in your work
directory. Columns in any order are supported. It is recommended for the separator of
your .csv file to be "," and for numbers in your csv to use . (international standard).
That being said, csv files with ";" separators and "," decimal marks are supported, but keep
in mind that generated DOEs follow the previous standard. If you want to sort by a specific
parameter, just click on the arrow at the top of each column (appears on hover). You
can change page or change the number of rows per page at the bottom left of the table.</p>
<p> Once your results are imported you can proceed immediately to the <b>regression
tab</b> if you don't want any dependency or sensitivity analysis (switching to the
dependency analysis tab will launch calculations).</p></div>"""),
v.CardTitle(children=["The Dependency Analysis Tab (Optional)"], class_='display-1'),
v.Img(src="Images/dependency_analysis_tab.png", max_width=1700, class_="mb-3"),
widgets.HTML("""<div class="tab"><p>The <b>Dependency analysis tab</b> is where you can
see which input pi is most correlated with a given output pi and which of the input pi
are correlated between themselves (and therefore redundant).</p><p class="dash"> - The
first plot is the <b>sensitivity analysis</b>: on the vertical axis are the output pi and
on the horizontal axis are the input pi. The color of each graph is determined by its IF
(Impact Factor); a higher IF corresponds to a more "opaque" color.</p><p class="dash"> -
The second plot is the <b>dependency analysis</b>: it shows how correlated input pi
numbers are between each other. You can change the <b>R^2 threshold</b> using the slider
above the graphs; this will show a polynomial regression of the graphs that allow for an
R^2 coefficient above the threshold.</p><p> Below the plots, you will find several
checkboxes that, if unchecked, remove a specific pi number from your current pi set.
Keep in mind that you always need at least one input pi and one output pi, which is why
some checkboxes might sometimes be disabled.</p></div>"""),
v.CardTitle(children=["The Regression Tab"], class_='display-1'),
v.Img(src="Images/regression_tab.png", max_width=1700, class_="mb-3"),
widgets.HTML("""<div class="tab"><p>The <b>Regression tab</b> is where the models will
be generated. The box at the top left shows your current pi set as a reminder. Before you launch
the calculations, choose the output pi of your model (not needed if you have only one
output pi) and the model order (which is not the number of terms). The regression
criteria is the one that the regression algorithm will try to minimize and the
regression type is either power law or polynomial.</p><p>
Once you chose all the regression parameters, click on <b>show models</b>. This might
take some time (especially for higher orders). Once the calculations are over, the first
graph that will appear will show all the model's error criteria as a function of the
number of terms of the models. Notice the blue vertical line, which represents the
currently chosen model. By default it should be the model with 1 term but you can change
it using the slider below. Next to the line will be displayed the error of the current
model as well as (if you have few points compared to the number of terms) the cross
validation error. The cross validation error is the error of the model across points
that were left out of the regression process by the algorithm. It is used to see if the
model is realistically applicable and not just "ad hoc". Below that graph, the model
will be shown in mathematical form with maximum accuracy on all coefficients. If you
want to generate a .py file with the current model as a python function, just click the
button with the python logo. Keep in mind the .py file is created in your work
directory.</p><p> The last two graphs represent the current model. The left graph is simply a
comparison between the model and the points given and the right graph gives the
distribution of error of the model.</p></div>"""),
v.CardTitle(children=["Use case example"], class_='display-2'),
v.Img(src="Images/use_case.gif", max_width=1700, class_="mb-3"),
v.CardTitle(children=["Credits"], class_='display-2'),
widgets.HTML("""<div class="tab"><p> Authors: <b>Arthur Ammeux</b>, <b>Aurélien Reysset
</b>
</p><p>References:</p><p class="dash"> - <b>F. Sanchez, M. Budinger, I. Hazyuk</b>,
"Dimensional analysis and surrogate models for thermal modeling of power electronic
components", Electrimacs conference (2017), Toulouse<br>
</p><p class="dash"> - <b>F. Sanchez, M. Budinger, I. Hazyuk</b>, "Dimensional analysis
and surrogate models for the thermal modeling of Multiphysics systems", Applied
Thermal Engineering 110 (August 2016)</p></div>""")])
# -----------Physical Parameters Tab------------------------------------------------------------------------------------
name_entry = v.TextField(label=TL[22], v_model='', outlined=True)
name_entry.on_event('click', error_end)
name_entry.on_event('keydown.enter', add_item)
desc_entry = v.TextField(label=TL[23], v_model='', outlined=True)
desc_entry.on_event('click', error_end)
desc_entry.on_event('keydown.enter', add_item)
unit_entry = v.TextField(label=TL[24], v_model='', outlined=True, append_icon="mdi-help-circle")
unit_entry.on_event('click', error_end)
unit_entry.on_event('click:append', pint_link)
unit_entry.on_event('keydown.enter', add_item)
lb_entry = v.TextField(v_model="", type="number", label="Lower bound", outlined=True)
lb_entry.on_event('click', error_end)
lb_entry.on_event('keydown.enter', add_item)
ub_entry = v.TextField(v_model="", type="number", label="Upper bound", outlined=True)
ub_entry.on_event('click', error_end)
ub_entry.on_event('keydown.enter', add_item)
add_btn = v.Btn(children=[TL[25]], height=56, width="100%")
add_btn.on_event('click', add_item)
h = [{'text': 'Name', 'sortable': False, 'value': 'name'},
{'text': 'Description', 'sortable': False, 'value': 'description'},
{'text': 'Unit', 'sortable': False, 'value': 'unit'},
{'text': 'Lower bound', 'sortable': False, 'value': 'lower bound'},
{'text': 'Upper Bound', 'sortable': False, 'value': 'upper bound'},
{'text': 'Input/Output', 'sortable': False, 'value': 'in/out'}]
it = [{"name": "x", "description": "length", "unit": "m", "lower bound": 0.1, "upper bound": 100, 'in/out': 'Input'},
{"name": "y", "description": "height", "unit": "cm", "lower bound": 1, "upper bound": 1000, 'in/out': 'Input'},
{"name": "z", "description": "width", "unit": "m", "lower bound": 1, "upper bound": 20, 'in/out': 'Input'},
{"name": "t", "description": "time", "unit": "s", "lower bound": 0.5, "upper bound": 10, 'in/out': 'Input'},
{"name": "f", "description": "frequency", "unit": "s**-1", "lower bound": 100, "upper bound": 20000,
'in/out': 'Input'},
{"name": "v", "description": "speed", "unit": "m/s", "lower bound": 1, "upper bound": 50, 'in/out': 'Output'},
{"name": "a", "description": "acceleration", "unit": "m/s**2", "lower bound": 0.1, "upper bound": 10,
'in/out': 'Input'},
{"name": "b", "description": "acceleration_2", "unit": "m/s**2", "lower bound": 1, "upper bound": 100,
'in/out': 'Output'}
]
icon_up = v.Btn(children=[v.Icon(children=["mdi-arrow-up-bold"], large=True)],
style_="margin : 40px 20px 10px 0px",
icon=True)
icon_down = v.Btn(children=[v.Icon(children=["mdi-arrow-down-bold"], large=True)],
style_="margin : 10px 20px 10px 0px",
icon=True)
icon_del = v.Btn(children=[v.Icon(children=["mdi-delete"], large=True)],
v_on="tooltip.on",
style_="margin : 10px 20px 10px 0px",
icon=True)
tool_del = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': icon_del,
}],
children=[TL[26]])
icon_out = v.Btn(children=[v.Icon(children=["mdi-variable-box"], size=27)],
v_on="tooltip.on",
icon=True,
style_="margin : 10px 20px 10px 0px")
tool_out = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': icon_out,
}],
children=[TL[27]])
icon_del_all = v.Btn(children=[v.Icon(children=["mdi-recycle"], size=27)],
v_on="tooltip.on",
icon=True,
style_="margin : 10px 20px 10px 0px")
tool_del_all = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': icon_del_all,
}],
children=["Delete all parameters"])
icon_up.on_event('click', up_item)
icon_down.on_event('click', down_item)
icon_del.on_event('click', del_item)
icon_out.on_event('click', set_as_out)
icon_del_all.on_event('click', del_all)
sheet = v.DataTable(v_model=[{'name': None}],
show_select=True,
single_select=True,
item_key='name',
headers=h,
items=it,
no_data_text=TL[28],
background_color="blue lighten-3",
layout=widgets.Layout(flex='90 1 auto', width='auto'))
const_alert = v.Alert(type="error",
value=False,
outlined=True,
children=[TL[29]],
transition="scroll-y-transition",
dismissible=True)
col1 = v.Col(children=[name_entry, lb_entry])
col2 = v.Col(children=[desc_entry, ub_entry])
col3 = v.Col(children=[unit_entry, add_btn])
box1 = v.Container(children=[v.Row(children=[col1, col2, col3])])
action_box = widgets.VBox([icon_up, icon_down, tool_del, tool_out, tool_del_all])
box2 = widgets.HBox([action_box, sheet])
box2.layout.align_content = "center"
box2.layout.justify_content = "space-between"
const_info = v.Alert(type="info", border="top", children=[TL[30]])
prio_info = v.Alert(type="info", border="top",
children=[TL[31],
v.Icon(children=["mdi-arrow-up-bold"]),
v.Icon(children=["mdi-arrow-down-bold"])])
vbox = widgets.VBox([const_info, box1, prio_info, box2, const_alert])
vbox.layout.margin = "15px 0px 10px 0px"
# -------- Buckingham Tab-----------------------------------------------------------------------------------------------
buck_error = v.Alert(type="error", dense=True, outlined=True,
children=[TL[32]])
buck_warn = v.Alert(type="warning", dense=True, outlined=True,
children=[TL[33]])
buck_area = v.Textarea(v_model='',
style_="margin : 15px 0px 0px 0px",
label=TL[34],
background_color="grey lighten-3",
readonly=True,
outlined=True,
auto_grow=True,
row=15)
force_buck_info = v.Alert(type="info", border="top", style_="margin : 5px",
children=[TL[35]]
)
force_eq = v.TextField(v_model='', label=TL[36], width=300, outlined=True, class_="mx-2")
force_eq.on_event('click', error_end)
force_eq.on_event('keydown.enter', add_pi)
add_pi_btn = v.Btn(children=[TL[37]], class_="mx-2", height="55")
add_pi_btn.on_event("click", add_pi)
force_copy_btn = v.Btn(children=[v.Icon(children=["mdi-clipboard-text-multiple-outline"])],
v_on='tooltip.on',
large=True,
icon=True,
disabled=True)
force_copy_btn.on_event('click', force_copy)
tool_copy = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': force_copy_btn,
}],
children=[TL[38]])
force_box = v.Container(justify="space-between", align_content="center",
children=[v.Row(children=[force_eq, add_pi_btn, tool_copy])])
force_area = v.Textarea(v_model='',
label=TL[39],
outlined=True,
background_color="white",
clearable=True,
auto_grow=True,
row=6)
force_area.on_event('click', error_end)
force_buck_btn = v.Btn(children=[TL[11]], width="50%", max_width=500)
force_buck_btn.on_event('click', force_buckingham)
box4 = v.Container(children=[v.Row(children=[force_buck_btn], justify="center")])
auto_buck_btn = v.Btn(children=[TL[40]], width="50%", max_width=500)
auto_buck_btn.on_event('click', automatic_buckingham)
box5 = v.Container(children=[v.Row(children=[auto_buck_btn], justify="center")])
buck_h = [{'text': TL[41], 'sortable': True, 'value': 'pi set number'},
{'text': TL[42], 'sortable': False, 'value': 'expressions'}]
auto_buck_table = v.DataTable(v_model=[{'pi set number': None}],
show_select=True,
single_select=True,
checkbox_color="green",
items=[],
item_key='pi set number',
headers=buck_h,
no_data_text=TL[43],
layout=widgets.Layout(flex='90 1 auto', width='auto'))
auto_buck_table.on_event('item-selected', select_auto_pi_set)
check1 = v.Checkbox(v_model=True, label=TL[44], color="green")
check1.on_event('change', check1_change)
check2 = v.Checkbox(v_model=False, label=TL[44], color="green", disabled=True)
check2.on_event('change', check2_change)
check3 = v.Checkbox(v_model=False, label=TL[44], color="green", disabled=True)
check3.on_event('change', check3_change)
exp_panel = v.ExpansionPanels(v_model=[0], multiple=True, children=[
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=[TL[45]]),
v.ExpansionPanelContent(children=[buck_area, check1])
]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=[TL[46]]),
v.ExpansionPanelContent(children=[force_buck_info,
force_box,
force_area,
box4,
check2])
]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=[TL[47]]),
v.ExpansionPanelContent(children=[box5, auto_buck_table, check3])
])
])
current_set = v.Card(color="grey lighten-3", margin=10, width=600,
children=[v.CardTitle(class_="title font-weight-regular",
children=[TL[48]]),
v.CardText(class_="body-1", children=[])])
set_box = widgets.HBox([current_set])
set_box.layout.justify_content = "center"
set_box.layout.margin = "15px 0px 10px 0px"
vbox2 = widgets.VBox([exp_panel, set_box])
vbox2.layout.margin = "15px 0px 10px 0px"
vbox2.layout.justify_content = "space-between"
# ---------- DOE Tab ---------------------------------------------------------------------------------------------------
input_pi = v.Card(color="green lighten-3",
width="48%",
class_="mx-2",
children=[v.CardTitle(class_="title font-weight-regular",
children=[TL[49]]),
v.CardText(class_="body-1", children=[])])
output_pi = v.Card(color="blue-grey lighten-3",
width="48%",
class_='mx-2',
children=[v.CardTitle(class_="title font-weight-regular",
children=[TL[50]]),
v.CardText(class_="body-1", children=[])])
top_cont = v.Container(children=[v.Row(justify="space-between", children=[input_pi, output_pi])])
phy_const_entry = v.TextField(v_model='',
label="Declare physical parameter constraint",
width=300,
outlined=True,
class_="mx-2")
phy_const_entry.on_event("keydown.enter", add_phy_const)
phy_const_entry.on_event("click", error_end)
phy_const_btn = v.Btn(children=["Add constraint"], class_="mx-2", height="55")
phy_const_btn.on_event("click", add_phy_const)
phy_const_row = v.Row(style_="margin : 5px", children=[phy_const_entry, phy_const_btn])
phy_const_area = v.Textarea(v_model='',
label="Physical parameter constraints",
outlined=True,
background_color="white",
clearable=True,
auto_grow=True,
row=6)
phy_const_area.on_event("click", error_end)
pi_const_entry = v.TextField(v_model='', label="Declare pi constraint", width=300, outlined=True, class_="mx-2")
pi_const_entry.on_event("keydown.enter", add_pi_const)
pi_const_entry.on_event("click", error_end)
pi_const_btn = v.Btn(children=["Add constraint"], class_="mx-2", height="55")
pi_const_btn.on_event("click", add_pi_const)
pi_const_row = v.Row(style_="margin : 5px", children=[pi_const_entry, pi_const_btn])
pi_const_area = v.Textarea(v_model='',
label="Pi constraints",
outlined=True,
background_color="white",
clearable=True,
auto_grow=True,
row=6)
pi_const_area.on_event("click", error_end)
relative_nb_points_info = v.Alert(type="info",
border="top",
class_="my-2",
children=["Slider numbers correspond to how many points will be generated in the DOE "
"along a specific pi axis relative to others"])
relative_nb_points_sliders = v.Col(children=[])
const_panel = v.ExpansionPanels(v_model=[0], multiple=True, children=[
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Physical parameter constraints"]),
v.ExpansionPanelContent(children=[phy_const_row, phy_const_area])]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Pi constraints"]),
v.ExpansionPanelContent(children=[pi_const_row, pi_const_area])]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Relative number of points"]),
v.ExpansionPanelContent(children=[relative_nb_points_info, relative_nb_points_sliders])])
])
select_DOE = v.Select(v_model="Full Fact", label="Select DOE type", outlined=True, items=["Full Fact", "Latin"],
class_="mx-2")
select_log = v.Select(v_model="Log", label="Log/Linear", outlined=True, items=["Log", "Linear"], class_="mx-2")
anticipated_mo_entry = v.TextField(v_model=1,
label="Anticipated model order (optional)",
type="number",
width="20%",
outlined=True,
class_="mx-2")
anticipated_mo_entry.on_event("change", mo_to_size)
size_info = v.Alert(type="info",
border="top",
style_="margin : 15px 0 20px 0px",
class_="mx-2",
children=["Default wished size is determined by the anticipated model order "
"(10x the maximum number of model terms)"])
wished_size_entry = v.TextField(v_model=4 * DOE_MULTIPLIER,
label="Wished size",
type="number",
width="20%",
outlined=True,
class_="mx-2")
wished_size_entry.on_event("change", check_size)
gen_DOE_btn = v.Btn(children=["Generate DOE"], class_="mx-2", height=55, width=200)
gen_DOE_btn.on_event("click", gen_doe)
doe_tf = v.TextField(label="Filename without .csv extension", outlined=True, v_model="")
doe_tf.on_event("click", error_end)
doe_tf.on_event("keydown.enter", hide_doe)
dialog3 = v.Dialog(width='600',
v_model='dialog',
children=[
v.Card(color="white", children=[
v.CardTitle(class_='headline gray lighten-2', primary_title=True, children=[
"Name .csv file"
]),
v.CardText(children=["Current work directory: " + WORKDIR], class_="font-italic"),
v.CardText(children=[
doe_tf,
v.Btn(children=['OK'])
])
])
])
dialog3.v_model = False
dialog3.children[0].children[2].children[1].on_event("click", hide_doe)
DOE_rows = v.Col(children=[v.Row(children=[select_DOE, select_log]),
v.Row(children=[anticipated_mo_entry, wished_size_entry, gen_DOE_btn, dialog3])])
DOE_cont = v.Col(children=[const_panel, size_info, DOE_rows])
phy_scatter_matrix_output = widgets.Output()
pi_scatter_matrix_output = widgets.Output()
customizable_2d_plot_output = widgets.Output()
select_2d_x = v.Select(label="Select x-axis", outlined=True, items=[], class_="mx-2")
select_2d_x.on_event("change", customize_2d_plot)
select_2d_y = v.Select(label="Select y-axis", outlined=True, items=[], class_="mx-2")
select_2d_y.on_event("change", customize_2d_plot)
range_sliders = v.Col(children=[], style_="margin : 0 10px 0 10px")
range_slider_card = v.Card(children=[range_sliders], max_height=580,
style_='overflow-y: auto; overflow-x: hidden', class_="mx_2")
container_2d = v.Col(children=[v.Row(children=[select_2d_x, select_2d_y], align_content="center"),
v.Row(justify="space-around", children=[customizable_2d_plot_output, range_slider_card])
])
parallel_plot_box = widgets.HBox([])
phy_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on')
phy_save_btn.on_event('click', save_phy)
tool_save_phy = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': phy_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
save_phy_cont = v.Row(children=[v.Spacer(), tool_save_phy])
pi_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on')
pi_save_btn.on_event('click', save_pi)
tool_save_pi = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': pi_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
save_pi_cont = v.Row(children=[v.Spacer(), tool_save_pi])
cus_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on')
cus_save_btn.on_event('click', save_cus)
tool_save_cus = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': cus_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
para_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on')
para_save_btn.on_event('click', save_para)
tool_save_para = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': para_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
save_para_cont = v.Row(children=[v.Spacer(), tool_save_para])
container_2d.children[0].children = container_2d.children[0].children + [tool_save_cus]
exp_panel_doe = v.ExpansionPanels(v_model=[0], multiple=True, style_='overflow-y: hidden', children=[
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Scatter plot matrix (Physical Parameters)"]),
v.ExpansionPanelContent(children=[save_phy_cont, phy_scatter_matrix_output])
]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Scatter plot matrix (Pi numbers)"]),
v.ExpansionPanelContent(children=[save_pi_cont, pi_scatter_matrix_output])
]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["2D Customizable plot"]),
v.ExpansionPanelContent(children=[container_2d])
]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Parallel plot"]),
v.ExpansionPanelContent(children=[save_para_cont, parallel_plot_box])
])
])
no_pi_set_error = v.Alert(type="error", dense=True, outlined=True,
children=["No selected pi set, please select a pi set in the Buckingham tab"])
no_output_error = v.Alert(type="error", dense=True, outlined=True,
children=["No pi number has been found to be an output,"
" check if there is at least one output physical parameter"])
no_input_error = v.Alert(type="error", dense=True, outlined=True,
children=["No pi number has been found to be an input,"
" check if there is at least one input physical parameter"
" and enough parameters to make an input dimensionless number"])
change_pi_set_warning = v.Alert(type="warning", dense=True, outlined=True,
children=["Selected pi set changed"])
doe_alert_cont = v.Container(children=[])
doe_box = widgets.VBox([doe_alert_cont, top_cont, DOE_cont])
# ---------- Result import Tab------------------------------------------------------------------------------------------
result_info = v.Alert(type="info", border="top", children=["Import result supports only .csv files"])
empty_csv_btn = v.Btn(children=["Generate empty csv"], width="30%", max_width=400, height=55, class_="mx-2")
empty_csv_btn.on_event("click", gen_empty_csv)
result_btn = v.Btn(children=["Import result"], width="30%", height=55, max_width=400, class_="mx-2")
result_btn.on_event("click", result_import)
result_alert = v.Alert(type="error", dense=True, outlined=True, children=["Error"])
result_warning = v.Alert(type="warning", dense=True, outlined=True, children=["No physical parameter defined"])
result_warning_2 = v.Alert(type="warning", dense=True, outlined=True, children=[])
result_alert_cont = v.Container(children=[])
fc_res = ipf.FileChooser(WORKDIR)
fc_res.filter_pattern = '*.csv'
fc_res._show_dialog()
fc_res.register_callback(hide_res_import)
dialog4 = v.Dialog(width='600',
v_model='dialog3',
children=[
v.Card(color="blue-grey lighten-4", children=[
v.CardTitle(class_='headline gray lighten-2', primary_title=True, children=[
"Choose .csv file"
]),
v.CardText(children=[
fc_res
])
])
])
dialog4.v_model = False
empty_tf = v.TextField(label="Filename without .csv extension", outlined=True, v_model="")
empty_tf.on_event("click", error_end)
empty_tf.on_event("keydown.enter", hide_empty_csv)
dialog5 = v.Dialog(width='600',
v_model='dialog',
children=[
v.Card(color="blue-grey lighten-4", children=[
v.CardTitle(class_='headline gray lighten-2', primary_title=True, children=[
"Name .csv file"
]),
v.CardText(children=["Current work directory: " + WORKDIR], class_="font-italic"),
v.CardText(children=[
empty_tf,
v.Btn(children=['OK'])
])
])
])
dialog5.v_model = False
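# wire the OK button (Card -> third child CardText -> Btn) to the same handler as pressing Enter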
dialog5.children[0].children[2].children[1].on_event("click", hide_empty_csv)
res_but_cont = v.Row(justify="center", children=[empty_csv_btn, result_btn])
result_h = [{'text': 'Measure', 'sortable': True, 'value': 'Measure'},
{'text': 'Parameters', 'sortable': True, 'value': 'Parameters'}]
result_data = v.DataTable(v_model=[{'pi set number': None}],
items=[],
item_key='Measure',
headers=result_h,
no_data_text="No result imported",
layout=widgets.Layout(flex='90 1 auto', width='auto'))
result_box = v.Col(children=[result_info, res_but_cont, dialog4, dialog5, result_alert_cont, result_data])
# ---------- Dependency analysis Tab------------------------------------------------------------------------------------
dependency_result_alert = v.Alert(type="error", dense=True, outlined=True, children=["No result imported"])
dependency_pi_set_alert = v.Alert(type="error", dense=True, outlined=True, children=["No pi set defined"])
dependency_change_alert = v.Alert(type="warning", dense=True, outlined=True, children=["Imported result changed"])
dependency_change_alert_2 = v.Alert(type="warning", dense=True, outlined=True, children=["Pi set changed"])
dependency_alert_cont = v.Container(children=[])
sensitivity_info = v.Alert(type="info", border="top", style_="margin : 10px 0 10px 0px", class_="mx-2",
children=["MCC : Maximum Correlation Coefficient between Pearson and Spearman -- ",
"alpha : Relative standard deviation (on dimensionless parameter) -- ",
"IF : Impact factor IF=MCC*alpha"])
sensitivity_output = widgets.Output()
threshold_slider = v.Slider(label="R^2 threshold", v_model=0.9, min=0, max=1, step=0.01, thumb_label="always",
thumb_size=24, class_="mx-2")
threshold_slider.on_event("change", change_threshold)
threshold_cont = v.Container(children=[threshold_slider])
dependency_output = widgets.Output()
checkbox_info = v.Alert(type="info", border="top", style_="margin : 10px 0 10px 0px", class_="mx-2",
children=["Uncheck a pi number to remove it from the current set"])
dependency_checkboxes = v.Row(children=[], class_="mx-2")
dependency_checkboxes.layout.justify_content = "space-between"
checkboxes_cont = widgets.HBox([dependency_checkboxes])
checkboxes_cont.layout.justify_content = "center"
sen_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on')
sen_save_btn.on_event('click', save_sen)
tool_save_sen = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': sen_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
save_sen_cont = v.Row(children=[v.Spacer(), tool_save_sen])
dep_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on')
dep_save_btn.on_event('click', save_dep)
tool_save_dep = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': dep_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
save_dep_cont = v.Row(children=[v.Spacer(), tool_save_dep])
exp_panel_dependency = v.ExpansionPanels(v_model=[0], multiple=True, children=[
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Sensitivity analysis"]),
v.ExpansionPanelContent(children=[sensitivity_info, save_sen_cont, sensitivity_output])
]),
v.ExpansionPanel(children=[v.ExpansionPanelHeader(color="grey lighten-3",
class_='title font-weight-regular',
children=["Dependency analysis"]),
v.ExpansionPanelContent(children=[threshold_cont, save_dep_cont, dependency_output])
])
], class_="overflow-hidden")
exp_panel_cont = v.Container(children=[exp_panel_dependency])
update_dependency_plots_btn = v.Btn(children=["Update plots"], class_="mx-2", height=55,
style_="margin : 0px 0 10px 0px")
update_dependency_plots_btn.on_event("click", update_dependency_plots)
update_plots_row = v.Row(justify="center", children=[update_dependency_plots_btn])
pi_removal_card = v.Card(children=[v.CardTitle(children=["Remove pi number"], class_="title font-weight-medium"),
checkbox_info, checkboxes_cont,
update_plots_row])
pi_removal_cont = v.Container(children=[pi_removal_card])
dependency_set = v.Card(color="grey lighten-3", margin=10, width=500,
children=[v.CardTitle(class_="title font-weight-regular",
children=["No pi set defined"]),
v.CardText(class_="body-1", children=[])])
set_box_2 = widgets.HBox([dependency_set])
set_box_2.layout.justify_content = "center"
set_box_2.layout.margin = "10px 0px 15px 0px"
dependency_vbox = widgets.VBox([dependency_alert_cont, exp_panel_dependency, pi_removal_cont, set_box_2])
dependency_vbox.layout.justify_content = "space-between"
# ---------- Regression Tab---------------------------------------------------------------------------------------------
reg_alert_cont = v.Col(children=[])
reg_no_result_error = v.Alert(type="error", dense=True, outlined=True, children=["No result imported"])
reg_no_pi_set_error = v.Alert(type="error", dense=True, outlined=True, children=["No pi set defined"])
reg_info = v.Alert(type="info", border="top", class_="mx-2", style_="margin : 12px 0px 0px 0px",
children=["If the number of points of the result > 10 * the number of models"
" the cross-validation step will be skipped"])
select_pi0 = v.Select(v_model="", label="Select pi0 (output)", outlined=True,
items=[])
model_order_entry = v.TextField(v_model=1,
label="Model order",
type="number",
outlined=True)
model_order_entry.on_event("click", error_end)
select_reg_criteria = v.Select(v_model="max(error)", label="Select regression criteria", outlined=True,
items=["max(error)", "avg(error magnitude)", "avg(error)", "sigma(error)"])
select_reg_type = v.Select(v_model="Power Law", label="Select regression type", outlined=True,
items=["Power Law", "Polynomial"])
models_btn = v.Btn(children=["Show models"], class_="mx-2", height=55, width=300)
models_btn.on_event("click", regression_models)
models_btn_row = v.Row(children=[models_btn], justify="center")
regression_parameters = v.Row(children=[dependency_set,
v.Col(children=[
v.Row(children=[
v.Col(children=[select_pi0, select_reg_criteria]),
v.Col(children=[model_order_entry, select_reg_type])]),
models_btn_row],
class_="mx-2"),
v.Spacer()
], justify="center")
models_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on')
models_save_btn.on_event('click', save_models)
tool_save_models = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': models_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
save_models_cont = v.Row(children=[v.Spacer(), tool_save_models])
reg_save_btn = v.Icon(children=["mdi-content-save"], large=True, v_on='tooltip.on', disabled=True)
reg_save_btn.on_event('click', save_reg)
tool_save_reg = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': reg_save_btn,
}],
children=["Save plot"],
class_="overflow-hidden")
save_reg_cont = v.Row(children=[v.Spacer(), tool_save_reg])
models_output = widgets.Output()
#models_output.layout.border = "2px solid black"
models_output_col = v.Col(children=[save_models_cont, models_output])
# Heuristic slider margin: scale the right margin with the current window width
# (queried via win32 GetForegroundWindow/GetWindowRect); the constants appear to
# be tuned against a 1928 px wide reference window.
delta = 0.12
_, _, x_w, _ = GetWindowRect(GetForegroundWindow())
slider_style = f"margin : 0px {int(118 / (1 - delta) * (x_w / 1928 - delta))}px 0px 56px"
nb_terms_slider = v.Slider(v_model=0,
class_="mx-2",
tick_labels=[1, 2],
max=2,
step=1,
ticks="always",
tick_size="4")
nb_terms_slider.on_event("change", perform_regression)
nb_terms_slider_row = v.Row(children=[nb_terms_slider], style_=slider_style)
regression_output = widgets.Output()
python_func_btn = v.Btn(children=[v.Icon(children=["mdi-language-python"], large=True)], v_on='tooltip.on', width=200)
python_func_btn.on_event('click', save_py_func)
tool_python_func = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': python_func_btn,
}],
children=["Save model as a python function"],
class_="overflow-hidden")
expression_cont = v.Container(children=[], style_="margin : 0px 10px 10px 10px")
expression_card = v.Card(children=[v.CardTitle(class_="title font-weight-medium", children=["Model expression:"]),
expression_cont], class_="my-3", style_='overflow-x: auto',
color="green lighten-3")
expression_col = v.Col(children=[expression_card, tool_python_func], align="center")
regression_cont = v.Col(children=[reg_alert_cont, regression_parameters, reg_info])
# --------- Main widgets------------------------------------------------------------------------------------------------
tabs = v.Tabs(v_model="tab", children=[v.Tab(children=["Tutorial"]),
v.Tab(children=["Physical parameters"]),
v.Tab(children=["Buckingham theorem"]),
v.Tab(children=["DOE"]),
v.Tab(children=["Result import"]),
v.Tab(children=["Dependency analysis"]),
v.Tab(children=["Regression"]),
v.TabItem(children=[tutorial_box]),
v.TabItem(children=[vbox]),
v.TabItem(children=[vbox2]),
v.TabItem(children=[doe_box]),
v.TabItem(children=[result_box]),
v.TabItem(children=[dependency_vbox]),
v.TabItem(children=[regression_cont])],
background_color="grey lighten-3", center_active=True)
tabs.on_event('change', change_tab)
fc_dir = ipf.FileChooser('../')
fc_dir.show_only_dirs = True
fc_dir.register_callback(hide_dir)
fc_dir._show_dialog()
dir_btn = v.Icon(children=["mdi-file-tree"], large=True, class_="mx-2", v_on='tooltip.on')
dir_btn.on_event("click", choose_dir)
tool_dir = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': dir_btn,
}],
children=["Choose working directory"])
dialog_dir = v.Dialog(width='600',
v_model='dialog',
children=[
v.Card(color="green lighten-4", children=[
v.CardTitle(class_='headline gray lighten-2', primary_title=True, children=[
"Choose working directory"]),
v.CardText(children=["Current work directory: " + WORKDIR], class_="font-italic"),
v.CardText(children=[fc_dir], class_="body-1")
])
])
dialog_dir.v_model = False
save_btn = v.Icon(children=["mdi-content-save"], large=True, class_="mx-2", v_on='tooltip.on')
save_btn.on_event('click', save)
tool_save = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': save_btn,
}],
children=["Save"])
save_as_btn = v.Icon(children=["mdi-content-save-edit"], large=True, class_="mx-2", v_on='tooltip.on')
save_as_btn.on_event('click', save_as)
tool_save_as = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': save_as_btn,
}],
children=["Save as"])
save_as_tf = v.TextField(label="Filename without .txt extension", outlined=True, v_model="")
save_as_tf.on_event("click", error_end)
save_as_tf.on_event("keydown.enter", hide_save_as)
dialog = v.Dialog(width='600',
v_model='dialog',
children=[
v.Card(color="blue lighten-4", children=[
v.CardTitle(class_='headline gray lighten-2', primary_title=True, children=[
"Save as"
]),
v.CardText(children=["Current work directory: " + WORKDIR], class_="font-italic"),
v.CardText(children=[
save_as_tf,
v.Btn(color='primary', children=['OK'])
])
])
])
dialog.v_model = False
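# wire the "Save as" OK button (Card -> third child CardText -> Btn) to the same handler as pressing Enter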
dialog.children[0].children[2].children[1].on_event("click", hide_save_as)
save_plots_btn = v.Icon(children=["mdi-image-multiple"], large=True, class_="mx-2", v_on='tooltip.on')
save_plots_btn.on_event("click", save_plots)
tool_save_plots = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': save_plots_btn,
}],
children=["Save all generated plots"])
fc_load = ipf.FileChooser(WORKDIR)
fc_load._show_dialog()
fc_load.filter_pattern = '*.txt'
fc_load.register_callback(hide_ld)
load_btn = v.Icon(children=["mdi-folder"], large=True, class_="mx-2", v_on='tooltip.on')
load_btn.on_event("click", load)
tool_load = v.Tooltip(bottom=True, v_slots=[{
'name': 'activator',
'variable': 'tooltip',
'children': load_btn,
}],
children=["Load"])
dialog2 = v.Dialog(width='600',
v_model='dialog2',
children=[
v.Card(color="orange lighten-4", children=[
v.CardTitle(class_='headline gray lighten-2', primary_title=True, children=[
"Load"
]),
v.CardText(children=[fc_load])
])
])
dialog2.v_model = False
tool_icons_card = v.Card(children=[tool_dir, dialog_dir, tool_save_as, dialog, tool_save, dialog2, tool_save_plots,
tool_load],
class_="mx-2")
log_class = "mx-2; pa-0; overflow-x: auto; overflow-y:auto"
logs_card = v.Sheet(children=[], height=37, rounded=True, width="50%", class_=log_class,
style_="flex-wrap: nowrap;overflow-x: auto", elevation=2)
logs_card.class_ = logs_card.class_ + "; grey--text"
logs_card.children = [v.Html(tag='div', children=["Default work directory: " + WORKDIR], class_="text-left py-2 px-2")]
img_cont = v.Row(children=[v.Img(src="Images/logo_cut.png", max_height="55px", max_width="200px", contain=False,
class_="mr-3")],
justify="end")
sl_tool = v.Toolbar(children=[tool_icons_card, logs_card, img_cont], color="grey lighten-3",
justify="space-between")
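# Top-level layout: the toolbar (file tools, log line, logo) sits above the tab set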
main = v.Card(children=[sl_tool, tabs])
|
{"hexsha": "3e49f05c1a610a9ff53908fbc6a068c16b2ab4ba", "size": 170253, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvplm/gui/pyVPLM_GUI.py", "max_stars_repo_name": "ArthurAmmeux/pyVPLM-GUI", "max_stars_repo_head_hexsha": "e7b0866137b0f83455aa7e839527a95b668e964b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyvplm/gui/pyVPLM_GUI.py", "max_issues_repo_name": "ArthurAmmeux/pyVPLM-GUI", "max_issues_repo_head_hexsha": "e7b0866137b0f83455aa7e839527a95b668e964b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyvplm/gui/pyVPLM_GUI.py", "max_forks_repo_name": "ArthurAmmeux/pyVPLM-GUI", "max_forks_repo_head_hexsha": "e7b0866137b0f83455aa7e839527a95b668e964b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3162097418, "max_line_length": 124, "alphanum_fraction": 0.5445777754, "include": true, "reason": "import numpy", "num_tokens": 35736}
|
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!!
!! AUTHOR: Kenneth Leiter (kenneth.leiter@arl.army.mil)
!!
!! Use the Xdmf Fortran Bindings to write out a simple mesh consisting of
!! two hexahedrons. Link against the XdmfUtils library to compile.
!!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
PROGRAM XdmfFortranExample
Implicit none
INCLUDE 'Xdmf.f'
INTEGER*8 obj
character*256 filename
REAL*4 myPoints(3,3,4)
INTEGER myConnections(8,2), myDimensions(3)
REAL*8 myCellAttribute(2), myNodeAttribute(3,4), &
& mySmallerNode(3,2), myTime, myOrigin(3), myBrick(3)
INTEGER nodeAttributeId, nodeSmallAttributeId, cellAttributeId, &
& testSetID, testMapID, tempID
filename = 'my_fixed_form_output.xmf'//CHAR(0)
myPoints(1,1,1) = 0
myPoints(2,1,1) = 0
myPoints(3,1,1) = 1
myPoints(1,2,1) = 1
myPoints(2,2,1) = 0
myPoints(3,2,1) = 1
myPoints(1,3,1) = 3
myPoints(2,3,1) = 0
myPoints(3,3,1) = 2
myPoints(1,1,2) = 0
myPoints(2,1,2) = 1
myPoints(3,1,2) = 1
myPoints(1,2,2) = 1
myPoints(2,2,2) = 1
myPoints(3,2,2) = 1
myPoints(1,3,2) = 3
myPoints(2,3,2) = 2
myPoints(3,3,2) = 2
myPoints(1,1,3) = 0
myPoints(2,1,3) = 0
myPoints(3,1,3) = -1
myPoints(1,2,3) = 1
myPoints(2,2,3) = 0
myPoints(3,2,3) = -1
myPoints(1,3,3) = 3
myPoints(2,3,3) = 0
myPoints(3,3,3) = -2
myPoints(1,1,4) = 0
myPoints(2,1,4) = 1
myPoints(3,1,4) = -1
myPoints(1,2,4) = 1
myPoints(2,2,4) = 1
myPoints(3,2,4) = -1
myPoints(1,3,4) = 3
myPoints(2,3,4) = 2
myPoints(3,3,4) = -2
myConnections(1,1) = 0
myConnections(2,1) = 1
myConnections(3,1) = 7
myConnections(4,1) = 6
myConnections(5,1) = 3
myConnections(6,1) = 4
myConnections(7,1) = 10
myConnections(8,1) = 9
myConnections(1,2) = 1
myConnections(2,2) = 2
myConnections(3,2) = 8
myConnections(4,2) = 7
myConnections(5,2) = 4
myConnections(6,2) = 5
myConnections(7,2) = 11
myConnections(8,2) = 10
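      !! The two hexahedra above share the four nodes 1, 4, 7 and 10,
      !! i.e. one common quadrilateral face.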
myNodeAttribute(1,1) = 100
myNodeAttribute(1,2) = 300
myNodeAttribute(1,3) = 300
myNodeAttribute(1,4) = 500
myNodeAttribute(2,1) = 200
myNodeAttribute(2,2) = 400
myNodeAttribute(2,3) = 400
myNodeAttribute(2,4) = 600
myNodeAttribute(3,1) = 300
myNodeAttribute(3,2) = 500
myNodeAttribute(3,3) = 500
myNodeAttribute(3,4) = 700
myCellAttribute(1) = 100
myCellAttribute(2) = 200
myDimensions(1) = 12
myDimensions(2) = 12
myDimensions(3) = 12
myOrigin(1) = 0
myOrigin(2) = 0
myOrigin(3) = 0
myBrick(1) = 12
myBrick(2) = 12
myBrick(3) = 12
myTime = 1.0
CALL XDMFINIT(obj, filename)
CALL XDMFSETMAXFILESIZE(obj, 1)
CALL XDMFSETALLOWSETSPLITTING(obj, .TRUE.)
CALL XDMFINITHDF5(obj, 'my_output.h5'//CHAR(0), .TRUE.)
tempID = XDMFADDINFORMATION(obj, 'GridCollection1'//CHAR(0), &
& 'This is Grid collection 1'//CHAR(0))
CALL XDMFADDGRIDCOLLECTION(obj, "Temporal"//CHAR(0), &
& XDMF_GRID_COLLECTION_TYPE_TEMPORAL)
CALL XDMFADDMAP(obj, "TestMap"//CHAR(0))
CALL XDMFADDREMOTENODEID(obj, 0, 1, 2, 3)
CALL XDMFADDREMOTENODEID(obj, 0, 1, 2, 4)
CALL XDMFADDREMOTENODEID(obj, 0, 1, 3, 3)
CALL XDMFADDREMOTENODEID(obj, 0, 1, 3, 5)
testMapID = XDMFSTOREMAP(obj, 0)
CALL XDMFADDREMOTENODEID(obj, 0, 1, 3, 8)
CALL XDMFSETTIME(obj, myTime)
!! Unstructured Only
tempID = XDMFSETTOPOLOGY(obj, XDMF_TOPOLOGY_TYPE_HEXAHEDRON, 16,&
& XDMF_ARRAY_TYPE_INT32, myConnections, 0)
!! /Unstructured Only
!! Curvilinear and Rectilinear Only
tempID = XDMFSETDIMENSIONS(obj, 3, XDMF_ARRAY_TYPE_INT32, &
& myDimensions)
!! /Curvilinear and Rectilinear Only
!! Unstructured and Curvilinear Only
tempID = XDMFSETGEOMETRY(obj, XDMF_GEOMETRY_TYPE_XYZ, 36, &
& XDMF_ARRAY_TYPE_FLOAT32, myPoints)
!! /Unstructured and Curvilinear Only
!! Rectilinear Only
tempID = XDMFADDCOORDINATE(obj, "XCoordinates"//CHAR(0), 12, &
& XDMF_ARRAY_TYPE_FLOAT32, myPoints(1,1,1))
tempID = XDMFADDCOORDINATE(obj, "YCoordinates"//CHAR(0), 12, &
& XDMF_ARRAY_TYPE_FLOAT32, myPoints(1,2,2))
tempID = XDMFADDCOORDINATE(obj, "ZCoordinates"//CHAR(0), 12, &
& XDMF_ARRAY_TYPE_FLOAT32, myPoints(1,3,3))
!! /Rectilinear Only
!! Regular Only
tempID = XDMFSETORIGIN(obj, 3, XDMF_ARRAY_TYPE_FLOAT64, &
& myOrigin)
tempID = XDMFSETBRICK(obj, 3, XDMF_ARRAY_TYPE_FLOAT64, myBrick)
!! /Regular Only
testSetID = XDMFADDSET(obj, 'TestSet'//CHAR(0), &
& XDMF_SET_TYPE_NODE, myNodeAttribute, 12, &
& XDMF_ARRAY_TYPE_FLOAT64)
tempID = XDMFADDINFORMATION(obj, 'Attrib1'//CHAR(0), &
& 'This is Attribute 1'//CHAR(0))
nodeAttributeId = XDMFADDATTRIBUTE(obj, 'NodeValues'//CHAR(0), &
& XDMF_ATTRIBUTE_CENTER_NODE, XDMF_ATTRIBUTE_TYPE_SCALAR, 12,&
& XDMF_ARRAY_TYPE_FLOAT64, myNodeAttribute)
PRINT *, 'Node Attribute ID: ', nodeAttributeId
CALL XDMFRETRIEVEATTRIBUTEVALUES(obj, 0, mySmallerNode, &
& XDMF_ARRAY_TYPE_FLOAT64, 6, 0, 1, 1)
tempID = XDMFADDINFORMATION(obj, 'Attrib2'//CHAR(0), &
& 'This is Attribute 2'//CHAR(0))
cellAttributeId = XDMFADDATTRIBUTE(obj, 'CellValues'//CHAR(0), &
& XDMF_ATTRIBUTE_CENTER_CELL, XDMF_ATTRIBUTE_TYPE_SCALAR, 2, &
& XDMF_ARRAY_TYPE_FLOAT64, myCellAttribute)
PRINT *, 'Cell Attribute ID: ', cellAttributeId
nodeSmallAttributeId = XDMFADDATTRIBUTE(obj, &
& 'SmallNodeValues'//CHAR(0), XDMF_ATTRIBUTE_CENTER_NODE, &
& XDMF_ATTRIBUTE_TYPE_SCALAR, 6, XDMF_ARRAY_TYPE_FLOAT64, &
& mySmallerNode)
PRINT *, 'Node Attribute ID: ', nodeSmallAttributeId
tempID = XDMFADDINFORMATION(obj, 'Grid1'//CHAR(0), &
& 'This is Grid 1'//CHAR(0))
tempID = XDMFADDINFORMATION(obj, 'SubInformation'//CHAR(0), &
& 'This is an information inside an information'//CHAR(0))
CALL XDMFADDINFORMATIONARRAY(obj, 1, "Array"//CHAR(0), myBrick, &
& 3, XDMF_ARRAY_TYPE_FLOAT64)
CALL XDMFMODIFYINFORMATIONARRAY(obj, 1, 0, myBrick, &
& XDMF_ARRAY_TYPE_FLOAT64, 3, 3, 1, 1)
CALL XDMFINSERTINFORMATIONINTOINFORMATION(obj, 0, 1, .TRUE.)
CALL XDMFADDGRID(obj, 'TestGrid'//CHAR(0), .FALSE.)
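      !! Second time step: the ADDPREVIOUS* calls below reuse the
      !! attributes, map and set registered for the first grid.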
myTime = 2.0
CALL XDMFSETTIME(obj, myTime)
CALL XDMFADDPREVIOUSATTRIBUTE(obj, cellAttributeId)
CALL XDMFADDPREVIOUSMAP(obj, testMapID)
CALL XDMFADDPREVIOUSSET(obj, testSetID)
CALL XDMFADDPREVIOUSATTRIBUTE(obj, nodeAttributeId)
CALL XDMFADDGRID(obj, 'Identical'//CHAR(0), .FALSE.)
CALL XDMFCLOSEGRIDCOLLECTION(obj, .TRUE.)
!! CALL XDMFWRITEHDF5(obj, 'my_output.h5'//CHAR(0), .TRUE.)
CALL XDMFWRITE(obj, filename, 30, .TRUE.)
CALL XDMFCLOSE(obj)
END PROGRAM XdmfFortranExample
|
{"hexsha": "b54000126d5264478f334d556324c2a5af8135ac", "size": 7629, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "utils/tests/Fortran/FixedOutputTestXdmfFortran.f90", "max_stars_repo_name": "scottwedge/xdmf", "max_stars_repo_head_hexsha": "f41196c966997a20f60525a3d2083490a63626a3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2015-12-07T08:11:06.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-15T01:39:07.000Z", "max_issues_repo_path": "utils/tests/Fortran/FixedOutputTestXdmfFortran.f90", "max_issues_repo_name": "scottwedge/xdmf", "max_issues_repo_head_hexsha": "f41196c966997a20f60525a3d2083490a63626a3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-26T16:50:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-26T16:50:37.000Z", "max_forks_repo_path": "utils/tests/Fortran/FixedOutputTestXdmfFortran.f90", "max_forks_repo_name": "scottwedge/xdmf", "max_forks_repo_head_hexsha": "f41196c966997a20f60525a3d2083490a63626a3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-04-04T20:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-15T01:39:08.000Z", "avg_line_length": 37.5812807882, "max_line_length": 77, "alphanum_fraction": 0.5771398611, "num_tokens": 2712}
|
#!/usr/bin/python3
# coding: utf-8
from optparse import OptionParser
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as patches
import matplotlib.patheffects as patheffects
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import AutoMinorLocator
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.animation import FuncAnimation
from matplotlib.text import Annotation, Text
import numpy as np
import urllib
from urllib import request
import re
import html
import sys, os
import pickle
from datetime import datetime, timedelta
import tweepy
import sqlite3
import math
from collections import deque
apatch = None
urlstr = "https://uk-air.defra.gov.uk/latest/currentlevels?view=site#L"
shorturlstr = "https://goo.gl/ZpELjS"
urlWHO = "http://apps.who.int/iris/bitstream/10665/69477/1/WHO_SDE_PHE_OEH_06.02_eng.pdf"
sitename = b'Liverpool'
mgm3 = '\u03BCgm\u207B\u00B3'
O3, NO2, SO2, PM25, PM100 = "O\u2083", "NO\u2082", "SO\u2082", "PM\u2082\u2085", "PM\u2081\u2080\u2080"
guides = {O3:100, NO2:200, SO2:20, PM25:25, PM100:50} # source: http://apps.who.int/iris/bitstream/10665/69477/1/WHO_SDE_PHE_OEH_06.02_eng.pdf
meansWHO = {O3:'8h', NO2:'1h', SO2:'10m', PM25:'24h', PM100:'24h'}
meansDEFRA = {O3:'8h', NO2:'1h', SO2:'max 15m', PM25:'24h', PM100:'24h'}
consumer_key, consumer_secret, access_token, access_token_secret = None, None, None, None
def loadAPIKeys():
global consumer_key, consumer_secret, access_token, access_token_secret
if os.path.isfile("apikeys.bin"):
consumer_key, consumer_secret, access_token, access_token_secret = pickle.load(open("apikeys.bin", "rb"))
else:
consumer_key = input("consumer_key: ")
consumer_secret = input("consumer_secret: ")
access_token = input("access_token: ")
access_token_secret = input("access_token_secret: ")
pickle.dump((consumer_key, consumer_secret, access_token, access_token_secret), open("apikeys.bin", "wb"))
def twitterAPI():
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
def tweet(status, replyto=None, imgfilename=None):
if not (status or imgfilename):
return
urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', status)
# take out all url texts from status for count, all urls count as 23
rstat = status
for u in urls:
rstat = rstat.replace(u, '')
nchars = len(rstat) + 23 * len(urls)
    if nchars > 140:
        print("Tweet too long")  # note: the over-length status is still attempted below
    #print(status)
api = twitterAPI()
if (imgfilename and os.path.isfile(imgfilename)):
try:
stat = api.update_with_media(imgfilename, status=status, in_reply_to_status_id=(replyto and replyto.id))
except Exception as e:
print(e)
stat = None
else:
try:
stat = api.update_status(status=status, in_reply_to_status_id=(replyto and replyto.id))
except Exception as e:
print(e)
stat = None
return stat
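# Build the tweet body: a day/time header, one "POLLUTANT: value (index)" line
# per species, and the short link.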
def compose(day, clock, reading):
status = ["%s, %s (%s)" % (day, clock, mgm3)]
skeys = list(reading.keys())
skeys.sort()
for k in skeys:
if reading[k][0] == "n/a":
status.append("%s: %s" % (k, reading[k][0]))
else:
status.append("%s: %.0f %s" % (k, reading[k][0], reading[k][1]))
status.append("%s" % shorturlstr)
status = '\n'.join(status)
return status
def toDT(day, clock):
if clock[:5] == "24:00": # 27/01/2017 24:00 is in fact 28/01/2017 00:00
clock = "00:00"
day = (datetime.strptime(day, "%d/%m/%Y") + timedelta(hours=24)).strftime("%d/%m/%Y")
return datetime.strptime("%s %s" % (day, clock[:5]), "%d/%m/%Y %H:%M")
def composeAboveTweet(day, clock, above, origtweetstat):
status = []
dtnow = toDT(day, clock)
for k in above:
# count hours above
#print("In composeAboveTweet", k, above[k])
lday, lclock, lvalue = above[k][0]
if lday == day and lclock == clock:
stat = []
# count hours above
dtlast = dtnow
nhours = 1
for lday, lclock, lvalue in above[k][1:]:
if lday == day and lclock == clock:
continue # skip duplicate entries
dt = toDT(lday, lclock)
if (dtlast - dt) == timedelta(hours=1):
nhours += 1
else:
break
dtlast = dt
stat.append("@lpoolcouncil @DefraUKAir @LiverpoolFoE: %s %dh above @WHO guide (%.0f%s %s-mean %s) #airpollution #liverpool" %
(k, nhours, guides[k], mgm3, meansWHO[k], urlWHO))
if meansWHO[k] != meansDEFRA[k]:
stat.append("(Note #DEFRA data is %s mean)" % meansDEFRA[k])
status.append('\n'.join(stat))
return status
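# Scrape the DEFRA "current levels" page and return (day, clock, readings)
# for the configured site row.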
def scrape():
f = request.urlopen(urlstr)
r = f.read()
g = re.search(b".*<tr>.*(%s.*?)</tr>" % sitename, r, re.DOTALL)
#print(g.group(1))
# split into <td></td>
row = g.group(1)
#print("row = %s\n" % row)
# date and time
dategroups = re.search(b".*<td>(.*?)<br.*?>(.*?)</td>", row, re.DOTALL)
day = dategroups.group(1).decode("utf-8")
clock = dategroups.group(2).decode("utf-8")
# data
cols = re.findall(b"<span.*?>(.*?)</span>", row, re.DOTALL)
assert len(cols) == 5
units = [O3, NO2, SO2, PM25, PM100]
datanums = []
for v in cols:
value = 'not_set'
if b' ' in v:
try:
value = float(v[:v.index(b' ')])
except ValueError:
pass
        if value == 'not_set' and b'n/a' in v:
            value = "n/a"
        elif value == 'not_set':
            # the value is followed by an &nbsp; entity rather than a plain space
            value = float(v[:v.index(b'&')])
nv = v.replace(b' ', b' ')
ix = b''
m = re.match(b".*?(\(.*?\))", nv)
if m:
ix = re.match(b".*?(\(.*?\))", nv).group(1)
datanums.append((value, ix.decode("utf-8")))
reading = dict(zip(units, datanums))
return day, clock, reading
def convert(r):
# converts result from sqlite query to format we scrape of day, time, readings
# where readings is a dict with keys "units" and tuples as values
units = [O3, NO2, SO2, PM25, PM100]
converted = deque()
for e in r:
if (e[1] == "19-11-15 Nov:11:1573840800.000"): # a muddled up entry
dt = datetime.strptime(e[1], "%y-%m-%d Nov:11:1573840800.000")
else:
dt = datetime.strptime(e[1], "%Y-%m-%d %H:%M:%S.%f")
date = dt.strftime("%d/%m/%Y")
clock = dt.strftime("%H:%M:%S")
tpls = []
for v in e[2:]:
if v[:3] == "n/a":
tpls.append(("n/a", ''))
else:
m = re.match("(.*?)(\(.*?\))", v)
tpls.append((float(m.group(1)), m.group(2)))
assert len(tpls) == 5
converted.appendleft((date, clock, dict(zip(units, tpls))))
return converted
def loadAllReadings(dbname):
db = sqlite3.connect(dbname)
c = db.cursor()
c.execute("SELECT * FROM readings")
return c.fetchall()
def loadLastReading(dbname):
db = sqlite3.connect(dbname)
c = db.cursor()
c.execute("SELECT * FROM readings WHERE id in ( SELECT max(id) FROM readings)")
return c.fetchall()
def loadReadings():
fall = "allreadings.bin"
allreadings = deque()
if os.path.isfile(fall):
allreadings = pickle.load(open(fall, "rb"))
return allreadings
def saveLastReading(dbname, date, time, reading, overwrt=False):
units = [O3, NO2, SO2, PM25, PM100]
db = sqlite3.connect(dbname)
c = db.cursor()
if overwrt:
c.execute(''' DROP TABLE IF EXISTS readings''')
e = '''
CREATE TABLE IF NOT EXISTS readings(id INTEGER PRIMARY KEY, date_time TEXT, %s TEXT, %s TEXT, %s TEXT, %s TEXT, %s TEXT)
''' % tuple(units)
c.execute(e)
dtConvert = "%s %s" % (date, time)
m = re.match("[0-9]{1,2}\/[0-9]{2}\/[0-9]{4} [0-9]{2}\:[0-9]{2}\:[0-9]*", dtConvert)
if m:
dt = datetime.strptime(dtConvert, "%d/%m/%Y %H:%M:%S")
else:
dt = datetime.strptime(dtConvert, "%d/%m/%Y %H:%M")
dts = dt.strftime("%Y-%m-%d %H:%M:%S.000")
c.execute("SELECT * FROM readings WHERE date_time=?", (dts,))
r = c.fetchall()
if r:
print("Already exists")
return
e = '''INSERT INTO readings(date_time, %s, %s, %s, %s, %s) VALUES(?,?,?,?,?,?)''' % tuple(units)
t = (dts, "%s %s" % reading[O3], "%s %s"% reading[NO2], "%s %s" % reading[SO2], "%s %s" % reading[PM25], "%s %s" % reading[PM100])
c.execute(e, t)
db.commit()
db.close()
def pickleReadings(allreadings):
    fall = "allreadings.bin"
    pickle.dump(allreadings, open(fall, "wb"))
def compareWHO(allreadings):
above = {}
for (day, clock, reading) in allreadings:
for k in guides:
            if isinstance(reading[k][0], float) and reading[k][0] > guides[k]:
if k not in above:
above[k] = []
above[k].append((day,clock, reading[k][0]))
return above
def weatherTweetToDict(t):
m = re.match(".*AirTemp ([\+\-0-9.]*).*?, RH ([0-9]*?)\%, wind speed ([0-9.]*) m\/s, wind dir ([0-9.]*?) deg, Time ([0-9:]*?)UTC", t.text)
if m:
try:
d = {"temp": float(m.group(1)), "rh": int(m.group(2)), "windspeed": float(m.group(3)), "winddir": float(m.group(4)), "time": m.group(5)}
d["datetime"] = t.created_at
d["tweet"] = t
return d
except Exception as e:
print(t.text)
raise e
def getAndPickleWeather(fn, readings):
api = twitterAPI()
oldestReading = toDT(readings[-1][0], readings[-1][1])
idlast = None
alltweets = []
while True:
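        # NOTE: the first-request special case is disabled; every call goes
        # through the max_id branch below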
if 0:#idlast == None:
r = api.user_timeline("@livuniwx")
else:
r = api.user_timeline("@livuniwx", max_id=idlast)
for i, t in enumerate(r[:-1]):
d = weatherTweetToDict(t)
if d:
alltweets.append(d)
if r[-1].created_at < oldestReading:
break
idlast = r[-1].id
pickle.dump(alltweets, open(fn, "wb"))
print("Pickled ", len(alltweets), " tweets")
def loadWeatherTweets(fn):
wt = pickle.load(open(fn, "rb"))
d0 = wt[0]["datetime"]
for t in wt[1:]:
assert t["datetime"] < d0
d0 = t["datetime"]
return wt
def testCMap():
# colourmap from green over yellow to red
cdict = {
'red' : ((0.00, 0.00, 0.00),
(0.50, 1.00, 1.00),
(1.00, 1.00, 1.00)),
'green': ((0.00, 1.00, 1.00),
(0.50, 1.00, 1.00),
(1.00, 0.00, 0.00)),
'blue' : ((0.00, 0.00, 0.00),
(0.50, 0.00, 0.00),
(1.00, 0.00, 0.00)),
}
cm = LinearSegmentedColormap("mymap", cdict, 256)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
fig, axes = plt.subplots(nrows=1)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
#axes[0].set_title(cmap_category + ' colormaps', fontsize=14)
axes.imshow(gradient, aspect='auto', cmap=cm)
pos = list(axes.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
#fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
axes.set_axis_off()
plt.show()
def Along(lam, a, b):
return lam * b + (1.0 - lam) * a
class Gauge:
def __init__(self, dates, data, C):
self.dates = dates
self.data = data
self.C = C
self.titles = {O3: r"$O_3$", NO2: r"$NO_2$", SO2: r"$SO_2$", PM25: r"$PM_{2.5}$", PM100: r"$PM_{10}$"}
self.mgpqm = "${\mu gm^{-3}}$"
self.maxValue = None
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1,1,1)
self.ax.set_xlim([-1.2, 1.2])
self.ax.set_ylim([-0.2, 1.2])
self.ax.set_aspect("equal")
plt.axis('off')
circle = patches.Circle((0, 0), 0.06, color="orange", path_effects=[patheffects.SimplePatchShadow(), patheffects.Normal()])
circle.zorder = 200
self.ax.add_artist(circle)
# 50% available for valmin to valmax, where is limit
self.valmin = 0
self.valmax = 1.2 * guides[C]
self.wholimit = guides[C]
self.rad = 0.9
lim = 180.0*(1.0 - self.toDialPos(self.wholimit)[2] / math.pi)
wedgeBelow = patches.Wedge((0, 0), 1.0, lim, 180.0, color=(0.8, 1, 0.8))
wedgeAbove = patches.Wedge((0, 0), 1.0, 0.0, lim, color=(1, 0.8, 0.8))
self.ax.add_patch(wedgeBelow)
self.ax.add_patch(wedgeAbove)
self.apatch = None
self.maxArtist = None
self.lastValue = 0.0
self.addLabels()
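    # Map a reading onto the half-circle dial: theta runs over [0, pi] as the
    # value goes from valmin to valmax; also returns the needle tip (sx, sy).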
def toDialPos(self, value):
theta = ((value - self.valmin) / (self.valmax - self.valmin)) * math.pi
sx, sy = -self.rad * math.cos(theta), self.rad * math.sin(theta)
return sx, sy, theta
def drawGauge(self, frame):
# transform value to angle between 0=valmin and 180=valmax
value = self.data[frame]
dialColor = "orange"
if value == "n/a":
value = self.lastValue
dialColor = "grey"
self.lastValue = value
sx, sy, theta = self.toDialPos(value)
if self.apatch:
self.apatch.remove()
arrow = patches.FancyArrow(0, 0, sx, sy, color=dialColor, width=0.05, length_includes_head=True, head_width=0.07, path_effects=[patheffects.SimplePatchShadow(), patheffects.Normal()])
self.apatch = self.ax.add_patch(arrow)
self.apatch.zorder = 100
# draw the max value
        if self.maxValue is None or value > self.maxValue:
            rx, ry = -(self.rad+0.07) * math.cos(theta), (self.rad+0.07) * math.sin(theta)
            tx, ty = 0.07 * math.cos(theta), -0.07 * math.sin(theta)
            arrow = patches.FancyArrow(rx, ry, tx, ty, color="red", width=0.0, length_includes_head=True, head_width=0.07, path_effects=[patheffects.SimplePatchShadow(), patheffects.Normal()])
            if self.maxValue is not None:
                self.aMaxPatch.remove()
            self.aMaxPatch = self.ax.add_patch(arrow)
self.maxValue = value
self.maximTitle = "\n Maximum: %.1f%s, %s" % (self.maxValue, self.mgpqm, self.dates[frame].strftime("%d/%m/%Y %H:%M"))
if self.maxArtist:
self.maxArtist.remove()
if dialColor == "grey":
self.ax.set_title(self.titles[self.C] + " %s" % self.dates[frame].strftime("%d/%m/%Y %H:%M"), fontsize=12)
self.maxArtist = self.ax.add_artist(Text(0, 1.25 * self.rad, text="No readings recorded!", verticalalignment='baseline', horizontalalignment='center'))
else:
self.ax.set_title(self.titles[self.C] + " %s" % self.dates[frame].strftime("%d/%m/%Y %H:%M"), fontsize=12)
self.maxArtist = self.ax.add_artist(Text(0, 1.25 * self.rad, text="%s" % (self.maximTitle), verticalalignment='baseline', horizontalalignment='center'))
def addLabels(self):
# numbers around the top
for i in range(11):
value = Along(i/10.0, self.valmin, self.valmax)
sx, sy, theta = self.toDialPos(value)
self.ax.add_artist(Text(sx, sy, text="%.0f" % value, verticalalignment='baseline', horizontalalignment='center', rotation=90.0 - math.degrees(theta)))
# label what we are showing
self.ax.add_artist(Text(0, self.rad/2, text="%s\n[%s]" % (self.titles[self.C], self.mgpqm), verticalalignment='baseline', horizontalalignment='center'))
# WHO guide information
self.ax.add_artist(Text(0, -0.2 * self.rad, text="WHO Limit: %s%s" % (guides[self.C], self.mgpqm), verticalalignment='baseline', horizontalalignment='center', color=(1, 0.8, 0.8)))
def plotRadial(readings, C):
dates = [toDT(d, c) for d, c, r in readings]
data = [r[C][0] for d, c, r in readings] # data
d0, d1 = dates[0], dates[-1] # date range
gauge = Gauge(dates, data, C)
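    # repeat the first frame so the GIF holds briefly before the needle moves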
framlist = [0, 0, 0, 0, 0, 0, 0, 0]
framlist.extend(range(len(data)))
anim = FuncAnimation(gauge.fig, gauge.drawGauge, frames=framlist, interval=200)
fn = "gauge_%s.gif" % d1.strftime("%Y%m%d%H%M")
anim.save(fn, dpi=100, writer='imagemagick')
plt.close(gauge.fig)
return fn
def plotLinear(readings, C):
titles = {O3: r"$O_3$", NO2: r"$NO_2$", SO2: r"$SO_2$", PM25: r"$PM_{2.5}$", PM100: r"$PM_{10}$"}
dates = [toDT(d, c) for d, c, r in readings]
data = [r[C][0] for d, c, r in readings] # data
newdates, newdata = [], []
for date, val in zip(dates, data):
if val != 'n/a':
newdates.append(date)
newdata.append(val)
data = newdata
dates = newdates
d0, d1 = dates[0], dates[-1] # date range
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# format x axis
ax.xaxis_date()
ax.set_xlim(d0, d1)
ax.xaxis.set_minor_formatter(mdates.DateFormatter('%Hh'))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
# format y axis
ax.xaxis.set_major_formatter(mdates.DateFormatter('%a %d/%m'))
ax.yaxis.set_major_formatter(FormatStrFormatter(r'%.0f$\frac{\mu g}{m^3}$'))
ax.set_ylim(0, max(data) + 5)
# green / red background division above and below WHO guide
guide = guides[C]
ax.fill_between([d0, d1, d1, d0], [0, 0, guide, guide], facecolor=(0.8, 1, 0.8), edgecolor="none")
ax.fill_between([d0, d1, d1, d0], [guide, guide, max(data) + 5, max(data) + 5], facecolor=(1, 0.8, 0.8), edgecolor="none")
ax.scatter(dates, data)
ax.set_title(titles[C] + " for %s to %s,\nLiverpool Speke (%s)" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y"), urlstr), fontsize=10)
ax.tick_params(axis='both', which='both', labelsize=10)
fig.autofmt_xdate()
plt.grid(which='major')
fn = "figure_%s.png" % d1.strftime("%Y%m%d")
plt.savefig(fn, dpi=600)
return fn
def plotPolar(readings, weathertweets):
def findInWT(dt, wt):
for t in wt:
if t["datetime"] - dt < timedelta(minutes=10):
return t
assert 0
# pair pollution readings with weather data
pm25 = []
pm100 = []
windspeed = []
winddir = []
dates = []
for r in readings:
d, c, rr = r
dt = toDT(d, c)
# find dt in wt
w = findInWT(dt, weathertweets)
dates.append(dt)
        if not isinstance(rr[PM25][0], str):
pm25.append(rr[PM25][0])
windspeed.append(w["windspeed"])
winddir.append(w["winddir"])
#if type(rr[PM100][0]) != type(''):
# pm100.append(rr[PM100][0])
theta = np.radians(winddir)
# colourmap from green over yellow to red
cdict = {
'red' : ((0.00, 0.00, 0.00),
(0.50, 1.00, 1.00),
(1.00, 1.00, 1.00)),
'green': ((0.00, 1.00, 1.00),
(0.50, 1.00, 1.00),
(1.00, 0.00, 0.00)),
'blue' : ((0.00, 0.00, 0.00),
(0.50, 0.00, 0.00),
(1.00, 0.00, 0.00)),
}
cm = LinearSegmentedColormap("greentored", cdict, 256)
ax = plt.subplot(111, projection='polar')
ax.scatter(theta, windspeed, c=pm25, s=100, cmap=cm, edgecolors='none')
ax.set_rmax(max(windspeed) + 1)
ax.set_rticks(np.arange(0, max(windspeed), 1)) # less radial ticks
ax.set_rlabel_position(300) # get radial labels away from plotted line
ax.set_theta_zero_location("S")
ax.set_theta_direction(-1)
ax.grid(True)
# tick locations
thetaticks = np.arange(0,360,90)
ax.set_thetagrids(thetaticks, frac=1.01)
#img = plt.imread("speke.png")
#plt.imshow(img, extent=[0,10,0,10])
    ax.set_title("PM25 %s to %s" % (readings[-1][0], readings[0][0]))  # use the argument, not the global
plt.show()
if __name__ == "__main__":
parser = OptionParser()
parser = OptionParser(usage='usage: %prog [options] ')
parser.add_option("-f", "--file", dest="filename",
help="", metavar="FILE")
parser.add_option('-m', '--mode',
type='choice',
action='store',
dest='mode',
choices=['plotpollution', 'debug', 'saveweather', 'plotpollutionLinear', 'plotRadial', 'regular'],
default='regular',
help='Choose mode',)
(options, args) = parser.parse_args()
mode = options.mode
loadAPIKeys()
#allreadings = loadReadings()
# remove duplicate entries (could have come in while debugging)
#ic = 0
#while ic < len(allreadings):
# r = allreadings[ic]
# while allreadings.count(r) > 1:
# allreadings.remove(r)
# ic += 1
if mode == 'debug':
#day, clock, reading = scrape()
#saveLastReading("readings.db", day, clock, reading)
#r = loadLastReading("readings.db")
#c = convert(r)
#print(c)
#print(scrape())
# find when we last posted an image
files = [f for f in os.listdir('.') if re.match("gauge_[0-9]*.gif", f)]
if files:
datelast = max([datetime.strptime(f, "gauge_%Y%m%d%H%M.gif") for f in files])
else:
datelast = datetime.today() - timedelta(days=100)
sincelastplot = (datetime.today() - datelast)
if (sincelastplot > timedelta(hours=24 * 2)):
allreadings = convert(loadAllReadings("readings.db"))
allreadings.reverse()
readings = [(d, h, r) for (d, h, r) in allreadings if toDT(d, h) >= datelast]
d0, d1 = toDT(readings[0][0], readings[0][1]), toDT(readings[-1][0], readings[-1][1])
fn = plotRadial(readings, PM25)
#tweet(PM25 + "\n%s - %s" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y")), None, fn)
elif mode == 'saveweather':
allreadings = convert(loadAllReadings("readings.db"))
getAndPickleWeather("weathertweets.bin", allreadings)
elif mode == 'plotpollution':
weathertweets = loadWeatherTweets("weathertweets.bin")
plotPolar(allreadings, weathertweets)
elif mode == 'plotRadial':
files = [f for f in os.listdir('.') if re.match("gauge_[0-9]*.gif", f)]
if files:
datelast = max([datetime.strptime(f, "gauge_%Y%m%d%H%M.gif") for f in files])
else:
datelast = datetime.today() - timedelta(days=100)
sincelastplot = (datetime.today() - datelast)
if (sincelastplot > timedelta(hours=24 * 2)):
allreadings = convert(loadAllReadings("readings.db"))
allreadings.reverse()
readings = [(d, h, r) for (d, h, r) in allreadings if toDT(d, h) >= datelast]
d0, d1 = toDT(readings[0][0], readings[0][1]), toDT(readings[-1][0], readings[-1][1])
fn = plotRadial(readings, PM25)
tweet(PM25 + "\n%s - %s" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y")), None, fn)
elif mode == "plotpollutionLinear":
# find when we last posted an image
files = [f for f in os.listdir('.') if re.match("figure_[0-9]*.png", f)]
if files:
datelast = max([datetime.strptime(f, "figure_%Y%m%d.png") for f in files])
datelast += timedelta(hours=12)
else:
datelast = datetime.today() - timedelta(days=100)
sincelastplot = (datetime.today() - datelast)
if (sincelastplot > timedelta(hours=24 * 3)):
allreadings = convert(loadAllReadings("readings.db"))
allreadings.reverse()
readings = [(d, h, r) for (d, h, r) in allreadings if toDT(d, h) >= datelast]
figure = plotLinear(readings, PM25)
d0, d1 = toDT(readings[0][0], readings[0][1]), toDT(readings[-1][0], readings[-1][1])
#tweet(PM25 + "\n%s - %s" % (d0.strftime("%d/%m/%Y"), d1.strftime("%d/%m/%Y")), None, figure)
else:
day, clock, reading = scrape()
r = loadLastReading("readings.db")
converted = convert(r)
assert(len(converted) == 1)
lastday, lastclock, lastreading = converted[-1]
if ((day, clock) != (lastday, lastclock)):
status = compose(day, clock, reading)
rtweet = tweet(status)
saveLastReading("readings.db", day, clock, reading)
allreadings = convert(loadAllReadings("readings.db"))
# compare with WHO recommendations
r = allreadings and compareWHO(allreadings)
if r:
stats = composeAboveTweet(day, clock, r, rtweet)
for s in stats:
tweet(s, replyto=rtweet)
else:
print("Reading already known")
|
{"hexsha": "1953420d8b141ef081883d1468e29e9794b69f31", "size": 25193, "ext": "py", "lang": "Python", "max_stars_repo_path": "aqliverpool.py", "max_stars_repo_name": "mdunschen/AirQualityTweeter", "max_stars_repo_head_hexsha": "8eab4b94d36d0e3ca15bdfcea44ee9ce9313b177", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aqliverpool.py", "max_issues_repo_name": "mdunschen/AirQualityTweeter", "max_issues_repo_head_hexsha": "8eab4b94d36d0e3ca15bdfcea44ee9ce9313b177", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aqliverpool.py", "max_forks_repo_name": "mdunschen/AirQualityTweeter", "max_forks_repo_head_hexsha": "8eab4b94d36d0e3ca15bdfcea44ee9ce9313b177", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6710334789, "max_line_length": 192, "alphanum_fraction": 0.5637677133, "include": true, "reason": "import numpy", "num_tokens": 7556}
|
# So that this test can be run independently
using Cairo
if !isdefined(:ddots4)
include("shape_functions.jl")
end
# Test that writing images to a Julia IO object works
c = CairoRGBSurface(256,256);
cr = CairoContext(c);
ddots4(cr,256,246,1.0,3000)
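# Stream the PNG through a base64 encoder into an in-memory buffer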
buf = IOBuffer()
pipe = Base64EncodePipe(buf)
write_to_png(c,pipe)
close(pipe)
# Catch short writes
@assert length(takebuf_array(buf)) > 200
|
{"hexsha": "b89870f50406a4a44498b0b96000f8548eb4a67b", "size": 394, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_stream.jl", "max_stars_repo_name": "JuliaPackageMirrors/Cairo.jl", "max_stars_repo_head_hexsha": "fdfcfdb24c29cb2e71b596d891f411f9479db3cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_stream.jl", "max_issues_repo_name": "JuliaPackageMirrors/Cairo.jl", "max_issues_repo_head_hexsha": "fdfcfdb24c29cb2e71b596d891f411f9479db3cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_stream.jl", "max_forks_repo_name": "JuliaPackageMirrors/Cairo.jl", "max_forks_repo_head_hexsha": "fdfcfdb24c29cb2e71b596d891f411f9479db3cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1764705882, "max_line_length": 53, "alphanum_fraction": 0.7487309645, "num_tokens": 117}
|
from gym.spaces import Discrete, Box
import numpy as np
class MemoryGame:
'''Multi-agent wrapper for the memory game with noisy observations'''
def __init__(self, config, spec_only=False):
self._length = config.get("length", 5)
self._num_cues = config.get("num_cues", 2)
self._noise = config.get("noise", 0)
self._agent_id = config.get("agent_id", 0)
self._obs_shape = (self._num_cues + 2,)
self.observation_space = {self._agent_id: Box(0, 2, shape=self._obs_shape)}
self.action_space = {self._agent_id: Discrete(self._num_cues)}
self._current_step = 0
self._current_cue = 0
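    # Observation layout: a one-hot cue over the first _num_cues slots, plus a
    # start-of-episode flag (index -2) and an end-of-episode flag (index -1),
    # optionally corrupted by uniform noise.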
def _obs(self):
if 0 == self._noise:
obs = np.zeros(self._obs_shape)
else:
obs = np.random.uniform(0, self._noise, self._obs_shape)
if 0 == self._current_step:
obs[-2] += 1
obs[self._current_cue] += 1
elif self._length == self._current_step:
obs[-1] += 1
return {self._agent_id: obs}
def reset(self):
self._current_step = 0
self._current_cue = np.random.randint(self._num_cues)
return self._obs()
def step(self, action):
if self._current_step < self._length:
self._current_step += 1
return self._obs(), {self._agent_id: 0}, {self._agent_id: False}, None
else:
reward = (1 if action == self._current_cue else 0)
return self._obs(), {self._agent_id: reward}, {self._agent_id: True}, None
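# Minimal usage sketch (illustrative, not part of the wrapper): one random
# rollout under the default agent id of 0.
if __name__ == "__main__":
    env = MemoryGame({"length": 5, "num_cues": 2, "noise": 0.1})
    obs = env.reset()
    done, reward = {0: False}, {0: 0}
    while not done[0]:
        obs, reward, done, _ = env.step(np.random.randint(2))
    print("final reward:", reward[0])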
|
{"hexsha": "b78a7be4a07388e59795cd82a42ece94c6d3b338", "size": 1552, "ext": "py", "lang": "Python", "max_stars_repo_path": "interactive_agents/envs/memory_game.py", "max_stars_repo_name": "rtloftin/interactive_agents", "max_stars_repo_head_hexsha": "f7d57d1421000b2e8a79a9dff179b8fe7c8d3fc0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "interactive_agents/envs/memory_game.py", "max_issues_repo_name": "rtloftin/interactive_agents", "max_issues_repo_head_hexsha": "f7d57d1421000b2e8a79a9dff179b8fe7c8d3fc0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "interactive_agents/envs/memory_game.py", "max_forks_repo_name": "rtloftin/interactive_agents", "max_forks_repo_head_hexsha": "f7d57d1421000b2e8a79a9dff179b8fe7c8d3fc0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7391304348, "max_line_length": 86, "alphanum_fraction": 0.5992268041, "include": true, "reason": "import numpy", "num_tokens": 422}
|
import os
import numpy as np
import matplotlib.pyplot as plt
from torchvision.utils import save_image
import torch
SAVE_DIR = '../data/results'
def imsave(img,fname=None,prefix='test',fdir=None):
if fdir==None:
fdir = SAVE_DIR
    # makedirs handles nested default paths such as '../data/results'
    os.makedirs(fdir, exist_ok=True)
if fname==None:
idx = len([f for f in os.listdir(fdir) if f.startswith(prefix)])
fname = '%s%d.png'%(prefix,idx)
pth = os.path.join(fdir,fname)
# import pdb;pdb.set_trace()
if isinstance(img,np.ndarray):
plt.imsave(pth,img)
else:
save_image(img,pth)
""" function for gluing together images"""
def glue_img(*img_lst):
# import pdb;pdb.set_trace()
numpied = []
# convert to numpy array
for img in img_lst:
        if isinstance(img,torch.Tensor):
            # CHW tensor -> HWC numpy array, rescaled from [-1, 1] to [0, 1]
            img = img.float().cpu().detach().numpy().transpose([1,2,0])
            img = (img+1)/2
            numpied.append(img)
else:
numpied.append(img)
out = np.concatenate(numpied,1)
return out
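# Minimal usage sketch (illustrative): glue two random RGB images side by side
# and save the result under SAVE_DIR, which imsave creates on demand.
if __name__ == "__main__":
    a = np.random.rand(64, 64, 3)
    b = np.random.rand(64, 64, 3)
    imsave(glue_img(a, b), prefix="glued")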
|
{"hexsha": "e8e1efebe5e19ce607b82333e9ffbde712a35fa0", "size": 1068, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/custom_utils.py", "max_stars_repo_name": "RisingStockPrices/dressing-in-order", "max_stars_repo_head_hexsha": "77e1579311d2b94e650a5db500cc9773f64bd24a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/custom_utils.py", "max_issues_repo_name": "RisingStockPrices/dressing-in-order", "max_issues_repo_head_hexsha": "77e1579311d2b94e650a5db500cc9773f64bd24a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/custom_utils.py", "max_forks_repo_name": "RisingStockPrices/dressing-in-order", "max_forks_repo_head_hexsha": "77e1579311d2b94e650a5db500cc9773f64bd24a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4285714286, "max_line_length": 72, "alphanum_fraction": 0.5964419476, "include": true, "reason": "import numpy", "num_tokens": 278}
|
from PIL import Image,ImageDraw,ImageFilter
from numpy import *
from mod_dim import region_label as rg
from models import ImageDB,Legend,TopLegend,ImageInfo
import pandas as pd
import ast
from app import db
class Point(object):
def __init__(self,x,y):
self.x=x
self.y=y
def __str__(self):
return "("+str(self.x)+","+str(self.y)+")"
class Pixel(object):
def __init__(self,color=[(0,0,0)],topLeft=Point(0,0),bottomRight=Point(0,0),image=None):
self.color=color
#self.image=image
self.topLeft=topLeft
self.bottomRight=bottomRight
def getColor(self):
return self.color[0]
def getRowImgPoints(self):
return list(range(self.topLeft.x,self.bottomRight.x+1))
def getColImgPoints(self):
return list(range(self.topLeft.y,self.bottomRight.y+1))
def __str__(self):
return "Pixel:" + str(self.color) + "coordinates : " + str(self.topLeft) + ":" + str(self.bottomRight)
#def quadTree(image,pixel):
class quadTree(object):
def __init__(self, image,tl,br):
self.pixel = imageInfo(image,tl,br) #colorlist,top-left-pixel-coordinates, bottom-right-pixel-coordinates
self.left = None
self.mid1 = None
self.mid2 = None
self.right = None
# GIVEN IMAGE, TL AND BR IT RETURN PIXEL OBJECT CONTAINING TL, BL AND COLORLIST IN HEX
def imageInfo(image,tl,br):
t=image.crop((tl.x,tl.y,br.x+1,br.y+1))
t=t.convert('RGB')
color_list=t.getcolors(maxcolors=10000)
#color_list=image.crop((tl.x,tl.y,br.x+1,br.y+1)).getcolors(maxcolors=200)
#print(color_list)
color_list=[b for (a,b) in color_list]
color_list=['#%02x%02x%02x' % (r,g,b) for (r,g,b) in color_list]
p = Pixel(color_list,tl,br)#,image)
return p
# RETURN TRUE IF INPUT PIXELS/RECTANGLES CONTAINS ONLY ONE COLOR
def isHomogeneous(pixel):
#print(pixel,len(pixel.color))
if(len(pixel.color)==1): return True
return False
# SPLIT THE BLOCK/RECTANGLE/PIXEL IN 3 QUADS RECURSIVELY
def splitBlock(node,image):
tl=node.pixel.topLeft
br=node.pixel.bottomRight
if (tl.x==br.x and tl.y==br.y):
node.left=None
node.mid1=None
node.mid2=None
node.right=None
elif((tl.x+1)==br.x and (tl.y+1)==br.y) :
node.left=quadTree(image,tl,tl)
node.mid1=quadTree(image,Point(tl.x,tl.y+1),Point(tl.x,tl.y+1))
node.mid2=quadTree(image,Point(tl.x+1,tl.y),Point(tl.x+1,tl.y))
node.right=quadTree(image,br,br)
elif((tl.x+1)==br.x and (tl.y)==br.y) or ((tl.x)==br.x and (tl.y+1)==br.y) :
node.left=quadTree(image,tl,tl)
node.mid1=None
node.mid2=None
node.right=quadTree(image,br,br)
else:
xmid=int((tl.x+br.x)/2)
ymid=int((tl.y+br.y)/2)
mid=Point(xmid,ymid)
node.left=quadTree(image,tl,Point(xmid,ymid))
node.mid1=quadTree(image,Point(tl.x,ymid+1),Point(xmid,br.y))
node.mid2=quadTree(image,Point(xmid+1,tl.y),Point(br.x,ymid))
node.right=quadTree(image,Point(xmid+1,ymid+1),br)
if node.left!=None:
if not isHomogeneous(node.left.pixel): splitBlock(node.left,image)
if node.mid1!=None:
if not isHomogeneous(node.mid1.pixel): splitBlock(node.mid1,image)
if node.mid2!=None:
if not isHomogeneous(node.mid2.pixel): splitBlock(node.mid2,image)
if node.right!=None:
if not isHomogeneous(node.right.pixel): splitBlock(node.right,image)
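# Minimal usage sketch (hypothetical 'demo.png'; note the module-level app/db
# imports must resolve for this file to load at all):
#   img = Image.open('demo.png')
#   root = quadTree(img, Point(0, 0), Point(img.size[0] - 1, img.size[1] - 1))
#   splitBlock(root, img)
# Afterwards every leaf node's pixel covers a single-colour rectangle.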
# SAVING ENTIRE QUAD TREE TO DATABASE
# SAVE ONLY THE LEAF NODES OF QUAD TREE IN DATABASE
def saveMatrixToDatabase_leaf(image,bit_subspace,outputPath,sorted_data,lbl,labelname='n1'):
#write code here to save to database
print('---CREATING QUADTREE FOR IMAGE----')
print(sorted_data.columns)
q=quadTree(image,Point(0,0),Point(image.size[0]-1,image.size[1]-1))
splitBlock(q,image)
def preorder1(start):
if(start==None): return
if(len(start.pixel.color)==1):
color=str(start.pixel.color[0])
block_row = int(sorted_data.iloc[int(start.pixel.topLeft.x),-1])
block_col = int(sorted_data.iloc[int(start.pixel.topLeft.y),-1])
u1=ImageDB(name=labelname,tlx=str(start.pixel.topLeft.x),tly=str(start.pixel.topLeft.y),brx=str(start.pixel.bottomRight.x),bry=str(start.pixel.bottomRight.y),color=color,lbl=lbl[start.pixel.topLeft.y][start.pixel.topLeft.x],block_row=block_row,block_col=block_col)
db.session.add(u1)
temp=lbl[start.pixel.topLeft.y:start.pixel.bottomRight.y+1,start.pixel.topLeft.x:start.pixel.bottomRight.x+1]
x=unique(temp)
#if(len(x)>1):
# print("ERROR:",start.pixel,x)
#tx.create(u1)
#tx.merge(u1)
preorder1(start.left)
preorder1(start.mid1)
preorder1(start.mid2)
preorder1(start.right)
#n = db.labels.create("tmp2")
print('----SAVING QUADTREE IN DATABASE---')
preorder1(q)
print('---done---')
db.session.commit()
#tx.commit()
def deleteAllNodes(labelname='image'):
x = ImageDB.query.filter_by(name=labelname)
print(x)
for i in x:
db.session.delete(i)
db.session.commit()
def drawImage(pixels,width,height,scale,perclustercount=[],class_label=[],color_dict={},grid=False,blurr=False):
print ('---Draw Image---')
print(color_dict)
OUTPUT_SCALE=scale
LEGEND_GAP=int((width*scale)/10)
LEGEND_WIDTH=int((width*scale)/50)
FILL_COLOR=(255,255,255)
BORDER_COLOR=(0,100,0)
#retrieving the image back from quadtree - intialization
m = OUTPUT_SCALE
dx, dy = (0,0)#(PADDING, PADDING)
im = Image.new('RGB', (width * m + dx + LEGEND_GAP, height * m + dy+LEGEND_GAP))
draw = ImageDraw.Draw(im)
if blurr==True:
draw.rectangle((0, 0, width * m + LEGEND_GAP, height * m+LEGEND_GAP), FILL_COLOR,outline=FILL_COLOR) #(255,255,255)
else:
draw.rectangle((0, 0, width * m + LEGEND_GAP, height * m+LEGEND_GAP), BORDER_COLOR,outline=BORDER_COLOR) #(255,255,255)
draw.rectangle((0, 0, width * m , height * m), FILL_COLOR,outline=FILL_COLOR) #(255,255,255)
for quad in pixels:
l, t, r, b = quad.tlx,quad.tly,quad.brx,quad.bry
box = (l * m + dx, t * m + dy, (r+1) * m-1, (b +1)* m-1)
#print(quad.color)
draw.rectangle(box, quad.color,outline=quad.color)
    if blurr==True:
        im = im.filter(ImageFilter.GaussianBlur(radius=10))
    del draw
    # NOTE: unconditional early return -- the legend/grid drawing code below is
    # currently unreachable and kept for reference only
    return im
dx,dy = (0,0)
prev=0
for i in range(len(perclustercount)):
c=perclustercount[i]
label=class_label[i]
l,t,r,b=width*m+int(LEGEND_GAP/2), prev , width*m+int(LEGEND_GAP/2)+LEGEND_WIDTH, prev+c*m
box = (l + dx, t + dy, (r+1) -1, (b +1) -1)
print(label)
color_val=color_dict[str(label)]
draw.rectangle(box, color_val,outline=color_val) #vertical legend
l,t,r,b=prev,width*m+int(LEGEND_GAP/2) , prev+c*m, width*m+int(LEGEND_GAP/2)+LEGEND_WIDTH
box = (l + dx, t + dy, (r+1) -1, (b +1) -1)
draw.rectangle(box, color_val,outline=color_val) #horizontal legend
prev=prev+c*m
if grid!=False:
prev=0
for c in perclustercount:
draw.line((0,prev+c*m,sum(perclustercount)*m,prev+c*m),fill=(200,0,0),width=int(LEGEND_WIDTH/4))
draw.line((prev+c*m,0,prev+c*m,sum(perclustercount)*m),fill=(200,0,0),width=int(LEGEND_WIDTH/4))
prev=prev+c*m
draw.line((0,0,0,width*m),fill=(200,0,0),width=int(LEGEND_WIDTH/4))
draw.line((0,0,width*m,0),fill=(200,0,0),width=int(LEGEND_WIDTH/4))
del draw
#im1 = im.filter(ImageFilter.BLUR)
return im
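# Box arithmetic sketch: with scale m = 10, a quad spanning (tlx,tly) = (3,4)
# to (brx,bry) = (5,6) is drawn at (30,40)-(59,69), so every source pixel
# becomes a 10x10 block and adjacent quads tile without gaps.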
def getbackground(labelname,datasetName,grid=False):
pixels = ImageDB.query.filter_by(name=labelname)
width,height,scale,class_count,class_label=getImageParams(datasetName)
class_count=class_count.split(',')
perclustercount=[int(i) for i in class_count]
class_label=class_label.split(',')
class_label=[int(i) for i in class_label]
#print('pppp1',width,height,scale)
color_dict=getColorFromTopLegend(datasetName)
im = drawImage(pixels,width,height,scale,perclustercount,class_label,color_dict,grid,blurr=True)
return im
# SAVE COLOR DICTIONARY TO DATABASE (saving legend)
def saveColorToDatabase(colorDict,datasetname):
    #delete any previous legend for this dataset, then insert the new colors
print('---SAVING COLOR DICT IN DATABASE----')
x = Legend.query.filter_by(name=datasetname)
print(x)
for i in x:
db.session.delete(i)
db.session.commit()
print(colorDict)
for c in colorDict:
u1=Legend(name=datasetname,color=c,subspace=colorDict[c])
db.session.add(u1)
db.session.commit()
def loadColorFromDatabase(datasetName):
print('---LOADING COLORDICT FROM DATABASE----')
x = Legend.query.filter_by(name=datasetName)
allSubspace=set([])
color_dict={}
for i in x:
subspace=ast.literal_eval('['+i.subspace+']')
color_dict[i.color]=subspace
for j in subspace:
#print(i)
allSubspace.add(tuple(j))
print('--end--')
return color_dict,list(allSubspace)
def saveClassLabelColorsInDatabase(datasetName,classLabels):
x = TopLegend.query.filter_by(name=datasetName)
print(x)
for i in x:
db.session.delete(i)
db.session.commit()
print('-- saveClassLabelColorsInDatabase --')
print(datasetName,classLabels,len(classLabels))
import seaborn as sns
x=sns.color_palette("hls", len(classLabels))
y=[(int(a*255),int(b*255),int(c*255)) for (a,b,c) in x]
color_list=['#%02x%02x%02x' % (r,g,b) for (r,g,b) in y]
for k in range(len(color_list)):
u1=TopLegend(name=datasetName,color=color_list[k],classLabel=str(classLabels[k]))
db.session.add(u1)
db.session.commit()
def getColorFromTopLegend(datasetName):
print('--LOADING COLORDICT FROM DATABASE for classLabel')
x=TopLegend.query.filter_by(name=datasetName)
color_dict={}
for i in x:
color_dict[i.classLabel]=i.color
return color_dict
'''
opt=tx.cypher.execute('MATCH (n:colorClassLabel) where n.name={x} return keys(n)', x=datasetName)
allColors=str(opt).split('[')[1].split(']')[0]
#print(allColors)
allColors=allColors.replace('\'','')
allColors=allColors.replace(' ','')
#print(allColors)
allColors=allColors.replace('uH','H') #python2
allColors=allColors.replace('uname','name') #python2
allColors=allColors.split(',')
print(allColors)
allColors.remove('name')
color_dict={}
for a in allColors:
str1='MATCH (n:colorClassLabel) where n.name=\'%s\' return n.%s' %(datasetName,a)
classLabel=tx.cypher.execute(str1)
n='n.'+a
classLabel=classLabel[0][n]
color_dict[classLabel]='#'+a[1:]
return color_dict
'''
return {}
# FETCH ONLY THOSE NODES WITH THE GIVEN COLOR IN DATABASE
def databasefilter(labelname,color='ALL'):
print('--Database filter ALL/color--')
print(color)
if(color!='ALL'):
#color='[\''+color+'\']'
pixels = ImageDB.query.filter_by(name=labelname,color=color)
else:
pixels = ImageDB.query.filter_by(name=labelname)
print(pixels)
#print('/t 1. retrieved all pixels. Total count is %d' %(len(pixels)))
return pixels
def saveInfoToDatabase(row,col,name,class_count,class_label):
print('-- saveInfoToDatabase --')
print(class_count,class_label)
if(row<300):
scale=10
else:
scale=1
x = ImageInfo.query.filter_by(name=name)
for i in x:
db.session.delete(i)
db.session.commit()
tx = ImageInfo(name,row,col,scale,class_count,class_label)
db.session.add(tx)
db.session.commit()
def getImageParams(datasetName):
tx = ImageInfo.query.filter_by(name=datasetName)
print('txt',tx)
width=int(tx[0].width)
height=int(tx[0].height)
scale=int(tx[0].scale)
class_count=tx[0].class_count
class_label=tx[0].class_label
return width,height,scale,class_count,class_label
def updateImageInfo(x,y,name):
t=ImageInfo.query.filter_by(name=name)[0]
t.with_legend_width=x
t.with_legend_height=y
db.session.commit()
return
def getTopLeftCoordinates_Grid(x,y,labelname,datasetName='image'):
print('-- getTopLeftCoordinates_Grid: retrieving top left coordinates of grid ')
    tx = ImageDB.query.filter(ImageDB.name==labelname,ImageDB.tlx<=x,ImageDB.brx>=x,ImageDB.tly<=y,ImageDB.bry>=y)
    print(tx[0], tx.count())
color_val = tx[0].color
block_row = tx[0].block_row
block_col = tx[0].block_col
#labelname+imgType
tx = ImageInfo.query.filter_by(name=datasetName)
class_count=tx[0].class_count.split(',')
print(class_count)
tlx=0
tly=0
print('BBLOCK',block_row,block_col)
for i in range(block_row):
print(class_count[i])
tlx = tlx + int(class_count[i])
for i in range(block_col): tly = tly + int(class_count[i])
return tlx,tly,block_row,block_col
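# Worked example (hypothetical class_count '10,20,30'): a click resolved to
# block_row=2, block_col=1 gives tlx = 10 + 20 = 30 and tly = 10, i.e. the
# top-left corner of that cluster block in sorted-matrix coordinates.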
def databasepatternfilter(x,y,labelname):
    tx = ImageDB.query.filter(ImageDB.name==labelname,ImageDB.tlx<=x,ImageDB.brx>=x,ImageDB.tly<=y,ImageDB.bry>=y)
    print(tx[0], tx.count())
color_val = tx[0].color
lbl = tx[0].lbl
pixels = ImageDB.query.filter_by(name=labelname, lbl=lbl)
return pixels,color_val
def blockfilter(x,y,labelname):
print('-- blockfilter: retrieving pixels associated with the clicked pattern ')
    tx = ImageDB.query.filter(ImageDB.name==labelname,ImageDB.tlx<=x,ImageDB.brx>=x,ImageDB.tly<=y,ImageDB.bry>=y)
    print(tx[0], tx.count())
color_val = tx[0].color
block_row = tx[0].block_row
block_col = tx[0].block_col
pixels = ImageDB.query.filter_by(name=labelname,block_col=block_col,block_row=block_row,color=color_val)
return pixels,color_val
def getCoordinates(pixels,sortedPath,unique=True):
rowPointsIndex=[]
colPointsIndex=[]
pair=[]
x=pd.read_csv(sortedPath,index_col=0)
for p in pixels:
        # cast to int in case the DB returns the coordinates as the stored
        # str(...) values; range() below needs integer bounds
        tlx=int(p.tlx)
        tly=int(p.tly)
        brx=int(p.brx)
        bry=int(p.bry)
for i in range(tlx,brx+1):
for j in range(tly,bry+1):
rowPointsIndex.extend([i])
colPointsIndex.extend([j])
pair.extend([str(i)+':'+str(j)])
#pair.extend([x.index.get_values()[i]+':'+x.index.get_values()[j]])
#print(rowPointsIndex,colPointsIndex,tl,br)
if(unique==True):
rowPointsIndex=list(set(rowPointsIndex))
colPointsIndex=list(set(colPointsIndex))
print('---',len(rowPointsIndex),len(colPointsIndex))
rowPoints = x.iloc[rowPointsIndex,:]
colPoints = x.iloc[colPointsIndex,:]
return rowPoints,colPoints
|
{"hexsha": "45d893b8e6e923a4fb53c501532a8219b9632693", "size": 14758, "ext": "py", "lang": "Python", "max_stars_repo_path": "TOOL/mod_dim/database_connectivity_rdb.py", "max_stars_repo_name": "ayushi04/SPVAC", "max_stars_repo_head_hexsha": "7bb7742881ebc08842afe9056a3a1439c4d559c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TOOL/mod_dim/database_connectivity_rdb.py", "max_issues_repo_name": "ayushi04/SPVAC", "max_issues_repo_head_hexsha": "7bb7742881ebc08842afe9056a3a1439c4d559c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TOOL/mod_dim/database_connectivity_rdb.py", "max_forks_repo_name": "ayushi04/SPVAC", "max_forks_repo_head_hexsha": "7bb7742881ebc08842afe9056a3a1439c4d559c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6203473945, "max_line_length": 276, "alphanum_fraction": 0.6508334463, "include": true, "reason": "from numpy", "num_tokens": 4177}
|
#!/usr/bin/env Rscript
cat("Making plots...\n")
tring <- read.table("trcurve.txt", head=TRUE)
gmoverr <- read.table("gmoverr.txt", head=TRUE)
data <- read.table("maxv.txt",head=FALSE)
vrange <- data[1,1]
dv <- 4
data <- read.table("rsize.txt", head=FALSE)
rsize <- data[1,1]
data <- as.matrix(read.table("observation.txt", head=FALSE))
pdf("pvplot.pdf")
image(log10(data), axes=0, xaxs="i", yaxs="i")
axis(1, at=seq(0,1,length.out=9),labels=1e3*c(seq(-rsize, 0, length.out=5),seq(rsize/4, rsize,length.out=4)))
axis(2, at=seq(0,1,length.out=9),labels=c(seq(-vrange,0,length.out=5),seq(vrange/4,vrange,length.out=4)))
segments(-1,0.5,2,0.5,lty=2)
segments(0.5,-1,0.5,2,lty=2)
dev.off() -> dumpvar
distmedian <- function(vels, distribution) {
for (i in seq(2,length(distribution))) {
distribution[i] <- distribution[i] + distribution[i-1]
}
midpoint <- distribution[length(distribution)]*0.5
medpoint <- 0
for (i in seq(1,length(distribution))) {
if (distribution[i]<midpoint) {
medpoint <- i
}
}
return (vels[medpoint])
}
weightedmean <- function(vels, distribution) {
v <- sum(vels*distribution)/sum(distribution)
return(v)
}
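# Quick sanity check (illustrative numbers, not pipeline output):
#   weightedmean(c(-2, 0, 2), c(1, 2, 1))  # 0: the flux-weighted centre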
velseq <- seq((dv/2)-vrange,vrange-(dv/2),by=dv)
radius <- gmoverr$r[seq(2,1+(length(data[,1])-1)/2)]*1e3
leftcurve <- numeric(0)
rightcurve <- numeric(0)
for (i in seq(1,(length(data[,1])-1)/2 ) ) {
leftdist <- as.numeric(data[(length(data[,1])+1)/2-i,])
  rightdist <- as.numeric(data[(length(data[,1])+1)/2+i,])
#leftcurve <- c(leftcurve, distmedian(velseq,leftdist))
#rightcurve <- c(rightcurve, distmedian(velseq,rightdist))
leftcurve <- c(leftcurve, weightedmean(velseq,leftdist))
rightcurve <- c(rightcurve, weightedmean(velseq,rightdist))
}
data <- read.table("finalcurve.txt", head=TRUE)
radius <- data$radius
rightcurve <- data$low
leftcurve <- -data$high
rc <- data.frame(r=radius, vr=rightcurve, vl=leftcurve)
pts <- (rightcurve-leftcurve)/2
pdf("tiltedringcurve.pdf")
plot(rc$r, pts, ylim=c(0,max(tring$v)*1.5), xlim=c(0,max(rc$r)), xlab="R[kpc]", ylab=expression(v[c]~"[km/s]"))
segments(rc$r, rc$vr, rc$r, -rc$vl)
lines(tring$r*1e3, tring$v,lty=2, col="green", lwd=2)
lines(gmoverr$r*1e3, gmoverr$v, lty=2, col="red", lwd=2)
#lines(tring$r*1e3, tring$gas,col="blue",lwd=2)
lines(tring$r*1e3, tring$stars,col="purple",lwd=2)
dev.off() -> dumpvar
errs <- numeric(0)
for (i in seq(1,length(rc$r))) {
errs[i] = abs((rightcurve[i]+leftcurve[i])/2)
if (is.nan(errs[i])) {
errs[i] = 4
} else {
if (errs[i] == 0) { errs[i]=4 }
}
}
sink("curvedata.txt")
cat("# Radius v_obs err v_gas v_disk v_bulge\n")
cat("# kpc km/s km/s km/s km/s km/s\n")
for (i in seq(1,length(rc$r))) {
cat(rc$r[i])
cat(" ")
cat(pts[i])
cat(" ")
cat(errs[i])
cat(" ")
cat(tring$gas[i])
cat(" ")
cat(tring$stars[i])
cat(" ")
cat("0\n")
}
sink()
sink("realcurvedata.txt")
cat("# Radius v_obs err v_gas v_disk v_bulge\n")
cat("# kpc km/s km/s km/s km/s km/s\n")
for (i in seq(1,length(rc$r))) {
cat(rc$r[i])
cat(" ")
cat(tring$v[i])
cat(" ")
cat(errs[i])
cat(" ")
cat(tring$gas[i])
cat(" ")
cat(tring$stars[i])
cat(" ")
cat("0\n")
}
sink()
|
{"hexsha": "eccd322b697ae1270cd3fdbcd0af27f1d11a53d7", "size": 3155, "ext": "r", "lang": "R", "max_stars_repo_path": "pvplot.r", "max_stars_repo_name": "petehague/galaxyview", "max_stars_repo_head_hexsha": "9202a09c97d66b23213356815f3c6eaeb8958d7f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pvplot.r", "max_issues_repo_name": "petehague/galaxyview", "max_issues_repo_head_hexsha": "9202a09c97d66b23213356815f3c6eaeb8958d7f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pvplot.r", "max_forks_repo_name": "petehague/galaxyview", "max_forks_repo_head_hexsha": "9202a09c97d66b23213356815f3c6eaeb8958d7f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.512605042, "max_line_length": 110, "alphanum_fraction": 0.6332805071, "num_tokens": 1141}
|
"""
Utilities for nifti data
"""
# Copyright 2019 Gabriele Valvano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nibabel as nib
import numpy as np
def get_nifti_matrix(filename, dtype=np.int16):
""" Returns array from nifti filename and affine matrix. """
array = nib.load(filename).get_data().astype(dtype) # array
affine = nib.load(filename).affine # affine matrix
return array, affine
def save_nifti_matrix(array, affine, filename, dtype=np.int16):
""" Saves nifti array with a given affine matrix.
Notice that the nifti file will be saved in the given dtype (default int16)"""
nimage = nib.Nifti1Image(array.astype(dtype), affine)
nib.save(nimage, filename=filename)
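# Minimal round-trip sketch (not part of the original module); the file names
# below are placeholders:
#   arr, aff = get_nifti_matrix('example.nii.gz')
#   save_nifti_matrix(arr, aff, 'example_copy.nii.gz', dtype=np.int16)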
|
{"hexsha": "3d5db2338bd7c6b354825d6195f53edd037d5c18", "size": 1230, "ext": "py", "lang": "Python", "max_stars_repo_path": "idas/data_utils/nifti_utils.py", "max_stars_repo_name": "GabrieleValvano/SDNet", "max_stars_repo_head_hexsha": "121b2ba78881bd7b9653da072a0e46efe5f4ba94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-08-15T10:56:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T11:13:34.000Z", "max_issues_repo_path": "idas/data_utils/nifti_utils.py", "max_issues_repo_name": "GabrieleValvano/SDNet", "max_issues_repo_head_hexsha": "121b2ba78881bd7b9653da072a0e46efe5f4ba94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-03-22T17:33:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-13T07:20:10.000Z", "max_forks_repo_path": "idas/data_utils/nifti_utils.py", "max_forks_repo_name": "GabrieleValvano/SDNet", "max_forks_repo_head_hexsha": "121b2ba78881bd7b9653da072a0e46efe5f4ba94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-12T06:56:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-12T06:56:35.000Z", "avg_line_length": 35.1428571429, "max_line_length": 82, "alphanum_fraction": 0.7317073171, "include": true, "reason": "import numpy", "num_tokens": 305}
|
import tensorflow as tf
from tensorflow.python.platform import gfile
import numpy as np
from struct import unpack
from tensorflow.python.framework import graph_util
# supported data types: (struct format char, size in bytes, tf dtype)
_data_types_ = {
    'float32':('f',4,tf.float32),
    'float64':('d',8,tf.float64),
    'float':('f',4,tf.float32),
    'double':('d',8,tf.float64),
    'int8':('b',1,tf.int8),
    'int16':('h',2,tf.int16),
    'int32':('i',4,tf.int32),
    'uint8':('B',1,tf.uint8),
    'uint16':('H',2,tf.uint16),
    'uint32':('I',4,tf.uint32)
    }
# tf nn functions
def leaky_relu(inputs,alpha=0.1,name='leaky_relu'):
return tf.nn.leaky_relu(inputs, alpha=alpha, name=name)
def Batch_Normalize(data,scale,mean,variance,epsilon=1e-6,
scope = "Batch_Normalize",
reuse = False):
with tf.variable_scope(scope, reuse=reuse):
return tf.multiply((data-mean)/(tf.sqrt(variance)+epsilon), scale)
# packed tf layer functions
def convolutional(net, weights, biases,
strides=1,
padding='SAME',
activation_fn = None,
batch_normalize = False,
bn_scale = None,
bn_mean = None,
bn_variance = None,
parent_scope = None, my_scope = "conv2d",
reuse = False):
with tf.variable_scope(None if parent_scope == 'None' else parent_scope,
my_scope, reuse=reuse):
my_out = tf.nn.conv2d(net, weights,
strides=[1,strides,strides,1],
padding=padding,
name = "conv2d")
if batch_normalize:
my_out = Batch_Normalize(my_out, bn_scale, bn_mean, bn_variance,
scope = "BN", reuse = reuse)
my_out = tf.add(my_out, biases, name = "bias")
if activation_fn is not None:
my_out = activation_fn(my_out)
return my_out
def depthwise_convolutional(net, weights, biases,
strides=1,
padding='SAME',
activation_fn = None,
batch_normalize = False,
bn_scale = None,
bn_mean = None,
bn_variance = None,
parent_scope = None, my_scope = "dw_conv2d",
reuse = False):
with tf.variable_scope(None if parent_scope == 'None' else parent_scope,
my_scope, reuse=reuse):
my_out = tf.nn.depthwise_conv2d(net, weights,
strides=[1, strides, strides, 1],
padding=padding,
name="dw_conv2d")
if batch_normalize:
my_out = Batch_Normalize(my_out, bn_scale, bn_mean, bn_variance,
scope = "BN", reuse = reuse)
my_out = tf.add(my_out, biases, name="bias")
if activation_fn is not None:
my_out = activation_fn(my_out)
return my_out
def max_pool(net, ksize=2, strides=1, padding='SAME', scope = None, name = "max_pool"):
    with tf.variable_scope(name if scope in (None, 'None') else scope):
return tf.nn.max_pool(net, ksize=[1,ksize,ksize,1],
strides=[1,strides,strides,1],
padding=padding, name = name)
def avg_pool(net, ksize=2, strides=1, padding='SAME', scope = None, name = "avg_pool"):
    with tf.variable_scope(name if scope in (None, 'None') else scope):
return tf.nn.avg_pool(net, ksize=[1,ksize,ksize,1],
strides=[1,strides,strides,1],
padding=padding, name = name)
def route_concat(layers_route, axis = -1, name='Route_concat'):
len_dim = len(layers_route[0].shape)
concat_dim = (len_dim + axis)%len_dim
return tf.concat(layers_route, concat_dim, name=name)
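# e.g. for 4-D NHWC tensors, axis=-1 gives concat_dim = (4 - 1) % 4 = 3,
# i.e. the channel axis that darknet's route layer concatenates along.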
def route_sum(layers_route, activation_fn ,scope='Route_sum'):
with tf.variable_scope(scope):
return activation_fn(tf.add_n(layers_route, name='sum'))
# I/O
def bytes_from_TFW(tfw_file):
with open(tfw_file, 'rb') as F:
return F.read(-1)
def var_from_bytes(tfw_bytes, start, end,
                   resize_as = None,
                   trainable = False,
                   name = None,
                   dtype='float32'):
    # dtype is a string key of _data_types_; the matching tf dtype is looked
    # up from the table when the variable is created
    assert dtype in _data_types_, 'check the supported data types: '+\
                  ''.join('\n<%s>'%s for s in _data_types_.keys())
    count = (end-start)//_data_types_[dtype][1]
    arr = unpack('%i%s'%(count,_data_types_[dtype][0]), tfw_bytes[start:end])
    if resize_as:
        arr = np.resize(arr, resize_as)
    return tf.Variable(arr, trainable=trainable,
                       dtype=_data_types_[dtype][2], name=name)
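# Usage sketch (hypothetical .tfw layout): unpack a 3x3x3x16 float32 conv
# kernel stored at the start of the weight file.
#   raw = bytes_from_TFW('model.tfw')   # placeholder path
#   w0 = var_from_bytes(raw, 0, 3*3*3*16*4,
#                       resize_as=(3, 3, 3, 16), name='w0', dtype='float32')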
def freeze_to_PB(session, out_nodes_list, out_path):
    LAST_SLASH = out_path.rfind('/')
    out_dir = './' if LAST_SLASH < 0 else out_path[:LAST_SLASH+1]
    out_name = out_path[LAST_SLASH+1:]
constant_graph = graph_util.convert_variables_to_constants(session,
session.graph_def,
output_node_names=out_nodes_list)
tf.train.write_graph(constant_graph, out_dir, out_name, as_text=False)
|
{"hexsha": "86539eead4f14c559fd825cc6e77f5c369c7cdfe", "size": 4568, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_loader/darknet/D2T_lib/static_lib/tf_functions.py", "max_stars_repo_name": "MistQue/kendryte-model-compiler", "max_stars_repo_head_hexsha": "36af917defb37880037fb84330ab995ed44311e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model_loader/darknet/D2T_lib/static_lib/tf_functions.py", "max_issues_repo_name": "MistQue/kendryte-model-compiler", "max_issues_repo_head_hexsha": "36af917defb37880037fb84330ab995ed44311e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_loader/darknet/D2T_lib/static_lib/tf_functions.py", "max_forks_repo_name": "MistQue/kendryte-model-compiler", "max_forks_repo_head_hexsha": "36af917defb37880037fb84330ab995ed44311e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0895522388, "max_line_length": 93, "alphanum_fraction": 0.6468914186, "include": true, "reason": "import numpy", "num_tokens": 1204}
|
program bin2hdf5
! Created by Manuel A. Diaz, ENSMA 2020
use HDF5 ! This module contains all necessary modules
IMPLICIT NONE
!-------- initialize variables -------------
character(len=30) :: input_file0
character(len=30) :: input_file1='xp.dat'
character(len=30) :: input_file2='yp.dat'
character(len=30) :: input_file3='zp.dat'
character(len=30) :: output_file
character(len=30) :: xdmf_file
character(len=10) :: Nx_char
character(len=10) :: Ny_char
character(len=10) :: Nz_char
character(len=3) :: data_extension="h5"
character(len=3) :: xdmf_extension="xmf"
  integer, parameter    :: fp=8       ! float data precision
integer :: pointPosition ! '.' location
integer :: Nx, Ny, Nz ! Array dimensions
real(fp), allocatable :: x(:), y(:), z(:) ! Axis points arrays
real(fp), allocatable :: p(:,:,:) ! Buffer for array
  integer               :: buff_length      ! Buffer length
integer :: i, j, k ! dummy indexes
integer(HSIZE_T), dimension(3) :: dims ! Dataset dimensions
integer(HID_T) :: input_file_id=1 ! File identifier
integer(HID_T) :: output_file_id ! File identifier
integer(HID_T) :: dset_id ! Dataset identifier
integer(HID_T) :: dspace_id ! Dataspace identifier
integer :: error ! Error flag
!-------- Parse arguments from command -------------
if ( command_argument_count() .NE. 4 ) then
print*, "Mode of use: ./bin2hdf5_fields_serial.run [*.bin] [Nx] [Ny] [Nz]"; stop
else
call get_command_argument(1,input_file0)
call get_command_argument(2,Nx_char); read(Nx_char,*) Nx
call get_command_argument(3,Ny_char); read(Ny_char,*) Ny
call get_command_argument(4,Nz_char); read(Nz_char,*) Nz
print*, "Attemping to read ",Nx,"Nx",Ny,"Ny",Nz,"Nz array."
end if
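  ! Example invocation (hypothetical field file and grid size):
  !   ./bin2hdf5_fields_serial.run p_field.bin 256 128 64
  ! xp.dat, yp.dat and zp.dat must then hold 256, 128 and 64 axis points.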
!-------- Set the name of the output files ----------
pointPosition = scan(trim(input_file0),".", BACK=.true.)
if (pointPosition>0) then
output_file = input_file0(1:pointPosition)//data_extension
xdmf_file = input_file0(1:pointPosition)//xdmf_extension
end if
!-------- Allocate space for the array -------------
allocate( x(Nx) )
allocate( y(Ny) )
allocate( z(Nz) )
allocate( p(Nx,Ny,Nz) )
!-------- Read the input data -------------
  Inquire( iolength = buff_length ) p
  open (input_file_id,FILE=trim(input_file0), form='unformatted', status='old', access='direct',recl=buff_length)
read (input_file_id,rec=1) p
close(input_file_id)
open (input_file_id,file=trim(input_file1), form='formatted', status='old', action='read')
do i = 1, Nx
read(input_file_id,*) x(i)
end do
close(input_file_id)
open (input_file_id,file=trim(input_file2), form='formatted', status='old', action='read')
do j = 1, Ny
read(input_file_id,*) y(j)
end do
close(input_file_id)
open (input_file_id,file=trim(input_file3), form='formatted', status='old', action='read')
do k = 1, Nz
read(input_file_id,*) z(k)
end do
close(input_file_id)
!-------- Write the data to HDF5 -------------
! Write Fields
dims(1) = Nx
dims(2) = Ny
dims(3) = Nz
!
CALL h5open_f(error) ! Initialize FORTRAN interface of HDF5.
CALL h5fcreate_f (output_file, H5F_ACC_TRUNC_F, output_file_id, error) ! Create a new file.
! p
CALL h5screate_simple_f(3, dims, dspace_id, error) ! Create the dataspace.
CALL h5dcreate_f(output_file_id, "p", H5T_NATIVE_DOUBLE, dspace_id, dset_id, error)
CALL h5dwrite_f(dset_id, H5T_NATIVE_DOUBLE, p, dims, error) ! Write the dataset.
CALL h5dclose_f(dset_id, error) ! End access to the dataset and release resources used by it.
CALL h5sclose_f(dspace_id, error) ! Terminate access to the data space.
CALL h5fclose_f(output_file_id, error) ! Close the file.
CALL h5close_f(error) ! Close FORTRAN interface.
! Write geometry file
CALL writeGeometry_h5_serial(x,y,z,Nx,Ny,Nz)
! if everything goes well then report that:
print*, "translation *.bin to *.h5 successful :)"
deallocate(x)
deallocate(y)
deallocate(z)
deallocate(p)
!-------- Write the associated XDMF file -------------
call writeFields3D_xmf_dp(xdmf_file,Nx,Ny,Nz,0.0_fp)
end program bin2hdf5
|
{"hexsha": "167dcf1f396f4ca37d3f205eadd08596b53cb729", "size": 4512, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/bin2hdf5_fields_serial.f90", "max_stars_repo_name": "wme7/Fortran2Paraview_with_HDF5", "max_stars_repo_head_hexsha": "7afe4421fe72be316bd475e3b07e5e4f2a72b7af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-30T00:24:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-06T15:13:40.000Z", "max_issues_repo_path": "src/bin2hdf5_fields_serial.f90", "max_issues_repo_name": "wme7/Fortran2Paraview_with_HDF5", "max_issues_repo_head_hexsha": "7afe4421fe72be316bd475e3b07e5e4f2a72b7af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/bin2hdf5_fields_serial.f90", "max_forks_repo_name": "wme7/Fortran2Paraview_with_HDF5", "max_forks_repo_head_hexsha": "7afe4421fe72be316bd475e3b07e5e4f2a72b7af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-30T00:25:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-30T00:25:19.000Z", "avg_line_length": 39.9292035398, "max_line_length": 117, "alphanum_fraction": 0.6148049645, "num_tokens": 1274}
|
import numpy as np
from skimage.io import imread, imsave
from skimage.color import rgb2lab, lab2rgb
from sklearn.metrics import euclidean_distances
import util
class PaletteQuery(object):
"""
Extract a L*a*b color array from a dict representation of a palette query.
The array can then be used to histogram colors, output a palette image, etc.
Parameters
----------
palette_query : dict
A mapping of hex colors to unnormalized values, representing proportion
in the palette (e.g. {'#ffffff': 20, '#cc3300': 0.5}).
"""
def __init__(self, palette_query):
rgb_image = util.palette_query_to_rgb_image(palette_query)
h, w, d = tuple(rgb_image.shape)
self.lab_array = rgb2lab(rgb_image).reshape((h * w, d))
class Image(object):
"""
Read the image at the URL in RGB format, downsample if needed,
and convert to Lab colorspace.
Store original dimensions, resize_factor, and the filename of the image.
Image dimensions will be resized independently such that neither width nor
height exceed the maximum allowed dimension MAX_DIMENSION.
Parameters
----------
url : string
URL or file path of the image to load.
id : string, optional
Name or some other id of the image. For example, the Flickr ID.
"""
MAX_DIMENSION = 240 + 1
def __init__(self, url, _id=None):
self.id = _id
self.url = url
img = imread(url)
# Handle grayscale and RGBA images.
# TODO: Should be smarter here in the future, but for now simply remove
# the alpha channel if present.
if img.ndim == 2:
img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
        elif img.ndim == 3 and img.shape[2] == 4:
img = img[:, :, :3]
# Downsample for speed.
#
# NOTE: I can't find a good method to resize properly in Python!
# scipy.misc.imresize uses PIL, which needs 8bit data.
# Anyway, this is faster and almost as good.
#
# >>> def d(dim, max_dim): return arange(0, dim, dim / max_dim + 1).shape
# >>> plot(range(1200), [d(x, 200) for x in range(1200)])
h, w, d = tuple(img.shape)
self.orig_h, self.orig_w, self.orig_d = tuple(img.shape)
        h_stride = h // self.MAX_DIMENSION + 1
        w_stride = w // self.MAX_DIMENSION + 1
img = img[::h_stride, ::w_stride, :]
# Convert to L*a*b colors.
h, w, d = img.shape
self.h, self.w, self.d = img.shape
self.lab_array = rgb2lab(img).reshape((h * w, d))
def as_dict(self):
"""
Return relevant info about self in a dict.
"""
return {'id': self.id, 'url': self.url,
'resized_width': self.w, 'resized_height': self.h,
'width': self.orig_w, 'height': self.orig_h}
def output_quantized_to_palette(self, palette, filename):
"""
Save to filename a version of the image with all colors quantized
to the nearest color in the given palette.
Parameters
----------
palette : rayleigh.Palette
Containing K colors.
filename : string
Where image will be written.
"""
dist = euclidean_distances(
palette.lab_array, self.lab_array, squared=True).T
min_ind = np.argmin(dist, axis=1)
quantized_lab_array = palette.lab_array[min_ind, :]
img = lab2rgb(quantized_lab_array.reshape((self.h, self.w, self.d)))
imsave(filename, img)
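# Minimal usage sketch; 'photo.jpg' and `pal` (a rayleigh.Palette built
# elsewhere) are placeholders, not part of this module:
#   img = Image('photo.jpg', _id='flickr-123')
#   img.output_quantized_to_palette(pal, 'photo_quantized.png')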
|
{"hexsha": "6209d80c3776ed8567ca0624b9580bee4462df30", "size": 3550, "ext": "py", "lang": "Python", "max_stars_repo_path": "rayleigh/image.py", "max_stars_repo_name": "mgsh/rayleigh", "max_stars_repo_head_hexsha": "54835d20345f0fb05cc626ac627b56371ba9bd42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 185, "max_stars_repo_stars_event_min_datetime": "2015-02-02T08:03:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T07:48:15.000Z", "max_issues_repo_path": "rayleigh/image.py", "max_issues_repo_name": "mgsh/rayleigh", "max_issues_repo_head_hexsha": "54835d20345f0fb05cc626ac627b56371ba9bd42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-04-24T21:18:23.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-21T12:07:57.000Z", "max_forks_repo_path": "rayleigh/image.py", "max_forks_repo_name": "mgsh/rayleigh", "max_forks_repo_head_hexsha": "54835d20345f0fb05cc626ac627b56371ba9bd42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2015-03-22T21:51:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T10:20:02.000Z", "avg_line_length": 34.8039215686, "max_line_length": 81, "alphanum_fraction": 0.6047887324, "include": true, "reason": "import numpy", "num_tokens": 870}
|
# script: Data generator. Reads cropped objects pickles and background images and generates image datasets.
# author: Mihai Polceanu
import cv2
import numpy as np
import os
import sys
import pickle
import random
import imutils
import argparse
def rndint(l,h):
return np.random.randint(l, h)
def resize(img):
ratio = np.random.uniform(0.01, 0.5)
noiseX = np.random.uniform(0.0, 0.1)
noiseY = np.random.uniform(0.0, 0.1)
result = cv2.resize(img, (int(img.shape[1]*(ratio+noiseX)), int(img.shape[0]*(ratio+noiseY))))
return result
def rotate(img):
angle = np.random.randint(0, 360)
result = imutils.rotate_bound(img, angle)
return result
def skew(img):
rows,cols,ch = img.shape
a = np.random.uniform(0, 10)
pts1 = np.float32([[50,50],[200,50],[50,200]])
pts2 = np.float32([[50+rndint(0,a),50+rndint(0,a)],[200+rndint(0,a),50+rndint(0,a)],[50+rndint(0,a),200+rndint(0,a)]])
M = cv2.getAffineTransform(pts1,pts2)
dst = cv2.warpAffine(img,M,(cols,rows))
return dst
def noise(img):
amount = 20
n = np.random.uniform(-amount, amount, img.shape)
result = img.astype(np.float32)+n
result[result<0] = 0
result[result>255] = 255
result = result.astype(np.uint8)
return result
def color(img):
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cropMask = np.sum(img.copy().astype(np.float32), axis=2).reshape(img.shape[0], img.shape[1], 1)
cropMask[cropMask > 0] = 1
#cv2.imshow("crop", cropMask)
amount = 40
#print(img.shape)
n = np.random.uniform(-amount, amount, (1, 1, 1))
#print(n.shape)
result = hsv_img.astype(np.float32)
result[:, :, 0:1] += n
    # wrap the hue plane back into OpenCV's [0, 179] range; masking only the
    # hue channel keeps the boolean index the same shape as the indexed array
    hue = result[:, :, 0]
    hue[hue < 0] += 179
    hue[hue > 179] -= 179
result = result*cropMask
result = result.astype(np.uint8)
result = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
return result
def color2(img):
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cropMask = np.sum(img.copy().astype(np.float32), axis=2).reshape(img.shape[0], img.shape[1], 1)
cropMask[cropMask > 0] = 1
#cv2.imshow("crop", cropMask)
amount = 10
amount2 = 20
#print(img.shape)
n = np.random.uniform(-amount, amount, (1, 1, 1))
n2 = np.random.uniform(-amount2, amount2, (1, 1, 1))
#print(n.shape)
result = hsv_img.astype(np.float32)
result[:, :, 0:1] += n
result[:, :, 2:3] += n2
    # wrap hue into OpenCV's [0, 179] range (same masking fix as in color())
    hue = result[:, :, 0]
    hue[hue < 0] += 179
    hue[hue > 179] -= 179
result[result<0] = 0
result[result>255] = 255
result = result*cropMask
result = result.astype(np.uint8)
result = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
return result
def flip(img):
result = img.copy()
if np.random.uniform(0, 1) > 0.5:
result = cv2.flip( result, 0 )
if np.random.uniform(0, 1) > 0.5:
result = cv2.flip( result, 1 )
return result
def generateAugmentedImage(crops, backgrounds):
bgIndex = np.random.randint(0, backgrounds.shape[0])
# resulting image resolution (darknet default)
dx = 640
dy = 480
im = backgrounds[bgIndex, :, :, :]
#print(im.shape)
h = im.shape[0]
w = im.shape[1]
x = random.randint(0, w-dx)
y = random.randint(0, h-dy)
#print("Cropping {}: {},{} -> {},{}".format(file, x,y, x+dx, y+dy))
bgCrop = im[y:y+dy, x:x+dx, :].copy()
ratioX = np.random.uniform(0.3, 1.0)
ratioY = np.random.uniform(0.3, 1.0)
# hardcoded label list, must coincide with the prefix of video file names
cropsLabels = []
classNames = ['bowl', 'cup', 'fork', 'knife', 'napkin', 'plate', 'spoon']
for i in range(len(crops)):
label = crops[i][0]
cropsLabels.append(label)
classWeights = []
clsSum = 0
for i in range(len(cropsLabels)):
label = crops[i][0]
p = 1.0/cropsLabels.count(label)/len(classNames)
classWeights.append(p)
clsSum += p
# print(cropsLabels)
# print(classWeights)
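    # With all 7 classes present the weights sum to 1: each class contributes
    # 1/len(classNames) = 1/7 in total, split evenly across its crop clips,
    # so rare classes are sampled as often as common ones.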
bgCrop = flip(bgCrop)
labelList = []
for i in range(np.random.randint(1, 7)):
#objIndex = np.random.randint(0, len(crops))
objIndex = np.random.choice(len(crops), 1, p=classWeights)[0]
label = crops[objIndex][0]
cropIndex = np.random.randint(0, len(crops[objIndex][1]))
crop = crops[objIndex][1][cropIndex].copy()
#print(crop.shape)
#print("Cropping {}: {},{} -> {},{}".format(file, x,y, x+dx, y+dy))
# --------------- #
# hsv = cv2.cvtColor(crop, cv2.COLOR_BGR2HSV)
# hsv[:,:,1:2] = hsv[:,:,0:1]
# hsv[:,:,2:3] = hsv[:,:,0:1]
# #hsv = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
# cv2.imshow("Before", hsv)
crop = resize(crop)
crop = rotate(crop)
crop = color(crop)
crop = flip(crop)
# hsv = cv2.cvtColor(crop, cv2.COLOR_BGR2HSV)
# hsv[:,:,1:2] = hsv[:,:,0:1]
# hsv[:,:,2:3] = hsv[:,:,0:1]
# #hsv = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
# cv2.imshow("After", hsv)
#cv2.waitKey(5000)
# --------------- #
dx = crop.shape[1]
dy = crop.shape[0]
#print(bgCrop.shape)
h = bgCrop.shape[0]
w = bgCrop.shape[1]
        #print(w-dx-1, h-dy-1)
        if dx >= w or dy >= h:
            continue # crop no longer fits after resize/rotation; skip it
        x = random.randint(0, w-dx)
        y = random.randint(0, h-dy)
blendRatio = np.random.uniform(0.0, 0.2)
cropMask = np.sum(crop.copy(), axis=2)
cropMask[cropMask > 0] = 1
cropMask = cropMask.reshape(cropMask.shape[0], cropMask.shape[1], 1)
bgCrop[y:y+dy, x:x+dx, :] = bgCrop[y:y+dy, x:x+dx, :]*(1-cropMask) + bgCrop[y:y+dy, x:x+dx, :]*cropMask*blendRatio + cropMask*crop*(1-blendRatio)
labelList.append([classNames.index(label), (x+dx/2.0)/bgCrop.shape[1], (y+dy/2.0)/bgCrop.shape[0], 1.0*dx/bgCrop.shape[1], 1.0*dy/bgCrop.shape[0]])
bgCrop = color2(bgCrop)
bgCrop = noise(bgCrop)
# cv2.imshow("Image", bgCrop)
# cv2.waitKey(100)
return bgCrop, labelList
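# Usage sketch (crops and backgrounds loaded as in __main__ below). Each
# returned label is [class_index, x_center, y_center, width, height] with all
# coordinates normalized to the image size, i.e. darknet's annotation format.
#   img, labels = generateAugmentedImage(crops, backgrounds)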
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Data generator.')
parser.add_argument('--trainSize', type=int, default=100, help='training set size, default 100')
parser.add_argument('--validSize', type=int, default=10, help='validation set size, default 10')
parser.add_argument('--testSize', type=int, default=10, help='test set size, default 10')
#parser.add_argument('--display', metavar='D', type=int, default=1, help='show cropped objects (0==False, 1==True), default 1')
parser.add_argument('--verbose', metavar='V', type=int, default=1, help='output text to console (0==False, 1==True), default 1')
args = parser.parse_args()
crops = []
for root, dirs, files in os.walk("../generated_crop_data"):
for filename in files:
#print(filename)
label = filename.split("_")[0]
if args.verbose:
print(label)
with open('../generated_crop_data/'+filename, 'rb') as handle:
crops.append([label, pickle.load(handle)])
# hack count files
nrBg = 0
for root, dirs, files in os.walk("../dataset_backgrounds"):
for filename in files:
nrBg += 1
# background images are currently 1600x1200
backgrounds = np.zeros((nrBg, 1200, 1600, 3), dtype=np.uint8)
bgIndex = 0
for root, dirs, files in os.walk("../dataset_backgrounds"):
for filename in files:
im = cv2.imread("../dataset_backgrounds/"+filename, cv2.IMREAD_COLOR)
backgrounds[bgIndex,:,:,:] = im
bgIndex += 1
# cv2.imshow("Image", im)
# cv2.waitKey(1)
sets = ['train', 'valid', 'test']
set_size = [args.trainSize, args.validSize, args.testSize]
# safety check to avoid replacing previous data !
if not os.path.exists('../generated_darknet_data/'):
# generate folders
os.makedirs('../generated_darknet_data/')
for s in sets:
if not os.path.exists('../generated_darknet_data/'+s):
os.makedirs('../generated_darknet_data/'+s)
os.makedirs('../generated_darknet_data/'+s+'/images')
os.makedirs('../generated_darknet_data/'+s+'/labels')
for si in range(len(sets)):
file_list = open("../generated_darknet_data/"+sets[si]+".txt", "w")
for i in range(set_size[si]): #range(600000):
bgCrop, labels = generateAugmentedImage(crops, backgrounds)
cv2.imwrite("../generated_darknet_data/"+sets[si]+"/images/img_%d.jpg" % i, bgCrop)
with open("../generated_darknet_data/"+sets[si]+"/labels/img_%d.txt" % i, "w") as text_file:
for j in range(len(labels)):
line = str(labels[j][0])
for k in range(1, len(labels[j])):
line += " "+str(labels[j][k])
text_file.write(line+"\n")
file_list.write("../generated_darknet_data/"+sets[si]+"/images/img_%d.jpg\n" % i)
#print(labels)
file_list.close()
else:
print('------------------------------------------------------------------')
print('| SANITY CHECK: ../generated_darknet_data folder already exists! |')
print('| Please move or remove it to generate new data. |')
print('------------------------------------------------------------------')
# while True:
# pass
# print(image.shape)
# count = 0
# success = True
# cropList = []
# while success:
# #cv2.imwrite("frame%d.jpg" % count, image) # save frame as JPEG file
# if count % 2 == 0:
# img, mask = processImage(image)
# kernel1 = np.ones((4,4), np.uint8)
# kernel2 = np.ones((7,7), np.uint8)
# mask = cv2.erode(mask, kernel1, iterations=2)
# mask = cv2.dilate(mask, kernel2, iterations=2)
# crop, cmask = cropObject(img, mask)
# maxSize = max(crop.shape[0], crop.shape[1])
# ratio = 640.0/maxSize
# small_crop = cv2.resize(crop, (int(crop.shape[1]*ratio), int(crop.shape[0]*ratio)))
# small_cmask = cv2.resize(cmask, (int(cmask.shape[1]*ratio), int(cmask.shape[0]*ratio)))
# cropList.append(small_crop)
# cv2.imshow("Image", small_crop)
# cv2.waitKey(1)
# success,image = vidcap.read()
# print 'Read a new frame: ', success, count
# count += 1
# with open('./crop_data/'+filename+'_cropped.pickle', 'wb') as handle:
# pickle.dump(cropList, handle, protocol=pickle.HIGHEST_PROTOCOL)
|
{"hexsha": "eb9b0637f9e8caf3e22292e1f9dd7506fe863fad", "size": 11072, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/dataGen.py", "max_stars_repo_name": "polceanum/data.augmentation", "max_stars_repo_head_hexsha": "d47d93f20bca453bfda94e5cd714399fd35a6287", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/dataGen.py", "max_issues_repo_name": "polceanum/data.augmentation", "max_issues_repo_head_hexsha": "d47d93f20bca453bfda94e5cd714399fd35a6287", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/dataGen.py", "max_forks_repo_name": "polceanum/data.augmentation", "max_forks_repo_head_hexsha": "d47d93f20bca453bfda94e5cd714399fd35a6287", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.816091954, "max_line_length": 155, "alphanum_fraction": 0.5525650289, "include": true, "reason": "import numpy", "num_tokens": 3091}
|
# -*- coding: utf-8 -*-
"""
Flow based cut algorithms
"""
import itertools
import networkx as nx
# Define the default maximum flow function to use in all flow based
# cut algorithms.
from networkx.algorithms.flow import edmonds_karp, shortest_augmenting_path
from networkx.algorithms.flow import build_residual_network
default_flow_func = edmonds_karp
from .utils import (build_auxiliary_node_connectivity,
build_auxiliary_edge_connectivity)
__author__ = '\n'.join(['Jordi Torrents <jtorrents@milnou.net>'])
__all__ = ['minimum_st_node_cut',
'minimum_node_cut',
'minimum_st_edge_cut',
'minimum_edge_cut']
def minimum_st_edge_cut(G, s, t, flow_func=None, auxiliary=None,
residual=None):
"""Returns the edges of the cut-set of a minimum (s, t)-cut.
This function returns the set of edges of minimum cardinality that,
if removed, would destroy all paths among source and target in G.
    Edge weights are not considered.
Parameters
----------
G : NetworkX graph
Edges of the graph are expected to have an attribute called
'capacity'. If this attribute is not present, the edge is
considered to have infinite capacity.
s : node
Source node for the flow.
t : node
Sink node for the flow.
auxiliary : NetworkX DiGraph
Auxiliary digraph to compute flow based node connectivity. It has
to have a graph attribute called mapping with a dictionary mapping
node names in G and in the auxiliary digraph. If provided
it will be reused instead of recreated. Default value: None.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See :meth:`node_connectivity` for
details. The choice of the default function may change from version
to version and should not be relied on. Default value: None.
residual : NetworkX DiGraph
Residual network to compute maximum flow. If provided it will be
reused instead of recreated. Default value: None.
Returns
-------
cutset : set
Set of edges that, if removed from the graph, will disconnect it.
See also
--------
:meth:`minimum_cut`
:meth:`minimum_node_cut`
:meth:`minimum_edge_cut`
:meth:`stoer_wagner`
:meth:`node_connectivity`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
Examples
--------
This function is not imported in the base NetworkX namespace, so you
have to explicitly import it from the connectivity package:
>>> from networkx.algorithms.connectivity import minimum_st_edge_cut
We use in this example the platonic icosahedral graph, which has edge
connectivity 5.
>>> G = nx.icosahedral_graph()
>>> len(minimum_st_edge_cut(G, 0, 6))
5
If you need to compute local edge cuts on several pairs of
nodes in the same graph, it is recommended that you reuse the
data structures that NetworkX uses in the computation: the
auxiliary digraph for edge connectivity, and the residual
network for the underlying maximum flow computation.
Example of how to compute local edge cuts among all pairs of
nodes of the platonic icosahedral graph reusing the data
structures.
>>> import itertools
>>> # You also have to explicitly import the function for
>>> # building the auxiliary digraph from the connectivity package
>>> from networkx.algorithms.connectivity import (
... build_auxiliary_edge_connectivity)
>>> H = build_auxiliary_edge_connectivity(G)
>>> # And the function for building the residual network from the
>>> # flow package
>>> from networkx.algorithms.flow import build_residual_network
>>> # Note that the auxiliary digraph has an edge attribute named capacity
>>> R = build_residual_network(H, 'capacity')
>>> result = dict.fromkeys(G, dict())
>>> # Reuse the auxiliary digraph and the residual network by passing them
>>> # as parameters
>>> for u, v in itertools.combinations(G, 2):
... k = len(minimum_st_edge_cut(G, u, v, auxiliary=H, residual=R))
... result[u][v] = k
>>> all(result[u][v] == 5 for u, v in itertools.combinations(G, 2))
True
You can also use alternative flow algorithms for computing edge
cuts. For instance, in dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better than
the default :meth:`edmonds_karp` which is faster for sparse
networks with highly skewed degree distributions. Alternative flow
functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> len(minimum_st_edge_cut(G, 0, 6, flow_func=shortest_augmenting_path))
5
"""
if flow_func is None:
flow_func = default_flow_func
if auxiliary is None:
H = build_auxiliary_edge_connectivity(G)
else:
H = auxiliary
kwargs = dict(capacity='capacity', flow_func=flow_func, residual=residual)
cut_value, partition = nx.minimum_cut(H, s, t, **kwargs)
reachable, non_reachable = partition
# Any edge in the original graph linking the two sets in the
# partition is part of the edge cutset
cutset = set()
for u, nbrs in ((n, G[n]) for n in reachable):
cutset.update((u, v) for v in nbrs if v in non_reachable)
return cutset
def minimum_st_node_cut(G, s, t, flow_func=None, auxiliary=None, residual=None):
r"""Returns a set of nodes of minimum cardinality that disconnect source
from target in G.
This function returns the set of nodes of minimum cardinality that,
if removed, would destroy all paths among source and target in G.
Parameters
----------
G : NetworkX graph
s : node
Source node.
t : node
Target node.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The choice
of the default function may change from version to version and
should not be relied on. Default value: None.
auxiliary : NetworkX DiGraph
Auxiliary digraph to compute flow based node connectivity. It has
to have a graph attribute called mapping with a dictionary mapping
node names in G and in the auxiliary digraph. If provided
it will be reused instead of recreated. Default value: None.
residual : NetworkX DiGraph
Residual network to compute maximum flow. If provided it will be
reused instead of recreated. Default value: None.
Returns
-------
cutset : set
Set of nodes that, if removed, would destroy all paths between
source and target in G.
Examples
--------
This function is not imported in the base NetworkX namespace, so you
have to explicitly import it from the connectivity package:
>>> from networkx.algorithms.connectivity import minimum_st_node_cut
We use in this example the platonic icosahedral graph, which has node
connectivity 5.
>>> G = nx.icosahedral_graph()
>>> len(minimum_st_node_cut(G, 0, 6))
5
If you need to compute local st cuts between several pairs of
nodes in the same graph, it is recommended that you reuse the
data structures that NetworkX uses in the computation: the
auxiliary digraph for node connectivity and node cuts, and the
residual network for the underlying maximum flow computation.
Example of how to compute local st node cuts reusing the data
structures:
>>> # You also have to explicitly import the function for
>>> # building the auxiliary digraph from the connectivity package
>>> from networkx.algorithms.connectivity import (
... build_auxiliary_node_connectivity)
>>> H = build_auxiliary_node_connectivity(G)
>>> # And the function for building the residual network from the
>>> # flow package
>>> from networkx.algorithms.flow import build_residual_network
>>> # Note that the auxiliary digraph has an edge attribute named capacity
>>> R = build_residual_network(H, 'capacity')
>>> # Reuse the auxiliary digraph and the residual network by passing them
>>> # as parameters
>>> len(minimum_st_node_cut(G, 0, 6, auxiliary=H, residual=R))
5
You can also use alternative flow algorithms for computing minimum st
node cuts. For instance, in dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better than
the default :meth:`edmonds_karp` which is faster for sparse
networks with highly skewed degree distributions. Alternative flow
functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> len(minimum_st_node_cut(G, 0, 6, flow_func=shortest_augmenting_path))
5
Notes
-----
This is a flow based implementation of minimum node cut. The algorithm
is based in solving a number of maximum flow computations to determine
the capacity of the minimum cut on an auxiliary directed network that
corresponds to the minimum node cut of G. It handles both directed
and undirected graphs. This implementation is based on algorithm 11
in [1]_.
See also
--------
:meth:`minimum_node_cut`
:meth:`minimum_edge_cut`
:meth:`stoer_wagner`
:meth:`node_connectivity`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
"""
if auxiliary is None:
H = build_auxiliary_node_connectivity(G)
else:
H = auxiliary
mapping = H.graph.get('mapping', None)
if mapping is None:
raise nx.NetworkXError('Invalid auxiliary digraph.')
if G.has_edge(s, t) or G.has_edge(t, s):
        return set()
kwargs = dict(flow_func=flow_func, residual=residual, auxiliary=H)
# The edge cut in the auxiliary digraph corresponds to the node cut in the
# original graph.
edge_cut = minimum_st_edge_cut(H, '%sB' % mapping[s], '%sA' % mapping[t],
**kwargs)
# Each node in the original graph maps to two nodes of the auxiliary graph
node_cut = set(H.node[node]['id'] for edge in edge_cut for node in edge)
return node_cut - set([s, t])
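# Construction sketch: build_auxiliary_node_connectivity splits each node u of
# G into uA -> uB with unit capacity in H, so every saturated (uA, uB) arc in
# the st edge cut of H marks u as a member of the node cut of G.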
def minimum_node_cut(G, s=None, t=None, flow_func=None):
r"""Returns a set of nodes of minimum cardinality that disconnects G.
If source and target nodes are provided, this function returns the
set of nodes of minimum cardinality that, if removed, would destroy
all paths among source and target in G. If not, it returns a set
of nodes of minimum cardinality that disconnects G.
Parameters
----------
G : NetworkX graph
s : node
Source node. Optional. Default value: None.
t : node
Target node. Optional. Default value: None.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The
choice of the default function may change from version
to version and should not be relied on. Default value: None.
Returns
-------
cutset : set
Set of nodes that, if removed, would disconnect G. If source
        and target nodes are provided, the set contains the nodes that,
        if removed, would destroy all paths between source and target.
Examples
--------
>>> # Platonic icosahedral graph has node connectivity 5
>>> G = nx.icosahedral_graph()
>>> node_cut = nx.minimum_node_cut(G)
>>> len(node_cut)
5
You can use alternative flow algorithms for the underlying maximum
flow computation. In dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better
than the default :meth:`edmonds_karp`, which is faster for
sparse networks with highly skewed degree distributions. Alternative
flow functions have to be explicitly imported from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> node_cut == nx.minimum_node_cut(G, flow_func=shortest_augmenting_path)
True
If you specify a pair of nodes (source and target) as parameters,
this function returns a local st node cut.
>>> len(nx.minimum_node_cut(G, 3, 7))
5
If you need to perform several local st cuts among different
pairs of nodes on the same graph, it is recommended that you reuse
the data structures used in the maximum flow computations. See
:meth:`minimum_st_node_cut` for details.
Notes
-----
This is a flow based implementation of minimum node cut. The algorithm
is based in solving a number of maximum flow computations to determine
the capacity of the minimum cut on an auxiliary directed network that
corresponds to the minimum node cut of G. It handles both directed
and undirected graphs. This implementation is based on algorithm 11
in [1]_.
See also
--------
:meth:`minimum_st_node_cut`
:meth:`minimum_cut`
:meth:`minimum_edge_cut`
:meth:`stoer_wagner`
:meth:`node_connectivity`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
"""
if (s is not None and t is None) or (s is None and t is not None):
raise nx.NetworkXError('Both source and target must be specified.')
# Local minimum node cut.
if s is not None and t is not None:
if s not in G:
raise nx.NetworkXError('node %s not in graph' % s)
if t not in G:
raise nx.NetworkXError('node %s not in graph' % t)
return minimum_st_node_cut(G, s, t, flow_func=flow_func)
# Global minimum node cut.
    # Analogous to algorithm 11 for global node connectivity in [1].
if G.is_directed():
if not nx.is_weakly_connected(G):
raise nx.NetworkXError('Input graph is not connected')
iter_func = itertools.permutations
def neighbors(v):
return itertools.chain.from_iterable([G.predecessors(v),
G.successors(v)])
else:
if not nx.is_connected(G):
raise nx.NetworkXError('Input graph is not connected')
iter_func = itertools.combinations
neighbors = G.neighbors
# Reuse the auxiliary digraph and the residual network.
H = build_auxiliary_node_connectivity(G)
R = build_residual_network(H, 'capacity')
kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R)
# Choose a node with minimum degree.
v = min(G, key=G.degree)
# Initial node cutset is all neighbors of the node with minimum degree.
min_cut = set(G[v])
# Compute st node cuts between v and all its non-neighbors nodes in G.
for w in set(G) - set(neighbors(v)) - set([v]):
this_cut = minimum_st_node_cut(G, v, w, **kwargs)
if len(min_cut) >= len(this_cut):
min_cut = this_cut
# Also for non adjacent pairs of neighbors of v.
for x, y in iter_func(neighbors(v), 2):
if y in G[x]:
continue
this_cut = minimum_st_node_cut(G, x, y, **kwargs)
if len(min_cut) >= len(this_cut):
min_cut = this_cut
return min_cut
def minimum_edge_cut(G, s=None, t=None, flow_func=None):
r"""Returns a set of edges of minimum cardinality that disconnects G.
If source and target nodes are provided, this function returns the
set of edges of minimum cardinality that, if removed, would break
all paths among source and target in G. If not, it returns a set of
edges of minimum cardinality that disconnects G.
Parameters
----------
G : NetworkX graph
s : node
Source node. Optional. Default value: None.
t : node
Target node. Optional. Default value: None.
flow_func : function
A function for computing the maximum flow among a pair of nodes.
The function has to accept at least three parameters: a Digraph,
a source node, and a target node. And return a residual network
that follows NetworkX conventions (see :meth:`maximum_flow` for
details). If flow_func is None, the default maximum flow function
(:meth:`edmonds_karp`) is used. See below for details. The
choice of the default function may change from version
to version and should not be relied on. Default value: None.
Returns
-------
cutset : set
Set of edges that, if removed, would disconnect G. If source
        and target nodes are provided, the set contains the edges that,
        if removed, would destroy all paths between source and target.
Examples
--------
>>> # Platonic icosahedral graph has edge connectivity 5
>>> G = nx.icosahedral_graph()
>>> len(nx.minimum_edge_cut(G))
5
You can use alternative flow algorithms for the underlying
maximum flow computation. In dense networks the algorithm
:meth:`shortest_augmenting_path` will usually perform better
than the default :meth:`edmonds_karp`, which is faster for
sparse networks with highly skewed degree distributions.
Alternative flow functions have to be explicitly imported
from the flow package.
>>> from networkx.algorithms.flow import shortest_augmenting_path
>>> len(nx.minimum_edge_cut(G, flow_func=shortest_augmenting_path))
5
If you specify a pair of nodes (source and target) as parameters,
this function returns the value of local edge connectivity.
>>> nx.edge_connectivity(G, 3, 7)
5
If you need to perform several local computations among different
pairs of nodes on the same graph, it is recommended that you reuse
the data structures used in the maximum flow computations. See
:meth:`local_edge_connectivity` for details.
Notes
-----
This is a flow based implementation of minimum edge cut. For
undirected graphs the algorithm works by finding a 'small' dominating
set of nodes of G (see algorithm 7 in [1]_) and computing the maximum
flow between an arbitrary node in the dominating set and the rest of
nodes in it. This is an implementation of algorithm 6 in [1]_. For
directed graphs, the algorithm does n calls to the max flow function.
It is an implementation of algorithm 8 in [1]_.
See also
--------
:meth:`minimum_st_edge_cut`
:meth:`minimum_node_cut`
:meth:`stoer_wagner`
:meth:`node_connectivity`
:meth:`edge_connectivity`
:meth:`maximum_flow`
:meth:`edmonds_karp`
:meth:`preflow_push`
:meth:`shortest_augmenting_path`
References
----------
.. [1] Abdol-Hossein Esfahanian. Connectivity Algorithms.
http://www.cse.msu.edu/~cse835/Papers/Graph_connectivity_revised.pdf
"""
if (s is not None and t is None) or (s is None and t is not None):
raise nx.NetworkXError('Both source and target must be specified.')
# reuse auxiliary digraph and residual network
H = build_auxiliary_edge_connectivity(G)
R = build_residual_network(H, 'capacity')
kwargs = dict(flow_func=flow_func, residual=R, auxiliary=H)
# Local minimum edge cut if s and t are not None
if s is not None and t is not None:
if s not in G:
raise nx.NetworkXError('node %s not in graph' % s)
if t not in G:
raise nx.NetworkXError('node %s not in graph' % t)
return minimum_st_edge_cut(H, s, t, **kwargs)
# Global minimum edge cut
    # Analogous to the algorithm for global edge connectivity
if G.is_directed():
# Based on algorithm 8 in [1]
if not nx.is_weakly_connected(G):
raise nx.NetworkXError('Input graph is not connected')
# Initial cutset is all edges of a node with minimum degree
node = min(G, key=G.degree)
min_cut = set(G.edges(node))
nodes = list(G)
n = len(nodes)
        for i in range(n):
            # Wrap around to the first node after the last one.
            this_cut = minimum_st_edge_cut(H, nodes[i], nodes[(i + 1) % n],
                                           **kwargs)
            if len(this_cut) <= len(min_cut):
                min_cut = this_cut
return min_cut
else: # undirected
# Based on algorithm 6 in [1]
if not nx.is_connected(G):
raise nx.NetworkXError('Input graph is not connected')
# Initial cutset is all edges of a node with minimum degree
node = min(G, key=G.degree)
min_cut = set(G.edges(node))
# A dominating set is \lambda-covering
# We need a dominating set with at least two nodes
for node in G:
D = nx.dominating_set(G, start_with=node)
v = D.pop()
if D:
break
else:
# in complete graphs the dominating set will always be of one node
# thus we return min_cut, which now contains the edges of a node
# with minimum degree
return min_cut
for w in D:
this_cut = minimum_st_edge_cut(H, v, w, **kwargs)
if len(this_cut) <= len(min_cut):
min_cut = this_cut
return min_cut
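# Hedged usage sketch (not from the library docs; the node labels 0 and 6 are
# arbitrary choices): the global minimum edge cut can never be larger than a
# local s-t cut on the same graph, since global edge connectivity is the
# minimum over all pairs.
# >>> G = nx.icosahedral_graph()
# >>> len(minimum_edge_cut(G)) <= len(minimum_edge_cut(G, s=0, t=6))
# True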
|
{"hexsha": "0288f5e202c4f8790fa0281476c0fcae8cbdaccf", "size": 22960, "ext": "py", "lang": "Python", "max_stars_repo_path": "networkx/algorithms/connectivity/cuts.py", "max_stars_repo_name": "argriffing/networkx", "max_stars_repo_head_hexsha": "5a3d000e605be2ca567f69a4694afcba3b8acb54", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "networkx/algorithms/connectivity/cuts.py", "max_issues_repo_name": "argriffing/networkx", "max_issues_repo_head_hexsha": "5a3d000e605be2ca567f69a4694afcba3b8acb54", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "networkx/algorithms/connectivity/cuts.py", "max_forks_repo_name": "argriffing/networkx", "max_forks_repo_head_hexsha": "5a3d000e605be2ca567f69a4694afcba3b8acb54", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0132450331, "max_line_length": 81, "alphanum_fraction": 0.6738675958, "include": true, "reason": "import networkx,from networkx", "num_tokens": 5371}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
class TestGeneratorMethods(test.TestCase):
@unittest.skipIf(
os.name == 'nt',
      'use_multiprocessing=True does not work on Windows properly.')
def test_generator_methods(self):
arr_data = np.random.random((50, 2))
arr_labels = np.random.random((50,))
def custom_generator():
batch_size = 10
num_samples = 50
while True:
batch_index = np.random.randint(0, num_samples - batch_size)
start = batch_index
end = start + batch_size
x = arr_data[start: end]
y = arr_labels[start: end]
yield x, y
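    # Note (illustrative): fit_generator and friends expect custom_generator()
    # to yield batches indefinitely; the steps_per_epoch/steps arguments below
    # decide how many batches are drawn per epoch or evaluation pass.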
with self.cached_session():
x = keras.Input((2,))
y = keras.layers.Dense(1)(x)
fn_model = keras.models.Model(x, y)
fn_model.compile(
loss='mse',
optimizer='sgd',
metrics=['mae', metrics_module.CategoricalAccuracy()])
seq_model = keras.models.Sequential()
seq_model.add(keras.layers.Dense(1, input_shape=(2,)))
seq_model.compile(loss='mse', optimizer='sgd')
for model in [fn_model, seq_model]:
model.fit_generator(custom_generator(),
steps_per_epoch=5,
epochs=1,
verbose=1,
max_queue_size=10,
workers=4,
use_multiprocessing=True)
model.fit_generator(custom_generator(),
steps_per_epoch=5,
epochs=1,
verbose=1,
max_queue_size=10,
use_multiprocessing=False)
model.fit_generator(custom_generator(),
steps_per_epoch=5,
epochs=1,
verbose=1,
max_queue_size=10,
use_multiprocessing=False,
validation_data=custom_generator(),
validation_steps=10)
model.fit_generator(custom_generator(),
steps_per_epoch=5,
validation_data=custom_generator(),
validation_steps=1,
workers=0)
model.predict_generator(custom_generator(),
steps=5,
max_queue_size=10,
workers=2,
use_multiprocessing=True)
model.predict_generator(custom_generator(),
steps=5,
max_queue_size=10,
use_multiprocessing=False)
model.predict_generator(custom_generator(),
steps=5,
max_queue_size=10,
workers=0)
model.evaluate_generator(custom_generator(),
steps=5,
max_queue_size=10,
workers=2,
verbose=1,
use_multiprocessing=True)
model.evaluate_generator(custom_generator(),
steps=5,
max_queue_size=10,
use_multiprocessing=False)
model.evaluate_generator(custom_generator(),
steps=5,
max_queue_size=10,
use_multiprocessing=False,
workers=0)
def test_generator_methods_with_sample_weights(self):
arr_data = np.random.random((50, 2))
arr_labels = np.random.random((50,))
arr_sample_weights = np.random.random((50,))
def custom_generator():
batch_size = 10
num_samples = 50
while True:
batch_index = np.random.randint(0, num_samples - batch_size)
start = batch_index
end = start + batch_size
x = arr_data[start: end]
y = arr_labels[start: end]
w = arr_sample_weights[start: end]
yield x, y, w
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=(2,)))
model.compile(
loss='mse',
optimizer='sgd',
metrics=['mae', metrics_module.CategoricalAccuracy()])
model.fit_generator(custom_generator(),
steps_per_epoch=5,
epochs=1,
verbose=1,
max_queue_size=10,
use_multiprocessing=False)
model.fit_generator(custom_generator(),
steps_per_epoch=5,
epochs=1,
verbose=1,
max_queue_size=10,
use_multiprocessing=False,
validation_data=custom_generator(),
validation_steps=10)
model.predict_generator(custom_generator(),
steps=5,
max_queue_size=10,
use_multiprocessing=False)
model.evaluate_generator(custom_generator(),
steps=5,
max_queue_size=10,
use_multiprocessing=False)
def test_generator_methods_invalid_use_case(self):
def custom_generator():
while 1:
yield 0
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=(2,)))
model.compile(loss='mse', optimizer='sgd')
with self.assertRaises(ValueError):
model.fit_generator(custom_generator(),
steps_per_epoch=5,
epochs=1,
verbose=1,
max_queue_size=10,
use_multiprocessing=False)
with self.assertRaises(ValueError):
model.fit_generator(custom_generator(),
steps_per_epoch=5,
epochs=1,
verbose=1,
max_queue_size=10,
use_multiprocessing=False,
validation_data=custom_generator(),
validation_steps=10)
with self.assertRaises(AttributeError):
model.predict_generator(custom_generator(),
steps=5,
max_queue_size=10,
use_multiprocessing=False)
with self.assertRaises(ValueError):
model.evaluate_generator(custom_generator(),
steps=5,
max_queue_size=10,
use_multiprocessing=False)
def test_training_with_sequences(self):
class DummySequence(keras.utils.Sequence):
def __getitem__(self, idx):
return np.zeros([10, 2]), np.ones([10])
def __len__(self):
return 10
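    # Illustrative note: keras.utils.Sequence only needs __getitem__ and
    # __len__, where each item is one full batch; unlike a raw generator, a
    # Sequence can be indexed safely from multiple workers.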
arr_data = np.random.random((50, 2))
arr_labels = np.random.random((50,))
arr_sample_weights = np.random.random((50,))
def custom_generator():
batch_size = 10
num_samples = 50
while True:
batch_index = np.random.randint(0, num_samples - batch_size)
start = batch_index
end = start + batch_size
x = arr_data[start: end]
y = arr_labels[start: end]
w = arr_sample_weights[start: end]
yield x, y, w
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_shape=(2,)))
model.compile(loss='mse', optimizer='sgd')
model.fit_generator(DummySequence(),
steps_per_epoch=10,
validation_data=custom_generator(),
validation_steps=1,
max_queue_size=10,
workers=0,
use_multiprocessing=True)
model.fit_generator(DummySequence(),
steps_per_epoch=10,
validation_data=custom_generator(),
validation_steps=1,
max_queue_size=10,
workers=0,
use_multiprocessing=False)
@tf_test_util.run_in_graph_and_eager_modes
def test_generator_input_to_fit_eval_predict(self):
val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
def custom_generator():
while True:
yield np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
inputs = keras.layers.Input(shape=(10,))
x = keras.layers.Dense(10, activation='relu')(inputs)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
model.compile(RMSPropOptimizer(0.001), 'binary_crossentropy')
model.fit(
custom_generator(),
steps_per_epoch=2,
validation_data=val_data,
epochs=2)
model.evaluate(custom_generator(), steps=2)
model.predict(custom_generator(), steps=2)
@tf_test_util.run_in_graph_and_eager_modes
def test_sequence_input_to_fit_eval_predict(self):
val_data = np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
class CustomSequence(keras.utils.Sequence):
def __getitem__(self, idx):
return np.ones([10, 10], np.float32), np.ones([10, 1], np.float32)
def __len__(self):
return 2
inputs = keras.layers.Input(shape=(10,))
x = keras.layers.Dense(10, activation='relu')(inputs)
outputs = keras.layers.Dense(1, activation='sigmoid')(x)
model = keras.Model(inputs, outputs)
model.compile(RMSPropOptimizer(0.001), 'binary_crossentropy')
model.fit(CustomSequence(), validation_data=val_data, epochs=2)
model.evaluate(CustomSequence())
model.predict(CustomSequence())
with self.assertRaisesRegexp(ValueError, '`y` argument is not supported'):
model.fit(CustomSequence(), y=np.ones([10, 1]))
with self.assertRaisesRegexp(ValueError,
'`sample_weight` argument is not supported'):
model.fit(CustomSequence(), sample_weight=np.ones([10, 1]))
if __name__ == '__main__':
test.main()
|
{"hexsha": "88e89434242a7d7334e025acb5da530675d3f054", "size": 11660, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/keras/engine/training_generator_test.py", "max_stars_repo_name": "wenming2014/tensorflow", "max_stars_repo_head_hexsha": "a102a6a71844e194f3946f6318768c5367f1f16b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 52, "max_stars_repo_stars_event_min_datetime": "2018-11-12T06:39:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T05:31:27.000Z", "max_issues_repo_path": "tensorflow/python/keras/engine/training_generator_test.py", "max_issues_repo_name": "apeforest/tensorflow", "max_issues_repo_head_hexsha": "07da23bfa2a9ca10cd7c1dd6bea0f85d981c013e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-12-04T08:35:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-22T16:17:39.000Z", "max_forks_repo_path": "tensorflow/python/keras/engine/training_generator_test.py", "max_forks_repo_name": "apeforest/tensorflow", "max_forks_repo_head_hexsha": "07da23bfa2a9ca10cd7c1dd6bea0f85d981c013e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2019-03-11T01:17:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T00:44:47.000Z", "avg_line_length": 37.8571428571, "max_line_length": 80, "alphanum_fraction": 0.5428816467, "include": true, "reason": "import numpy", "num_tokens": 2159}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" rbc_ode.py
Test ODENet (OnsagerNet or a plain multi-layer perceptron net) on RBC PCA data.
@author: Haijun Yu <hyu@lsec.cc.ac.cn>
"""
# %%
import config as cfgs
import ode_net as ode
import rbctools as rbc
import argparse
from scipy.special import binom
import torch.utils.data as data
import torch
import numpy as np
from torch import manual_seed
from numpy import random
random.seed(1)
manual_seed(2)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Learn ODE for Rayleigh-Bénard convection encoded data')
parser.add_argument('-tc', type=int, default=cfgs.DEFAULT_CASE_ID,
metavar='tc',
help='id of the test case')
parser.add_argument('nPC', type=int, nargs='?', default=-1, metavar='nPC',
help='number of hidden variables')
parser.add_argument('--method', type=str, choices=['pca', 'ae'],
default='pca', metavar='METHOD',
help='input model of dim reduction (default pca)')
parser.add_argument('--onet', type=str, choices=['ons', 'ode', 'res'],
default='ons', metavar='onet',
help='input name of the ODE net (default ons)')
parser.add_argument('--nHnode', type=int,
default=-1, metavar='nHnode',
help='number of nodes in each hidden layers')
parser.add_argument('--nL', type=int,
default=1, metavar='nHiddenLayers',
help='number of hidden layers')
parser.add_argument('-f', '--fid', type=int, default=0,
metavar='FID',
help='the id of activation function')
parser.add_argument('--ig', type=float, default=0.1, metavar='IG',
help='gain used to initialize the network')
parser.add_argument('-e', '--epochs', type=int, default=-1,
metavar='epochs',
help='number of epochs')
parser.add_argument('-lr', type=float, default=-0.1, metavar='LR',
help='learning rate')
parser.add_argument('--patience', type=int, default=0, metavar='PAT',
help='patience to reduce lr (default 25)')
parser.add_argument('--seed', type=int, default=0, metavar='SEED',
help='The first SEED to test the performance')
parser.add_argument('--nseeds', type=int, default=1, metavar='NSEEDs',
help='number of seeds(runs) to test the performance')
parser.add_argument('--no_amsgrad', default=False, action='store_true',
help='Set Adam parameter amsgrad')
args = parser.parse_args()
print(args)
test_id = args.tc
cfg = cfgs.get_test_case(test_id)
outloc = cfg.outloc
lr_min = 5e-6
epochs = args.epochs if args.epochs > 0 else cfg.epochs
nPC = args.nPC if args.nPC > 0 else cfg.nPC
st_seed = args.seed if args.seed > 0 else cfg.iseed
nHnode = args.nHnode if args.nHnode > 0 else int(
cfg.iNodeC * binom(nPC+2, 2))
amsgrad = not args.no_amsgrad
ode.fid = args.fid
init_gain = args.ig
method = args.method
onet = args.onet
nL = args.nL
nseeds = args.nseeds
    lr = args.lr if args.lr > 0 else cfg.lr_ode
patience = args.patience if args.patience > 0 else cfg.patience
batch_size = cfg.batch_size
wt_decay = cfg.wt_decay
if method == 'pca':
datfile = cfg.h5fname+f'_{method}{nPC}_enc_data.txt.gz'
hvar = np.loadtxt(datfile, delimiter=',')
else:
datfile = outloc+f'_{method}{nPC}_{onet}_enc_data.txt.gz'
hvar = np.loadtxt(datfile, delimiter=',')
print('Data loaded from ', datfile)
nf = hvar.shape[0]
nS_train = int(nf//2 * cfg.tr_ratio)
nS_test = nf//2 - nS_train
ds1 = torch.FloatTensor(hvar[0:2*nS_train:2, :])
ds2 = torch.FloatTensor(hvar[1:2*nS_train:2, :])
dataset_train = data.TensorDataset(ds1, ds2)
dt1 = torch.FloatTensor(hvar[2*nS_train::2, :])
dt2 = torch.FloatTensor(hvar[2*nS_train+1::2, :])
dataset_test = (dt1, dt2)
# %% Train model
for iseed in range(nseeds):
seed = st_seed + iseed
np.random.seed(seed=seed)
torch.manual_seed(seed)
dataloader_train = data.DataLoader(dataset_train,
batch_size=batch_size,
shuffle=True,
num_workers=1)
ode_nodes = cfg.get_ode_nodes(nPC, nHnode, nL, onet)
if onet == 'ode':
ONet = ode.ODENet(ode_nodes, init_gain=init_gain)
elif onet == 'res':
ONet = ode.ResODENet(ode_nodes, init_gain=init_gain)
else:
ONet = ode.OnsagerNet(ode_nodes, init_gain=init_gain,
pot_beta=cfg.pot_beta,
ons_min_d=cfg.ons_min_d
)
nHnode = ode_nodes[1]
        print(f'\t Trainable parameters: {ONet.size()}')
optimizer, scheduler = ode.get_opt_sch(ONet, lr=lr,
weight_decay=wt_decay,
patience=patience,
amsgrad=amsgrad,
lr_min=lr_min, method='Adam',
epoch=epochs)
log = ONet.train_ode(optimizer,
dataloader_train,
epochs,
dataset_test, scheduler, nt=1, dt=cfg.dt)
if np.isnan(log).any():
continue
train_loss, test_loss = log[-1, 1], log[-1, 2]
print(f'>>Results: r={cfg.rRa}',
f'n={nPC:2d}',
f'net={onet}',
f'L={nL}',
f'nH={nHnode}',
f'nDoF={ONet.size()}',
f'fid={ode.fid}',
f'gain={init_gain} seed={seed}',
f'ode_train={train_loss:.3e}',
f'ode_test={test_loss:.3e}')
if np.isnan(log).any():
print('The fitting for last parameter set failed due to NAN!')
else:
savefile = outloc + f'_{method}{nPC}_{onet}_f{ode.fid}_L{nL}_s{seed}'
torch.save(ONet.state_dict(), savefile+'_model_dict.pth')
np.savetxt(savefile+'.txt', log, delimiter=', ', fmt='%.3e')
rbc.plot_ode_train_log(log, savefile)
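# Illustrative invocation (the case id, epochs, and data files are
# assumptions; they must match the entries in config.py):
#   python rbc_ode.py -tc 1 8 --method pca --onet ons --nL 1 -e 100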
|
{"hexsha": "ecca4cc6ceba96c21fd46320b1574ddc2d32a07d", "size": 6793, "ext": "py", "lang": "Python", "max_stars_repo_path": "RBC1r/rbc_ode.py", "max_stars_repo_name": "yuhj1998/OnsagerNet", "max_stars_repo_head_hexsha": "32cbb31116cf4244b340497d739a86eb7de9e7a2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-01T07:23:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T03:02:33.000Z", "max_issues_repo_path": "RBC1r/rbc_ode.py", "max_issues_repo_name": "yuhj1998/OnsagerNet", "max_issues_repo_head_hexsha": "32cbb31116cf4244b340497d739a86eb7de9e7a2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RBC1r/rbc_ode.py", "max_forks_repo_name": "yuhj1998/OnsagerNet", "max_forks_repo_head_hexsha": "32cbb31116cf4244b340497d739a86eb7de9e7a2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.7232704403, "max_line_length": 83, "alphanum_fraction": 0.528337995, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1650}
|
using DataFrames
using CSV
# TODO REFACTOR
function load_tables()
cols = ["source", "target", "flags"]
df_icd9 = CSV.File("2018_I9gem.txt", delim = ' ',
header = false, type=String, ignorerepeated=true) |> DataFrame
rename!(df_icd9, cols)
df_icd10 = CSV.File("2018_I10gem.txt", delim = ' ', header = false, type=String, ignorerepeated=true) |> DataFrame
rename!(df_icd10, cols)
df_icd9_desc = CSV.File("CMS32_DESC_LONG_DX.txt", delim = '\t', header = false, type=String) |> DataFrame
df_icd10_desc = CSV.File("icd10cm_codes_2018.txt", delim = '\t', header = false, type=String) |> DataFrame
df_icd9_pcs = CSV.File("gem_i9pcs.txt", delim = ' ', header = false, type=String, ignorerepeated=true) |> DataFrame
rename!(df_icd9_pcs, cols)
df_icd10_pcs = CSV.File("gem_pcsi9.txt", delim = ' ', header = false, type=String, ignorerepeated=true) |> DataFrame
rename!(df_icd10_pcs, cols)
df_icd10_pcs_desc = CSV.File("icd10pcs_order_2014.txt", delim = '\t', header = false, type=String) |> DataFrame
return df_icd9, df_icd10, df_icd9_desc, df_icd10_desc, df_icd9_pcs, df_icd10_pcs, df_icd10_pcs_desc
end
function make_flag_cols(df_in)
df = copy(df_in)
    df[!, :flags] = map(x -> lpad(split(string(x), " ")[1], 5, "0"), df[!, :flags])
    flag_types = [ "approximate", "no map", "combination", "scenario", "choice list"]
    for (index, flag) in enumerate(flag_types)
        df[!, Symbol(flag_types[index])] = map(x -> string(x[index]), df[!, :flags])
end
select!(df, Not(:flags))
return df
end
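# Illustrative example (the flag string is an assumption): a GEM flags field
# beginning with "10000" expands to approximate = "1" and "0" for the
# remaining four flag columns (no map, combination, scenario, choice list).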
function make_desc_cols(df_in, code::String)
df = copy(df_in)
    df[!, :code] = map(x -> split(string(x), " ")[1], df[!, :Column1])
    df[!, :descriptions] = map(x -> lstrip(join(split(string(x), " ")[2:end], " ")), df[!, :Column1])
select!(df, Not(:Column1))
return df
end
function join_icd_descriptions(df_gems, df_desc)
df = join(df_gems, df_desc,
on = :target => :code,
kind = :inner)
return df
end
function join_icd_descriptions(df_gems, df_desc_target, df_source_description)
df = join(df_gems, df_desc_target,
on = :target => :code,
kind = :inner)
rename!(df, Dict(:descriptions => :target_descriptions))
df = join(df, df_source_description,
on = :source => :code,
kind = :left)
rename!(df, Dict(:descriptions => :source_descriptions))
return df
end
cd("../data/")
gems9, gems10, desc9, desc10, icd9_pcs, icd10_pcs, icd10_pcs_desc = load_tables();
gems9 = make_flag_cols(gems9);
gems10 = make_flag_cols(gems10);
desc9 = make_desc_cols(desc9, "icd9");
desc10 = make_desc_cols(desc10, "icd10");
gems9_10 = join_icd_descriptions(gems9,desc10, desc9);
gems10_9 = join_icd_descriptions(gems10,desc9, desc10);
icd9_pcs = make_flag_cols(icd9_pcs);
icd10_pcs = make_flag_cols(icd10_pcs);
desc10pcs = make_desc_cols(icd10_pcs_desc, "icd10_pcs");
CSV.write("processed/gems9_10.csv", gems9_10)
CSV.write("processed/gems10_9.csv", gems10_9)
CSV.write("processed/gems_pcs_desc.csv", desc10pcs)
CSV.write("processed/gems_icd9_pcs.csv", icd9_pcs)
CSV.write("processed/gems_icd10_pcs.csv", icd10_pcs)
|
{"hexsha": "a36442c6c56f988208075249b52ee3ae5ed7711e", "size": 3241, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/process_gems.jl", "max_stars_repo_name": "pkmklong/Gems", "max_stars_repo_head_hexsha": "3d3210792631ea9a188743a0df4d68369644b6ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/process_gems.jl", "max_issues_repo_name": "pkmklong/Gems", "max_issues_repo_head_hexsha": "3d3210792631ea9a188743a0df4d68369644b6ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/process_gems.jl", "max_forks_repo_name": "pkmklong/Gems", "max_forks_repo_head_hexsha": "3d3210792631ea9a188743a0df4d68369644b6ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5754716981, "max_line_length": 132, "alphanum_fraction": 0.6612156742, "num_tokens": 1007}
|
"""Tests relating to constants."""
import WrightTools as wt
import numpy as np
import re
def test_set_remove():
data = wt.Data()
data.create_variable("x", np.linspace(0, 10))
data.create_variable("y", np.linspace(0, 10))
data.create_variable("z", np.zeros(50))
data.set_constants("x-y", "z")
assert data.constant_names == ("x__m__y", "z")
data.remove_constant("z")
assert data.constant_names == ("x__m__y",)
data.remove_constant(0)
assert data.constant_names == ()
def test_label():
data = wt.Data()
data.create_variable("x", np.linspace(0, 10))
data.create_variable("y", np.linspace(0, 10))
data.create_variable("z", np.full(50, 2.3), units="fs")
data.z.label = "z"
data.set_constants("x-y", "z")
assert data.constants[1].label == r"$\mathsf{\tau_{z}\,=\,2.3\,fs}$"
def test_repr():
data = wt.Data()
data.create_variable("x", np.linspace(0, 10))
data.create_variable("y", np.linspace(0, 10))
data.create_variable("z", np.full(50, 2.3), units="fs")
data.z.label = "z"
data.set_constants("x-y", "z")
assert (
re.match(r"\<WrightTools\.Constant x-y = 0\.0 None at .*\>", repr(data.constants[0]))
is not None
)
|
{"hexsha": "f2c9369e857689a6f6cf1cfad08510044b34bddc", "size": 1231, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/data/constants.py", "max_stars_repo_name": "untzag/WrightTools", "max_stars_repo_head_hexsha": "05480d2f91ceeca422d9e5ac381fce1840207cb0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-07-11T15:58:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-10T20:33:26.000Z", "max_issues_repo_path": "tests/data/constants.py", "max_issues_repo_name": "untzag/WrightTools", "max_issues_repo_head_hexsha": "05480d2f91ceeca422d9e5ac381fce1840207cb0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 808, "max_issues_repo_issues_event_min_datetime": "2015-04-12T00:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T21:06:06.000Z", "max_forks_repo_path": "tests/data/constants.py", "max_forks_repo_name": "untzag/WrightTools", "max_forks_repo_head_hexsha": "05480d2f91ceeca422d9e5ac381fce1840207cb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-07-22T18:54:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T20:31:05.000Z", "avg_line_length": 29.3095238095, "max_line_length": 93, "alphanum_fraction": 0.6190089358, "include": true, "reason": "import numpy", "num_tokens": 365}
|
#!/usr/bin/env python3
# Copyright 2020 Yuri Khokhlov, Ivan Medennikov (STC-innovations Ltd)
# Apache 2.0.
"""This script transforms phone-indices in alignment to 0(silence phones), 1(speech phones), 2(spn phones)"""
import os
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Usage: conv_ali_to_vad_012.py 1:2:3:4:5 6:7:8:9:10 <in-text-phone-ali> <out-text-vad-ali>')
parser.add_argument('silence_phones', type=str)
parser.add_argument('spn_phones', type=str)
parser.add_argument('phone_ali', type=str)
parser.add_argument('vad_ali', type=str)
args = parser.parse_args()
print('Options:')
print(' Silence phones (colon-separated list): {}'.format(args.silence_phones))
print(' Spoken-noise phones (colon-separated list): {}'.format(args.spn_phones))
print(' Input phone ali in text format: {}'.format(args.phone_ali))
print(' Output vad ali in text format: {}'.format(args.vad_ali))
silence_set = set(args.silence_phones.split(':'))
print("sil phones: ")
print(args.silence_phones.split(':'))
spn_set = set(args.spn_phones.split(':'))
print("spn phones: ")
print(args.spn_phones.split(':'))
assert os.path.exists(args.phone_ali), 'File does not exist {}'.format(args.phone_ali)
parent = os.path.dirname(os.path.abspath(args.vad_ali))
if not os.path.exists(parent):
os.makedirs(parent)
print('Starting to convert')
count = 0
with open(args.phone_ali) as ali_file:
with open(args.vad_ali, 'wt') as vad_file:
for line in ali_file:
line = line.strip()
if len(line) == 0:
continue
parts = line.split(' ')
parts = list(filter(None, parts))
assert len(parts) > 1, 'Empty alignment in line {}'.format(line)
vad_file.write('{}'.format(parts[0]))
phones = parts[1:]
for phone in phones:
if phone in silence_set:
vad_file.write(' 0')
elif phone in spn_set:
vad_file.write(' 2')
else:
vad_file.write(' 1')
vad_file.write('\n')
count += 1
print('Converted alignments for {} utterances'.format(count))
|
{"hexsha": "291ca3e8d6728dfdf5011a904e1c0c0b97a7a803", "size": 2453, "ext": "py", "lang": "Python", "max_stars_repo_path": "egs/chime6/s5c_track2/local/ts-vad/conv_ali_to_vad_012.py", "max_stars_repo_name": "LanceaKing/kaldi", "max_stars_repo_head_hexsha": "eb205a83f08fb8056ba1deb03c505ec8b722d4d9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2021-06-16T11:38:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T01:33:56.000Z", "max_issues_repo_path": "egs/chime6/s5c_track2/local/ts-vad/conv_ali_to_vad_012.py", "max_issues_repo_name": "LanceaKing/kaldi", "max_issues_repo_head_hexsha": "eb205a83f08fb8056ba1deb03c505ec8b722d4d9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2017-09-06T00:12:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-22T08:03:19.000Z", "max_forks_repo_path": "egs/chime6/s5c_track2/local/ts-vad/conv_ali_to_vad_012.py", "max_forks_repo_name": "LanceaKing/kaldi", "max_forks_repo_head_hexsha": "eb205a83f08fb8056ba1deb03c505ec8b722d4d9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2020-01-03T22:28:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T23:00:27.000Z", "avg_line_length": 38.328125, "max_line_length": 141, "alphanum_fraction": 0.5923359152, "include": true, "reason": "import numpy", "num_tokens": 601}
|
"""
Tests for the C implementation of the sequence transducer.
From outside the package directory, run
`python -m transducer.test`.
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import time
import mxnet as mx
from rnnt_mx import RNNTLoss
from rnnt_np import RNNTLoss as rnntloss
parser = argparse.ArgumentParser(description='MXNet RNN Transducer Test.')
parser.add_argument('--mx', default=False, action='store_true')
args = parser.parse_args()
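# Illustrative usage (run from the test directory; paths are assumptions):
#   python test.py        # NumPy reference implementation (rnnt_np)
#   python test.py --mx   # compiled RNNTLoss operator (rnnt_mx)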
def wrap_and_call(acts, labels):
gpu = ctx == mx.gpu()
acts = mx.nd.array(acts, dtype='float32', ctx=ctx)
lengths = mx.nd.array([acts.shape[1]] * acts.shape[0], dtype='int32', ctx=ctx)
label_lengths = mx.nd.array([len(l) for l in labels], dtype='int32', ctx=ctx)
labels = mx.nd.array(labels, dtype='int32', ctx=ctx)
fn = RNNTLoss() if args.mx else rnntloss()
with mx.autograd.record():
acts.attach_grad()
logits = acts if gpu and args.mx else mx.nd.log_softmax(acts, axis=3)
costs = fn(logits, labels, lengths, label_lengths)
costs.backward()
return costs.asnumpy(), acts.grad.asnumpy()
def small_test():
acts = np.array([[[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.1, 0.1],
[0.7, 0.1, 0.2, 0.1, 0.1]]]])
labels = [[1, 2]]
cost, grads = wrap_and_call(acts, labels)
expected_cost = 4.495666
expected_grads = np.array([[[[-0.13116688, -0.3999269 , 0.17703125, 0.17703125,
0.17703125],
[-0.18572757, 0.12247056, -0.18168412, 0.12247056,
0.12247056],
[-0.32091254, 0.06269141, 0.06928472, 0.12624499,
0.06269141]],
[[ 0.05456069, -0.21824276, 0.05456069, 0.05456069,
0.05456069],
[ 0.12073959, 0.12073959, -0.48295835, 0.12073959,
0.12073959],
[-0.6925882 , 0.16871116, 0.18645467, 0.16871116,
0.16871116]]]])
assert np.allclose(cost, expected_cost, rtol=1e-6), \
"small_test costs mismatch."
assert np.allclose(grads, expected_grads), \
"small_test gradient mismatch."
def big_test():
# minibatch x T x U x alphabet_size
activations = [
[[[0.06535690384862791, 0.7875301411923206, 0.08159176605666074],
[0.5297155426466327, 0.7506749639230854, 0.7541348379087998],
[0.6097641124736383, 0.8681404965673826, 0.6225318186056529]],
[[0.6685222872103057, 0.8580392805336061, 0.16453892311765583],
[0.989779515236694, 0.944298460961015, 0.6031678586829663],
[0.9467833543605416, 0.666202507295747, 0.28688179752461884]],
[[0.09418426230195986, 0.3666735970751962, 0.736168049462793],
[0.1666804425271342, 0.7141542198635192, 0.3993997272216727],
[0.5359823524146038, 0.29182076440286386, 0.6126422611507932]],
[[0.3242405528768486, 0.8007644367291621, 0.5241057606558068],
[0.779194617063042, 0.18331417220174862, 0.113745182072432],
[0.24022162381327106, 0.3394695622533106, 0.1341595066017014]]],
[[[0.5055615569388828, 0.051597282072282646, 0.6402903936686337],
[0.43073311517251, 0.8294731834714112, 0.1774668847323424],
[0.3207001991262245, 0.04288308912457006, 0.30280282975568984]],
[[0.6751777088333762, 0.569537369330242, 0.5584738347504452],
[0.08313242153985256, 0.06016544344162322, 0.10795752845152584],
[0.7486153608562472, 0.943918041459349, 0.4863558118797222]],
[[0.4181986264486809, 0.6524078485043804, 0.024242983423721887],
[0.13458171554507403, 0.3663418070512402, 0.2958297395361563],
[0.9236695822497084, 0.6899291482654177, 0.7418981733448822]],
[[0.25000547599982104, 0.6034295486281007, 0.9872887878887768],
[0.5926057265215715, 0.8846724004467684, 0.5434495396894328],
[0.6607698886038497, 0.3771277082495921, 0.3580209022231813]]]]
expected_costs = [4.2806528590890736, 3.9384369822503591]
expected_grads = [[[[-1.86843902e-01, -6.25548810e-02, 2.49398798e-01],
[-2.03376666e-01, 2.02399328e-01, 9.77333169e-04],
[-1.41016081e-01, 7.91234672e-02, 6.18926100e-02]],
[[-1.15517676e-02, -8.12802389e-02, 9.28319991e-02],
[-1.54257029e-01, 2.29432687e-01, -7.51756504e-02],
[-2.46593088e-01, 1.46404594e-01, 1.00188486e-01]],
[[-1.29182907e-02, -6.15932420e-02, 7.45115355e-02],
[-5.59857301e-02, 2.19830811e-01, -1.63845062e-01],
[-4.97626871e-01, 2.09239945e-01, 2.88386941e-01]],
[[ 1.36048580e-02, -3.02196294e-02, 1.66147724e-02],
[ 1.13924511e-01, 6.27811998e-02, -1.76705718e-01],
[-6.67078257e-01, 3.67658824e-01, 2.99419403e-01]]],
[[[-3.56343776e-01, -5.53474613e-02, 4.11691219e-01],
[-9.69219357e-02, 2.94591039e-02, 6.74628317e-02],
[-6.35175705e-02, 2.76544970e-02, 3.58630717e-02]],
[[-1.54499024e-01, -7.39420280e-02, 2.28441030e-01],
[-1.66789949e-01, -8.78955179e-05, 1.66877866e-01],
[-1.72369644e-01, 1.05565332e-01, 6.68043196e-02]],
[[ 2.38748826e-02, -1.18255816e-01, 9.43809375e-02],
[-1.04707085e-01, -1.08934477e-01, 2.13641584e-01],
[-3.69844258e-01, 1.80118099e-01, 1.89726159e-01]],
[[ 2.57137045e-02, -7.94617534e-02, 5.37480488e-02],
[ 1.22328237e-01, -2.38788679e-01, 1.16460443e-01],
[-5.98686993e-01, 3.02203178e-01, 2.96483815e-01]]]]
activations = np.array(activations)
labels = [[1, 2],
[1, 1]]
costs, grads = wrap_and_call(activations, labels)
assert np.allclose(costs, expected_costs, 1e-4), \
"big_test average costs mismatch."
assert np.allclose(grads, expected_grads, 1e-3), \
"big_test grads for average cost mismatch."
if __name__ == "__main__":
ctx = mx.cpu()
small_test()
big_test()
print("CPU Tests passed!")
ctx = mx.gpu()
big_test()
print('GPU Test passed!')
|
{"hexsha": "7ec7422b5e8606b737cd901dd444f29320872b6a", "size": 6934, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test.py", "max_stars_repo_name": "vlavla/mxnet-transducer", "max_stars_repo_head_hexsha": "50800904658c18914ac0c92adefbec29502882ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2018-11-26T05:48:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T03:21:55.000Z", "max_issues_repo_path": "test/test.py", "max_issues_repo_name": "vlavla/mxnet-transducer", "max_issues_repo_head_hexsha": "50800904658c18914ac0c92adefbec29502882ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-06-13T21:19:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-15T08:05:44.000Z", "max_forks_repo_path": "test/test.py", "max_forks_repo_name": "vlavla/mxnet-transducer", "max_forks_repo_head_hexsha": "50800904658c18914ac0c92adefbec29502882ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-07-11T03:23:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-08T16:52:07.000Z", "avg_line_length": 44.4487179487, "max_line_length": 85, "alphanum_fraction": 0.5631670032, "include": true, "reason": "import numpy", "num_tokens": 2521}
|
program tarefa6
    ! Define pi
    pi = 4*atan(1e0)
    ! Read the integer value of N
    print *, 'Roots of the equation (Z - 2)**N = 3'
    print *, 'Enter the integer value of N:'
    read (*,*) N
    ! loop over k for the general solution z = |z|**(1/n) * ( cos( (theta+2*pi*k)/n ) + i*sin( (theta+2*pi*k)/n ) )
    ! polar form of the complex number z
    ! in this equation theta = 0
    ! arg = 2*pi*k/n
    ! zmod = |z|
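    ! Worked check (N = 2): the two roots are z = 2 + sqrt(3) and
    ! z = 2 - sqrt(3), approximately 3.73205 and 0.26795.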
do k = 1, N
arg = 2e0*pi*k/N
zmod = 3e0**(1e0/N)
x = zmod*cos(arg) + 2e0
yi = zmod*sin(arg)
print '("Z"I0,": ",F8.5," ",SP,F8.5,"i")', k, cmplx(x, yi)
end do
end program tarefa6
|
{"hexsha": "1cfc8229abdb284798405aa1646c5fa48083efa3", "size": 644, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "projeto-1/tarefa-6/tarefa-6-10407962.f90", "max_stars_repo_name": "ArexPrestes/introducao-fisica-computacional", "max_stars_repo_head_hexsha": "bf6e7a0134c11ddbaf9125c42eb0982250f970d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "projeto-1/tarefa-6/tarefa-6-10407962.f90", "max_issues_repo_name": "ArexPrestes/introducao-fisica-computacional", "max_issues_repo_head_hexsha": "bf6e7a0134c11ddbaf9125c42eb0982250f970d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projeto-1/tarefa-6/tarefa-6-10407962.f90", "max_forks_repo_name": "ArexPrestes/introducao-fisica-computacional", "max_forks_repo_head_hexsha": "bf6e7a0134c11ddbaf9125c42eb0982250f970d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.76, "max_line_length": 104, "alphanum_fraction": 0.4875776398, "num_tokens": 256}
|
import numpy as np
"""
Args:
epoch (int) - number of iterations to run through neural net
w1, w2, w3, w4, b1, b2, b3, b4 (numpy arrays) - starting weights
x_train (np array) - (n,d) numpy array where d=number of features
y_train (np array) - (n,) all the labels corresponding to x_train
num_classes (int) - number of classes (range of y_train)
shuffle (bool) - shuffle data at each epoch if True. Turn this off for testing.
Returns:
w1, w2, w3, w4, b1, b2, b3, b4 (numpy arrays) - resulting weights
losses (list of ints) - each index should correspond to epoch number
Note that len(losses) == epoch
"""
def minibatch_gd(epoch, w1, w2, w3, w4, b1, b2, b3, b4, x_train, y_train, num_classes, shuffle=True):
#IMPLEMENT HERE
losses = []
for e in range(epoch):
print(e)
if shuffle:
train = np.column_stack((x_train,y_train))
np.random.shuffle(train)
x_train = train[:,:-1]
y_train = train[:,-1]
loss = 0
for i in range(len(x_train)//200):
x_batch = x_train[i*200:(i+1)*200]
y_batch = y_train[i*200:(i+1)*200]
curr_loss = four_nn(x_batch,[w1,w2,w3,w4],[b1,b2,b3,b4],y_batch,False)
loss += curr_loss
losses.append(loss)
return w1, w2, w3, w4, b1, b2, b3, b4, losses
"""
Args:
All the weights/biases from minibatch_gd()
x_test (np array) - (n', d) numpy array
y_test (np array) - (n',) all the labels corresponding to x_test
num_classes (int) - number of classes (range of y_test)
Returns:
avg_class_rate (float) - average classification rate
class_rate_per_class (list of floats) - Classification Rate per class
(index corresponding to class number)
"""
def test_nn(w1, w2, w3, w4, b1, b2, b3, b4, x_test, y_test, num_classes):
classes = four_nn(x_test,[w1,w2,w3,w4],[b1,b2,b3,b4],y_test,True)
    results = np.bincount(classes == y_test, minlength=2)
    tot_correct = results[1]
    avg_class_rate = tot_correct/len(classes)
    class_results = np.bincount((classes == y_test)*y_test, minlength=num_classes)
    class_results[0] -= results[0]
    class_rate_per_class = class_results/np.bincount(y_test, minlength=num_classes)
return avg_class_rate, class_rate_per_class
"""
4 Layer Neural Network
Helper function for minibatch_gd
"""
def four_nn(A,W,b,y,test):
Z1, acache1 = affine_forward(A, W[0], b[0])
A1, rcache1 = relu_forward(Z1)
Z2, acache2 = affine_forward(A1, W[1], b[1])
A2, rcache2 = relu_forward(Z2)
Z3, acache3 = affine_forward(A2, W[2], b[2])
A3, rcache3 = relu_forward(Z3)
F, acache4 = affine_forward(A3, W[3], b[3])
if test == True:
c = np.argmax(F,axis=1)
return c
loss, dF = cross_entropy(F, y)
dA3, dW4, db4 = affine_backward(dF, acache4)
dZ3 = relu_backward(dA3, rcache3)
dA2, dW3, db3 = affine_backward(dZ3, acache3)
dZ2 = relu_backward(dA2, rcache2)
dA1, dW2, db2 = affine_backward(dZ2, acache2)
dZ1 = relu_backward(dA1,rcache1)
dX, dW1, db1 = affine_backward(dZ1, acache1)
eta = 0.1
W[0] -= eta*dW1
W[1] -= eta*dW2
W[2] -= eta*dW3
W[3] -= eta*dW4
b[0] -= eta*db1
b[1] -= eta*db2
b[2] -= eta*db3
b[3] -= eta*db4
    # Rebinds only the local name A; the caller's input batch is unchanged.
    A = A - eta*dX
return loss
def affine_forward(A, W, b):
Z = np.matmul(A,W) + b
cache = (A, W, b)
return Z, cache
def affine_backward(dZ, cache):
dA = np.matmul(dZ,np.transpose(cache[1]))
dW = np.matmul(np.transpose(cache[0]),dZ)
dB = np.sum(dZ,axis=0)
return dA, dW, dB
def relu_forward(Z):
A = np.maximum(np.zeros(np.shape(Z)),Z)
cache = Z
return A, cache
def relu_backward(dA, cache):
bools = np.zeros(np.shape(cache)) <= cache
dZ = bools*dA
return dZ
def cross_entropy(F, y):
Fy = F[np.array(range(len(y))),y.astype(int)]
loss = -(1/np.size(y))*(np.sum(Fy - np.log(np.sum(np.exp(F),axis=1))))
bools = np.indices((np.shape(F)))
bools = bools[1]
bools = np.transpose(bools) == y
bools = np.transpose(bools)
sum_Fik = np.sum(np.exp(F),axis=1)
sum_Fik = sum_Fik.reshape((len(sum_Fik),1))
dF = -(1/np.size(y))*(bools - np.exp(F)/sum_Fik)
return loss, dF
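if __name__ == "__main__":
    # Minimal smoke test; all shapes and hyperparameters below are
    # illustrative assumptions, not part of the original assignment.
    # minibatch_gd uses a fixed batch size of 200, so provide >= 200 samples.
    rng = np.random.RandomState(0)
    d, h, c, n = 4, 8, 3, 200
    w1, w2 = 0.1 * rng.randn(d, h), 0.1 * rng.randn(h, h)
    w3, w4 = 0.1 * rng.randn(h, h), 0.1 * rng.randn(h, c)
    b1, b2, b3, b4 = np.zeros(h), np.zeros(h), np.zeros(h), np.zeros(c)
    x = rng.randn(n, d)
    y = rng.randint(0, c, n)
    *_, losses = minibatch_gd(3, w1, w2, w3, w4, b1, b2, b3, b4, x, y, c)
    print('losses per epoch:', losses)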
|
{"hexsha": "568d96756cd8c8335a5a461f5f517834281885b5", "size": 4271, "ext": "py", "lang": "Python", "max_stars_repo_path": "neural_network.py", "max_stars_repo_name": "CoolyComrade/four-layer-nn", "max_stars_repo_head_hexsha": "11fa059939bb2e51ab8935d7d18d732ed6a22a64", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neural_network.py", "max_issues_repo_name": "CoolyComrade/four-layer-nn", "max_issues_repo_head_hexsha": "11fa059939bb2e51ab8935d7d18d732ed6a22a64", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neural_network.py", "max_forks_repo_name": "CoolyComrade/four-layer-nn", "max_forks_repo_head_hexsha": "11fa059939bb2e51ab8935d7d18d732ed6a22a64", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.168, "max_line_length": 101, "alphanum_fraction": 0.6066494966, "include": true, "reason": "import numpy", "num_tokens": 1403}
|
using Documenter, MetidaReports
#using DocumenterLaTeX
makedocs(
modules = [MetidaReports],
sitename = "MetidaReports.jl",
authors = "Vladimir Arnautov",
pages = [
"Home" => "index.md",
],
)
deploydocs(repo = "github.com/PharmCat/MetidaReports.jl.git", push_preview = true,
)
|
{"hexsha": "218d5a0f75dc701b9f232f8ad5ceeb816f4b1da5", "size": 299, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "PharmCat/MetidaReports.jl", "max_stars_repo_head_hexsha": "08e906e2c653cf545435cd2205d96881e1d30142", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "PharmCat/MetidaReports.jl", "max_issues_repo_head_hexsha": "08e906e2c653cf545435cd2205d96881e1d30142", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "PharmCat/MetidaReports.jl", "max_forks_repo_head_hexsha": "08e906e2c653cf545435cd2205d96881e1d30142", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.9333333333, "max_line_length": 82, "alphanum_fraction": 0.6555183946, "num_tokens": 89}
|
(*
Benedikt Ahrens and Régis Spadotti
Terminal semantics for codata types in intensional Martin-Löf type theory
http://arxiv.org/abs/1401.1053
*)
(*
Content of this file:
definition of the category of coalgebras for the signature of infinite tri. matrices
*)
Require Import Category.Types.
Require Import Category.Setoids.
Require Import Category.Types_Setoids.
Require Import Category.RComod.
Require Import Category.RComonadWithCut.
Require Import Theory.Category.
Require Import Theory.Functor.
Require Import Theory.RelativeComonadWithCut.
Require Import Theory.Comodule.
Require Import Theory.Product.
Require Import Theory.PrecompositionWithProduct.
Require Import Theory.PushforwardComodule.
Generalizable All Variables.
(*------------------------------------------------------------------------------
-- CATEGORY OF TRIANGLES
----------------------------------------------------------------------------*)
(** * Category of triangular matrices **)
(** ** Object and morphism definitions **)
Module TriMat.
Structure Obj (E : 𝑻𝒚𝒑𝒆) : Type := mkObj
{ T :> 𝑹𝑪𝒐𝒎𝒐𝒏𝒂𝒅𝑾𝒊𝒕𝒉𝑪𝒖𝒕 𝑬𝑸 E
; rest :> [T] ⇒ [T][E×─]
; rest_cut : ∀ {A}, rest(A) ∘ T⋅cut ≈ T⋅cut ∘ rest(E × A) }.
Arguments mkObj {_ _ _} _.
Arguments T {_} _.
Arguments rest {_} _.
Arguments rest_cut {_} _ {_ _ _ _}.
Notation "'TriMat.make' ⦃ 'T' ≔ T ; 'rest' ≔ rest ⦄" :=
(@mkObj _ T rest _) (only parsing).
Structure Morphism {E} (T S : Obj E) : Type := mkMorphism
{ τ :> T ⇒ S
; τ_commutes : ⟨τ⟩[E×─] ∘ Φ ∘ τ⁎⋅T ≈ S ∘ ⟨τ⟩ }.
Arguments mkMorphism {_ _ _ _} _.
Arguments τ {_ _ _} _.
Arguments τ_commutes {_ _ _} _ {_ _ _ _}.
Notation "'TriMat.make' ⦃ 'τ' ≔ τ ⦄" := (@mkMorphism _ _ _ τ _) (only parsing).
Program Definition Hom {E} (T S : Obj E) : Setoid :=
Setoid.make ⦃ Carrier ≔ Morphism T S
; Equiv ≔ (λ g f ∙ g ≈ f) ⦄.
(** equivalence **)
Next Obligation.
constructor.
- repeat intro. now rewrite H.
- repeat intro. symmetry; now rewrite H.
- repeat intro; etransitivity; eauto. now apply H0.
Qed.
End TriMat.
Export TriMat.
(** ** Identity and compositon definitions **)
Section Defs.
Variable (E : 𝑻𝒚𝒑𝒆).
Implicit Types (T S R U : Obj E).
Infix "⇒" := Hom.
Program Definition id {T} : T ⇒ T :=
TriMat.make ⦃ τ ≔ id[T] ⦄.
(** τ-cong **)
Next Obligation.
now rewrite H.
Qed.
Obligation Tactic := idtac.
Program Definition compose {T S R} : [ S ⇒ R ⟶ T ⇒ S ⟶ T ⇒ R ] :=
λ g f ↦₂ TriMat.make ⦃ τ ≔ g ∘ f ⦄.
(** τ-commutes **)
Next Obligation.
intros T S R g f.
destruct g as [g g_commutes]. simpl in g_commutes.
destruct f as [f f_commutes]. simpl in f_commutes. simpl.
intros.
rewrite H.
etransitivity.
eapply Setoids.cong.
apply f_commutes.
reflexivity.
apply g_commutes.
reflexivity.
Qed.
(** τ-cong **)
Next Obligation.
repeat intro.
simpl.
etransitivity. eapply Setoids.cong.
eapply Setoids.cong. apply H1.
etransitivity. eapply Setoids.cong.
apply H0. reflexivity.
apply H.
reflexivity.
Qed.
Infix "∘" := compose.
Lemma left_id : ∀ T S (f : T ⇒ S), id ∘ f ≈ f.
Proof.
intros. simpl. intros. rewrite H.
reflexivity.
Qed.
Lemma right_id : ∀ T S (f : T ⇒ S), f ∘ id ≈ f.
Proof.
repeat intro. simpl. now rewrite H.
Qed.
Lemma compose_assoc T R S U (f : T ⇒ R) (g : R ⇒ S) (h : S ⇒ U) : h ∘ g ∘ f ≈ h ∘ (g ∘ f).
Proof.
repeat intro.
simpl. now rewrite H.
Qed.
Canonical Structure 𝑻𝒓𝒊𝑴𝒂𝒕 : Category :=
mkCategory left_id right_id compose_assoc.
End Defs.
|
{"author": "rs-", "repo": "Triangles", "sha": "57f10cb6c627c331b2c6e7b344a34ae50838cc67", "save_path": "github-repos/coq/rs--Triangles", "path": "github-repos/coq/rs--Triangles/Triangles-57f10cb6c627c331b2c6e7b344a34ae50838cc67/Category/TriMat/Category.v"}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 27 13:40:25 2017
@author: knrai
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
#encoding catagorical data state
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, -1] = labelencoder_X.fit_transform(X[:, -1])
onehotencoder = OneHotEncoder(categorical_features = [3])
X = onehotencoder.fit_transform(X).toarray()
# Avoiding the dummy variable trap
X = X[:, 1:]
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
import statsmodels.api as sm
X = np.append(arr = np.ones((50,1)).astype(int), values = X, axis = 1)
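# Backward elimination (sketch): fit OLS, inspect regressor_OLS.summary(),
# and at each step drop the predictor with the highest p-value above the
# chosen significance level (0.05 here), refitting until all remaining
# p-values fall below it. The shrinking column lists below trace that process.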
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 1, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
X_opt = X[:, [0, 3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_opt, y, test_size = 0.2, random_state = 0)
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
|
{"hexsha": "6e96c65feaf8bbe2686d271ca67dcb87077eeb66", "size": 1958, "ext": "py", "lang": "Python", "max_stars_repo_path": "Part 2 - Regression/Section 5 - Multiple Linear Regression/K_multipleregression_backwardelimination.py", "max_stars_repo_name": "KrishnanandRai/ML-Learning", "max_stars_repo_head_hexsha": "ad4dcd98ebe889b14ffcdfc6a5ae6d9af9dd88b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Part 2 - Regression/Section 5 - Multiple Linear Regression/K_multipleregression_backwardelimination.py", "max_issues_repo_name": "KrishnanandRai/ML-Learning", "max_issues_repo_head_hexsha": "ad4dcd98ebe889b14ffcdfc6a5ae6d9af9dd88b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Part 2 - Regression/Section 5 - Multiple Linear Regression/K_multipleregression_backwardelimination.py", "max_forks_repo_name": "KrishnanandRai/ML-Learning", "max_forks_repo_head_hexsha": "ad4dcd98ebe889b14ffcdfc6a5ae6d9af9dd88b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5774647887, "max_line_length": 96, "alphanum_fraction": 0.7206332993, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 603}
|
import os
import pickle
import h5py
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from models import basenet
from models import dataloader
from models.celeba_core import CelebaModel
import utils
class CelebaDomainIndependent(CelebaModel):
def __init__(self, opt):
super(CelebaDomainIndependent, self).__init__(opt)
self.best_dev_mAP_conditional = 0.
self.best_dev_mAP_max = 0.
self.best_dev_mAP_sum_prob = 0.
self.best_dev_mAP_sum_out = 0.
def _criterion(self, output, target):
domain_label = target[:, -1:]
class_num = output.size(1) // 2
loss = F.binary_cross_entropy_with_logits(
domain_label*output[:, :class_num]
+ (1-domain_label)*output[:, class_num:],
target[:, :-1])
return loss
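    # Layout note (inferred from _criterion above): the network emits 2*C
    # logits per sample; columns [:C] are the head read when the domain label
    # is 1 and columns [C:] when it is 0, i.e. one classifier per domain.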
def inference_conditional(self, output, target):
"""Inference method: condition on the known domain"""
domain_label = target[:, -1:]
predict_prob = torch.sigmoid(output).cpu().numpy()
class_num = predict_prob.shape[1] // 2
predict_prob = domain_label*predict_prob[:, :class_num] \
+ (1-domain_label)*predict_prob[:, class_num:]
return predict_prob
def inference_max(self, output):
"""Inference method: choose the max of the two domains"""
predict_prob = torch.sigmoid(output).cpu().numpy()
class_num = predict_prob.shape[1] // 2
predict_prob = np.maximum(predict_prob[:, :class_num],
predict_prob[:, class_num:])
return predict_prob
def inference_sum_prob(self, output):
"""Inference method: sum the probability from two domains"""
predict_prob = torch.sigmoid(output).cpu().numpy()
class_num = predict_prob.shape[1] // 2
predict_prob = predict_prob[:, :class_num] + predict_prob[:, class_num:]
return predict_prob
def inference_sum_out(self, output):
"""Inference method: sum the output from two domains"""
class_num = output.size(1) // 2
return (output[:, :class_num] + output[:, class_num:]).cpu().numpy()
def train(self):
"""Train the model for one epoch, evaluate on validation set and
save the best model for each inference method
"""
start_time = datetime.now()
self._train(self.train_loader)
utils.save_state_dict(self.state_dict(), os.path.join(self.save_path, 'ckpt.pth'))
dev_loss, dev_output, _ = self._test(self.dev_loader)
dev_predict_conditional = self.inference_conditional(dev_output, self.dev_target)
dev_per_class_AP_conditional = utils.compute_weighted_AP(self.dev_target,
dev_predict_conditional, self.dev_class_weight)
dev_mAP_conditional = utils.compute_mAP(dev_per_class_AP_conditional, self.subclass_idx)
if dev_mAP_conditional > self.best_dev_mAP_conditional:
self.best_dev_mAP_conditional = dev_mAP_conditional
utils.save_state_dict(self.state_dict(), os.path.join(self.save_path, 'best-conditional.pth'))
dev_predict_max = self.inference_max(dev_output)
dev_per_class_AP_max = utils.compute_weighted_AP(self.dev_target,
dev_predict_max, self.dev_class_weight)
dev_mAP_max = utils.compute_mAP(dev_per_class_AP_max, self.subclass_idx)
if dev_mAP_max > self.best_dev_mAP_max:
self.best_dev_mAP_max = dev_mAP_max
utils.save_state_dict(self.state_dict(), os.path.join(self.save_path, 'best-max.pth'))
dev_predict_sum_prob = self.inference_sum_prob(dev_output)
dev_per_class_AP_sum_prob = utils.compute_weighted_AP(self.dev_target,
dev_predict_sum_prob, self.dev_class_weight)
dev_mAP_sum_prob = utils.compute_mAP(dev_per_class_AP_sum_prob, self.subclass_idx)
if dev_mAP_sum_prob > self.best_dev_mAP_sum_prob:
self.best_dev_mAP_sum_prob = dev_mAP_sum_prob
utils.save_state_dict(self.state_dict(), os.path.join(self.save_path, 'best-sum_prob.pth'))
dev_predict_sum_out = self.inference_sum_out(dev_output)
dev_per_class_AP_sum_out = utils.compute_weighted_AP(self.dev_target,
dev_predict_sum_out, self.dev_class_weight)
dev_mAP_sum_out = utils.compute_mAP(dev_per_class_AP_sum_out, self.subclass_idx)
if dev_mAP_sum_out > self.best_dev_mAP_sum_out:
self.best_dev_mAP_sum_out = dev_mAP_sum_out
utils.save_state_dict(self.state_dict(), os.path.join(self.save_path, 'best-sum_out.pth'))
self.log_result('Dev epoch',
{
'loss': dev_loss/len(self.dev_loader),
'mAP_conditional': dev_mAP_conditional,
'mAP_max': dev_mAP_max,
'mAP_sum_prob': dev_mAP_sum_prob,
'mAP_sum_out': dev_mAP_sum_out,
},
self.epoch)
duration = datetime.now() - start_time
        print(('Finish training epoch {}, dev mAP conditional: {}, '
'dev mAP max: {}, dev mAP sum prob: {}, '
'dev mAP sum out: {}, time used: {}').format(self.epoch, dev_mAP_conditional,
dev_mAP_max, dev_mAP_sum_prob, dev_mAP_sum_out, duration))
def _compute_result(self, model_name, data_loader, target, class_weight,
inference_fn, save_name, conditional=False):
"""Load model and compute performance with given inference method"""
state_dict = torch.load(os.path.join(self.save_path, model_name))
self.network.load_state_dict(state_dict['model'])
loss, output, feature = self._test(data_loader)
if conditional:
predict = inference_fn(output, target)
else:
predict = inference_fn(output)
per_class_AP = utils.compute_weighted_AP(target, predict,
class_weight)
mAP = utils.compute_mAP(per_class_AP, self.subclass_idx)
result = {'output': output.cpu().numpy(),
'feature': feature.cpu().numpy(),
'per_class_AP': per_class_AP,
'mAP': mAP}
utils.save_pkl(result, os.path.join(self.save_path, save_name))
return mAP
def test(self):
# Test and save the result for different inference methods
dev_mAP_conditional = self._compute_result('best-conditional.pth', self.dev_loader,
self.dev_target, self.dev_class_weight,
self.inference_conditional,
'dev_conditional_result.pkl', conditional=True)
test_mAP_conditional = self._compute_result('best-conditional.pth', self.test_loader,
self.test_target, self.test_class_weight,
self.inference_conditional,
'test_conditional_result.pkl', conditional=True)
dev_mAP_max = self._compute_result('best-max.pth', self.dev_loader,
self.dev_target, self.dev_class_weight,
self.inference_max,
'dev_max_result.pkl')
test_mAP_max = self._compute_result('best-max.pth', self.test_loader,
self.test_target, self.test_class_weight,
self.inference_max,
'test_max_result.pkl')
dev_mAP_sum_prob = self._compute_result('best-sum_prob.pth', self.dev_loader,
self.dev_target, self.dev_class_weight,
self.inference_sum_prob,
'dev_sum_prob_result.pkl')
test_mAP_sum_prob = self._compute_result('best-sum_prob.pth', self.test_loader,
self.test_target, self.test_class_weight,
self.inference_sum_prob,
'test_sum_prob_result.pkl')
dev_mAP_sum_out = self._compute_result('best-sum_out.pth', self.dev_loader,
self.dev_target, self.dev_class_weight,
self.inference_sum_out,
'dev_sum_out_result.pkl')
test_mAP_sum_out = self._compute_result('best-sum_out.pth', self.test_loader,
self.test_target, self.test_class_weight,
self.inference_sum_out,
'test_sum_out_result.pkl')
# Output the mean AP for the best model on dev and test set
info = (('Dev conditional mAP: {}, max mAP: {}, sum prob mAP: {}, sum out mAP: {}\n'
'Test conditional mAP: {}, max mAP: {}, sum prob mAP: {}, sum out mAP: {}'
).format(dev_mAP_conditional, dev_mAP_max, dev_mAP_sum_prob, dev_mAP_sum_out,
test_mAP_conditional, test_mAP_max, test_mAP_sum_prob, test_mAP_sum_out))
utils.write_info(os.path.join(self.save_path, 'result.txt'), info)
|
{"hexsha": "cb324fd83f10fbc288b82acf66b27c6a810cf9ce", "size": 9786, "ext": "py", "lang": "Python", "max_stars_repo_path": "dlfairness/original_code/DomainBiasMitigation/models/celeba_domain_independent.py", "max_stars_repo_name": "lin-tan/fairness-variance", "max_stars_repo_head_hexsha": "7f6aee23160707ffe78f429e5d960022ea1c9fe4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dlfairness/original_code/DomainBiasMitigation/models/celeba_domain_independent.py", "max_issues_repo_name": "lin-tan/fairness-variance", "max_issues_repo_head_hexsha": "7f6aee23160707ffe78f429e5d960022ea1c9fe4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dlfairness/original_code/DomainBiasMitigation/models/celeba_domain_independent.py", "max_forks_repo_name": "lin-tan/fairness-variance", "max_forks_repo_head_hexsha": "7f6aee23160707ffe78f429e5d960022ea1c9fe4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.96875, "max_line_length": 106, "alphanum_fraction": 0.5902309422, "include": true, "reason": "import numpy", "num_tokens": 1978}
|
from copy import deepcopy
from enum import Enum
from astropy.io import registry
import pathlib
import os
import sys
import astropy.io.fits as fits
from astropy.nddata import (
VarianceUncertainty,
StdDevUncertainty,
InverseVariance,
)
import astropy.units as u
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_pixel
from specutils import Spectrum1D, SpectrumList
from specutils.io.registers import data_loader
_unregistered_loaders = {}
_unregistered_identifiers = {}
_low_priority_loaders = {}
def whatformat(*args, format=None, **kwargs):
"""
Read in data.
The arguments passed to this method depend on the format.
"""
cls = SpectrumList
formats = []
ctx = None
try:
if format is None:
path = None
fileobj = None
if len(args):
if isinstance(args[0], (str, pathlib.Path)) and not os.path.isdir(args[0]):
from astropy.utils.data import get_readable_fileobj
# path might be a pathlib.Path object
if isinstance(args[0], pathlib.Path):
args = (str(args[0]),) + args[1:]
path = args[0]
try:
ctx = get_readable_fileobj(args[0], encoding='binary')
fileobj = ctx.__enter__()
except OSError:
raise
except Exception:
fileobj = None
else:
args = [fileobj] + list(args[1:])
elif hasattr(args[0], 'read'):
path = None
fileobj = args[0]
formats = whatformat_get_valid_format(
'read', cls, path, fileobj, args, kwargs)
else:
formats = [format]
finally:
if ctx is not None:
ctx.__exit__(*sys.exc_info())
return formats
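# Added illustrative usage (not part of the original module; the file name is
# an assumption): list the loader formats that claim a file, then read with
# the top candidate.
def _example_whatformat(path="spectrum.fits"):
    candidates = whatformat(path)
    return SpectrumList.read(path, format=candidates[0])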
def unregister(format):
"""
    The unregistered format also stays the less-preferred format even once it is restored
"""
_low_priority_loaders[format] = True
if format not in _unregistered_loaders.keys():
_unregistered_loaders[format] = SpectrumList
_unregistered_identifiers[format] = registry._identifiers[(format, SpectrumList)]
registry.unregister_identifier(format, SpectrumList)
def restore_registered_loaders():
"""
Restore all unregistered formats
"""
for format in list(_unregistered_loaders):
registry.register_identifier(format, SpectrumList, _unregistered_identifiers[format])
_unregistered_loaders.pop(format)
_unregistered_identifiers.pop(format)
def whatformat_get_valid_format(mode, cls, path, fileobj, args, kwargs):
"""
    Returns the valid formats that can be used to read/write the data in
    question, ordered so that low-priority loaders come last. Mode can be
    either 'read' or 'write'.
"""
valid_formats = registry.identify_format(mode, cls, path, fileobj, args, kwargs)
    # A low priority loader is only intended to be used if it's the only one,
    # so push low-priority formats to the end of the candidate list.
    reordered = []
    for format in valid_formats:
        if format not in _low_priority_loaders:
            reordered.append(format)
    for format in valid_formats:
        if format in _low_priority_loaders:
            reordered.append(format)
    return reordered
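# Added illustrative sketch (not part of the original module; the file name is
# an assumption): temporarily unregister the top-priority format so that a
# competing loader gets tried, then restore the registry.
def _example_prefer_other_loader(path="spectrum.fits"):
    first, *_ = whatformat(path)
    unregister(first)                      # demote it for subsequent reads
    try:
        spectra = SpectrumList.read(path)
    finally:
        restore_registered_loaders()       # re-register; it stays low priority
    return spectra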
HEADER_PUPOSE_KEYWORDS = ["EXTNAME", "HDUNAME"]
HEADER_INDEX_PUPOSE_KEYWORDS = ["ROW", "ARRAY"]
FITS_FILE_EXTS = ["fit", "fits", "fts"]
SINGLE_SPLIT_LABEL = "Data Central Single-Split"
MULTILINE_SINGLE_LABEL = "Data Central Multiline-Single"
UNKNOWN_LABEL = "Unable to find a sensible label for spectrum"
# These are order in a best guess of priority, ideally the loader would know
# which label to use.
HEADER_LABEL_KEYWORDS = [
"OBJECT",
"OBJNAME",
"OBS_ID",
"EXTNAME",
"HDUNAME",
"TITLE",
"ORIGIN",
"ROOTNAME",
"FILENAME",
"AUTHOR",
"OBSERVER",
"CREATOR",
"INSTRUME",
"PROGRAM",
]
def guess_label_from_header(header):
"""
Guess the label from `header`, which is assumed to be some mapping with
FITS-like keys.
"""
for header_key in HEADER_LABEL_KEYWORDS:
label = header.get(header_key)
if label is not None:
return str(label)
raise ValueError(UNKNOWN_LABEL)
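# Added illustrative check (not part of the original module): the first
# matching keyword in HEADER_LABEL_KEYWORDS wins, so OBJECT beats FILENAME.
def _example_guess_label():
    header = {"FILENAME": "spec.fits", "OBJECT": "NGC 1275"}
    assert guess_label_from_header(header) == "NGC 1275"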
class Purpose(Enum):
SKIP = "skip"
SCIENCE = "science"
ERROR_STDEV = "error_stdev"
ERROR_VARIANCE = "error_variance"
ERROR_INVERSEVARIANCE = "error_inversevariance"
SKY = "sky"
COMBINED_SCIENCE = "combined_science"
COMBINED_ERROR_STDEV = "combined_error_stdev"
COMBINED_ERROR_VARIANCE = "combined_error_variance"
COMBINED_ERROR_INVERSEVARIANCE = "combined_error_inversevariance"
UNREDUCED_SCIENCE = "unreduced_science"
UNREDUCED_ERROR_STDEV = "unreduced_error_stdev"
UNREDUCED_ERROR_VARIANCE = "unreduced_error_variance"
UNREDUCED_ERROR_INVERSEVARIANCE = "unreduced_error_inversevariance"
CREATE_SPECTRA = {
Purpose.SCIENCE,
Purpose.SKY,
Purpose.COMBINED_SCIENCE,
Purpose.UNREDUCED_SCIENCE,
}
ERROR_PURPOSES = {
Purpose.ERROR_STDEV,
Purpose.ERROR_VARIANCE,
Purpose.ERROR_INVERSEVARIANCE,
Purpose.COMBINED_ERROR_STDEV,
Purpose.COMBINED_ERROR_VARIANCE,
Purpose.COMBINED_ERROR_INVERSEVARIANCE,
Purpose.UNREDUCED_ERROR_STDEV,
Purpose.UNREDUCED_ERROR_VARIANCE,
Purpose.UNREDUCED_ERROR_INVERSEVARIANCE,
}
PURPOSE_SPECTRA_MAP = {
Purpose.SCIENCE: "reduced",
Purpose.ERROR_STDEV: "reduced",
Purpose.ERROR_VARIANCE: "reduced",
Purpose.ERROR_INVERSEVARIANCE: "reduced",
Purpose.SKY: "sky",
Purpose.COMBINED_SCIENCE: "combined",
Purpose.COMBINED_ERROR_STDEV: "combined",
Purpose.COMBINED_ERROR_VARIANCE: "combined",
Purpose.COMBINED_ERROR_INVERSEVARIANCE: "combined",
Purpose.UNREDUCED_SCIENCE: "unreduced",
Purpose.UNREDUCED_ERROR_STDEV: "unreduced",
Purpose.UNREDUCED_ERROR_VARIANCE: "unreduced",
Purpose.UNREDUCED_ERROR_INVERSEVARIANCE: "unreduced",
}
UNCERTAINTY_MAP = {
Purpose.ERROR_STDEV: StdDevUncertainty,
Purpose.ERROR_VARIANCE: VarianceUncertainty,
Purpose.ERROR_INVERSEVARIANCE: InverseVariance,
Purpose.COMBINED_ERROR_STDEV: StdDevUncertainty,
Purpose.COMBINED_ERROR_VARIANCE: VarianceUncertainty,
Purpose.COMBINED_ERROR_INVERSEVARIANCE: InverseVariance,
Purpose.UNREDUCED_ERROR_STDEV: StdDevUncertainty,
Purpose.UNREDUCED_ERROR_VARIANCE: VarianceUncertainty,
Purpose.UNREDUCED_ERROR_INVERSEVARIANCE: InverseVariance,
}
GUESS_TO_PURPOSE = {
"badpix": Purpose.SKIP,
"": Purpose.SKIP,
"sky": Purpose.SKY,
"stdev": Purpose.ERROR_STDEV,
"sigma": Purpose.ERROR_STDEV,
"variance": Purpose.ERROR_VARIANCE,
"spectrum": Purpose.SCIENCE,
}
def add_labels(spec_list, use_purpose=False):
not_labeled = 0
label_set = set()
for spec in spec_list:
meta = spec.meta
purpose = meta.get("purpose")
if use_purpose:
tail = " (" + str(purpose) + ")"
else:
tail = ""
try:
meta["label"] = guess_label_from_header(meta["header"]) + tail
except ValueError:
not_labeled += 1
else:
label_set.add(meta["label"])
if len(label_set) + not_labeled < len(spec_list):
# This implies there are duplicates
for i, spec in enumerate(spec_list, start=1):
label = spec.meta.get("label")
if label is not None:
spec.meta["label"] = label + " #" + str(i)
def compute_wcs_from_keys_and_values(
header=None,
*,
wavelength_unit_keyword=None,
wavelength_unit=None,
pixel_reference_point_keyword=None,
pixel_reference_point=None,
pixel_reference_point_value_keyword=None,
pixel_reference_point_value=None,
pixel_width_keyword=None,
pixel_width=None,
):
if wavelength_unit is None:
if wavelength_unit_keyword is None:
raise ValueError(
"Either wavelength_unit or wavelength_unit_keyword must be "
"provided"
)
wavelength_unit = u.Unit(header[wavelength_unit_keyword])
if pixel_reference_point is None:
if pixel_reference_point_keyword is None:
raise ValueError(
"Either pixel_reference_point or "
"pixel_reference_point_keyword must be provided"
)
pixel_reference_point = header[pixel_reference_point_keyword]
if pixel_reference_point_value is None:
if pixel_reference_point_value_keyword is None:
raise ValueError(
"Either pixel_reference_point_value or "
"pixel_reference_point_value_keyword must be provided"
)
pixel_reference_point_value = header[
pixel_reference_point_value_keyword
]
if pixel_width is None:
if pixel_width_keyword is None:
raise ValueError(
"Either pixel_width or pixel_width_keyword must be provided"
)
# RS: The javascript drivers allow for the keyword CDELT1 (TODO: discuss with James)
if header.get(pixel_width_keyword) is None:
if header.get("CDELT1") is not None:
pixel_width_keyword = "CDELT1"
pixel_width = header[pixel_width_keyword]
w = WCS(naxis=1)
w.wcs.crpix[0] = pixel_reference_point
w.wcs.crval[0] = pixel_reference_point_value
w.wcs.cdelt[0] = pixel_width
w.wcs.cunit[0] = wavelength_unit
return w
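# Added illustrative sketch (not part of the original module): build a linear
# wavelength WCS from explicit values instead of header keywords.
def _example_linear_wcs():
    w = compute_wcs_from_keys_and_values(
        wavelength_unit=u.AA,
        pixel_reference_point=1,           # CRPIX1 (1-indexed)
        pixel_reference_point_value=4000,  # CRVAL1, in Angstrom
        pixel_width=1.5,                   # CDELT1
    )
    return w.pixel_to_world(0)             # pixel 0 -> 4000.0 Angstrom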
def get_flux_units_from_keys_and_values(
header,
*,
flux_unit_keyword=None,
flux_unit=None,
flux_scale_keyword=None,
flux_scale=None,
):
if flux_unit is None:
if flux_unit_keyword is None:
raise ValueError(
"Either flux_unit or flux_unit_keyword must be provided"
)
flux_unit = header[flux_unit_keyword]
flux_unit = u.Unit(flux_unit)
if flux_scale is None:
if flux_scale_keyword is None:
flux_scale = 1
else:
flux_scale = header[flux_scale_keyword]
return flux_scale * flux_unit
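# Added illustrative sketch (not part of the original module; the header
# values are assumptions): combine a BUNIT-style unit with a scale keyword.
def _example_flux_units():
    header = {"BUNIT": "erg / (s cm2 Angstrom)", "BSCALE": 1e-17}
    return get_flux_units_from_keys_and_values(
        header, flux_unit_keyword="BUNIT", flux_scale_keyword="BSCALE"
    )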
def add_single_spectra_to_map(
spectra_map,
*,
header,
data,
spec_info=None,
wcs_info=None,
units_info=None,
purpose_prefix=None,
all_standard_units,
all_keywords,
valid_wcs,
index=None,
):
spec_wcs_info = {}
spec_units_info = {}
if wcs_info is not None:
spec_wcs_info.update(wcs_info)
if units_info is not None:
spec_units_info.update(units_info)
if spec_info is not None:
spec_wcs_info.update(spec_info.get("wcs", {}))
spec_units_info.update(spec_info.get("units", {}))
purpose = spec_info.get("purpose")
else:
purpose = None
purpose = get_purpose(
header,
purpose=purpose,
purpose_prefix=purpose_prefix,
all_keywords=all_keywords,
index=index,
)
if purpose == Purpose.SKIP:
return None
if valid_wcs or not spec_wcs_info:
wcs = WCS(header)
else:
wcs = compute_wcs_from_keys_and_values(header, **spec_wcs_info)
if all_standard_units:
spec_units_info = {"flux_unit_keyword": "BUNIT"}
flux_unit = get_flux_units_from_keys_and_values(header, **spec_units_info)
flux = data * flux_unit
meta = {"header": header, "purpose": PURPOSE_SPECTRA_MAP[purpose]}
if purpose in CREATE_SPECTRA:
spectrum = Spectrum1D(wcs=wcs, flux=flux, meta=meta)
spectra_map[PURPOSE_SPECTRA_MAP[purpose]].append(spectrum)
elif purpose in ERROR_PURPOSES:
try:
spectrum = spectra_map[PURPOSE_SPECTRA_MAP[purpose]][-1]
except IndexError:
raise ValueError(f"No spectra to associate with {purpose}")
aligned_flux = pixel_to_pixel(wcs, spectrum.wcs, flux)
spectrum.uncertainty = UNCERTAINTY_MAP[purpose](aligned_flux)
spectrum.meta["uncertainty_header"] = header
# We never actually want to return something, this just flags it to pylint
# that we know we're breaking out of the function when skip is selected
return None
def get_purpose(
header, *, purpose=None, purpose_prefix=None, all_keywords, index=None
):
def guess_purpose(header):
for keyword in HEADER_PUPOSE_KEYWORDS:
guess = header.get(keyword)
if guess is not None:
return GUESS_TO_PURPOSE[guess.strip().lower()]
return None
def guess_index_purpose(header, index):
for keyword in HEADER_INDEX_PUPOSE_KEYWORDS:
guess = header.get(keyword + str(index))
if guess is not None:
return GUESS_TO_PURPOSE[guess.strip().lower()]
return None
if all_keywords:
if index is None:
guessed_purpose = guess_purpose(header)
if guessed_purpose is not None:
return guessed_purpose
if "XTENSION" not in header:
# we have a primary HDU, assume science
return Purpose.SCIENCE
raise ValueError(
"Cannot identify purpose, cannot use all_keywords"
)
guessed_purpose = guess_index_purpose(header, index)
if guessed_purpose is not None:
return guessed_purpose
raise ValueError("Cannot identify purpose, cannot use all_keywords")
if purpose is not None:
return Purpose(purpose)
if purpose_prefix is not None:
if index is None:
return Purpose(header.get(purpose_prefix))
return Purpose(header.get(purpose_prefix + str(index)))
raise ValueError(
"Either all_keywords must be True, or one of purpose or "
"purpose_prefix must not be None."
)
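# Added illustrative check (not part of the original module): with
# all_keywords=True, EXTNAME drives the lookup through GUESS_TO_PURPOSE.
def _example_get_purpose():
    assert get_purpose({"EXTNAME": "VARIANCE"}, all_keywords=True) \
        is Purpose.ERROR_VARIANCE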
def no_auto_identify(*args, **kwargs):
return False
|
{"hexsha": "63134daaa4574b7ddede319054130213e5a4214b", "size": 13908, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ssv/ssvloaders.py", "max_stars_repo_name": "ADACS-Australia/ssv-py", "max_stars_repo_head_hexsha": "d54e2ff0bcaf0197607125a2a5f39815e17d54e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-15T05:56:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-15T05:56:42.000Z", "max_issues_repo_path": "src/ssv/ssvloaders.py", "max_issues_repo_name": "ADACS-Australia/ssv-py", "max_issues_repo_head_hexsha": "d54e2ff0bcaf0197607125a2a5f39815e17d54e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ssv/ssvloaders.py", "max_forks_repo_name": "ADACS-Australia/ssv-py", "max_forks_repo_head_hexsha": "d54e2ff0bcaf0197607125a2a5f39815e17d54e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9724137931, "max_line_length": 93, "alphanum_fraction": 0.6555939028, "include": true, "reason": "import astropy,from astropy", "num_tokens": 3226}
|
import unittest
import math
import numpy
import pyglet
from pygly.input.digital import Digital
class test_digital( unittest.TestCase ):
def setUp( self ):
pass
def tearDown( self ):
pass
def test_digital( self ):
device = Digital( 'keyboard' )
def handle_event( device, event, value ):
self.assertEqual(
device,
'keyboard',
"Incorrect device"
)
self.assertEqual(
event,
'down',
"Incorrect event"
)
self.assertEqual(
value[ 0 ],
'd',
"Incorrect value"
)
self.assertEqual(
value[ 1 ],
None,
"Incorrect value"
)
self.assertFalse(
handle_event in device.handlers,
"IMPOSSIBRU!"
)
device.register_handler( handle_event )
self.assertTrue(
handle_event in device.handlers,
"Handler not registered"
)
device.dispatch_event( 'down', ('d',None) )
device.unregister_handler( handle_event )
self.assertFalse(
handle_event in device.handlers,
"Handler still registered"
)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "0f3fce238e3723280a98974f2d02b5d78937dfe4", "size": 1426, "ext": "py", "lang": "Python", "max_stars_repo_path": "razorback/test/test_digital.py", "max_stars_repo_name": "adamlwgriffiths/Razorback", "max_stars_repo_head_hexsha": "44158c0b2c8d842dce10c0b4c46570b876d42486", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "razorback/test/test_digital.py", "max_issues_repo_name": "adamlwgriffiths/Razorback", "max_issues_repo_head_hexsha": "44158c0b2c8d842dce10c0b4c46570b876d42486", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "razorback/test/test_digital.py", "max_forks_repo_name": "adamlwgriffiths/Razorback", "max_forks_repo_head_hexsha": "44158c0b2c8d842dce10c0b4c46570b876d42486", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2835820896, "max_line_length": 51, "alphanum_fraction": 0.4810659187, "include": true, "reason": "import numpy", "num_tokens": 255}
|
"""
Functions for the four-body phase-space parameterisation.
"""
import numpy as np
from . import util
def _verify_args(h1, h2, h3, h4):
# Check they're all arrays of 4 arrays
assert h1.shape[0] == 4, "h1_plus should be a shape (4, N) array"
assert h2.shape[0] == 4, "h2_minus should be a shape (4, N) array"
assert h3.shape[0] == 4, "h3_minus should be a shape (4, N) array"
assert h4.shape[0] == 4, "h4_plus should be a shape (4, N) array"
# Check they all contain the same number of particles
n_particles = len(h1[0])
assert h2.shape[1] == n_particles, "h2_minus and h1_plus are different lengths"
assert h3.shape[1] == n_particles, "h3_minus and h1_plus are different lengths"
    assert h4.shape[1] == n_particles, "h4_plus and h1_plus are different lengths"
def helicity_param(
h1_plus: np.ndarray, h2_minus: np.ndarray, h3_minus: np.ndarray, h4_plus: np.ndarray
) -> np.ndarray:
"""
Find 5 dimensional four-body phase space parameterisation using invariant masses and helicity angles
Our decay is `X -> h1+ h2- h3- h4+`
    Parameterisation comes from the original paper by Cabibbo and Maksymowicz; definitions are in the full documentation.
In brief, our parameters are:
Invariant mass of (h1, h4)
Invariant mass of (h2, h3)
Cosine angle of h1 wrt parent particle, in the CoM frame of + charged particle system
Cosine angle of h2 wrt parent particle, in the CoM frame of - charged particle system
Angle between + and - system decay planes
:param h1_plus: array of +ve charged hadron parameters, (px, py, pz, energy).
Each entry in this array should be an N-length array of momenta; overall shape is (4, N) for N particles.
:param h2_minus: array of -ve charged hadron parameters, (px, py, pz, energy).
Each entry in this array should be an N-length array of momenta; overall shape is (4, N) for N particles.
:param h3_minus: array of -ve charged hadron parameters, (px, py, pz, energy).
Each entry in this array should be an N-length array of momenta; overall shape is (4, N) for N particles.
    :param h4_plus: array of +ve charged hadron parameters, (px, py, pz, energy).
Each entry in this array should be an N-length array of momenta; overall shape is (4, N) for N particles.
:return: shape (N, 5) array of points in 5d phase space.
"""
_verify_args(h1_plus, h2_minus, h3_minus, h4_plus)
# Find invariant masses
m_plus, m_minus = util.m_plus_minus(h1_plus, h2_minus, h3_minus, h4_plus)
# Find costheta + and -
d = np.add(h1_plus, np.add(h2_minus, np.add(h3_minus, h4_plus)))
cos_theta_plus = util.cos_theta(h1_plus, h4_plus, d)
cos_theta_minus = util.cos_theta(h2_minus, h3_minus, d)
# Find phi
phi = util.phi(
util.cos_phi(h1_plus, h2_minus, h3_minus, h4_plus),
util.sin_phi(h1_plus, h2_minus, h3_minus, h4_plus),
)
# Return
return np.column_stack((m_plus, m_minus, cos_theta_plus, cos_theta_minus, phi))
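# Added illustrative sketch (not part of the original module): a pure shape
# check with synthetic four-vectors. The momenta are random, not physical, so
# only the (N, 5) output shape is meaningful here; E >> |p| keeps the
# invariant masses real.
def _example_shapes(n=100):
    rng = np.random.default_rng(0)
    hadrons = []
    for _ in range(4):
        p = rng.uniform(-1.0, 1.0, (3, n))         # px, py, pz
        e = np.full((1, n), 10.0)                  # energy
        hadrons.append(np.concatenate([p, e], axis=0))
    return helicity_param(*hadrons).shape          # (n, 5)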
|
{"hexsha": "d365c5fac7c7dc173f0fe1ca5fb0641e3ba68632", "size": 3076, "ext": "py", "lang": "Python", "max_stars_repo_path": "fourbody/param.py", "max_stars_repo_name": "richard-lane/fourbody", "max_stars_repo_head_hexsha": "9c029ad4d179e7ad7448522166e09c29c7096071", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fourbody/param.py", "max_issues_repo_name": "richard-lane/fourbody", "max_issues_repo_head_hexsha": "9c029ad4d179e7ad7448522166e09c29c7096071", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fourbody/param.py", "max_forks_repo_name": "richard-lane/fourbody", "max_forks_repo_head_hexsha": "9c029ad4d179e7ad7448522166e09c29c7096071", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9428571429, "max_line_length": 126, "alphanum_fraction": 0.6768530559, "include": true, "reason": "import numpy", "num_tokens": 880}
|
import asyncio
import pandas as pd
import numpy as np
from statsmodels.tsa.arima.model import ARIMA
class TradingSystem:
def __init__(self, logger, config, yahoo_repository, ai_repository):
self._config = config
self._logger = logger
self._yahoo_repository = yahoo_repository
self._ai_repository = ai_repository
async def monitoring(self, seconds, exec_on_start):
if not exec_on_start:
await asyncio.sleep(seconds)
while True:
finance_data = self._yahoo_repository.get_finance_data()
if finance_data is None:
                self._logger.info('No new movements')
else:
last_real_data, forecast = self._ai_repository.get_forecast(finance_data)
self._logger.info(f'Last data: {last_real_data} | Forecast: {forecast}')
await asyncio.sleep(seconds)
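# Added illustrative sketch (not part of the original file): a minimal ARIMA
# forecaster of the kind `ai_repository.get_forecast` might wrap; the 'Close'
# column and the (1, 1, 1) order are assumptions for illustration.
def example_arima_forecast(finance_data: pd.DataFrame):
    series = finance_data['Close'].astype(float)
    model = ARIMA(series, order=(1, 1, 1)).fit()
    next_value = float(np.asarray(model.forecast(steps=1))[0])
    return series.iloc[-1], next_value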
|
{"hexsha": "45fc8aceda10bd29a13a6ea8116c3f28d557e59a", "size": 923, "ext": "py", "lang": "Python", "max_stars_repo_path": "ai-trading-system/src/application/actions/trading_system.py", "max_stars_repo_name": "yash5OG/RecommenderForDHim", "max_stars_repo_head_hexsha": "841d981ec97626ddbe718cf0a044f92ee139fccc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ai-trading-system/src/application/actions/trading_system.py", "max_issues_repo_name": "yash5OG/RecommenderForDHim", "max_issues_repo_head_hexsha": "841d981ec97626ddbe718cf0a044f92ee139fccc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ai-trading-system/src/application/actions/trading_system.py", "max_forks_repo_name": "yash5OG/RecommenderForDHim", "max_forks_repo_head_hexsha": "841d981ec97626ddbe718cf0a044f92ee139fccc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-13T23:06:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-13T23:06:46.000Z", "avg_line_length": 29.7741935484, "max_line_length": 108, "alphanum_fraction": 0.6468039003, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 188}
|
#Original file by Titu1994, changed for this project
import json
import numpy as np
import argparse
import sklearn.metrics as metrics
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from scipy.optimize import minimize
from sklearn.metrics import log_loss
from models import wide_residual_net as WRN, dense_net as DN
from scipy.stats import multivariate_normal
from keras.datasets import cifar10
from keras import backend as K
import keras.utils.np_utils as kutils
parser = argparse.ArgumentParser(description='CIFAR 10 Ensemble Prediction')
parser.add_argument('--optimize', type=int, default=0, help='Set to 0 to perform regular snapshot ensembles.\n'
'Set to 1 to weigh the snapshots by their accuracy on the training/validation set.\n'
'Set to 2 to weigh the snapshots based on multivariate Gaussian distributions.')
parser.add_argument('--model', type=str, default='wrn', help='Type of model to train')
parser.add_argument('--validation', action='store_true', help='Split off a part of the training data to use as validation data')
# Wide ResNet Parameters
parser.add_argument('--wrn_N', type=int, default=2, help='Number of WRN blocks. Computed as N = (n - 4) / 6.')
parser.add_argument('--wrn_k', type=int, default=4, help='Width factor of WRN')
# DenseNet Parameters
parser.add_argument('--dn_depth', type=int, default=40, help='Depth of DenseNet')
parser.add_argument('--dn_growth_rate', type=int, default=12, help='Growth rate of DenseNet')
args = parser.parse_args()
# Snapshot weighting mode; see the --optimize help text above
OPTIMIZE = args.optimize
assert OPTIMIZE in [0,1,2], 'OPTIMIZE may only have values 0, 1 and 2'
validation = args.validation
model_type = str(args.model).lower()
assert model_type in ['wrn', 'dn'], 'Model type must be one of "wrn" for Wide ResNets or "dn" for DenseNets'
files_dir = "weights"
if model_type == "wrn":
n = args.wrn_N * 6 + 4
k = args.wrn_k
models_filenames = [r"%s/WRN-CIFAR10-%d-%d-1.h5" % (files_dir, n, k),
r"%s/WRN-CIFAR10-%d-%d-2.h5" % (files_dir, n, k),
r"%s/WRN-CIFAR10-%d-%d-3.h5" % (files_dir, n, k),
r"%s/WRN-CIFAR10-%d-%d-4.h5" % (files_dir, n, k),
r"%s/WRN-CIFAR10-%d-%d-5.h5" % (files_dir, n, k)]
else:
depth = args.dn_depth
growth_rate = args.dn_growth_rate
models_filenames = [r"%s/DenseNet-CIFAR10-%d-%d-1.h5" % (files_dir, depth, growth_rate),
r"%s/DenseNet-CIFAR10-%d-%d-2.h5" % (files_dir, depth, growth_rate),
r"%s/DenseNet-CIFAR10-%d-%d-3.h5" % (files_dir, depth, growth_rate),
r"%s/DenseNet-CIFAR10-%d-%d-4.h5" % (files_dir, depth, growth_rate),
r"%s/DenseNet-CIFAR10-%d-%d-5.h5" % (files_dir, depth, growth_rate)]
(trainX, trainY), (testX, testY) = cifar10.load_data()
nb_classes = len(np.unique(testY))
trainX = trainX.astype('float32')
trainX /= 255.0
testX = testX.astype('float32')
testX /= 255.0
trainY_cat = kutils.to_categorical(trainY)
testY_cat = kutils.to_categorical(testY)
if (validation): # Use validation set to determine weights of the snapshots
_, trainX, _, trainY = train_test_split(trainX, trainY, test_size=0.2, random_state=0)
if K.image_data_format() == "th":
init = (3, 32, 32)
else:
init = (32, 32, 3)
testX_flattened = [sample.flatten() for sample in testX]
def create_model():
if model_type == "wrn":
model_prefix = 'WRN-CIFAR10-%d-%d' % (args.wrn_N * 6 + 4, args.wrn_k)
return WRN.create_wide_residual_network(init, nb_classes=10, N=args.wrn_N, k=args.wrn_k, dropout=0.00, verbose=False)
else:
model_prefix = 'DenseNet-CIFAR10-%d-%d' % (args.dn_depth, args.dn_growth_rate)
return DN.create_dense_net(nb_classes=10, img_dim=init, depth=args.dn_depth, nb_dense_block=1,
growth_rate=args.dn_growth_rate, nb_filter=16, dropout_rate=0.2, verbose=False)
def calculate_weighted_accuracy():
global weighted_predictions, weight, prediction, yPred, yTrue, accuracy, error
weighted_predictions = np.zeros((testX.shape[0], nb_classes), dtype='float32')
for weight, prediction in zip(prediction_weights, test_preds):
weighted_predictions += weight * prediction
yPred = np.argmax(weighted_predictions, axis=1)
yTrue = testY
accuracy = metrics.accuracy_score(yTrue, yPred) * 100
return accuracy
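# Added illustrative sketch (not part of the original script): the weighting
# scheme on a toy example with two snapshots and three classes.
def _example_weighted_vote():
    preds = [np.array([[0.6, 0.3, 0.1]]), np.array([[0.2, 0.2, 0.6]])]
    weights = [0.25, 0.75]                   # e.g. accuracy-derived weights
    blended = sum(w * p for w, p in zip(weights, preds))
    return np.argmax(blended, axis=1)        # -> array([2])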
# Calculate train predictions of each snapshot.
train_preds = []
for fn in models_filenames:
model = create_model()
model.load_weights(fn)
print("Predicting train set values on model %s" % (fn))
yPreds = model.predict(trainX, batch_size=128, verbose=2)
train_preds.append(yPreds)
# Calculate test predictions of each snapshot.
test_preds = []
for fn in models_filenames:
model = create_model()
model.load_weights(fn)
print("Predicting test set values on model %s" % (fn))
yPreds = model.predict(testX, batch_size=128, verbose=2)
yPred = np.argmax(yPreds, axis=1)
yTrue = testY
accuracy = metrics.accuracy_score(yTrue, yPred) * 100
print("Accuracy : ", accuracy)
test_preds.append(yPreds)
if OPTIMIZE == 0: # Use non-weighed test predictions (standard snapshot ensembles)
prediction_weights = [1. / len(models_filenames)] * len(models_filenames)
accuracy = calculate_weighted_accuracy()
elif OPTIMIZE == 1: # Use weighed test predictions based on training/validation set (--validation parameter)
training_accuracies = []
for yPreds in train_preds:
yPred = np.argmax(yPreds, axis=1)
yTrue = trainY
training_accuracies.append(metrics.accuracy_score(yTrue, yPred))
m = min(training_accuracies)
prediction_weights = [training_accuracy - m for training_accuracy in training_accuracies]
accuracy = calculate_weighted_accuracy()
elif OPTIMIZE == 2: # Gaussian distributions
# Initialize Gaussian distributions from training predictions
densities = []
for yPreds in train_preds:
correct = [] # List containing all training samples this model predicted correctly
for pred, val, trained in zip(yPreds, trainY, trainX):
cat = np.argmax(pred)
if cat == val:
correct.append(trained.flatten())
correct = np.array(correct)
# Apply dimensionality reduction to make fitting it to a multivariate Gaussian distribution feasible
pca = PCA(n_components=10)
correct_reduced = pca.fit_transform(correct)
# Calculate the maximum likelihood estimates of this data as a multivariate Gaussian distribution
correct_reduced = correct_reduced[:500]
mean_estimator = np.mean(correct_reduced, axis=0)
correct_centered = correct_reduced - mean_estimator # 2D array minus 1D array --> [[1,2],[3,4]] - [5,8] = [[-4,-6],[-2,-4]]
covariance_estimator = np.mean([np.transpose(sample[np.newaxis]) @ sample[np.newaxis] for sample in correct_centered], axis=0)
distribution = multivariate_normal(mean=mean_estimator, cov=covariance_estimator)
        # Gaussian distribution created. Project the test samples with the same
        # fitted PCA (transform, not fit_transform) and evaluate the density.
        testX_reduced = pca.transform(testX_flattened)
cur_densities = distribution.pdf(testX_reduced)
densities.append(cur_densities)
# Weigh final predictions by their pdf on the different distributions
weighted_predictions = np.zeros((testX.shape[0], nb_classes), dtype='float32')
for cur_densities, cur_predictions in zip(densities, test_preds):
weighted_predictions += cur_predictions * np.transpose(np.array(cur_densities)[np.newaxis])
yPred = np.argmax(weighted_predictions, axis=1)
yTrue = testY
accuracy = metrics.accuracy_score(yTrue, yPred) * 100
error = 100-accuracy
print("Accuracy : ", accuracy)
print("Error : ", error)
exit()
|
{"hexsha": "ff69bba7a7e7571eb2a215b352c22ee544591a9b", "size": 8073, "ext": "py", "lang": "Python", "max_stars_repo_path": "predict_cifar_10.py", "max_stars_repo_name": "ThomasWink/AML_project", "max_stars_repo_head_hexsha": "4f1a036b7ec3e7b22eee6c94525986a7655e8298", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "predict_cifar_10.py", "max_issues_repo_name": "ThomasWink/AML_project", "max_issues_repo_head_hexsha": "4f1a036b7ec3e7b22eee6c94525986a7655e8298", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predict_cifar_10.py", "max_forks_repo_name": "ThomasWink/AML_project", "max_forks_repo_head_hexsha": "4f1a036b7ec3e7b22eee6c94525986a7655e8298", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1711229947, "max_line_length": 145, "alphanum_fraction": 0.683265205, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2070}
|
# Matplotlib packages to import
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import ImageGrid
# Used for plotting cases only
import seaborn as sns
# Obspy librabries
import obspy
from obspy import Stream
from obspy.core import UTCDateTime
# Standard Libraries
import numpy as np
import pandas as pd
from glob import glob
import pickle
import math
import random
import sys
import json
import copy
from string import digits
from scipy import stats
import time
import copy
from pyproj import Proj
# Pytorch Libraires
import torch
from torch.nn import Linear
from torch import Tensor
from torch.nn import MSELoss
from torch.optim import SGD, Adam, RMSprop
from torch.autograd import Variable, grad
from torch.utils.data.sampler import SubsetRandomSampler,WeightedRandomSampler
from torch.cuda.amp import autocast
# Sklearn libraries
from sklearn.cluster import DBSCAN
# Suppressing the warning
pd.options.mode.chained_assignment = None # default='warn'
class RBF(torch.nn.Module):
'''
Radial Basis Function (RBF)
'''
def __init__(self, sigma=None):
super(RBF, self).__init__()
self.sigma = sigma
self.print_sigma = False
def forward(self, X, Y):
XX = X.matmul(X.t())
XY = X.matmul(Y.t())
YY = Y.matmul(Y.t())
dnorm2 = -2 * XY + XX.diag().unsqueeze(1) + YY.diag().unsqueeze(0)
# Apply the median heuristic (PyTorch does not give true median)
if self.sigma is None:
np_dnorm2 = dnorm2.detach().cpu().numpy()
h = np.median(np_dnorm2) / (2 * np.log(X.size(0) + 1))
sigma = np.sqrt(h).item()
if self.print_sigma:
print(sigma)
else:
sigma = self.sigma
gamma = 1.0 / (1e-8 + 2 * sigma ** 2)
K_XY = (-gamma * dnorm2).exp()
return K_XY
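# Added note (not part of the original code): RBF.forward evaluates
# k(x, y) = exp(-gamma * ||x - y||^2) with gamma = 1 / (2 * sigma^2 + 1e-8),
# and when sigma is None it uses the median heuristic
# sigma^2 = median(||x_i - x_j||^2) / (2 * log(n + 1)).
def _example_rbf():
    """Illustrative check of the kernel against a direct computation."""
    X = torch.randn(5, 3)
    K = RBF(sigma=1.0)(X, X)
    d2 = torch.cdist(X, X) ** 2
    assert torch.allclose(K, torch.exp(-d2 / 2), atol=1e-4)
    return K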
def IO_JSON(file,Events=None,rw_type='r'):
'''
    Reading/writing the JSON location archive
'''
if rw_type == 'w':
tmpEvents = copy.deepcopy(Events)
elif rw_type == 'r':
with open(file, 'r') as f:
tmpEvents = json.load(f)
for key in tmpEvents.keys():
if rw_type=='w':
tmpEvents[key]['Picks'] = tmpEvents[key]['Picks'].astype(str).to_dict()
elif rw_type=='r':
tmpEvents[key]['Picks'] = pd.DataFrame.from_dict(tmpEvents[key]['Picks'])
else:
            print('Please specify either "r" (read) or "w" (write) for handling the data')
if rw_type == 'w':
with open(file, rw_type) as f:
json.dump(tmpEvents, f)
elif rw_type =='r':
return tmpEvents
def IO_NLLoc2JSON(file,EVT={},startEventID=1000000):
# Reading in the lines
f = open(file, "r")
lines = f.readlines()
lds = np.where(np.array(lines) == '\n')[0] - np.arange(len(np.where(np.array(lines) == '\n')[0]))
lines_start = np.append([0],lds[:-1])
lines_end = lds
# Reading in the event lines
evt = pd.read_csv(file,sep=r'\s+',names=['Station','Network','r1','r2','PhasePick', 'r3','Date','Time','Sec','r4','PickError','r5','r6','r7'])
evt['DT'] = pd.to_datetime(evt['Date'].astype(str).str.slice(stop=4) + '/' +
evt['Date'].astype(str).str.slice(start=4,stop=6) + '/' +
evt['Date'].astype(str).str.slice(start=6,stop=8) + 'T' +
evt['Time'].astype(str).str.zfill(4).str.slice(stop=2) + ':' +
evt['Time'].astype(str).str.zfill(4).str.slice(start=2) + ':' +
evt['Sec'].astype(str).str.split('.',expand=True)[0].str.zfill(2) + '.' +
evt['Sec'].astype(str).str.split('.',expand=True)[1].str.zfill(2),format='%Y/%m/%dT%H:%M:%S.%f')
evt = evt[['Network','Station','PhasePick','DT','PickError']]
    # Splitting the picks into their per-event blocks
for eds in range(len(lines_start)):
evt_tmp = evt.iloc[lines_start[eds]:lines_end[eds]]
EVT['{}'.format(startEventID+eds)] = {}
EVT['{}'.format(startEventID+eds)]['Picks'] = evt_tmp.reset_index(drop=True)
return EVT
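# Added illustrative usage (not part of the original code; the path is an
# assumption): parse an NLLoc-style phase file into the event dictionary.
def _example_read_nlloc(path="picks.obs"):
    events = IO_NLLoc2JSON(path, startEventID=1000000)
    return events['1000000']['Picks'][['Station', 'PhasePick', 'DT']]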
def IO_JSON2CSV(EVT,savefile=None):
'''
Saving Events in CSV format
'''
Events = EVT
# Loading location information
picks =(np.zeros((len(Events.keys()),8))*np.nan).astype(str)
for indx,evtid in enumerate(Events.keys()):
try:
picks[indx,0] = str(evtid)
picks[indx,1] = Events[evtid]['location']['OriginTime']
picks[indx,2:5] = (np.array(Events[evtid]['location']['Hypocentre'])).astype(str)
picks[indx,5:] = (np.array(Events[evtid]['location']['Hypocentre_std'])).astype(str)
except:
continue
picks_df = pd.DataFrame(picks,
columns=['EventID','DT','X','Y','Z','StdX','StdY','StdZ'])
picks_df['X'] = picks_df['X'].astype(float)
picks_df['Y'] = picks_df['Y'].astype(float)
picks_df['Z'] = picks_df['Z'].astype(float)
picks_df['StdX'] = picks_df['StdX'].astype(float)
picks_df['StdY'] = picks_df['StdY'].astype(float)
picks_df['StdZ'] = picks_df['StdZ'].astype(float)
picks_df = picks_df.dropna(axis=0)
picks_df['DT'] = pd.to_datetime(picks_df['DT'])
picks_df = picks_df[['EventID','DT','X','Y','Z','StdX','StdY','StdZ']]
if type(savefile) == type(None):
return picks_df
else:
picks_df.to_csv(savefile,index=False)
class HypoSVI(torch.nn.Module):
def __init__(self, EikoNet, Phases=['P','S'], device='cpu'):
super(HypoSVI, self).__init__()
# -- Defining the EikoNet input formats
self.eikonet_Phases = Phases
self.eikonet_models = EikoNet
if len(self.eikonet_Phases) != len(self.eikonet_models):
print('Error - Number of phases not equal to number of EikoNet models')
# Determining if the EikoNets are solved for the same domain
xmin_stack = np.vstack([self.eikonet_models[x].Params['VelocityClass'].xmin for x in range(len(self.eikonet_models))])
xmax_stack = np.vstack([self.eikonet_models[x].Params['VelocityClass'].xmax for x in range(len(self.eikonet_models))])
if not (xmin_stack == xmin_stack[0,:]).all() or not (xmax_stack == xmax_stack[0,:]).all():
print('Error - EikoNet Models not in the same domain\n Min Points = {}\n Max Points = {}'.format(xmin_stack,xmax_stack))
self.VelocityClass = self.eikonet_models[0].Params['VelocityClass']
# Converting to UTM projection scheme form
self.proj_str = copy.copy(self.eikonet_models[0].Params['VelocityClass'].projection)
if type(self.proj_str) != type(None):
self.projection = Proj(self.proj_str)
self.xmin = copy.copy(self.VelocityClass.xmin)
self.xmax = copy.copy(self.VelocityClass.xmax)
self.xmin[0],self.xmin[1] = self.projection(self.xmin[0],self.xmin[1])
self.xmax[0],self.xmax[1] = self.projection(self.xmax[0],self.xmax[1])
else:
self.projection = None
self.xmin = copy.copy(self.VelocityClass.xmin)
self.xmax = copy.copy(self.VelocityClass.xmax)
# --------- Initialising Location Information ---------
# -- Defining the device to run the location procedure on
self.device = torch.device(device)
# -- Defining the parameters required in the earthquake location procedure
self.location_info = {}
self.location_info['Log-likehood'] = 'EDT'
self.location_info['Travel Time Uncertainty - [Gradient(km/s),Min(s),Max(s)]'] = [0.1,0.1,2.0]
self.location_info['Individual Event Epoch Save and Print Rate'] = [None,False]
self.location_info['Number of Particles'] = 150
self.location_info['Step Size'] = 1
self.location_info['Save every * events'] = 100
self.location_info['Hypocenter Cluster - Seperation (km)'] = 0.8
self.location_info['Hypocenter Cluster - Minimum Samples'] = 3
# --------- Initialising Plotting Information ---------
self.plot_info={}
# - Event Plot parameters
# Location plotting
self.plot_info['EventPlot'] = {}
self.plot_info['EventPlot']['Errbar std'] = 2.0
self.plot_info['EventPlot']['Domain Distance'] = 10
self.plot_info['EventPlot']['Save Type'] = 'png'
self.plot_info['EventPlot']['Figure Size Scale'] = 1.0
self.plot_info['EventPlot']['Plot kde'] = True
self.plot_info['EventPlot']['NonClusterd SVGD'] = [0.5,'k']
self.plot_info['EventPlot']['Clusterd SVGD'] = [1.2,'g']
self.plot_info['EventPlot']['Hypocenter Location'] = [15,'k']
self.plot_info['EventPlot']['Hypocenter Errorbar'] = [False,'k']
self.plot_info['EventPlot']['Legend'] = True
# Optional Station Plotting
self.plot_info['EventPlot']['Stations'] = {}
self.plot_info['EventPlot']['Stations']['Plot Stations'] = True
self.plot_info['EventPlot']['Stations']['Station Names'] = True
self.plot_info['EventPlot']['Stations']['Marker Color'] = 'b'
self.plot_info['EventPlot']['Stations']['Marker Size'] = 25
# Optional Trace Plotting
self.plot_info['EventPlot']['Traces'] = {}
self.plot_info['EventPlot']['Traces']['Plot Traces'] = False
self.plot_info['EventPlot']['Traces']['Trace Host'] = None
self.plot_info['EventPlot']['Traces']['Channel Types'] = ['EH*','HH*']
self.plot_info['EventPlot']['Traces']['Filter Freq'] = [2,16]
self.plot_info['EventPlot']['Traces']['Normalisation Factor'] = 1.0
self.plot_info['EventPlot']['Traces']['Time Bounds'] = [0,5]
self.plot_info['EventPlot']['Traces']['Pick linewidth'] = 2.0
self.plot_info['EventPlot']['Traces']['Trace linewidth'] = 1.0
# - Catalogue Plot parameters
self.plot_info['CataloguePlot'] = {}
self.plot_info['CataloguePlot']['Minimum Phase Picks'] = 12
self.plot_info['CataloguePlot']['Maximum Location Uncertainty (km)'] = 15
self.plot_info['CataloguePlot']['Num Std to define errorbar'] = 2
self.plot_info['CataloguePlot']['Event Info - [Size, Color, Marker, Alpha]'] = [0.1,'r','*',0.8]
self.plot_info['CataloguePlot']['Event Errorbar - [On/Off(Bool),Linewidth,Color,Alpha]'] = [True,0.1,'r',0.8]
self.plot_info['CataloguePlot']['Station Marker - [Size,Color,Names On/Off(Bool)]'] = [15,'b',True]
self.plot_info['CataloguePlot']['Fault Planes - [Size,Color,Marker,Alpha]'] = [0.1,'gray','-',1.0]
# ----- Kernel Information ----
self.K = RBF()
self.K.sigma = 15
# --- Variables that are updated in run-time
self._σ_T = None
self._optimizer = None
self._orgTime = None
def locVar(self,T_obs,T_obs_err):
'''
        Applying variance from pick and distance weighting to each of the observations
        '''
        # Initialising the variance from the LOCGAU2-style settings
self._σ_T = torch.clamp(T_obs*self.location_info['Travel Time Uncertainty - [Gradient(km/s),Min(s),Max(s)]'][0],
self.location_info['Travel Time Uncertainty - [Gradient(km/s),Min(s),Max(s)]'][1],
self.location_info['Travel Time Uncertainty - [Gradient(km/s),Min(s),Max(s)]'][2]).to(self.device)**2
# Adding the variance of the Station Pick Uncertainties
self._σ_T += (T_obs_err**2)
# Turning back into a std
self._σ_T = torch.sqrt(self._σ_T)
def log_L(self, T_pred, T_obs, σ_T):
if self.location_info['Log-likehood'] == 'EDT':
from itertools import combinations
pairs = combinations(np.arange(T_obs.shape[1]), 2)
pairs = np.array(list(pairs))
dT_obs = T_obs[:,pairs[:,0]] - T_obs[:,pairs[:,1]]
dT_pred = T_pred[:,pairs[:,0]] - T_pred[:,pairs[:,1]]
σ_T = ((σ_T[:,pairs[:,0]])**2 + (σ_T[:,pairs[:,1]])**2)
logL = torch.exp((-(dT_obs-dT_pred)**2)/(σ_T))*(1/torch.sqrt(σ_T))
logL = torch.sum(logL,dim=1)
logL = logL.sum()
return logL
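    # Added note (not part of the original class): for 'EDT' the misfit of a
    # hypocentre x is built from all pick pairs (i, j),
    #   L(x) = sum_{i<j} (1 / sqrt(s_ij)) * exp(-(dT_obs - dT_pred)^2 / s_ij),
    # with s_ij = sigma_i^2 + sigma_j^2. Only differential times dT enter, so
    # the unknown origin time cancels. Despite the method name, the EDT branch
    # returns this summed likelihood directly (as in NonLinLoc's EDT stack),
    # not its logarithm.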
def phi(self, X_src, X_rec, t_obs,t_obs_err,t_phase):
# Setting up the gradient requirements
X_src = X_src.detach().requires_grad_(True)
# Preparing EikoNet input
n_particles = X_src.shape[0]
# Forcing points to stay within domain
X_src[:,0] = torch.clamp(X_src[:,0],self.xmin[0],self.xmax[0])
X_src[:,1] = torch.clamp(X_src[:,1],self.xmin[1],self.xmax[1])
X_src[:,2] = torch.clamp(X_src[:,2],self.xmin[2],self.xmax[2])
# Determining the predicted travel-time for the different phases
n_obs = 0
cc=0
for ind,phs in enumerate(self.eikonet_Phases):
phase_index = np.where(t_phase==phs)[0]
if len(phase_index) != 0:
pha_T_obs = t_obs[phase_index].repeat(n_particles, 1)
pha_T_obs_err = t_obs_err[phase_index].repeat(n_particles, 1)
pha_X_inp = torch.cat([X_src.repeat_interleave(len(phase_index), dim=0), X_rec[phase_index,:].repeat(n_particles, 1)], dim=1)
pha_T_pred = self.eikonet_models[ind].TravelTimes(pha_X_inp,projection=False).reshape(n_particles,len(phase_index))
if cc == 0:
n_obs = len(phase_index)
T_obs = pha_T_obs
T_obs_err = pha_T_obs_err
T_pred = pha_T_pred
cc+=1
else:
n_obs += len(phase_index)
T_obs = torch.cat([T_obs,pha_T_obs],dim=1)
T_obs_err = torch.cat([T_obs_err,pha_T_obs_err],dim=1)
T_pred = torch.cat([T_pred,pha_T_pred],dim=1)
self.locVar(T_obs,T_obs_err)
log_prob = self.log_L(T_pred,T_obs,self._σ_T)
score_func = torch.autograd.grad(log_prob, X_src)[0]
# Determining the phi
K_XX = self.K(X_src, X_src.detach())
grad_K = -torch.autograd.grad(K_XX.sum(), X_src)[0]
phi = (K_XX.detach().matmul(score_func) + grad_K) / (n_particles)
# Setting Misfit to zero to restart
self._σ_T = None
return phi
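    # Added note (not part of the original class): `phi` is the standard SVGD
    # update direction of Liu & Wang (2016),
    #   phi(x_i) = (1/n) * sum_j [ k(x_j, x_i) * grad_{x_j} log p(x_j)
    #                              + grad_{x_j} k(x_j, x_i) ],
    # where the first term drives particles toward high posterior density and
    # the kernel-gradient term repels them from each other; `step` below feeds
    # -phi to Adam so the optimiser ascends along phi.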
def step(self, X_src, X_rec, T_obs, T_obs_err, T_phase):
self.optim.zero_grad()
X_src.grad = -self.phi(X_src, X_rec, T_obs, T_obs_err, T_phase)
self.optim.step()
def _compute_origin(self,Tobs,t_phase,X_rec,Hyp):
'''
        Internal function to compute the origin time, its uncertainty and the pick residuals from observed and predicted travel-times
'''
# Determining the predicted travel-time for the different phases
n_obs = 0
cc=0
for ind,phs in enumerate(self.eikonet_Phases):
phase_index = np.where(t_phase==phs)[0]
if len(phase_index) != 0:
pha_X_inp = torch.cat([torch.repeat_interleave(Hyp[None,:],len(phase_index),dim=0), X_rec[phase_index,:]], dim=1)
pha_T_obs = Tobs[phase_index]
pha_T_pred = self.eikonet_models[ind].TravelTimes(pha_X_inp,projection=False)
if cc == 0:
T_obs = pha_T_obs
T_pred = pha_T_pred
cc+=1
else:
T_obs = torch.cat([T_obs,pha_T_obs])
T_pred = torch.cat([T_pred,pha_T_pred])
OT = np.median((T_pred - Tobs).detach().cpu().numpy())
pick_TD = ((T_pred - OT) - T_obs).detach().cpu().numpy()
OT_std = np.nanmedian(abs(pick_TD))
return OT,OT_std,pick_TD
def SyntheticCatalogue(self,input_file,Stations,save_file=None):
'''
        Determining synthetic travel-times between source and receiver locations, returning a JSON pick file for each event
Event_Locations - EventNum, OriginTime, PickErr, X, Y, Z
Stations -
# JDS - MAKE CORRECTIONS TO PROJECTION !!
'''
        # Determining the predicted travel-time from each source location to the
        # corresponding stations. Optional argument to return them as a JSON pick file.
evtdf = pd.read_csv(input_file)
EVT = {}
for indx in range(len(evtdf)):
EVT['{}'.format(evtdf['EventNum'].iloc[indx])] = {}
OT = evtdf['OriginTime'].iloc[indx]
# Defining the picks to append
picks = pd.DataFrame(columns=['Network','Station','PhasePick','DT','PickError'])
for ind,phs in enumerate(self.eikonet_Phases):
picks_phs = Stations[['Network','Station','X','Y','Z']]
picks_phs['PhasePick'] = phs
picks_phs['PickError'] = evtdf['PickErr'].iloc[indx]
Pairs = np.zeros((int(len(Stations)),6))
Pairs[:,:3] = np.array(evtdf[['X','Y','Z']].iloc[indx])
Pairs[:,3:] = np.array(picks_phs[['X','Y','Z']])
if type(self.projection) != type(None):
Pairs[:,0],Pairs[:,1] = self.projection(Pairs[:,0],Pairs[:,1])
Pairs[:,3],Pairs[:,4] = self.projection(Pairs[:,3],Pairs[:,4])
Pairs = Tensor(Pairs)
Pairs = Pairs.to(self.device)
TT_pred = self.eikonet_models[ind].TravelTimes(Pairs,projection=False).detach().to('cpu').numpy()
del Pairs
picks_phs['DT'] = TT_pred
picks_phs['DT'] = (pd.to_datetime(OT ) + pd.to_timedelta(picks_phs['DT'],unit='S')).dt.strftime('%Y/%m/%dT%H:%M:%S.%f')
picks = picks.append(picks_phs[['Network','Station','PhasePick','DT','PickError']])
EVT['{}'.format(evtdf['EventNum'].iloc[indx])]['Picks'] = picks
if type(save_file) == str:
IO_JSON('{}.json'.format(save_file),Events=EVT,rw_type='w')
return EVT
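    # Added illustrative input sketch (not part of the original class): the
    # CSV read by SyntheticCatalogue above is expected to provide the columns
    #     EventNum,OriginTime,PickErr,X,Y,Z
    # e.g. "1,2020-01-01T00:00:00.0,0.1,-117.50,35.70,5.0", with X/Y given in
    # geographic coordinates whenever a projection is configured.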
def Events2CSV(self,EVT=None,savefile=None,projection=None):
'''
Saving Events in CSV format
'''
if type(EVT) == type(None):
Events = self.Events
else:
Events = EVT
# Loading location information
picks =(np.zeros((len(Events.keys()),8))*np.nan).astype(str)
for indx,evtid in enumerate(Events.keys()):
try:
picks[indx,0] = str(evtid)
picks[indx,1] = self.Events[evtid]['location']['OriginTime']
picks[indx,2:5] = (np.array(self.Events[evtid]['location']['Hypocentre'])).astype(str)
picks[indx,5:] = (np.array(self.Events[evtid]['location']['Hypocentre_std'])).astype(str)
except:
continue
picks_df = pd.DataFrame(picks,
columns=['EventID','DT','X','Y','Z','StdX','StdY','StdZ'])
picks_df['X'] = picks_df['X'].astype(float)
picks_df['Y'] = picks_df['Y'].astype(float)
picks_df['Z'] = picks_df['Z'].astype(float)
picks_df['StdX'] = picks_df['StdX'].astype(float)
picks_df['StdY'] = picks_df['StdY'].astype(float)
picks_df['StdZ'] = picks_df['StdZ'].astype(float)
picks_df = picks_df.dropna(axis=0)
picks_df['DT'] = pd.to_datetime(picks_df['DT'])
picks_df = picks_df[['EventID','DT','X','Y','Z','StdX','StdY','StdZ']]
if type(savefile) == type(None):
return picks_df
else:
picks_df.to_csv(savefile,index=False)
def LocateEvents(self,EVTS,Stations,output_path,epochs=175,output_plots=False,timer=False):
self.Events = EVTS
print('============================================================================================================')
print('============================================================================================================')
print('========================================= HYPOSVI - Earthquake Location ====================================')
print('============================================================================================================')
print('============================================================================================================')
print('\n')
        print('          Processing {} Events - Starting DateTime {}'.format(len(EVTS.keys()),time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
print(' Output Folder = {}'.format(output_path))
print('\n')
print('======== Location Settings:')
print(json.dumps(self.location_info, indent=2, sort_keys=True))
print('\n')
if output_plots:
print('======== Plotting Settings:')
print(json.dumps(self.plot_info['EventPlot'], indent=2, sort_keys=True))
print('\n')
print('============================================================================================================')
print('============================================================================================================')
for c,ev in enumerate(self.Events.keys()):
# try:
if timer == True:
timer_start = time.time()
# Determining the event to look at
Ev = self.Events[ev]
            # Formatting the pandas datatypes
Ev['Picks']['Network'] = Ev['Picks']['Network'].astype(str)
Ev['Picks']['Station'] = Ev['Picks']['Station'].astype(str)
Ev['Picks']['PhasePick'] = Ev['Picks']['PhasePick'].astype(str)
Ev['Picks']['DT'] = pd.to_datetime(Ev['Picks']['DT'])
Ev['Picks']['PickError'] = Ev['Picks']['PickError'].astype(float)
            # Printing the current event being run
            print('================= Processing Event:{} - Event {} of {} - Number of observations={} =============='.format(ev,c+1,len(self.Events.keys()),len(Ev['Picks'])))
# Adding the station location to the pick files
pick_info = pd.merge(Ev['Picks'],Stations[['Network','Station','X','Y','Z']])
Ev['Picks'] = pick_info[['Network','Station','X','Y','Z','PhasePick','DT','PickError']]
            # Setting up randomly initialised particle (seed) locations
X_src = torch.zeros((int(self.location_info['Number of Particles']),3))
X_src[:,:3] = Tensor(np.random.rand(int(self.location_info['Number of Particles']),3))*(Tensor(self.xmax)-Tensor(self.xmin))[None,:] + Tensor(self.xmin)[None,:]
X_src = Variable(X_src).to(self.device)
self.optim = torch.optim.Adam([X_src], self.location_info['Step Size'])
# Defining the arrivals times in seconds
pick_info['Seconds'] = (pick_info['DT'] - np.min(pick_info['DT'])).dt.total_seconds()
# Applying projection
X_rec = np.array(pick_info[['X','Y','Z']])
if type(self.projection) != type(None):
X_rec[:,0],X_rec[:,1] = self.projection(X_rec[:,0],X_rec[:,1])
X_rec = Tensor(X_rec).to(self.device)
T_obs = Tensor(np.array(pick_info['Seconds'])).to(self.device)
T_obs_err = Tensor(np.array(pick_info['PickError'])).to(self.device)
T_obs_phase = np.array(pick_info['PhasePick'])
X_rec.requires_grad_()
l = None
losses = []
best_l = np.inf
#with autocast():
cc=0
for epoch in range(epochs):
self.optim.zero_grad()
if self.location_info['Individual Event Epoch Save and Print Rate'][0] != None:
if epoch % self.location_info['Individual Event Epoch Save and Print Rate'][0] == 0:
with torch.no_grad():
# Print the mean location and std
if self.location_info['Individual Event Epoch Save and Print Rate'][1] == True:
print("Epoch - {} ".format(epoch))
# Save to array the SVGD array
if cc==0:
PointsSVGD = X_src[...,None]
cc+=1
else:
PointsSVGD = torch.cat((PointsSVGD, X_src[...,None]), -1)
self.step(X_src, X_rec, T_obs, T_obs_err, T_obs_phase)
del cc
            # -- Drop particles that ended up outside the depth range of the domain
dmindx = [(X_src[:,2] > self.xmin[2]) & (X_src[:,2] < self.xmax[2])]
X_src = X_src[dmindx[0],:]
Ev['location'] = {}
Ev['location']['SVGD_points'] = X_src.detach().cpu().numpy().tolist()
if len(Ev['location']['SVGD_points']) == 0:
Ev['location']['Hypocentre'] = (np.ones(3)*np.nan).tolist()
Ev['location']['Hypocentre_std'] = (np.ones(3)*np.nan).tolist()
continue
# -- SVGD Points in Epochs --
if self.location_info['Individual Event Epoch Save and Print Rate'][0] != None:
Ev['location']['SVGD_Epochs'] = PointsSVGD.detach().cpu().numpy()
for ii in range(Ev['location']['SVGD_Epochs'].shape[-1]):
if type(self.projection) != type(None):
Ev['location']['SVGD_Epochs'][:,0,ii],Ev['location']['SVGD_Epochs'][:,1,ii] = self.projection(Ev['location']['SVGD_Epochs'][:,0,ii],Ev['location']['SVGD_Epochs'][:,1,ii],inverse=True)
Ev['location']['SVGD_Epochs'] = Ev['location']['SVGD_Epochs'].tolist()
# -- Determining the hypocentral location
clustering = DBSCAN(eps=self.location_info['Hypocenter Cluster - Seperation (km)'], min_samples=self.location_info['Hypocenter Cluster - Minimum Samples']).fit(X_src.detach().cpu())
try:
indx = np.where((clustering.labels_ == (np.argmax(np.bincount(np.array(clustering.labels_[clustering.labels_ !=-1]+1)))-1)))[0]
except:
# No cluster
Ev['location']['Hypocentre'] = (np.ones(3)*np.nan).tolist()
Ev['location']['Hypocentre_std'] = (np.ones(3)*np.nan).tolist()
continue
pts = np.transpose(X_src[indx,:].detach().cpu().numpy())
kde = stats.gaussian_kde(pts)
pdf = kde(pts)
cov = np.sqrt(abs(kde.covariance))
Ev['location']['SVGD_points_clusterindx'] = indx.tolist()
Ev['location']['Hypocentre'] = (pts[:,np.argmax(stats.gaussian_kde(pts)(pts))]).tolist()
Ev['location']['Hypocentre_std'] = np.array([cov[0,0],cov[1,1],cov[2,2]]).tolist()
# -- Determining the origin time and pick times
originOffset,originOffset_std,pick_TD = self._compute_origin(T_obs,T_obs_phase,X_rec,Tensor(Ev['location']['Hypocentre']).to(self.device))
Ev['location']['OriginTime_std'] = float(originOffset_std)
Ev['location']['OriginTime'] = str(np.min(pick_info['DT']) - pd.Timedelta(float(originOffset),unit='S'))
Ev['Picks']['TimeDiff'] = pick_TD
# -- Applying the projection from UTM to LatLong
if type(self.projection) != type(None):
Ev['location']['Hypocentre'] = np.array(Ev['location']['Hypocentre'])
Ev['location']['Hypocentre'][0],Ev['location']['Hypocentre'][1] = self.projection(Ev['location']['Hypocentre'][0],Ev['location']['Hypocentre'][1],inverse=True)
Ev['location']['Hypocentre'] = Ev['location']['Hypocentre'].tolist()
Ev['location']['SVGD_points'] = np.array(Ev['location']['SVGD_points'])
Ev['location']['SVGD_points'][:,0],Ev['location']['SVGD_points'][:,1] = self.projection(Ev['location']['SVGD_points'][:,0],Ev['location']['SVGD_points'][:,1],inverse=True)
Ev['location']['SVGD_points'] = Ev['location']['SVGD_points'].tolist()
print('---- OT= {} +/- {}s - Hyp=[{:.2f},{:.2f},{:.2f}] - Hyp/Std (km)=[{:.2f},{:.2f},{:.2f}]'.format(Ev['location']['OriginTime'],Ev['location']['OriginTime_std'],Ev['location']['Hypocentre'][0],Ev['location']['Hypocentre'][1],Ev['location']['Hypocentre'][2],
Ev['location']['Hypocentre_std'][0],Ev['location']['Hypocentre_std'][1],Ev['location']['Hypocentre_std'][2]))
if timer == True:
timer_end = time.time()
print('Processing took {}s'.format(timer_end-timer_start))
# Plotting Event plots
if output_plots:
if timer == True:
timer_start = time.time()
print('---- Saving Event Plot ----')
#try:
self.EventPlot(output_path,Ev,EventID=ev)
# except:
# print('----Issue with saving plot ! ----')
if timer == True:
timer_end = time.time()
print('Plotting took {}s'.format(timer_end-timer_start))
# Saving Catalogue instance
        if (self.location_info['Save every * events'] is not None) and ((c%self.location_info['Save every * events']) == 0):
if timer == True:
timer_start = time.time()
print('---- Saving Catalogue instance ----')
IO_JSON('{}/Catalogue.json'.format(output_path),Events=self.Events,rw_type='w')
if timer == True:
timer_end = time.time()
print('Saving took {}s'.format(timer_end-timer_start))
# except:
# print('Event Location failed ! Continuing to next event')
# Writing out final catalogue
IO_JSON('{}/Catalogue.json'.format(output_path),Events=self.Events,rw_type='w')
def EventPlot(self,PATH,Event,EventID=None):
plt.close('all')
OT = str(Event['location']['OriginTime'])
OT_std = Event['location']['OriginTime_std']
locs = np.array(Event['location']['SVGD_points'])
optimalloc = np.array(Event['location']['Hypocentre'])
optimalloc_std = np.array(Event['location']['Hypocentre_std'])*self.plot_info['EventPlot']['Errbar std']
indx_cluster = np.array(Event['location']['SVGD_points_clusterindx'])
Stations = Event['Picks'][['Station','X','Y','Z']]
if self.plot_info['EventPlot']['Traces']['Plot Traces']==True:
fig = plt.figure(figsize=(20*self.plot_info['EventPlot']['Figure Size Scale'], 9*self.plot_info['EventPlot']['Figure Size Scale']))
xz = plt.subplot2grid((3, 5), (2, 0), colspan=2)
xy = plt.subplot2grid((3, 5), (0, 0), colspan=2, rowspan=2,sharex=xz)
yz = plt.subplot2grid((3, 5), (0, 2), rowspan=2, sharey=xy)
trc = plt.subplot2grid((3, 5), (0, 3), rowspan=3, colspan=2)
else:
fig = plt.figure(figsize=(9*self.plot_info['EventPlot']['Figure Size Scale'], 9*self.plot_info['EventPlot']['Figure Size Scale']))
xz = plt.subplot2grid((3, 3), (2, 0), colspan=2)
xy = plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=2,sharex=xz)
yz = plt.subplot2grid((3, 3), (0, 2), rowspan=2, sharey=xy)
fig.patch.set_facecolor("white")
# Specifying the label names
xz.set_xlabel('UTM X (km)')
xz.set_ylabel('Depth (km)')
yz.set_ylabel('UTM Y (km)')
yz.yaxis.tick_right()
yz.yaxis.set_label_position("right")
yz.set_xlabel('Depth (km)')
        if self.plot_info['EventPlot']['Domain Distance'] is not None:
            if self.projection is not None:
optimalloc_UTM = copy.copy(optimalloc)
optimalloc_UTM[0],optimalloc_UTM[1] = self.projection(optimalloc_UTM[0],optimalloc_UTM[1])
boundsmin = optimalloc_UTM-self.plot_info['EventPlot']['Domain Distance']/2
boundsmax = optimalloc_UTM+self.plot_info['EventPlot']['Domain Distance']/2
boundsmin[0],boundsmin[1] = self.projection(boundsmin[0],boundsmin[1],inverse=True)
boundsmax[0],boundsmax[1] = self.projection(boundsmax[0],boundsmax[1],inverse=True)
else:
boundsmin = optimalloc-self.plot_info['EventPlot']['Domain Distance']/2
boundsmax = optimalloc+self.plot_info['EventPlot']['Domain Distance']/2
xy.set_xlim([boundsmin[0],boundsmax[0]])
xy.set_ylim([boundsmin[1],boundsmax[1]])
xz.set_xlim([boundsmin[0],boundsmax[0]])
xz.set_ylim([boundsmin[2],boundsmax[2]])
yz.set_xlim([boundsmin[2],boundsmax[2]])
yz.set_ylim([boundsmin[1],boundsmax[1]])
else:
            if self.projection is not None:
lim_min = self.VelocityClass.xmin
lim_max = self.VelocityClass.xmax
else:
lim_min = self.xmin
lim_max = self.xmax
xy.set_xlim([lim_min[0],lim_max[0]])
xy.set_ylim([lim_min[1],lim_max[1]])
xz.set_xlim([lim_min[0],lim_max[0]])
xz.set_ylim([lim_min[2],lim_max[2]])
yz.set_xlim([lim_min[2],lim_max[2]])
yz.set_ylim([lim_min[1],lim_max[1]])
# Invert yaxis
xz.invert_yaxis()
# Plotting the kde representation of the scatter data
if self.plot_info['EventPlot']['Plot kde']:
sns.kdeplot(locs[indx_cluster,0],locs[indx_cluster,1], cmap="Reds",ax=xy,zorder=-1)
sns.kdeplot(locs[indx_cluster,0],locs[indx_cluster,2], cmap="Reds",ax=xz,zorder=-1)
sns.kdeplot(locs[indx_cluster,2],locs[indx_cluster,1], cmap="Reds",ax=yz,zorder=-1)
# Plotting the SVGD samples
xy.scatter(locs[:,0],locs[:,1],float(self.plot_info['EventPlot']['NonClusterd SVGD'][0]),str(self.plot_info['EventPlot']['NonClusterd SVGD'][1]),label='SVGD Samples')
xz.scatter(locs[:,0],locs[:,2],float(self.plot_info['EventPlot']['NonClusterd SVGD'][0]),str(self.plot_info['EventPlot']['NonClusterd SVGD'][1]))
yz.scatter(locs[:,2],locs[:,1],float(self.plot_info['EventPlot']['NonClusterd SVGD'][0]),str(self.plot_info['EventPlot']['NonClusterd SVGD'][1]))
# Plotting the SVGD samples after clustering
xy.scatter(locs[indx_cluster,0],locs[indx_cluster,1],float(self.plot_info['EventPlot']['Clusterd SVGD'][0]),str(self.plot_info['EventPlot']['Clusterd SVGD'][1]),label=' Clustered SVGD Samples')
xz.scatter(locs[indx_cluster,0],locs[indx_cluster,2],float(self.plot_info['EventPlot']['Clusterd SVGD'][0]),str(self.plot_info['EventPlot']['Clusterd SVGD'][1]))
yz.scatter(locs[indx_cluster,2],locs[indx_cluster,1],float(self.plot_info['EventPlot']['Clusterd SVGD'][0]),str(self.plot_info['EventPlot']['Clusterd SVGD'][1]))
# Plotting the predicted hypocentre and standard deviation location
xy.scatter(optimalloc[0],optimalloc[1],float(self.plot_info['EventPlot']['Hypocenter Location'][0]),str(self.plot_info['EventPlot']['Hypocenter Location'][1]),label='Hypocentre')
xz.scatter(optimalloc[0],optimalloc[2],float(self.plot_info['EventPlot']['Hypocenter Location'][0]),str(self.plot_info['EventPlot']['Hypocenter Location'][1]))
yz.scatter(optimalloc[2],optimalloc[1],float(self.plot_info['EventPlot']['Hypocenter Location'][0]),str(self.plot_info['EventPlot']['Hypocenter Location'][1]))
# Defining the Error bar location
if self.plot_info['EventPlot']['Hypocenter Errorbar'][0]:
            # JDS - Need to define the plot lines! Currently turned off.
xy.errorbar(optimalloc[0],optimalloc[1],xerr=optimalloc_std[0], yerr=optimalloc_std[1],color=self.plot_info['EventPlot']['Hypocenter Errorbar'][1],label='Hyp {}-stds'.format(self.plot_info['EventPlot']['Errbar std']))
xz.errorbar(optimalloc[0],optimalloc[2],xerr=optimalloc_std[0], yerr=optimalloc_std[2],color=self.plot_info['EventPlot']['Hypocenter Errorbar'][1],label='Hyp {}stds'.format(self.plot_info['EventPlot']['Errbar std']))
yz.errorbar(optimalloc[2],optimalloc[1],xerr=optimalloc_std[2], yerr=optimalloc_std[1],color=self.plot_info['EventPlot']['Hypocenter Errorbar'][1],label='Hyp {}stds'.format(self.plot_info['EventPlot']['Errbar std']))
# Optional Station Location used in inversion
if self.plot_info['EventPlot']['Stations']['Plot Stations']:
idxsta = Stations['Station'].drop_duplicates().index
station_markersize = self.plot_info['EventPlot']['Stations']['Marker Size']
station_markercolor = self.plot_info['EventPlot']['Stations']['Marker Color']
xy.scatter(Stations['X'].iloc[idxsta],
Stations['Y'].iloc[idxsta],
station_markersize, marker='^',color=station_markercolor,label='Stations')
if self.plot_info['EventPlot']['Stations']['Station Names']:
for i, txt in enumerate(Stations['Station'].iloc[idxsta]):
xy.annotate(txt, (np.array(Stations['X'].iloc[idxsta])[i], np.array(Stations['Y'].iloc[idxsta])[i]))
xz.scatter(Stations['X'].iloc[idxsta],
Stations['Z'].iloc[idxsta],
station_markersize,marker='^',color=station_markercolor)
yz.scatter(Stations['Z'].iloc[idxsta],
Stations['Y'].iloc[idxsta],
station_markersize,marker='^',color=station_markercolor)
        # Defining the legend at top left
if self.plot_info['EventPlot']['Legend']:
xy.legend(loc='upper left')
plt.suptitle(' Earthquake {} +/- {:.2f}s\n Hyp=[{:.2f},{:.2f},{:.2f}] - Hyp Uncertainty (km) +/- [{:.2f},{:.2f},{:.2f}]'.format(OT,OT_std,optimalloc[0],optimalloc[1],optimalloc[2],optimalloc_std[0],optimalloc_std[1],optimalloc_std[2]))
if self.plot_info['EventPlot']['Traces']['Plot Traces']:
# Determining Event data information
evt_yr = str(pd.to_datetime(Event['Picks']['DT'].min()).year)
evt_jd = str(pd.to_datetime(Event['Picks']['DT'].min()).dayofyear).zfill(3)
evt_starttime = UTCDateTime(OT) + self.plot_info['EventPlot']['Traces']['Time Bounds'][0]
evt_endtime = UTCDateTime(Event['Picks']['DT'].max()) + self.plot_info['EventPlot']['Traces']['Time Bounds'][1]
nf = self.plot_info['EventPlot']['Traces']['Normalisation Factor']
Host_path = self.plot_info['EventPlot']['Traces']['Trace Host']
pick_linewidth = self.plot_info['EventPlot']['Traces']['Pick linewidth']
tr_linewidth = self.plot_info['EventPlot']['Traces']['Trace linewidth']
# Loading the trace data
ST = Stream()
stations = np.array(Event['Picks']['Station'].drop_duplicates())
network = np.array(Event['Picks']['Network'].iloc[Event['Picks']['Station'].drop_duplicates().index])
for indx,sta in enumerate(stations):
try:
net = network[indx]
# Loading the data of interest
st = obspy.read('{}/{}/{}/*{}*'.format(Host_path,evt_yr,evt_jd,sta),
starttime=evt_starttime-10,endtime=evt_endtime)
# Selecting only the user specified channels
for ch in self.plot_info['EventPlot']['Traces']['Channel Types']:
ST = ST + st.select(channel=ch,network=net).filter('bandpass',freqmin=self.plot_info['EventPlot']['Traces']['Filter Freq'][0],freqmax=self.plot_info['EventPlot']['Traces']['Filter Freq'][1])
            except Exception:
continue
# Plotting the Trace data
yloc = np.arange(len(stations)) + 1
for indx,staName in enumerate(stations):
try:
net = network[indx]
# Plotting the Station traces
stm = ST.select(station=staName)
for tr in stm:
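                    # Normalise each trace to unit peak amplitude, remove its
                    # mean, and plot it offset to the station's y position,
                    # scaled by the normalisation factor `nf`.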
normdata = (tr.data/abs(tr.data).max())
normdata = normdata - np.mean(normdata)
if (tr.stats.channel[-1] == '1') or (tr.stats.channel[-1] == 'N'):
trc.plot(tr.times(reftime=evt_starttime),np.ones(tr.data.shape)*yloc[indx] + normdata*nf,'c',linewidth=tr_linewidth)
if (tr.stats.channel[-1] == '2') or (tr.stats.channel[-1] == 'E'):
trc.plot(tr.times(reftime=evt_starttime),np.ones(tr.data.shape)*yloc[indx] + normdata*nf,'g',linewidth=tr_linewidth)
if (tr.stats.channel[-1] == 'Z'):
trc.plot(tr.times(reftime=evt_starttime),np.ones(tr.data.shape)*yloc[indx] + normdata*nf,'m',linewidth=tr_linewidth)
# Plotting the picks
stadf = Event['Picks'][(Event['Picks']['Station'] == staName) & (Event['Picks']['Network'] == net)].reset_index(drop=True)
for indxrw in range(len(stadf)):
pick_time = UTCDateTime(stadf['DT'].iloc[indxrw]) - evt_starttime
synpick_time = UTCDateTime(stadf['DT'].iloc[indxrw]) - evt_starttime + stadf['TimeDiff'].iloc[indxrw]
if stadf.iloc[indxrw]['PhasePick'] == 'P':
trc.plot([pick_time,pick_time],[yloc[indx]-0.6*nf,yloc[indx]+0.6*nf],linestyle='-',color='r',linewidth=pick_linewidth)
trc.plot([synpick_time,synpick_time],[yloc[indx]-0.6*nf,yloc[indx]+0.6*nf],linestyle='--',color='r',linewidth=pick_linewidth)
if stadf.iloc[indxrw]['PhasePick'] == 'S':
trc.plot([pick_time,pick_time],[yloc[indx]-0.6*nf,yloc[indx]+0.6*nf],linestyle='-',color='b',linewidth=pick_linewidth)
trc.plot([synpick_time,synpick_time],[yloc[indx]-0.6*nf,yloc[indx]+0.6*nf],linestyle='--',color='b',linewidth=pick_linewidth)
            except Exception:
continue
trc.yaxis.tick_right()
trc.yaxis.set_label_position("right")
trc.set_xlim([0,evt_endtime - evt_starttime])
trc.set_ylim([0,len(stations)+1])
trc.set_yticks(np.arange(1,len(stations)+1))
trc.set_yticklabels(stations)
trc.set_xlabel('Seconds since earthquake origin')
plt.savefig('{}/{}.{}'.format(PATH,EventID,self.plot_info['EventPlot']['Save Type']))
def CataloguePlot(self,filepath=None,Events=None,Stations=None,user_xmin=[None,None,None],user_xmax=[None,None,None], Faults=None):
        if Events is not None:
self.Events = Events
# - Catalogue Plot parameters
min_phases = self.plot_info['CataloguePlot']['Minimum Phase Picks']
max_uncertainty = self.plot_info['CataloguePlot']['Maximum Location Uncertainty (km)']
num_std = self.plot_info['CataloguePlot']['Num Std to define errorbar']
event_marker = self.plot_info['CataloguePlot']['Event Info - [Size, Color, Marker, Alpha]']
event_errorbar_marker = self.plot_info['CataloguePlot']['Event Errorbar - [On/Off(Bool),Linewidth,Color,Alpha]']
stations_plot = self.plot_info['CataloguePlot']['Station Marker - [Size,Color,Names On/Off(Bool)]']
fault_plane = self.plot_info['CataloguePlot']['Fault Planes - [Size,Color,Marker,Alpha]']
fig = plt.figure(figsize=(15, 15))
xz = plt.subplot2grid((3, 3), (2, 0), colspan=2)
xy = plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=2,sharex=xz)
yz = plt.subplot2grid((3, 3), (0, 2), rowspan=2, sharey=xy)
# Defining the limits of the domain
        if self.projection is not None:
lim_min = self.VelocityClass.xmin
lim_max = self.VelocityClass.xmax
else:
lim_min = self.xmin
lim_max = self.xmax
for indx,val in enumerate(user_xmin):
            if val is not None:
lim_min[indx] = val
for indx,val in enumerate(user_xmax):
            if val is not None:
lim_max[indx] = val
xy.set_xlim([lim_min[0],lim_max[0]])
xy.set_ylim([lim_min[1],lim_max[1]])
xz.set_xlim([lim_min[0],lim_max[0]])
xz.set_ylim([lim_min[2],lim_max[2]])
yz.set_xlim([lim_min[2],lim_max[2]])
yz.set_ylim([lim_min[1],lim_max[1]])
# Specifying the label names
xz.set_xlabel('UTM X (km)')
xz.set_ylabel('Depth (km)')
xz.invert_yaxis()
yz.set_ylabel('UTM Y (km)')
yz.yaxis.tick_right()
yz.yaxis.set_label_position("right")
yz.set_xlabel('Depth (km)')
# Plotting the station locations
        if Stations is not None:
sta = Stations[['Station','X','Y','Z']].drop_duplicates()
xy.scatter(sta['X'],sta['Y'],stations_plot[0], marker='^',color=stations_plot[1],label='Stations')
if stations_plot[2]:
for i, txt in enumerate(sta['Station']):
xy.annotate(txt, (np.array(sta['X'])[i], np.array(sta['Y'])[i]))
xz.scatter(sta['X'],sta['Z'],stations_plot[0], marker='^',color=stations_plot[1])
yz.scatter(sta['Z'],sta['Y'],stations_plot[0], marker='<',color=stations_plot[1])
picks_df = self.Events2CSV()
picks_df['ErrX'] = picks_df['StdX']*num_std
picks_df['ErrY'] = picks_df['StdY']*num_std
picks_df['ErrZ'] = picks_df['StdZ']*num_std
picks_df = picks_df[np.sum(picks_df[['ErrX','ErrY','ErrZ']],axis=1) <= max_uncertainty].reset_index(drop=True)
# # Plotting Location info
# if event_errorbar_marker[0]:
# xy.errorbar(picks_df['X'],picks_df['Y'],xerr=picks_df['ErrX'],yerr=picks_df['ErrY'],fmt='none',linewidth=event_errorbar_marker[1],color=event_errorbar_marker[2],alpha=event_errorbar_marker[3],label='Catalogue Errorbars')
# xz.errorbar(picks_df['X'],picks_df['Z'],xerr=picks_df['ErrX'],yerr=picks_df['ErrZ'],fmt='none',linewidth=event_errorbar_marker[1],color=event_errorbar_marker[2],alpha=event_errorbar_marker[3])
# yz.errorbar(picks_df['Z'],picks_df['Y'],xerr=picks_df['ErrZ'],yerr=picks_df['ErrY'],fmt='none',linewidth=event_errorbar_marker[1],color=event_errorbar_marker[2],alpha=event_errorbar_marker[3])
xy.scatter(picks_df['X'],picks_df['Y'],event_marker[0],event_marker[1],marker=event_marker[2],alpha=event_marker[3],label='Catalogue Locations')
xz.scatter(picks_df['X'],picks_df['Z'],event_marker[0],event_marker[1],marker=event_marker[2],alpha=event_marker[3])
yz.scatter(picks_df['Z'],picks_df['Y'],event_marker[0],event_marker[1],marker=event_marker[2],alpha=event_marker[3])
# # # Plotting Fault-planes
# if type(Faults) == str:
# FAULTS = pd.read_csv(Faults,names=['X','Y'])
# FAULTS = FAULTS[(FAULTS['X']>=lim_min[0]) & (FAULTS['X']<=lim_max[0]) & (FAULTS['Y']>=lim_min[1]) & (FAULTS['Y']<=lim_max[1])].reset_index(drop=True)
# xy.scatter(FAULTS['X'],FAULTS['Y'],fault_plane[0],color=fault_plane[1],linestyle=fault_plane[2],alpha=fault_plane[3],label='Mapped Faults')
# Plotting legend
xy.legend(loc='upper left', markerscale=2, scatterpoints=1, fontsize=10)
        if filepath is not None:
plt.savefig('{}'.format(filepath))
else:
plt.show()
plt.close('all')
|
{"hexsha": "ccb29372ffbbd80fc6dcabe048ae0239b4b49e3f", "size": 49645, "ext": "py", "lang": "Python", "max_stars_repo_path": "HypoSVI/location.py", "max_stars_repo_name": "interseismic/HypoSVI", "max_stars_repo_head_hexsha": "240ee1d8edd05b7f42b50b93086d351ebe8a7450", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-02-14T20:44:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T13:56:44.000Z", "max_issues_repo_path": "HypoSVI/location.py", "max_issues_repo_name": "interseismic/HypoSVI", "max_issues_repo_head_hexsha": "240ee1d8edd05b7f42b50b93086d351ebe8a7450", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HypoSVI/location.py", "max_forks_repo_name": "interseismic/HypoSVI", "max_forks_repo_head_hexsha": "240ee1d8edd05b7f42b50b93086d351ebe8a7450", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-14T00:51:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-14T00:51:16.000Z", "avg_line_length": 50.3498985801, "max_line_length": 340, "alphanum_fraction": 0.5491791721, "include": true, "reason": "import numpy,from scipy", "num_tokens": 12426}
|
[STATEMENT]
lemma (in valid_unMultigraph) longest_path:
assumes "finite E" "n \<in> V"
shows "\<exists>v. \<exists>max_path. is_trail v max_path n \<and>
(\<forall>v'. \<forall>e\<in>E. \<not>is_trail v' (e#max_path) n)"
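(* Proof idea: trail lengths are bounded by card E, so a trail into n of
   greatest length exists; if every trail could be extended by one more edge,
   the maximal one could too, a contradiction. *)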
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n)
[PROOF STEP]
proof (rule ccontr)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
assume contro:"\<not> (\<exists>v max_path. is_trail v max_path n
\<and> (\<forall>v'. \<forall>e\<in>E. \<not>is_trail v' (e#max_path) n))"
[PROOF STATE]
proof (state)
this:
\<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n)
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
hence induct:"(\<forall>v max_path. is_trail v max_path n
\<longrightarrow> (\<exists>v'. \<exists>e\<in>E. is_trail v' (e#max_path) n))"
[PROOF STATE]
proof (prove)
using this:
\<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n)
goal (1 subgoal):
1. \<forall>v max_path. is_trail v max_path n \<longrightarrow> (\<exists>v'. \<exists>e\<in>E. is_trail v' (e # max_path) n)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>v max_path. is_trail v max_path n \<longrightarrow> (\<exists>v'. \<exists>e\<in>E. is_trail v' (e # max_path) n)
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
have "is_trail n [] n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_trail n [] n
[PROOF STEP]
using \<open>n \<in> V\<close>
[PROOF STATE]
proof (prove)
using this:
n \<in> V
goal (1 subgoal):
1. is_trail n [] n
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
is_trail n [] n
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
hence "exist_path_length n 0"
[PROOF STATE]
proof (prove)
using this:
is_trail n [] n
goal (1 subgoal):
1. exist_path_length n 0
[PROOF STEP]
unfolding exist_path_length_def
[PROOF STATE]
proof (prove)
using this:
is_trail n [] n
goal (1 subgoal):
1. \<exists>v' ps. is_trail v' ps n \<and> length ps = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
exist_path_length n 0
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
exist_path_length n 0
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
have "\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
[PROOF STEP]
using trail_bound[OF \<open>finite E\<close>]
[PROOF STATE]
proof (prove)
using this:
is_trail ?v ?ps ?v' \<Longrightarrow> length ?ps \<le> card E
goal (1 subgoal):
1. \<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
[PROOF STEP]
unfolding exist_path_length_def
[PROOF STATE]
proof (prove)
using this:
is_trail ?v ?ps ?v' \<Longrightarrow> length ?ps \<le> card E
goal (1 subgoal):
1. \<forall>y. (\<exists>v' ps. is_trail v' ps n \<and> length ps = y) \<longrightarrow> y \<le> card E
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
hence bound:"\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E"
[PROOF STATE]
proof (prove)
using this:
\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
goal (1 subgoal):
1. \<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
exist_path_length n 0
\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
[PROOF STEP]
have "exist_path_length n (GREATEST x. exist_path_length n x)"
[PROOF STATE]
proof (prove)
using this:
exist_path_length n 0
\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
goal (1 subgoal):
1. exist_path_length n (GREATEST x. exist_path_length n x)
[PROOF STEP]
using GreatestI_nat
[PROOF STATE]
proof (prove)
using this:
exist_path_length n 0
\<forall>y. exist_path_length n y \<longrightarrow> y \<le> card E
\<lbrakk>?P ?k; \<And>y. ?P y \<Longrightarrow> y \<le> ?b\<rbrakk> \<Longrightarrow> ?P (Greatest ?P)
goal (1 subgoal):
1. exist_path_length n (GREATEST x. exist_path_length n x)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
exist_path_length n (GREATEST x. exist_path_length n x)
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
exist_path_length n (GREATEST x. exist_path_length n x)
[PROOF STEP]
obtain v max_path where
max_path:"is_trail v max_path n" "length max_path=(GREATEST x. exist_path_length n x)"
[PROOF STATE]
proof (prove)
using this:
exist_path_length n (GREATEST x. exist_path_length n x)
goal (1 subgoal):
1. (\<And>v max_path. \<lbrakk>is_trail v max_path n; length max_path = (GREATEST x. exist_path_length n x)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis exist_path_length_def)
[PROOF STATE]
proof (state)
this:
is_trail v max_path n
length max_path = (GREATEST x. exist_path_length n x)
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
hence "\<exists> v' e. is_trail v' (e#max_path) n"
[PROOF STATE]
proof (prove)
using this:
is_trail v max_path n
length max_path = (GREATEST x. exist_path_length n x)
goal (1 subgoal):
1. \<exists>v' e. is_trail v' (e # max_path) n
[PROOF STEP]
using induct
[PROOF STATE]
proof (prove)
using this:
is_trail v max_path n
length max_path = (GREATEST x. exist_path_length n x)
\<forall>v max_path. is_trail v max_path n \<longrightarrow> (\<exists>v'. \<exists>e\<in>E. is_trail v' (e # max_path) n)
goal (1 subgoal):
1. \<exists>v' e. is_trail v' (e # max_path) n
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
\<exists>v' e. is_trail v' (e # max_path) n
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
hence "exist_path_length n (length max_path +1)"
[PROOF STATE]
proof (prove)
using this:
\<exists>v' e. is_trail v' (e # max_path) n
goal (1 subgoal):
1. exist_path_length n (length max_path + 1)
[PROOF STEP]
by (metis One_nat_def exist_path_length_def list.size(4))
[PROOF STATE]
proof (state)
this:
exist_path_length n (length max_path + 1)
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
hence "length max_path + 1 \<le> (GREATEST x. exist_path_length n x)"
[PROOF STATE]
proof (prove)
using this:
exist_path_length n (length max_path + 1)
goal (1 subgoal):
1. length max_path + 1 \<le> (GREATEST x. exist_path_length n x)
[PROOF STEP]
by (metis Greatest_le_nat bound)
[PROOF STATE]
proof (state)
this:
length max_path + 1 \<le> (GREATEST x. exist_path_length n x)
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
hence "length max_path + 1 \<le> length max_path"
[PROOF STATE]
proof (prove)
using this:
length max_path + 1 \<le> (GREATEST x. exist_path_length n x)
goal (1 subgoal):
1. length max_path + 1 \<le> length max_path
[PROOF STEP]
using max_path
[PROOF STATE]
proof (prove)
using this:
length max_path + 1 \<le> (GREATEST x. exist_path_length n x)
is_trail v max_path n
length max_path = (GREATEST x. exist_path_length n x)
goal (1 subgoal):
1. length max_path + 1 \<le> length max_path
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
length max_path + 1 \<le> length max_path
goal (1 subgoal):
1. \<nexists>v max_path. is_trail v max_path n \<and> (\<forall>v'. \<forall>e\<in>E. \<not> is_trail v' (e # max_path) n) \<Longrightarrow> False
[PROOF STEP]
thus False
[PROOF STATE]
proof (prove)
using this:
length max_path + 1 \<le> length max_path
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4036, "file": "Koenigsberg_Friendship_MoreGraph", "length": 37}
|
import numpy as np
import os
import heapq
from tqdm import tqdm
import argparse
import pickle
import json
def read_json(file):
    with open(file, "r", encoding="utf-8") as f:
        return json.loads(f.read())
def write_json(file, data):
    with open(file, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
def read_pickle(filename):
    with open(filename, "rb") as f:
        return pickle.loads(f.read())
def write_pickle(filename, data):
    with open(filename, "wb") as f:
        f.write(pickle.dumps(data))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--query_feature_path",type=str
)
parser.add_argument(
"--gallery_feature_path",type=str
)
parser.add_argument(
"--retrieval_results_path",type=str
)
parser.add_argument(
"--v",action="store_true"
)
parser.add_argument(
"--t",action="store_true"
)
parser.add_argument(
"--i",action="store_true"
)
parser.add_argument(
"--p",action="store_true"
)
parser.add_argument(
"--a",action="store_true"
)
parser.add_argument(
"--it",action="store_true"
)
parser.add_argument(
"--itp",action="store_true"
)
parser.add_argument(
"--max_topk",type=int,default=110
)
return parser.parse_args()
def read_feature(query_feature_txt):
query_id = []
query_feature=[]
for each in tqdm(query_feature_txt):
each_split = each.split(",")
item_id = each_split[0]
each_feature = [float(i) for i in each_split[1:]]
query_id.append(item_id)
query_feature.append(each_feature)
return query_id,query_feature
if __name__ == '__main__':
print()
args=parse_args()
save_path=args.retrieval_results_path
if not os.path.exists(save_path):
os.makedirs(save_path)
feature_type = []
if args.v:
feature_type.append("v")
if args.t:
feature_type.append("t")
if args.i:
feature_type.append("i")
if args.p:
feature_type.append("p")
if args.a:
feature_type.append("a")
if args.it:
feature_type.append("it")
if args.itp:
feature_type.append("itp")
for each_feature_type in feature_type:
query_dir = args.query_feature_path
gallery_dir = args.gallery_feature_path
save_file=open("{}/{}_feature_retrieval_id_list.txt".format(save_path,each_feature_type),"w")
# new
gallery_ids = np.load("{}/id.npy".format(gallery_dir))
gallery_ids = np.hstack(gallery_ids)
query_ids = np.load("{}/id.npy".format(query_dir))
query_ids = np.hstack(query_ids)
gallery_feature_np = np.load("{}/{}_feature_np.npy".format(gallery_dir, each_feature_type))
print(gallery_feature_np.shape)
query_feature_np = np.load("{}/{}_feature_np.npy".format(query_dir, each_feature_type))
print(query_feature_np.shape)
# query_id=query_id[:100]
# query_feature_np=query_feature_np[:100]
score_matrix = query_feature_np.dot(gallery_feature_np.T)
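        # Dot-product scores; these equal cosine similarities provided the saved
        # features were L2-normalised upstream (an assumption about the export).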
max_topk = args.max_topk
for q,each_score in tqdm(zip(query_ids,score_matrix)):
max_index = heapq.nlargest(max_topk, range(len(each_score)), each_score.take)
topk_item_id = gallery_ids[max_index]
topk_item_id=[each_item_id for each_item_id in topk_item_id if each_item_id!=q]
topk_item_id_str = ",".join(topk_item_id)
            save_file.write("{},{}\n".format(q, topk_item_id_str))
        save_file.close()
|
{"hexsha": "8b99b115bd0bd662aa1181ad0e2caa2065673bfb", "size": 3785, "ext": "py", "lang": "Python", "max_stars_repo_path": "datatoolkit/eval/retrieval_unit_id_list.py", "max_stars_repo_name": "Xiaodongsuper/M5Product_toolkit", "max_stars_repo_head_hexsha": "8d972640586440f4a6c24baf67a77dc1efa62545", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-09-27T07:54:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T09:50:46.000Z", "max_issues_repo_path": "datatoolkit/eval/retrieval_unit_id_list.py", "max_issues_repo_name": "Xiaodongsuper/M5Product_toolkit", "max_issues_repo_head_hexsha": "8d972640586440f4a6c24baf67a77dc1efa62545", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datatoolkit/eval/retrieval_unit_id_list.py", "max_forks_repo_name": "Xiaodongsuper/M5Product_toolkit", "max_forks_repo_head_hexsha": "8d972640586440f4a6c24baf67a77dc1efa62545", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0792682927, "max_line_length": 102, "alphanum_fraction": 0.6071334214, "include": true, "reason": "import numpy", "num_tokens": 868}
|
from numpy import random
n = 10
print n
for i in random.permutation(n):
    print i+1,
print
for i in random.permutation(n):
    print i+1,
|
{"hexsha": "7f64fb3070f151898f611058878dd4e1c7632810", "size": 152, "ext": "py", "lang": "Python", "max_stars_repo_path": "CodeChef/SHORT/COOK61/Problem D/gen.py", "max_stars_repo_name": "VastoLorde95/Competitive-Programming", "max_stars_repo_head_hexsha": "6c990656178fb0cd33354cbe5508164207012f24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 170, "max_stars_repo_stars_event_min_datetime": "2017-07-25T14:47:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T19:16:31.000Z", "max_issues_repo_path": "CodeChef/SHORT/COOK61/Problem D/gen.py", "max_issues_repo_name": "navodit15/Competitive-Programming", "max_issues_repo_head_hexsha": "6c990656178fb0cd33354cbe5508164207012f24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CodeChef/SHORT/COOK61/Problem D/gen.py", "max_forks_repo_name": "navodit15/Competitive-Programming", "max_forks_repo_head_hexsha": "6c990656178fb0cd33354cbe5508164207012f24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2017-07-28T06:17:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-31T03:06:22.000Z", "avg_line_length": 13.8181818182, "max_line_length": 31, "alphanum_fraction": 0.7105263158, "include": true, "reason": "from numpy", "num_tokens": 45}
|
using NeuralVerification: compute_output
using LinearAlgebra
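# Monte-Carlo estimate of the range of a linear objective over a network's
# outputs: draw points from `cell` (a `sample(cell, n)` helper assumed to be
# defined elsewhere in this repo), push each through the network, and track
# the min/max of dot(output, coefficients).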
function sample_based_bounds(network, cell, coefficients, num_samples)
xs = sample(cell, num_samples)
min_obj = Inf
max_obj = -Inf
for x in xs
output = compute_output(network, x)
obj = dot(output, coefficients)
min_obj = min(min_obj, obj)
max_obj = max(max_obj, obj)
end
return [min_obj, max_obj]
end
|
{"hexsha": "b53beafeecaae129b66bc8a6a43eeeb0285d134f", "size": 419, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "utils.jl", "max_stars_repo_name": "castrong/VNN21Benchmarks", "max_stars_repo_head_hexsha": "57e2b6992e33bc2112d2a747282bb91a0e3f87f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.jl", "max_issues_repo_name": "castrong/VNN21Benchmarks", "max_issues_repo_head_hexsha": "57e2b6992e33bc2112d2a747282bb91a0e3f87f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.jl", "max_forks_repo_name": "castrong/VNN21Benchmarks", "max_forks_repo_head_hexsha": "57e2b6992e33bc2112d2a747282bb91a0e3f87f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9333333333, "max_line_length": 70, "alphanum_fraction": 0.6801909308, "num_tokens": 104}
|
\section{Hyperchains}
\textbf{todo: reorganize, mention commitments earlier, images!}
The previous approaches had a lot to offer, but considering their cons it is
hard to scale them in a reasonable way. PoW seems to work well only when a big
computational effort is being burned, and PoS suffers from a huge number of
security holes that require very complicated algorithms which usually either
don't solve the problem at all or just move it further to another layer of
abstraction.
Here we present a hybrid strategy which will benefit from the stability of PoW
solutions but will offer the scalability of PoS systems. A Hyperchain is a
special kind of blockchain that sticks to an already existing chain. The two
are going to be called, respectively, the child- and parent-chain
\footnote{https://medium.com/@yanislav/hyperchains-secure-cheap-scalable-blockchain-technology-for-everyone-3ddec96a4152}.
The parent chain can be almost any blockchain in the world. In general, we want
to use some big existing PoW-based chains (at the time of writing, preferably
Bitcoin or Ethereum, but not limited to them) and reuse their burned work to
maintain the stability of the child chain. We would also like to have a
PoS-like election system to choose the leaders on the hyperchain. In this case,
however, we have a really reliable and, most importantly, unpredictable source
of randomness: the keyblock hash of the parent chain. The idea is not very new,
though; there is already some research in this direction
\footnote{https://eprint.iacr.org/2015/1015.pdf}.
Critics say that it is still possible to exploit this by mining blocks in such
a manner that the outcome would be beneficial, but in reality that reduces to
compound PoW: most likely the difficulty of mining such a block will be
squared, so in order to have actual control over the entropy one needs to
control the parent chain anyway.
Having this machinery, it seems natural to start a new election each time a
keyblock is mined on the parent chain. The next leader shall be chosen
depending on the hash of that block, with chances proportional to their stake.
The selection algorithm is going to be straightforward: we take the hash
(let's say, MD5) and consider the whole MD5 codomain as a closed line segment
divided into intervals with lengths proportional to the stakes of the
delegates. The intervals are going to be sorted by the order in which the
respective commitments appeared on the parent chain. The generated hash will
then point into some subsection, which determines the winner, as formalised
below.
$$Insert\ some\ nice\ image\ here$$
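More concretely (an illustrative formalisation; the 128-bit range matches the
MD5 example above): interpret the keyblock hash $h$ as an integer in
$[0, 2^{128})$, let $s_1, \dots, s_k$ be the delegates' stakes ordered by their
commitments, and let $S = \sum_j s_j$. The winner is the unique $i$ with
$$\sum_{j < i} s_j \;\le\; \frac{h}{2^{128}} \, S \;<\; \sum_{j \le i} s_j,$$
so each delegate wins with probability $s_i / S$, proportional to their stake.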
One of the important concepts behind the commitment idea is to be able to rely
on the parent chain's stability. Therefore we want to treat it as a rigid
skeleton of the hyperchain, which can be achieved by proper blockhash linking.
Each commitment must declare over which block the delegate is going to
compete. Therefore the commitment must consist of:
\begin{itemize}
\item The subject of delegation on the child chain
\item The block over which the delegate is going to build
\item Signature of the delegate from the child chain
\end{itemize}
One dilemma that arises at this point is whether the commitment should
reference the latest keyblock or the latest microblock of the child chain.
Referencing a microblock looks more consistent at first sight, but we believe
that in reality it would lead to massive forking (especially when some peers
wouldn't receive all of the blocks). The problem with referencing a keyblock
is that the next leader could steal the transactions and post them in their
own microblocks. This, however, can be countered with a smarter fee strategy:
instead of giving the full fee to the miner, we can split it up and give the
bigger part to the next leader, provided they included the previous leader's
microblocks in their continuation of the history. A sketch of such a split
follows below.
$$Insert\ some\ nice\ image\ here$$
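As a sketch (the split parameter $\alpha$ is illustrative, not part of any
specification): if the previous leader's microblocks carry total fees $f$, pay
$$f_{\mathrm{prev}} = (1 - \alpha)\, f, \qquad f_{\mathrm{next}} = \alpha\, f,
\qquad \alpha > \tfrac{1}{2}.$$
A leader who steals the transactions instead only collects the previous-leader
share $(1 - \alpha)\, f$ once the same rule is applied to them, so for
$\alpha > \tfrac{1}{2}$ honest inclusion pays strictly more.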
|
{"hexsha": "26e9c4decce78c09cf596d1e499147f2179624fc", "size": 3874, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "hyperchains.tex", "max_stars_repo_name": "gorbak25/hyperchains-whitepaper", "max_stars_repo_head_hexsha": "84a32f7451cf7536bd7049a6b0cfa3b9ff216143", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-14T13:46:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-14T13:46:48.000Z", "max_issues_repo_path": "hyperchains.tex", "max_issues_repo_name": "gorbak25/hyperchains-whitepaper", "max_issues_repo_head_hexsha": "84a32f7451cf7536bd7049a6b0cfa3b9ff216143", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hyperchains.tex", "max_forks_repo_name": "gorbak25/hyperchains-whitepaper", "max_forks_repo_head_hexsha": "84a32f7451cf7536bd7049a6b0cfa3b9ff216143", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.9705882353, "max_line_length": 122, "alphanum_fraction": 0.8048528653, "num_tokens": 851}
|
import base_solver as base
import game
from lib import helpers
import numpy as np
import redis
r = redis.StrictRedis(host='localhost', port=6379, db=0)
STATE_MISS = 0
STATE_HIT = 1
STATE_UNKNOWN = 2
SHIP_SIZES = helpers.SHIP_SIZES
class OpenCLSolver(base.BaseSolver):
def __init__(self):
super(OpenCLSolver,self).__init__()
        self.current_state = np.empty([10,10]).astype(np.uint8)
        self.current_state.fill(STATE_UNKNOWN)
def mark_tile_used(self,tile):
self.remaining_tiles.remove(tile)
def get_next_target_random(self):
ret = self.tiles[self.turn]
self.turn+=1
return ret
def get_next_target(self,misses,hits):
state = self.get_state(hits,misses)
target = self.get_from_cache(state)
if target:
return self.string_coordinates_to_array(target)
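        # Cache miss: based on the helper names, the flow below enumerates ship
        # placement boards per ship size, combines them into consistent fleet
        # layouts, scores cells against the current hits on the GPU via OpenCL,
        # and caches the chosen target in Redis keyed on the full board state.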
shipBoards = helpers.get_ship_boards(misses,matrix=True)
s12 = helpers.shortInterpolate(shipBoards[0],shipBoards[1],9)
s123 = helpers.shortInterpolate(s12,shipBoards[2],12)
s45 = helpers.shortInterpolate(shipBoards[3],shipBoards[4],5)
print "Combinations to Compute: " + str(len(s123)*len(s45))
target = helpers.opencl_interpolate(helpers.bool2IntArray(s123),helpers.bool2IntArray(s45),hits)
self.set_to_cache(state,target)
return self.int_coordinates_to_array(target)
def get_state(self,hits,misses):
state = np.copy(hits)
for miss in misses:
state[miss[0]][miss[1]] = 2
print state
return state
def get_from_cache(self,state):
#return None
key = ""
for row in state:
key += "".join(map(str,row))
#print key
return r.get(key)
def set_to_cache(self,state,target):
key = ""
for row in state:
key += "".join(map(str,row))
r.set(key,target)
def string_coordinates_to_array(self,coord):
return [int(coord[0]),int(coord[1])]
def int_coordinates_to_array(self,coord):
return [int(coord / 10),int(coord % 10)]
def play_game(bs_game,solver):
limit = 100
misses = []
hits = np.zeros([10,10],dtype=np.uint8)
for turn in xrange(limit):
print misses
tile = solver.get_next_target(misses,hits)
print tile
ret = bs_game.play_turn(tile)
#solver.mark_tile_used(tile)
print ret
if (ret["code"] == STATE_MISS):
misses.append(tile)
if (ret["code"] == STATE_HIT):
x,y = tile
hits[x][y] = 1
if (ret["code"] == -1):
print(turn +1)
return
solver = OpenCLSolver()
rounds = 1
for x in xrange(rounds):
bs_game = game.BattleshipGame()
solver.reset()
play_game(bs_game,solver)
|
{"hexsha": "3857d93c16994e18b3ab24cb6d284086c1210cf7", "size": 2439, "ext": "py", "lang": "Python", "max_stars_repo_path": "solvers/opencl_solver.py", "max_stars_repo_name": "nicofff/baas", "max_stars_repo_head_hexsha": "676819a4b1c5ae1a63f5779fe799fcd1006b79fb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-06-21T21:02:22.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-21T21:02:22.000Z", "max_issues_repo_path": "solvers/opencl_solver.py", "max_issues_repo_name": "nicofff/baas", "max_issues_repo_head_hexsha": "676819a4b1c5ae1a63f5779fe799fcd1006b79fb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solvers/opencl_solver.py", "max_forks_repo_name": "nicofff/baas", "max_forks_repo_head_hexsha": "676819a4b1c5ae1a63f5779fe799fcd1006b79fb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.972972973, "max_line_length": 98, "alphanum_fraction": 0.7117671177, "include": true, "reason": "import numpy", "num_tokens": 702}
|
# Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for serializing and deserializing cirq_google.api.v2 protos."""
from typing import Any, Dict, List, Optional
import sympy
import cirq
from cirq_google.api import v2
from cirq_google.ops import PhysicalZTag
from cirq_google.ops.calibration_tag import CalibrationTag
from cirq_google.serialization import serializer, op_deserializer, op_serializer, arg_func_langs
class CircuitSerializer(serializer.Serializer):
"""A class for serializing and deserializing programs and operations.
This class is for serializing cirq_google.api.v2. protos using one
message type per gate type. It serializes qubits by adding a field
into the constants table. Usage is by passing a `cirq.Circuit`
to the `serialize()` method of the class, which will produce a
`Program` proto. Likewise, the `deserialize` method will produce
a `cirq.Circuit` object from a `Program` proto.
This class is more performant than the previous `SerializableGateSet`
at the cost of some extendability.
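    A minimal usage sketch (the gate set name is illustrative):
        serializer = CircuitSerializer('my_gate_set')
        proto = serializer.serialize(circuit)
        restored = serializer.deserialize(proto)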
"""
def __init__(self, gate_set_name: str):
"""Construct the circuit serializer object.
Args:
gate_set_name: The name used to identify the gate set.
"""
super().__init__(gate_set_name)
def serialize(
self,
program: cirq.AbstractCircuit,
msg: Optional[v2.program_pb2.Program] = None,
*,
arg_function_language: Optional[str] = None,
) -> v2.program_pb2.Program:
"""Serialize a Circuit to cirq_google.api.v2.Program proto.
Args:
program: The Circuit to serialize.
msg: An optional proto object to populate with the serialization
results.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
Raises:
            NotImplementedError: If the program is of a type that is not supported.
"""
if not isinstance(program, (cirq.Circuit, cirq.FrozenCircuit)):
raise NotImplementedError(f'Unrecognized program type: {type(program)}')
raw_constants: Dict[Any, int] = {}
if msg is None:
msg = v2.program_pb2.Program()
msg.language.gate_set = self.name
msg.language.arg_function_language = (
arg_function_language or arg_func_langs.MOST_PERMISSIVE_LANGUAGE
)
self._serialize_circuit(
program,
msg.circuit,
arg_function_language=arg_function_language,
constants=msg.constants,
raw_constants=raw_constants,
)
return msg
def _serialize_circuit(
self,
circuit: cirq.AbstractCircuit,
msg: v2.program_pb2.Circuit,
*,
arg_function_language: Optional[str],
constants: List[v2.program_pb2.Constant],
raw_constants: Dict[Any, int],
) -> None:
msg.scheduling_strategy = v2.program_pb2.Circuit.MOMENT_BY_MOMENT
for moment in circuit:
moment_proto = msg.moments.add()
for op in moment:
if isinstance(op.untagged, cirq.CircuitOperation):
op_pb = moment_proto.circuit_operations.add()
self._serialize_circuit_op(
op.untagged,
op_pb,
arg_function_language=arg_function_language,
constants=constants,
raw_constants=raw_constants,
)
else:
op_pb = moment_proto.operations.add()
self._serialize_gate_op(
op,
op_pb,
arg_function_language=arg_function_language,
constants=constants,
raw_constants=raw_constants,
)
def _serialize_gate_op(
self,
op: cirq.Operation,
msg: v2.program_pb2.Operation,
*,
constants: List[v2.program_pb2.Constant],
raw_constants: Dict[Any, int],
arg_function_language: Optional[str] = '',
) -> v2.program_pb2.Operation:
"""Serialize an Operation to cirq_google.api.v2.Operation proto.
Args:
op: The operation to serialize.
msg: An optional proto object to populate with the serialization
results.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
constants: The list of previously-serialized Constant protos.
            raw_constants: A map from raw objects to their respective indices
                in `constants`.
Returns:
The cirq.google.api.v2.Operation proto.
Raises:
ValueError: If the operation cannot be serialized.
"""
gate = op.gate
if isinstance(gate, cirq.XPowGate):
arg_func_langs.float_arg_to_proto(
gate.exponent,
out=msg.xpowgate.exponent,
arg_function_language=arg_function_language,
)
elif isinstance(gate, cirq.YPowGate):
arg_func_langs.float_arg_to_proto(
gate.exponent,
out=msg.ypowgate.exponent,
arg_function_language=arg_function_language,
)
elif isinstance(gate, cirq.ZPowGate):
arg_func_langs.float_arg_to_proto(
gate.exponent,
out=msg.zpowgate.exponent,
arg_function_language=arg_function_language,
)
if any(isinstance(tag, PhysicalZTag) for tag in op.tags):
msg.zpowgate.is_physical_z = True
elif isinstance(gate, cirq.PhasedXPowGate):
arg_func_langs.float_arg_to_proto(
gate.phase_exponent,
out=msg.phasedxpowgate.phase_exponent,
arg_function_language=arg_function_language,
)
arg_func_langs.float_arg_to_proto(
gate.exponent,
out=msg.phasedxpowgate.exponent,
arg_function_language=arg_function_language,
)
elif isinstance(gate, cirq.PhasedXZGate):
arg_func_langs.float_arg_to_proto(
gate.x_exponent,
out=msg.phasedxzgate.x_exponent,
arg_function_language=arg_function_language,
)
arg_func_langs.float_arg_to_proto(
gate.z_exponent,
out=msg.phasedxzgate.z_exponent,
arg_function_language=arg_function_language,
)
arg_func_langs.float_arg_to_proto(
gate.axis_phase_exponent,
out=msg.phasedxzgate.axis_phase_exponent,
arg_function_language=arg_function_language,
)
elif isinstance(gate, cirq.CZPowGate):
arg_func_langs.float_arg_to_proto(
gate.exponent,
out=msg.czpowgate.exponent,
arg_function_language=arg_function_language,
)
elif isinstance(gate, cirq.ISwapPowGate):
arg_func_langs.float_arg_to_proto(
gate.exponent,
out=msg.iswappowgate.exponent,
arg_function_language=arg_function_language,
)
elif isinstance(gate, cirq.FSimGate):
arg_func_langs.float_arg_to_proto(
gate.theta, out=msg.fsimgate.theta, arg_function_language=arg_function_language
)
arg_func_langs.float_arg_to_proto(
gate.phi, out=msg.fsimgate.phi, arg_function_language=arg_function_language
)
elif isinstance(gate, cirq.MeasurementGate):
arg_func_langs.arg_to_proto(
gate.key, out=msg.measurementgate.key, arg_function_language=arg_function_language
)
arg_func_langs.arg_to_proto(
gate.invert_mask,
out=msg.measurementgate.invert_mask,
arg_function_language=arg_function_language,
)
elif isinstance(gate, cirq.WaitGate):
arg_func_langs.float_arg_to_proto(
gate.duration.total_nanos(),
out=msg.waitgate.duration_nanos,
arg_function_language=arg_function_language,
)
else:
raise ValueError(f'Cannot serialize op {op!r} of type {type(gate)}')
for qubit in op.qubits:
if qubit not in raw_constants:
constants.append(
v2.program_pb2.Constant(
qubit=v2.program_pb2.Qubit(id=v2.qubit_to_proto_id(qubit))
)
)
raw_constants[qubit] = len(constants) - 1
msg.qubit_constant_index.append(raw_constants[qubit])
for tag in op.tags:
if isinstance(tag, CalibrationTag):
constant = v2.program_pb2.Constant()
constant.string_value = tag.token
if tag.token in raw_constants:
msg.token_constant_index = raw_constants[tag.token]
else:
# Token not found, add it to the list
msg.token_constant_index = len(constants)
constants.append(constant)
if raw_constants is not None:
raw_constants[tag.token] = msg.token_constant_index
return msg
def _serialize_circuit_op(
self,
op: cirq.CircuitOperation,
msg: Optional[v2.program_pb2.CircuitOperation] = None,
*,
arg_function_language: Optional[str] = '',
constants: Optional[List[v2.program_pb2.Constant]] = None,
raw_constants: Optional[Dict[Any, int]] = None,
) -> v2.program_pb2.CircuitOperation:
"""Serialize a CircuitOperation to cirq.google.api.v2.CircuitOperation proto.
Args:
op: The circuit operation to serialize.
msg: An optional proto object to populate with the serialization
results.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
constants: The list of previously-serialized Constant protos.
            raw_constants: A map from raw objects to their respective indices
                in `constants`.
Returns:
The cirq.google.api.v2.CircuitOperation proto.
Raises:
ValueError: If `constant` or `raw_constants` are not specified.
"""
circuit = op.circuit
if constants is None or raw_constants is None:
raise ValueError(
'CircuitOp serialization requires a constants list and a corresponding '
'map of pre-serialization values to indices (raw_constants).'
)
        circuit_op_serializer = op_serializer.CircuitOpSerializer()
if circuit not in raw_constants:
subcircuit_msg = v2.program_pb2.Circuit()
self._serialize_circuit(
circuit,
subcircuit_msg,
arg_function_language=arg_function_language,
constants=constants,
raw_constants=raw_constants,
)
constants.append(v2.program_pb2.Constant(circuit_value=subcircuit_msg))
raw_constants[circuit] = len(constants) - 1
        return circuit_op_serializer.to_proto(
op,
msg,
arg_function_language=arg_function_language,
constants=constants,
raw_constants=raw_constants,
)
def deserialize(self, proto: v2.program_pb2.Program) -> cirq.Circuit:
"""Deserialize a Circuit from a cirq_google.api.v2.Program.
Args:
proto: A dictionary representing a cirq_google.api.v2.Program proto.
Returns:
The deserialized Circuit
Raises:
            ValueError: If the given proto has no language, or if the language gate set
                does not match the name of this serializer. Also if deserializing
                a schedule is attempted.
NotImplementedError: If the program proto does not contain a circuit or schedule.
"""
if not proto.HasField('language') or not proto.language.gate_set:
raise ValueError('Missing gate set specification.')
if proto.language.gate_set != self.name:
raise ValueError(
'Gate set in proto was {} but expected {}'.format(
proto.language.gate_set, self.name
)
)
which = proto.WhichOneof('program')
arg_func_language = (
proto.language.arg_function_language or arg_func_langs.MOST_PERMISSIVE_LANGUAGE
)
if which == 'circuit':
deserialized_constants: List[Any] = []
for constant in proto.constants:
which_const = constant.WhichOneof('const_value')
if which_const == 'string_value':
deserialized_constants.append(constant.string_value)
elif which_const == 'circuit_value':
circuit = self._deserialize_circuit(
constant.circuit_value,
arg_function_language=arg_func_language,
constants=proto.constants,
deserialized_constants=deserialized_constants,
)
deserialized_constants.append(circuit.freeze())
elif which_const == 'qubit':
deserialized_constants.append(v2.qubit_from_proto_id(constant.qubit.id))
circuit = self._deserialize_circuit(
proto.circuit,
arg_function_language=arg_func_language,
constants=proto.constants,
deserialized_constants=deserialized_constants,
)
return circuit
if which == 'schedule':
raise ValueError('Deserializing a schedule is no longer supported.')
raise NotImplementedError('Program proto does not contain a circuit.')
def _deserialize_circuit(
self,
circuit_proto: v2.program_pb2.Circuit,
*,
arg_function_language: str,
constants: List[v2.program_pb2.Constant],
deserialized_constants: List[Any],
) -> cirq.Circuit:
moments = []
for moment_proto in circuit_proto.moments:
moment_ops = []
for op in moment_proto.operations:
moment_ops.append(
self._deserialize_gate_op(
op,
arg_function_language=arg_function_language,
constants=constants,
deserialized_constants=deserialized_constants,
)
)
for op in moment_proto.circuit_operations:
moment_ops.append(
self._deserialize_circuit_op(
op,
arg_function_language=arg_function_language,
constants=constants,
deserialized_constants=deserialized_constants,
)
)
moments.append(cirq.Moment(moment_ops))
return cirq.Circuit(moments)
def _deserialize_gate_op(
self,
operation_proto: v2.program_pb2.Operation,
*,
arg_function_language: str = '',
constants: Optional[List[v2.program_pb2.Constant]] = None,
deserialized_constants: Optional[List[Any]] = None,
) -> cirq.Operation:
"""Deserialize an Operation from a cirq_google.api.v2.Operation.
Args:
operation_proto: A dictionary representing a
cirq.google.api.v2.Operation proto.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
constants: The list of Constant protos referenced by constant
table indices in `proto`.
deserialized_constants: The deserialized contents of `constants`.
Returns:
The deserialized Operation.
Raises:
ValueError: If the operation cannot be deserialized.
"""
if deserialized_constants is not None:
qubits = [deserialized_constants[q] for q in operation_proto.qubit_constant_index]
else:
qubits = []
for q in operation_proto.qubits:
# Preserve previous functionality in case
# constants table was not used
qubits.append(v2.qubit_from_proto_id(q.id))
which_gate_type = operation_proto.WhichOneof('gate_value')
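        # Each supported gate is stored in its own oneof field; a missing float
        # arg deserializes as None, which the `or 0.0` fallbacks below map to 0.0.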
if which_gate_type == 'xpowgate':
op = cirq.XPowGate(
exponent=arg_func_langs.float_arg_from_proto(
operation_proto.xpowgate.exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)(*qubits)
elif which_gate_type == 'ypowgate':
op = cirq.YPowGate(
exponent=arg_func_langs.float_arg_from_proto(
operation_proto.ypowgate.exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)(*qubits)
elif which_gate_type == 'zpowgate':
op = cirq.ZPowGate(
exponent=arg_func_langs.float_arg_from_proto(
operation_proto.zpowgate.exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)(*qubits)
if operation_proto.zpowgate.is_physical_z:
op = op.with_tags(PhysicalZTag())
elif which_gate_type == 'phasedxpowgate':
exponent = (
arg_func_langs.float_arg_from_proto(
operation_proto.phasedxpowgate.exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)
phase_exponent = (
arg_func_langs.float_arg_from_proto(
operation_proto.phasedxpowgate.phase_exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)
op = cirq.PhasedXPowGate(exponent=exponent, phase_exponent=phase_exponent)(*qubits)
elif which_gate_type == 'phasedxzgate':
x_exponent = (
arg_func_langs.float_arg_from_proto(
operation_proto.phasedxzgate.x_exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)
z_exponent = (
arg_func_langs.float_arg_from_proto(
operation_proto.phasedxzgate.z_exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)
axis_phase_exponent = (
arg_func_langs.float_arg_from_proto(
operation_proto.phasedxzgate.axis_phase_exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)
op = cirq.PhasedXZGate(
x_exponent=x_exponent,
z_exponent=z_exponent,
axis_phase_exponent=axis_phase_exponent,
)(*qubits)
elif which_gate_type == 'czpowgate':
op = cirq.CZPowGate(
exponent=arg_func_langs.float_arg_from_proto(
operation_proto.czpowgate.exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)(*qubits)
elif which_gate_type == 'iswappowgate':
op = cirq.ISwapPowGate(
exponent=arg_func_langs.float_arg_from_proto(
operation_proto.iswappowgate.exponent,
arg_function_language=arg_function_language,
required_arg_name=None,
)
or 0.0
)(*qubits)
elif which_gate_type == 'fsimgate':
theta = arg_func_langs.float_arg_from_proto(
operation_proto.fsimgate.theta,
arg_function_language=arg_function_language,
required_arg_name=None,
)
phi = arg_func_langs.float_arg_from_proto(
operation_proto.fsimgate.phi,
arg_function_language=arg_function_language,
required_arg_name=None,
)
if isinstance(theta, (int, float, sympy.Basic)) and isinstance(
phi, (int, float, sympy.Basic)
):
op = cirq.FSimGate(theta=theta, phi=phi)(*qubits)
else:
raise ValueError('theta and phi must be specified for FSimGate')
elif which_gate_type == 'measurementgate':
key = arg_func_langs.arg_from_proto(
operation_proto.measurementgate.key,
arg_function_language=arg_function_language,
required_arg_name=None,
)
invert_mask = arg_func_langs.arg_from_proto(
operation_proto.measurementgate.invert_mask,
arg_function_language=arg_function_language,
required_arg_name=None,
)
if isinstance(invert_mask, list) and isinstance(key, str):
op = cirq.MeasurementGate(
num_qubits=len(qubits), key=key, invert_mask=tuple(invert_mask)
)(*qubits)
else:
raise ValueError(f'Incorrect types for measurement gate {invert_mask} {key}')
elif which_gate_type == 'waitgate':
total_nanos = arg_func_langs.float_arg_from_proto(
operation_proto.waitgate.duration_nanos,
arg_function_language=arg_function_language,
required_arg_name=None,
)
op = cirq.WaitGate(duration=cirq.Duration(nanos=total_nanos or 0.0))(*qubits)
else:
raise ValueError(
f'Unsupported serialized gate with type "{which_gate_type}".'
f'\n\noperation_proto:\n{operation_proto}'
)
which = operation_proto.WhichOneof('token')
if which == 'token_constant_index':
if not constants:
raise ValueError(
'Proto has references to constants table '
                'but none was passed in, value = '
f'{operation_proto}'
)
op = op.with_tags(
CalibrationTag(constants[operation_proto.token_constant_index].string_value)
)
elif which == 'token_value':
op = op.with_tags(CalibrationTag(operation_proto.token_value))
return op
def _deserialize_circuit_op(
self,
operation_proto: v2.program_pb2.CircuitOperation,
*,
arg_function_language: str = '',
constants: Optional[List[v2.program_pb2.Constant]] = None,
deserialized_constants: Optional[List[Any]] = None,
) -> cirq.CircuitOperation:
"""Deserialize a CircuitOperation from a
cirq.google.api.v2.CircuitOperation.
Args:
            operation_proto: A cirq.google.api.v2.CircuitOperation proto.
arg_function_language: The `arg_function_language` field from
`Program.Language`.
constants: The list of Constant protos referenced by constant
table indices in `proto`.
deserialized_constants: The deserialized contents of `constants`.
Returns:
The deserialized CircuitOperation.
"""
return op_deserializer.CircuitOpDeserializer().from_proto(
operation_proto,
arg_function_language=arg_function_language,
constants=constants,
deserialized_constants=deserialized_constants,
)
CIRCUIT_SERIALIZER = CircuitSerializer('v2_5')
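# Illustrative round-trip sketch (not part of the original module). The demo
# qubit and circuit below are hypothetical; `serialize` and `deserialize` are
# the public entry points this class inherits from the `Serializer` base class.
if __name__ == '__main__':
    _q = cirq.GridQubit(5, 4)
    _demo = cirq.Circuit([cirq.X(_q) ** 0.5, cirq.measure(_q, key='m')])
    _proto = CIRCUIT_SERIALIZER.serialize(_demo)
    assert CIRCUIT_SERIALIZER.deserialize(_proto) == _demo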
|
{"hexsha": "914b85e4c76309c4529bad9335fde6f90d26497f", "size": 25403, "ext": "py", "lang": "Python", "max_stars_repo_path": "cirq-google/cirq_google/serialization/circuit_serializer.py", "max_stars_repo_name": "BearerPipelineTest/Cirq", "max_stars_repo_head_hexsha": "e00767a2ef1233e82e9089cf3801a77e4cc3aea3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-05T22:17:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T22:17:39.000Z", "max_issues_repo_path": "cirq-google/cirq_google/serialization/circuit_serializer.py", "max_issues_repo_name": "BearerPipelineTest/Cirq", "max_issues_repo_head_hexsha": "e00767a2ef1233e82e9089cf3801a77e4cc3aea3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-01-16T14:12:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T03:58:46.000Z", "max_forks_repo_path": "cirq-google/cirq_google/serialization/circuit_serializer.py", "max_forks_repo_name": "Nexuscompute/Cirq", "max_forks_repo_head_hexsha": "640ef8f82d6a56ec95361388ce7976e096cca906", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7752808989, "max_line_length": 98, "alphanum_fraction": 0.5859150494, "include": true, "reason": "import sympy", "num_tokens": 4864}
|
/-
Copyright (c) 2018 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Mario Carneiro, Yury Kudryashov, Heather Macbeth
-/
import analysis.normed.order.lattice
import analysis.normed_space.operator_norm
import analysis.normed_space.star.basic
import data.real.sqrt
import topology.continuous_function.algebra
import topology.metric_space.equicontinuity
/-!
# Bounded continuous functions
The type of bounded continuous functions taking values in a metric space, with
the uniform distance.
-/
noncomputable theory
open_locale topology classical nnreal uniformity uniform_convergence
open set filter metric function
universes u v w
variables {F : Type*} {α : Type u} {β : Type v} {γ : Type w}
/-- `α →ᵇ β` is the type of bounded continuous functions `α → β` from a topological space to a
metric space.
When possible, instead of parametrizing results over `(f : α →ᵇ β)`,
you should parametrize over `(F : Type*) [bounded_continuous_map_class F α β] (f : F)`.
When you extend this structure, make sure to extend `bounded_continuous_map_class`. -/
structure bounded_continuous_function (α : Type u) (β : Type v)
[topological_space α] [pseudo_metric_space β] extends continuous_map α β :
Type (max u v) :=
(map_bounded' : ∃ C, ∀ x y, dist (to_fun x) (to_fun y) ≤ C)
localized "infixr (name := bounded_continuous_function)
` →ᵇ `:25 := bounded_continuous_function" in bounded_continuous_function
section
set_option old_structure_cmd true
/-- `bounded_continuous_map_class F α β` states that `F` is a type of bounded continuous maps.
You should also extend this typeclass when you extend `bounded_continuous_function`. -/
class bounded_continuous_map_class (F α β : Type*) [topological_space α] [pseudo_metric_space β]
extends continuous_map_class F α β :=
(map_bounded (f : F) : ∃ C, ∀ x y, dist (f x) (f y) ≤ C)
end
export bounded_continuous_map_class (map_bounded)
namespace bounded_continuous_function
section basics
variables [topological_space α] [pseudo_metric_space β] [pseudo_metric_space γ]
variables {f g : α →ᵇ β} {x : α} {C : ℝ}
instance : bounded_continuous_map_class (α →ᵇ β) α β :=
{ coe := λ f, f.to_fun,
coe_injective' := λ f g h, by { obtain ⟨⟨_, _⟩, _⟩ := f, obtain ⟨⟨_, _⟩, _⟩ := g, congr' },
map_continuous := λ f, f.continuous_to_fun,
map_bounded := λ f, f.map_bounded' }
/-- Helper instance for when there are too many metavariables to apply `fun_like.has_coe_to_fun`
directly. -/
instance : has_coe_to_fun (α →ᵇ β) (λ _, α → β) := fun_like.has_coe_to_fun
instance [bounded_continuous_map_class F α β] : has_coe_t F (α →ᵇ β) :=
⟨λ f, { to_fun := f, continuous_to_fun := map_continuous f, map_bounded' := map_bounded f }⟩
@[simp] lemma coe_to_continuous_fun (f : α →ᵇ β) : (f.to_continuous_map : α → β) = f := rfl
/-- See Note [custom simps projection]. We need to specify this projection explicitly in this case,
because it is a composition of multiple projections. -/
def simps.apply (h : α →ᵇ β) : α → β := h
initialize_simps_projections bounded_continuous_function (to_continuous_map_to_fun → apply)
protected lemma bounded (f : α →ᵇ β) : ∃C, ∀ x y : α, dist (f x) (f y) ≤ C := f.map_bounded'
protected lemma continuous (f : α →ᵇ β) : continuous f := f.to_continuous_map.continuous
@[ext] lemma ext (h : ∀ x, f x = g x) : f = g := fun_like.ext _ _ h
lemma bounded_range (f : α →ᵇ β) : bounded (range f) :=
bounded_range_iff.2 f.bounded
lemma bounded_image (f : α →ᵇ β) (s : set α) : bounded (f '' s) :=
f.bounded_range.mono $ image_subset_range _ _
lemma eq_of_empty [is_empty α] (f g : α →ᵇ β) : f = g :=
ext $ is_empty.elim ‹_›
/-- A continuous function with an explicit bound is a bounded continuous function. -/
def mk_of_bound (f : C(α, β)) (C : ℝ) (h : ∀ x y : α, dist (f x) (f y) ≤ C) : α →ᵇ β :=
⟨f, ⟨C, h⟩⟩
@[simp] lemma mk_of_bound_coe {f} {C} {h} : (mk_of_bound f C h : α → β) = (f : α → β) :=
rfl
/-- A continuous function on a compact space is automatically a bounded continuous function. -/
def mk_of_compact [compact_space α] (f : C(α, β)) : α →ᵇ β :=
⟨f, bounded_range_iff.1 (is_compact_range f.continuous).bounded⟩
@[simp] lemma mk_of_compact_apply [compact_space α] (f : C(α, β)) (a : α) :
mk_of_compact f a = f a :=
rfl
/-- If a function is bounded on a discrete space, it is automatically continuous,
and therefore gives rise to an element of the type of bounded continuous functions -/
@[simps] def mk_of_discrete [discrete_topology α] (f : α → β)
(C : ℝ) (h : ∀ x y : α, dist (f x) (f y) ≤ C) : α →ᵇ β :=
⟨⟨f, continuous_of_discrete_topology⟩, ⟨C, h⟩⟩
/-- The uniform distance between two bounded continuous functions -/
instance : has_dist (α →ᵇ β) :=
⟨λf g, Inf {C | 0 ≤ C ∧ ∀ x : α, dist (f x) (g x) ≤ C}⟩
lemma dist_eq : dist f g = Inf {C | 0 ≤ C ∧ ∀ x : α, dist (f x) (g x) ≤ C} := rfl
lemma dist_set_exists : ∃ C, 0 ≤ C ∧ ∀ x : α, dist (f x) (g x) ≤ C :=
begin
rcases f.bounded_range.union g.bounded_range with ⟨C, hC⟩,
refine ⟨max 0 C, le_max_left _ _, λ x, (hC _ _ _ _).trans (le_max_right _ _)⟩;
[left, right]; apply mem_range_self
end
/-- The pointwise distance is controlled by the distance between functions, by definition. -/
lemma dist_coe_le_dist (x : α) : dist (f x) (g x) ≤ dist f g :=
le_cInf dist_set_exists $ λb hb, hb.2 x
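/- Illustration (a sketch, not in the original file): `dist_coe_le_dist` chains
with the triangle inequality in `β` to bound a pointwise distance through an
intermediate function. -/
example (f₁ f₂ f₃ : α →ᵇ β) (y : α) :
  dist (f₁ y) (f₃ y) ≤ dist f₁ f₂ + dist f₂ f₃ :=
(dist_triangle (f₁ y) (f₂ y) (f₃ y)).trans $
  add_le_add (dist_coe_le_dist y) (dist_coe_le_dist y)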
/- This lemma will be needed in the proof of the metric space instance, but it will become
useless afterwards as it will be superseded by the general result that the distance is nonnegative
in metric spaces. -/
private lemma dist_nonneg' : 0 ≤ dist f g :=
le_cInf dist_set_exists (λ C, and.left)
/-- The distance between two functions is controlled by the supremum of the pointwise distances -/
lemma dist_le (C0 : (0 : ℝ) ≤ C) : dist f g ≤ C ↔ ∀x:α, dist (f x) (g x) ≤ C :=
⟨λ h x, le_trans (dist_coe_le_dist x) h, λ H, cInf_le ⟨0, λ C, and.left⟩ ⟨C0, H⟩⟩
lemma dist_le_iff_of_nonempty [nonempty α] :
dist f g ≤ C ↔ ∀ x, dist (f x) (g x) ≤ C :=
⟨λ h x, le_trans (dist_coe_le_dist x) h,
λ w, (dist_le (le_trans dist_nonneg (w (nonempty.some ‹_›)))).mpr w⟩
lemma dist_lt_of_nonempty_compact [nonempty α] [compact_space α]
(w : ∀x:α, dist (f x) (g x) < C) : dist f g < C :=
begin
have c : continuous (λ x, dist (f x) (g x)), { continuity, },
obtain ⟨x, -, le⟩ :=
is_compact.exists_forall_ge is_compact_univ set.univ_nonempty (continuous.continuous_on c),
exact lt_of_le_of_lt (dist_le_iff_of_nonempty.mpr (λ y, le y trivial)) (w x),
end
lemma dist_lt_iff_of_compact [compact_space α] (C0 : (0 : ℝ) < C) :
dist f g < C ↔ ∀x:α, dist (f x) (g x) < C :=
begin
fsplit,
{ intros w x,
exact lt_of_le_of_lt (dist_coe_le_dist x) w, },
{ by_cases h : nonempty α,
{ resetI,
exact dist_lt_of_nonempty_compact, },
{ rintro -,
convert C0,
apply le_antisymm _ dist_nonneg',
rw [dist_eq],
exact cInf_le ⟨0, λ C, and.left⟩ ⟨le_rfl, λ x, false.elim (h (nonempty.intro x))⟩, }, },
end
lemma dist_lt_iff_of_nonempty_compact [nonempty α] [compact_space α] :
dist f g < C ↔ ∀x:α, dist (f x) (g x) < C :=
⟨λ w x, lt_of_le_of_lt (dist_coe_le_dist x) w, dist_lt_of_nonempty_compact⟩
/-- The type of bounded continuous functions, with the uniform distance, is a pseudometric space. -/
instance : pseudo_metric_space (α →ᵇ β) :=
{ dist_self := λ f, le_antisymm ((dist_le le_rfl).2 $ λ x, by simp) dist_nonneg',
dist_comm := λ f g, by simp [dist_eq, dist_comm],
dist_triangle := λ f g h,
(dist_le (add_nonneg dist_nonneg' dist_nonneg')).2 $ λ x,
le_trans (dist_triangle _ _ _) (add_le_add (dist_coe_le_dist _) (dist_coe_le_dist _)) }
/-- The type of bounded continuous functions, with the uniform distance, is a metric space. -/
instance {α β} [topological_space α] [metric_space β] : metric_space (α →ᵇ β) :=
{ eq_of_dist_eq_zero := λ f g hfg, by ext x; exact
eq_of_dist_eq_zero (le_antisymm (hfg ▸ dist_coe_le_dist _) dist_nonneg) }
lemma nndist_eq : nndist f g = Inf {C | ∀ x : α, nndist (f x) (g x) ≤ C} :=
subtype.ext $ dist_eq.trans $ begin
rw [nnreal.coe_Inf, nnreal.coe_image],
simp_rw [mem_set_of_eq, ←nnreal.coe_le_coe, subtype.coe_mk, exists_prop, coe_nndist],
end
lemma nndist_set_exists : ∃ C, ∀ x : α, nndist (f x) (g x) ≤ C :=
subtype.exists.mpr $ dist_set_exists.imp $ λ a ⟨ha, h⟩, ⟨ha, h⟩
lemma nndist_coe_le_nndist (x : α) : nndist (f x) (g x) ≤ nndist f g :=
dist_coe_le_dist x
/-- On an empty space, bounded continuous functions are at distance 0 -/
lemma dist_zero_of_empty [is_empty α] : dist f g = 0 :=
by rw [(ext is_empty_elim : f = g), dist_self]
lemma dist_eq_supr : dist f g = ⨆ x : α, dist (f x) (g x) :=
begin
casesI is_empty_or_nonempty α, { rw [supr_of_empty', real.Sup_empty, dist_zero_of_empty] },
refine (dist_le_iff_of_nonempty.mpr $ le_csupr _).antisymm (csupr_le dist_coe_le_dist),
exact dist_set_exists.imp (λ C hC, forall_range_iff.2 hC.2)
end
lemma nndist_eq_supr : nndist f g = ⨆ x : α, nndist (f x) (g x) :=
subtype.ext $ dist_eq_supr.trans $ by simp_rw [nnreal.coe_supr, coe_nndist]
lemma tendsto_iff_tendsto_uniformly {ι : Type*} {F : ι → (α →ᵇ β)} {f : α →ᵇ β} {l : filter ι} :
tendsto F l (𝓝 f) ↔ tendsto_uniformly (λ i, F i) f l :=
iff.intro
(λ h, tendsto_uniformly_iff.2
(λ ε ε0, (metric.tendsto_nhds.mp h ε ε0).mp (eventually_of_forall $
λ n hn x, lt_of_le_of_lt (dist_coe_le_dist x) (dist_comm (F n) f ▸ hn))))
(λ h, metric.tendsto_nhds.mpr $ λ ε ε_pos,
(h _ (dist_mem_uniformity $ half_pos ε_pos)).mp (eventually_of_forall $
λ n hn, lt_of_le_of_lt ((dist_le (half_pos ε_pos).le).mpr $
λ x, dist_comm (f x) (F n x) ▸ le_of_lt (hn x)) (half_lt_self ε_pos)))
/-- The topology on `α →ᵇ β` is exactly the topology induced by the natural map to `α →ᵤ β`. -/
lemma inducing_coe_fn : inducing (uniform_fun.of_fun ∘ coe_fn : (α →ᵇ β) → (α →ᵤ β)) :=
begin
rw inducing_iff_nhds,
refine λ f, eq_of_forall_le_iff (λ l, _),
rw [← tendsto_iff_comap, ← tendsto_id', tendsto_iff_tendsto_uniformly,
uniform_fun.tendsto_iff_tendsto_uniformly],
refl
end
-- TODO: upgrade to a `uniform_embedding`
lemma embedding_coe_fn : embedding (uniform_fun.of_fun ∘ coe_fn : (α →ᵇ β) → (α →ᵤ β)) :=
⟨inducing_coe_fn, λ f g h, ext $ λ x, congr_fun h x⟩
variables (α) {β}
/-- Constant as a continuous bounded function. -/
@[simps {fully_applied := ff}] def const (b : β) : α →ᵇ β :=
⟨continuous_map.const α b, 0, by simp [le_rfl]⟩
variable {α}
lemma const_apply' (a : α) (b : β) : (const α b : α → β) a = b := rfl
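/- Illustration (a sketch): by `dist_le`, the uniform distance between two
constant functions is at most the distance between the constants; the pointwise
bound holds definitionally. -/
example (b c : β) : dist (const α b) (const α c) ≤ dist b c :=
(dist_le dist_nonneg).2 $ λ x, le_rfl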
/-- If the target space is inhabited, so is the space of bounded continuous functions -/
instance [inhabited β] : inhabited (α →ᵇ β) := ⟨const α default⟩
lemma lipschitz_evalx (x : α) : lipschitz_with 1 (λ f : α →ᵇ β, f x) :=
lipschitz_with.mk_one $ λ f g, dist_coe_le_dist x
theorem uniform_continuous_coe : @uniform_continuous (α →ᵇ β) (α → β) _ _ coe_fn :=
uniform_continuous_pi.2 $ λ x, (lipschitz_evalx x).uniform_continuous
lemma continuous_coe : continuous (λ (f : α →ᵇ β) x, f x) :=
uniform_continuous.continuous uniform_continuous_coe
/-- When `x` is fixed, `(f : α →ᵇ β) ↦ f x` is continuous -/
@[continuity] theorem continuous_eval_const {x : α} : continuous (λ f : α →ᵇ β, f x) :=
(continuous_apply x).comp continuous_coe
/-- The evaluation map is continuous, as a joint function of `u` and `x` -/
@[continuity] theorem continuous_eval : continuous (λ p : (α →ᵇ β) × α, p.1 p.2) :=
continuous_prod_of_continuous_lipschitz _ 1 (λ f, f.continuous) $ lipschitz_evalx
/-- Bounded continuous functions taking values in a complete space form a complete space. -/
instance [complete_space β] : complete_space (α →ᵇ β) :=
complete_of_cauchy_seq_tendsto $ λ (f : ℕ → α →ᵇ β) (hf : cauchy_seq f),
begin
/- We have to show that `f n` converges to a bounded continuous function.
For this, we prove pointwise convergence to define the limit, then check
it is a continuous bounded function, and then check the norm convergence. -/
rcases cauchy_seq_iff_le_tendsto_0.1 hf with ⟨b, b0, b_bound, b_lim⟩,
have f_bdd := λx n m N hn hm, le_trans (dist_coe_le_dist x) (b_bound n m N hn hm),
have fx_cau : ∀x, cauchy_seq (λn, f n x) :=
λx, cauchy_seq_iff_le_tendsto_0.2 ⟨b, b0, f_bdd x, b_lim⟩,
choose F hF using λx, cauchy_seq_tendsto_of_complete (fx_cau x),
/- F : α → β, hF : ∀ (x : α), tendsto (λ (n : ℕ), f n x) at_top (𝓝 (F x))
`F` is the desired limit function. Check that it is uniformly approximated by `f N` -/
have fF_bdd : ∀x N, dist (f N x) (F x) ≤ b N :=
λ x N, le_of_tendsto (tendsto_const_nhds.dist (hF x))
(filter.eventually_at_top.2 ⟨N, λn hn, f_bdd x N n N (le_refl N) hn⟩),
refine ⟨⟨⟨F, _⟩, _⟩, _⟩,
{ /- Check that `F` is continuous, as a uniform limit of continuous functions -/
have : tendsto_uniformly (λn x, f n x) F at_top,
{ refine metric.tendsto_uniformly_iff.2 (λ ε ε0, _),
refine ((tendsto_order.1 b_lim).2 ε ε0).mono (λ n hn x, _),
rw dist_comm,
exact lt_of_le_of_lt (fF_bdd x n) hn },
exact this.continuous (eventually_of_forall $ λ N, (f N).continuous) },
{ /- Check that `F` is bounded -/
rcases (f 0).bounded with ⟨C, hC⟩,
refine ⟨C + (b 0 + b 0), λ x y, _⟩,
calc dist (F x) (F y) ≤ dist (f 0 x) (f 0 y) + (dist (f 0 x) (F x) + dist (f 0 y) (F y)) :
dist_triangle4_left _ _ _ _
... ≤ C + (b 0 + b 0) : by mono* },
{ /- Check that `F` is close to `f N` in distance terms -/
refine tendsto_iff_dist_tendsto_zero.2 (squeeze_zero (λ _, dist_nonneg) _ b_lim),
exact λ N, (dist_le (b0 _)).2 (λx, fF_bdd x N) }
end
/-- Composition of a bounded continuous function and a continuous function. -/
def comp_continuous {δ : Type*} [topological_space δ] (f : α →ᵇ β) (g : C(δ, α)) : δ →ᵇ β :=
{ to_continuous_map := f.1.comp g,
map_bounded' := f.map_bounded'.imp (λ C hC x y, hC _ _) }
@[simp] lemma coe_comp_continuous {δ : Type*} [topological_space δ] (f : α →ᵇ β) (g : C(δ, α)) :
coe_fn (f.comp_continuous g) = f ∘ g := rfl
@[simp] lemma comp_continuous_apply {δ : Type*} [topological_space δ]
(f : α →ᵇ β) (g : C(δ, α)) (x : δ) : f.comp_continuous g x = f (g x) :=
rfl
lemma lipschitz_comp_continuous {δ : Type*} [topological_space δ] (g : C(δ, α)) :
lipschitz_with 1 (λ f : α →ᵇ β, f.comp_continuous g) :=
lipschitz_with.mk_one $ λ f₁ f₂, (dist_le dist_nonneg).2 $ λ x, dist_coe_le_dist (g x)
lemma continuous_comp_continuous {δ : Type*} [topological_space δ] (g : C(δ, α)) :
continuous (λ f : α →ᵇ β, f.comp_continuous g) :=
(lipschitz_comp_continuous g).continuous
/-- Restrict a bounded continuous function to a set. -/
def restrict (f : α →ᵇ β) (s : set α) : s →ᵇ β :=
f.comp_continuous $ (continuous_map.id _).restrict s
@[simp] lemma coe_restrict (f : α →ᵇ β) (s : set α) : coe_fn (f.restrict s) = f ∘ coe := rfl
@[simp] lemma restrict_apply (f : α →ᵇ β) (s : set α) (x : s) : f.restrict s x = f x := rfl
/-- Composition (in the target) of a bounded continuous function with a Lipschitz map again
gives a bounded continuous function -/
def comp (G : β → γ) {C : ℝ≥0} (H : lipschitz_with C G)
(f : α →ᵇ β) : α →ᵇ γ :=
⟨⟨λx, G (f x), H.continuous.comp f.continuous⟩,
let ⟨D, hD⟩ := f.bounded in
⟨max C 0 * D, λ x y, calc
dist (G (f x)) (G (f y)) ≤ C * dist (f x) (f y) : H.dist_le_mul _ _
... ≤ max C 0 * dist (f x) (f y) : mul_le_mul_of_nonneg_right (le_max_left C 0) dist_nonneg
... ≤ max C 0 * D : mul_le_mul_of_nonneg_left (hD _ _) (le_max_right C 0)⟩⟩
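/- Illustration (a sketch): `λ y, dist y b` is `1`-Lipschitz
(`lipschitz_with.dist_right`), so post-composition turns any `f : α →ᵇ β` into
a bounded continuous real-valued function. -/
example (f : α →ᵇ β) (b : β) : α →ᵇ ℝ :=
comp (λ y, dist y b) (lipschitz_with.dist_right b) f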
/-- The composition operator (in the target) with a Lipschitz map is Lipschitz -/
lemma lipschitz_comp {G : β → γ} {C : ℝ≥0} (H : lipschitz_with C G) :
lipschitz_with C (comp G H : (α →ᵇ β) → α →ᵇ γ) :=
lipschitz_with.of_dist_le_mul $ λ f g,
(dist_le (mul_nonneg C.2 dist_nonneg)).2 $ λ x,
calc dist (G (f x)) (G (g x)) ≤ C * dist (f x) (g x) : H.dist_le_mul _ _
... ≤ C * dist f g : mul_le_mul_of_nonneg_left (dist_coe_le_dist _) C.2
/-- The composition operator (in the target) with a Lipschitz map is uniformly continuous -/
lemma uniform_continuous_comp {G : β → γ} {C : ℝ≥0} (H : lipschitz_with C G) :
uniform_continuous (comp G H : (α →ᵇ β) → α →ᵇ γ) :=
(lipschitz_comp H).uniform_continuous
/-- The composition operator (in the target) with a Lipschitz map is continuous -/
lemma continuous_comp {G : β → γ} {C : ℝ≥0} (H : lipschitz_with C G) :
continuous (comp G H : (α →ᵇ β) → α →ᵇ γ) :=
(lipschitz_comp H).continuous
/-- Restriction (in the target) of a bounded continuous function taking values in a subset -/
def cod_restrict (s : set β) (f : α →ᵇ β) (H : ∀x, f x ∈ s) : α →ᵇ s :=
⟨⟨s.cod_restrict f H, f.continuous.subtype_mk _⟩, f.bounded⟩
section extend
variables {δ : Type*} [topological_space δ] [discrete_topology δ]
/-- A version of `function.extend` for bounded continuous maps. We assume that the domain has
discrete topology, so we only need to verify boundedness. -/
def extend (f : α ↪ δ) (g : α →ᵇ β) (h : δ →ᵇ β) : δ →ᵇ β :=
{ to_fun := extend f g h,
continuous_to_fun := continuous_of_discrete_topology,
map_bounded' :=
begin
rw [← bounded_range_iff, range_extend f.injective, metric.bounded_union],
exact ⟨g.bounded_range, h.bounded_image _⟩
end }
@[simp] lemma extend_apply (f : α ↪ δ) (g : α →ᵇ β) (h : δ →ᵇ β) (x : α) :
extend f g h (f x) = g x :=
f.injective.extend_apply _ _ _
@[simp] lemma extend_comp (f : α ↪ δ) (g : α →ᵇ β) (h : δ →ᵇ β) : extend f g h ∘ f = g :=
extend_comp f.injective _ _
lemma extend_apply' {f : α ↪ δ} {x : δ} (hx : x ∉ range f) (g : α →ᵇ β) (h : δ →ᵇ β) :
extend f g h x = h x :=
extend_apply' _ _ _ hx
lemma extend_of_empty [is_empty α] (f : α ↪ δ) (g : α →ᵇ β) (h : δ →ᵇ β) :
extend f g h = h :=
fun_like.coe_injective $ function.extend_of_empty f g h
@[simp] lemma dist_extend_extend (f : α ↪ δ) (g₁ g₂ : α →ᵇ β) (h₁ h₂ : δ →ᵇ β) :
dist (g₁.extend f h₁) (g₂.extend f h₂) =
max (dist g₁ g₂) (dist (h₁.restrict (range f)ᶜ) (h₂.restrict (range f)ᶜ)) :=
begin
refine le_antisymm ((dist_le $ le_max_iff.2 $ or.inl dist_nonneg).2 $ λ x, _) (max_le _ _),
{ rcases em (∃ y, f y = x) with (⟨x, rfl⟩|hx),
{ simp only [extend_apply],
exact (dist_coe_le_dist x).trans (le_max_left _ _) },
{ simp only [extend_apply' hx],
lift x to ((range f)ᶜ : set δ) using hx,
calc dist (h₁ x) (h₂ x) = dist (h₁.restrict (range f)ᶜ x) (h₂.restrict (range f)ᶜ x) : rfl
... ≤ dist (h₁.restrict (range f)ᶜ) (h₂.restrict (range f)ᶜ) : dist_coe_le_dist x
... ≤ _ : le_max_right _ _ } },
{ refine (dist_le dist_nonneg).2 (λ x, _),
rw [← extend_apply f g₁ h₁, ← extend_apply f g₂ h₂],
exact dist_coe_le_dist _ },
{ refine (dist_le dist_nonneg).2 (λ x, _),
calc dist (h₁ x) (h₂ x) = dist (extend f g₁ h₁ x) (extend f g₂ h₂ x) :
by rw [extend_apply' x.coe_prop, extend_apply' x.coe_prop]
... ≤ _ : dist_coe_le_dist _ }
end
lemma isometry_extend (f : α ↪ δ) (h : δ →ᵇ β) :
isometry (λ g : α →ᵇ β, extend f g h) :=
isometry.of_dist_eq $ λ g₁ g₂, by simp [dist_nonneg]
end extend
end basics
section arzela_ascoli
variables [topological_space α] [compact_space α] [pseudo_metric_space β]
variables {f g : α →ᵇ β} {x : α} {C : ℝ}
/- The Arzela-Ascoli theorem asserts that, on a compact space, a set of functions sharing
a common modulus of continuity and taking values in a compact set forms a compact
subset for the topology of uniform convergence. In this section, we prove this theorem
and several useful variations around it. -/
/-- First version, with pointwise equicontinuity and range in a compact space -/
theorem arzela_ascoli₁ [compact_space β]
(A : set (α →ᵇ β))
(closed : is_closed A)
(H : equicontinuous (coe_fn : A → α → β)) :
is_compact A :=
begin
simp_rw [equicontinuous, metric.equicontinuous_at_iff_pair] at H,
refine is_compact_of_totally_bounded_is_closed _ closed,
refine totally_bounded_of_finite_discretization (λ ε ε0, _),
rcases exists_between ε0 with ⟨ε₁, ε₁0, εε₁⟩,
let ε₂ := ε₁/2/2,
/- We have to find a finite discretization of `u`, i.e., finite information
that is sufficient to reconstruct `u` up to ε. This information will be
provided by the values of `u` on a sufficiently dense set tα,
slightly translated to fit in a finite ε₂-dense set tβ in the image. Such
sets exist by compactness of the source and range. Then, to check that these
data determine the function up to ε, one uses the control on the modulus of
continuity to extend the closeness on tα to closeness everywhere. -/
have ε₂0 : ε₂ > 0 := half_pos (half_pos ε₁0),
have : ∀x:α, ∃U, x ∈ U ∧ is_open U ∧ ∀ (y z ∈ U) {f : α →ᵇ β},
f ∈ A → dist (f y) (f z) < ε₂ := λ x,
let ⟨U, nhdsU, hU⟩ := H x _ ε₂0,
⟨V, VU, openV, xV⟩ := _root_.mem_nhds_iff.1 nhdsU in
⟨V, xV, openV, λy hy z hz f hf, hU y (VU hy) z (VU hz) ⟨f, hf⟩⟩,
choose U hU using this,
/- For all x, the set hU x is an open set containing x on which the elements of A
fluctuate by at most ε₂.
We extract finitely many of these sets that cover the whole space, by compactness -/
rcases is_compact_univ.elim_finite_subcover_image
(λx _, (hU x).2.1) (λx hx, mem_bUnion (mem_univ _) (hU x).1)
with ⟨tα, _, ⟨_⟩, htα⟩,
/- tα : set α, htα : univ ⊆ ⋃x ∈ tα, U x -/
rcases @finite_cover_balls_of_compact β _ _ is_compact_univ _ ε₂0
with ⟨tβ, _, ⟨_⟩, htβ⟩, resetI,
/- tβ : set β, htβ : univ ⊆ ⋃y ∈ tβ, ball y ε₂ -/
/- Associate to every point `y` in the space a nearby point `F y` in tβ -/
choose F hF using λy, show ∃z∈tβ, dist y z < ε₂, by simpa using htβ (mem_univ y),
/- F : β → β, hF : ∀ (y : β), F y ∈ tβ ∧ dist y (F y) < ε₂ -/
/- Associate to every function a discrete approximation, mapping each point in `tα`
to a point in `tβ` close to its true image by the function. -/
refine ⟨tα → tβ, by apply_instance, λ f a, ⟨F (f a), (hF (f a)).1⟩, _⟩,
rintro ⟨f, hf⟩ ⟨g, hg⟩ f_eq_g,
/- If two functions have the same approximation, then they are within distance ε -/
refine lt_of_le_of_lt ((dist_le $ le_of_lt ε₁0).2 (λ x, _)) εε₁,
obtain ⟨x', x'tα, hx'⟩ : ∃x' ∈ tα, x ∈ U x' := mem_Union₂.1 (htα (mem_univ x)),
calc dist (f x) (g x)
≤ dist (f x) (f x') + dist (g x) (g x') + dist (f x') (g x') : dist_triangle4_right _ _ _ _
... ≤ ε₂ + ε₂ + ε₁/2 : le_of_lt (add_lt_add (add_lt_add _ _) _)
... = ε₁ : by rw [add_halves, add_halves],
{ exact (hU x').2.2 _ hx' _ ((hU x').1) hf },
{ exact (hU x').2.2 _ hx' _ ((hU x').1) hg },
{ have F_f_g : F (f x') = F (g x') :=
(congr_arg (λ f:tα → tβ, (f ⟨x', x'tα⟩ : β)) f_eq_g : _),
calc dist (f x') (g x')
≤ dist (f x') (F (f x')) + dist (g x') (F (f x')) : dist_triangle_right _ _ _
... = dist (f x') (F (f x')) + dist (g x') (F (g x')) : by rw F_f_g
... < ε₂ + ε₂ : add_lt_add (hF (f x')).2 (hF (g x')).2
... = ε₁/2 : add_halves _ }
end
/-- Second version, with pointwise equicontinuity and range in a compact subset -/
theorem arzela_ascoli₂
(s : set β) (hs : is_compact s)
(A : set (α →ᵇ β))
(closed : is_closed A)
(in_s : ∀(f : α →ᵇ β) (x : α), f ∈ A → f x ∈ s)
(H : equicontinuous (coe_fn : A → α → β)) :
is_compact A :=
/- This version is deduced from the previous one by restricting to the compact type in the target,
using compactness there and then lifting everything to the original space. -/
begin
have M : lipschitz_with 1 coe := lipschitz_with.subtype_coe s,
let F : (α →ᵇ s) → α →ᵇ β := comp coe M,
refine is_compact_of_is_closed_subset
((_ : is_compact (F ⁻¹' A)).image (continuous_comp M)) closed (λ f hf, _),
{ haveI : compact_space s := is_compact_iff_compact_space.1 hs,
refine arzela_ascoli₁ _ (continuous_iff_is_closed.1 (continuous_comp M) _ closed) _,
rw uniform_embedding_subtype_coe.to_uniform_inducing.equicontinuous_iff,
exact H.comp (A.restrict_preimage F) },
{ let g := cod_restrict s f (λx, in_s f x hf),
rw [show f = F g, by ext; refl] at hf ⊢,
exact ⟨g, hf, rfl⟩ }
end
/-- Third (main) version, with pointwise equicontinuity and range in a compact subset, but
without closedness. The closure is then compact -/
theorem arzela_ascoli [t2_space β]
(s : set β) (hs : is_compact s)
(A : set (α →ᵇ β))
(in_s : ∀(f : α →ᵇ β) (x : α), f ∈ A → f x ∈ s)
(H : equicontinuous (coe_fn : A → α → β)) :
is_compact (closure A) :=
/- This version is deduced from the previous one by checking that the closure of A, in
addition to being closed, still satisfies the properties of compact range and equicontinuity -/
arzela_ascoli₂ s hs (closure A) is_closed_closure
(λ f x hf, (mem_of_closed' hs.is_closed).2 $ λ ε ε0,
let ⟨g, gA, dist_fg⟩ := metric.mem_closure_iff.1 hf ε ε0 in
⟨g x, in_s g x gA, lt_of_le_of_lt (dist_coe_le_dist _) dist_fg⟩)
(H.closure' continuous_coe)
end arzela_ascoli
section has_one
variables [topological_space α] [pseudo_metric_space β] [has_one β]
@[to_additive] instance : has_one (α →ᵇ β) := ⟨const α 1⟩
@[simp, to_additive] lemma coe_one : ((1 : α →ᵇ β) : α → β) = 1 := rfl
@[simp, to_additive]
lemma mk_of_compact_one [compact_space α] : mk_of_compact (1 : C(α, β)) = 1 := rfl
@[to_additive] lemma forall_coe_one_iff_one (f : α →ᵇ β) : (∀ x, f x = 1) ↔ f = 1 :=
(@fun_like.ext_iff _ _ _ _ f 1).symm
@[simp, to_additive] lemma one_comp_continuous [topological_space γ] (f : C(γ, α)) :
(1 : α →ᵇ β).comp_continuous f = 1 := rfl
end has_one
section has_lipschitz_add
/- In this section, if `β` is an `add_monoid` whose addition operation is Lipschitz, then we show
that the space of bounded continuous functions from `α` to `β` inherits a topological `add_monoid`
structure, by using pointwise operations and checking that they are compatible with the uniform
distance.
Implementation note: The material in this section could have been written for `has_lipschitz_mul`
and transported by `@[to_additive]`. We choose not to do this because this causes a few lemma
names (for example, `coe_mul`) to conflict with later lemma names for normed rings; this is only a
trivial inconvenience, but in any case there are no obvious applications of the multiplicative
version. -/
variables [topological_space α] [pseudo_metric_space β] [add_monoid β]
variables [has_lipschitz_add β]
variables (f g : α →ᵇ β) {x : α} {C : ℝ}
/-- The pointwise sum of two bounded continuous functions is again bounded continuous. -/
instance : has_add (α →ᵇ β) :=
{ add := λ f g,
bounded_continuous_function.mk_of_bound (f.to_continuous_map + g.to_continuous_map)
(↑(has_lipschitz_add.C β) * max (classical.some f.bounded) (classical.some g.bounded))
begin
intros x y,
refine le_trans (lipschitz_with_lipschitz_const_add ⟨f x, g x⟩ ⟨f y, g y⟩) _,
rw prod.dist_eq,
refine mul_le_mul_of_nonneg_left _ (has_lipschitz_add.C β).coe_nonneg,
apply max_le_max,
exact classical.some_spec f.bounded x y,
exact classical.some_spec g.bounded x y,
end }
@[simp] lemma coe_add : ⇑(f + g) = f + g := rfl
lemma add_apply : (f + g) x = f x + g x := rfl
@[simp] lemma mk_of_compact_add [compact_space α] (f g : C(α, β)) :
mk_of_compact (f + g) = mk_of_compact f + mk_of_compact g := rfl
lemma add_comp_continuous [topological_space γ] (h : C(γ, α)) :
(g + f).comp_continuous h = g.comp_continuous h + f.comp_continuous h := rfl
@[simp] lemma coe_nsmul_rec : ∀ n, ⇑(nsmul_rec n f) = n • f
| 0 := by rw [nsmul_rec, zero_smul, coe_zero]
| (n + 1) := by rw [nsmul_rec, succ_nsmul, coe_add, coe_nsmul_rec]
instance has_nat_scalar : has_smul ℕ (α →ᵇ β) :=
{ smul := λ n f,
{ to_continuous_map := n • f.to_continuous_map,
map_bounded' := by simpa [coe_nsmul_rec] using (nsmul_rec n f).map_bounded' } }
@[simp] lemma coe_nsmul (r : ℕ) (f : α →ᵇ β) : ⇑(r • f) = r • f := rfl
@[simp] lemma nsmul_apply (r : ℕ) (f : α →ᵇ β) (v : α) : (r • f) v = r • f v := rfl
instance : add_monoid (α →ᵇ β) :=
fun_like.coe_injective.add_monoid _ coe_zero coe_add (λ _ _, coe_nsmul _ _)
instance : has_lipschitz_add (α →ᵇ β) :=
{ lipschitz_add := ⟨has_lipschitz_add.C β, begin
have C_nonneg := (has_lipschitz_add.C β).coe_nonneg,
rw lipschitz_with_iff_dist_le_mul,
rintros ⟨f₁, g₁⟩ ⟨f₂, g₂⟩,
rw dist_le (mul_nonneg C_nonneg dist_nonneg),
intros x,
refine le_trans (lipschitz_with_lipschitz_const_add ⟨f₁ x, g₁ x⟩ ⟨f₂ x, g₂ x⟩) _,
refine mul_le_mul_of_nonneg_left _ C_nonneg,
apply max_le_max; exact dist_coe_le_dist x,
end⟩ }
/-- The coercion of a bounded continuous function to a plain function, as an
`add_monoid_hom`. Similar to `add_monoid_hom.coe_fn`. -/
@[simps] def coe_fn_add_hom : (α →ᵇ β) →+ (α → β) :=
{ to_fun := coe_fn, map_zero' := coe_zero, map_add' := coe_add }
variables (α β)
/-- The additive map forgetting that a bounded continuous function is bounded.
-/
@[simps] def to_continuous_map_add_hom : (α →ᵇ β) →+ C(α, β) :=
{ to_fun := to_continuous_map,
map_zero' := by { ext, simp, },
map_add' := by { intros, ext, simp, }, }
end has_lipschitz_add
section comm_has_lipschitz_add
variables [topological_space α] [pseudo_metric_space β] [add_comm_monoid β] [has_lipschitz_add β]
@[to_additive] instance : add_comm_monoid (α →ᵇ β) :=
{ add_comm := assume f g, by ext; simp [add_comm],
.. bounded_continuous_function.add_monoid }
open_locale big_operators
@[simp] lemma coe_sum {ι : Type*} (s : finset ι) (f : ι → (α →ᵇ β)) :
⇑(∑ i in s, f i) = (∑ i in s, (f i : α → β)) :=
(@coe_fn_add_hom α β _ _ _ _).map_sum f s
lemma sum_apply {ι : Type*} (s : finset ι) (f : ι → (α →ᵇ β)) (a : α) :
(∑ i in s, f i) a = (∑ i in s, f i a) :=
by simp
end comm_has_lipschitz_add
section normed_add_comm_group
/- In this section, if β is a normed group, then we show that the space of bounded
continuous functions from α to β inherits a normed group structure, by using
pointwise operations and checking that they are compatible with the uniform distance. -/
variables [topological_space α] [seminormed_add_comm_group β]
variables (f g : α →ᵇ β) {x : α} {C : ℝ}
instance : has_norm (α →ᵇ β) := ⟨λu, dist u 0⟩
lemma norm_def : ‖f‖ = dist f 0 := rfl
/-- The norm of a bounded continuous function is the supremum of `‖f x‖`.
We use `Inf` to ensure that the definition works if `α` has no elements. -/
lemma norm_eq (f : α →ᵇ β) :
‖f‖ = Inf {C : ℝ | 0 ≤ C ∧ ∀ (x : α), ‖f x‖ ≤ C} :=
by simp [norm_def, bounded_continuous_function.dist_eq]
/-- When the domain is non-empty, we do not need the `0 ≤ C` condition in the formula for ‖f‖ as an
`Inf`. -/
lemma norm_eq_of_nonempty [h : nonempty α] : ‖f‖ = Inf {C : ℝ | ∀ (x : α), ‖f x‖ ≤ C} :=
begin
unfreezingI { obtain ⟨a⟩ := h, },
rw norm_eq,
congr,
ext,
simp only [and_iff_right_iff_imp],
exact λ h', le_trans (norm_nonneg (f a)) (h' a),
end
@[simp] lemma norm_eq_zero_of_empty [h : is_empty α] : ‖f‖ = 0 :=
dist_zero_of_empty
lemma norm_coe_le_norm (x : α) : ‖f x‖ ≤ ‖f‖ := calc
‖f x‖ = dist (f x) ((0 : α →ᵇ β) x) : by simp [dist_zero_right]
... ≤ ‖f‖ : dist_coe_le_dist _
lemma dist_le_two_norm' {f : γ → β} {C : ℝ} (hC : ∀ x, ‖f x‖ ≤ C) (x y : γ) :
dist (f x) (f y) ≤ 2 * C :=
calc dist (f x) (f y) ≤ ‖f x‖ + ‖f y‖ : dist_le_norm_add_norm _ _
... ≤ C + C : add_le_add (hC x) (hC y)
... = 2 * C : (two_mul _).symm
/-- Distance between the images of any two points is at most twice the norm of the function. -/
lemma dist_le_two_norm (x y : α) : dist (f x) (f y) ≤ 2 * ‖f‖ :=
dist_le_two_norm' f.norm_coe_le_norm x y
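/- Illustration (a sketch): `norm_coe_le_norm` combines with the triangle
inequality in `β` to bound pointwise sums by the sum of the sup-norms. -/
example (f₁ f₂ : α →ᵇ β) (y : α) : ‖f₁ y + f₂ y‖ ≤ ‖f₁‖ + ‖f₂‖ :=
(norm_add_le _ _).trans $
  add_le_add (f₁.norm_coe_le_norm y) (f₂.norm_coe_le_norm y)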
variable {f}
/-- The norm of a function is controlled by the supremum of the pointwise norms -/
lemma norm_le (C0 : (0 : ℝ) ≤ C) : ‖f‖ ≤ C ↔ ∀x:α, ‖f x‖ ≤ C :=
by simpa using @dist_le _ _ _ _ f 0 _ C0
lemma norm_le_of_nonempty [nonempty α]
{f : α →ᵇ β} {M : ℝ} : ‖f‖ ≤ M ↔ ∀ x, ‖f x‖ ≤ M :=
begin
simp_rw [norm_def, ←dist_zero_right],
exact dist_le_iff_of_nonempty,
end
lemma norm_lt_iff_of_compact [compact_space α]
{f : α →ᵇ β} {M : ℝ} (M0 : 0 < M) : ‖f‖ < M ↔ ∀ x, ‖f x‖ < M :=
begin
simp_rw [norm_def, ←dist_zero_right],
exact dist_lt_iff_of_compact M0,
end
lemma norm_lt_iff_of_nonempty_compact [nonempty α] [compact_space α]
{f : α →ᵇ β} {M : ℝ} : ‖f‖ < M ↔ ∀ x, ‖f x‖ < M :=
begin
simp_rw [norm_def, ←dist_zero_right],
exact dist_lt_iff_of_nonempty_compact,
end
variable (f)
/-- Norm of `const α b` is less than or equal to `‖b‖`. If `α` is nonempty,
then it is equal to `‖b‖`. -/
lemma norm_const_le (b : β) : ‖const α b‖ ≤ ‖b‖ :=
(norm_le (norm_nonneg b)).2 $ λ x, le_rfl
@[simp] lemma norm_const_eq [h : nonempty α] (b : β) : ‖const α b‖ = ‖b‖ :=
le_antisymm (norm_const_le b) $ h.elim $ λ x, (const α b).norm_coe_le_norm x
/-- Constructing a bounded continuous function from a uniformly bounded continuous
function taking values in a normed group. -/
def of_normed_add_comm_group {α : Type u} {β : Type v} [topological_space α]
[seminormed_add_comm_group β] (f : α → β) (Hf : continuous f) (C : ℝ) (H : ∀x, ‖f x‖ ≤ C) :
α →ᵇ β :=
⟨⟨λn, f n, Hf⟩, ⟨_, dist_le_two_norm' H⟩⟩
@[simp] lemma coe_of_normed_add_comm_group
{α : Type u} {β : Type v} [topological_space α] [seminormed_add_comm_group β]
(f : α → β) (Hf : continuous f) (C : ℝ) (H : ∀x, ‖f x‖ ≤ C) :
(of_normed_add_comm_group f Hf C H : α → β) = f := rfl
lemma norm_of_normed_add_comm_group_le {f : α → β} (hfc : continuous f) {C : ℝ} (hC : 0 ≤ C)
(hfC : ∀ x, ‖f x‖ ≤ C) : ‖of_normed_add_comm_group f hfc C hfC‖ ≤ C :=
(norm_le hC).2 hfC
/-- Constructing a bounded continuous function from a uniformly bounded
function on a discrete space, taking values in a normed group -/
def of_normed_add_comm_group_discrete {α : Type u} {β : Type v}
[topological_space α] [discrete_topology α] [seminormed_add_comm_group β]
(f : α → β) (C : ℝ) (H : ∀x, norm (f x) ≤ C) : α →ᵇ β :=
of_normed_add_comm_group f continuous_of_discrete_topology C H
@[simp] lemma coe_of_normed_add_comm_group_discrete {α : Type u} {β : Type v} [topological_space α]
[discrete_topology α] [seminormed_add_comm_group β] (f : α → β) (C : ℝ) (H : ∀x, ‖f x‖ ≤ C) :
(of_normed_add_comm_group_discrete f C H : α → β) = f := rfl
/-- Taking the pointwise norm of a bounded continuous function with values in a
`seminormed_add_comm_group` yields a bounded continuous function with values in ℝ. -/
def norm_comp : α →ᵇ ℝ :=
f.comp norm lipschitz_with_one_norm
@[simp] lemma coe_norm_comp : (f.norm_comp : α → ℝ) = norm ∘ f := rfl
@[simp] lemma norm_norm_comp : ‖f.norm_comp‖ = ‖f‖ :=
by simp only [norm_eq, coe_norm_comp, norm_norm]
lemma bdd_above_range_norm_comp : bdd_above $ set.range $ norm ∘ f :=
(real.bounded_iff_bdd_below_bdd_above.mp $ @bounded_range _ _ _ _ f.norm_comp).2
lemma norm_eq_supr_norm : ‖f‖ = ⨆ x : α, ‖f x‖ :=
by simp_rw [norm_def, dist_eq_supr, coe_zero, pi.zero_apply, dist_zero_right]
/-- If `‖(1 : β)‖ = 1`, then `‖(1 : α →ᵇ β)‖ = 1` if `α` is nonempty. -/
instance [nonempty α] [has_one β] [norm_one_class β] : norm_one_class (α →ᵇ β) :=
{ norm_one := by simp only [norm_eq_supr_norm, coe_one, pi.one_apply, norm_one, csupr_const] }
/-- The pointwise opposite of a bounded continuous function is again bounded continuous. -/
instance : has_neg (α →ᵇ β) :=
⟨λf, of_normed_add_comm_group (-f) f.continuous.neg ‖f‖ $ λ x,
trans_rel_right _ (norm_neg _) (f.norm_coe_le_norm x)⟩
/-- The pointwise difference of two bounded continuous functions is again bounded continuous. -/
instance : has_sub (α →ᵇ β) :=
⟨λf g, of_normed_add_comm_group (f - g) (f.continuous.sub g.continuous) (‖f‖ + ‖g‖) $ λ x,
by { simp only [sub_eq_add_neg],
exact le_trans (norm_add_le _ _) (add_le_add (f.norm_coe_le_norm x) $
trans_rel_right _ (norm_neg _) (g.norm_coe_le_norm x)) }⟩
@[simp] lemma coe_neg : ⇑(-f) = -f := rfl
lemma neg_apply : (-f) x = -f x := rfl
@[simp] lemma coe_sub : ⇑(f - g) = f - g := rfl
lemma sub_apply : (f - g) x = f x - g x := rfl
@[simp] lemma mk_of_compact_neg [compact_space α] (f : C(α, β)) :
mk_of_compact (-f) = -mk_of_compact f := rfl
@[simp] lemma mk_of_compact_sub [compact_space α] (f g : C(α, β)) :
mk_of_compact (f - g) = mk_of_compact f - mk_of_compact g := rfl
@[simp] lemma coe_zsmul_rec : ∀ z, ⇑(zsmul_rec z f) = z • f
| (int.of_nat n) := by rw [zsmul_rec, int.of_nat_eq_coe, coe_nsmul_rec, coe_nat_zsmul]
| -[1+ n] := by rw [zsmul_rec, zsmul_neg_succ_of_nat, coe_neg, coe_nsmul_rec]
instance has_int_scalar : has_smul ℤ (α →ᵇ β) :=
{ smul := λ n f,
{ to_continuous_map := n • f.to_continuous_map,
map_bounded' := by simpa using (zsmul_rec n f).map_bounded' } }
@[simp] lemma coe_zsmul (r : ℤ) (f : α →ᵇ β) : ⇑(r • f) = r • f := rfl
@[simp] lemma zsmul_apply (r : ℤ) (f : α →ᵇ β) (v : α) : (r • f) v = r • f v := rfl
instance : add_comm_group (α →ᵇ β) :=
fun_like.coe_injective.add_comm_group _ coe_zero coe_add coe_neg coe_sub (λ _ _, coe_nsmul _ _)
(λ _ _, coe_zsmul _ _)
instance : seminormed_add_comm_group (α →ᵇ β) :=
{ dist_eq := λ f g, by simp only [norm_eq, dist_eq, dist_eq_norm, sub_apply] }
instance {α β} [topological_space α] [normed_add_comm_group β] : normed_add_comm_group (α →ᵇ β) :=
{ ..bounded_continuous_function.seminormed_add_comm_group }
lemma nnnorm_def : ‖f‖₊ = nndist f 0 := rfl
lemma nnnorm_coe_le_nnnorm (x : α) : ‖f x‖₊ ≤ ‖f‖₊ := norm_coe_le_norm _ _
lemma nndist_le_two_nnnorm (x y : α) : nndist (f x) (f y) ≤ 2 * ‖f‖₊ := dist_le_two_norm _ _ _
/-- The nnnorm of a function is controlled by the supremum of the pointwise nnnorms -/
lemma nnnorm_le (C : ℝ≥0) : ‖f‖₊ ≤ C ↔ ∀x:α, ‖f x‖₊ ≤ C :=
norm_le C.prop
lemma nnnorm_const_le (b : β) : ‖const α b‖₊ ≤ ‖b‖₊ :=
norm_const_le _
@[simp] lemma nnnorm_const_eq [h : nonempty α] (b : β) : ‖const α b‖₊ = ‖b‖₊ :=
subtype.ext $ norm_const_eq _
lemma nnnorm_eq_supr_nnnorm : ‖f‖₊ = ⨆ x : α, ‖f x‖₊ :=
subtype.ext $ (norm_eq_supr_norm f).trans $ by simp_rw [nnreal.coe_supr, coe_nnnorm]
lemma abs_diff_coe_le_dist : ‖f x - g x‖ ≤ dist f g :=
by { rw dist_eq_norm, exact (f - g).norm_coe_le_norm x }
lemma coe_le_coe_add_dist {f g : α →ᵇ ℝ} : f x ≤ g x + dist f g :=
sub_le_iff_le_add'.1 $ (abs_le.1 $ @dist_coe_le_dist _ _ _ _ f g x).2
lemma norm_comp_continuous_le [topological_space γ] (f : α →ᵇ β) (g : C(γ, α)) :
‖f.comp_continuous g‖ ≤ ‖f‖ :=
((lipschitz_comp_continuous g).dist_le_mul f 0).trans $
by rw [nnreal.coe_one, one_mul, dist_zero_right]
end normed_add_comm_group
section has_bounded_smul
/-!
### `has_bounded_smul` (in particular, topological module) structure
In this section, if `β` is a metric space and a `𝕜`-module whose addition and scalar multiplication
are compatible with the metric structure, then we show that the space of bounded continuous
functions from `α` to `β` inherits a so-called `has_bounded_smul` structure (in particular, a
`has_continuous_smul` structure, which is the mathlib formulation of being a topological module), by
using pointwise operations and checking that they are compatible with the uniform distance. -/
variables {𝕜 : Type*} [pseudo_metric_space 𝕜] [topological_space α] [pseudo_metric_space β]
section has_smul
variables [has_zero 𝕜] [has_zero β] [has_smul 𝕜 β] [has_bounded_smul 𝕜 β]
instance : has_smul 𝕜 (α →ᵇ β) :=
{ smul := λ c f,
{ to_continuous_map := c • f.to_continuous_map,
map_bounded' := let ⟨b, hb⟩ := f.bounded in ⟨dist c 0 * b, λ x y, begin
refine (dist_smul_pair c (f x) (f y)).trans _,
refine mul_le_mul_of_nonneg_left _ dist_nonneg,
exact hb x y
end⟩ } }
@[simp] lemma coe_smul (c : 𝕜) (f : α →ᵇ β) : ⇑(c • f) = λ x, c • (f x) := rfl
lemma smul_apply (c : 𝕜) (f : α →ᵇ β) (x : α) : (c • f) x = c • f x := rfl
instance [has_smul 𝕜ᵐᵒᵖ β] [is_central_scalar 𝕜 β] : is_central_scalar 𝕜 (α →ᵇ β) :=
{ op_smul_eq_smul := λ _ _, ext $ λ _, op_smul_eq_smul _ _ }
instance : has_bounded_smul 𝕜 (α →ᵇ β) :=
{ dist_smul_pair' := λ c f₁ f₂, begin
rw dist_le (mul_nonneg dist_nonneg dist_nonneg),
intros x,
refine (dist_smul_pair c (f₁ x) (f₂ x)).trans _,
exact mul_le_mul_of_nonneg_left (dist_coe_le_dist x) dist_nonneg
end,
dist_pair_smul' := λ c₁ c₂ f, begin
rw dist_le (mul_nonneg dist_nonneg dist_nonneg),
intros x,
refine (dist_pair_smul c₁ c₂ (f x)).trans _,
convert mul_le_mul_of_nonneg_left (dist_coe_le_dist x) dist_nonneg,
simp
end }
end has_smul
section mul_action
variables [monoid_with_zero 𝕜] [has_zero β] [mul_action 𝕜 β] [has_bounded_smul 𝕜 β]
instance : mul_action 𝕜 (α →ᵇ β) := fun_like.coe_injective.mul_action _ coe_smul
end mul_action
section distrib_mul_action
variables [monoid_with_zero 𝕜] [add_monoid β] [distrib_mul_action 𝕜 β] [has_bounded_smul 𝕜 β]
variables [has_lipschitz_add β]
instance : distrib_mul_action 𝕜 (α →ᵇ β) :=
function.injective.distrib_mul_action ⟨_, coe_zero, coe_add⟩ fun_like.coe_injective coe_smul
end distrib_mul_action
section module
variables [semiring 𝕜] [add_comm_monoid β] [module 𝕜 β] [has_bounded_smul 𝕜 β]
variables {f g : α →ᵇ β} {x : α} {C : ℝ}
variables [has_lipschitz_add β]
instance : module 𝕜 (α →ᵇ β) :=
function.injective.module _ ⟨_, coe_zero, coe_add⟩ fun_like.coe_injective coe_smul
variables (𝕜)
/-- The evaluation at a point, as a continuous linear map from `α →ᵇ β` to `β`. -/
def eval_clm (x : α) : (α →ᵇ β) →L[𝕜] β :=
{ to_fun := λ f, f x,
map_add' := λ f g, add_apply _ _,
map_smul' := λ c f, smul_apply _ _ _ }
@[simp] lemma eval_clm_apply (x : α) (f : α →ᵇ β) :
eval_clm 𝕜 x f = f x := rfl
variables (α β)
/-- The linear map forgetting that a bounded continuous function is bounded. -/
@[simps]
def to_continuous_map_linear_map : (α →ᵇ β) →ₗ[𝕜] C(α, β) :=
{ to_fun := to_continuous_map,
map_smul' := λ f g, rfl,
map_add' := λ c f, rfl }
end module
end has_bounded_smul
section normed_space
/-!
### Normed space structure
In this section, if `β` is a normed space, then we show that the space of bounded
continuous functions from `α` to `β` inherits a normed space structure, by using
pointwise operations and checking that they are compatible with the uniform distance. -/
variables {𝕜 : Type*}
variables [topological_space α] [seminormed_add_comm_group β]
variables {f g : α →ᵇ β} {x : α} {C : ℝ}
instance [normed_field 𝕜] [normed_space 𝕜 β] : normed_space 𝕜 (α →ᵇ β) := ⟨λ c f, begin
refine norm_of_normed_add_comm_group_le _ (mul_nonneg (norm_nonneg _) (norm_nonneg _)) _,
exact (λ x, trans_rel_right _ (norm_smul _ _)
(mul_le_mul_of_nonneg_left (f.norm_coe_le_norm _) (norm_nonneg _))) end⟩
variables [nontrivially_normed_field 𝕜] [normed_space 𝕜 β]
variables [seminormed_add_comm_group γ] [normed_space 𝕜 γ]
variables (α)
-- TODO does this work in the `has_bounded_smul` setting, too?
/--
Postcomposition of bounded continuous functions into a normed module by a continuous linear map is
a continuous linear map.
Upgraded version of `continuous_linear_map.comp_left_continuous`, similar to
`linear_map.comp_left`. -/
protected def _root_.continuous_linear_map.comp_left_continuous_bounded (g : β →L[𝕜] γ) :
(α →ᵇ β) →L[𝕜] (α →ᵇ γ) :=
linear_map.mk_continuous
{ to_fun := λ f, of_normed_add_comm_group
(g ∘ f)
(g.continuous.comp f.continuous)
(‖g‖ * ‖f‖)
(λ x, (g.le_op_norm_of_le (f.norm_coe_le_norm x))),
map_add' := λ f g, by ext; simp,
map_smul' := λ c f, by ext; simp }
‖g‖
(λ f, norm_of_normed_add_comm_group_le _ (mul_nonneg (norm_nonneg g) (norm_nonneg f)) _)
@[simp] lemma _root_.continuous_linear_map.comp_left_continuous_bounded_apply (g : β →L[𝕜] γ)
(f : α →ᵇ β) (x : α) :
(g.comp_left_continuous_bounded α f) x = g (f x) :=
rfl
end normed_space
section normed_ring
/-!
### Normed ring structure
In this section, if `R` is a normed ring, then we show that the space of bounded
continuous functions from `α` to `R` inherits a normed ring structure, by using
pointwise operations and checking that they are compatible with the uniform distance. -/
variables [topological_space α] {R : Type*}
section non_unital
section semi_normed
variables [non_unital_semi_normed_ring R]
instance : has_mul (α →ᵇ R) :=
{ mul := λ f g, of_normed_add_comm_group (f * g) (f.continuous.mul g.continuous) (‖f‖ * ‖g‖) $ λ x,
le_trans (norm_mul_le (f x) (g x)) $
mul_le_mul (f.norm_coe_le_norm x) (g.norm_coe_le_norm x) (norm_nonneg _) (norm_nonneg _) }
@[simp] lemma coe_mul (f g : α →ᵇ R) : ⇑(f * g) = f * g := rfl
lemma mul_apply (f g : α →ᵇ R) (x : α) : (f * g) x = f x * g x := rfl
instance : non_unital_ring (α →ᵇ R) :=
fun_like.coe_injective.non_unital_ring _ coe_zero coe_add coe_mul coe_neg coe_sub
(λ _ _, coe_nsmul _ _) (λ _ _, coe_zsmul _ _)
instance : non_unital_semi_normed_ring (α →ᵇ R) :=
{ norm_mul := λ f g, norm_of_normed_add_comm_group_le _ (mul_nonneg (norm_nonneg _) (norm_nonneg _))
_,
.. bounded_continuous_function.seminormed_add_comm_group }
end semi_normed
instance [non_unital_normed_ring R] : non_unital_normed_ring (α →ᵇ R) :=
{ .. bounded_continuous_function.non_unital_semi_normed_ring,
.. bounded_continuous_function.normed_add_comm_group }
end non_unital
section semi_normed
variables [semi_normed_ring R]
@[simp] lemma coe_npow_rec (f : α →ᵇ R) : ∀ n, ⇑(npow_rec n f) = f ^ n
| 0 := by rw [npow_rec, pow_zero, coe_one]
| (n + 1) := by rw [npow_rec, pow_succ, coe_mul, coe_npow_rec]
instance has_nat_pow : has_pow (α →ᵇ R) ℕ :=
{ pow := λ f n,
{ to_continuous_map := f.to_continuous_map ^ n,
map_bounded' := by simpa [coe_npow_rec] using (npow_rec n f).map_bounded' } }
@[simp] lemma coe_pow (n : ℕ) (f : α →ᵇ R) : ⇑(f ^ n) = f ^ n := rfl
@[simp] lemma pow_apply (n : ℕ) (f : α →ᵇ R) (v : α) : (f ^ n) v = f v ^ n := rfl
instance : has_nat_cast (α →ᵇ R) :=
⟨λ n, bounded_continuous_function.const _ n⟩
@[simp, norm_cast] lemma coe_nat_cast (n : ℕ) : ((n : α →ᵇ R) : α → R) = n := rfl
instance : has_int_cast (α →ᵇ R) :=
⟨λ n, bounded_continuous_function.const _ n⟩
@[simp, norm_cast] lemma coe_int_cast (n : ℤ) : ((n : α →ᵇ R) : α → R) = n := rfl
instance : ring (α →ᵇ R) :=
fun_like.coe_injective.ring _ coe_zero coe_one coe_add coe_mul coe_neg coe_sub
(λ _ _, coe_nsmul _ _)
(λ _ _, coe_zsmul _ _)
(λ _ _, coe_pow _ _)
coe_nat_cast
coe_int_cast
instance : semi_normed_ring (α →ᵇ R) :=
{ ..bounded_continuous_function.non_unital_semi_normed_ring }
end semi_normed
instance [normed_ring R] : normed_ring (α →ᵇ R) :=
{ ..bounded_continuous_function.non_unital_normed_ring }
end normed_ring
section normed_comm_ring
/-!
### Normed commutative ring structure
In this section, if `R` is a normed commutative ring, then we show that the space of bounded
continuous functions from `α` to `R` inherits a normed commutative ring structure, by using
pointwise operations and checking that they are compatible with the uniform distance. -/
variables [topological_space α] {R : Type*}
instance [semi_normed_comm_ring R] : comm_ring (α →ᵇ R) :=
{ mul_comm := λ f₁ f₂, ext $ λ x, mul_comm _ _,
.. bounded_continuous_function.ring }
instance [semi_normed_comm_ring R] : semi_normed_comm_ring (α →ᵇ R) :=
{ ..bounded_continuous_function.comm_ring, ..bounded_continuous_function.seminormed_add_comm_group }
instance [normed_comm_ring R] : normed_comm_ring (α →ᵇ R) :=
{ .. bounded_continuous_function.comm_ring, .. bounded_continuous_function.normed_add_comm_group }
end normed_comm_ring
section normed_algebra
/-!
### Normed algebra structure
In this section, if `γ` is a normed algebra, then we show that the space of bounded
continuous functions from `α` to `γ` inherits a normed algebra structure, by using
pointwise operations and checking that they are compatible with the uniform distance. -/
variables {𝕜 : Type*} [normed_field 𝕜]
variables [topological_space α] [seminormed_add_comm_group β] [normed_space 𝕜 β]
variables [normed_ring γ] [normed_algebra 𝕜 γ]
variables {f g : α →ᵇ γ} {x : α} {c : 𝕜}
/-- `bounded_continuous_function.const` as a `ring_hom`. -/
def C : 𝕜 →+* (α →ᵇ γ) :=
{ to_fun := λ (c : 𝕜), const α ((algebra_map 𝕜 γ) c),
map_one' := ext $ λ x, (algebra_map 𝕜 γ).map_one,
map_mul' := λ c₁ c₂, ext $ λ x, (algebra_map 𝕜 γ).map_mul _ _,
map_zero' := ext $ λ x, (algebra_map 𝕜 γ).map_zero,
map_add' := λ c₁ c₂, ext $ λ x, (algebra_map 𝕜 γ).map_add _ _ }
instance : algebra 𝕜 (α →ᵇ γ) :=
{ to_ring_hom := C,
commutes' := λ c f, ext $ λ x, algebra.commutes' _ _,
smul_def' := λ c f, ext $ λ x, algebra.smul_def' _ _,
..bounded_continuous_function.module,
..bounded_continuous_function.ring }
@[simp] lemma algebra_map_apply (k : 𝕜) (a : α) :
algebra_map 𝕜 (α →ᵇ γ) k a = k • 1 :=
by { rw algebra.algebra_map_eq_smul_one, refl, }
instance : normed_algebra 𝕜 (α →ᵇ γ) :=
{ ..bounded_continuous_function.normed_space }
/-!
### Structure as normed module over scalar functions
If `β` is a normed `𝕜`-space, then we show that the space of bounded continuous
functions from `α` to `β` is naturally a module over the algebra of bounded continuous
functions from `α` to `𝕜`. -/
instance has_smul' : has_smul (α →ᵇ 𝕜) (α →ᵇ β) :=
⟨λ (f : α →ᵇ 𝕜) (g : α →ᵇ β), of_normed_add_comm_group (λ x, (f x) • (g x))
(f.continuous.smul g.continuous) (‖f‖ * ‖g‖) (λ x, calc
‖f x • g x‖ ≤ ‖f x‖ * ‖g x‖ : norm_smul_le _ _
... ≤ ‖f‖ * ‖g‖ : mul_le_mul (f.norm_coe_le_norm _) (g.norm_coe_le_norm _) (norm_nonneg _)
(norm_nonneg _)) ⟩
instance module' : module (α →ᵇ 𝕜) (α →ᵇ β) :=
module.of_core $
{ smul := (•),
smul_add := λ c f₁ f₂, ext $ λ x, smul_add _ _ _,
add_smul := λ c₁ c₂ f, ext $ λ x, add_smul _ _ _,
mul_smul := λ c₁ c₂ f, ext $ λ x, mul_smul _ _ _,
one_smul := λ f, ext $ λ x, one_smul 𝕜 (f x) }
/- TODO: When `normed_module` has been added to `normed_space.basic`, the above facts
show that the space of bounded continuous functions from `α` to `β` is naturally a normed
module over the algebra of bounded continuous functions from `α` to `𝕜`. -/
end normed_algebra
lemma nnreal.upper_bound {α : Type*} [topological_space α]
(f : α →ᵇ ℝ≥0) (x : α) : f x ≤ nndist f 0 :=
begin
have key : nndist (f x) ((0 : α →ᵇ ℝ≥0) x) ≤ nndist f 0,
{ exact @dist_coe_le_dist α ℝ≥0 _ _ f 0 x, },
simp only [coe_zero, pi.zero_apply] at key,
rwa nnreal.nndist_zero_eq_val' (f x) at key,
end
/-!
### Star structures
In this section, if `β` is a normed ⋆-group, then so is the space of bounded
continuous functions from `α` to `β`, by using the star operation pointwise.
If `𝕜` is normed field and a ⋆-ring over which `β` is a normed algebra and a
star module, then the space of bounded continuous functions from `α` to `β`
is a star module.
If `β` is a ⋆-ring in addition to being a normed ⋆-group, then `α →ᵇ β`
inherits a ⋆-ring structure.
In summary, if `β` is a C⋆-algebra over `𝕜`, then so is `α →ᵇ β`; note that
completeness is guaranteed when `β` is complete (see
`bounded_continuous_function.complete`). -/
section normed_add_comm_group
variables {𝕜 : Type*} [normed_field 𝕜] [star_ring 𝕜] [topological_space α]
[seminormed_add_comm_group β] [star_add_monoid β] [normed_star_group β]
variables [normed_space 𝕜 β] [star_module 𝕜 β]
instance : star_add_monoid (α →ᵇ β) :=
{ star := λ f, f.comp star star_normed_add_group_hom.lipschitz,
star_involutive := λ f, ext $ λ x, star_star (f x),
star_add := λ f g, ext $ λ x, star_add (f x) (g x) }
/-- The right-hand side of this equality can be parsed as `star ∘ ⇑f` because of the
instance `pi.has_star`. Upon inspecting the goal, one sees `⊢ ⇑(star f) = star ⇑f`. -/
@[simp] lemma coe_star (f : α →ᵇ β) : ⇑(star f) = star f := rfl
@[simp] lemma star_apply (f : α →ᵇ β) (x : α) : star f x = star (f x) := rfl
instance : normed_star_group (α →ᵇ β) :=
{ norm_star := λ f, by simp only [norm_eq, star_apply, norm_star] }
instance : star_module 𝕜 (α →ᵇ β) :=
{ star_smul := λ k f, ext $ λ x, star_smul k (f x) }
end normed_add_comm_group
section cstar_ring
variables [topological_space α]
variables [non_unital_normed_ring β] [star_ring β]
instance [normed_star_group β] : star_ring (α →ᵇ β) :=
{ star_mul := λ f g, ext $ λ x, star_mul (f x) (g x),
..bounded_continuous_function.star_add_monoid }
variable [cstar_ring β]
instance : cstar_ring (α →ᵇ β) :=
{ norm_star_mul_self :=
begin
intro f,
refine le_antisymm _ _,
{ rw [←sq, norm_le (sq_nonneg _)],
dsimp [star_apply],
intro x,
rw [cstar_ring.norm_star_mul_self, ←sq],
refine sq_le_sq' _ _,
{ linarith [norm_nonneg (f x), norm_nonneg f] },
{ exact norm_coe_le_norm f x }, },
{ rw [←sq, ←real.le_sqrt (norm_nonneg _) (norm_nonneg _), norm_le (real.sqrt_nonneg _)],
intro x,
rw [real.le_sqrt (norm_nonneg _) (norm_nonneg _), sq, ←cstar_ring.norm_star_mul_self],
exact norm_coe_le_norm (star f * f) x }
end }
end cstar_ring
section normed_lattice_ordered_group
variables [topological_space α] [normed_lattice_add_comm_group β]
instance : partial_order (α →ᵇ β) := partial_order.lift (λ f, f.to_fun) (by tidy)
/--
Continuous normed lattice group valued functions form a meet-semilattice
-/
instance : semilattice_inf (α →ᵇ β) :=
{ inf := λ f g,
{ to_fun := λ t, f t ⊓ g t,
continuous_to_fun := f.continuous.inf g.continuous,
map_bounded' := begin
obtain ⟨C₁, hf⟩ := f.bounded,
obtain ⟨C₂, hg⟩ := g.bounded,
refine ⟨C₁ + C₂, λ x y, _⟩,
simp_rw normed_add_comm_group.dist_eq at hf hg ⊢,
exact (norm_inf_sub_inf_le_add_norm _ _ _ _).trans (add_le_add (hf _ _) (hg _ _)),
end },
inf_le_left := λ f g, continuous_map.le_def.mpr (λ _, inf_le_left),
inf_le_right := λ f g, continuous_map.le_def.mpr (λ _, inf_le_right),
le_inf := λ f g₁ g₂ w₁ w₂, continuous_map.le_def.mpr (λ _, le_inf (continuous_map.le_def.mp w₁ _)
(continuous_map.le_def.mp w₂ _)),
..bounded_continuous_function.partial_order }
instance : semilattice_sup (α →ᵇ β) :=
{ sup := λ f g,
{ to_fun := λ t, f t ⊔ g t,
continuous_to_fun := f.continuous.sup g.continuous,
map_bounded' := begin
obtain ⟨C₁, hf⟩ := f.bounded,
obtain ⟨C₂, hg⟩ := g.bounded,
refine ⟨C₁ + C₂, λ x y, _⟩,
simp_rw normed_add_comm_group.dist_eq at hf hg ⊢,
exact (norm_sup_sub_sup_le_add_norm _ _ _ _).trans (add_le_add (hf _ _) (hg _ _)),
end },
le_sup_left := λ f g, continuous_map.le_def.mpr (λ _, le_sup_left),
le_sup_right := λ f g, continuous_map.le_def.mpr (λ _, le_sup_right),
sup_le := λ f g₁ g₂ w₁ w₂, continuous_map.le_def.mpr (λ _, sup_le (continuous_map.le_def.mp w₁ _)
(continuous_map.le_def.mp w₂ _)),
..bounded_continuous_function.partial_order }
instance : lattice (α →ᵇ β) :=
{ .. bounded_continuous_function.semilattice_sup, .. bounded_continuous_function.semilattice_inf }
@[simp] lemma coe_fn_sup (f g : α →ᵇ β) : ⇑(f ⊔ g) = f ⊔ g := rfl
@[simp] lemma coe_fn_abs (f : α →ᵇ β) : ⇑|f| = |f| := rfl
instance : normed_lattice_add_comm_group (α →ᵇ β) :=
{ add_le_add_left := begin
intros f g h₁ h t,
simp only [coe_to_continuous_fun, pi.add_apply, add_le_add_iff_left, coe_add,
continuous_map.to_fun_eq_coe],
exact h₁ _,
end,
solid :=
begin
intros f g h,
have i1: ∀ t, ‖f t‖ ≤ ‖g t‖ := λ t, solid (h t),
rw norm_le (norm_nonneg _),
exact λ t, (i1 t).trans (norm_coe_le_norm g t),
end,
..bounded_continuous_function.lattice, ..bounded_continuous_function.seminormed_add_comm_group }
end normed_lattice_ordered_group
section nonnegative_part
variables [topological_space α]
/-- The nonnegative part of a bounded continuous `ℝ`-valued function as a bounded
continuous `ℝ≥0`-valued function. -/
def nnreal_part (f : α →ᵇ ℝ) : α →ᵇ ℝ≥0 :=
bounded_continuous_function.comp _
(show lipschitz_with 1 real.to_nnreal, from lipschitz_with_pos) f
@[simp] lemma nnreal_part_coe_fun_eq (f : α →ᵇ ℝ) : ⇑(f.nnreal_part) = real.to_nnreal ∘ ⇑f := rfl
/-- The absolute value of a bounded continuous `ℝ`-valued function as a bounded
continuous `ℝ≥0`-valued function. -/
def nnnorm (f : α →ᵇ ℝ) : α →ᵇ ℝ≥0 :=
bounded_continuous_function.comp _
(show lipschitz_with 1 (λ (x : ℝ), ‖x‖₊), from lipschitz_with_one_norm) f
@[simp] lemma nnnorm_coe_fun_eq (f : α →ᵇ ℝ) : ⇑(f.nnnorm) = has_nnnorm.nnnorm ∘ ⇑f := rfl
/-- Decompose a bounded continuous function to its positive and negative parts. -/
lemma self_eq_nnreal_part_sub_nnreal_part_neg (f : α →ᵇ ℝ) :
⇑f = coe ∘ f.nnreal_part - coe ∘ (-f).nnreal_part :=
by { funext x, dsimp, simp only [max_zero_sub_max_neg_zero_eq_self], }
/-- Express the absolute value of a bounded continuous function in terms of its
positive and negative parts. -/
lemma abs_self_eq_nnreal_part_add_nnreal_part_neg (f : α →ᵇ ℝ) :
abs ∘ ⇑f = coe ∘ f.nnreal_part + coe ∘ (-f).nnreal_part :=
by { funext x, dsimp, simp only [max_zero_add_max_neg_zero_eq_abs_self], }
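-- Illustrative example (a sketch): unfolding the definition, `nnreal_part`
-- applies `real.to_nnreal` pointwise, so this should hold definitionally.
example (f : α →ᵇ ℝ) (x : α) : f.nnreal_part x = real.to_nnreal (f x) := rfl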
end nonnegative_part
end bounded_continuous_function
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/topology/continuous_function/bounded.lean"}
|
#!/usr/bin/env python
import argparse
import cartopy.crs
import datetime
import matplotlib.pyplot as plt
import metpy
import metpy.calc as mcalc
from metpy.units import units
import numpy as np
import os
import pdb
import pickle
import s3fs
import scipy.ndimage  # the scipy.ndimage.filters namespace is deprecated; the filters live at the top level
from scipy import spatial
import sys
import xarray
### THIS CODE EVOLVED FROM CODE WITHIN /glade/u/home/sobash/NSC_scripts
### TO UPSCALE 3-KM CAM DATA TO AN 80-KM GRID
def get_closest_gridbox():
### find closest 3-km or 1-km grid point to each 80-km grid point
gpfname = f'{odir}/NSC_objects/nngridpts_80km_{model}.pk'
nngridpts = pickle.load(open(gpfname, 'rb'), encoding='bytes')
return nngridpts
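
# Note: nngridpts is assumed (per the pickle produced upstream) to be a tuple whose
# second element is a flat array of source-grid indices, one per 80-km grid box, so
#   field.flatten()[nngridpts[1]].reshape((65, 93))
# samples the nearest 3-km/1-km neighbor for each of the 65x93 80-km points
# (see scipyfilt below).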
def scipyfilt(da):
this_field = da.name
print(f"upscaling {this_field}")
da = da.astype(np.float32) # avoid *** RuntimeError: array type dtype('float16') not supported from scipy.ndimage
da = da.fillna(0) # slows things down 10% but needed because filtered "level_of_adiabatic_condensation_from_sfc-HGT" and "surface-HAIL_1hr_max_fcst" are all NaN otherwise.
#assert da.isnull().sum() == 0, "found null in "+this_field
# use maximum for certain fields, mean for others
if this_field in ['MAXUVV_1hr_max_fcst', 'MAXREF_1hr_max_fcst', 'WIND_1hr_max_fcst', 'MXUPHL_1hr_max_fcst', 'HAIL_1hr_max_fcst']:
        field = scipy.ndimage.maximum_filter(da, size=(1,27,27), mode='reflect') # default mode='reflect' (d c b a | a b c d | d c b a) The input is extended by reflecting about the edge of the last pixel. This mode is also sometimes referred to as half-sample symmetric
elif this_field in ['MAXDVV_1hr_max_fcst', 'MNUPHL_1hr_min_fcst']:
        field = scipy.ndimage.minimum_filter(da, size=(1,27,27))
else:
        field = scipy.ndimage.uniform_filter(da, size=(1,27,27))
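    # Footprint note: with 3-km HRRR spacing, a 27x27 window spans about 81 km,
    # i.e. roughly one 80-km target grid box; size=(1,27,27) leaves the time
    # dimension unfiltered.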
#print(da.isnull().sum())
#print(da.to_dataframe().describe())
#da.plot(col="time", col_wrap=8)
#plt.savefig(f"{this_field}.png")
#print(xarray.DataArray(data=field,name=this_field).to_dataframe().describe())
#print(np.isnan(field).sum())
# Timed one-line alternative below. (Flatten x and y dimensions of field, slice nngridpts[1] through all times, and reshape) but for loop is 10% faster
# field_interp = field.reshape((da.time.size, -1))[:,nngridpts[1]].reshape((da.time.size,65,93))
field_interp = np.empty((da.time.size,65,93))
for t,_ in enumerate(field):
field_interp[t] = field[t].flatten()[nngridpts[1]].reshape((65,93))
ds = xarray.Dataset(data_vars={this_field:(da.dims,field_interp)})
ds[this_field].attrs.update(da.attrs)
return ds
def rename_upscale(ds): # handle multiple levels that share a variable name
# rename dataarray so it includes the level.
# Otherwise, ValueError: Could not find any dimension coordinates to use to order the datasets for concatenation.
# If you have two CAPEs from different levels.
# use long_name attribute instead. It has the level and name.
# for example: CAPE -> 0_3000m_above_ground/CAPE or CAPE -> surface/CAPE
# Don't try to rename forecast time variables
assert 'forecast_period' not in ds.data_vars
for da in ds:
long_name = ds[da].attrs['long_name']
long_name = long_name.replace("/","-") # for netCDF and saving files based on name
ds = ds.rename({da:long_name})
return scipyfilt(ds[long_name])
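
# Example of the renaming (based on the comments above): a DataArray named "CAPE"
# with long_name "surface/CAPE" becomes "surface-CAPE" before filtering, so CAPE
# fields from different levels no longer collide during concatenation and merging.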
def upscale_forecast(upscaled_field_list,nngridpts,debug=False):
# Open HRRR-ZARR forecasts and return xarray Dataset.
# Ignores analysis fhr=0 (_anl.zarr).
fs = s3fs.S3FileSystem(anon=True)
level, variable = upscaled_field_list[0]
# url without final level subdirectory has time, projection_x_coordinate, forecast_period, and forecast_reference_time
coord_url = os.path.join('s3://hrrrzarr/sfc', sdate.strftime("%Y%m%d/%Y%m%d_%Hz_fcst.zarr"), level, variable)
coord_ds = xarray.open_dataset(s3fs.S3Map(coord_url, s3=fs), engine='zarr')
# Sanity check - Throw error if reference time does not equal requested date.
# Some forecasts are repeats of the previous forecast hour. For example: if you request 2020121707 you get a repeat of 2020121706.
# Same with 2020121713 2020121719 2021012819 2021030101 2021030113 2021030119 2021030307 2021030313 2021030413
# 20210306 7 13 19
# 20210307 7
# 20210308 7
# 20210309 7 13 19
# 20210310 1 19
# 20210311 1
# Alerted Adair Kovac <u1334098@utah.edu>, JAMES TERRY POWELL <u1269218@utah.edu>, "atmos-mesowest@lists.utah.edu" <atmos-mesowest@lists.utah.edu>
# and they confirmed a manual error and will redo them. Oct 15, 2021.
assert coord_ds.forecast_reference_time == np.datetime64(sdate), f"Unexpected forecast_reference_time: {coord_ds.forecast_reference_time.values}. requested {sdate}."
coord_ds = coord_ds.drop(labels=["projection_x_coordinate", "projection_y_coordinate"]) # projection coordinates will be different after upscaling
urls = [os.path.join('s3://hrrrzarr/sfc', sdate.strftime("%Y%m%d/%Y%m%d_%Hz_fcst.zarr"), level, variable, level) for (level, variable) in upscaled_field_list]
if debug:
# Grab url at a time to isolate cause of zarr.errors.GroupNotFoundError: group not found at path ''.
# Or, instead of opening datasets one at a time with debug, modify the zarr code to show the url with the error message.
# Add group.root to GroupNotFoundError argument in file /lib/python3.7/site-packages/zarr/hierarchy.py, line 1167, in open_group:
# raise GroupNotFoundError(store.root+path)
for url in urls:
print(f"getting {url}")
ds = xarray.open_dataset(s3fs.S3Map(url, s3=fs), engine='zarr')
print(f"opening {len(urls)} {model} urls, {len(coord_ds.time)} forecast times")
# if parallel=True and nvars>19 on casper, RuntimeError: can't start new thread.
# casperexec run times with 50 vars
# ncpus runtime
# 1 3:00
# 2 2:00
# 3 1:45
# 4 1:43
# 5 1:40
# 6 2:45
# 10 6:00
# 20 >10:00
ds = xarray.open_mfdataset([s3fs.S3Map(url, s3=fs) for url in urls], engine='zarr', preprocess=rename_upscale, parallel=True)
# Swap forecast_period with time coordinate so output files may be concatenated along forecast_reference_time and aligned along forecast_period.
coord_ds = coord_ds.swap_dims({"time": "forecast_period"})
ds = ds.rename({"time":"forecast_period"}).merge(coord_ds) # Rename "time" "forecast_period" and merge with coord_ds.
return ds
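
# Usage sketch (illustrative; the field pair here is just an example): given a
# list of (level, variable) pairs,
#   ds = upscale_forecast([("surface", "CAPE")], nngridpts, debug=True)
# returns an xarray.Dataset of 80-km fields keyed by "level-VARIABLE" long names,
# indexed by forecast_period.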
# =============Arguments===================
parser = argparse.ArgumentParser(description = "Read HRRR-ZARR, upscale, save", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("sdate", type=str, help='start date YYYYMMDDHH format')
parser.add_argument("--clobber", action="store_true", help='clobber existing output')
parser.add_argument("--debug", action="store_true", help='debug mode')
parser.add_argument("--npz", action="store_true", help='save compressed numpy file')
parser.add_argument("--parquet", action="store_true", help='save parquet file')
args = parser.parse_args()
clobber = args.clobber
debug = args.debug
npz = args.npz
parquet = args.parquet
sdate = datetime.datetime.strptime(args.sdate, '%Y%m%d%H')
model = 'HRRR-ZARR'
odir = "/glade/work/" + os.getenv("USER")
ofile = f"{odir}/NSC_objects/HRRR/{sdate.strftime('%Y%m%d%H')}_{model}_upscaled.nc"
if os.path.exists(ofile) and not clobber:
print(ofile, "already exists. Exiting. Use --clobber option to override")
sys.exit(1)
# get closest grid boxes
nngridpts = get_closest_gridbox()
upscaled_field_list = [
("0_1000m_above_ground", "VUCSH"),
("0_1000m_above_ground", "VVCSH"),
#("0_3000m_above_ground", "CAPE"), # no 0_3000m_above_ground/CIN. CAPE truncated at 3000m AGL, or low-level CAPE?
("0_6000m_above_ground", "VUCSH"),
("0_6000m_above_ground", "VVCSH"),
("0C_isotherm", "HGT"),
("100_1000mb_above_ground", "MAXDVV_1hr_max_fcst"),
("100_1000mb_above_ground", "MAXUVV_1hr_max_fcst"),
("1000_0m_above_ground", "HLCY"),
("1000_0m_above_ground", "RELV_1hr_max_fcst"),
("1000m_above_ground", "MAXREF_1hr_max_fcst"),
("1000m_above_ground", "REFD"),
("10m_above_ground", "MAXUW_1hr_max_fcst"),
("10m_above_ground", "MAXVW_1hr_max_fcst"),
("10m_above_ground", "WIND_1hr_max_fcst"),
#("180_0mb_above_ground", "CAPE"), # Associated with Best(4-layer) Lifted Index. not mucape or mlcape.
#("180_0mb_above_ground", "CIN"), # Associated with Best(4-layer) Lifted Index. not mucape or mlcape.
("255_0mb_above_ground", "CAPE"), # mucape
("255_0mb_above_ground", "CIN"), # mucin
("2m_above_ground", "DPT"),
("2m_above_ground", "SPFH"),
("2m_above_ground", "TMP"),
("3000_0m_above_ground", "HLCY"),
("3000_0m_above_ground", "MNUPHL_1hr_min_fcst"),
("3000_0m_above_ground", "MXUPHL_1hr_max_fcst"),
("4000m_above_ground", "REFD"),
("500mb", "DPT"),
("500mb", "HGT"),
("500mb", "TMP"),
("500mb", "UGRD"),
("500mb", "VGRD"),
("5000_2000m_above_ground", "MXUPHL_1hr_max_fcst"),
("5000_2000m_above_ground", "MNUPHL_1hr_min_fcst"),
("700mb", "DPT"),
("700mb", "HGT"),
("700mb", "TMP"),
("700mb", "UGRD"),
("700mb", "VGRD"),
("850mb", "DPT"),
("850mb", "TMP"),
("850mb", "UGRD"),
("850mb", "VGRD"),
("90_0mb_above_ground", "CAPE"), # mlcape
("90_0mb_above_ground", "CIN"), # mlcinh
("925mb", "DPT"),
("925mb", "TMP"),
("925mb", "UGRD"),
("925mb", "VGRD"),
("entire_atmosphere", "HAIL_1hr_max_fcst"),
("entire_atmosphere", "REFC"),
("entire_atmosphere_single_layer", "TCOLG_1hr_max_fcst"),
("level_of_adiabatic_condensation_from_sfc", "HGT"),
("surface", "APCP_1hr_acc_fcst"),
("surface", "CAPE"),
("surface", "CIN"),
("surface", "HAIL_1hr_max_fcst"),
("surface", "PRES"),
("surface", "PRATE")
]
fields_are_unique = len(set(upscaled_field_list)) == len(upscaled_field_list)
assert fields_are_unique, set([x for x in upscaled_field_list if upscaled_field_list.count(x) > 1])
upscaled_fields = upscale_forecast(upscaled_field_list,nngridpts,debug=debug)
derive_fields = len(upscaled_fields) > 10 # skip derivations when debugging with a short, incomplete field list
if derive_fields:
upscaled_fields = upscaled_fields.metpy.quantify()
print("Derive fields")
upscaled_fields["0_1000m_above_ground-VSH"] = (upscaled_fields["0_1000m_above_ground-VUCSH"]**2 + upscaled_fields["0_1000m_above_ground-VVCSH"]**2)**0.5 * units["m/s"] # warned mesowest about VUCSH and VVCSH not having units
upscaled_fields["0_6000m_above_ground-VSH"] = (upscaled_fields["0_6000m_above_ground-VUCSH"]**2 + upscaled_fields["0_6000m_above_ground-VVCSH"]**2)**0.5 * units["m/s"] # warned mesowest about VUCSH and VVCSH not having units
print(upscaled_fields["level_of_adiabatic_condensation_from_sfc-HGT"].to_dataframe().describe())
upscaled_fields["STP"] = mcalc.significant_tornado(upscaled_fields["surface-CAPE"], upscaled_fields["level_of_adiabatic_condensation_from_sfc-HGT"],
upscaled_fields["1000_0m_above_ground-HLCY"], upscaled_fields["0_6000m_above_ground-VSH"])
upscaled_fields["LR75"] = ( upscaled_fields["500mb-TMP"] - upscaled_fields["700mb-TMP"] ) / ( upscaled_fields["500mb-HGT"] - upscaled_fields["700mb-HGT"] )
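    # Sign convention note: as written, LR75 is dT/dz between 700 and 500 hPa,
    # which is negative in a normally stratified troposphere; applications that
    # expect a positive lapse rate would use -LR75.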
upscaled_fields["CAPESHEAR"] = upscaled_fields["0_6000m_above_ground-VSH"] * upscaled_fields["90_0mb_above_ground-CAPE"] #mlcape
upscaled_fields = upscaled_fields.metpy.dequantify()
# TODO: deal with missing surface/PRES in these forecast_reference_times. They result in an error and prevent any of the dataset from being loaded.
# 20201211 - 02, 03, 04, 05, 07, 08, 09, 10, 11, 13, 14, 15, 16, 18 ,19
# 20201217 - 00
# Save upscaled data to file. There are tradeoffs to each format.
# .npz is fast and compact but has little metadata.
# parquet has variable names and coordinate names and can write half-precision (2-byte) floats, like HRRR-ZARR, but no long_name or units.
# netCDF can't handle np.float16, or slash characters in variable names, but it handles np.float32 (twice the filesize of parquet).
# netCDF has named variables, coordinates, long_names and units.
# Use netcdf but strip masked pts for efficiency.
# Caveat: forecast_reference_time must remain 64-bit, so set aside before converting Dataset to np.float16 or np.float32.
# Assigning it as a coordinate preserves its dtype and it gives open_mfdataset a dimension to concatenate along.
upscaled_fields = upscaled_fields.assign_coords(forecast_reference_time=(upscaled_fields.forecast_reference_time)).expand_dims(dim="forecast_reference_time")
# Stack x and y into 1-D pts coordinate
upscaled_fields = upscaled_fields.stack(pts=("projection_y_coordinate","projection_x_coordinate"))
# Drop masked pts before saving--reduces file size by 75%
maskfile = '/glade/u/home/sobash/2013RT/usamask.pk'
mask = pickle.load(open(maskfile,'rb'))
upscaled_fields.coords["mask"] = ("pts", mask) # TODO: add lat-lon and/or projection information
# reset_index multi-index level "pts" or NotImplementedError: isna is not defined for MultiIndex from .to_dataframe().to_parquet(). Also, to_netcdf() can't save MultiIndex.
upscaled_fields = upscaled_fields.where(upscaled_fields.mask, drop=True).reset_index("pts")
upscaled_fields.attrs = {
"command": " ".join(sys.argv) + f" by {os.getenv('USER')} at {datetime.datetime.now()}",
"description":f"unmasked NCEP grid 211 (80km) points over usa maskfile {maskfile}"
}
# TODO: remove attribute coordinates = "projection_x_coordinate time projection_y_coordinate mask" from DataArrays?
root, ext = os.path.splitext(ofile)
if parquet:
ds = upscaled_fields.astype(np.float16)
ds.to_dataframe().to_parquet(root+".par")
print("saved", root+".par")
if npz:
ds = upscaled_fields.astype(np.float16)
np.savez_compressed(root+".npz", a=ds.to_dict())
print("saved", root+".npz")
upscaled_fields = upscaled_fields.astype(np.float32) # less disk space. HRRR-ZARR was even less precise, with np.float16, but to_netcdf() needs np.float32.
encoding = {x:{"zlib":True} for x in upscaled_fields.data_vars}
upscaled_fields.to_netcdf(ofile, encoding=encoding, unlimited_dims=["forecast_reference_time"])
print("saved", f"{os.path.realpath(ofile)}")
|
{"hexsha": "c6362cda774fbfd33d318ba8e3929f96cb954732", "size": 14720, "ext": "py", "lang": "Python", "max_stars_repo_path": "upscale_HRRR-ZARR.py", "max_stars_repo_name": "ahijevyc/NSC_objects", "max_stars_repo_head_hexsha": "322728a71ec011b681b0038e9dcd86df1f73b2fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "upscale_HRRR-ZARR.py", "max_issues_repo_name": "ahijevyc/NSC_objects", "max_issues_repo_head_hexsha": "322728a71ec011b681b0038e9dcd86df1f73b2fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "upscale_HRRR-ZARR.py", "max_forks_repo_name": "ahijevyc/NSC_objects", "max_forks_repo_head_hexsha": "322728a71ec011b681b0038e9dcd86df1f73b2fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.384341637, "max_line_length": 278, "alphanum_fraction": 0.6985733696, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 4238}
|
function inspect_menu_viewpoint
% Some of the FT_PLOT_XXX functions that return a 3D object support a
% right-mouse-click context menu with which you can select
% top/bottom/left/right/front/back. This functionality requires that the object being
% plotted has a known coordinate system.
% note that the objects come from different sources, so they do not align perfectly
figure
%%
elec = ft_read_sens('GSN-HydroCel-128.sfp');
elec = ft_convert_units(elec, 'mm');
elec.coordsys = 'ras';
ft_plot_sens(elec, 'label', 'label'); axis off
%%
mesh = ft_read_headshape('cortex_8196.surf.gii');
mesh.coordsys = 'mni';
ft_plot_mesh(mesh, 'facealpha', 0.5, 'unit', 'mm')
lighting gouraud
%%
% dipole pointing to anterior and superior
ft_plot_dipole([40 0 50], [0 1 1], 'coordsys', 'ras', 'unit', 'mm', 'axes', true)
%%
ft_plot_topo3d(elec.elecpos, elec.elecpos(:,3), 'coordsys', 'ras', 'unit', 'mm', 'axes', true)
|
{"author": "fieldtrip", "repo": "fieldtrip", "sha": "c2039be598a02d86b39aae76bfa7aaa720f9801c", "save_path": "github-repos/MATLAB/fieldtrip-fieldtrip", "path": "github-repos/MATLAB/fieldtrip-fieldtrip/fieldtrip-c2039be598a02d86b39aae76bfa7aaa720f9801c/test/inspect_menu_viewpoint.m"}
|
C
C $Id: gxmdef.f,v 1.4 2008-07-27 00:21:03 haley Exp $
C
C Copyright (C) 2000
C University Corporation for Atmospheric Research
C All Rights Reserved
C
C The use of this Software is governed by a License Agreement.
C
SUBROUTINE GXMDEF
C
C Set all the current values to default.
C
include 'trdefl.h'
include 'trstat.h'
C
INTEGER II, TLNCOL, TTXCOL, TFLCOL
C
C Set the default polyline information.
C
POLIDX = POLIDF
LINTYP = LINTDF
LINWTH = LINWDF
TLNCOL = TLNCDF
C
C Set the default polymarker information.
C
MARIDX = MARIDF
MARTYP = MARTDF
MARSIZ = MARSDF
C
C Set the default TEXT information.
C
HORIZ = HORIDF
VERT = VERTDF
PATH = PATHDF
CHIGHT = CHIGDF
XU = XUDF
YU = YUDF
XB = XBDF
YB = YBDF
TXTIDX = TXTIDF
TTXCOL = TTXCDF
TXTPRE = TXTPDF
CEXPN = CEXPDF
CSPACE = CSPADF
C
C Set the default POLYGON information.
C
FILIDX = FILIDF
INTSTL = INTSDF
HATIDX = HATIDF
PATIDX = PATIDF
FILRPT(1) = FILRDF(1)
      FILRPT(2) = FILRDF(2)
TFLCOL = TFLCDF
FILCOL = TFLCOL
TXTCOL = TTXCOL
LINCOL = TLNCOL
C
C Set the aspect source flags.
C
DO 160 II=1,ASFMAX
ASFSRF(II) = ASFSDF(II)
160 CONTINUE
C
RETURN
END
|
{"hexsha": "ae0f9aa89dbca9613c06b8a7d4dad6a51d75a980", "size": 1448, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ncarg2d/src/libncarg_gks/awi/gxmdef.f", "max_stars_repo_name": "tenomoto/ncl", "max_stars_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 210, "max_stars_repo_stars_event_min_datetime": "2016-11-24T09:05:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T19:15:32.000Z", "max_issues_repo_path": "ncarg2d/src/libncarg_gks/awi/gxmdef.f", "max_issues_repo_name": "tenomoto/ncl", "max_issues_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 156, "max_issues_repo_issues_event_min_datetime": "2017-09-22T09:56:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:02:21.000Z", "max_forks_repo_path": "ncarg2d/src/libncarg_gks/awi/gxmdef.f", "max_forks_repo_name": "tenomoto/ncl", "max_forks_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2016-12-14T00:15:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T09:13:00.000Z", "avg_line_length": 20.9855072464, "max_line_length": 71, "alphanum_fraction": 0.5662983425, "num_tokens": 497}
|