text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
section \<open>Well-Ordered Strategy\<close>

theory WellOrderedStrategy
imports
  Main
  Strategy
begin

text \<open>
Constructing a uniform strategy from a set of strategies on a set of nodes often works by
well-ordering the strategies and then choosing the minimal strategy on each node.
Then every path eventually follows one strategy because we choose the strategies along the path
to be non-increasing in the well-ordering.
The following locale formalizes this idea.
We will use this to construct uniform attractor and winning strategies.
\<close>

(* The locale parameters: S is the set of nodes under consideration, p the player,
   "good v" the set of strategies acceptable on node v, and r a well-order on all
   strategies that are good somewhere. *)
locale WellOrderedStrategies = ParityGame +
  fixes S :: "'a set"
    and p :: Player
    \<comment> \<open>The set of good strategies on a node @{term v}\<close>
    and good :: "'a \<Rightarrow> 'a Strategy set"
    and r :: "('a Strategy \<times> 'a Strategy) set"
  assumes S_V: "S \<subseteq> V"
    \<comment> \<open>@{term r} is a wellorder on the set of all strategies which are good somewhere.\<close>
    and r_wo: "well_order_on {\<sigma>. \<exists>v \<in> S. \<sigma> \<in> good v} r"
    \<comment> \<open>Every node has a good strategy.\<close>
    and good_ex: "\<And>v. v \<in> S \<Longrightarrow> \<exists>\<sigma>. \<sigma> \<in> good v"
    \<comment> \<open>good strategies are well-formed strategies.\<close>
    and good_strategies: "\<And>v \<sigma>. \<sigma> \<in> good v \<Longrightarrow> strategy p \<sigma>"
    \<comment> \<open>A good strategy on @{term v} is also good on possible successors of @{term v}.\<close>
    and strategies_continue: "\<And>v w \<sigma>. \<lbrakk> v \<in> S; v\<rightarrow>w; v \<in> VV p \<Longrightarrow> \<sigma> v = w; \<sigma> \<in> good v \<rbrakk> \<Longrightarrow> \<sigma> \<in> good w"
begin

text \<open>The set of all strategies which are good somewhere.\<close>
abbreviation "Strategies \<equiv> {\<sigma>. \<exists>v \<in> S. \<sigma> \<in> good v}"

(* A strategy is minimal good on v if it is good on v and no strictly r-smaller
   strategy is good on v. *)
definition minimal_good_strategy where
  "minimal_good_strategy v \<sigma> \<equiv> \<sigma> \<in> good v \<and> (\<forall>\<sigma>'. (\<sigma>', \<sigma>) \<in> r - Id \<longrightarrow> \<sigma>' \<notin> good v)"

(* Free the name "choose" from the binomial-coefficient notation so we can reuse it. *)
no_notation binomial (infixl "choose" 65)

text \<open>Among the good strategies on @{term v}, choose the minimum.\<close>
definition choose where
  "choose v \<equiv> THE \<sigma>. minimal_good_strategy v \<sigma>"

text \<open>
Define a strategy which uses the minimum strategy on all nodes of @{term S}.
Of course, we need to prove that this is a well-formed strategy.
\<close>
definition well_ordered_strategy where
  "well_ordered_strategy \<equiv> override_on \<sigma>_arbitrary (\<lambda>v. choose v v) S"

text \<open>Show some simple properties of the binary relation @{term r} on the set @{const Strategies}.\<close>

lemma r_refl [simp]: "refl_on Strategies r"
  using r_wo unfolding well_order_on_def linear_order_on_def partial_order_on_def preorder_on_def by blast

lemma r_total [simp]: "total_on Strategies r"
  using r_wo unfolding well_order_on_def linear_order_on_def by blast

lemma r_trans [simp]: "trans r"
  using r_wo unfolding well_order_on_def linear_order_on_def partial_order_on_def preorder_on_def by blast

lemma r_wf [simp]: "wf (r - Id)"
  using well_order_on_def r_wo by blast

text \<open>@{const choose} always chooses a minimal good strategy on @{term S}.\<close>
(* Existence comes from well-foundedness of r - Id; uniqueness from totality of r
   on Strategies, so THE in the definition of choose is well-defined. *)
lemma choose_works:
  assumes "v \<in> S"
  shows "minimal_good_strategy v (choose v)"
proof-
  have wf: "wf (r - Id)" using well_order_on_def r_wo by blast
  obtain \<sigma> where \<sigma>1: "minimal_good_strategy v \<sigma>"
    unfolding minimal_good_strategy_def by (meson good_ex[OF \<open>v \<in> S\<close>] wf wf_eq_minimal)
  hence \<sigma>: "\<sigma> \<in> good v" "\<And>\<sigma>'. (\<sigma>', \<sigma>) \<in> r - Id \<Longrightarrow> \<sigma>' \<notin> good v"
    unfolding minimal_good_strategy_def by auto
  (* Any two minimal good strategies on v coincide: neither can be strictly below
     the other, and r is linear on Strategies. *)
  { fix \<sigma>' assume "minimal_good_strategy v \<sigma>'"
    hence \<sigma>': "\<sigma>' \<in> good v" "\<And>\<sigma>. (\<sigma>, \<sigma>') \<in> r - Id \<Longrightarrow> \<sigma> \<notin> good v"
      unfolding minimal_good_strategy_def by auto
    have "(\<sigma>, \<sigma>') \<notin> r - Id" using \<sigma>(1) \<sigma>'(2) by blast
    moreover have "(\<sigma>', \<sigma>) \<notin> r - Id" using \<sigma>(2) \<sigma>'(1) by auto
    moreover have "\<sigma> \<in> Strategies" using \<sigma>(1) \<open>v \<in> S\<close> by auto
    moreover have "\<sigma>' \<in> Strategies" using \<sigma>'(1) \<open>v \<in> S\<close> by auto
    ultimately have "\<sigma>' = \<sigma>"
      using r_wo Linear_order_in_diff_Id well_order_on_Field well_order_on_def by fastforce
  }
  with \<sigma>1 have "\<exists>!\<sigma>. minimal_good_strategy v \<sigma>" by blast
  thus ?thesis using theI'[of "minimal_good_strategy v", folded choose_def] by blast
qed

corollary
  assumes "v \<in> S"
  shows choose_good: "choose v \<in> good v"
    and choose_minimal: "\<And>\<sigma>'. (\<sigma>', choose v) \<in> r - Id \<Longrightarrow> \<sigma>' \<notin> good v"
    and choose_strategy: "strategy p (choose v)"
  using choose_works[OF assms, unfolded minimal_good_strategy_def] good_strategies by blast+

corollary choose_in_Strategies: "v \<in> S \<Longrightarrow> choose v \<in> Strategies" using choose_good by blast

lemma well_ordered_strategy_valid: "strategy p well_ordered_strategy"
proof-
  {
    fix v assume "v \<in> S" "v \<in> VV p" "\<not>deadend v"
    moreover have "strategy p (choose v)"
      using choose_works[OF \<open>v \<in> S\<close>, unfolded minimal_good_strategy_def, THEN conjunct1] good_strategies
      by blast
    ultimately have "v\<rightarrow>(\<lambda>v. choose v v) v" using strategy_def by blast
  }
  thus ?thesis unfolding well_ordered_strategy_def using valid_strategy_updates_set by force
qed

subsection \<open>Strategies on a Path\<close>

text \<open>Maps a path to its strategies.\<close>
definition "path_strategies \<equiv> lmap choose"

lemma path_strategies_in_Strategies:
  assumes "lset P \<subseteq> S"
  shows "lset (path_strategies P) \<subseteq> Strategies"
  using path_strategies_def assms choose_in_Strategies by auto

lemma path_strategies_good:
  assumes "lset P \<subseteq> S" "enat n < llength P"
  shows "path_strategies P $ n \<in> good (P $ n)"
  by (simp add: path_strategies_def assms choose_good lset_lnth_member)

lemma path_strategies_strategy:
  assumes "lset P \<subseteq> S" "enat n < llength P"
  shows "strategy p (path_strategies P $ n)"
  using path_strategies_good assms good_strategies by blast

(* Along a conforming path the chosen strategies are non-increasing in r:
   the strategy at step n+1 is r-below (or equal to) the one at step n. *)
lemma path_strategies_monotone_Suc:
  assumes P: "lset P \<subseteq> S" "valid_path P" "path_conforms_with_strategy p P well_ordered_strategy"
    "enat (Suc n) < llength P"
  shows "(path_strategies P $ Suc n, path_strategies P $ n) \<in> r"
proof-
  (* Drop the first n nodes so the step of interest becomes the first edge. *)
  define P' where "P' = ldropn n P"
  hence "enat (Suc 0) < llength P'" using P(4)
    by (metis enat_ltl_Suc ldrop_eSuc_ltl ldropn_Suc_conv_ldropn llist.disc(2) lnull_0_llength ltl_ldropn)
  then obtain v w Ps where vw: "P' = LCons v (LCons w Ps)"
    by (metis ldropn_0 ldropn_Suc_conv_ldropn ldropn_lnull lnull_0_llength)
  moreover have "lset P' \<subseteq> S" unfolding P'_def using P(1) lset_ldropn_subset[of n P] by blast
  ultimately have "v \<in> S" "w \<in> S" by auto
  moreover have "v\<rightarrow>w" using valid_path_edges'[of v w Ps, folded vw] valid_path_drop[OF P(2)] P'_def by blast
  moreover have "choose v \<in> good v" using choose_good \<open>v \<in> S\<close> by blast
  moreover have "v \<in> VV p \<Longrightarrow> choose v v = w" proof-
    assume "v \<in> VV p"
    moreover have "path_conforms_with_strategy p P' well_ordered_strategy"
      unfolding P'_def using path_conforms_with_strategy_drop P(3) by blast
    ultimately have "well_ordered_strategy v = w" using vw path_conforms_with_strategy_start by blast
    thus "choose v v = w" unfolding well_ordered_strategy_def using \<open>v \<in> S\<close> by auto
  qed
  (* By strategies_continue, the strategy chosen at v is still good at w,
     hence it cannot be strictly below the minimal strategy chosen at w. *)
  ultimately have "choose v \<in> good w" using strategies_continue by blast
  hence *: "(choose v, choose w) \<notin> r - Id" using choose_minimal \<open>w \<in> S\<close> by blast
  have "(choose w, choose v) \<in> r" proof (cases)
    assume "choose v = choose w"
    thus ?thesis using r_refl refl_onD choose_in_Strategies[OF \<open>v \<in> S\<close>] by fastforce
  next
    assume "choose v \<noteq> choose w"
    thus ?thesis using * r_total choose_in_Strategies[OF \<open>v \<in> S\<close>] choose_in_Strategies[OF \<open>w \<in> S\<close>]
      by (metis (lifting) Linear_order_in_diff_Id r_wo well_order_on_Field well_order_on_def)
  qed
  hence "(path_strategies P' $ Suc 0, path_strategies P' $ 0) \<in> r"
    unfolding path_strategies_def using vw by simp
  thus ?thesis unfolding path_strategies_def P'_def
    using lnth_lmap_ldropn[OF Suc_llength[OF P(4)], of choose]
      lnth_lmap_ldropn_Suc[OF P(4), of choose]
    by simp
qed

(* Transitive closure of the single-step monotonicity above, by induction on m - n. *)
lemma path_strategies_monotone:
  assumes P: "lset P \<subseteq> S" "valid_path P" "path_conforms_with_strategy p P well_ordered_strategy"
    "n < m" "enat m < llength P"
  shows "(path_strategies P $ m, path_strategies P $ n) \<in> r"
using assms proof (induct "m - n" arbitrary: n m)
  case (Suc d)
  show ?case proof (cases)
    assume "d = 0"
    thus ?thesis using path_strategies_monotone_Suc[OF P(1,2,3)]
      by (metis (no_types) Suc.hyps(2) Suc.prems(4,5) Suc_diff_Suc Suc_inject Suc_leI diff_is_0_eq diffs0_imp_equal)
  next
    assume "d \<noteq> 0"
    have "m \<noteq> 0" using Suc.hyps(2) by linarith
    then obtain m' where m': "Suc m' = m" using not0_implies_Suc by blast
    hence "d = m' - n" using Suc.hyps(2) by presburger
    moreover hence "n < m'" using \<open>d \<noteq> 0\<close> by presburger
    ultimately have "(path_strategies P $ m', path_strategies P $ n) \<in> r"
      using Suc.hyps(1)[of m' n, OF _ P(1,2,3)] Suc.prems(5) dual_order.strict_trans enat_ord_simps(2) m'
      by blast
    thus ?thesis
      using m' path_strategies_monotone_Suc[OF P(1,2,3)] by (metis (no_types) Suc.prems(5) r_trans trans_def)
  qed
qed simp

(* On an infinite conforming path the strategies stabilize: well-foundedness of
   r - Id forbids an infinite strictly decreasing sequence of strategies. *)
lemma path_strategies_eventually_constant:
  assumes "\<not>lfinite P" "lset P \<subseteq> S" "valid_path P" "path_conforms_with_strategy p P well_ordered_strategy"
  shows "\<exists>n. \<forall>m \<ge> n. path_strategies P $ n = path_strategies P $ m"
proof-
  define \<sigma>_set where "\<sigma>_set = lset (path_strategies P)"
  have "\<exists>\<sigma>. \<sigma> \<in> \<sigma>_set" unfolding \<sigma>_set_def path_strategies_def
    using assms(1) lfinite_lmap lset_nth_member_inf by blast
  then obtain \<sigma>' where \<sigma>': "\<sigma>' \<in> \<sigma>_set" "\<And>\<tau>. (\<tau>, \<sigma>') \<in> r - Id \<Longrightarrow> \<tau> \<notin> \<sigma>_set"
    using wfE_min[of "r - Id" _ \<sigma>_set] by auto
  obtain n where n: "path_strategies P $ n = \<sigma>'"
    using \<sigma>'(1) lset_lnth[of \<sigma>'] unfolding \<sigma>_set_def by blast
  {
    fix m assume "n \<le> m"
    have "path_strategies P $ n = path_strategies P $ m" proof (rule ccontr)
      assume *: "path_strategies P $ n \<noteq> path_strategies P $ m"
      with \<open>n \<le> m\<close> have "n < m" using le_imp_less_or_eq by blast
      with path_strategies_monotone have "(path_strategies P $ m, path_strategies P $ n) \<in> r"
        using assms by (simp add: infinite_small_llength)
      with * have "(path_strategies P $ m, path_strategies P $ n) \<in> r - Id" by simp
      with \<sigma>'(2) n have "path_strategies P $ m \<notin> \<sigma>_set" by blast
      thus False unfolding \<sigma>_set_def path_strategies_def
        using assms(1) lfinite_lmap lset_nth_member_inf by blast
    qed
  }
  thus ?thesis by blast
qed

subsection \<open>Eventually One Strategy\<close>

text \<open>
The key lemma: Every path that stays in @{term S} and follows @{const well_ordered_strategy}
eventually follows one strategy because the strategies are well-ordered and non-increasing
along the path.
\<close>
lemma path_eventually_conforms_to_\<sigma>_map_n:
  assumes "lset P \<subseteq> S" "valid_path P" "path_conforms_with_strategy p P well_ordered_strategy"
  shows "\<exists>n. path_conforms_with_strategy p (ldropn n P) (path_strategies P $ n)"
proof (cases)
  (* Finite paths are trivial: drop everything and conform to LNil. *)
  assume "lfinite P"
  then obtain n where "llength P = enat n" using lfinite_llength_enat by blast
  hence "ldropn n P = LNil" by simp
  thus ?thesis by (metis path_conforms_LNil)
next
  assume "\<not>lfinite P"
  then obtain n where n: "\<And>m. n \<le> m \<Longrightarrow> path_strategies P $ n = path_strategies P $ m"
    using path_strategies_eventually_constant assms by blast
  let ?\<sigma> = well_ordered_strategy
  define P' where "P' = ldropn n P"
  (* After the stabilization point, well_ordered_strategy agrees with the fixed
     strategy path_strategies P $ n on every node of the dropped path. *)
  { fix v assume "v \<in> lset P'"
    hence "v \<in> S" using \<open>lset P \<subseteq> S\<close> P'_def in_lset_ldropnD by fastforce
    from \<open>v \<in> lset P'\<close> obtain m where m: "enat m < llength P'" "P' $ m = v" by (meson in_lset_conv_lnth)
    hence "P $ m + n = v" unfolding P'_def by (simp add: \<open>\<not>lfinite P\<close> infinite_small_llength)
    moreover have "?\<sigma> v = choose v v" unfolding well_ordered_strategy_def using \<open>v \<in> S\<close> by auto
    ultimately have "?\<sigma> v = (path_strategies P $ m + n) v"
      unfolding path_strategies_def using infinite_small_llength[OF \<open>\<not>lfinite P\<close>] by simp
    hence "?\<sigma> v = (path_strategies P $ n) v" using n[of "m + n"] by simp
  }
  moreover have "path_conforms_with_strategy p P' well_ordered_strategy"
    unfolding P'_def by (simp add: assms(3) path_conforms_with_strategy_drop)
  ultimately show ?thesis
    using path_conforms_with_strategy_irrelevant_updates P'_def by blast
qed

end \<comment> \<open>WellOrderedStrategies\<close>

end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/SeLFiE/Example/afp-2020-05-16/thys/Parity_Game/WellOrderedStrategy.thy"}
|
# -----------------------------------------------------------
# Class to create and handle Path-Signature-Feature Datasets.
#
# (C) 2020 Kevin Schlegel, Oxford, United Kingdom
# Released under Apache License, Version 2.0.
# email kevinschlegel@cantab.net
# -----------------------------------------------------------
import numpy as np
import json
from tqdm import tqdm
from typing import List, Optional, Iterator, Union, Tuple
from .types import KeypointLabelPair, DescriptionDict, KeypointTransformation
from .psfdatasubset import PSFDataSubset
from joblib import Parallel, delayed
class PSFDataset:
    """
    A class to create and handle Path-Signature-Feature Datasets.

    This class aims to provide a convenient interface for creating datasets
    using path-signature features for human action recognition from landmark
    data (see https://arxiv.org/abs/1707.03993).

    Datasets keep track of the transformations applied to the data during
    creation. Transformations are callable objects, inspired by torchvision
    transformations. They can be chained using Compose.

    Datasets can be saved to file; one npz file containing data and labels
    only (indexed as 'data' and 'labels') is created for easy reading in
    elsewhere. Another json file is created containing the properties of the
    dataset, such as the transformations that were applied to the data.

    Methods
    -------
    add_element(keypoints, label)
        Take keypoints and label and add them to the dataset.
    parallel_psf_transform(input_data, input_label, input_length, n_threads)
        Transform a whole batch of samples in parallel and fill the dataset.
    set_split(desc, train_ids, test_ids)
        Sets the training/test split for the dataset.
    fill_from_iterator(data_iterator)
        Take iterator for keypoint label pairs and fill dataset.
    get_iterator()
        Python generator for iteration over dataset.
    get_data_dimension()
        Return dimension of feature vector.
    get_labels()
        Return a numpy array of all labels of the dataset.
    get_description()
        Return a dictionary describing the properties of the dataset.
    save(filename)
        Save the dataset.
    load(filename)
        Load the dataset.
    """
    def __init__(self,
                 transform: Optional[KeypointTransformation] = None,
                 flattened: bool = True,
                 dtype: np.dtype = np.float64) -> None:
        """
        Parameters
        ----------
        transform : callable object from the transforms subpackage, optional
            A transformation to be applied to every new data element
            added to the dataset. (default is None)
        flattened : bool, optional
            Whether __getitem__ returns each element flattened to a 1-d
            vector. (default is True)
        dtype : numpy dtype, optional
            dtype the (transformed) keypoint data is stored as.
            (default is np.float64)
        """
        # _data contains the (transformed) keypoint data
        self._data: List[np.ndarray] = []
        # _labels contains the ground truth classification labels
        self._labels: List[int] = []
        # _transform is a callable object, applied to every data element
        # when added
        self._transform = transform
        self._flattened = flattened
        self._dtype = dtype
        # Optionally the dataset can hold a training/test split using the
        # PSFDataSubset module.
        self._trainingset: Optional[PSFDataSubset] = None
        self._testset: Optional[PSFDataSubset] = None
        self._split_desc: DescriptionDict = {}

    # Properties to access trainingset and testset subsets of the dataset.
    # No setters implemented as setting the subsets should only happen
    # through the set_split method which also sets the description dict.
    @property
    def trainingset(self) -> Optional[PSFDataSubset]:
        """
        Access to the training subset of the dataset.

        Returns None if no split is defined.
        """
        return self._trainingset

    @property
    def testset(self) -> Optional[PSFDataSubset]:
        """
        Access to the test subset of the dataset.

        Returns None if no split is defined.
        """
        return self._testset

    def __getitem__(self, index: int) -> KeypointLabelPair:
        """ Returns the (optionally flattened) feature vector and its label. """
        if self._flattened:
            return (self._data[index].reshape(-1), self._labels[index])
        else:
            return (self._data[index], self._labels[index])

    def __len__(self) -> int:
        return len(self._data)

    def add_element(self, keypoints: np.ndarray, label: int) -> None:
        """
        Takes keypoints and label and adds them to the dataset.

        Takes a numpy array of keypoints of the form [frame_id,keypoint,coords]
        and applies the transformation to the keypoints. Adds the transformed
        keypoints and the target label to the dataset.

        Parameters
        ----------
        keypoints : numpy array of keypoints
            Original landmark data to be transformed and added to the dataset.
        label : int
            Ground truth classification label
        """
        if self._transform is not None:
            keypoints = self._transform(keypoints)
        self._data.append(keypoints.astype(self._dtype))
        self._labels.append(label)

    def parallel_psf_tansform(self,
                              input_data: np.ndarray,
                              input_label: np.ndarray,
                              input_length: Optional[np.ndarray] = None,
                              n_threads: int = -1) -> None:
        """
        Transform a whole batch of samples in parallel and fill the dataset.

        Applies the dataset's transformation to every sample of input_data
        using joblib, REPLACING any data and labels currently held.

        Parameters
        ----------
        input_data : numpy array
            Batch of samples; the transformation is applied to each sample.
        input_label : numpy array
            Ground truth classification label for each sample.
        input_length : numpy array, optional
            If given, sample i is truncated to its first input_length[i]
            frames before the transformation is applied. (default is None)
        n_threads : int, optional
            Number of joblib workers; -1 uses all cores. (default is -1)
        """
        if input_length is not None:
            self._data = Parallel(n_jobs=n_threads)(
                delayed(self._transform)(sample[:input_length[i]])
                for i, sample in enumerate(tqdm(input_data)))
        else:
            self._data = Parallel(n_jobs=n_threads)(
                delayed(self._transform)(sample)
                for sample in tqdm(input_data))
        self._labels = list(input_label)

    # Correctly spelled alias; the original (typo) name is kept above for
    # backward compatibility with existing callers.
    parallel_psf_transform = parallel_psf_tansform

    def set_split(self, description: DescriptionDict, train_ids: List[int],
                  test_ids: List[int]) -> None:
        """
        Sets the training/test split for the dataset.

        The subsets can then be accessed via the trainingset and testset
        properties.

        Parameters
        ----------
        description : dict
            Dictionary with all information to identify the split in the logs.
        train_ids : list of ints
            List of the ids of elements of the trainingset
        test_ids : list of ints
            List of the ids of elements of the testset
        """
        self._split_desc = description
        self._trainingset = PSFDataSubset(self, train_ids)
        self._testset = PSFDataSubset(self, test_ids)

    def fill_from_iterator(self,
                           data_iterator: Iterator[KeypointLabelPair]) -> None:
        """
        Fill dataset with data using given iterator.

        Takes an iterator on a collection of keypoints, label pairs (with the
        keypoints of shape [frame_id,keypoint,coords]) and adds everything to
        the dataset using the add_element method.

        Parameters
        ----------
        data_iterator : iterable
            Iterable returning keypoint,label pairs of data.
        """
        for keypoints, label in tqdm(data_iterator):
            self.add_element(keypoints, label)

    def get_iterator(self) -> Iterator[KeypointLabelPair]:
        """ Python generator for iterating over the dataset. """
        for i in range(len(self._data)):
            yield self[i]  # use __getitem__ implementation

    def get_data_dimension(self) -> Union[int, Tuple[int]]:
        """
        Returns size of feature vector.

        Returns the size of the flattened array of one dataset entry for
        determining the input size of a model (or the entry's shape when the
        dataset is not flattened).

        Returns
        -------
        int or tuple of ints
            The size (or shape) of the feature vector

        Raises
        ------
        ValueError
            If the dataset does not contain any data yet.
        """
        if len(self._data) > 0:
            if self._flattened:
                # cast: np.prod returns a numpy scalar, callers expect int
                return int(np.prod(self._data[0].shape))
            else:
                return self._data[0].shape
        else:
            raise ValueError(
                "The dimension of the feature vector is undefined as the "
                "dataset does not contain any data yet")

    def get_labels(self) -> np.ndarray:
        """
        Return array of all labels of the entire dataset.

        This is useful e.g. for computation of metrics after training epochs.

        Returns
        -------
        numpy array
            Labels of the dataset
        """
        return np.array(self._labels)

    def get_description(self) -> DescriptionDict:
        """
        Returns a dictionary describing all properties of the dataset.

        The dictionary helps to keep track of the properties of the dataset.
        It also gets written to file when saving the dataset.
        The dict can also be passed into TensorBoard for hparam tracking.
        Currently the dict contains the transformations applied plus any
        split description set via set_split.

        Returns
        -------
        dict
            Description of the dataset
        """
        desc: DescriptionDict = {}
        if self._transform is not None:
            desc = self._transform.get_description()
        desc.update(self._split_desc)
        return desc

    def save(self, filename: str) -> None:
        """
        Saves the dataset to file.

        Saves the data and labels to an .npz file for easy loading anywhere.
        Data and labels are indexed as 'data' and 'labels' respectively in
        the .npz file.
        Saves the settings used to create the dataset into a second file to
        allow easy keeping track of its properties. Reloading transformations
        is currently not supported.

        Parameters
        ----------
        filename : string
            full filepath and filename without an extension (added
            automatically for the two files)
        """
        # NOTE(review): np.array(self._data) requires all elements to share a
        # shape; ragged datasets would need dtype=object — confirm intent.
        np.savez(filename,
                 data=np.array(self._data),
                 labels=np.array(self._labels))
        transform: Optional[DescriptionDict]
        if self._transform is not None:
            transform = self._transform.get_description()
        else:
            transform = None
        with open(filename + ".json", "w") as json_file:
            json.dump(transform, json_file)

    def load(self, filename: str) -> None:
        """
        Load the dataset from file.

        Loads the data file created by the save method to load data and
        labels. Loading of settings of the dataset is currently not supported.

        Parameters
        ----------
        filename : string
            full filepath and filename without an extension (added
            automatically for the two files)
        """
        with np.load(filename + ".npz") as data:
            # Convert back to lists so add_element keeps working after a load
            # (ndarrays have no append) and internal types stay consistent.
            self._data = list(data['data'])
            self._labels = list(data['labels'])
|
{"hexsha": "8c607d4abf59bf6eaab7572520abec937f7ec9d7", "size": 10652, "ext": "py", "lang": "Python", "max_stars_repo_path": "psfdataset/psfdataset.py", "max_stars_repo_name": "WeixinYang/PSFDataset", "max_stars_repo_head_hexsha": "f29b37489c580ad3c677bb9385a721cc57da60e4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "psfdataset/psfdataset.py", "max_issues_repo_name": "WeixinYang/PSFDataset", "max_issues_repo_head_hexsha": "f29b37489c580ad3c677bb9385a721cc57da60e4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "psfdataset/psfdataset.py", "max_forks_repo_name": "WeixinYang/PSFDataset", "max_forks_repo_head_hexsha": "f29b37489c580ad3c677bb9385a721cc57da60e4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2447552448, "max_line_length": 147, "alphanum_fraction": 0.6260796095, "include": true, "reason": "import numpy", "num_tokens": 2181}
|
module MR1dCNN

using Base: include_package_for_output

# Paths are resolved relative to this source file so the module works from
# any working directory.
const DIR = @__DIR__
const ARCH_PATH = DIR * "/../arch/arch.json"

using Pkg
# Activate the project environment one level above this file.
Pkg.activate(DIR * "/..")
Pkg.status()

@info "Loading modules..."
using BSON
using CUDA
using Flux
using Flux: logitcrossentropy
using Flux.Data: DataLoader
using Flux: onehotbatch, onecold
using Logging: with_logger
using ProgressMeter: Progress, next!
using TensorBoardLogger: TBLogger, tb_overwrite
using Random, Dates, DelimitedFiles, Statistics, JSON

#load data utilities for wrangling datasets and model building
include("DataUtils.jl")
include("ModelUtilities.jl")
include("Args.jl")

export Args, train, args, test

# Module-level default settings; mutate fields of this struct to change
# parameters before calling train/test.
args = Args()

# Convert a scalar hyperparameter to the element type of the array x
# (e.g. so a Float64 setting matches Float32 data).
function cast_param(x::A, param::T) where A<:AbstractArray{T} where T <: Real
    convert(eltype(x), param)
end

# NOTE(review): this RNG does not appear to be used below — seeding happens
# via Random.seed! inside train. Confirm whether it can be removed.
rng = MersenneTwister(args.seed)

# Data augmentation: add Gaussian noise with scale ρ. Draws the noise on the
# GPU when loc == gpu so the batch never leaves the device.
function augment(x::A, ρ::T, loc) where A<:AbstractArray{T} where T<:Real
    if loc == gpu
        x .+ ρ * CUDA.randn(eltype(x), size(x))
    else
        x .+ ρ * randn(eltype(x), size(x))
    end
end

# training loss: mean logit cross-entropy over the model's multiple outputs,
# computed on an augmented copy of the batch.
# NOTE(review): the type parameter C<:Chain is never used in the signature or
# body — candidate for removal.
function model_loss(x::A, y::B, model::Model, ρ::T, loc) where {C<:Chain, A<:AbstractArray, B<:AbstractArray, T<:Real}
    ŷs = model(augment(x, ρ, loc))
    mean(logitcrossentropy(ŷᵢ, y) for ŷᵢ in ŷs)
end

# loss over data: average (un-augmented) loss across all batches of a DataLoader.
function total_loss(data::DataLoader, model::Model)
    l = 0f0
    for (x, y) in data
        ŷs = model(x)
        l += mean(logitcrossentropy(ŷᵢ, y) for ŷᵢ in ŷs)
    end
    l = l/length(data)
    return l
end

#accuracy over data: predictions are the softmax of the mean over the model's
# multiple outputs; each batch contributes its fraction of correct labels
# (normalized by the batch size, stored in the 4th array dimension).
function accuracy(data::DataLoader, model::Model)
    acc = zero(Float32)
    for (x, y) in data
        ŷs = model(x)
        out = softmax(mean(ŷs))
        acc += sum(onecold(out) .== onecold(y)) * 1 / size(x,4)
    end
    acc/length(data)
end

# convert model parameters to a suitable data structure for TensorBoard logging;
# recurses into Chains, flattening array-valued fields into vectors.
function fill_param_dict!(dict, m, prefix)
    if m isa Chain
        for (i, layer) in enumerate(m.layers)
            fill_param_dict!(dict, layer, prefix*"layer_"*string(i)*"/"*string(layer)*"/")
        end
    else
        for fieldname in fieldnames(typeof(m))
            val = getfield(m, fieldname)
            if val isa AbstractArray
                val = vec(val)
            end
            dict[prefix*string(fieldname)] = val
        end
    end
end

# TensorBoard callback function to log model parameters, losses and
# accuracies once per epoch.
function TBCallback(logger, train_data, val_data, model)
    param_dict = Dict{String, Any}()
    fill_param_dict!(param_dict, model, "")
    with_logger(logger) do
        @info "model" params=param_dict log_step_increment=0
        @info "train" loss=total_loss(train_data, model) acc=accuracy(train_data, model) log_step_increment=0
        @info "validation" loss=total_loss(val_data, model) acc=accuracy(val_data, model)
    end
end

# One epoch of optimization: for each batch, take the pullback of the loss,
# apply the gradient with the optimizer, and advance the progress meter.
function training_function(model::Model, Xs::Flux.Data.DataLoader, params::Flux.Zygote.Params, opt::ADAM, ρ::N, loc, progress) where {N<:Real}
    for (xs, ys) in Xs
        train_loss, back = Flux.pullback(() -> model_loss(xs, ys, model, ρ, loc), params)
        grad = back(one(train_loss))
        Flux.Optimise.update!(opt, params, grad)
        next!(progress; showvalues=[(:Loss, train_loss)])
    end
end

@info "Warming up training function..."
# Run one dummy batch through the training step and accuracy at module load
# time to trigger compilation before the first real epoch.
tm = build_Model(ARCH_PATH, args.input_dims, 6)
d = DataLoader((rand(Float32, size(tm.paths[1].layers[1].layers[1].weight, 1), 128, 1, 1), onehotbatch([2], [1,2,3,4,5,6])))
training_function(tm, d, params(tm), ADAM(1e-3), Float32(.1), cpu, Progress(1))
accuracy(d, tm)
@info "Ready. Use fields in 'args' struct to change parameter settings."

# training function: trains the model described by arch.json on the data in
# args.train_dir, with LR reduction on plateau, early stopping, optional
# TensorBoard logging, and optional saving of the best model (by validation
# accuracy) to args.save_path.
function train(args::Args)
    args.seed > 0 && Random.seed!(args.seed)
    if args.cuda && has_cuda_gpu()
        loc = gpu
        @info "Training on GPU"
    else
        loc = cpu
        @info "Training on CPU"
    end
    # load data
    train_data, val_data, T = DataUtils.get_train_validation(DataUtils.data_prep(args.train_dir), readdlm(args.train_dir * "/y_train.txt", Int), args.batch_size, args.train_prop, loc, args.scale, args.shuffle)
    # initialize model (best_model keeps a CPU-side copy of the best weights)
    m = build_Model(DIR * "/../arch/arch.json", args.input_dims, args.nclasses) |> loc
    best_model = build_Model(DIR * "/../arch/arch.json", args.input_dims, args.nclasses)
    #optimizer
    opt = ADAM(args.η)
    # match the noise scale's element type to the training data
    ρ = cast_param(train_data.data[1], args.ρ) |> loc
    # parameters
    ps = Flux.params(m)
    # make path for model storage and log output
    !ispath(args.save_path) && mkpath(args.save_path)
    # logging by TensorBoard.jl
    if args.tblogging
        tblogger = TBLogger(args.save_path, tb_overwrite)
    end
    #initialize tracking of accuracy and improvement
    best_acc = 0.0
    last_improvement = 0
    # warm up GPU augmentation kernel before the first epoch
    if loc == gpu && CUDA.functional()
        augment(randn(Float32, 2, 4) |> gpu, 0.1f0, gpu)
    end
    # training
    train_steps = 0
    @info "Starting training. Total epochs: $(args.epochs)"
    for epoch = 1:args.epochs
        @info "Epoch $(epoch)"
        progress = Progress(length(train_data))
        training_function(m, train_data, ps, opt, ρ, loc, progress)
        # calculate accuracy on validation set
        vacc = accuracy(val_data, m)
        @info "Validation set accuracy: $(vacc)"
        # log model and loss with TensorBoard
        if args.tblogging
            TBCallback(tblogger, train_data, val_data, m)
        end
        #If accuracy improves, save current model (so saved model is most performant)
        if vacc >= best_acc
            best_model = deepcopy(m)
            best_acc = vacc
            last_improvement = epoch
        end
        #If no improvement in args.lr_patience epochs, reduce learning rate
        if epoch - last_improvement >= args.lr_patience && opt.eta > 1e-6
            opt.eta /= args.γ
            @warn(" -> No improvement in $(args.lr_patience) epochs, reducing learning rate to $(opt.eta)!")
            # reset last_improvement to provide enough time to improve after reducing LR
            last_improvement = epoch
        end
        # Early stopping - Stop if no improvement in args.convergence epochs
        if epoch - last_improvement >= args.convergence
            @warn(" -> No improvement in $(args.convergence) epochs. Approximately converged.")
            break
        end
    end
    if args.save_model
        model_path = abspath(joinpath(args.save_path, "model.bson"))
        # move everything to the CPU before serializing with BSON
        let model = best_model |> cpu, data = train_data |> cpu, args = args, transform = T
            BSON.@save model_path model args data transform
            @info "Best model saved: $(model_path)"
        end
    end
    if loc == gpu
        CUDA.reclaim()
    end
    return nothing
end

# Load a saved model (BSON produced by train) and report accuracy on the
# test set, reusing the transform that was fitted on the training data.
function test(model_path::String, scale::Bool=true, shuffle::Bool=true)
    saved_model = BSON.load(model_path)
    model = saved_model[:model]
    T = saved_model[:transform]
    test_data = DataUtils.get_test_set(DataUtils.data_prep(DIR*"/../data/test"), readdlm(DIR*"/../data/test/y_test.txt"), T, cpu, scale, shuffle)
    accuracy(test_data, model)
end

end #module
|
{"hexsha": "1207ad3eca064163085ba2cef50ca75108f66019", "size": 7068, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/MR1dCNN.jl", "max_stars_repo_name": "cjw199/OneDCNN.jl", "max_stars_repo_head_hexsha": "2bb43258287bf913344b6957026bd4ad24e00cd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/MR1dCNN.jl", "max_issues_repo_name": "cjw199/OneDCNN.jl", "max_issues_repo_head_hexsha": "2bb43258287bf913344b6957026bd4ad24e00cd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/MR1dCNN.jl", "max_forks_repo_name": "cjw199/OneDCNN.jl", "max_forks_repo_head_hexsha": "2bb43258287bf913344b6957026bd4ad24e00cd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1272727273, "max_line_length": 209, "alphanum_fraction": 0.6513865308, "num_tokens": 1950}
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Convenient syntax for "equational reasoning" using a preorder
------------------------------------------------------------------------
-- I think that the idea behind this library is originally Ulf
-- Norell's. I have adapted it to my tastes and mixfix operators,
-- though.
-- If you need to use several instances of this module in a given
-- file, then you can use the following approach:
--
-- import Relation.Binary.PreorderReasoning as Pre
--
-- f x y z = begin
-- ...
-- ∎
-- where open Pre preorder₁
--
-- g i j = begin
-- ...
-- ∎
-- where open Pre preorder₂
open import Relation.Binary
module Relation.Binary.PreorderReasoning
         {p₁ p₂ p₃} (P : Preorder p₁ p₂ p₃) where

open Preorder P

infix  4 _IsRelatedTo_
infix  2 _∎
infixr 2 _∼⟨_⟩_ _≈⟨_⟩_ _≈⟨⟩_
infix  1 begin_

-- This seemingly unnecessary type is used to make it possible to
-- infer arguments even if the underlying equality evaluates.

data _IsRelatedTo_ (x y : Carrier) : Set p₃ where
  relTo : (x∼y : x ∼ y) → x IsRelatedTo y

-- Closes a derivation: unwraps the underlying proof of x ∼ y.

begin_ : ∀ {x y} → x IsRelatedTo y → x ∼ y
begin relTo x∼y = x∼y

-- A step along the preorder relation, combined by transitivity.

_∼⟨_⟩_ : ∀ x {y z} → x ∼ y → y IsRelatedTo z → x IsRelatedTo z
_ ∼⟨ x∼y ⟩ relTo y∼z = relTo (trans x∼y y∼z)

-- A step along the underlying equality, lifted into ∼ via reflexivity.

_≈⟨_⟩_ : ∀ x {y z} → x ≈ y → y IsRelatedTo z → x IsRelatedTo z
_ ≈⟨ x≈y ⟩ relTo y∼z = relTo (trans (reflexive x≈y) y∼z)

-- A definitional-equality step; no proof obligation, purely for readability.

_≈⟨⟩_ : ∀ x {y} → x IsRelatedTo y → x IsRelatedTo y
_ ≈⟨⟩ x∼y = x∼y

-- Ends a derivation at a single element, by reflexivity of ∼.

_∎ : ∀ x → x IsRelatedTo x
_∎ _ = relTo refl
|
{"hexsha": "2bd974aa8a2edbad2580f77405a21cd8a91c843b", "size": 1563, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "agda-stdlib-0.9/src/Relation/Binary/PreorderReasoning.agda", "max_stars_repo_name": "qwe2/try-agda", "max_stars_repo_head_hexsha": "9d4c43b1609d3f085636376fdca73093481ab882", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-10-20T15:52:05.000Z", "max_stars_repo_stars_event_max_datetime": "2016-10-20T15:52:05.000Z", "max_issues_repo_path": "agda-stdlib-0.9/src/Relation/Binary/PreorderReasoning.agda", "max_issues_repo_name": "qwe2/try-agda", "max_issues_repo_head_hexsha": "9d4c43b1609d3f085636376fdca73093481ab882", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "agda-stdlib-0.9/src/Relation/Binary/PreorderReasoning.agda", "max_forks_repo_name": "qwe2/try-agda", "max_forks_repo_head_hexsha": "9d4c43b1609d3f085636376fdca73093481ab882", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9482758621, "max_line_length": 72, "alphanum_fraction": 0.5770953295, "num_tokens": 541}
|
//==============================================================================
// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II
// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef BOOST_SIMD_SWAR_FUNCTIONS_SIMD_SSE_SSE2_SPLIT_MULTIPLIES_HPP_INCLUDED
#define BOOST_SIMD_SWAR_FUNCTIONS_SIMD_SSE_SSE2_SPLIT_MULTIPLIES_HPP_INCLUDED
#ifdef BOOST_SIMD_HAS_SSE2_SUPPORT
#include <boost/simd/swar/functions/split_multiplies.hpp>
#include <boost/simd/include/functions/simd/interleave_first.hpp>
#include <boost/simd/include/functions/simd/interleave_second.hpp>
#include <boost/simd/include/functions/simd/deinterleave_first.hpp>
#include <boost/simd/include/functions/simd/deinterleave_second.hpp>
namespace boost { namespace simd { namespace ext
{
  // int16 x int16 -> two int32 result vectors (full 32-bit products).
  // SSE2 has no single widening multiply, so the low and high 16-bit
  // halves of each product are computed separately and re-paired by
  // interleaving: a2 receives the products of the low lanes, a3 those
  // of the high lanes.
  BOOST_DISPATCH_IMPLEMENT          ( split_multiplies_
                                    , boost::simd::tag::sse2_
                                    , (A0)(A1)
                                    , ((simd_<int16_<A0>,boost::simd::tag::sse_>))
                                      ((simd_<int16_<A0>,boost::simd::tag::sse_>))
                                      ((simd_<int32_<A1>,boost::simd::tag::sse_>))
                                      ((simd_<int32_<A1>,boost::simd::tag::sse_>))
                                    )
  {
    typedef void result_type;
    BOOST_FORCEINLINE result_type operator()(A0 const& a0, A0 const& a1, A1& a2, A1& a3) const
    {
      // _mm_mulhi_epi16 keeps the sign-extended upper 16 bits.
      A0 lo = _mm_mullo_epi16(a0, a1);
      A0 hi = _mm_mulhi_epi16(a0, a1);
      a2 = interleave_first(lo, hi);
      a3 = interleave_second(lo, hi);
    }
  };

  // uint16 x uint16 -> two uint32 result vectors. Same scheme as the
  // signed case; _mm_mullo_epi16 is sign-agnostic, only the high half
  // needs the unsigned variant (_mm_mulhi_epu16).
  BOOST_DISPATCH_IMPLEMENT          ( split_multiplies_
                                    , boost::simd::tag::sse2_
                                    , (A0)(A1)
                                    , ((simd_<uint16_<A0>,boost::simd::tag::sse_>))
                                      ((simd_<uint16_<A0>,boost::simd::tag::sse_>))
                                      ((simd_<uint32_<A1>,boost::simd::tag::sse_>))
                                      ((simd_<uint32_<A1>,boost::simd::tag::sse_>))
                                    )
  {
    typedef void result_type;
    BOOST_FORCEINLINE result_type operator()(A0 const& a0, A0 const& a1, A1& a2, A1& a3) const
    {
      A0 lo = _mm_mullo_epi16(a0, a1);
      A0 hi = _mm_mulhi_epu16(a0, a1);
      a2 = interleave_first(lo, hi);
      a3 = interleave_second(lo, hi);
    }
  };

  // uint32 x uint32 -> two uint64 result vectors. _mm_mul_epu32 only
  // multiplies the even-indexed 32-bit lanes, so the odd lanes are
  // brought into even position with a 4-byte shift and multiplied in a
  // second pass; deinterleaving restores the original lane order.
  BOOST_DISPATCH_IMPLEMENT          ( split_multiplies_
                                    , boost::simd::tag::sse2_
                                    , (A0)(A1)
                                    , ((simd_<uint32_<A0>,boost::simd::tag::sse_>))
                                      ((simd_<uint32_<A0>,boost::simd::tag::sse_>))
                                      ((simd_<uint64_<A1>,boost::simd::tag::sse_>))
                                      ((simd_<uint64_<A1>,boost::simd::tag::sse_>))
                                    )
  {
    typedef void result_type;
    BOOST_FORCEINLINE result_type operator()(A0 const& a0, A0 const& a1, A1& a2, A1& a3) const
    {
      A1 lo = _mm_mul_epu32(a0, a1);
      A1 hi = _mm_mul_epu32(_mm_srli_si128(a0, 4), _mm_srli_si128(a1, 4));
      a2 = deinterleave_first(lo, hi);
      a3 = deinterleave_second(lo, hi);
    }
  };
} } }
#endif
#endif
|
{"hexsha": "f43f92997a154ce32dc39b0c86af202abc713776", "size": 3626, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "modules/boost/simd/base/include/boost/simd/swar/functions/simd/sse/sse2/split_multiplies.hpp", "max_stars_repo_name": "psiha/nt2", "max_stars_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 34.0, "max_stars_repo_stars_event_min_datetime": "2017-05-19T18:10:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T02:18:13.000Z", "max_issues_repo_path": "modules/boost/simd/base/include/boost/simd/swar/functions/simd/sse/sse2/split_multiplies.hpp", "max_issues_repo_name": "psiha/nt2", "max_issues_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/boost/simd/base/include/boost/simd/swar/functions/simd/sse/sse2/split_multiplies.hpp", "max_forks_repo_name": "psiha/nt2", "max_forks_repo_head_hexsha": "5e829807f6b57b339ca1be918a6b60a2507c54d0", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-02T12:59:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-31T12:46:14.000Z", "avg_line_length": 43.1666666667, "max_line_length": 94, "alphanum_fraction": 0.5030336459, "num_tokens": 908}
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from contextlib import contextmanager
import numpy as np
import popdist
import popdist.tensorflow
import tensorflow as tf
from tensorflow.python import ipu
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op, test_util
from tensorflow.python.ipu.horovod import popdist_strategy
from tensorflow.python.ipu import horovod as hvd
from tensorflow.python.ipu import ipu_strategy
from tensorflow.python.platform import test
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.ops import control_flow_v2_toggles
class DistributedTF2Test(test_util.TensorFlowTestCase):
  """Multi-instance (popdist/Horovod) training tests for IPU TF2 strategies.

  These tests require IPU hardware and a popdist launcher; each test
  instance compares its local values against the root instance via a
  Horovod broadcast.
  """

  def assert_all_instances_equal(self, local_value, name=None):
    """Assert that every instance holds the same value as the root instance.

    Args:
      local_value: value held by this instance (convertible to a tensor).
      name: optional message forwarded to np.testing.assert_equal.
    """
    # Assert that the current instance has the same value as the root instance.
    local_tensor = constant_op.constant(local_value)
    root_tensor = hvd.broadcast(local_tensor, root_rank=0)
    np.testing.assert_equal(local_value, root_tensor.numpy(), name)

  def assert_all_instances_not_equal(self, local_value):
    """Assert that non-root instances hold values different from the root."""
    local_tensor = constant_op.constant(local_value)
    root_tensor = hvd.broadcast(local_tensor, root_rank=0)
    # The root instance trivially equals its own broadcast; only compare
    # on the other ranks.
    if hvd.local_rank() == 0:
      return
    assert not np.equal(local_value, root_tensor.numpy()).any()

  def prepare_model(self):
    """Build a small conv model whose bias differs per popdist instance."""
    # Make sure we have different parameters on each index
    bias = tf.keras.initializers.Constant(value=popdist.getInstanceIndex())
    return tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(4,
                               3,
                               activation='relu',
                               bias_initializer=bias,
                               name='test_bias'),
        tf.keras.layers.MaxPooling2D(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(2),
    ])

  def prepare_dataset(self):
    """Build a random dataset manually sharded across popdist instances."""

    def generator():
      # 100 random 4x4x1 samples with labels drawn from {1}.
      for _ in range(100):
        yield np.random.rand(4, 4, 1), np.random.randint(1, 2, size=1)

    dataset = tf.data.Dataset.from_generator(
        generator,
        output_types=(tf.float32, tf.float32),
        output_shapes=((4, 4, 1), (1,)),
    )
    # Auto-sharding is disabled because sharding is done explicitly below.
    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = \
        tf.data.experimental.AutoShardPolicy.OFF
    dataset = dataset.with_options(options)
    dataset = dataset.shard(num_shards=popdist.getNumInstances(),
                            index=popdist.getInstanceIndex())
    dataset = dataset.batch(10, drop_remainder=True)
    return dataset

  def test_tf2_distributed_ipu_strategy(self):
    """With plain IPUStrategy (no cross-instance reduction) the per-instance
    weights and losses must stay different."""
    config = ipu.config.IPUConfig()
    popdist.tensorflow.set_ipu_config(config, ipus_per_replica=1)
    config.configure_ipu_system()
    hvd.init()
    strategy = ipu_strategy.IPUStrategy()
    with strategy.scope():
      dataset = self.prepare_dataset()
      model = self.prepare_model()
      optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
      loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
      model.compile(optimizer=optimizer,
                    loss=loss_fn,
                    steps_per_execution=popdist.getNumTotalReplicas())
      # Build the model separately so we can assert that the biases are
      # broadcasted properly before training.
      model.build((1, 4, 4, 1))
      layer = model.get_layer(name='test_bias')
      self.assert_all_instances_not_equal(layer.get_weights()[1])
      history = model.fit(dataset,
                          steps_per_epoch=popdist.getNumTotalReplicas(),
                          epochs=1)
      # Make sure the losses and weights are not equal
      self.assert_all_instances_not_equal(history.history['loss'])
      self.assert_all_instances_not_equal(layer.get_weights()[1])

  def test_tf2_distributed_popdist_strategy(self):
    """With PopDistStrategy the initial weights are broadcast and the
    trained weights/losses must be identical on every instance."""
    config = ipu.config.IPUConfig()
    popdist.tensorflow.set_ipu_config(config, ipus_per_replica=1)
    config.configure_ipu_system()
    hvd.init()
    strategy = popdist_strategy.PopDistStrategy()
    with strategy.scope():
      dataset = self.prepare_dataset()
      model = self.prepare_model()
      optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
      loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
      model.compile(optimizer=optimizer,
                    loss=loss_fn,
                    steps_per_execution=popdist.getNumTotalReplicas())
      # Build the model separately so we can assert that the biases are
      # broadcasted properly before training.
      model.build((1, 4, 4, 1))
      layer = model.get_layer(name='test_bias')
      self.assert_all_instances_equal(layer.get_weights()[1])
      history = model.fit(dataset,
                          steps_per_epoch=popdist.getNumTotalReplicas(),
                          epochs=1)
      # Make sure the losses and weights are identical as we reduce over all
      # IPUs
      self.assert_all_instances_equal(history.history['loss'])
      for v in model.trainable_variables:
        self.assert_all_instances_equal(v)

  def test_single_multi_replica_training_step(self):
    """Check a hand-written multi-replica SGD step against a closed-form
    reference update of a single scalar weight."""
    config = ipu.config.IPUConfig()
    popdist.tensorflow.set_ipu_config(config, ipus_per_replica=1)
    config.configure_ipu_system()
    hvd.init()
    strategy = popdist_strategy.PopDistStrategy()
    with strategy.scope():
      learning_rate = 0.5
      initial_w = 2.0
      optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
      w = tf.Variable(initial_w)

      @tf.function(experimental_compile=True)
      def step_fn(x):
        # Per-replica loss L(x) = w * x; minimized with SGD.
        with tf.GradientTape() as tape:
          loss = w * x
        optimizer.minimize(loss, var_list=[w], tape=tape)
        return loss

      @tf.function(experimental_compile=True)
      def step_fn_wrapper(x):
        # Sum the per-replica losses into a single scalar.
        per_replica_loss = strategy.run(step_fn, args=[x])
        loss_reduced = strategy.reduce(tf.distribute.ReduceOp.SUM,
                                       per_replica_loss)
        return loss_reduced

      num_replicas = popdist.getNumTotalReplicas()
      reference_w = initial_w
      for x in range(10):
        self.assertEqual(reference_w, w.numpy())
        with tf.device("/device:IPU:0"):
          loss_final = step_fn_wrapper(tf.constant(tf.cast(x, tf.float32)))
        self.assertEqual(num_replicas * reference_w * x, loss_final)
        # L(x) = num_replicas * w * x
        # dL(x)/dw = num_replicas * x
        # w := w - learning_rate * num_replicas * x
        reference_w -= learning_rate * num_replicas * x

  def test_single_multi_replica_training_step_keras(self):
    """Same closed-form single-weight check as above, but driven through
    the Keras `model.fit()` API."""
    config = ipu.config.IPUConfig()
    popdist.tensorflow.set_ipu_config(config, ipus_per_replica=1)
    config.configure_ipu_system()
    hvd.init()
    strategy = popdist_strategy.PopDistStrategy()
    with strategy.scope():
      learning_rate = 0.5
      initial_w = 2.0
      # A single Dense(1) without bias behaves as one scalar weight.
      model = tf.keras.Sequential([
          tf.keras.layers.Dense(
              1,
              kernel_initializer=tf.keras.initializers.Constant(initial_w),
              use_bias=False)
      ])
      optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

      @tf.function(experimental_compile=True)
      def loss_fn(_, y_pred):
        # Use the raw prediction as the loss so L(x) = w * x.
        return y_pred

      num_replicas = popdist.getNumTotalReplicas()
      reference_w = initial_w
      model.compile(loss=loss_fn,
                    optimizer=optimizer,
                    steps_per_execution=num_replicas)
      model.build((1, 1))
      for x in range(10):
        self.assertEqual(reference_w,
                         model.trainable_variables[0][0][0].numpy())
        # fit() needs the input replicated once per local replica.
        history = model.fit(
            np.array([[x]], np.float32).repeat(popdist.getNumLocalReplicas(),
                                               axis=0),
            np.array([[x]], np.float32).repeat(popdist.getNumLocalReplicas(),
                                               axis=0),
            steps_per_epoch=num_replicas,
            epochs=1)
        self.assertEqual(reference_w * x, history.history['loss'][0])
        # L(x) = w * x
        # dL(x)/dw = x
        # w := w - learning_rate * x
        reference_w -= learning_rate * x

  def test_single_training_step_equal_in_tf_and_keras(self):
    """Train one step with raw TF and with Keras and assert that losses,
    weights and a subsequent evaluation step agree exactly."""
    # This test verifies that a training loop in raw TensorFlow and Keras yield
    # the same losses, gradients and weight updates.
    def initialize_model_with_seed():
      # Make sure we initialize the kernels in a reproducible manner, create
      # an initializer with a constant seed.
      initializer = tf.keras.initializers.GlorotNormal(seed=1234)
      return tf.keras.models.Sequential([
          tf.keras.layers.Conv2D(4,
                                 3,
                                 kernel_initializer=initializer,
                                 use_bias=False,
                                 activation='relu'),
          tf.keras.layers.Flatten(),
          tf.keras.layers.Dense(2,
                                kernel_initializer=initializer,
                                use_bias=False),
      ])

    config = ipu.config.IPUConfig()
    popdist.tensorflow.set_ipu_config(config, ipus_per_replica=1)
    config.configure_ipu_system()
    hvd.init()
    strategy = popdist_strategy.PopDistStrategy()
    with strategy.scope():
      learning_rate = 0.01
      # Two identically-initialized models: one trained with raw TF,
      # one with Keras.
      model_tf = initialize_model_with_seed()
      model_keras = initialize_model_with_seed()
      optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

      @tf.function(experimental_compile=True)
      def step_fn_tf(x, y):
        with tf.GradientTape() as tape:
          output = model_tf(x)
          loss = tf.keras.losses.sparse_categorical_crossentropy(
              y_true=y, y_pred=output, from_logits=True)
          loss = tf.nn.compute_average_loss(
              loss, global_batch_size=popdist.getNumTotalReplicas())
        optimizer.minimize(loss,
                           var_list=model_tf.trainable_variables,
                           tape=tape)
        return loss

      @tf.function(experimental_compile=True)
      def run_training_step_tf(x, y):
        per_replica_loss = strategy.run(step_fn_tf, args=[x, y])
        loss_reduced = strategy.reduce(tf.distribute.ReduceOp.SUM,
                                       per_replica_loss)
        return loss_reduced

      loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
      model_keras.compile(optimizer=optimizer,
                          loss=loss_fn,
                          steps_per_execution=popdist.getNumTotalReplicas())

      def run_training_step_keras(x, y):
        history = model_keras.fit(
            x, y, steps_per_epoch=popdist.getNumTotalReplicas(), epochs=1)
        return history.history['loss'][0]

      # First generate some random data using numpy, so we can reuse the same
      # data for both TF and Keras in order to force reproducibility.
      input_sample = np.random.uniform(0, 1, (1, 4, 4, 1))
      output_sample = np.random.randint(1, 2, size=1)
      # The Keras `.fit()` API requires the input to be replicated `num_replica`
      # times.
      x_tf = tf.constant(tf.cast(input_sample, tf.float32))
      y_tf = tf.constant(tf.cast(output_sample, tf.float32))
      x_keras = tf.constant(
          tf.cast(
              np.repeat(input_sample, popdist.getNumLocalReplicas(), axis=0),
              tf.float32))
      y_keras = tf.constant(
          tf.cast(
              np.repeat(output_sample, popdist.getNumLocalReplicas(), axis=0),
              tf.float32))
      # First test whether a single distributed training step yields the same
      # loss values for both TensorFlow and Keras.
      with tf.device("/device:IPU:0"):
        loss_final_tf = run_training_step_tf(x_tf, y_tf)
        loss_final_keras = run_training_step_keras(x_keras, y_keras)
        self.assertEqual(loss_final_tf, loss_final_keras)
        # Assert that both models have the same weights after the first backwards
        # pass.
        for i, _ in enumerate(model_tf.trainable_variables):
          np.testing.assert_equal(model_tf.trainable_variables[i].numpy(),
                                  model_keras.trainable_variables[i].numpy())

        @tf.function(experimental_compile=True)
        def step_fn_eval_tf(x, y):
          # Forward pass only (training=False): no weight updates.
          output = model_tf(x, training=False)
          loss = tf.keras.losses.sparse_categorical_crossentropy(
              y_true=y, y_pred=output, from_logits=True)
          loss = tf.nn.compute_average_loss(
              loss, global_batch_size=popdist.getNumTotalReplicas())
          return loss

        @tf.function(experimental_compile=True)
        def run_eval_step_tf(x, y):
          per_replica_loss = strategy.run(step_fn_eval_tf, args=[x, y])
          loss_reduced = strategy.reduce(tf.distribute.ReduceOp.SUM,
                                         per_replica_loss)
          return loss_reduced

        def run_eval_step_keras(x, y):
          scores = model_keras.evaluate(x,
                                        y,
                                        steps=popdist.getNumTotalReplicas())
          return scores

        # Evaluation input is replicated once per total (not local) replica.
        x_keras_eval = tf.constant(
            tf.cast(
                np.repeat(input_sample, popdist.getNumTotalReplicas(), axis=0),
                tf.float32))
        y_keras_eval = tf.constant(
            tf.cast(
                np.repeat(output_sample, popdist.getNumTotalReplicas(), axis=0),
                tf.float32))
        with tf.device("/device:IPU:0"):
          val_loss_final_tf = run_eval_step_tf(x_tf, y_tf)
          val_loss_final_keras = run_eval_step_keras(x_keras_eval, y_keras_eval)
          self.assertEqual(val_loss_final_tf, val_loss_final_keras)

  @contextmanager
  def control_flow_v1(self):
    """Temporarily disable control-flow v2, restoring it on exit."""
    control_flow_v2_toggles.disable_control_flow_v2()
    try:
      yield
    finally:
      control_flow_v2_toggles.enable_control_flow_v2()

  @test_util.deprecated_graph_mode_only
  def single_training_step_equal_tf1(self):
    """Run a TF1 graph-mode training loop and return its loss, gradients,
    per-step losses and final weights (consumed by the comparison test below;
    not a test itself despite the decorator)."""
    num_iterations = 2
    learning_rate = 0.5
    batch_size = 2
    # Seeded data so the Keras counterpart below sees identical samples.
    np.random.seed(1234)
    input_sample = np.random.uniform(
        0, 1, (batch_size * popdist.getNumLocalReplicas(), 4, 4, 1)).astype(
            np.float32)
    output_sample = np.random.randint(
        1, 2,
        size=batch_size * popdist.getNumLocalReplicas()).astype(np.float32)
    config = ipu.config.IPUConfig()
    popdist.tensorflow.set_ipu_config(config, ipus_per_replica=1)
    config.configure_ipu_system()
    hvd.init()
    strategy = popdist_strategy.PopDistStrategy()

    def initialize_model_with_seed():
      # Make sure we initialize the kernels in a reproducible manner, create
      # an initializer with a constant seed.
      initializer = tf.keras.initializers.GlorotNormal(seed=1234)
      return tf.keras.models.Sequential([
          tf.keras.layers.Flatten(),
          tf.keras.layers.Dense(2,
                                kernel_initializer=initializer,
                                use_bias=False),
      ])

    with self.control_flow_v1(), strategy.scope():
      dataset = tf.data.Dataset.from_tensor_slices(
          (input_sample, output_sample))
      dataset = dataset.repeat()
      dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
      optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
      infeed_queue = ipu.ipu_infeed_queue.IPUInfeedQueue(dataset)
      # Gradients and losses are streamed off-device through outfeed queues
      # so they can be compared with the Keras run.
      outfeed_queue_gradients = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
      outfeed_queue_losses = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
      model_tf = initialize_model_with_seed()

      def per_replica_step(loss_sum, x, y):
        with tf.GradientTape() as tape:
          logits = model_tf(x)
          per_example_loss = tf.keras.losses.sparse_categorical_crossentropy(
              y_true=y, y_pred=logits, from_logits=True)
          loss = tf.nn.compute_average_loss(per_example_loss,
                                            global_batch_size=batch_size *
                                            popdist.getNumTotalReplicas())
          loss_sum += loss
        gradients_ = tape.gradient(loss, model_tf.trainable_variables)
        gradient_enqueue_op = outfeed_queue_gradients.enqueue(gradients_)
        loss_enqueue_op = outfeed_queue_losses.enqueue(loss)
        train_op = optimizer.apply_gradients(
            zip(gradients_, model_tf.trainable_variables))
        return loss_sum, train_op, gradient_enqueue_op, loss_enqueue_op

      def per_replica_loop():
        return ipu.loops.repeat(num_iterations,
                                per_replica_step,
                                infeed_queue=infeed_queue,
                                inputs=[0.0])

      def run_model():
        per_replica_loss = strategy.run(per_replica_loop)
        return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_loss)

      with ipu.scopes.ipu_scope("/device:IPU:0"):
        compiled_model = ipu.ipu_compiler.compile(run_model)
      with session.Session() as sess:
        sess.run(infeed_queue.initializer)
        sess.run(tf.compat.v1.global_variables_initializer())
        # The compiled loop returns the summed loss; average per iteration.
        loss = sess.run(compiled_model)[0] / num_iterations
        gradients = sess.run(outfeed_queue_gradients.dequeue())
        losses = sess.run(outfeed_queue_losses.dequeue())
        weights = [
            var.eval(session=sess) for var in model_tf.trainable_variables
        ]
        return loss, gradients, losses, weights

  def single_training_step_equal_keras(self):
    """Run the Keras equivalent of `single_training_step_equal_tf1` and
    return (loss, gradients, losses, weights) for comparison."""
    # This test verifies that a training loop in raw TensorFlow 1 and Keras
    # yield the same losses, gradients and weight updates.
    num_iterations = 2
    learning_rate = 0.5
    batch_size = 2

    def initialize_model_with_seed():
      # Make sure we initialize the kernels in a reproducible manner, create
      # an initializer with a constant seed.
      initializer = tf.keras.initializers.GlorotNormal(seed=1234)
      return tf.keras.models.Sequential([
          tf.keras.layers.Flatten(),
          tf.keras.layers.Dense(2,
                                kernel_initializer=initializer,
                                use_bias=False),
      ])

    # Instantiate a custom optimizer that allows us to keep track of the
    # gradients in `model.fit()`.
    class ModelKeras(tf.keras.Sequential):
      # Sequential subclass whose train_step mirrors the raw-TF loop and
      # streams per-step losses/gradients through outfeed queues.
      def __init__(self, layers):
        super(ModelKeras, self).__init__(layers)
        self.outfeed_queue_gradients = ipu.ipu_outfeed_queue.IPUOutfeedQueue()
        self.outfeed_queue_losses = ipu.ipu_outfeed_queue.IPUOutfeedQueue()

      def train_step(self, data):
        data = data_adapter.expand_1d(data)
        x, y, _ = data_adapter.unpack_x_y_sample_weight(data)
        with tf.GradientTape() as tape:
          y_pred = self(x, training=True)
          loss = self.compiled_loss(y,
                                    y_pred,
                                    regularization_losses=self.losses)
          # Save the loss to an outfeed queue so we can use it later.
          self.outfeed_queue_losses.enqueue(loss)
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Save the gradients to an outfeed queue so we can use them later.
        self.outfeed_queue_gradients.enqueue(gradients)
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        self.compiled_metrics.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}

    # First generate some random data using numpy, so we can reuse the same
    # data for both TF1 and Keras in order to force reproducibility.
    np.random.seed(1234)
    input_sample = np.random.uniform(
        0, 1, (batch_size * popdist.getNumLocalReplicas(), 4, 4, 1)).astype(
            np.float32)
    output_sample = np.random.randint(
        1, 2,
        size=batch_size * popdist.getNumLocalReplicas()).astype(np.float32)
    config = ipu.config.IPUConfig()
    popdist.tensorflow.set_ipu_config(config, ipus_per_replica=1)
    config.configure_ipu_system()
    hvd.init()
    strategy = popdist_strategy.PopDistStrategy()
    with strategy.scope():
      dataset = tf.data.Dataset.from_tensor_slices(
          (input_sample, output_sample))
      dataset = dataset.repeat()
      dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
      optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
      loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
      model_keras = ModelKeras(initialize_model_with_seed())
      model_keras.compile(optimizer=optimizer,
                          loss=loss_fn,
                          steps_per_execution=popdist.getNumTotalReplicas() *
                          num_iterations)
      history = model_keras.fit(dataset,
                                steps_per_epoch=popdist.getNumTotalReplicas() *
                                num_iterations,
                                epochs=1)
      loss = history.history['loss'][0]
      # Extract weights to numpy now we are still in eager mode, this will not be
      # possible afterwards.
      weights = [var.numpy() for var in model_keras.trainable_variables]
      gradients = [
          g.numpy() for g in model_keras.outfeed_queue_gradients.dequeue()
      ]
      losses = [l.numpy() for l in model_keras.outfeed_queue_losses.dequeue()]
      return loss, gradients, losses, weights

  def test_single_training_step_equal_tf1_and_keras(self):
    """Compare the TF1 and Keras runs: losses, gradients and weights must
    all be identical."""
    loss_tf1, gradients_tf1, losses_tf1, weights_tf1 =\
        self.single_training_step_equal_tf1()
    loss_keras, gradients_keras, losses_keras, weights_keras =\
        self.single_training_step_equal_keras()
    np.testing.assert_equal(loss_tf1, loss_keras)
    # Assert that both models have identical losses (both reduced and non-
    # reduced.
    # NOTE(review): this assert_almost_equal duplicates (and is weaker than)
    # the exact assert_equal directly above — likely redundant.
    np.testing.assert_almost_equal(loss_keras, loss_tf1)
    for l_1, l_2 in zip(losses_keras, losses_tf1):
      np.testing.assert_equal(l_1, l_2)
    # Assert that both models have the same gradients.
    np.testing.assert_equal(gradients_keras, gradients_tf1)
    # Assert that both models have the same weights after the first backwards
    # pass.
    for w_1, w_2 in zip(weights_tf1, weights_keras):
      np.testing.assert_equal(w_1, w_2)
# Entry point: run the test suite under the TensorFlow test runner.
if __name__ == "__main__":
  test.main()
|
{"hexsha": "159ff0eb95ed4586a9341cd3606db6604ee1d0f6", "size": 22940, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/compiler/plugin/poplar/tests/distributed_tf2_test.py", "max_stars_repo_name": "chenzhengda/tensorflow", "max_stars_repo_head_hexsha": "8debb698097670458b5f21d728bc6f734a7b5a53", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-07-06T17:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T06:31:28.000Z", "max_issues_repo_path": "tensorflow/compiler/plugin/poplar/tests/distributed_tf2_test.py", "max_issues_repo_name": "chenzhengda/tensorflow", "max_issues_repo_head_hexsha": "8debb698097670458b5f21d728bc6f734a7b5a53", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-10-13T23:25:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T06:54:48.000Z", "max_forks_repo_path": "tensorflow/compiler/plugin/poplar/tests/distributed_tf2_test.py", "max_forks_repo_name": "chenzhengda/tensorflow", "max_forks_repo_head_hexsha": "8debb698097670458b5f21d728bc6f734a7b5a53", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-07-08T07:27:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T08:54:27.000Z", "avg_line_length": 37.1197411003, "max_line_length": 80, "alphanum_fraction": 0.6504359198, "include": true, "reason": "import numpy", "num_tokens": 4982}
|
[STATEMENT]
lemma Invoke_correct:
fixes \<sigma>' :: jvm_state
assumes wtprog: "wf_jvm_prog\<^bsub>\<Phi>\<^esub> P"
assumes meth_C: "P \<turnstile> C sees M:Ts\<rightarrow>T=(mxs,mxl\<^sub>0,ins,xt) in C"
assumes ins: "ins ! pc = Invoke M' n"
assumes wti: "P,T,mxs,size ins,xt \<turnstile> ins!pc,pc :: \<Phi> C M"
assumes \<sigma>': "Some \<sigma>' = exec (P, None, h, (stk,loc,C,M,pc)#frs)"
assumes approx: "P,\<Phi> \<turnstile> (None, h, (stk,loc,C,M,pc)#frs)\<surd>"
assumes no_xcp: "fst (exec_instr (ins!pc) P h stk loc C M pc frs) = None"
shows "P,\<Phi> \<turnstile> \<sigma>'\<surd>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
note split_paired_Ex [simp del]
[PROOF STATE]
proof (state)
this:
(\<exists>x. ?P x) = (\<exists>a b. ?P (a, b))
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from wtprog
[PROOF STATE]
proof (chain)
picking this:
wf_jvm_prog\<^bsub>\<Phi>\<^esub> P
[PROOF STEP]
obtain wfmb where wfprog: "wf_prog wfmb P"
[PROOF STATE]
proof (prove)
using this:
wf_jvm_prog\<^bsub>\<Phi>\<^esub> P
goal (1 subgoal):
1. (\<And>wfmb. wf_prog wfmb P \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (simp add: wf_jvm_prog_phi_def)
[PROOF STATE]
proof (state)
this:
wf_prog wfmb P
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from ins meth_C approx
[PROOF STATE]
proof (chain)
picking this:
ins ! pc = Invoke M' n
P \<turnstile> C sees M: Ts\<rightarrow>T = (mxs, mxl\<^sub>0, ins, xt) in C
P,\<Phi> |- (None, h, (stk, loc, C, M, pc) # frs) [ok]
[PROOF STEP]
obtain ST LT where
heap_ok: "P\<turnstile> h\<surd>" and
\<Phi>_pc: "\<Phi> C M!pc = Some (ST,LT)" and
frame: "conf_f P h (ST,LT) ins (stk,loc,C,M,pc)" and
frames: "conf_fs P h \<Phi> M (size Ts) T frs"
[PROOF STATE]
proof (prove)
using this:
ins ! pc = Invoke M' n
P \<turnstile> C sees M: Ts\<rightarrow>T = (mxs, mxl\<^sub>0, ins, xt) in C
P,\<Phi> |- (None, h, (stk, loc, C, M, pc) # frs) [ok]
goal (1 subgoal):
1. (\<And>ST LT. \<lbrakk>P \<turnstile> h \<surd>; \<Phi> C M ! pc = \<lfloor>(ST, LT)\<rfloor>; conf_f P h (ST, LT) ins (stk, loc, C, M, pc); conf_fs P h \<Phi> M (length Ts) T frs\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (fastforce dest: sees_method_fun)
[PROOF STATE]
proof (state)
this:
P \<turnstile> h \<surd>
\<Phi> C M ! pc = \<lfloor>(ST, LT)\<rfloor>
conf_f P h (ST, LT) ins (stk, loc, C, M, pc)
conf_fs P h \<Phi> M (length Ts) T frs
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from ins wti \<Phi>_pc
[PROOF STATE]
proof (chain)
picking this:
ins ! pc = Invoke M' n
P,T,mxs,length ins,xt \<turnstile> ins ! pc,pc :: \<Phi> C M
\<Phi> C M ! pc = \<lfloor>(ST, LT)\<rfloor>
[PROOF STEP]
have n: "n < size ST"
[PROOF STATE]
proof (prove)
using this:
ins ! pc = Invoke M' n
P,T,mxs,length ins,xt \<turnstile> ins ! pc,pc :: \<Phi> C M
\<Phi> C M ! pc = \<lfloor>(ST, LT)\<rfloor>
goal (1 subgoal):
1. n < length ST
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
n < length ST
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
n < length ST
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
assume "stk!n = Null"
[PROOF STATE]
proof (state)
this:
stk ! n = Null
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
with ins no_xcp
[PROOF STATE]
proof (chain)
picking this:
ins ! pc = Invoke M' n
fst (exec_instr (ins ! pc) P h stk loc C M pc frs) = None
stk ! n = Null
[PROOF STEP]
have False
[PROOF STATE]
proof (prove)
using this:
ins ! pc = Invoke M' n
fst (exec_instr (ins ! pc) P h stk loc C M pc frs) = None
stk ! n = Null
goal (1 subgoal):
1. False
[PROOF STEP]
by (simp add: split_beta)
[PROOF STATE]
proof (state)
this:
False
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
hence ?thesis
[PROOF STATE]
proof (prove)
using this:
False
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
stk ! n = Null \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
stk ! n = Null \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
stk ! n = Null \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
assume "ST!n = NT"
[PROOF STATE]
proof (state)
this:
ST ! n = NT
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
ST ! n = NT
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from frame
[PROOF STATE]
proof (chain)
picking this:
conf_f P h (ST, LT) ins (stk, loc, C, M, pc)
[PROOF STEP]
have "P,h \<turnstile> stk [:\<le>] ST"
[PROOF STATE]
proof (prove)
using this:
conf_f P h (ST, LT) ins (stk, loc, C, M, pc)
goal (1 subgoal):
1. P,h \<turnstile> stk [:\<le>] ST
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> stk [:\<le>] ST
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
with n
[PROOF STATE]
proof (chain)
picking this:
n < length ST
P,h \<turnstile> stk [:\<le>] ST
[PROOF STEP]
have "P,h \<turnstile> stk!n :\<le> ST!n"
[PROOF STATE]
proof (prove)
using this:
n < length ST
P,h \<turnstile> stk [:\<le>] ST
goal (1 subgoal):
1. P,h \<turnstile> stk ! n :\<le> ST ! n
[PROOF STEP]
by (simp add: list_all2_conv_all_nth)
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> stk ! n :\<le> ST ! n
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
ST ! n = NT
P,h \<turnstile> stk ! n :\<le> ST ! n
[PROOF STEP]
have "stk!n = Null"
[PROOF STATE]
proof (prove)
using this:
ST ! n = NT
P,h \<turnstile> stk ! n :\<le> ST ! n
goal (1 subgoal):
1. stk ! n = Null
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
stk ! n = Null
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
with ins no_xcp
[PROOF STATE]
proof (chain)
picking this:
ins ! pc = Invoke M' n
fst (exec_instr (ins ! pc) P h stk loc C M pc frs) = None
stk ! n = Null
[PROOF STEP]
have False
[PROOF STATE]
proof (prove)
using this:
ins ! pc = Invoke M' n
fst (exec_instr (ins ! pc) P h stk loc C M pc frs) = None
stk ! n = Null
goal (1 subgoal):
1. False
[PROOF STEP]
by (simp add: split_beta)
[PROOF STATE]
proof (state)
this:
False
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
hence ?thesis
[PROOF STATE]
proof (prove)
using this:
False
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
ST ! n = NT \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
ST ! n = NT \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
ST ! n = NT \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
assume NT: "ST!n \<noteq> NT" and Null: "stk!n \<noteq> Null"
[PROOF STATE]
proof (state)
this:
ST ! n \<noteq> NT
stk ! n \<noteq> Null
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from NT ins wti \<Phi>_pc
[PROOF STATE]
proof (chain)
picking this:
ST ! n \<noteq> NT
ins ! pc = Invoke M' n
P,T,mxs,length ins,xt \<turnstile> ins ! pc,pc :: \<Phi> C M
\<Phi> C M ! pc = \<lfloor>(ST, LT)\<rfloor>
[PROOF STEP]
obtain D D' Ts T m ST' LT' where
D: "ST!n = Class D" and
pc': "pc+1 < size ins" and
m_D: "P \<turnstile> D sees M': Ts\<rightarrow>T = m in D'" and
Ts: "P \<turnstile> rev (take n ST) [\<le>] Ts" and
\<Phi>': "\<Phi> C M ! (pc+1) = Some (ST', LT')" and
LT': "P \<turnstile> LT [\<le>\<^sub>\<top>] LT'" and
ST': "P \<turnstile> (T # drop (n+1) ST) [\<le>] ST'"
[PROOF STATE]
proof (prove)
using this:
ST ! n \<noteq> NT
ins ! pc = Invoke M' n
P,T,mxs,length ins,xt \<turnstile> ins ! pc,pc :: \<Phi> C M
\<Phi> C M ! pc = \<lfloor>(ST, LT)\<rfloor>
goal (1 subgoal):
1. (\<And>D Ts T m D' ST' LT'. \<lbrakk>ST ! n = Class D; pc + 1 < length ins; P \<turnstile> D sees M': Ts\<rightarrow>T = m in D'; P \<turnstile> rev (take n ST) [\<le>] Ts; \<Phi> C M ! (pc + 1) = \<lfloor>(ST', LT')\<rfloor>; P \<turnstile> LT [\<le>\<^sub>\<top>] LT'; P \<turnstile> (T # drop (n + 1) ST) [\<le>] ST'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (clarsimp simp add: sup_state_opt_any_Some)
[PROOF STATE]
proof (state)
this:
ST ! n = Class D
pc + 1 < length ins
P \<turnstile> D sees M': Ts\<rightarrow>T = m in D'
P \<turnstile> rev (take n ST) [\<le>] Ts
\<Phi> C M ! (pc + 1) = \<lfloor>(ST', LT')\<rfloor>
P \<turnstile> LT [\<le>\<^sub>\<top>] LT'
P \<turnstile> (T # drop (n + 1) ST) [\<le>] ST'
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from frame
[PROOF STATE]
proof (chain)
picking this:
conf_f P h (ST, LT) ins (stk, loc, C, M, pc)
[PROOF STEP]
obtain
stk: "P,h \<turnstile> stk [:\<le>] ST" and
loc: "P,h \<turnstile> loc [:\<le>\<^sub>\<top>] LT"
[PROOF STATE]
proof (prove)
using this:
conf_f P h (ST, LT) ins (stk, loc, C, M, pc)
goal (1 subgoal):
1. (\<lbrakk>P,h \<turnstile> stk [:\<le>] ST; P,h \<turnstile> loc [:\<le>\<^sub>\<top>] LT\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> stk [:\<le>] ST
P,h \<turnstile> loc [:\<le>\<^sub>\<top>] LT
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from n stk D
[PROOF STATE]
proof (chain)
picking this:
n < length ST
P,h \<turnstile> stk [:\<le>] ST
ST ! n = Class D
[PROOF STEP]
have "P,h \<turnstile> stk!n :\<le> Class D"
[PROOF STATE]
proof (prove)
using this:
n < length ST
P,h \<turnstile> stk [:\<le>] ST
ST ! n = Class D
goal (1 subgoal):
1. P,h \<turnstile> stk ! n :\<le> Class D
[PROOF STEP]
by (auto simp add: list_all2_conv_all_nth)
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> stk ! n :\<le> Class D
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
with Null
[PROOF STATE]
proof (chain)
picking this:
stk ! n \<noteq> Null
P,h \<turnstile> stk ! n :\<le> Class D
[PROOF STEP]
obtain a C' fs where
Addr: "stk!n = Addr a" and
obj: "h a = Some (C',fs)" and
C'subD: "P \<turnstile> C' \<preceq>\<^sup>* D"
[PROOF STATE]
proof (prove)
using this:
stk ! n \<noteq> Null
P,h \<turnstile> stk ! n :\<le> Class D
goal (1 subgoal):
1. (\<And>a C' fs. \<lbrakk>stk ! n = Addr a; h a = \<lfloor>(C', fs)\<rfloor>; P \<turnstile> C' \<preceq>\<^sup>* D\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (fastforce dest!: conf_ClassD)
[PROOF STATE]
proof (state)
this:
stk ! n = Addr a
h a = \<lfloor>(C', fs)\<rfloor>
P \<turnstile> C' \<preceq>\<^sup>* D
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
with wfprog m_D
[PROOF STATE]
proof (chain)
picking this:
wf_prog wfmb P
P \<turnstile> D sees M': Ts\<rightarrow>T = m in D'
stk ! n = Addr a
h a = \<lfloor>(C', fs)\<rfloor>
P \<turnstile> C' \<preceq>\<^sup>* D
[PROOF STEP]
obtain Ts' T' m' D'' mxs' mxl' ins' xt' where
m_C': "P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs',mxl',ins',xt') in D''" and
T': "P \<turnstile> T' \<le> T" and
Ts': "P \<turnstile> Ts [\<le>] Ts'"
[PROOF STATE]
proof (prove)
using this:
wf_prog wfmb P
P \<turnstile> D sees M': Ts\<rightarrow>T = m in D'
stk ! n = Addr a
h a = \<lfloor>(C', fs)\<rfloor>
P \<turnstile> C' \<preceq>\<^sup>* D
goal (1 subgoal):
1. (\<And>Ts' T' mxs' mxl' ins' xt' D''. \<lbrakk>P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''; subtype P T' T; P \<turnstile> Ts [\<le>] Ts'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto dest: sees_method_mono)
[PROOF STATE]
proof (state)
this:
P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
subtype P T' T
P \<turnstile> Ts [\<le>] Ts'
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
let ?loc' = "Addr a # rev (take n stk) @ replicate mxl' undefined"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
let ?f' = "([], ?loc', D'', M', 0)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
let ?f = "(stk, loc, C, M, pc)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from Addr obj m_C' ins \<sigma>' meth_C
[PROOF STATE]
proof (chain)
picking this:
stk ! n = Addr a
h a = \<lfloor>(C', fs)\<rfloor>
P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
ins ! pc = Invoke M' n
\<lfloor>\<sigma>'\<rfloor> = exec (P, None, h, (stk, loc, C, M, pc) # frs)
P \<turnstile> C sees M: Ts\<rightarrow>T = (mxs, mxl\<^sub>0, ins, xt) in C
[PROOF STEP]
have s': "\<sigma>' = (None, h, ?f' # ?f # frs)"
[PROOF STATE]
proof (prove)
using this:
stk ! n = Addr a
h a = \<lfloor>(C', fs)\<rfloor>
P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
ins ! pc = Invoke M' n
\<lfloor>\<sigma>'\<rfloor> = exec (P, None, h, (stk, loc, C, M, pc) # frs)
P \<turnstile> C sees M: Ts\<rightarrow>T = (mxs, mxl\<^sub>0, ins, xt) in C
goal (1 subgoal):
1. \<sigma>' = (None, h, ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0) # (stk, loc, C, M, pc) # frs)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<sigma>' = (None, h, ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0) # (stk, loc, C, M, pc) # frs)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from Ts n
[PROOF STATE]
proof (chain)
picking this:
P \<turnstile> rev (take n ST) [\<le>] Ts
n < length ST
[PROOF STEP]
have [simp]: "size Ts = n"
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> rev (take n ST) [\<le>] Ts
n < length ST
goal (1 subgoal):
1. length Ts = n
[PROOF STEP]
by (auto dest: list_all2_lengthD simp: min_def)
[PROOF STATE]
proof (state)
this:
length Ts = n
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
with Ts'
[PROOF STATE]
proof (chain)
picking this:
P \<turnstile> Ts [\<le>] Ts'
length Ts = n
[PROOF STEP]
have [simp]: "size Ts' = n"
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> Ts [\<le>] Ts'
length Ts = n
goal (1 subgoal):
1. length Ts' = n
[PROOF STEP]
by (auto dest: list_all2_lengthD)
[PROOF STATE]
proof (state)
this:
length Ts' = n
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
from m_C' wfprog
[PROOF STATE]
proof (chain)
picking this:
P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
wf_prog wfmb P
[PROOF STEP]
obtain mD'': "P \<turnstile> D'' sees M':Ts'\<rightarrow>T'=(mxs',mxl',ins',xt') in D''"
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
wf_prog wfmb P
goal (1 subgoal):
1. (P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D'' \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (fast dest: sees_method_idemp)
[PROOF STATE]
proof (state)
this:
P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
with wtprog
[PROOF STATE]
proof (chain)
picking this:
wf_jvm_prog\<^bsub>\<Phi>\<^esub> P
P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
[PROOF STEP]
obtain start: "wt_start P D'' Ts' mxl' (\<Phi> D'' M')" and ins': "ins' \<noteq> []"
[PROOF STATE]
proof (prove)
using this:
wf_jvm_prog\<^bsub>\<Phi>\<^esub> P
P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
goal (1 subgoal):
1. (\<lbrakk>wt_start P D'' Ts' mxl' (\<Phi> D'' M'); ins' \<noteq> []\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto dest: wt_jvm_prog_impl_wt_start)
[PROOF STATE]
proof (state)
this:
wt_start P D'' Ts' mxl' (\<Phi> D'' M')
ins' \<noteq> []
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
wt_start P D'' Ts' mxl' (\<Phi> D'' M')
ins' \<noteq> []
[PROOF STEP]
obtain LT\<^sub>0 where LT\<^sub>0: "\<Phi> D'' M' ! 0 = Some ([], LT\<^sub>0)"
[PROOF STATE]
proof (prove)
using this:
wt_start P D'' Ts' mxl' (\<Phi> D'' M')
ins' \<noteq> []
goal (1 subgoal):
1. (\<And>LT\<^sub>0. \<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (clarsimp simp add: wt_start_def defs1 sup_state_opt_any_Some)
[PROOF STATE]
proof (state)
this:
\<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor>
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
\<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor>
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
have "conf_f P h ([], LT\<^sub>0) ins' ?f'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
let ?LT = "OK (Class D'') # (map OK Ts') @ (replicate mxl' Err)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
from stk
[PROOF STATE]
proof (chain)
picking this:
P,h \<turnstile> stk [:\<le>] ST
[PROOF STEP]
have "P,h \<turnstile> take n stk [:\<le>] take n ST"
[PROOF STATE]
proof (prove)
using this:
P,h \<turnstile> stk [:\<le>] ST
goal (1 subgoal):
1. P,h \<turnstile> take n stk [:\<le>] take n ST
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> take n stk [:\<le>] take n ST
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
hence "P,h \<turnstile> rev (take n stk) [:\<le>] rev (take n ST)"
[PROOF STATE]
proof (prove)
using this:
P,h \<turnstile> take n stk [:\<le>] take n ST
goal (1 subgoal):
1. P,h \<turnstile> rev (take n stk) [:\<le>] rev (take n ST)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> rev (take n stk) [:\<le>] rev (take n ST)
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> rev (take n stk) [:\<le>] rev (take n ST)
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
note Ts
[PROOF STATE]
proof (state)
this:
P \<turnstile> rev (take n ST) [\<le>] Ts
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
P \<turnstile> rev (take n ST) [\<le>] Ts
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
note Ts'
[PROOF STATE]
proof (state)
this:
P \<turnstile> Ts [\<le>] Ts'
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
P,h \<turnstile> rev (take n stk) [:\<le>] Ts'
[PROOF STEP]
have "P,h \<turnstile> rev (take n stk) [:\<le>\<^sub>\<top>] map OK Ts'"
[PROOF STATE]
proof (prove)
using this:
P,h \<turnstile> rev (take n stk) [:\<le>] Ts'
goal (1 subgoal):
1. P,h \<turnstile> rev (take n stk) [:\<le>\<^sub>\<top>] map OK Ts'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> rev (take n stk) [:\<le>\<^sub>\<top>] map OK Ts'
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> rev (take n stk) [:\<le>\<^sub>\<top>] map OK Ts'
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
have "P,h \<turnstile> replicate mxl' undefined [:\<le>\<^sub>\<top>] replicate mxl' Err"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P,h \<turnstile> replicate mxl' undefined [:\<le>\<^sub>\<top>] replicate mxl' Err
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> replicate mxl' undefined [:\<le>\<^sub>\<top>] replicate mxl' Err
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> replicate mxl' undefined [:\<le>\<^sub>\<top>] replicate mxl' Err
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
from m_C'
[PROOF STATE]
proof (chain)
picking this:
P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
[PROOF STEP]
have "P \<turnstile> C' \<preceq>\<^sup>* D''"
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> C' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
goal (1 subgoal):
1. P \<turnstile> C' \<preceq>\<^sup>* D''
[PROOF STEP]
by (rule sees_method_decl_above)
[PROOF STATE]
proof (state)
this:
P \<turnstile> C' \<preceq>\<^sup>* D''
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
with obj
[PROOF STATE]
proof (chain)
picking this:
h a = \<lfloor>(C', fs)\<rfloor>
P \<turnstile> C' \<preceq>\<^sup>* D''
[PROOF STEP]
have "P,h \<turnstile> Addr a :\<le> Class D''"
[PROOF STATE]
proof (prove)
using this:
h a = \<lfloor>(C', fs)\<rfloor>
P \<turnstile> C' \<preceq>\<^sup>* D''
goal (1 subgoal):
1. P,h \<turnstile> Addr a :\<le> Class D''
[PROOF STEP]
by (simp add: conf_def)
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> Addr a :\<le> Class D''
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P,h \<turnstile> rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] map OK Ts' @ replicate mxl' Err
P,h \<turnstile> Addr a :\<le> Class D''
[PROOF STEP]
have "P,h \<turnstile> ?loc' [:\<le>\<^sub>\<top>] ?LT"
[PROOF STATE]
proof (prove)
using this:
P,h \<turnstile> rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] map OK Ts' @ replicate mxl' Err
P,h \<turnstile> Addr a :\<le> Class D''
goal (1 subgoal):
1. P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] OK (Class D'') # map OK Ts' @ replicate mxl' Err
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] OK (Class D'') # map OK Ts' @ replicate mxl' Err
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] OK (Class D'') # map OK Ts' @ replicate mxl' Err
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
from start LT\<^sub>0
[PROOF STATE]
proof (chain)
picking this:
wt_start P D'' Ts' mxl' (\<Phi> D'' M')
\<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor>
[PROOF STEP]
have "P \<turnstile> \<dots> [\<le>\<^sub>\<top>] LT\<^sub>0"
[PROOF STATE]
proof (prove)
using this:
wt_start P D'' Ts' mxl' (\<Phi> D'' M')
\<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor>
goal (1 subgoal):
1. P \<turnstile> (OK (Class D'') # map OK Ts' @ replicate mxl' Err) [\<le>\<^sub>\<top>] LT\<^sub>0
[PROOF STEP]
by (simp add: wt_start_def)
[PROOF STATE]
proof (state)
this:
P \<turnstile> (OK (Class D'') # map OK Ts' @ replicate mxl' Err) [\<le>\<^sub>\<top>] LT\<^sub>0
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] LT\<^sub>0
[PROOF STEP]
have "P,h \<turnstile> ?loc' [:\<le>\<^sub>\<top>] LT\<^sub>0"
[PROOF STATE]
proof (prove)
using this:
P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] LT\<^sub>0
goal (1 subgoal):
1. P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] LT\<^sub>0
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] LT\<^sub>0
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] LT\<^sub>0
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
using ins'
[PROOF STATE]
proof (prove)
using this:
P,h \<turnstile> Addr a # rev (take n stk) @ replicate mxl' undefined [:\<le>\<^sub>\<top>] LT\<^sub>0
ins' \<noteq> []
goal (1 subgoal):
1. conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
\<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor>
conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
[PROOF STEP]
have ?thesis
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
\<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor>
conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
using s' \<Phi>_pc approx meth_C m_D T' ins D
[PROOF STATE]
proof (prove)
using this:
P \<turnstile> D'' sees M': Ts'\<rightarrow>T' = (mxs', mxl', ins', xt') in D''
\<Phi> D'' M' ! 0 = \<lfloor>([], LT\<^sub>0)\<rfloor>
conf_f P h ([], LT\<^sub>0) ins' ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0)
\<sigma>' = (None, h, ([], Addr a # rev (take n stk) @ replicate mxl' undefined, D'', M', 0) # (stk, loc, C, M, pc) # frs)
\<Phi> C M ! pc = \<lfloor>(ST, LT)\<rfloor>
P,\<Phi> |- (None, h, (stk, loc, C, M, pc) # frs) [ok]
P \<turnstile> C sees M: Ts\<rightarrow>T = (mxs, mxl\<^sub>0, ins, xt) in C
P \<turnstile> D sees M': Ts\<rightarrow>T = m in D'
subtype P T' T
ins ! pc = Invoke M' n
ST ! n = Class D
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
by (fastforce dest: sees_method_fun [of _ C])
[PROOF STATE]
proof (state)
this:
P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>ST ! n \<noteq> NT; stk ! n \<noteq> Null\<rbrakk> \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
stk ! n = Null \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
ST ! n = NT \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
\<lbrakk>ST ! n \<noteq> NT; stk ! n \<noteq> Null\<rbrakk> \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
stk ! n = Null \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
ST ! n = NT \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
\<lbrakk>ST ! n \<noteq> NT; stk ! n \<noteq> Null\<rbrakk> \<Longrightarrow> P,\<Phi> |- \<sigma>' [ok]
goal (1 subgoal):
1. P,\<Phi> |- \<sigma>' [ok]
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P,\<Phi> |- \<sigma>' [ok]
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 13555, "file": "Jinja_BV_BVSpecTypeSafe", "length": 128}
|
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/*****************************************************************************
* $Id: we_colbufcompressed.cpp 4737 2013-08-14 20:45:46Z bwilkinson $
*
****************************************************************************/
/** @file
* Implementation of the ColumnBufferCompressed class
*
*/
#include "we_colbufcompressed.h"
#include <cassert>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <sstream>
#include <boost/scoped_array.hpp>
#include "we_define.h"
#include "we_config.h"
#include "we_convertor.h"
#include "we_columninfo.h"
#include "we_fileop.h"
#include "we_log.h"
#include "we_stats.h"
#include "IDBDataFile.h"
using namespace idbdatafile;
#include "idbcompress.h"
using namespace compress;
namespace WriteEngine
{
//------------------------------------------------------------------------------
// Constructor
//
// pColInfo - column meta-data for the column this buffer is writing
// logger   - logger used for debug/info/error messages
//
// The to-be-compressed buffer itself is allocated lazily (see
// resetToBeCompressedColBuf/writeToFile), so capacity and byte count start
// at 0 here.
//------------------------------------------------------------------------------
ColumnBufferCompressed::ColumnBufferCompressed( ColumnInfo* pColInfo,
    Log* logger) :
    ColumnBuffer(pColInfo, logger),
    fToBeCompressedBuffer(0),
    fToBeCompressedCapacity(0),
    fNumBytes(0),
    fPreLoadHWMChunk(true),
    fFlushedStartHwmChunk(false)
{
    // Chunk padding is configured in whole blocks; convert to bytes and hand
    // the same figure to the compressor pool so compressed output is padded
    // consistently.
    fUserPaddingBytes = Config::getNumCompressedPadBlks() * BYTE_PER_BLOCK;
    compress::initializeCompressorPool(fCompressorPool, fUserPaddingBytes);
}
//------------------------------------------------------------------------------
// Destructor
//
// Releases the to-be-compressed buffer (if it was ever allocated) and resets
// the buffer bookkeeping members.
//------------------------------------------------------------------------------
ColumnBufferCompressed::~ColumnBufferCompressed()
{
    // delete[] on a null pointer is a well-defined no-op, so the previous
    // "if (fToBeCompressedBuffer)" guard was redundant.
    delete [] fToBeCompressedBuffer;

    fToBeCompressedBuffer   = 0;
    fToBeCompressedCapacity = 0;
    fNumBytes               = 0;
}
//------------------------------------------------------------------------------
// Point "this" ColumnBufferCompressed object at a different db file:
// store the file handle and starting HWM, and rebuild the chunk pointer
// list from the file's compression headers.
//
// f        - open db file to work with
// startHwm - starting high-water-mark block for this file
// hdrs     - raw compression header bytes to parse chunk pointers from
//
// returns NO_ERROR on success, ERR_COMP_PARSE_HDRS if the headers cannot be
// parsed, or ERR_COMP_WRONG_COMP_TYPE for an unrecognized compression type.
//------------------------------------------------------------------------------
int ColumnBufferCompressed::setDbFile(IDBDataFile* f, HWM startHwm, const char* hdrs)
{
    fStartingHwm = startHwm;
    fFile        = f;

    // Extract the chunk pointer list from the compression headers.
    if (compress::CompressInterface::getPtrList(hdrs, fChunkPtrs) != 0)
        return ERR_COMP_PARSE_HDRS;

    auto compressor = compress::getCompressorByType(
        fCompressorPool, fColInfo->column.compressionType);

    if (!compressor)
        return ERR_COMP_WRONG_COMP_TYPE;

    // Find the chunk containing the starting HWM block.  Any chunk pointers
    // beyond that chunk are orphans (ex: left over after a DML rollback) and
    // are trimmed off.
    unsigned int hwmChunkIdx      = 0;
    unsigned int blkOffsetInChunk = 0;
    compressor->locateBlock(fStartingHwm, hwmChunkIdx, blkOffsetInChunk);

    if (fChunkPtrs.size() > (hwmChunkIdx + 1))
        fChunkPtrs.resize(hwmChunkIdx + 1);

    return NO_ERROR;
}
//------------------------------------------------------------------------------
// Reinitialize the to-be-compressed column buffer (to an empty chunk) prior
// to importing the first chunk of the next extent.
//
// startFileOffset (output) - file offset (in bytes) where the next extent
//                            will start.
//
// returns NO_ERROR.
//------------------------------------------------------------------------------
int ColumnBufferCompressed::resetToBeCompressedColBuf(
    long long& startFileOffset )
{
    // Once we move on to the next extent, we never preload an HWM chunk.
    fPreLoadHWMChunk = false;

    // The to-be-compressed buffer is created lazily, on first use.
    if (fToBeCompressedBuffer == 0)
    {
        fToBeCompressedBuffer =
            new unsigned char[CompressInterface::UNCOMPRESSED_INBUF_LEN];
    }

    // Fill the whole buffer with this column's empty value.
    BlockOp::setEmptyBuf( fToBeCompressedBuffer,
                          CompressInterface::UNCOMPRESSED_INBUF_LEN,
                          fColInfo->column.emptyVal,
                          fColInfo->column.width );

    if (fLog->isDebug( DEBUG_2 ))
    {
        std::ostringstream oss;
        oss << "Initializing empty chunk for next extent: OID-" <<
            fColInfo->curCol.dataFile.fid <<
            "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
            "; part-"   << fColInfo->curCol.dataFile.fPartition <<
            "; seg-"    << fColInfo->curCol.dataFile.fSegment <<
            "; hwm-"    << fStartingHwm;
        fLog->logMsg( oss.str(), MSGLVL_INFO2 );
    }

    fToBeCompressedCapacity = CompressInterface::UNCOMPRESSED_INBUF_LEN;

    // The next extent starts just past the end of the last chunk; when no
    // chunks exist yet, it starts right after the two compression header
    // blocks.
    if (fChunkPtrs.empty())
    {
        startFileOffset = CompressInterface::HDR_BUF_LEN * 2;
    }
    else
    {
        startFileOffset = fChunkPtrs.back().first + fChunkPtrs.back().second;
    }

    // Position ourselves at the start of the (empty) to-be-compressed buffer.
    fNumBytes = 0;

    return NO_ERROR;
}
//------------------------------------------------------------------------------
// Intercept data being copied from the raw-data output buffer to the output
// file, and instead buffer up the data to be compressed in 4M chunks before
// writing it out.
//
// startOffset    - byte offset into fBuffer where the data to copy begins
// writeSize      - number of bytes (starting at startOffset) to buffer up
// fillUpWEmpties - if true, also account for padding out to the next full
//                  block boundary.  The padding bytes are counted in
//                  fNumBytes but never memcpy'd: the to-be-compressed buffer
//                  is pre-filled with the column's empty value, so the
//                  trailing bytes of the block already hold "empties".
//
// returns NO_ERROR on success, else a WriteEngine error code from buffer
// initialization, file seeking, or chunk compression/flushing.
//------------------------------------------------------------------------------
int ColumnBufferCompressed::writeToFile(int startOffset, int writeSize,
    bool fillUpWEmpties)
{
    if (writeSize == 0) // skip unnecessary write, if 0 bytes given
        return NO_ERROR;

    // Extra bytes needed to round writeSize up to a full-block boundary.
    int fillUpWEmptiesWriteSize = 0;

    if (fillUpWEmpties)
        fillUpWEmptiesWriteSize = BYTE_PER_BLOCK - writeSize % BYTE_PER_BLOCK;

    // If we are starting a new file, we need to reinit the buffer and
    // find out what our file offset should be set to.
    if (!fToBeCompressedCapacity)
    {
#ifdef PROFILE
        Stats::startParseEvent(WE_STATS_COMPRESS_COL_INIT_BUF);
#endif
        long long startFileOffset;
        int rc = initToBeCompressedBuffer( startFileOffset );

        if (rc != NO_ERROR)
        {
            WErrorCodes ec;
            std::ostringstream oss;
            oss << "writeToFile: error initializing to-be-compressed buffer "
                "for OID " << fColInfo->curCol.dataFile.fid <<
                "; " << ec.errorString(rc);
            fLog->logMsg( oss.str(), rc, MSGLVL_ERROR );

            return rc;
        }

        // Seek to where this file's next chunk belongs before any flush.
        rc = fColInfo->colOp->setFileOffset(fFile, startFileOffset, SEEK_SET);

        if (rc != NO_ERROR)
        {
            WErrorCodes ec;
            std::ostringstream oss;
            oss << "writeToFile: error init compressed file offset for " <<
                "OID " << fColInfo->curCol.dataFile.fid <<
                "; " << startFileOffset <<
                "; " << ec.errorString(rc);
            fLog->logMsg( oss.str(), rc, MSGLVL_ERROR );

            return rc;
        }
#ifdef PROFILE
        Stats::stopParseEvent(WE_STATS_COMPRESS_COL_INIT_BUF);
#endif
    }

    // Current append position within the to-be-compressed buffer.
    unsigned char* bufOffset = fToBeCompressedBuffer + fNumBytes;

    // Expand the compression buffer size if working with an abbrev extent, and
    // the bytes we are about to add will overflow the abbreviated extent.
    if ((fToBeCompressedCapacity < CompressInterface::UNCOMPRESSED_INBUF_LEN) &&
        ((fNumBytes + writeSize + fillUpWEmptiesWriteSize) > fToBeCompressedCapacity) )
    {
        std::ostringstream oss;
        oss << "Expanding abbrev to-be-compressed buffer for: OID-" <<
            fColInfo->curCol.dataFile.fid <<
            "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
            "; part-" << fColInfo->curCol.dataFile.fPartition <<
            "; seg-" << fColInfo->curCol.dataFile.fSegment;
        fLog->logMsg( oss.str(), MSGLVL_INFO2 );

        fToBeCompressedCapacity = CompressInterface::UNCOMPRESSED_INBUF_LEN;
    }

    // Fast path: everything (data plus any block padding) fits in the
    // current to-be-compressed buffer, so just append and count the bytes.
    if ((fNumBytes + writeSize + fillUpWEmptiesWriteSize) <= fToBeCompressedCapacity)
    {
        if (fLog->isDebug( DEBUG_2 ))
        {
            std::ostringstream oss;
            oss << "Buffering data to-be-compressed for: OID-" <<
                fColInfo->curCol.dataFile.fid <<
                "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                "; part-" << fColInfo->curCol.dataFile.fPartition <<
                "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                "; addBytes-" << writeSize <<
                "; extraBytes-" << fillUpWEmptiesWriteSize <<
                "; totBytes-" << (fNumBytes + writeSize);
            fLog->logMsg( oss.str(), MSGLVL_INFO2 );
        }

        memcpy(bufOffset, (fBuffer + startOffset), writeSize);
        fNumBytes += writeSize;
        fNumBytes += fillUpWEmptiesWriteSize;
    }
    else // Not enough room to add all the data to the to-be-compressed buffer
    {
        int startOffsetX = startOffset;
        int writeSizeX   = writeSize;

        // The number of bytes (in fBuffer) to be written, could be larger than
        // our to-be-compressed buffer, so we require a loop to potentially
        // iterate thru all the bytes to be compressed and written from fBuffer
        while (writeSizeX > 0)
        {
            idbassert( (fNumBytes <= fToBeCompressedCapacity) ); // DMC-temp debug
            size_t writeSizeOut = 0;

            if ((fNumBytes + writeSizeX) > fToBeCompressedCapacity)
            {
                // Remaining data overflows the buffer: top the buffer off,
                // compress/flush it, and start a fresh empty buffer.
                writeSizeOut = fToBeCompressedCapacity - fNumBytes;

                if (fLog->isDebug( DEBUG_2 ))
                {
                    std::ostringstream oss;
                    oss << "Buffering data (full) to-be-compressed for: OID-" <<
                        fColInfo->curCol.dataFile.fid <<
                        "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                        "; part-" << fColInfo->curCol.dataFile.fPartition <<
                        "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                        "; addBytes-" << writeSizeOut <<
                        "; totBytes-" << (fNumBytes + writeSizeOut);
                    fLog->logMsg( oss.str(), MSGLVL_INFO2 );
                }

                if (writeSizeOut > 0)
                {
                    memcpy(bufOffset, (fBuffer + startOffsetX), writeSizeOut);
                    fNumBytes += writeSizeOut;
                }

                //char resp;
                //std::cout << "dbg: before writeToFile->compressAndFlush" <<
                //    std::endl;
                //std::cin >> resp;
                int rc = compressAndFlush( false );

                //std::cout << "dbg: after writeToFile->compressAndFlush" <<
                //    std::endl;
                //std::cin >> resp;
                if (rc != NO_ERROR)
                {
                    WErrorCodes ec;
                    std::ostringstream oss;
                    oss << "writeToFile: error compressing and writing chunk "
                        "for OID " << fColInfo->curCol.dataFile.fid <<
                        "; " << ec.errorString(rc);
                    fLog->logMsg( oss.str(), rc, MSGLVL_ERROR );

                    return rc;
                }

                // Start over again loading a new to-be-compressed buffer
                BlockOp::setEmptyBuf( fToBeCompressedBuffer,
                                      CompressInterface::UNCOMPRESSED_INBUF_LEN,
                                      fColInfo->column.emptyVal,
                                      fColInfo->column.width );

                fToBeCompressedCapacity =
                    CompressInterface::UNCOMPRESSED_INBUF_LEN;
                bufOffset = fToBeCompressedBuffer;

                fNumBytes = 0;
            }
            else
            {
                // Final pass: the rest of the data fits; block padding (if
                // any) is only accounted for here, on the last chunk.
                writeSizeOut = writeSizeX;

                if (fLog->isDebug( DEBUG_2 ))
                {
                    std::ostringstream oss;
                    oss << "Buffering data (new) to-be-compressed for: OID-" <<
                        fColInfo->curCol.dataFile.fid <<
                        "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                        "; part-" << fColInfo->curCol.dataFile.fPartition <<
                        "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                        "; addBytes-" << writeSizeOut <<
                        "; totBytes-" << (fNumBytes + writeSizeOut);
                    fLog->logMsg( oss.str(), MSGLVL_INFO2 );
                }

                memcpy(bufOffset, (fBuffer + startOffsetX), writeSizeOut);
                fNumBytes += writeSizeOut;
                fNumBytes += fillUpWEmptiesWriteSize;
            }

            startOffsetX += writeSizeOut;
            writeSizeX   -= writeSizeOut;
        } // end of while loop
    }

    return NO_ERROR;
}
//------------------------------------------------------------------------------
// Compress and write out the data in the to-be-compressed buffer.
// Also may write out the compression header.
//
// bFinishingFile indicates whether we are finished working with this file,
// either because we are completing an extent or because we have reached the
// end of the input data. In either case, if bFinishingFile is true, then
// in addition to flushing the current working chunk to disk, this function
// will also write out the updated compression header to match the data.
//
// This function will also write out the compression header if we are writing
// out the first (starting HWM) chunk for this import. We do this to keep the
// compression header in sync with the data, in case PrimProc is trying to read
// the db file. It is not necessary to immediately update the header for the
// remaining chunks as they are written out, because PrimProc will not be try-
// ing to access those chunk until we update the extentmap HWM at the end of
// this import. It's only the starting HWM chunk that may cause a problem and
// requires the immediate rewriting of the header, because we are modifying
// that chunk and adding rows to it.
//------------------------------------------------------------------------------
int ColumnBufferCompressed::compressAndFlush( bool bFinishingFile )
{
    // Select the compressor that matches this column's compression type.
    auto compressor = compress::getCompressorByType(
        fCompressorPool, fColInfo->column.compressionType);
    if (!compressor)
    {
        return ERR_COMP_WRONG_COMP_TYPE;
    }

    // Worst-case output size: maximum compressed length, plus the user's
    // padding bytes, plus one chunk increment so padCompressedChunks()
    // always has room to round the length up.
    const size_t OUTPUT_BUFFER_SIZE =
        compressor->maxCompressedSize(fToBeCompressedCapacity) +
        fUserPaddingBytes +
        // Padded len = len + COMPRESSED_SIZE_INCREMENT_CHUNK - (len %
        // COMPRESSED_SIZE_INCREMENT_CHUNK) + usePadding
        compress::CompressInterface::COMPRESSED_CHUNK_INCREMENT_SIZE;
    unsigned char* compressedOutBuf = new unsigned char[ OUTPUT_BUFFER_SIZE ];
    // scoped_array owns the allocation, so every return path below frees it.
    boost::scoped_array<unsigned char> compressedOutBufPtr(compressedOutBuf);
    size_t outputLen = OUTPUT_BUFFER_SIZE;
#ifdef PROFILE
    Stats::startParseEvent(WE_STATS_COMPRESS_COL_COMPRESS);
#endif
    int rc = compressor->compressBlock(
        reinterpret_cast<char*>(fToBeCompressedBuffer),
        fToBeCompressedCapacity, compressedOutBuf, outputLen);
    // NOTE(review): under PROFILE, this early return (and the ERR_COMP_PAD_DATA
    // one below) skips the matching stopParseEvent for
    // WE_STATS_COMPRESS_COL_COMPRESS.
    if (rc != 0)
    {
        return ERR_COMP_COMPRESS;
    }
    // Round up the compressed chunk size
    rc = compressor->padCompressedChunks( compressedOutBuf,
                                          outputLen, OUTPUT_BUFFER_SIZE );
    if (rc != 0)
    {
        return ERR_COMP_PAD_DATA;
    }
#ifdef PROFILE
    Stats::stopParseEvent(WE_STATS_COMPRESS_COL_COMPRESS);
    Stats::startParseEvent(WE_STATS_WRITE_COL);
#endif
    // Record where this chunk starts so its (offset, length) pair can be
    // kept in fChunkPtrs and eventually written to the compression header.
    off64_t fileOffset = fFile->tell();
    size_t nitems = fFile->write(compressedOutBuf, outputLen) / outputLen;
    if (nitems != 1)
        return ERR_FILE_WRITE;
    CompChunkPtr compChunk(
        (uint64_t)fileOffset, (uint64_t)outputLen);
    fChunkPtrs.push_back( compChunk );
    if (fLog->isDebug( DEBUG_2 ))
    {
        std::ostringstream oss;
        oss << "Writing compressed data for: OID-" <<
            fColInfo->curCol.dataFile.fid <<
            "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
            "; part-" << fColInfo->curCol.dataFile.fPartition <<
            "; seg-" << fColInfo->curCol.dataFile.fSegment <<
            "; bytes-" << outputLen <<
            "; fileOffset-" << fileOffset;
        fLog->logMsg( oss.str(), MSGLVL_INFO2 );
    }
    // We write out the compression headers if we are finished with this file
    // (either because we are through with the extent or the data), or because
    // this is the first HWM chunk that we may be modifying.
    // See the description that precedes this function for more details.
    if ( bFinishingFile || !fFlushedStartHwmChunk )
    {
        // Remember the current end-of-data position before the header
        // rewrite seeks to the top of the file.
        fileOffset = fFile->tell();
        RETURN_ON_ERROR( saveCompressionHeaders() );
        // If we just updated the chunk header for the starting HWM chunk,
        // then we flush our output, to synchronize with compressed chunks,
        if ( !fFlushedStartHwmChunk )
        {
            if (fFile->flush() != 0)
                return ERR_FILE_FLUSH;
            fFlushedStartHwmChunk = true;
        }
        // After seeking to the top of the file to write the headers,
        // we restore the file offset to continue adding more chunks,
        // if we are not through with this file.
        if ( !bFinishingFile )
        {
            RETURN_ON_ERROR( fColInfo->colOp->setFileOffset(
                fFile, fileOffset, SEEK_SET) );
        }
    }
#ifdef PROFILE
    Stats::stopParseEvent(WE_STATS_WRITE_COL);
#endif
    return NO_ERROR;
}
//------------------------------------------------------------------------------
// Final flushing of data and headers prior to closing the file.
// File is also truncated if applicable.
//------------------------------------------------------------------------------
int ColumnBufferCompressed::finishFile(bool bTruncFile)
{
    // A capacity of 0 means the HWM chunk for the current column segment
    // file was never read in, so there is no buffered data to write out.
    // We still fall through because the file may need truncating (initial
    // block skipping could have finished an extent).
    if (fToBeCompressedCapacity > 0)
    {
        // Compress and write any data still waiting in the working buffer;
        // 'true' also forces the compression headers to be rewritten.
        RETURN_ON_ERROR( compressAndFlush( true ) );
    }
#ifdef PROFILE
    Stats::startParseEvent(WE_STATS_COMPRESS_COL_FINISH_EXTENT);
#endif
    // Truncate file (if applicable) based on offset and size of last chunk
    if (bTruncFile && !fChunkPtrs.empty())
    {
        const long long truncateFileSize =
            fChunkPtrs.back().first + fChunkPtrs.back().second;
        // @bug5769 Don't initialize extents or truncate db files on HDFS
        if (idbdatafile::IDBPolicy::useHdfs())
        {
            std::ostringstream infoMsg;
            infoMsg << "Finished writing column file"
                ": OID-" << fColInfo->curCol.dataFile.fid <<
                "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                "; part-" << fColInfo->curCol.dataFile.fPartition <<
                "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                "; size-" << truncateFileSize;
            fLog->logMsg( infoMsg.str(), MSGLVL_INFO2 );
        }
        else
        {
            std::ostringstream infoMsg;
            infoMsg << "Truncating column file"
                ": OID-" << fColInfo->curCol.dataFile.fid <<
                "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                "; part-" << fColInfo->curCol.dataFile.fPartition <<
                "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                "; size-" << truncateFileSize;
            fLog->logMsg( infoMsg.str(), MSGLVL_INFO2 );
            // @bug3913 - a truncation to 0 bytes is reported as an error
            int rc = (truncateFileSize > 0)
                ? fColInfo->colOp->truncateFile( fFile, truncateFileSize )
                : ERR_COMP_TRUNCATE_ZERO;
            if (rc != NO_ERROR)
            {
                WErrorCodes ec;
                std::ostringstream errMsg;
                errMsg << "finishFile: error truncating file for " <<
                    "OID " << fColInfo->curCol.dataFile.fid <<
                    "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                    "; part-" << fColInfo->curCol.dataFile.fPartition <<
                    "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                    "; size-" << truncateFileSize <<
                    "; " << ec.errorString(rc);
                fLog->logMsg( errMsg.str(), rc, MSGLVL_ERROR );
                return rc;
            }
        }
    }
    // Nothing more to do if we are not updating the file contents.
    if (fToBeCompressedCapacity == 0)
    {
#ifdef PROFILE
        Stats::stopParseEvent(WE_STATS_COMPRESS_COL_FINISH_EXTENT);
#endif
        return NO_ERROR;
    }
    // Reset the working-buffer state for the next segment file.
    fToBeCompressedCapacity = 0;
    fNumBytes = 0;
    fChunkPtrs.clear();
#ifdef PROFILE
    Stats::stopParseEvent(WE_STATS_COMPRESS_COL_FINISH_EXTENT);
#endif
    return NO_ERROR;
}
//------------------------------------------------------------------------------
// Write out the updated compression headers.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Write out the updated compression headers.
// Reads the current headers, reinitializes them for this column's width/type/
// compression, refreshes the block count and LBID bookkeeping, and stores the
// chunk-pointer table before writing the headers back to the file.
//------------------------------------------------------------------------------
int ColumnBufferCompressed::saveCompressionHeaders( )
{
    // Construct the header records
    char hdrBuf[CompressInterface::HDR_BUF_LEN * 2];
    RETURN_ON_ERROR(fColInfo->colOp->readHeaders(fFile, hdrBuf));

    // Preserve the LBID currently in the header; initHdr() below resets it.
    BRM::LBID_t lbid = compress::CompressInterface::getLBIDByIndex(hdrBuf, 0);
    compress::CompressInterface::initHdr(hdrBuf, fColInfo->column.width, fColInfo->column.dataType,
                                         fColInfo->column.compressionType);
    compress::CompressInterface::setBlockCount(hdrBuf, (fColInfo->getFileSize() / BYTE_PER_BLOCK));

    // If lbid written in the header is not 0 and not equal to `lastupdatedlbid` - we are running
    // for the next extent for column segment file.
    const auto lastUpdatedLbid = fColInfo->getLastUpdatedLBID();
    if (lbid && lastUpdatedLbid != lbid)
    {
        // Write back lbid, after header initialization.
        compress::CompressInterface::setLBIDByIndex(hdrBuf, lbid, 0);
        compress::CompressInterface::setLBIDByIndex(hdrBuf, lastUpdatedLbid, 1);
    }
    else
        compress::CompressInterface::setLBIDByIndex(hdrBuf, lastUpdatedLbid, 0);

    // Store the chunk-pointer table: the start offset of every chunk, plus
    // one trailing entry marking the end of the last chunk.
    std::vector<uint64_t> ptrs;
    ptrs.reserve(fChunkPtrs.size() + 1);
    for (unsigned i = 0; i < fChunkPtrs.size(); i++)
    {
        ptrs.push_back( fChunkPtrs[i].first );
    }
    // Guard the trailing entry: the previous code computed
    // "fChunkPtrs.size() - 1" unconditionally, which wraps around for an
    // empty vector (size() is unsigned) and indexed out of bounds.
    if (!fChunkPtrs.empty())
    {
        unsigned lastIdx = fChunkPtrs.size() - 1;
        ptrs.push_back( fChunkPtrs[lastIdx].first + fChunkPtrs[lastIdx].second );
    }
    compress::CompressInterface::storePtrs(ptrs, hdrBuf);

    // Write out the header records
    RETURN_ON_ERROR( fColInfo->colOp->writeHeaders(fFile, hdrBuf) );
    return NO_ERROR;
}
//------------------------------------------------------------------------------
// Allocates to-be-compressed buffer if it has not already been allocated.
// Initializes to-be-compressed buffer with the contents of the chunk containing
// the fStartingHwm block, as long as that chunk is in the pointer list.
// If the chunk is not in the list, then we must be adding a new chunk, in
// which case we just initialize an empty chunk.
// Returns startFileOffset which indicates file offset (in bytes) where the
// next chunk will be starting.
//------------------------------------------------------------------------------
int ColumnBufferCompressed::initToBeCompressedBuffer(long long& startFileOffset)
{
    bool bNewBuffer = false;

    // Lazy initialization of to-be-compressed buffer
    if (!fToBeCompressedBuffer)
    {
        fToBeCompressedBuffer =
            new unsigned char[CompressInterface::UNCOMPRESSED_INBUF_LEN];
        // Fill with the column's empty value so untouched blocks read as empty.
        BlockOp::setEmptyBuf( fToBeCompressedBuffer,
                              CompressInterface::UNCOMPRESSED_INBUF_LEN,
                              fColInfo->column.emptyVal,
                              fColInfo->column.width );
        bNewBuffer = true;
    }

    // Find the chunk containing the starting HWM, as long as our initial
    // block skipping has not caused us to exit the HWM chunk; in which
    // case we start a new empty chunk.
    unsigned int chunkIndex = 0;
    unsigned int blockOffsetWithinChunk = 0;
    bool bSkipStartingBlks = false;
    auto compressor = compress::getCompressorByType(
        fCompressorPool, fColInfo->column.compressionType);
    if (!compressor)
    {
        return ERR_COMP_WRONG_COMP_TYPE;
    }
    if (fPreLoadHWMChunk)
    {
        if (fChunkPtrs.size() > 0)
        {
            compressor->locateBlock(fStartingHwm, chunkIndex,
                                    blockOffsetWithinChunk);
            if (chunkIndex < fChunkPtrs.size())
                startFileOffset = fChunkPtrs[chunkIndex].first;
            else
                // HWM landed past the last chunk on disk: add a new chunk.
                fPreLoadHWMChunk = false;
        }
        // If we are at the start of the job, fPreLoadHWMChunk will be true,
        // to preload the old HWM chunk. But if we have no chunk ptrs, then
        // we are starting on an empty PM. In this case, we skip starting
        // blks if fStartingHwm has been set.
        else
        {
            fPreLoadHWMChunk = false;
            bSkipStartingBlks = true;
        }
    }

    // Preload (read and uncompress) the chunk for the starting HWM extent only
    if (fPreLoadHWMChunk)
    {
        fPreLoadHWMChunk = false; // only preload HWM chunk in the first extent
        std::ostringstream oss;
        oss << "Reading HWM chunk for: OID-" <<
            fColInfo->curCol.dataFile.fid <<
            "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
            "; part-" << fColInfo->curCol.dataFile.fPartition <<
            "; seg-" << fColInfo->curCol.dataFile.fSegment <<
            "; hwm-" << fStartingHwm <<
            "; chunk#-" << chunkIndex <<
            "; blkInChunk-" << blockOffsetWithinChunk;
        fLog->logMsg( oss.str(), MSGLVL_INFO2 );

        // Read the chunk
        RETURN_ON_ERROR( fColInfo->colOp->setFileOffset(
            fFile, startFileOffset, SEEK_SET) );
        char* compressedOutBuf = new char[ fChunkPtrs[chunkIndex].second ];
        boost::scoped_array<char> compressedOutBufPtr(compressedOutBuf);
        // NOTE(review): assumes fChunkPtrs[chunkIndex].second > 0; a
        // zero-length chunk entry would divide by zero here — confirm
        // upstream invariant.
        size_t itemsRead = fFile->read(compressedOutBuf, fChunkPtrs[chunkIndex].second) / fChunkPtrs[chunkIndex].second;
        if (itemsRead != 1)
        {
            std::ostringstream oss;
            oss << "Error reading HWM chunk for: " <<
                "OID-" << fColInfo->curCol.dataFile.fid <<
                "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                "; part-" << fColInfo->curCol.dataFile.fPartition <<
                "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                "; hwm-" << fStartingHwm;
            fLog->logMsg( oss.str(), ERR_COMP_READ_BLOCK, MSGLVL_ERROR );
            return ERR_COMP_READ_BLOCK;
        }

        // Uncompress the chunk into our 4MB buffer
        size_t outLen = CompressInterface::UNCOMPRESSED_INBUF_LEN;
        int rc = compressor->uncompressBlock(
            compressedOutBuf,
            fChunkPtrs[chunkIndex].second,
            fToBeCompressedBuffer,
            outLen);
        if (rc)
        {
            WErrorCodes ec;
            std::ostringstream oss;
            oss << "Error uncompressing HWM chunk for: " <<
                "OID-" << fColInfo->curCol.dataFile.fid <<
                "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                "; part-" << fColInfo->curCol.dataFile.fPartition <<
                "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                "; hwm-" << fStartingHwm <<
                "; " << ec.errorString(rc);
            fLog->logMsg( oss.str(), rc, MSGLVL_ERROR );
            return ERR_COMP_UNCOMPRESS;
        }
        fToBeCompressedCapacity = outLen;

        // Positition ourselves to start adding data to the HWM block
        fNumBytes = blockOffsetWithinChunk * BYTE_PER_BLOCK;

        // We are going to add data to, and thus re-add, the last chunk; so we
        // drop it from our list.
        fChunkPtrs.resize( fChunkPtrs.size() - 1 );
    }
    else // We have left the HWM chunk; just position file offset,
         // without reading anything
    {
        // If it's not a new buffer, we need to initialize, since we won't be
        // reading in anything to overlay what's in the to-be-compressed buffer.
        if (!bNewBuffer)
        {
            BlockOp::setEmptyBuf( fToBeCompressedBuffer,
                                  CompressInterface::UNCOMPRESSED_INBUF_LEN,
                                  fColInfo->column.emptyVal,
                                  fColInfo->column.width );
        }
        if (fLog->isDebug( DEBUG_2 ))
        {
            std::ostringstream oss;
            oss << "Initializing new empty chunk: OID-" <<
                fColInfo->curCol.dataFile.fid <<
                "; DBRoot-" << fColInfo->curCol.dataFile.fDbRoot <<
                "; part-" << fColInfo->curCol.dataFile.fPartition <<
                "; seg-" << fColInfo->curCol.dataFile.fSegment <<
                "; hwm-" << fStartingHwm;
            fLog->logMsg( oss.str(), MSGLVL_INFO2 );
        }
        fToBeCompressedCapacity = CompressInterface::UNCOMPRESSED_INBUF_LEN;

        // Set file offset to start after last current chunk
        startFileOffset = CompressInterface::HDR_BUF_LEN * 2;
        if (fChunkPtrs.size() > 0)
            startFileOffset = fChunkPtrs[ fChunkPtrs.size() - 1 ].first +
                              fChunkPtrs[ fChunkPtrs.size() - 1 ].second;

        // Position ourselves to start of empty to-be-compressed buffer.
        // If we are starting the first extent on a PM, we may employ blk
        // skipping at start of import; adjust fNumBytes accordingly.
        // (see ColumnInfo::createDelayedFileIfNeeded() for discussion)
        if (bSkipStartingBlks)
            fNumBytes = fStartingHwm * BYTE_PER_BLOCK;
        else
            fNumBytes = 0;
    }
    return NO_ERROR;
}
}
|
{"hexsha": "9131d9ea7dcff561ba49931f2d579888a1a69af3", "size": 32013, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/writeengine/bulk/we_colbufcompressed.cpp", "max_stars_repo_name": "zettadb/zettalib", "max_stars_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/writeengine/bulk/we_colbufcompressed.cpp", "max_issues_repo_name": "zettadb/zettalib", "max_issues_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/writeengine/bulk/we_colbufcompressed.cpp", "max_forks_repo_name": "zettadb/zettalib", "max_forks_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2022-02-27T14:00:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:24:22.000Z", "avg_line_length": 39.0879120879, "max_line_length": 120, "alphanum_fraction": 0.5638959173, "num_tokens": 7363}
|
import numpy as np
def np_unique_int(array, return_counts=False):
    """
    Fast variant of ``np.unique(array, return_counts=True)``

    Only works with integer values.

    Parameters
    ----------
    array : np.ndarray
        Input array. Has to be 1-D with non-negative values
        (a requirement of ``np.bincount``).
    return_counts : bool, optional
        Return the counts next to the unique values.
        Default is `False`.

    Returns
    -------
    unique : np.ndarray
        Unique elements
    unique_counts : np.ndarray, optional
        Counts of Unique elements

    Raises
    ------
    TypeError
        If the input `array.dtype == float` raises a TypeError.
        This can be avoided by `array.astype(int)`.

    Examples
    --------
    >>> data = np.array([1,2,3,2,3])
    >>> np_unique_int(data)
    array([1, 2, 3])

    >>> data = np.array([1,2,3,2,3])
    >>> np_unique_int(data, return_counts=True)
    (array([1, 2, 3]), array([1, 2, 2]))

    >>> data = np.array([1,2,3,2,3], dtype=np.float64)
    >>> np_unique_int(data.astype(int))
    array([1, 2, 3])
    """
    # bincount[v] is the number of occurrences of value v in the input.
    bincount = np.bincount(array.astype(int))
    # Values with a non-zero count are exactly the unique values.
    # np.nonzero replaces the original np.where(bincount.astype(np.bool)):
    # the np.bool alias was removed in NumPy 1.24 and raises AttributeError.
    unique = np.nonzero(bincount)[0]
    if return_counts:
        unique_counts = bincount[unique]
        return unique, unique_counts
    return unique
|
{"hexsha": "84ff63be2873d7fc762034ab3e4b5a778d6e44dc", "size": 1264, "ext": "py", "lang": "Python", "max_stars_repo_path": "numpy-extensions/fast_implementations.py", "max_stars_repo_name": "king-michael/numpy-extensions", "max_stars_repo_head_hexsha": "6cebe90b04248f70209e1a46bc57b5207ccd359a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "numpy-extensions/fast_implementations.py", "max_issues_repo_name": "king-michael/numpy-extensions", "max_issues_repo_head_hexsha": "6cebe90b04248f70209e1a46bc57b5207ccd359a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-25T13:22:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-25T13:22:50.000Z", "max_forks_repo_path": "numpy-extensions/fast_implementations.py", "max_forks_repo_name": "king-michael/numpy-extensions", "max_forks_repo_head_hexsha": "6cebe90b04248f70209e1a46bc57b5207ccd359a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9818181818, "max_line_length": 64, "alphanum_fraction": 0.5862341772, "include": true, "reason": "import numpy", "num_tokens": 334}
|
#include <boost/spirit/home/x3/directive.hpp>
|
{"hexsha": "9df2423d71d6e3f03da75a5bbd8c81fc8eb2a56f", "size": 46, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_spirit_home_x3_directive.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_spirit_home_x3_directive.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_spirit_home_x3_directive.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 23.0, "max_line_length": 45, "alphanum_fraction": 0.7826086957, "num_tokens": 13}
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta
! This file was ported from Lean 3 source module category_theory.adjunction.lifting
! leanprover-community/mathlib commit 9bc7dfa6e50f902fb0684c9670a680459ebaed68
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.CategoryTheory.Limits.Shapes.Equalizers
import Mathbin.CategoryTheory.Limits.Shapes.Reflexive
import Mathbin.CategoryTheory.Monad.Adjunction
import Mathbin.CategoryTheory.Monad.Coequalizer
/-!
# Adjoint lifting
This file gives two constructions for building left adjoints: the adjoint triangle theorem and the
adjoint lifting theorem.
The adjoint triangle theorem says that given a functor `U : B ⥤ C` with a left adjoint `F` such
that `ε_X : FUX ⟶ X` is a regular epi. Then for any category `A` with coequalizers of reflexive
pairs, a functor `R : A ⥤ B` has a left adjoint if (and only if) the composite `R ⋙ U` does.
Note that the condition on `U` regarding `ε_X` is automatically satisfied in the case when `U` is
a monadic functor, giving the corollary: `monadic_adjoint_triangle_lift`, i.e. if `U` is monadic,
`A` has reflexive coequalizers then `R : A ⥤ B` has a left adjoint provided `R ⋙ U` does.
The adjoint lifting theorem says that given a commutative square of functors (up to isomorphism):
Q
A → B
U ↓ ↓ V
C → D
R
where `U` and `V` are monadic and `A` has reflexive coequalizers, then if `R` has a left adjoint
then `Q` has a left adjoint.
## Implementation
It is more convenient to prove this theorem by assuming we are given the explicit adjunction rather
than just a functor known to be a right adjoint. In docstrings, we write `(η, ε)` for the unit
and counit of the adjunction `adj₁ : F ⊣ U` and `(ι, δ)` for the unit and counit of the adjunction
`adj₂ : F' ⊣ R ⋙ U`.
## TODO
Dualise to lift right adjoints through comonads (by reversing 1-cells) and dualise to lift right
adjoints through monads (by reversing 2-cells), and the combination.
## References
* https://ncatlab.org/nlab/show/adjoint+triangle+theorem
* https://ncatlab.org/nlab/show/adjoint+lifting+theorem
* Adjoint Lifting Theorems for Categories of Algebras (PT Johnstone, 1975)
* A unified approach to the lifting of adjoints (AJ Power, 1988)
-/
namespace CategoryTheory
open Category Limits
universe v₁ v₂ v₃ v₄ u₁ u₂ u₃ u₄
variable {A : Type u₁} {B : Type u₂} {C : Type u₃}
variable [Category.{v₁} A] [Category.{v₂} B] [Category.{v₃} C]
-- Hide implementation details in this namespace
namespace LiftAdjoint
variable {U : B ⥤ C} {F : C ⥤ B} (R : A ⥤ B) (F' : C ⥤ A)
variable (adj₁ : F ⊣ U) (adj₂ : F' ⊣ R ⋙ U)
/-- To show that `ε_X` is a coequalizer for `(FUε_X, ε_FUX)`, it suffices to assume it's always a
coequalizer of something (i.e. a regular epi).
-/
def counitCoequalises [∀ X : B, RegularEpi (adj₁.counit.app X)] (X : B) :
    IsColimit (Cofork.ofπ (adj₁.counit.app X) (adj₁.counit_naturality _)) :=
  Cofork.IsColimit.mk' _ fun s =>
    by
    -- Factor through `ε_X` using its regular-epi presentation, then discharge
    -- the cofork triangle and the uniqueness goal.
    refine' ⟨(regular_epi.desc' (adj₁.counit.app X) s.π _).1, _, _⟩
    · rw [← cancel_epi (adj₁.counit.app (regular_epi.W (adj₁.counit.app X)))]
      rw [← adj₁.counit_naturality_assoc]
      dsimp only [functor.comp_obj]
      rw [← s.condition, ← F.map_comp_assoc, ← U.map_comp, regular_epi.w, U.map_comp,
        F.map_comp_assoc, s.condition, ← adj₁.counit_naturality_assoc]
    · apply (regular_epi.desc' (adj₁.counit.app X) s.π _).2
    · intro m hm
      -- Uniqueness: cancel the epi `ε_X` on the left.
      rw [← cancel_epi (adj₁.counit.app X)]
      apply hm.trans (regular_epi.desc' (adj₁.counit.app X) s.π _).2.symm
#align category_theory.lift_adjoint.counit_coequalises CategoryTheory.LiftAdjoint.counitCoequalises
include adj₁ adj₂
/-- (Implementation)
To construct the left adjoint, we use the coequalizer of `F' U ε_Y` with the composite
`F' U F U X ⟶ F' U F U R F U' X ⟶ F' U R F' U X ⟶ F' U X`
where the first morphism is `F' U F ι_UX`, the second is `F' U ε_RF'UX`, and the third is `δ_F'UX`.
We will show that this coequalizer exists and that it forms the object map for a left adjoint to
`R`.
-/
def otherMap (X) : F'.obj (U.obj (F.obj (U.obj X))) ⟶ F'.obj (U.obj X) :=
  -- `F' U (F ι_UX ≫ ε_RF'UX) ≫ δ_F'UX`, written as a single composite.
  F'.map (U.map (F.map (adj₂.Unit.app _) ≫ adj₁.counit.app _)) ≫ adj₂.counit.app _
#align category_theory.lift_adjoint.other_map CategoryTheory.LiftAdjoint.otherMap
/--
`(F'Uε_X, other_map X)` is a reflexive pair: in particular if `A` has reflexive coequalizers then
it has a coequalizer.
-/
instance (X : B) :
    IsReflexivePair (F'.map (U.map (adj₁.counit.app X))) (otherMap _ _ adj₁ adj₂ X) :=
  -- The common section is `F' η_UX`; the triangle identities of `adj₁`/`adj₂`
  -- show it splits both legs of the pair.
  IsReflexivePair.mk' (F'.map (adj₁.Unit.app (U.obj X)))
    (by
      rw [← F'.map_comp, adj₁.right_triangle_components]
      apply F'.map_id)
    (by
      dsimp [other_map]
      rw [← F'.map_comp_assoc, U.map_comp, adj₁.unit_naturality_assoc,
        adj₁.right_triangle_components, comp_id, adj₂.left_triangle_components])
variable [HasReflexiveCoequalizers A]
/-- Construct the object part of the desired left adjoint as the coequalizer of `F'Uε_Y` with
`other_map`.
-/
noncomputable def constructLeftAdjointObj (Y : B) : A :=
  -- This coequalizer exists because the pair is reflexive (instance above)
  -- and `A` has reflexive coequalizers.
  coequalizer (F'.map (U.map (adj₁.counit.app Y))) (otherMap _ _ adj₁ adj₂ Y)
#align category_theory.lift_adjoint.construct_left_adjoint_obj CategoryTheory.LiftAdjoint.constructLeftAdjointObj
/-- The homset equivalence which helps show that `R` is a right adjoint. -/
@[simps (config := { rhsMd := semireducible })]
noncomputable def constructLeftAdjointEquiv [∀ X : B, RegularEpi (adj₁.counit.app X)] (Y : A)
    (X : B) : (constructLeftAdjointObj _ _ adj₁ adj₂ X ⟶ Y) ≃ (X ⟶ R.obj Y) :=
  calc
    -- Maps out of the coequalizer correspond to maps out of `F'UX`
    -- coequalizing the defining pair.
    (constructLeftAdjointObj _ _ adj₁ adj₂ X ⟶ Y) ≃
        { f : F'.obj (U.obj X) ⟶ Y //
          F'.map (U.map (adj₁.counit.app X)) ≫ f = otherMap _ _ adj₁ adj₂ _ ≫ f } :=
      Cofork.IsColimit.homIso (colimit.isColimit _) _
    -- Transport the condition across the hom-set equivalence of `adj₂`.
    _ ≃
        { g : U.obj X ⟶ U.obj (R.obj Y) //
          U.map (F.map g ≫ adj₁.counit.app _) = U.map (adj₁.counit.app _) ≫ g } :=
      by
      apply (adj₂.hom_equiv _ _).subtypeEquiv _
      intro f
      rw [← (adj₂.hom_equiv _ _).Injective.eq_iff, eq_comm, adj₂.hom_equiv_naturality_left,
        other_map, assoc, adj₂.hom_equiv_naturality_left, ← adj₂.counit_naturality,
        adj₂.hom_equiv_naturality_left, adj₂.hom_equiv_unit, adj₂.right_triangle_components,
        comp_id, functor.comp_map, ← U.map_comp, assoc, ← adj₁.counit_naturality,
        adj₂.hom_equiv_unit, adj₂.hom_equiv_unit, F.map_comp, assoc]
      rfl
    -- Transport backwards across the hom-set equivalence of `adj₁`.
    _ ≃ { z : F.obj (U.obj X) ⟶ R.obj Y // _ } :=
      by
      apply (adj₁.hom_equiv _ _).symm.subtypeEquiv
      intro g
      rw [← (adj₁.hom_equiv _ _).symm.Injective.eq_iff, adj₁.hom_equiv_counit,
        adj₁.hom_equiv_counit, adj₁.hom_equiv_counit, F.map_comp, assoc, U.map_comp, F.map_comp,
        assoc, adj₁.counit_naturality, adj₁.counit_naturality_assoc]
      apply eq_comm
    -- Finally use that `ε_X` itself coequalizes (`counitCoequalises`).
    _ ≃ (X ⟶ R.obj Y) := (Cofork.IsColimit.homIso (counitCoequalises adj₁ X) _).symm
#align category_theory.lift_adjoint.construct_left_adjoint_equiv CategoryTheory.LiftAdjoint.constructLeftAdjointEquiv
/-- Construct the left adjoint to `R`, with object map `construct_left_adjoint_obj`. -/
noncomputable def constructLeftAdjoint [∀ X : B, RegularEpi (adj₁.counit.app X)] : B ⥤ A :=
  by
  -- Assemble the functor from the hom-set equivalences; only naturality
  -- in the second variable remains to be checked.
  refine'
    adjunction.left_adjoint_of_equiv (fun X Y => construct_left_adjoint_equiv R _ adj₁ adj₂ Y X) _
  intro X Y Y' g h
  rw [construct_left_adjoint_equiv_apply, construct_left_adjoint_equiv_apply, Function.comp_apply,
    Function.comp_apply, Equiv.trans_apply, Equiv.trans_apply, Equiv.trans_apply, Equiv.trans_apply,
    Equiv.symm_apply_eq, Subtype.ext_iff, cofork.is_colimit.hom_iso_natural, Equiv.apply_symm_apply,
    Equiv.subtypeEquiv_apply, Equiv.subtypeEquiv_apply, Equiv.subtypeEquiv_apply,
    Equiv.subtypeEquiv_apply, Subtype.coe_mk, Subtype.coe_mk, Subtype.coe_mk, Subtype.coe_mk, ←
    adj₁.hom_equiv_naturality_right_symm, cofork.is_colimit.hom_iso_natural,
    adj₂.hom_equiv_naturality_right, functor.comp_map]
#align category_theory.lift_adjoint.construct_left_adjoint CategoryTheory.LiftAdjoint.constructLeftAdjoint
end LiftAdjoint
/-- The adjoint triangle theorem: Suppose `U : B ⥤ C` has a left adjoint `F` such that each counit
`ε_X : FUX ⟶ X` is a regular epimorphism. Then if a category `A` has coequalizers of reflexive
pairs, then a functor `R : A ⥤ B` has a left adjoint if the composite `R ⋙ U` does.
Note the converse is true (with weaker assumptions), by `adjunction.comp`.
See https://ncatlab.org/nlab/show/adjoint+triangle+theorem
-/
noncomputable def adjointTriangleLift {U : B ⥤ C} {F : C ⥤ B} (R : A ⥤ B) (adj₁ : F ⊣ U)
    [∀ X : B, RegularEpi (adj₁.counit.app X)] [HasReflexiveCoequalizers A]
    [IsRightAdjoint (R ⋙ U)] : IsRightAdjoint R
    where
  -- The left adjoint is the coequalizer construction from `LiftAdjoint`.
  left := LiftAdjoint.constructLeftAdjoint R _ adj₁ (Adjunction.ofRightAdjoint _)
  adj := Adjunction.adjunctionOfEquivLeft _ _
#align category_theory.adjoint_triangle_lift CategoryTheory.adjointTriangleLift
/-- If `R ⋙ U` has a left adjoint, the domain of `R` has reflexive coequalizers and `U` is a monadic
functor, then `R` has a left adjoint.
This is a special case of `adjoint_triangle_lift` which is often more useful in practice.
-/
noncomputable def monadicAdjointTriangleLift (U : B ⥤ C) [MonadicRightAdjoint U] {R : A ⥤ B}
    [HasReflexiveCoequalizers A] [IsRightAdjoint (R ⋙ U)] : IsRightAdjoint R :=
  by
  -- Pass to the comparison functor into the Eilenberg–Moore category of `U`'s monad.
  let R' : A ⥤ _ := R ⋙ monad.comparison (adjunction.of_right_adjoint U)
  rsuffices : is_right_adjoint R'
  · let this : is_right_adjoint (R' ⋙ (monad.comparison (adjunction.of_right_adjoint U)).inv) := by
      infer_instance
  · let this : R' ⋙ (monad.comparison (adjunction.of_right_adjoint U)).inv ≅ R :=
      (iso_whisker_left R (monad.comparison _).asEquivalence.unitIso.symm : _) ≪≫ R.right_unitor
    exact adjunction.right_adjoint_of_nat_iso this
  let this : is_right_adjoint (R' ⋙ monad.forget (adjunction.of_right_adjoint U).toMonad) :=
    adjunction.right_adjoint_of_nat_iso
      (iso_whisker_left R (monad.comparison_forget (adjunction.of_right_adjoint U)).symm : _)
  -- For algebras every counit component is a regular epi (Beck coequalizer),
  -- so `adjoint_triangle_lift` applies to the forgetful adjunction.
  let this : ∀ X, regular_epi ((monad.adj (adjunction.of_right_adjoint U).toMonad).counit.app X) :=
    by
    intro X
    simp only [monad.adj_counit]
    exact ⟨_, _, _, _, monad.beck_algebra_coequalizer X⟩
  exact adjoint_triangle_lift R' (monad.adj _)
#align category_theory.monadic_adjoint_triangle_lift CategoryTheory.monadicAdjointTriangleLift
variable {D : Type u₄}
variable [Category.{v₄} D]
/-- Suppose we have a commutative square of functors
      Q
    A → B
  U ↓   ↓ V
    C → D
      R
where `U` has a left adjoint, `A` has reflexive coequalizers and `V` has a left adjoint such that
each component of the counit is a regular epi.
Then `Q` has a left adjoint if `R` has a left adjoint.
See https://ncatlab.org/nlab/show/adjoint+lifting+theorem
-/
noncomputable def adjointSquareLift (Q : A ⥤ B) (V : B ⥤ D) (U : A ⥤ C) (R : C ⥤ D)
    (comm : U ⋙ R ≅ Q ⋙ V) [IsRightAdjoint U] [IsRightAdjoint V] [IsRightAdjoint R]
    [∀ X, RegularEpi ((Adjunction.ofRightAdjoint V).counit.app X)] [HasReflexiveCoequalizers A] :
    IsRightAdjoint Q :=
  by
  -- `Q ⋙ V ≅ U ⋙ R` is a right adjoint, so the triangle theorem applies to `V`.
  let this := adjunction.right_adjoint_of_nat_iso comm
  exact adjoint_triangle_lift Q (adjunction.of_right_adjoint V)
#align category_theory.adjoint_square_lift CategoryTheory.adjointSquareLift
/-- Suppose we have a commutative square of functors
      Q
    A → B
  U ↓   ↓ V
    C → D
      R
where `U` has a left adjoint, `A` has reflexive coequalizers and `V` is monadic.
Then `Q` has a left adjoint if `R` has a left adjoint.
See https://ncatlab.org/nlab/show/adjoint+lifting+theorem
-/
noncomputable def monadicAdjointSquareLift (Q : A ⥤ B) (V : B ⥤ D) (U : A ⥤ C) (R : C ⥤ D)
    (comm : U ⋙ R ≅ Q ⋙ V) [IsRightAdjoint U] [MonadicRightAdjoint V] [IsRightAdjoint R]
    [HasReflexiveCoequalizers A] : IsRightAdjoint Q :=
  by
  -- As above, but `V` monadic lets us use the monadic variant of the lift.
  let this := adjunction.right_adjoint_of_nat_iso comm
  exact monadic_adjoint_triangle_lift V
#align category_theory.monadic_adjoint_square_lift CategoryTheory.monadicAdjointSquareLift
end CategoryTheory
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/CategoryTheory/Adjunction/Lifting.lean"}
|
"""
Test Policy class and its methods.
"""
# CODING-STYLE CHECKS:
# pycodestyle test_policy.py
# pylint --disable=locally-disabled test_policy.py
#
# pylint: disable=too-many-lines
import copy
import os
import json
import numpy as np
import pytest
import paramtools as pt
# pylint: disable=import-error
from taxcalc import Policy
def cmp_policy_objs(pol1, pol2, year_range=None, exclude=None):
    """
    Assert that two Policy objects hold the same parameter values.

    year_range: iterable of years over which to compare values;
                when None, each object's state is cleared first.
    exclude: iterable of parameter names to skip in the comparison.
    """
    if year_range is None:
        pol1.clear_state()
        pol2.clear_state()
    else:
        years = list(year_range)
        pol1.set_state(year=years)
        pol2.set_state(year=years)
    skip = set(exclude) if exclude else set()
    for name in pol1._data:
        if name in skip:
            continue
        np.testing.assert_allclose(getattr(pol1, name), getattr(pol2, name))
def test_incorrect_class_instantiation():
    """
    Test incorrect instantiation of Policy class object.
    """
    # gfactors must be a GrowFactors instance, not a list
    with pytest.raises(ValueError):
        Policy(gfactors=[])
def test_correct_class_instantiation():
    """
    Test correct instantiation of Policy class object and basic
    implement_reform input validation.
    """
    pol = Policy()
    assert pol
    # an empty reform is a no-op, not an error
    pol.implement_reform({})
    # wrong type and out-of-range year are rejected
    for bad_reform in (list(), {2099: {'II_em': 99000}}):
        with pytest.raises(pt.ValidationError):
            pol.implement_reform(bad_reform)
    pol.set_year(2019)
    # reform years before the current year or negative values are rejected
    for bad_reform in ({2018: {'II_em': 99000}},
                       {2020: {'II_em': -1000}}):
        with pytest.raises(pt.ValidationError):
            pol.implement_reform(bad_reform)
def test_json_reform_url():
    """
    Test reading a JSON reform from a URL. Results from the URL are expected
    to match the results from the string.
    """
    reform_str = """
{
// raise FICA payroll tax rate in 2018 and 2020
"FICA_ss_trt": {
"2018": 0.130,
"2020": 0.140
},
// raise Medicare payroll tax rate in 2019 and 2021
"FICA_mc_trt": {
"2019": 0.030,
"2021": 0.032
}
}
"""
    reform_url = ('https://raw.githubusercontent.com/PSLmodels/'
                  'Tax-Calculator/master/taxcalc/reforms/ptaxes0.json')
    # both sources must translate into identical parameter dictionaries
    from_string = Policy.read_json_reform(reform_str)
    from_url = Policy.read_json_reform(reform_url)
    assert from_string == from_url
# Multi-parameter reform JSON text (with //-style comments) consumed by the
# set_year-parameterized test_read_json_reform_file_and_implement_reform below.
REFORM_JSON = """
// Example of a reform file suitable for Policy.read_json_reform().
// This JSON file can contain any number of trailing //-style comments, which
// will be removed before the contents are converted from JSON to a dictionary.
// The primary keys are parameters and the secondary keys are years.
// Both the primary and secondary key values must be enclosed in quotes (").
// Boolean variables are specified as true or false (no quotes; all lowercase).
{
"AMT_brk1": // top of first AMT tax bracket
{"2015": 200000,
"2017": 300000
},
"EITC_c": // maximum EITC amount by number of qualifying kids (0,1,2,3+)
{"2016": [ 900, 5000, 8000, 9000],
"2019": [1200, 7000, 10000, 12000]
},
"II_em": // personal exemption amount (see indexing changes below)
{"2016": 6000,
"2018": 7500,
"2020": 9000
},
"II_em-indexed": // personal exemption amount indexing status
{"2016": false, // values in future years are same as this year value
"2018": true // values in future years indexed with this year as base
},
"SS_Earnings_c": // social security (OASDI) maximum taxable earnings
{"2016": 300000,
"2018": 500000,
"2020": 700000
},
"AMT_em-indexed": // AMT exemption amount indexing status
{"2017": false, // values in future years are same as this year value
"2020": true // values in future years indexed with this year as base
}
}
"""
# pylint: disable=protected-access,no-member
@pytest.mark.parametrize("set_year", [False, True])
def test_read_json_reform_file_and_implement_reform(set_year):
    """
    Test reading and translation of reform JSON into a reform dictionary
    and then using that reform dictionary to implement reform.
    Parameterized on whether set_year(2015) is called before the reform.
    """
    pol = Policy()
    if set_year:
        pol.set_year(2015)
    pol.implement_reform(Policy.read_json_reform(REFORM_JSON))
    syr = pol.start_year
    # pylint: disable=protected-access
    # AMT_brk1: reform pins 2015 and 2017; 2016 and 2018 are indexed upward
    amt_brk1 = pol._AMT_brk1
    assert amt_brk1[2015 - syr] == 200000
    assert amt_brk1[2016 - syr] > 200000
    assert amt_brk1[2017 - syr] == 300000
    assert amt_brk1[2018 - syr] > 300000
    # II_em: unindexed after 2016 (flat 2017), indexed after 2018 and 2020
    ii_em = pol._II_em
    assert ii_em[2016 - syr] == 6000
    assert ii_em[2017 - syr] == 6000
    assert ii_em[2018 - syr] == 7500
    assert ii_em[2019 - syr] > 7500
    assert ii_em[2020 - syr] == 9000
    assert ii_em[2021 - syr] > 9000
    # AMT_em: indexing turned off in 2017 (flat through 2020), back on in 2020
    amt_em = pol._AMT_em
    assert amt_em[2016 - syr, 0] > amt_em[2015 - syr, 0]
    assert amt_em[2017 - syr, 0] > amt_em[2016 - syr, 0]
    assert amt_em[2018 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2019 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2020 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2021 - syr, 0] > amt_em[2020 - syr, 0]
    assert amt_em[2022 - syr, 0] > amt_em[2021 - syr, 0]
    # parameter untouched by the reform keeps its current-law values
    add4aged = pol._ID_Medical_frt_add4aged
    assert add4aged[2015 - syr] == -0.025
    assert add4aged[2016 - syr] == -0.025
    assert add4aged[2017 - syr] == 0.0
    assert add4aged[2022 - syr] == 0.0
def test_constant_inflation_rate_with_reform():
    """
    Test indexing of policy parameters involved in a reform:
    the implied II_em growth on either side of a late-year reform
    must match the published price inflation rates.
    """
    pol = Policy()
    # implement reform in year before final year
    fyr = Policy.LAST_BUDGET_YEAR
    ryr = fyr - 1
    pol.implement_reform({
        'II_em': {(ryr - 3): 1000,  # to avoid divide-by-zero under TCJA
                  ryr: 20000}
    })
    pirates = pol.inflation_rates()
    syr = Policy.JSON_START_YEAR
    # implied inflation rate just before the reform year
    before = float(pol._II_em[ryr - 1 - syr]) / float(pol._II_em[ryr - 2 - syr])
    assert round(before - 1.0, 4) == round(pirates[ryr - 2 - syr], 4)
    # implied inflation rate just after the reform year
    after = float(pol._II_em[ryr + 1 - syr]) / float(pol._II_em[ryr - syr])
    assert round(after - 1.0, 6) == round(pirates[ryr - syr], 6)
def test_variable_inflation_rate_with_reform():
    """
    Test indexing of policy parameters involved in a reform using
    year-specific inflation rates around a 2020 reform.
    """
    pol = Policy()
    syr = Policy.JSON_START_YEAR
    assert pol._II_em[2013 - syr] == 3900
    # implement reform in 2020 which is two years before the last year, 2022
    pol.implement_reform({
        'II_em': {2018: 1000,  # to avoid divide-by-zero under TCJA
                  2020: 20000}
    })
    pol.set_year(2020)
    assert pol.current_year == 2020
    pirates = pol.inflation_rates()

    def implied_rate(year):
        # growth of II_em from year to year + 1
        ratio = float(pol._II_em[year + 1 - syr]) / float(pol._II_em[year - syr])
        return ratio - 1.0

    # 2018->2019 precedes the reform; 2020->2021 and 2021->2022 follow it
    for year in (2018, 2020, 2021):
        assert round(implied_rate(year), 5) == round(pirates[year - syr], 5)
def test_multi_year_reform():
    """
    Test multi-year reform involving 1D and 2D parameters.
    """
    # specify dimensions of policy Policy object
    syr = Policy.JSON_START_YEAR
    nyrs = Policy.DEFAULT_NUM_YEARS
    pol = Policy()
    # build year -> (1 + rate) factors for price inflation and wage growth
    iratelist = pol.inflation_rates()
    wratelist = pol.wage_growth_rates()
    ifactor = {syr + i: 1.0 + iratelist[i] for i in range(nyrs)}
    wfactor = {syr + i: 1.0 + wratelist[i] for i in range(nyrs)}
    # specify multi-year reform using a param:year:value-formatted dictionary
    reform = {
        'SS_Earnings_c': {2016: 300000,
                          2017: 500000,
                          2019: 700000},
        'SS_Earnings_c-indexed': {2017: False,
                                  2019: True},
        'CTC_c': {2015: 2000},
        'EITC_c': {2016: [900, 5000, 8000, 9000],
                   2019: [1200, 7000, 10000, 12000]},
        'II_em': {2016: 7000,
                  2019: 9000}
    }
    # implement multi-year reform
    pol.implement_reform(reform)
    assert pol.current_year == syr
    # move policy Policy object forward in time so current_year is syr+2
    # Note: this would be typical usage because the first budget year
    # is typically greater than Policy start_year.
    pol.set_year(pol.start_year + 2)
    assert pol.current_year == syr + 2
    # confirm that actual parameters have expected post-reform values
    check_eitc_c(pol, reform, ifactor)
    check_ii_em(pol, reform, ifactor)
    check_ss_earnings_c(pol, reform, wfactor)
    check_ctc_c(pol, reform)
# end of test_multi_year_reform with the check_* functions below:
def check_ctc_c(ppo, reform):
    """
    Compare actual and expected _CTC_c parameter values
    generated by the test_multi_year_reform() function above.
    Ensure that future-year values in policy_current_law.json
    are overwritten by reform.
    """
    arr = getattr(ppo, '_CTC_c')
    actual = {ppo.start_year + i: arr[i] for i in range(ppo.num_years)}
    # pre-reform current-law values
    assert actual[2013] == 1000
    assert actual[2014] == 1000
    # reform value in 2015
    assert actual[2015] == reform['CTC_c'][2015]
    # CTC_c is unindexed, so each later year keeps the prior year's value
    for year in range(2016, 2020):
        assert actual[year] == actual[year - 1]
def check_eitc_c(ppo, reform, ifactor):
    """
    Compare actual and expected _EITC_c parameter values
    generated by the test_multi_year_reform() function above.
    """
    arr = getattr(ppo, '_EITC_c')
    alen = len(arr[0])
    actual = {ppo.start_year + i: arr[i] for i in range(ppo.num_years)}

    def close(act, exp):
        # tight absolute tolerance: values are rounded to cents
        return np.allclose(act, exp, atol=0.01, rtol=0.0)

    # pre-reform current-law values
    assert close(actual[2013], [487, 3250, 5372, 6044])
    assert close(actual[2014], [496, 3305, 5460, 6143])
    assert close(actual[2015], [503, 3359, 5548, 6242])
    # reform value in 2016, then indexed in 2017-2018
    assert close(actual[2016], reform['EITC_c'][2016])
    for year in (2017, 2018):
        expect = [ifactor[year - 1] * actual[year - 1][j] for j in range(alen)]
        assert close(actual[year], expect)
    # reform value in 2019, then indexed in 2020-2022
    assert close(actual[2019], reform['EITC_c'][2019])
    for year in (2020, 2021, 2022):
        expect = [ifactor[year - 1] * actual[year - 1][j] for j in range(alen)]
        assert close(actual[year], expect)
def check_ii_em(ppo, reform, ifactor):
    """
    Compare actual and expected _II_em parameter values
    generated by the test_multi_year_reform() function above.
    """
    arr = getattr(ppo, '_II_em')
    actual = {ppo.start_year + i: arr[i] for i in range(ppo.num_years)}
    # pre-reform current-law values
    assert actual[2013] == 3900
    assert actual[2014] == 3950
    assert actual[2015] == 4000
    # reform value in 2016, then indexed in 2017-2018
    assert actual[2016] == reform['II_em'][2016]
    for year in (2017, 2018):
        expect = ifactor[year - 1] * actual[year - 1]
        assert np.allclose([actual[year]], [expect], atol=0.01, rtol=0.0)
    # reform value in 2019, then indexed in 2020-2022
    assert actual[2019] == reform['II_em'][2019]
    for year in (2020, 2021, 2022):
        expect = ifactor[year - 1] * actual[year - 1]
        assert np.allclose([actual[year]], [expect], atol=0.01, rtol=0.0)
def check_ss_earnings_c(ppo, reform, wfactor):
    """
    Compare actual and expected _SS_Earnings_c parameter values
    generated by the test_multi_year_reform() function above.
    """
    arr = getattr(ppo, '_SS_Earnings_c')
    actual = {ppo.start_year + i: arr[i] for i in range(ppo.num_years)}
    # pre-reform current-law values
    assert actual[2013] == 113700
    assert actual[2014] == 117000
    assert actual[2015] == 118500
    # reform values in 2016 and 2017
    for year in (2016, 2017):
        assert actual[year] == reform['SS_Earnings_c'][year]
    assert actual[2018] == actual[2017]  # no indexing after 2017
    # reform value in 2019, then wage-indexed afterwards
    assert actual[2019] == reform['SS_Earnings_c'][2019]
    assert actual[2020] == wfactor[2019] * actual[2019]  # indexing after 2019
    for year in (2021, 2022):
        expect = wfactor[year - 1] * actual[year - 1]
        assert np.allclose([actual[year]], [expect], atol=0.01, rtol=0.0)
def test_policy_metadata():
    """
    Test that metadata() method returns expected dictionary.
    """
    clp = Policy()
    # a non-empty (truthy) dictionary is expected
    assert clp.metadata()
def test_implement_reform_raises_on_no_year():
    """
    Test that implement_reform raises error for missing year.
    """
    ppo = Policy()
    # reform values must be nested under a year key
    with pytest.raises(pt.ValidationError):
        ppo.implement_reform({'STD_Aged': [1400, 1200, 1400, 1400, 1400]})
def test_implement_reform_raises_on_early_year():
    """
    Test that implement_reform raises error for early year.
    """
    ppo = Policy()
    # 2010 precedes the first possible budget year, so the reform is rejected
    with pytest.raises(pt.ValidationError):
        ppo.implement_reform({'STD_Aged': {2010: [1400, 1100, 1100, 1400, 1400]}})
def test_reform_with_default_indexed():
    """
    Test that implement_reform indexes after first reform year.
    """
    ppo = Policy()
    ppo.implement_reform({'II_em': {2015: 4300}})
    # II_em has a default indexing status of true, so
    # in 2016 its value should be greater than 4300
    ppo.set_year(2016)
    assert ppo.II_em > 4300
def test_reform_makes_no_changes_before_year():
    """
    Test that implement_reform makes no changes before first reform year.
    """
    ppo = Policy()
    ppo.implement_reform({'II_em': {2015: 4400}, 'II_em-indexed': {2015: True}})
    ppo.set_year(2015)
    # 2013 and 2014 keep current-law values; 2015 takes the reform value
    expected = np.array([3900, 3950, 4400])
    assert np.allclose(ppo._II_em[:3], expected, atol=0.01, rtol=0.0)
    assert ppo.II_em == 4400
@pytest.mark.parametrize("set_year", [False, True])
def test_read_json_reform_and_implement_reform(set_year):
    """
    Test reading and translation of reform file into a reform dictionary
    that is then used to call implement_reform method.
    NOTE: implement_reform called when policy.current_year == policy.start_year
    Parameterized on whether set_year(2015) is called before the reform.
    """
    reform_json = """
// Example of JSON reform text suitable for the
// Policy.read_json_reform() method.
// This JSON text can contain any number of trailing //-style comments,
// which will be removed before the contents are converted from JSON to
// a dictionary.
// The primary keys are policy parameters and secondary keys are years.
// Both the primary & secondary key values must be enclosed in quotes (").
// Boolean variables are specified as true or false with no quotes and all
// lowercase characters.
{
"AMT_brk1": // top of first AMT tax bracket
{"2015": 200000,
"2017": 300000
},
"EITC_c": // max EITC amount by number of qualifying kids (0,1,2,3+)
{"2016": [ 900, 5000, 8000, 9000],
"2019": [1200, 7000, 10000, 12000]
},
"II_em": // personal exemption amount (see indexing changes below)
{"2016": 6000,
"2018": 7500,
"2020": 9000
},
"II_em-indexed": // personal exemption amount indexing status
{"2016": false, // values in future years are same as this year value
"2018": true // vals in future years indexed with this year as base
},
"SS_Earnings_c": // Social Security (OASDI) maximum taxable earnings
{"2016": 300000,
"2018": 500000,
"2020": 700000
},
"AMT_em-indexed": // AMT exemption amount indexing status
{"2017": false, // values in future years are same as this year value
"2020": true // vals in future years indexed with this year as base
}
}
"""
    policy = Policy()
    if set_year:
        policy.set_year(2015)
    reform_dict = Policy.read_json_reform(reform_json)
    policy.implement_reform(reform_dict)
    syr = policy.start_year
    # AMT_brk1: reform pins 2015 and 2017; 2016 and 2018 are indexed upward
    amt_brk1 = policy._AMT_brk1
    assert amt_brk1[2015 - syr] == 200000
    assert amt_brk1[2016 - syr] > 200000
    assert amt_brk1[2017 - syr] == 300000
    assert amt_brk1[2018 - syr] > 300000
    # II_em: unindexed after 2016 (flat 2017), indexed after 2018 and 2020
    ii_em = policy._II_em
    assert ii_em[2016 - syr] == 6000
    assert ii_em[2017 - syr] == 6000
    assert ii_em[2018 - syr] == 7500
    assert ii_em[2019 - syr] > 7500
    assert ii_em[2020 - syr] == 9000
    assert ii_em[2021 - syr] > 9000
    # AMT_em: indexing off in 2017 (flat through 2020), back on in 2020
    amt_em = policy._AMT_em
    assert amt_em[2016 - syr, 0] > amt_em[2015 - syr, 0]
    assert amt_em[2017 - syr, 0] > amt_em[2016 - syr, 0]
    assert amt_em[2018 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2019 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2020 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2021 - syr, 0] > amt_em[2020 - syr, 0]
    assert amt_em[2022 - syr, 0] > amt_em[2021 - syr, 0]
    # parameter untouched by the reform keeps its current-law values
    add4aged = policy._ID_Medical_frt_add4aged
    assert add4aged[2015 - syr] == -0.025
    assert add4aged[2016 - syr] == -0.025
    assert add4aged[2017 - syr] == 0.0
    assert add4aged[2022 - syr] == 0.0
def test_pop_the_cap_reform():
    """
    Test eliminating the maximum taxable earnings (MTE)
    used in the calculation of the OASDI payroll tax.
    """
    ppo = Policy()
    syr = Policy.JSON_START_YEAR
    assert ppo.current_year == syr
    # confirm that MTE has current-law values in 2015 and 2016
    mte = ppo._SS_Earnings_c
    assert mte[2015 - syr] == 118500
    assert mte[2016 - syr] == 118500
    # specify a "pop the cap" reform that eliminates MTE cap in 2016
    ppo.implement_reform({'SS_Earnings_c': {2016: 9e99}})
    mte = ppo._SS_Earnings_c
    assert mte[2015 - syr] == 118500
    assert mte[2016 - syr] == 9e99
    assert mte[ppo.end_year - syr] == 9e99
def test_order_of_indexing_and_level_reforms():
    """
    Test that the order of the two reform provisions for the same parameter
    make no difference to the post-reform policy parameter values.
    """
    # two orderings of a reform that raises the MTE and ends its indexing
    reforms = [
        {'SS_Earnings_c': {2015: 500000},
         'SS_Earnings_c-indexed': {2015: False}},
        # now reverse the order of the two reform provisions
        {'SS_Earnings_c-indexed': {2015: False},
         'SS_Earnings_c': {2015: 500000}},
    ]
    syr = Policy.JSON_START_YEAR
    # apply each ordering to a fresh Policy object & check post-reform values
    for reform in reforms:
        pol = Policy()
        # confirm pre-reform MTE values in 2014-2017
        mte = pol._SS_Earnings_c
        assert mte[2014 - syr] == 117000
        assert mte[2015 - syr] == 118500
        assert mte[2016 - syr] == 118500
        assert mte[2017 - syr] < 500000
        # implement reform in 2015
        pol.implement_reform(reform)
        # confirm post-reform MTE values in 2014-2017
        mte = pol._SS_Earnings_c
        assert mte[2014 - syr] == 117000
        assert mte[2015 - syr] == 500000
        assert mte[2016 - syr] == 500000
        assert mte[2017 - syr] == 500000
def test_misspecified_reform_dictionary():
    """
    Demonstrate pitfalls of careless specification of policy reform
    dictionaries involving non-unique dictionary keys.
    """
    # specify apparently the same reform in two different ways, forgetting
    # that Python dictionaries have unique keys
    reform1 = {'II_em': {2019: 1000, 2020: 2000}}
    # pylint: disable=duplicate-key
    reform2 = {'II_em': {2019: 1000}, 'II_em': {2020: 2000}}
    # the duplicated 'II_em' key means its second value OVERWRITES and
    # REPLACES the first, so reform2 keeps only the 2020 provision
    assert reform1 != reform2
def test_section_titles(tests_path):
    """
    Check section titles in policy_current_law.json and uguide.htmx files.
    The section titles used in both files must exactly match the
    valid_dict specification constructed below.
    """
    # pylint: disable=too-many-locals
    def generate_section_dictionary(md_text):
        """
        Returns dictionary of section titles that is
        structured like the VALID_SECTION dictionary (see below) and
        extracted from the specified html_text.
        """
        sdict = dict()
        for line in md_text.splitlines():
            # This is shown as an empty case in current law policy and
            # validation.
            if line.startswith('## Other Parameters (not in Tax-Brain webapp'):
                sdict[''] = {}
                sdict[''][''] = 0
                continue
            sec2line = line.startswith('### ')
            sec1line = line.startswith('## ')
            # Create outer-layer dictionary entry for sec1.
            if sec1line:
                sec1 = line.replace('##', '', 1).strip()
                sdict[sec1] = {}
            # Create inner dictionary entry for sec1-sec2.
            # Note that sec1 will have been defined from a previous loop.
            if sec2line:
                sec2 = line.replace('###', '', 1).strip()
                sdict[sec1][sec2] = 0
        return sdict
    # begin main logic of test_section_titles
    # specify expected section titles ordered as on the Tax-Brain webapp
    ided_ceiling_pct = ('Ceiling On The Benefit Of Itemized Deductions '
                        'As A Percent Of Deductible Expenses')
    cgqd_tax_same = ('Tax All Capital Gains And Dividends The Same '
                     'As Regular Taxable Income')
    # pylint: disable=bad-continuation
    valid_dict = {
        '': {  # empty section_1 implies parameter not displayed in Tax-Brain
            '': 0
        },
        'Parameter Indexing': {
            'Offsets': 0
        },
        'Payroll Taxes': {
            'Social Security FICA': 0,
            'Medicare FICA': 0,
            'Additional Medicare FICA': 0
        },
        'Social Security Taxability': {
            'Threshold For Social Security Benefit Taxability 1': 0,
            # 'Social Security Taxable Income Decimal Fraction 1': 0,
            'Threshold For Social Security Benefit Taxability 2': 0
            # 'Social Security Taxable Income Decimal Fraction 2': 0
        },
        'Above The Line Deductions': {
            'Misc. Adjustment Haircuts': 0,
            'Misc. Exclusions': 0,
            'Child And Elderly Care': 0
        },
        'Personal Exemptions': {
            'Personal And Dependent Exemption Amount': 0,
            # 'Personal Exemption Phaseout Starting Income': 0,
            'Personal Exemption Phaseout Rate': 0,
            'Repeal for Dependents Under Age 18': 0
        },
        'Standard Deduction': {
            'Standard Deduction Amount': 0,
            'Additional Standard Deduction For Blind And Aged': 0
            # 'Standard Deduction For Dependents': 0
        },
        'Nonrefundable Credits': {
            'Misc. Credit Limits': 0,
            'Child And Dependent Care': 0,
            'Personal Nonrefundable Credit': 0
        },
        'Child/Dependent Credits': {
            'Child Tax Credit': 0,
            'Additional Child Tax Credit': 0,
            'Other Dependent Tax Credit': 0
        },
        'Itemized Deductions': {
            'Medical Expenses': 0,
            'State And Local Income And Sales Taxes': 0,
            'State, Local, And Foreign Real Estate Taxes': 0,
            'State And Local Taxes And Real Estate Taxes': 0,
            'Interest Paid': 0,
            'Charity': 0,
            'Casualty': 0,
            'Miscellaneous': 0,
            'Itemized Deduction Limitation': 0,
            'Surtax On Itemized Deduction Benefits Above An AGI Threshold': 0,
            ided_ceiling_pct: 0,
            'Ceiling On The Amount Of Itemized Deductions Allowed': 0
        },
        'Capital Gains And Dividends': {
            'Regular - Long Term Capital Gains And Qualified Dividends': 0,
            'AMT - Long Term Capital Gains And Qualified Dividends': 0,
            cgqd_tax_same: 0
        },
        'Personal Income': {
            'Regular: Non-AMT, Non-Pass-Through': 0,
            'Pass-Through': 0,
            'Alternative Minimum Tax': 0
        },
        'Other Taxes': {
            'Net Investment Income Tax': 0
        },
        'Refundable Credits': {
            'Earned Income Tax Credit': 0,
            'New Refundable Child Tax Credit': 0,
            'Personal Refundable Credit': 0,
            'Refundable Payroll Tax Credit': 0
        },
        'Surtaxes': {
            'New Minimum Tax': 0,
            'New AGI Surtax': 0,
            'Lump-Sum Tax': 0
        },
        'Universal Basic Income': {
            'UBI Benefits': 0,
            'UBI Taxability': 0
        },
        'Benefits': {
            'Benefit Repeal': 0,
        }
    }
    # check validity of parameter section titles in policy_current_law.json
    path = os.path.join(tests_path, '..', 'policy_current_law.json')
    with open(path, 'r') as clpfile:
        clpdict = json.load(clpfile)
        clpdict.pop("schema", None)
    # ... make sure ever clpdict section title is in valid_dict
    clp_dict = dict()  # dictionary of clp section titles structured like valid
    for pname in clpdict:
        param = clpdict[pname]
        assert isinstance(param, dict)
        sec1title = param['section_1']
        assert sec1title in valid_dict
        sec2title = param['section_2']
        assert sec2title in valid_dict[sec1title]
        if sec1title not in clp_dict:
            clp_dict[sec1title] = {}
        if sec2title not in clp_dict[sec1title]:
            clp_dict[sec1title][sec2title] = 0
    # ... make sure every valid_dict section title is in clpdict
    for sec1title in valid_dict:
        assert isinstance(valid_dict[sec1title], dict)
        assert sec1title in clp_dict
        for sec2title in valid_dict[sec1title]:
            assert sec2title in clp_dict[sec1title]
    # check validity of parameter section titles in docs/uguide.htmx skeleton
    path = os.path.join(tests_path, '..', '..', 'docs', 'guide',
                        'policy_params.md')
    with open(path, 'r') as md_file:
        md_text = md_file.read()
    md_dict = generate_section_dictionary(md_text)
    # ... make sure every md_dict section title is in valid_dict
    for sec1title in md_dict:
        assert isinstance(md_dict[sec1title], dict)
        assert sec1title in valid_dict
        for sec2title in md_dict[sec1title]:
            assert sec2title in valid_dict[sec1title]
    # ... make sure every valid_dict section title is in md_dict
    for sec1title in valid_dict:
        assert isinstance(valid_dict[sec1title], dict)
        assert sec1title in md_dict
        for sec2title in valid_dict[sec1title]:
            assert sec2title in md_dict[sec1title]
def test_description_punctuation(tests_path):
    """
    Check that each description ends in a period.
    """
    # read JSON file into a dictionary
    path = os.path.join(tests_path, '..', 'policy_current_law.json')
    with open(path, 'r') as jsonfile:
        params = json.load(jsonfile)
    params.pop("schema", None)
    # collect (and report) parameters whose description lacks a final period
    bad_params = [name for name in params
                  if not params[name]['description'].endswith('.')]
    for name in bad_params:
        print('param,description=', str(name), params[name]['description'])
    assert not bad_params
def test_get_index_rate():
    """
    Test Parameters.get_index_rate.
    """
    pol = Policy()
    # wage-growth-indexed and price-inflation-indexed parameters
    wgrate = pol.get_index_rate('SS_Earnings_c', 2017)
    pirate = pol.get_index_rate('II_em', 2017)
    assert isinstance(wgrate, np.float64)
    assert wgrate == pol.wage_growth_rates(2017)
    assert pirate == pol.inflation_rates(2017)
    assert isinstance(pirate, np.float64)
    # no-argument calls return the full rate lists
    assert pol.inflation_rates() == pol._inflation_rates
    assert pol.wage_growth_rates() == pol._wage_growth_rates
def test_reform_with_bad_ctc_levels():
    """
    Implement a reform with _ACTC > _CTC_c values.
    """
    pol = Policy()
    # the refundable ACTC_c may not exceed CTC_c, so this reform is invalid
    child_credit_reform = {'CTC_c': {2020: 2200},
                           'ACTC_c': {2020: 2500}}
    with pytest.raises(pt.ValidationError):
        pol.implement_reform(child_credit_reform)
def test_reform_with_removed_parameter(monkeypatch):
    """
    Try to use removed parameter in a reform.
    """
    # removed value parameter
    policy1 = Policy()
    with pytest.raises(pt.ValidationError):
        policy1.implement_reform({'FilerCredit_c': {2020: 1000}})
    # removed indexed-status parameter
    policy2 = Policy()
    with pytest.raises(pt.ValidationError):
        policy2.implement_reform({'FilerCredit_c-indexed': {2020: True}})
    # redefined parameter (patched onto the class for this test only)
    monkeypatch.setattr(Policy, "REDEFINED_PARAMS",
                        {"some_redefined": "some_redefined was redefined."})
    pol = Policy()
    with pytest.raises(pt.ValidationError):
        pol.implement_reform({"some_redefined": "hello world"})
def test_reform_with_out_of_range_error():
    """
    Try to use out-of-range values versus other parameter values in a reform.
    """
    pol = Policy()
    reform = {'SS_thd85': {2020: [20000, 20000, 20000, 20000, 20000]}}
    # with raise_errors=False the errors are recorded rather than raised
    pol.implement_reform(reform, raise_errors=False)
    assert pol.parameter_errors
def test_reform_with_warning():
    """
    Try to use warned out-of-range parameter value in reform.
    """
    exp_warnings = {
        'ID_Medical_frt': [
            'ID_Medical_frt[year=2020] 0.05 < min 0.075 '
        ]
    }
    pol = Policy()
    reform = {'ID_Medical_frt': {2020: 0.05}}
    # warnings are collected when printed ...
    pol.implement_reform(reform, print_warnings=True)
    assert pol.warnings == exp_warnings
    pol.set_state(year=2020)
    assert pol.ID_Medical_frt == np.array([0.05])
    # ... and suppressed when not printed; the value is applied either way
    pol.implement_reform(reform, print_warnings=False)
    assert pol.warnings == {}
    pol.set_state(year=2020)
    assert pol.ID_Medical_frt == np.array([0.05])
def test_reform_with_scalar_vector_errors():
    """
    Test catching scalar-vector confusion.
    """
    # scalar supplied for a vector parameter
    policy1 = Policy()
    with pytest.raises(pt.ValidationError):
        policy1.implement_reform({'SS_thd85': {2020: 30000}})
    # vector supplied for a scalar parameter
    policy2 = Policy()
    with pytest.raises(pt.ValidationError):
        policy2.implement_reform({'ID_Medical_frt': {2020: [0.08]}})
    # same confusion expressed via the paramtools-style adjust API
    policy3 = Policy()
    with pytest.raises(pt.ValidationError):
        policy3.adjust({'ID_Medical_frt': [{"year": 2020, "value": [0.08]}]})
    # Check that error is thrown if there are extra elements in array.
    policy4 = Policy()
    ref4 = {"II_brk1": {2020: [9700, 19400, 9700, 13850, 19400, 19400]}}
    with pytest.raises(pt.ValidationError):
        policy4.implement_reform(ref4)
    policy5 = Policy()
    with pytest.raises(pt.ValidationError):
        policy5.implement_reform({"II_rt1": {2029: [.2, .3]}})
def test_index_offset_reform():
    """
    Test a reform that includes both a change in parameter_indexing_CPI_offset
    and a change in a variable's indexed status in the same year.
    """
    # create policy0 to extract inflation rates before any
    # parameter_indexing_CPI_offset
    policy0 = Policy()
    policy0.implement_reform({'parameter_indexing_CPI_offset': {2017: 0}})
    cpiu_rates = policy0.inflation_rates()
    # policy1: turn on CTC_c indexing only
    reform1 = {'CTC_c-indexed': {2020: True}}
    policy1 = Policy()
    policy1.implement_reform(reform1)
    # policy2: same indexing change combined with a CPI offset in 2020
    offset = -0.005
    reform2 = {'CTC_c-indexed': {2020: True},
               'parameter_indexing_CPI_offset': {2020: offset}}
    policy2 = Policy()
    policy2.implement_reform(reform2)  # caused T-C crash before PR#2364
    # extract from policy1 and policy2 the parameter values of CTC_c
    pvalue1 = dict()
    pvalue2 = dict()
    for cyr in [2019, 2020, 2021]:
        policy1.set_year(cyr)
        pvalue1[cyr] = policy1.CTC_c[0]
        policy2.set_year(cyr)
        pvalue2[cyr] = policy2.CTC_c[0]
    # check that pvalue1 and pvalue2 dictionaries contain the expected values
    assert pvalue2[2019] == pvalue1[2019]
    assert pvalue2[2020] == pvalue1[2020]
    assert pvalue2[2020] == pvalue2[2019]
    # ... indexing of CTC_c begins shows up first in 2021 parameter values
    assert pvalue1[2021] > pvalue1[2020]
    assert pvalue2[2021] > pvalue2[2020]
    # ... calculate expected pvalue2[2021] from offset and pvalue1 values:
    # the 2021 value should grow from 2020 by (CPI-U rate + offset)
    indexrate1 = pvalue1[2021] / pvalue1[2020] - 1.
    syear = Policy.JSON_START_YEAR
    expindexrate = cpiu_rates[2020 - syear] + offset
    expvalue = round(pvalue2[2020] * (1. + expindexrate), 2)
    # ... compare expected value with actual value of pvalue2 for 2021
    assert np.allclose([expvalue], [pvalue2[2021]])
def test_cpi_offset_affect_on_prior_years():
    """
    Test that parameter_indexing_CPI_offset does not affect
    inflation rates in earlier years.
    """
    offset = -0.005
    p1 = Policy()
    p1.implement_reform({'parameter_indexing_CPI_offset': {2022: 0}})
    p2 = Policy()
    p2.implement_reform({'parameter_indexing_CPI_offset': {2022: offset}})
    start_year = p1.start_year
    p1_rates = np.array(p1.inflation_rates())
    p2_rates = np.array(p2.inflation_rates())
    cut = 2022 - start_year
    # Inflation rates prior to 2022 are the same.
    np.testing.assert_allclose(p1_rates[:cut], p2_rates[:cut])
    # Inflation rate in 2022 was updated by the offset.
    np.testing.assert_allclose(p1_rates[cut], p2_rates[cut] - offset)
def test_cpi_offset_on_reverting_params():
    """
    Test that params that revert to their pre-TCJA values
    in 2026 revert if a parameter_indexing_CPI_offset is specified.
    """
    offset_prov = {'parameter_indexing_CPI_offset': {2020: -0.001}}
    reforms = [
        dict(offset_prov),  # offset only
        {'STD': {2017: [6350, 12700, 6350, 9350, 12700]}, **offset_prov},
        {'STD': {2020: [10000, 20000, 10000, 10000, 20000]}, **offset_prov},
    ]
    policies = []
    for reform in reforms:
        pol = Policy()
        pol.implement_reform(reform)
        policies.append(pol)
    p0, p1, p2 = policies
    idx = 2026 - Policy.JSON_START_YEAR
    # STD was reverted in 2026
    # atol=0.5 because ppp.py rounds params to nearest int
    assert np.allclose(p0._STD[idx], p1._STD[idx], atol=0.5)
    # STD was not reverted in 2026 if included in revision
    assert not np.allclose(p1._STD[idx], p2._STD[idx], atol=0.5)
class TestAdjust:
    """
    Test update and indexing rules as defined in the Parameters docstring.
    Each test implements a Tax-Calculator style reform and a pt styled
    reform, checks that the updated values are equal, and then, tests that
    values were extended and indexed (or not indexed) correctly.
    """

    def test_simple_adj(self):
        """
        Test updating a 2D parameter that is indexed to inflation.
        """
        pol1 = Policy()
        pol1.implement_reform(
            {
                "EITC_c": {
                    2020: [10000, 10001, 10002, 10003],
                    2023: [20000, 20001, 20002, 20003],
                }
            }
        )
        # Same reform expressed in paramtools (pt) style.
        pol2 = Policy()
        pol2.adjust(
            {
                "EITC_c": [
                    {"year": 2020, "EIC": "0kids", "value": 10000},
                    {"year": 2020, "EIC": "1kid", "value": 10001},
                    {"year": 2020, "EIC": "2kids", "value": 10002},
                    {"year": 2020, "EIC": "3+kids", "value": 10003},
                    {"year": 2023, "EIC": "0kids", "value": 20000},
                    {"year": 2023, "EIC": "1kid", "value": 20001},
                    {"year": 2023, "EIC": "2kids", "value": 20002},
                    {"year": 2023, "EIC": "3+kids", "value": 20003},
                ]
            }
        )
        # The two reform styles must yield identical policy objects.
        cmp_policy_objs(pol1, pol2)
        # Values before the first adjustment year match current law.
        pol0 = Policy()
        pol0.set_year(2019)
        pol2.set_year(2019)
        assert np.allclose(pol0.EITC_c, pol2.EITC_c)
        # 2021/2022 are inflated forward from 2020; 2024 from 2023.
        pol2.set_state(year=[2020, 2021, 2022, 2023, 2024])
        val2020 = np.array([[10000, 10001, 10002, 10003]])
        val2023 = np.array([[20000, 20001, 20002, 20003]])
        exp = np.vstack([
            val2020,
            val2020 * (1 + pol2.inflation_rates(year=2020)),
            (
                val2020 * (1 + pol2.inflation_rates(year=2020))
            ).round(2) * (1 + pol2.inflation_rates(year=2021)),
            val2023,
            val2023 * (1 + pol2.inflation_rates(year=2023)),
        ]).round(2)
        np.testing.assert_allclose(pol2.EITC_c, exp)

    def test_adj_without_index_1(self):
        """
        Test update indexed parameter after turning off its
        indexed status.
        """
        pol1 = Policy()
        pol1.implement_reform(
            {
                "EITC_c": {
                    2020: [10000, 10001, 10002, 10003],
                    2023: [20000, 20001, 20002, 20003],
                },
                "EITC_c-indexed": {2019: False},
            }
        )
        pol2 = Policy()
        pol2.adjust(
            {
                "EITC_c": [
                    {"year": 2020, "EIC": "0kids", "value": 10000},
                    {"year": 2020, "EIC": "1kid", "value": 10001},
                    {"year": 2020, "EIC": "2kids", "value": 10002},
                    {"year": 2020, "EIC": "3+kids", "value": 10003},
                    {"year": 2023, "EIC": "0kids", "value": 20000},
                    {"year": 2023, "EIC": "1kid", "value": 20001},
                    {"year": 2023, "EIC": "2kids", "value": 20002},
                    {"year": 2023, "EIC": "3+kids", "value": 20003},
                ],
                "EITC_c-indexed": [{"year": 2019, "value": False}],
            }
        )
        cmp_policy_objs(pol1, pol2)
        pol0 = Policy()
        pol0.set_year(2019)
        pol2.set_year(2019)
        assert np.allclose(pol0.EITC_c, pol2.EITC_c)
        # With indexing off from 2019, values are carried forward unchanged
        # until the next explicit adjustment year.
        pol2.set_state(year=[2020, 2021, 2022, 2023, 2024])
        val2020 = np.array([[10000, 10001, 10002, 10003]])
        val2023 = np.array([[20000, 20001, 20002, 20003]])
        exp = np.vstack([
            val2020,
            val2020,
            val2020,
            val2023,
            val2023,
        ]).round(2)
        np.testing.assert_allclose(pol2.EITC_c, exp)

    def test_adj_without_index_2(self):
        """
        Test updating an indexed parameter, making it unindexed,
        and then adjusting it again.
        """
        pol1 = Policy()
        pol1.implement_reform(
            {
                "EITC_c": {
                    2020: [10000, 10001, 10002, 10003],
                    2023: [20000, 20001, 20002, 20003],
                },
                "EITC_c-indexed": {2022: False},
            }
        )
        pol2 = Policy()
        pol2.adjust(
            {
                "EITC_c": [
                    {"year": 2020, "EIC": "0kids", "value": 10000},
                    {"year": 2020, "EIC": "1kid", "value": 10001},
                    {"year": 2020, "EIC": "2kids", "value": 10002},
                    {"year": 2020, "EIC": "3+kids", "value": 10003},
                    {"year": 2023, "EIC": "0kids", "value": 20000},
                    {"year": 2023, "EIC": "1kid", "value": 20001},
                    {"year": 2023, "EIC": "2kids", "value": 20002},
                    {"year": 2023, "EIC": "3+kids", "value": 20003},
                ],
                "EITC_c-indexed": [{"year": 2022, "value": False}],
            }
        )
        cmp_policy_objs(pol1, pol2)
        pol0 = Policy()
        pol0.set_year(2019)
        pol2.set_year(2019)
        assert np.allclose(pol0.EITC_c, pol2.EITC_c)
        # Indexed through 2021, flat from 2022; then the 2023 adjustment
        # is carried forward unindexed.
        pol2.set_state(year=[2020, 2021, 2022, 2023, 2024])
        val2020 = np.array([[10000, 10001, 10002, 10003]])
        val2023 = np.array([[20000, 20001, 20002, 20003]])
        exp = np.vstack([
            val2020,
            val2020 * (1 + pol2.inflation_rates(year=2020)),
            (
                val2020 * (1 + pol2.inflation_rates(year=2020))
            ).round(2) * (1 + pol2.inflation_rates(year=2021)),
            val2023,
            val2023,
        ]).round(2)
        np.testing.assert_allclose(pol2.EITC_c, exp)

    def test_activate_index(self):
        """
        Test changing a non-indexed parameter to an indexed parameter.
        """
        pol1 = Policy()
        pol1.implement_reform({
            "CTC_c": {2022: 1005},
            "CTC_c-indexed": {2022: True}
        })
        pol2 = Policy()
        pol2.adjust(
            {
                "CTC_c": [{"year": 2022, "value": 1005}],
                "CTC_c-indexed": [{"year": 2022, "value": True}],
            }
        )
        cmp_policy_objs(pol1, pol2)
        # 2021 keeps the current-law value; 2023 is inflated from 2022.
        pol0 = Policy()
        pol0.set_year(year=2021)
        pol2.set_state(year=[2021, 2022, 2023])
        exp = np.array([
            pol0.CTC_c[0],
            1005,
            1005 * (1 + pol2.inflation_rates(year=2022))
        ]).round(2)
        np.testing.assert_allclose(pol2.CTC_c, exp)

    def test_apply_cpi_offset(self):
        """
        Test applying the parameter_indexing_CPI_offset parameter
        without any other parameters.
        """
        pol1 = Policy()
        pol1.implement_reform(
            {"parameter_indexing_CPI_offset": {2021: -0.001}}
        )
        pol2 = Policy()
        pol2.adjust(
            {"parameter_indexing_CPI_offset": [
                {"year": 2021, "value": -0.001}
            ]}
        )
        cmp_policy_objs(pol1, pol2)
        # Baseline with a zero offset for comparison.
        pol0 = Policy()
        pol0.implement_reform({"parameter_indexing_CPI_offset": {2021: 0}})
        init_rates = pol0.inflation_rates()
        new_rates = pol2.inflation_rates()
        start_ix = 2021 - pol2.start_year
        # Removing the applied offset from the new rates must recover
        # the baseline rates.
        exp_rates = copy.deepcopy(new_rates)
        exp_rates[start_ix:] -= pol2._parameter_indexing_CPI_offset[start_ix:]
        np.testing.assert_allclose(init_rates, exp_rates)
        # make sure values prior to 2021 were not affected.
        cmp_policy_objs(pol0, pol2, year_range=range(pol2.start_year, 2021))
        # Year-over-year growth of an indexed parameter reflects the offset.
        pol2.set_state(year=[2021, 2022])
        np.testing.assert_equal(
            (pol2.EITC_c[1] / pol2.EITC_c[0] - 1).round(4),
            pol0.inflation_rates(year=2021) + (-0.001),
        )

    def test_multiple_cpi_swaps(self):
        """
        Test changing a parameter's indexed status multiple times.
        """
        pol1 = Policy()
        pol1.implement_reform(
            {
                "II_em": {2016: 6000, 2018: 7500, 2020: 9000},
                "II_em-indexed": {2016: False, 2018: True},
            }
        )
        pol2 = Policy()
        pol2.adjust(
            {
                "II_em": [
                    {"year": 2016, "value": 6000},
                    {"year": 2018, "value": 7500},
                    {"year": 2020, "value": 9000},
                ],
                "II_em-indexed": [
                    {"year": 2016, "value": False},
                    {"year": 2018, "value": True},
                ],
            }
        )
        cmp_policy_objs(pol1, pol2)
        # check inflation is not applied.
        pol2.set_state(year=[2016, 2017])
        np.testing.assert_equal(
            pol2.II_em[0], pol2.II_em[1]
        )
        # check inflation rate is applied.
        pol2.set_state(year=[2018, 2019])
        np.testing.assert_equal(
            (pol2.II_em[1] / pol2.II_em[0] - 1).round(4),
            pol2.inflation_rates(year=2018),
        )
        # check inflation rate applied for rest of window.
        window = list(range(2020, pol2.end_year + 1))
        pol2.set_state(year=window)
        np.testing.assert_equal(
            (pol2.II_em[1:] / pol2.II_em[:-1] - 1).round(4),
            [pol2.inflation_rates(year=year) for year in window[:-1]],
        )

    def test_multiple_cpi_swaps2(self):
        """
        Test changing the indexed status of multiple parameters multiple
        times.
        """
        pol1 = Policy()
        pol1.implement_reform(
            {
                "II_em": {2016: 6000, 2018: 7500, 2020: 9000},
                "II_em-indexed": {2016: False, 2018: True},
                "SS_Earnings_c": {2016: 300000, 2018: 500000},
                "SS_Earnings_c-indexed": {2017: False, 2019: True},
                "AMT_em-indexed": {2017: False, 2020: True},
            }
        )
        pol2 = Policy()
        pol2.adjust(
            {
                "SS_Earnings_c": [
                    {"year": 2016, "value": 300000},
                    {"year": 2018, "value": 500000},
                ],
                "SS_Earnings_c-indexed": [
                    {"year": 2017, "value": False},
                    {"year": 2019, "value": True},
                ],
                "AMT_em-indexed": [
                    {"year": 2017, "value": False},
                    {"year": 2020, "value": True},
                ],
                "II_em": [
                    {"year": 2016, "value": 6000},
                    {"year": 2018, "value": 7500},
                    {"year": 2020, "value": 9000},
                ],
                "II_em-indexed": [
                    {"year": 2016, "value": False},
                    {"year": 2018, "value": True},
                ],
            }
        )
        cmp_policy_objs(pol1, pol2)
        # Test SS_Earnings_c
        # check inflation is still applied from 2016 to 2017.
        # NOTE: SS_Earnings_c grows with wage growth, not CPI.
        pol2.set_state(year=[2016, 2017])
        np.testing.assert_equal(
            (pol2.SS_Earnings_c[1] / pol2.SS_Earnings_c[0] - 1).round(4),
            pol2.wage_growth_rates(year=2016),
        )
        # check inflation rate is not applied after adjustment in 2018.
        pol2.set_state(year=[2018, 2019])
        np.testing.assert_equal(
            pol2.SS_Earnings_c[0], pol2.SS_Earnings_c[1]
        )
        # check inflation rate applied for rest of window.
        window = list(range(2019, pol2.end_year + 1))
        pol2.set_state(year=window)
        np.testing.assert_equal(
            (pol2.SS_Earnings_c[1:] / pol2.SS_Earnings_c[:-1] - 1).round(4),
            [pol2.wage_growth_rates(year=year) for year in window[:-1]],
        )
        # Test AMT
        # Check values for 2017 through 2020 are equal.
        pol2.set_state(year=[2017, 2018, 2019, 2020])
        for i in (1, 2, 3):
            np.testing.assert_equal(
                pol2.AMT_em[0], pol2.AMT_em[i]
            )
        # check inflation rate applied for rest of window.
        window = list(range(2020, pol2.end_year + 1))
        pol2.set_state(year=window)
        # repeat inflation rates across matrix so they can be compared to the
        # rates derived from AMT_em, a 5 * N matrix.
        exp_rates = [pol2.inflation_rates(year=year) for year in window[:-1]]
        exp_rates = np.tile([exp_rates], (5, 1)).transpose()
        np.testing.assert_equal(
            (pol2.AMT_em[1:] / pol2.AMT_em[:-1] - 1).round(4),
            exp_rates,
        )
        # Test II_em
        # check inflation is not applied.
        pol2.set_state(year=[2016, 2017])
        np.testing.assert_equal(
            pol2.II_em[0], pol2.II_em[1]
        )
        # check inflation rate is applied.
        pol2.set_state(year=[2018, 2019])
        np.testing.assert_equal(
            (pol2.II_em[1] / pol2.II_em[0] - 1).round(4),
            pol2.inflation_rates(year=2018),
        )
        # check inflation rate applied for rest of window.
        window = list(range(2020, pol2.end_year + 1))
        pol2.set_state(year=window)
        np.testing.assert_equal(
            (pol2.II_em[1:] / pol2.II_em[:-1] - 1).round(4),
            [pol2.inflation_rates(year=year) for year in window[:-1]],
        )

    def test_adj_CPI_offset_and_index_status(self):
        """
        Test changing parameter_indexing_CPI_offset and another
        parameter simultaneously.
        """
        pol1 = Policy()
        pol1.implement_reform({
            "CTC_c-indexed": {2020: True},
            "parameter_indexing_CPI_offset": {2020: -0.005}},
        )
        pol2 = Policy()
        pol2.adjust(
            {
                "parameter_indexing_CPI_offset":
                    [{"year": 2020, "value": -0.005}],
                "CTC_c-indexed": [{"year": 2020, "value": True}],
            }
        )
        cmp_policy_objs(pol1, pol2)
        # Check no difference prior to 2020
        pol0 = Policy()
        pol0.implement_reform({"parameter_indexing_CPI_offset": {2020: 0}})
        cmp_policy_objs(
            pol0,
            pol2,
            year_range=range(pol2.start_year, 2020 + 1),
            exclude=["parameter_indexing_CPI_offset"]
        )
        # CTC_c growth from 2021 to 2022 reflects the offset inflation rate.
        pol2.set_state(year=[2021, 2022])
        np.testing.assert_equal(
            (pol2.CTC_c[1] / pol2.CTC_c[0] - 1).round(4),
            pol0.inflation_rates(year=2021) + (-0.005),
        )

    def test_indexed_status_parsing(self):
        # A bare boolean is shorthand for setting the flag in the start year.
        pol1 = Policy()
        pol1.implement_reform({"EITC_c-indexed": {pol1.start_year: False}})
        pol2 = Policy()
        pol2.adjust({"EITC_c-indexed": False})
        cmp_policy_objs(pol1, pol2)
        # Non-boolean values for *-indexed parameters are rejected.
        with pytest.raises(pt.ValidationError):
            pol2.adjust({"EITC_c-indexed": 123})
|
{"hexsha": "537e9a11efb6c9e9ea850dde99ab8c578c6c9194", "size": 52694, "ext": "py", "lang": "Python", "max_stars_repo_path": "Tax-Calculator-3.0.0/taxcalc/tests/test_policy.py", "max_stars_repo_name": "grantseiter/Tax-Benefits-Of-Parenthood", "max_stars_repo_head_hexsha": "5350e832e8b877b46c2a3cab070fc8262b914a52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tax-Calculator-3.0.0/taxcalc/tests/test_policy.py", "max_issues_repo_name": "grantseiter/Tax-Benefits-Of-Parenthood", "max_issues_repo_head_hexsha": "5350e832e8b877b46c2a3cab070fc8262b914a52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tax-Calculator-3.0.0/taxcalc/tests/test_policy.py", "max_forks_repo_name": "grantseiter/Tax-Benefits-Of-Parenthood", "max_forks_repo_head_hexsha": "5350e832e8b877b46c2a3cab070fc8262b914a52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1660947152, "max_line_length": 80, "alphanum_fraction": 0.5739363115, "include": true, "reason": "import numpy", "num_tokens": 14841}
|
import numpy as np

# Build a (3, 5) array in which every element equals 1 and name it `arr1`.
arr1 = np.ones(shape=(3, 5))
arr1  # display the array (no effect outside an interactive session)

# Record the number of dimensions and the total element count of `arr1`
# in `arr1_dim` and `arr1_size`.
arr1_dim = arr1.ndim
arr1_size = arr1.size
|
{"hexsha": "2d2646c604dc0195c0a7942e105540118661f9c2", "size": 312, "ext": "py", "lang": "Python", "max_stars_repo_path": "exercises/en/solution_08_09.py", "max_stars_repo_name": "Lavendulaa/programming-in-python-for-data-science", "max_stars_repo_head_hexsha": "bc41da8afacf4c180ae0ff9c6dc26a7e6292252f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-26T20:15:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-26T20:15:44.000Z", "max_issues_repo_path": "exercises/en/solution_08_09.py", "max_issues_repo_name": "Lavendulaa/programming-in-python-for-data-science", "max_issues_repo_head_hexsha": "bc41da8afacf4c180ae0ff9c6dc26a7e6292252f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-06-15T23:05:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-01T22:07:45.000Z", "max_forks_repo_path": "exercises/en/solution_08_09.py", "max_forks_repo_name": "UBC-MDS/MCL-programming-in-python", "max_forks_repo_head_hexsha": "22836d9013d3e3d1b1074678ba7dc3ee2e66f398", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-25T20:53:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-25T20:53:13.000Z", "avg_line_length": 19.5, "max_line_length": 81, "alphanum_fraction": 0.7243589744, "include": true, "reason": "import numpy", "num_tokens": 94}
|
function dfupdatexlim(newminmax,updateplots)
%DFUPDATEXLIM Update the stored x axis min/max values
%   DFUPDATEXLIM(NEWMINMAX,UPDATEPLOTS) stores NEWMINMAX as the x-axis
%   limits and applies them to the current figure's axes.  If NEWMINMAX is
%   empty (or no arguments are given), limits are recomputed from the
%   datasets and fits whose plotting flag is on.  If UPDATEPLOTS is
%   omitted or true, all plots are refreshed when the limits change.

% $Revision: 1.1.6.5 $ $Date: 2004/01/24 09:36:03 $
% Copyright 2003-2004 The MathWorks, Inc.

minmax = []; % to become new x limits
oldminmax = dfgetset('xminmax'); % previous limits (NOTE(review): never read below)
ftype = dfgetset('ftype');

if nargin==0
    newminmax = [];
end

if isempty(newminmax) && isequal(ftype, 'icdf')
    % Default limits span most of the probability range
    minmax = [.01 .99];
elseif isempty(newminmax)
    % Update limits from datasets with a plotting flag on
    dsdb = getdsdb;
    ds = down(dsdb);
    while(~isempty(ds))
        if ds.plot == 1
            minmax = combineminmax(minmax,ds.xlim);
        end
        ds = right(ds);
    end

    % Update from fits with a plotting flag on
    fitdb = getfitdb;
    ft = down(fitdb);
    while(~isempty(ft))
        if ft.plot == 1
            minmax = combineminmax(minmax,xlim(ft));
        end
        ft = right(ft);
    end
else
    minmax = newminmax;
end

% Now update plot (skipped while interactive zoom is active)
dffig = dfgetset('dffig');
if ~isempty(minmax) && isequal(zoom(dffig,'getmode'),'off')
    ax = get(dffig,'CurrentAxes');
    islinscale = isequal(get(ax,'XScale'),'linear');
    % Non-positive limits cannot be shown on a log scale; fall back to a
    % six-decade range below the largest magnitude.
    if ~islinscale && any(minmax<=0)
        warning('stats:dfupdatexlim:NegativeDataIgnored',...
            'Negative data ignored.');
        minmax = [1e-6 1] * max(abs(minmax));
    end
    if isempty(newminmax) && ~isequal(ftype, 'icdf')
        % Adjust axis limits to include a margin around plotted points
        if islinscale
            dx = diff(minmax) * 0.01 * [-1 1];
            if all(dx==0), dx = [-1 1]; end
        else
            % On log scale the margin is multiplicative
            dlogx = .01 * diff(log(minmax));
            if dlogx==0, dlogx = 1; end
            dx = [minmax(1) * exp(-dlogx), minmax(2) * exp(dlogx)] - minmax;
        end
    elseif minmax(1)==minmax(2)
        % Degenerate (zero-width) range: widen it so the axes are valid
        if islinscale
            dx = [-1 1];
        else
            dx = [minmax(1)/2, 2*minmax(1)];
        end
    else
        % Don't adjust the limits that were passed in or computed
        dx = 0;
    end
    oldxlim = get(ax,'XLim');
    newxlim = minmax + dx;
    if ~isequal(oldxlim,newxlim)
        set(ax,'XLim',newxlim);
        if nargin<2 || updateplots
            dfupdateallplots(false,true);
        end
    end
end

dfgetset('xminmax',minmax);
% ------------ Helper to merge two [min max] pairs into one
function bothmm = combineminmax(oldmm,newmm)
% An empty pair contributes nothing; otherwise take the elementwise
% union of the two ranges.
if isempty(newmm)
    bothmm = oldmm;
elseif isempty(oldmm)
    bothmm = newmm;
else
    bothmm = [min(oldmm(1),newmm(1)), max(oldmm(2),newmm(2))];
end
|
{"author": "zouchuhang", "repo": "LayoutNet", "sha": "95293bfb8ff787dd3b02c8a52a147a703024980f", "save_path": "github-repos/MATLAB/zouchuhang-LayoutNet", "path": "github-repos/MATLAB/zouchuhang-LayoutNet/LayoutNet-95293bfb8ff787dd3b02c8a52a147a703024980f/matlab/panoContext_code/Toolbox/SpatialLayout_shrink/spatiallayoutcode/GeometricContext/geomContext_src_07_02_08/src/tools/weightedstats/private/dfupdatexlim.m"}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 09:05:48 2017
@author: r.dewinter
"""
from simplexGauss import simplexGauss
from simplexKriging import simplexKriging
from predictorEGO import predictorEGO
from paretofrontFeasible import paretofrontFeasible
from optimizeSMSEGOcriterion import optimizeSMSEGOcriterion
from hypervolume import hypervolume
from findAllLocalOptimaNew2 import findAllLocalOptimaNew
from visualiseParetoFront import visualiseParetoFront
from RbfInter import trainCubicRBF
from RbfInter import adjustMargins
from functools import partial
import numpy as np
from scipy.special import ndtri
import os
import json
import copy
import time
# use this if you want to execute only one iterations
def CEGOIteration(problemCall, rngMin, rngMax, ref, nconstraints, maxEval=None, smooth=2, runNo=0, epsilonInit=0.01, epsilonMax=0.02, data=None):
    """
    based on:
    1 Designing Ships using Constrained Multi-Objective Efficient Global Optimization
    Roy de Winter, Bas van Stein, Matthys Dijkman and Thomas Baeck
    In the Fourth international conference of machinelearning optimization and data science (2018)
    2 S-Metric Selection based Efficient Global Optimization (SMS-EGO) for
    multi-objective optimization problems
    Ponweiser, W.; Wagner, T.; Biermann, D.; Vincze, M.: Multiobjective
    Optimization on a Limited Amount of Evaluations Using Model-Assisted
    S-Metric Selection. In: Proc. 10th Int'l Conf. Parallel Problem Solving
    from Nature (PPSN X), 13.-17. September, Dortmund, Rudolph, G.; Jansen,
    T.; Lucas, S.; Poloni, C.; Beume, N. (Eds.). No. 5199 in Lecture Notes
    in Computer Science, Springer, Berlin, 2008, pp. 784-794.
    ISBN 978-3-540-87699-1. doi: 10.1007/978-3-540-87700-4_78
    3 Self-adjusting parameter control for surrogate-assisted constrained
    optimization under limited budgets
    Samineh Bagheri, Wolfgang Konen, Michael Emmerich, Thomas Baeck
    ELSEVIER Applied Soft Computing 61 (2017) 377-393
    4 Wagner, T.; Emmerich, M.; Deutz, A.; Ponweiser, W.: On Expected-
    Improvement Criteria for Model-Based Multi-Objective Optimization.
    In: Proc. 11th Int'l. Conf. Parallel Problem Solving From Nature
    (PPSN XI) - Part I, 11..-15. September, Krakau, Polen, Schaefer, R.;
    Cotta, C.; Kolodziej, J.; Rudolph, G. (Eds.). No. 6238 in Lecture Notes
    in Computer Science, Springer, Berlin, 2010, pp. 718-727.
    ISBN 978-3-642-15843-8. doi: 10.1007/978-3-642-15844-5_72
    5 Forrester, A.I.J.; Keane, A.J.; Bressloff, N.W.: Design and analysis of
    'noisy' computer experiments. In: AIAA Journal, 44 (2006) 10,
    pp. 2331-2339. doi: 10.2514/1.20068

    call: CONSTRAINED_SMSEGO(problemCall, rngMin, rngMax, ref, nconstraints)

    Input arguments
    problemCall: function handle to the objective function (required)
    rngMin: lower bound of the design space (dim)-np array (required)
    rngMax: upper bound of the design space (dim)-np array (required)
    ref: the maximum objective values interested in (required)
    nconstraints: the number of constraints returned by the problemCall (required)
    data: the data provided to learn the kriging and crbf models.
    data = np.column_stack(parameters, constraints, objectives)

    Optional input arguments:
    maxEval: maximum number of evaluations, default=40*number of variables,
    smooth: smoothing function, 1=smoothing with exponential kernel, 2=gaussian kernel,
    runNo: run number controls the seed,
    epsilonInit: the "allowed" constraint violation since we are not 100%
    confident about the constraint model, default=0.01,
    epsilonMax= the maximum "allowed" constraint violation, default=0.02

    Returns (objectives, constraints, parameters) arrays including the one
    newly evaluated point appended after the provided data.
    """
    if problemCall is None or rngMin is None or rngMax is None or ref is None or nconstraints is None or data is None:
        raise ValueError('SMSEGO requires at least six arguments (problemCall, rngMin, rngMax, ref, nconstraints, data)')
    nVar = len(rngMin)
    nObj = len(ref)
    evall = len(data)
    if maxEval is None:
        # only one additional evaluation is performed per call
        maxEval = evall+1
    # per-constraint violation margin (reference 3)
    EPS = np.array([epsilonInit]*nconstraints)
    Cfeas = 0
    Cinfeas = 0
    functionName = str(problemCall).split(' ')[1]
    outdir = 'results/'+str(functionName)+'/'
    # pre-allocate room for the evaluated data plus the one new point
    parameters = np.empty((evall+1, nVar))
    objectives = np.empty((evall+1, nObj))
    constraints = np.empty((evall+1, nconstraints))
    parameters[:] = np.nan
    objectives[:] = np.nan
    constraints[:] = np.nan
    # data layout: [parameters | constraints | objectives]
    parameters[:evall,:nVar] = data[:,0:nVar]
    constraints[:evall,:nconstraints] = data[:,nVar:nVar+nconstraints]
    objectives[:evall,:nObj] = data[:,-nObj:]
    hypervolumeProgress = np.empty((maxEval,2))
    # NOTE(review): np.NAN is a removed alias in NumPy >= 2.0; np.nan is the
    # portable spelling.
    hypervolumeProgress[:] = np.NAN
    Z = -1
    paretoOptimal = np.array([False]*(maxEval))
    # replay the margin adaptation and hypervolume history over the given data
    for i in range(evall):
        feasible = np.all(constraints[i] < 0)
        Cfeas, Cinfeas, EPS = adjustMargins(Cfeas,Cinfeas,EPS,epsilonMax,nVar,feasible)
        paretoOptimal = np.array([False]*(maxEval))
        paretoOptimal[:i] = paretofrontFeasible(objectives[:i,:],constraints[:i,:])
        paretoFront = objectives[paretoOptimal]
        hypervolumeProgress[i] = [hypervolume(paretoFront, ref),Z]
    # one surrogate model per objective
    model = [ [] for i in range(nObj)]
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    outputFileParameters = str(outdir)+'par_run'+str(runNo)+'.csv'
    outputFileObjectives = str(outdir)+'obj_run'+str(runNo)+'.csv'
    outputFileConstraints = str(outdir)+'con_run'+str(runNo)+'.csv'
    np.savetxt(outputFileParameters, parameters[:evall], delimiter=',')
    np.savetxt(outputFileObjectives, objectives[:evall], delimiter=',')
    np.savetxt(outputFileConstraints, constraints[:evall], delimiter=',')
    paretoOptimal[:evall] = paretofrontFeasible(objectives[:evall,:], constraints[:evall,:])
    paretoFront = objectives[paretoOptimal,:]
    paretoConstraints = constraints[paretoOptimal,:]
    visualiseParetoFront(paretoFront,save=False)
    print(paretoFront)
    print(paretoConstraints)
    iterationTime = time.time()
    print('Compute model for each objective')
    s=time.time()
    for i in range(nObj):
        if smooth==0:
            raise ValueError("no smoothing, to be implemented")
        elif smooth==1:
            # smoothing using power exponential kernel with nugget:
            # fit, re-predict at the training points, then re-fit on the
            # predictions (re-interpolation, reference 5)
            model[i] = simplexKriging(copy.deepcopy(parameters[:evall,:]), copy.deepcopy(objectives[:evall,i]))[0]
            temp = predictorEGO(copy.deepcopy(parameters[:evall,:]), copy.deepcopy(model[i]))[0]
            model[i] = simplexKriging(parameters[:evall,:], temp, [1])[0]
        elif smooth==2:
            # smoothing using gaussian kernel with nugget (same re-fit scheme)
            model[i] = simplexGauss(copy.deepcopy(parameters[:evall,:]), copy.deepcopy(objectives[:evall,i]))[0]
            temp = predictorEGO(copy.deepcopy(parameters[:evall,:]), copy.deepcopy(model[i]))[0]
            model[i] = simplexGauss(copy.deepcopy(parameters[:evall,:]),copy.deepcopy(temp),[1])[0]
        else:
            raise ValueError('Unknown smoothing type')
    print("Time to compute surrogate models ",time.time()-s)
    print('Optimize infill criterion')
    currentHV = hypervolume(paretoFront, ref)
    hypervolumeProgress[evall] = [currentHV,Z]
    nPF = sum(paretoOptimal)
    # epsilon for the S-metric infill criterion (reference 2)
    if nPF < 2:
        eps = np.zeros((1,nObj))
    else:
        maxima = np.array([max(col) for col in paretoFront.T])
        minima = np.array([min(col) for col in paretoFront.T])
        spread = maxima-minima
        c = 1-(1/np.power(2,nObj))
        eps = spread/(nPF+c*(maxEval-evall))
    gain = -ndtri(0.5*(0.5**(1/float(nObj))))
    criterion = partial(optimizeSMSEGOcriterion, model=copy.deepcopy(model),
                        ref=ref, paretoFront=paretoFront,
                        currentHV=currentHV, epsilon=np.ndarray.flatten(eps),
                        gain=gain)
    # cubic RBF surrogates for the constraints (reference 1/3)
    constraintSurrogates = trainCubicRBF(parameters[:evall,:], constraints[:evall], rngMin, rngMax, hypervolumeProgress[:evall])
    X,Z = findAllLocalOptimaNew(copy.deepcopy(model), rngMin, rngMax, criterion, constraintSurrogates, EPS)
    # drop candidates already evaluated before
    if len(X)>0:
        notSeenBefore = ~np.array([x in parameters for x in X])
    else:
        notSeenBefore = [0]
    if sum(notSeenBefore)>0:
        print('Filter local optimal')
        ind = np.argmax(notSeenBefore)
        X = X[ind]
    else:
        # fall back to a uniform random point in the design space
        print('NO FEASIBLE LOCAL MINIMA FOUND!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        X = np.random.rand(nVar)*(rngMax-rngMin)+rngMin
    print('Evaluate new solutions')
    parameters[evall,:] = X
    objectiveValues, constraintValues = problemCall(X)
    print(objectiveValues)
    print(constraintValues)
    objectives[evall,:] = objectiveValues
    constraints[evall,:] = constraintValues
    evall += 1
    np.savetxt(outputFileParameters, parameters[:evall], delimiter=',')
    np.savetxt(outputFileObjectives, objectives[:evall], delimiter=',')
    np.savetxt(outputFileConstraints, constraints[:evall], delimiter=',')
    paretoOptimal[:evall] = paretofrontFeasible(objectives[:evall,:],constraints[:evall,:])
    paretoFront = objectives[paretoOptimal]
    paretoConstraints = constraints[paretoOptimal]
    visualiseParetoFront(paretoFront,save=False)
    print(paretoFront)
    print(paretoConstraints)
    feasible = np.all(constraints[evall-1] < 0)
    Cfeas, Cinfeas, EPS = adjustMargins(Cfeas,Cinfeas,EPS,epsilonMax,nVar,feasible)
    print('iteration time', (time.time() - iterationTime))
    # make the surrogate models JSON-serializable: replace the (unpicklable)
    # kernel callables with placeholder strings and ndarrays with lists
    for d in model:
        d['corr'] = 'corr'
        d['regr'] = 'regr'
        for key in d:
            if type(d[key]) is np.ndarray:
                d[key] = d[key].tolist()
    for key in constraintSurrogates:
        if type(constraintSurrogates[key]) is np.ndarray:
            constraintSurrogates[key] = constraintSurrogates[key].tolist()
    with open(str(outdir)+str(runNo)+'obj_model.json', 'w') as fOut:
        json.dump(model, fOut)
    with open(str(outdir)+str(runNo)+'con_model.json', 'w') as fOut:
        json.dump(constraintSurrogates, fOut)
    return objectives, constraints, parameters
|
{"hexsha": "2826e0770dd300b3b27a9122b9474472a5286f0f", "size": 10626, "ext": "py", "lang": "Python", "max_stars_repo_path": "CEGO/CEGOIteration.py", "max_stars_repo_name": "napa-jmm/CEGO", "max_stars_repo_head_hexsha": "172d511133a608ca5bf265d9ebd2937b8a171b3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-07-18T06:38:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T21:01:40.000Z", "max_issues_repo_path": "CEGO/CEGOIteration.py", "max_issues_repo_name": "napa-jmm/CEGO", "max_issues_repo_head_hexsha": "172d511133a608ca5bf265d9ebd2937b8a171b3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CEGO/CEGOIteration.py", "max_forks_repo_name": "napa-jmm/CEGO", "max_forks_repo_head_hexsha": "172d511133a608ca5bf265d9ebd2937b8a171b3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-10-15T09:35:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-08T13:40:19.000Z", "avg_line_length": 43.1951219512, "max_line_length": 146, "alphanum_fraction": 0.6510446076, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2791}
|
import numpy as np
import nnet
import nnet.config
# Accept cupy arrays when cupy is available; otherwise only numpy arrays.
try:
    import cupy
    array_types = (np.ndarray, cupy.ndarray)
except ImportError:
    # Fix: the original `(np.ndarray)` is NOT a tuple — parentheses without a
    # trailing comma are just grouping.  isinstance() happens to accept a bare
    # type, but any code that iterates or indexes `array_types` would break,
    # and the two branches should have a consistent type.
    array_types = (np.ndarray,)
class Tensor:
    """
    Wrapper around a numpy/cupy array that records the module which created
    it, enabling reverse-mode automatic differentiation via backward().
    """
    # Ensure numpy binary operators defer to Tensor's reflected methods.
    __array_priority__ = 200

    def __init__(self, data, name=None):
        # Accept an ndarray as-is, promote scalars to 0-d arrays, and allow
        # None (used for absent parameters such as an unused bias).
        if data is not None:
            if isinstance(data, array_types):
                self.data = data
            elif np.isscalar(data):
                self.data = np.array(data)
            else:
                raise TypeError("{} is not supported".format(type(data)))
        else:
            self.data = None # for None parameters e.g. bias of layers with no bias used.
        self.grad = None        # accumulated gradient (a Tensor), set by backward()
        self.creator = None     # module that produced this tensor, if any
        self.generation = 0     # topological rank in the computation graph
        self.name = name

    def set_creator(self, module):
        """
        This method specifies the instance of the module from which this tensor is computed.
        """
        self.creator = module
        # One generation deeper than the producing module, so backward()
        # can process modules in reverse topological order.
        self.generation = module.generation + 1

    def backward(self, retain_grad=False, create_graph=False):
        """ backward propagation.
        retain_grad (bool) if this flag is true, interim results of gradient computation is preserved. Otherwise, the interim resuls are deleted to save memory.
        create_graph (bool) if this flag is true, inputs and outputs are saved in forward propagation of Module class.
        The forward propagation of Module class is also called in computation of gradient in backward propagation of Tensor class.
        """
        # Seed the gradient with ones (dL/dL = 1) on first call.
        if self.grad is None:
            xp = nnet.cuda.get_array_module(self.data)
            self.grad = Tensor(xp.ones_like(self.data))
        """
        Backward propagation
        forward: x -> module -> y
        backward: gx (dL/dx) <- module <- gy (dL/dy)
        dL/dx = dL/dy*dy/dx
        """
        modules = []
        seen_set = set()

        def add_module(mod):
            # Queue each creator module once, kept sorted by generation so
            # pop() always returns the deepest (latest-created) module.
            if mod is not None and mod not in seen_set:
                modules.append(mod)
                seen_set.add(mod)
                modules.sort(key=lambda x: x.generation)
        add_module(self.creator)
        while modules:
            mod = modules.pop()
            x = mod.inputs    # NOTE(review): unused; shadowed by the loop below
            y = mod.outputs   # NOTE(review): unused; shadowed by the loop below
            # outputs hold weakrefs, hence the call to dereference.
            gys = [output().grad for output in mod.outputs]
            # Only build a graph of the gradient computation when the caller
            # asks for higher-order derivatives.
            with nnet.config.using_config('enable_backprop', create_graph):
                gxs = mod.backward(*gys)
                if not isinstance(gxs, tuple):
                    gxs = (gxs,)
                for x, gx in zip(mod.inputs, gxs):
                    # Accumulate when an input feeds multiple modules.
                    if x.grad is None:
                        x.grad = gx
                    else:
                        x.grad = x.grad + gx
                    if x.creator is not None:
                        add_module(x.creator)
            if not retain_grad:
                # Free intermediate gradients to save memory.
                for y in mod.outputs:
                    y().grad = None

    def cleargrad(self):
        # Reset the accumulated gradient (call between optimization steps).
        self.grad = None

    @property
    def shape(self):
        return self.data.shape

    @property
    def ndim(self):
        return self.data.ndim

    @property
    def size(self):
        return self.data.size

    @property
    def dtype(self):
        return self.data.dtype

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        if self.data is None:
            return 'tensor(None)'
        # Indent continuation lines to align under 'tensor('.
        p = str(self.data).replace('\n', '\n' + ' ' * 7)
        return 'tensor(' + p + ')'

    # Arithmetic operators delegate to the differentiable module functions.
    def __mul__(self, other):
        return nnet.mul(self, other)

    def __rmul__(self, other):
        return nnet.mul(self, other)

    def __add__(self, other):
        return nnet.add(self, other)

    def __radd__(self, other):
        return nnet.add(self, other)

    def __neg__(self):
        return nnet.neg(self)

    def __sub__(self, other):
        return nnet.sub(self, other)

    def __rsub__(self, other):
        return nnet.rsub(self, other)

    def __truediv__(self, other):
        return nnet.div(self, other)

    def __rtruediv__(self, other):
        return nnet.rdiv(self, other)

    def __pow__(self, c):
        return nnet.pow(self, c)

    def __getitem__(self, slices):
        return nnet.nn.functional.get_item(self, slices)

    # def __eq__(self, other):
    #     if isinstance(other, np.ndarray):
    #         comp = self.data == other
    #     else:
    #         comp = self.data == other.data
    #     return Tensor(comp)

    def reshape(self, *shape):
        # Accept both reshape(2, 3) and reshape((2, 3)).
        if len(shape) == 1 and isinstance(shape[0], (tuple, list)):
            shape = shape[0]
        return nnet.nn.functional.reshape(self, shape)

    def transpose(self, *axes):
        # Accept transpose(), transpose(1, 0), transpose((1, 0)) and
        # transpose(None).
        if len(axes) == 0:
            axes = None
        elif len(axes) == 1:
            if isinstance(axes[0], (tuple, list)) or axes[0] is None:
                axes = axes[0]
        return nnet.nn.functional.transpose(self, axes)

    def max(self, axis=None, keepdims=False):
        return nnet.nn.functional.max(self, axis, keepdims)

    def min(self, axis=None, keepdims=False):
        return nnet.nn.functional.min(self, axis, keepdims)

    @property
    def T(self):
        return nnet.nn.functional.transpose(self)

    def sum(self, axis=None, keepdims=False):
        return nnet.sum(self, axis, keepdims)

    def to_cpu(self):
        if self.data is not None:
            self.data = nnet.cuda.as_numpy(self.data)

    def to_gpu(self):
        if self.data is not None:
            self.data = nnet.cuda.as_cupy(self.data)

    def to(self, device):
        # Any device string other than 'gpu' is treated as CPU.
        if device == 'gpu':
            self.to_gpu()
        else:
            self.to_cpu()
|
{"hexsha": "be3942b2f1298a3429c695f2f4fd043c585120fe", "size": 5741, "ext": "py", "lang": "Python", "max_stars_repo_path": "nnet/tensor.py", "max_stars_repo_name": "trip2eee/nnet2", "max_stars_repo_head_hexsha": "2061cdf3c8e2ac3f0bdb9e077baa94c67803e99f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nnet/tensor.py", "max_issues_repo_name": "trip2eee/nnet2", "max_issues_repo_head_hexsha": "2061cdf3c8e2ac3f0bdb9e077baa94c67803e99f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nnet/tensor.py", "max_forks_repo_name": "trip2eee/nnet2", "max_forks_repo_head_hexsha": "2061cdf3c8e2ac3f0bdb9e077baa94c67803e99f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2807881773, "max_line_length": 164, "alphanum_fraction": 0.5434593276, "include": true, "reason": "import numpy,import cupy", "num_tokens": 1306}
|
from PIL import Image
import os
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
import torch.nn.functional as functional
import torch.utils.data as data
import random
import time
import glob
import scipy.io as scio
import h5py
import math
class DatasetConstructor(data.Dataset):
    """Common base for the crowd-counting dataset wrappers.

    Provides two shared helpers:
      - get_path_tuple(i, dataset_name): build image / ground-truth file
        names for the i-th sample of a dataset.
      - resize(img, dataset_name): resize a PIL image according to the
        conventions of the given dataset.

    Supported dataset names: SHA, SHB, QNRF and UCF50 (UCF50 is used for
    testing only and additionally requires `self.scene_index` to be set by
    the subclass).
    """

    def __init__(self):
        return

    def get_path_tuple(self, i, dataset_name = "SHA"):
        """Return (img_name, gt_map_name) for 0-based sample index `i`.

        Both names start with '/' so they can be appended directly to a
        directory path.

        Raises:
            ValueError: UCF50 requested but self.scene_index is not in 1..5.
            NameError: unknown dataset name.
        """
        if dataset_name == "SHA" or dataset_name == "SHB":
            img_name = '/IMG_' + str(i + 1) + ".jpg"
            gt_map_name = '/GT_IMG_' + str(i + 1) + ".npy"
        elif dataset_name == "QNRF":
            img_name = "/img_" + ("%04d" % (i + 1)) + ".jpg"
            gt_map_name = '/GT_IMG_' + str(i + 1) + ".npy"
        elif dataset_name == "UCF50": # just for testing
            # 5-fold split of the 50 UCF_CC_50 images; scene_index picks the
            # fold used for testing.
            if self.scene_index == 1:
                test_list = [1, 2, 11, 19, 20, 21, 25, 33, 48, 50]
            elif self.scene_index == 2:
                test_list = [9, 10, 16, 18, 26, 27, 30, 40, 44, 47]
            elif self.scene_index == 3:
                test_list = [5, 13, 17, 22, 31, 38, 41, 42, 45, 49]
            elif self.scene_index == 4:
                test_list = [4, 6, 8, 14, 23, 28, 29, 34, 37, 39]
            elif self.scene_index == 5:
                test_list = [3, 7, 12, 15, 24, 32, 35, 36, 43, 46]
            else:
                # was raise ValueError('...'): give an actionable message
                raise ValueError("scene_index must be in 1..5 for UCF50, got %r" % (self.scene_index,))
            img_name = "/" + ("%d" % (test_list[i])) + ".jpg"
            gt_map_name = '/GT_IMG_' + str(test_list[i]) + ".npy"
        else:
            # UCF50 *is* handled above, so include it in the error message.
            raise NameError("No such dataset, only support SHA, SHB, QNRF, UCF50")
        return img_name, gt_map_name

    def resize(self, img, dataset_name):
        """Resize a PIL image according to dataset conventions.

        SHA/UCF50: upscale (preserving aspect ratio) so both sides exceed
        416, then round each side up to a multiple of 32 (network stride).
        SHB: keep the original size. QNRF: fixed 768x1024.

        Raises:
            NameError: unknown dataset name.
        """
        height = img.size[1]
        width = img.size[0]
        resize_height = height
        resize_width = width
        if dataset_name == "SHA" or dataset_name == "UCF50":
            if resize_height <= 416:
                tmp = resize_height
                resize_height = 416
                resize_width = (resize_height / tmp) * resize_width
            if resize_width <= 416:
                tmp = resize_width
                resize_width = 416
                resize_height = (resize_width / tmp) * resize_height
            resize_height = math.ceil(resize_height / 32) * 32
            resize_width = math.ceil(resize_width / 32) * 32
        elif dataset_name == "SHB":
            resize_height = height
            resize_width = width
        elif dataset_name == "QNRF":
            resize_height = 768
            resize_width = 1024
        else:
            # UCF50 is handled in the first branch together with SHA.
            raise NameError("No such dataset, only support SHA, SHB, QNRF, UCF50")
        img = transforms.Resize([resize_height, resize_width])(img)
        return img
class TrainDatasetConstructor(DatasetConstructor):
    """Training dataset: randomly cropped image / density-map patch pairs.

    Each item loads a .jpg image and its aligned .npy density map, loads a
    WorldExpo ROI polygon mask from a roi.mat next to the image, optionally
    applies color jitter and horizontal flip, crops a random
    fine_size x fine_size patch, and downsamples the density map by 2 with a
    2x2 ones kernel (density mass is preserved per 2x2 cell).
    """
    def __init__(self,
                 train_num,
                 data_dir_path,
                 gt_dir_path,
                 mode='crop',
                 dataset_name="SHA",
                 device=None,
                 is_random_hsi=False,
                 is_flip=False,
                 fine_size = 400
                 ):
        super(TrainDatasetConstructor, self).__init__()
        self.train_num = train_num
        self.imgs = []
        self.fine_size = fine_size
        # Random sample order; regenerated by shuffle().
        self.permulation = np.random.permutation(self.train_num)
        self.data_root, self.gt_root = data_dir_path, gt_dir_path
        self.mode = mode
        self.device = device
        self.is_random_hsi = is_random_hsi
        self.is_flip = is_flip
        self.dataset_name = dataset_name
        # 2x2 ones kernel: conv2d with stride 2 sums density over 2x2 cells.
        self.kernel = torch.FloatTensor(torch.ones(1, 1, 2, 2))
        self.img_paths = glob.glob(self.data_root+"/*.jpg")

    def __getitem__(self, index):
        """Return (index, img, gt_map, mask_tg, mask_tg_small) for one random crop."""
        if self.mode == 'crop':
            img_path = self.img_paths[self.permulation[index]]
            img = Image.open(img_path).convert("RGB")
            # Ground-truth density map shares the image's basename.
            gt_map_path = os.path.join(self.gt_root, os.path.basename(img_path).replace(".jpg", ".npy"))
            gt_map = Image.fromarray(np.squeeze(np.load(gt_map_path)))
            # Additional mask for worldexpo
            # Scene prefix: two special scenes use '-' as separator, the rest '_'.
            if img_path.find("104242-") != -1 or img_path.find("200247-") != -1:
                prefix_f = os.path.basename(img_path).split('-')[0]
            else:
                prefix_f = os.path.basename(img_path).split('_')[0]
            mask_path = os.path.join(self.data_root, prefix_f, "roi.mat")
            mask_path = mask_path.replace("train_frame", "train_label")
            # Rasterize the ROI polygon into a binary 576x720 mask.
            # NOTE(review): 576x720 is the WorldExpo frame size — this branch
            # presumably only runs for WorldExpo data; confirm for other sets.
            mask_info = scio.loadmat(mask_path)
            pos_x = mask_info['maskVerticesXCoordinates']
            pos_y = mask_info['maskVerticesYCoordinates']
            pos = np.concatenate((pos_x, pos_y), 1).astype(np.int32)
            mask_tg = np.zeros((576, 720),dtype='uint8')
            cv2.fillPoly(mask_tg, [pos], 1) # fill 1 in the mask
            self.mask = mask_tg
            # Photometric augmentation (color jitter) and random horizontal flip.
            if self.is_random_hsi:
                img = transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2)(img)
            if self.is_flip:
                flip_random = random.random()
                if flip_random > 0.5:
                    img = F.hflip(img)
                    gt_map = F.hflip(gt_map)
            img, gt_map = transforms.ToTensor()(img), transforms.ToTensor()(gt_map)
            img_shape = img.shape  # C, H, W
            # Random top-left corner of a fine_size x fine_size crop.
            rh, rw = random.randint(0, img_shape[1] - self.fine_size), random.randint(0, img_shape[2] - self.fine_size)
            p_h, p_w = self.fine_size, self.fine_size
            img = img[:, rh:rh + p_h, rw:rw + p_w]
            gt_map = gt_map[:, rh:rh + p_h, rw:rw + p_w]
            # ImageNet normalization.
            img = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))(img)
            # Sum-pool the density map by 2 (2x2 ones kernel, stride 2).
            gt_map = functional.conv2d(gt_map.view(1, 1, self.fine_size, self.fine_size), self.kernel, bias=None, stride=2, padding=0)
            # crop the mask
            # NOTE(review): the hard-coded 400/200 below (and in the return)
            # assume fine_size == 400 — should presumably use self.fine_size.
            mask_tg = torch.from_numpy(self.mask)
            mask_tg = mask_tg[rh:rh + p_h, rw:rw + p_w]
            mask_tg = mask_tg.view(1, 1, 400, 400).float()
            mask_tg_small = functional.interpolate(mask_tg, (200, 200), mode='nearest').view(1, 200, 200)
            mask_tg = mask_tg.view(1, 400, 400)
            return index, img.view(3, self.fine_size, self.fine_size), gt_map.view(1, 200, 200), mask_tg, mask_tg_small

    def __len__(self):
        """Number of training samples."""
        return self.train_num

    def shuffle(self):
        """Regenerate the random sample order; returns self for chaining."""
        self.permulation = np.random.permutation(self.train_num)
        return self
class EvalDatasetConstructor(DatasetConstructor):
    """Evaluation dataset for WorldExpo and UCF_CC_50.

    Each item yields the full image plus a 3x3 grid of half-size,
    half-overlapping patches (9 crops) for patch-wise evaluation, the
    2x-downsampled density map, and ROI masks (all-ones for UCF50).
    """
    def __init__(self,
                 scene_index, # just for UCF_CC_50 or worldexpo in this repo
                 validate_num,
                 data_dir_path,
                 gt_dir_path,
                 mode="crop",
                 dataset_name="SHA",
                 device=None,
                 ):
        super(EvalDatasetConstructor, self).__init__()
        self.scene_index = scene_index # just for UCF_CC_50 or WorldExpo
        self.validate_num = validate_num
        self.imgs = []
        self.data_root = data_dir_path
        self.gt_root = gt_dir_path
        self.mode = mode
        self.device = device
        self.dataset_name = dataset_name
        # 2x2 ones kernel: conv2d with stride 2 sums density over 2x2 cells.
        self.kernel = torch.FloatTensor(torch.ones(1, 1, 2, 2))
        # Additional mask for worldexpo (Need to copy 5 roi mats to gt_root)
        if self.dataset_name == 'WorldExpo':
            self.img_paths = glob.glob(self.data_root+"/*.jpg")
            # Rasterize the scene's ROI polygon into a binary 576x720 mask.
            mask_path = os.path.join(self.gt_root, "roi.mat")
            mask_info = scio.loadmat(mask_path)
            pos_x = mask_info['maskVerticesXCoordinates']
            pos_y = mask_info['maskVerticesYCoordinates']
            pos = np.concatenate((pos_x, pos_y), 1).astype(np.int32)
            mask_tg = np.zeros((576, 720),dtype='uint8')
            cv2.fillPoly(mask_tg, [pos], 1) # fill 1 in the mask
            self.mask = mask_tg
        elif self.dataset_name == 'UCF50':
            # Precompute (img_path, gt_path, 1-based index) for the test fold.
            for i in range(self.validate_num):
                i_n, g_n = super(EvalDatasetConstructor, self).get_path_tuple(i, self.dataset_name)
                self.imgs.append([self.data_root + i_n, self.gt_root + g_n, i + 1])

    def __getitem__(self, index):
        """Return (img_path, img_ori_tensor, img_index, imgs, gt_map, mask_tg, mask_tg_small)."""
        if self.mode == 'crop':
            if self.dataset_name == 'WorldExpo':
                img_path = self.img_paths[index]
                gt_map_path = os.path.join(self.gt_root, os.path.basename(img_path).replace(".jpg", ".npy"))
            elif self.dataset_name == 'UCF50':
                img_path, gt_map_path, img_index = self.imgs[index]
            img = Image.open(img_path).convert("RGB")
            # WorldExpo: fixed 576x736 (both sides divisible by 32);
            # UCF50: dataset-convention resize from the base class.
            if self.dataset_name == 'WorldExpo':
                img_ori = transforms.Resize([576, 736])(img)
                img_ori_tensor = transforms.ToTensor()(img_ori)
            elif self.dataset_name == 'UCF50':
                img_ori = super(EvalDatasetConstructor, self).resize(img, self.dataset_name)
                img_ori_tensor = transforms.ToTensor()(img_ori)
            gt_map = Image.fromarray(np.squeeze(np.load(gt_map_path)))
            gt_map = transforms.ToTensor()(gt_map)
            if self.dataset_name == 'WorldExpo': # also need resize density map, as we do not resize density map as other datasets.
                gt_map = gt_map.unsqueeze(0)
                gt_map = functional.interpolate(gt_map, (576, 736), mode='bilinear').squeeze(0)
            img_shape, gt_shape = img_ori_tensor.shape, gt_map.shape  # C, H, W
            # ImageNet normalization.
            img = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))(img_ori_tensor)
            # downsample the mask
            if self.dataset_name == 'WorldExpo':
                mask_tg = torch.from_numpy(self.mask).view(1, 1, 576, 720).float()
                mask_tg = functional.interpolate(mask_tg, (576, 736), mode='nearest') # first, slightly largen the mask
                # --- 736 is divided by 32 when testing, must be divided by 16 when training ---
                mask_tg_small = functional.interpolate(mask_tg, (576//2, 736//2), mode='nearest').view(1, 288, 368)
                mask_tg = mask_tg.view(1, 576, 736)
            elif self.dataset_name == 'UCF50':
                # No ROI for UCF50: all-ones masks at full and half resolution.
                mask_tg = torch.ones(1, img_shape[1], img_shape[2])
                mask_tg_small = torch.ones(1, img_shape[1]//2, img_shape[2]//2)
            # For evaluation, because, the cropped mechanism, mask the input will degradation the performance
            # img = img * mask_tg
            # 3x3 grid of half-size patches with 50% overlap between neighbors.
            patch_height, patch_width = (img_shape[1]) // 2, (img_shape[2]) // 2
            imgs = []
            for i in range(3):
                for j in range(3):
                    start_h, start_w = (patch_height // 2) * i, (patch_width // 2) * j
                    imgs.append(img[:, start_h:start_h + patch_height, start_w:start_w + patch_width])
            imgs = torch.stack(imgs)
            # Sum-pool the density map by 2 (2x2 ones kernel, stride 2).
            gt_map = functional.conv2d(gt_map.view(1, *(gt_shape)), self.kernel, bias=None, stride=2, padding=0)
            # here I also return img_path and original img
            # NOTE(review): this overwrites the UCF50 img_index loaded from
            # self.imgs above with the dataset index — confirm intended.
            img_index = index
            return img_path, img_ori_tensor, img_index, imgs, gt_map.view(1, gt_shape[1] // 2, gt_shape[2] // 2), mask_tg, mask_tg_small

    def __len__(self):
        """Number of evaluation samples."""
        return self.validate_num
|
{"hexsha": "af5739e22c626522843eac0a6c95cc7df58b509b", "size": 11363, "ext": "py", "lang": "Python", "max_stars_repo_path": "Phase0_Train_WE_And_Test_WE_UCFCC/Dataset/DatasetConstructor.py", "max_stars_repo_name": "Zhaoyi-Yan/DCANet", "max_stars_repo_head_hexsha": "1d99481494f4ef3cfe5abf227fa49a51011364bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-10-09T08:53:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T08:59:09.000Z", "max_issues_repo_path": "Phase0_Train_WE_And_Test_WE_UCFCC/Dataset/DatasetConstructor.py", "max_issues_repo_name": "Zhaoyi-Yan/DCANet", "max_issues_repo_head_hexsha": "1d99481494f4ef3cfe5abf227fa49a51011364bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Phase0_Train_WE_And_Test_WE_UCFCC/Dataset/DatasetConstructor.py", "max_forks_repo_name": "Zhaoyi-Yan/DCANet", "max_forks_repo_head_hexsha": "1d99481494f4ef3cfe5abf227fa49a51011364bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-11T08:35:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T08:35:55.000Z", "avg_line_length": 44.7362204724, "max_line_length": 136, "alphanum_fraction": 0.5695678958, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2922}
|
import numpy as np
class Ant:
    """A single ant in the ant-colony optimisation simulation.

    Operational attributes:
        number      -- ordinal number of this ant
        node_memory -- the current edge as a two-element list [tail, head]
        src_node    -- start node; becomes the next target after arrival
        dst_node    -- node the ant is currently heading to

    Statistical attributes:
        cost_sum -- edge costs accumulated since the last completed trip
        passes   -- recorded cost of every completed trip between the ant's
                    two endpoint nodes

    Methods:
        findNext(graph_data, parameters) -> next_node
            pick the next node to move to, weighted by pheromone levels
        depositPheromones(graph_data, parameters) -> graph_data
            reinforce the current edge and return the updated graph
        move(graph_data, parameters) -> graph_data
            advance one edge, updating volumes and pheromones
        showState()
            print the ant's current attribute values
        showFindNext(graph_data, probs, next_node)
            print all data relevant to a findNext() decision (debugging)
    """
    def __init__(self, src_node, dst_node, number):
        self.number = number
        # Start "on" the degenerate edge (src, src).
        self.node_memory = [src_node, src_node]
        self.src_node = src_node
        self.dst_node = dst_node
        self.passes = []
        self.cost_sum = 0.0

    def findNext(self, graph_data, parameters):
        """Choose the next node, with probability proportional to
        pheromone**alpha; the node just left is excluded unless it is the
        only option.

        Arguments:
            graph_data: all data about nodes & edges as a dict
            parameters: dict of all steering parameters (uses 'alpha')
        Return:
            next_node: id of the chosen neighbor
        """
        here = self.node_memory[-1]
        came_from = self.node_memory[0]
        neighbor_ids = list(graph_data[here]['pheromones'].keys())
        trail = list(graph_data[here]['pheromones'].values())
        # Index (w.r.t. neighbor_ids) of the node we arrived from, or -1
        # when the ant is at its start or destination.
        if came_from != here and here != self.dst_node:
            blocked = neighbor_ids.index(came_from)
        else:
            blocked = -1
        # Per-candidate selection weights.
        if len(trail) == 1:
            # Only one way to go: take it.
            weights = [1]
        else:
            # Never immediately backtrack; otherwise weight by pheromone**alpha.
            weights = [0 if i == blocked else pow(level, parameters['alpha'])
                       for i, level in enumerate(trail)]
        weights = np.asarray(weights)
        weights = weights / sum(weights)
        chosen = np.random.choice(neighbor_ids, p=weights)
        # self.showFindNext(graph_data, weights, chosen)
        return chosen

    def depositPheromones(self, graph_data, parameters):
        """Deposit pheromones on the current edge.

        The increase is proportional to pheromone/cost and to the remaining
        relative capacity of the edge, scaled by 'enhancement_rate'.

        Arguments:
            graph_data: all data about nodes & edges as a dict
            parameters: dict of all steering parameters (uses 'enhancement_rate')
        Return:
            graph_data: with updated pheromone levels
        """
        tail, head = self.node_memory[0], self.node_memory[-1]
        occupancy = graph_data[tail]['cur_vol'][head]
        capacity = graph_data[tail]['max_vol'][head]
        degree = len(graph_data[head]['cur_vol'])
        # Accumulate this edge's cost for the statistics.
        self.cost_sum += graph_data[tail]['costs'][head]
        # Reinforce only if the edge has spare capacity and is not a dead end.
        if occupancy <= capacity and degree > 1:
            level = graph_data[tail]['pheromones'][head]
            edge_cost = graph_data[tail]['costs'][head] + 1
            growth = (level / edge_cost) * ((capacity - occupancy) / (capacity + 1)) * parameters["enhancement_rate"]
            # The graph is undirected: update both directions symmetrically.
            graph_data[tail]['pheromones'][head] += growth
            graph_data[head]['pheromones'][tail] += growth
        return graph_data

    def move(self, graph_data, parameters):
        """Advance the ant by one edge chosen by findNext().

        Releases capacity on the edge being left, occupies the new edge,
        deposits pheromones, and records a completed trip on arrival.

        Arguments:
            graph_data: all data about nodes & edges as a dict
            parameters: dict of all steering parameters
        Return:
            graph_data: with updated pheromone and current capacity values
        """
        target = self.findNext(graph_data, parameters)
        tail, head = self.node_memory[0], self.node_memory[-1]
        # Free capacity on the edge we are leaving, unless this is the
        # initial degenerate edge or the volume is already zero.
        if head != tail and graph_data[tail]['cur_vol'][head] > 0:
            graph_data[tail]['cur_vol'][head] -= 1
            graph_data[head]['cur_vol'][tail] -= 1
        # Slide the two-node memory window forward.
        self.node_memory.append(target)
        self.node_memory.pop(0)
        # Reinforce the edge we just stepped onto.
        graph_data = self.depositPheromones(graph_data, parameters)
        # Occupy capacity on the new current edge (both directions).
        graph_data[self.node_memory[0]]['cur_vol'][self.node_memory[-1]] += 1
        graph_data[self.node_memory[-1]]['cur_vol'][self.node_memory[0]] += 1
        if self.node_memory[-1] == self.dst_node:
            # Arrived: turn around and log the trip's total cost.
            self.src_node, self.dst_node = self.dst_node, self.src_node
            self.passes.append(self.cost_sum)
            self.cost_sum = 0.0
        if parameters['verbose']:
            print("Ant ", self.number, " - took (",self.node_memory[-1],",",self.node_memory[0],")")
        return graph_data

    def showState(self):
        """Print the current state of the ant."""
        print("[ant]: current state of the ant...............................")
        print("    ant nr: ",self.number," node_memory: ", self.node_memory," src: ",self.src_node," dst: ",self.dst_node)

    def showFindNext(self, graph_data, probs, next_node):
        """Print every candidate edge considered by findNext() — pheromone,
        cost, current/maximum volume and selection probability — plus the
        decision that was taken.

        Arguments:
            graph_data: all data about nodes & edges as a dict
            probs: array of probabilities for all candidates
            next_node: the candidate chosen by findNext()
        """
        here = self.node_memory[-1]
        probs = list(probs)
        pheromones = list(graph_data[here]['pheromones'].values())
        costs = list(graph_data[here]['costs'].values())
        cur_vol = list(graph_data[here]['cur_vol'].values())
        max_vol = list(graph_data[here]['max_vol'].values())
        print("[ant]: I ( ant nr",self.number,")am at edge (",self.node_memory[0],",",self.node_memory[-1],") and I can choose between: ")
        for i in list(graph_data[here]['pheromones'].keys()):
            print("    edge (",self.node_memory[-1],",",i,") phe: ", pheromones.pop(0)," cost: ",costs.pop(0),"cur_vol: ",cur_vol.pop(0)," max_vol: ",max_vol.pop(0)," prob: ",probs.pop(0))
        print("    I decided to take edge (",self.node_memory[-1],",",next_node,")")
|
{"hexsha": "06b0eafe70d6017f03cb7cebffe652c6048d8980", "size": 8530, "ext": "py", "lang": "Python", "max_stars_repo_path": "ant.py", "max_stars_repo_name": "twardzikf/aco-in-urban-transport", "max_stars_repo_head_hexsha": "89228ced89b425400a240a455d9585d0f7ef1861", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ant.py", "max_issues_repo_name": "twardzikf/aco-in-urban-transport", "max_issues_repo_head_hexsha": "89228ced89b425400a240a455d9585d0f7ef1861", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ant.py", "max_forks_repo_name": "twardzikf/aco-in-urban-transport", "max_forks_repo_head_hexsha": "89228ced89b425400a240a455d9585d0f7ef1861", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-04T02:17:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-04T02:17:01.000Z", "avg_line_length": 46.1081081081, "max_line_length": 188, "alphanum_fraction": 0.6030480657, "include": true, "reason": "import numpy", "num_tokens": 1937}
|
# -*- coding: utf-8 -*-
# File: __init__.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
import numpy # avoid https://github.com/tensorflow/tensorflow/issues/2034
import cv2 # avoid https://github.com/tensorflow/tensorflow/issues/1924
from tensorpack.train import *
from tensorpack.models import *
from tensorpack.utils import *
from tensorpack.tfutils import *
from tensorpack.callbacks import *
from tensorpack.dataflow import *
from tensorpack.predict import *
# Warn when NumPy is older than 1.9: compare (major, minor) as a tuple
# instead of only the minor component, so versions like "1.10" (minor 10)
# and "2.0" (minor 0, but major 2) are classified correctly.
if tuple(int(v) for v in numpy.__version__.split('.')[:2]) < (1, 9):
    logger.warn("Numpy < 1.9 could be extremely slow on some tasks.")
|
{"hexsha": "71ed132f0484c4a518521413d1d240e66574f6cb", "size": 578, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorpack/__init__.py", "max_stars_repo_name": "yinglanma/AI-project", "max_stars_repo_head_hexsha": "db145c59f57f519177f3eedde14c3ce033b2a11d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorpack/__init__.py", "max_issues_repo_name": "yinglanma/AI-project", "max_issues_repo_head_hexsha": "db145c59f57f519177f3eedde14c3ce033b2a11d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorpack/__init__.py", "max_forks_repo_name": "yinglanma/AI-project", "max_forks_repo_head_hexsha": "db145c59f57f519177f3eedde14c3ce033b2a11d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1111111111, "max_line_length": 73, "alphanum_fraction": 0.7508650519, "include": true, "reason": "import numpy", "num_tokens": 148}
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import unittest
from extensions.front.DropoutWithRandomUniformReplacer import DropoutWithRandomUniformReplacer
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, regular_op
class DropoutWithRandomUniformReplacerTest(unittest.TestCase):
    """Unit test for the DropoutWithRandomUniformReplacer front transform."""
    def test(self):
        """Checks that the ShapeOf->RandomUniform subgraph of a dropout block
        is replaced by a Broadcast of constant 0.5, and that the RandomUniform
        node's name is preserved on the new Broadcast node.
        """
        nodes = {
            **regular_op('input', {'type': 'Parameter'}),
            **regular_op('shape', {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}),
            **regular_op('random_uniform', {'type': 'RandomUniform', 'kind': 'op', 'op': 'RandomUniform',
                                            'name': 'dropout/RU'}),
            **regular_op('mul', {'type': 'Mul', 'kind': 'op', 'op': 'Mul'}),
            **regular_op('add', {'type': 'Add', 'kind': 'op', 'op': 'Add'}),
            **regular_op('add2', {'type': 'Add', 'kind': 'op', 'op': 'Add'}),
            **regular_op('floor', {'type': 'Floor', 'kind': 'op', 'op': 'Floor'}),
            'add_const': {'kind': 'op', 'op': 'Const', 'value': np.array(0.0), 'data_type': np.float32},
            **result('result'),
            # new nodes to be added
            'broadcast_const': {'kind': 'op', 'op': 'Const', 'value': np.array(0.5), 'data_type': np.float32},
            **regular_op('broadcast', {'type': 'Broadcast', 'kind': 'op', 'op': 'Broadcast'}),
        }
        # Original dropout pattern: ShapeOf -> RandomUniform -> Mul -> Add -> Add -> Floor.
        edges = [('input', 'shape'),
                 ('shape', 'random_uniform'),
                 ('random_uniform', 'mul'),
                 ('mul', 'add'),
                 ('add_const', 'add'),
                 ('add', 'add2'),
                 ('add2', 'floor'),
                 ('floor', 'result')]
        graph = build_graph(nodes, edges, nodes_with_edges_only=True)
        graph.graph['layout'] = 'NCHW'
        graph.stage = 'front'
        # Apply the transformation under test.
        DropoutWithRandomUniformReplacer().find_and_replace_pattern(graph)
        # Expected pattern: RandomUniform replaced by Broadcast(0.5, shape).
        edges_ref = [('input', 'shape'),
                     ('broadcast_const', 'broadcast'),
                     ('shape', 'broadcast'),
                     ('broadcast', 'mul'),
                     ('mul', 'add'),
                     ('add_const', 'add'),
                     ('add', 'add2'),
                     ('add2', 'floor'),
                     ('floor', 'result')]
        graph_ref = build_graph(nodes, edges_ref, nodes_with_edges_only=True)
        # check graph structure after the transformation and output name
        (flag, resp) = compare_graphs(graph, graph_ref, 'result')
        self.assertTrue(flag, resp)
        # The replacement must keep the original RandomUniform node's name.
        self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Broadcast')[0]]['name'] == 'dropout/RU')
|
{"hexsha": "f09178683a9f22ac33c51125b60db0a29a8585c5", "size": 3249, "ext": "py", "lang": "Python", "max_stars_repo_path": "model-optimizer/extensions/front/DropoutWithRandomUniformReplacer_test.py", "max_stars_repo_name": "calvinfeng/openvino", "max_stars_repo_head_hexsha": "11f591c16852637506b1b40d083b450e56d0c8ac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model-optimizer/extensions/front/DropoutWithRandomUniformReplacer_test.py", "max_issues_repo_name": "calvinfeng/openvino", "max_issues_repo_head_hexsha": "11f591c16852637506b1b40d083b450e56d0c8ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2021-03-26T08:11:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T13:06:26.000Z", "max_forks_repo_path": "model-optimizer/extensions/front/DropoutWithRandomUniformReplacer_test.py", "max_forks_repo_name": "calvinfeng/openvino", "max_forks_repo_head_hexsha": "11f591c16852637506b1b40d083b450e56d0c8ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-28T17:30:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-28T17:30:46.000Z", "avg_line_length": 43.9054054054, "max_line_length": 111, "alphanum_fraction": 0.5635580179, "include": true, "reason": "import numpy", "num_tokens": 746}
|
import numpy as np
import torch
from torch import nn
class MDense(nn.Module):
    """Dual-branch output layer (the 'MDense' layer from LPCNet).

    Computes the sum of two independent tanh-activated affine projections,
    each with its own learned per-output scale:

        forward(x) = tanh(x W1^T + b1) * f1 + tanh(x W2^T + b2) * f2

    Weights are Xavier-uniform initialised with tanh gain; biases start
    random and the scale factors start at one.
    """

    def __init__(self, input_features, output_features):
        super(MDense, self).__init__()
        gain = torch.nn.init.calculate_gain("tanh")
        # Branch 1 projection.
        self.weight1 = nn.Parameter(torch.randn(output_features, input_features), requires_grad=True)
        nn.init.xavier_uniform_(self.weight1, gain=gain)
        # Branch 2 projection.
        self.weight2 = nn.Parameter(torch.randn(output_features, input_features), requires_grad=True)
        nn.init.xavier_uniform_(self.weight2, gain=gain)
        self.bias1 = nn.Parameter(torch.randn(output_features), requires_grad=True)
        self.bias2 = nn.Parameter(torch.randn(output_features), requires_grad=True)
        # Per-output scales applied after the tanh non-linearity.
        self.factor1 = nn.Parameter(torch.ones(output_features), requires_grad=True)
        self.factor2 = nn.Parameter(torch.ones(output_features), requires_grad=True)

    def forward(self, inputs):
        """Return the sum of the two scaled tanh-affine branches of `inputs`."""
        branch1 = torch.tanh(inputs.matmul(self.weight1.t()) + self.bias1) * self.factor1
        branch2 = torch.tanh(inputs.matmul(self.weight2.t()) + self.bias2) * self.factor2
        return branch1 + branch2
class LPCNetModelBunch(nn.Module):
    """Bunched LPCNet vocoder: a frame-rate feature encoder followed by a
    sample-rate GRU decoder that predicts r (= n_samples_per_step) u-law
    excitation samples per decoder step.
    """
    def __init__(self, hparams):
        super(LPCNetModelBunch, self).__init__()
        # r: number of samples emitted per decoder step ("bunching" factor).
        self.n_samples_per_step = hparams.n_samples_per_step
        self.embedding_pitch_size = hparams.embedding_pitch_size
        self.embedding_size = hparams.embedding_size
        self.dense_feature_size = hparams.dense_feature_size
        # Number of u-law quantisation levels (2**ulaw).
        self.ulaw = 2**hparams.ulaw
        self.rnn_units1 = hparams.rnn_units1
        self.rnn_units2 = hparams.rnn_units2
        self.frame_size = hparams.frame_size
        self.embed_pitch = nn.Embedding(hparams.pitch_max_period, self.embedding_pitch_size)
        # Shared embedding for the (sig, pred, exc) input triple.
        self.embed_sig = nn.Embedding(self.ulaw, self.embedding_size)
        # Frame-rate encoder: two conv layers (kernel 3, no padding) + two dense layers.
        self.feature_conv1 = nn.Conv1d(self.embedding_pitch_size + hparams.nb_used_features,
                                       self.dense_feature_size, kernel_size=3)
        torch.nn.init.xavier_uniform_(self.feature_conv1.weight, gain=torch.nn.init.calculate_gain("tanh"))
        self.feature_conv2 = nn.Conv1d(self.dense_feature_size, self.dense_feature_size, kernel_size=3)
        torch.nn.init.xavier_uniform_(self.feature_conv2.weight, gain=torch.nn.init.calculate_gain("tanh"))
        self.feature_dense1 = nn.Linear(self.dense_feature_size, self.dense_feature_size)
        torch.nn.init.xavier_uniform_(self.feature_dense1.weight, gain=torch.nn.init.calculate_gain("tanh"))
        self.feature_dense2 = nn.Linear(self.dense_feature_size, self.dense_feature_size)
        torch.nn.init.xavier_uniform_(self.feature_dense2.weight, gain=torch.nn.init.calculate_gain("tanh"))
        # Sample-rate decoder GRUs; both also see the repeated frame features.
        self.gru_a = nn.GRU(3*self.embedding_size*self.n_samples_per_step + self.dense_feature_size,
                            self.rnn_units1, batch_first=True)
        self.gru_b = nn.GRU(self.rnn_units1 + self.dense_feature_size,
                            self.rnn_units2, batch_first=True)
        # Dual-branch output layers: md_1 predicts the first sample of each
        # bunch, md_2 the second (conditioned on the first sample's embedding).
        self.md_1 = MDense(self.rnn_units2, self.ulaw)
        self.md_2 = MDense(self.rnn_units2 + self.embedding_size, self.ulaw)
        # Probability of feeding ground truth (vs. the model's own prediction)
        # to md_2 during training.
        self.p_teacher_forcing = hparams.teacher_forcing

    def parse_decoder_inputs(self, cpcm_cexc):
        """Group r consecutive sample embeddings into one decoder-step input."""
        # [batch, 15*frame_size, 3*embedding_size] ==> [batch, 15*frame_size//r, 3*embedding_size*r]
        cpcm_cexc = cpcm_cexc.contiguous().view(cpcm_cexc.size(0), cpcm_cexc.size(1)//self.n_samples_per_step, -1)
        return cpcm_cexc

    def forward(self, in_data, features, periods, targets):
        """
        :param in_data: [batch, 15*frame_size, 3] (sig, pred, exc) shared embedding
        :param features: features: [batch, 15, nb_used_features]
        :param periods: periods: [batch, 15, 1]
        :param targets: [batch, 15*frame_size, 1]
        :return: ulaw_probs: [batch, 15*frame_size, 2**ulaw]
        """
        ###################
        ##### Encoder #####
        ###################
        # [batch, 15, 38] + [batch, 15, embedding_pitch_size] ==> [batch, 15, 38 + embedding_pitch_size]
        pitch = self.embed_pitch(periods).squeeze(2)
        cat_feat = torch.cat((features, pitch), 2)
        # [batch, 15, 38 + embedding_pitch_size] ==> [batch, 15, embedding_size]
        cat_feat1 = cat_feat.permute(0, 2, 1)
        c_feat2 = torch.tanh(self.feature_conv1(cat_feat1))
        cfeat = torch.tanh(self.feature_conv2(c_feat2))
        c_feat2 = cfeat.permute(0, 2, 1)
        # [batch, 15, embedding_size] ==> [batch, 15, embedding_size]
        fdense1 = torch.tanh(self.feature_dense1(c_feat2))
        fdense2 = torch.tanh(self.feature_dense2(fdense1))
        # repeat features by self.frame_size//r times ==> [batch, 15*frame_size//r, dense_feature_size]
        # NOTE(review): the unpadded convs shrink the time axis by 4, so
        # fdense2.size(1) is presumably 15 only if the input was padded
        # upstream — confirm against the data pipeline.
        repeat_tensor = in_data.new_ones(fdense2.size(1), dtype=torch.long) * self.frame_size//self.n_samples_per_step
        repeat_fdense2 = torch.repeat_interleave(fdense2, repeat_tensor, dim=1)
        ###################
        ##### Decoder #####
        ###################
        # [batch, 15*frame_size, 3] ==> [batch, 15*frame_size, 3*embedding_size]
        cpcm_exc = self.embed_sig(in_data)
        cpcm_exc = cpcm_exc.contiguous().view(cpcm_exc.size(0), cpcm_exc.size(1), -1)
        # [batch, 15*frame_size, 3*embedding_size] ==> [batch, 15*frame_size//r, 3*embedding_size*r]
        cpcm_exc = self.parse_decoder_inputs(cpcm_exc)
        # gru_a
        """
        [batch, 15*frame_size//r, 3*embed_size + dense_feature_size] ==> [batch, 15*frame_size//r, 384]
        """
        rnn_in = torch.cat((cpcm_exc, repeat_fdense2), 2)
        self.gru_a.flatten_parameters()
        gru_out1, _ = self.gru_a(rnn_in)
        # gru_b
        rnn_in2 = torch.cat((gru_out1, repeat_fdense2), 2)  # [batch, 15*frame_size//r, 384 + dense_feature_size]
        self.gru_b.flatten_parameters()
        gru_b_out, _ = self.gru_b(rnn_in2)  # [batch, 15*frame_size//r, rnn_units2]
        # results
        # Even sample positions come from md_1; odd positions from md_2,
        # which additionally sees an embedding of the first sample.
        ulaw_probs = gru_b_out.new_zeros((targets.size(0), targets.size(1), self.ulaw))
        ulaw_probs[:, ::self.n_samples_per_step] = self.md_1(gru_b_out)
        context = gru_b_out
        # Scheduled sampling: one coin flip per forward pass decides whether
        # md_2 is conditioned on ground truth or on md_1's own prediction.
        threshold = np.random.uniform(0.0, 1.0)
        if threshold <= self.p_teacher_forcing:
            pred_exc = targets[:, 0::self.n_samples_per_step]
        else:
            pred_exc = torch.softmax(ulaw_probs[:, ::self.n_samples_per_step], dim=-1).argmax(-1)
        context = torch.cat((context, self.embed_sig(pred_exc).squeeze(2)), dim=-1)
        ulaw_probs[:, 1::self.n_samples_per_step] = self.md_2(context)
        return ulaw_probs
|
{"hexsha": "af4f3396c6af0832ffe7863aade069bc56c0a7b3", "size": 6711, "ext": "py", "lang": "Python", "max_stars_repo_path": "training_torch/lpcnet_bunched.py", "max_stars_repo_name": "ishine/BunchedLPCnet", "max_stars_repo_head_hexsha": "5480ba83fc204e5d79477583ec6023f1057d7c37", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-03-08T09:40:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T07:57:15.000Z", "max_issues_repo_path": "training_torch/lpcnet_bunched.py", "max_issues_repo_name": "ishine/BunchedLPCnet", "max_issues_repo_head_hexsha": "5480ba83fc204e5d79477583ec6023f1057d7c37", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-08T06:45:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-07T07:56:32.000Z", "max_forks_repo_path": "training_torch/lpcnet_bunched.py", "max_forks_repo_name": "ishine/BunchedLPCnet", "max_forks_repo_head_hexsha": "5480ba83fc204e5d79477583ec6023f1057d7c37", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-03-08T02:01:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T10:23:56.000Z", "avg_line_length": 46.6041666667, "max_line_length": 118, "alphanum_fraction": 0.6602592758, "include": true, "reason": "import numpy", "num_tokens": 1738}
|
import os
import glob
import rasterio
from PIL import Image
import numpy as np
import click
from object_detection.utils.np_box_list import BoxList
from rv.utils import save_geojson, make_empty_dir
def png_to_geojson(geotiff_path, label_png_path, output_path, object_half_len):
    """Convert COWC PNG labels to GeoJSON format.

    In the COWC dataset, the center position of cars is represented as
    non-zero pixels in PNG files that are aligned with the GeoTIFFs.
    This script converts the PNG file to a GeoJSON representation.

    Parameters
    ----------
    geotiff_path: path to the GeoTIFF the labels are aligned with
    label_png_path: path to the PNG whose non-zero red-channel pixels mark
        car centers
    output_path: path of the GeoJSON file to write
    object_half_len: half the side length (in GeoTIFF pixels) of the square
        box drawn around each car center

    Returns
    -------
    BoxList with one (ymin, xmin, ymax, xmax) box per labeled car center.
    """
    image_dataset = rasterio.open(geotiff_path)
    label_im = np.array(Image.open(label_png_path))
    # np.float / np.int were removed in NumPy 1.24; the builtins are the
    # documented replacements and behave identically here.
    point_inds = np.argwhere(label_im[:, :, 0] != 0).astype(float)
    # Normalize inds to [0, 1] relative to the PNG size
    point_inds[:, 0] /= label_im.shape[0]
    point_inds[:, 1] /= label_im.shape[1]
    # Convert to geotiff image inds (PNG and GeoTIFF sizes may differ)
    point_inds[:, 0] *= image_dataset.height
    point_inds[:, 1] *= image_dataset.width
    point_inds = point_inds.astype(int)
    # Turn points into squares and ensure edges aren't outside the array
    y_min = np.clip(point_inds[:, 0:1] - object_half_len, 0,
                    image_dataset.height)
    x_min = np.clip(point_inds[:, 1:2] - object_half_len, 0,
                    image_dataset.width)
    y_max = np.clip(point_inds[:, 0:1] + object_half_len, 0,
                    image_dataset.height)
    x_max = np.clip(point_inds[:, 1:2] + object_half_len, 0,
                    image_dataset.width)
    # Write to GeoJSON
    boxes = np.hstack([y_min, x_min, y_max, x_max]).astype(float)
    boxlist = BoxList(boxes)
    save_geojson(output_path, boxlist, image_dataset=image_dataset)
    return boxlist
@click.command()
@click.argument('geotiff_dir')
@click.argument('label_png_dir')
@click.argument('output_dir')
@click.option('--object-half-len', default=50)
def prepare_potsdam(geotiff_dir, label_png_dir, output_dir, object_half_len):
    """Convert every Potsdam car-annotation PNG into a GeoJSON file."""
    pattern = os.path.join(
        label_png_dir, 'top_potsdam_*_RGB_Annotated_Cars.png')
    make_empty_dir(output_dir)
    for label_path in glob.glob(pattern):
        # Drop the trailing '_Annotated_Cars.png' (19 chars) to recover the
        # tile prefix (e.g. 'top_potsdam_2_10_RGB') shared with the GeoTIFF.
        prefix = os.path.basename(label_path)[:-19]
        geotiff_path = os.path.join(geotiff_dir, prefix + 'IR.tif')
        output_path = os.path.join(output_dir, prefix + 'IR.json')
        boxlist = png_to_geojson(
            geotiff_path,
            label_path,
            output_path,
            object_half_len=object_half_len)
        print('Saved {} with {} boxes.'.format(output_path,
                                               boxlist.num_boxes()))
# Run the click CLI only when executed as a script (click parses sys.argv).
if __name__ == '__main__':
    prepare_potsdam()
|
{"hexsha": "764ef197e5817173bc9e67eb08af6e1ac954b1b5", "size": 2637, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/cowc/data/prepare_potsdam.py", "max_stars_repo_name": "yoninachmany/raster-vision-examples", "max_stars_repo_head_hexsha": "ef4098cb46a42e19119b42084e3e59bb789110a2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 172, "max_stars_repo_stars_event_min_datetime": "2018-09-26T20:03:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-02T11:25:03.000Z", "max_issues_repo_path": "cowc/data/prepare_potsdam.py", "max_issues_repo_name": "geo-py/raster-vision-examples", "max_issues_repo_head_hexsha": "4a58b6ff76d508943a7aa2f421608ef1e3d9930c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 70, "max_issues_repo_issues_event_min_datetime": "2018-12-21T15:38:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T13:01:47.000Z", "max_forks_repo_path": "rastervision_pytorch_backend/rastervision/pytorch_backend/examples/object_detection/cowc_potsdam_data_prep/prepare_potsdam.py", "max_forks_repo_name": "jamesmcclain/raster-vision", "max_forks_repo_head_hexsha": "597c196e9fa0b66163ab9049645134b4962e7456", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2018-09-26T15:48:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T19:03:03.000Z", "avg_line_length": 34.6973684211, "max_line_length": 79, "alphanum_fraction": 0.673871824, "include": true, "reason": "import numpy", "num_tokens": 662}
|
from typing import Callable, List, Sequence
import numpy as np
from sklearn.svm import SVC
def onehot(x, nclass=2):
    """Return a (len(x), nclass) float array with a 1 at each label index."""
    n = len(x)
    encoded = np.zeros((n, nclass))
    encoded[np.arange(n), x] = 1
    return encoded
class lbp_model:
    def __init__(self,
                 descriptor: Callable,
                 model: SVC,
                 threshold: float):
        """ Model that extracts features using the "descriptor", then uses
        the "model" to obtain a classification score.

        Parameters
        ----------
        descriptor: Callable
            The function that extracts features from an image
        model: sklearn.svm.SVC
            Classifier with a "decision_function" function, that takes a
            feature vector and outputs a score
        threshold: float
            The decision threshold
        """
        self.model = model
        self.threshold = threshold
        self.descriptor = descriptor

    def bounds(self):
        """ Returns the bounds of each pixel in the image"""
        return [0, 255]

    def predictions(self, img: np.ndarray):
        """ Return the prediction for one image

        Parameters
        ----------
        img: np.ndarray
            Input image

        Returns
        -------
        np.ndarray: 1 x 2
            The one-hot prediction (either (1, 0) or (0, 1))
        """
        features = self.descriptor(img)
        pred = self.model.decision_function(features) >= self.threshold
        # np.int was removed in NumPy 1.24; the builtin int is the
        # documented, equivalent replacement.
        return onehot(pred.astype(int)).squeeze()

    def predict_score(self, img):
        """ Returns the predicted score for one image

        Parameters
        ----------
        img: np.ndarray
            Input image

        Returns
        -------
        float:
            The score of the image, according to the model
        """
        features = self.descriptor(img)
        pred = self.model.decision_function(features)
        return pred

    def batch_predictions(self, imgs: Sequence[np.ndarray]):
        """ Returns predictions for a list of images

        Parameters
        ----------
        imgs: a sequence (e.g. list) of np.ndarray
            List of N images

        Returns
        -------
        np.ndarray (N x 2)
            The one-hot predictions, for each image in the list
        """
        features = [self.descriptor(img) for img in imgs]
        features = np.concatenate(features)
        pred = self.model.decision_function(features) >= self.threshold
        # np.int was removed in NumPy 1.24; use the builtin int instead.
        return onehot(pred.astype(int))
|
{"hexsha": "d4009fa651597127cd57588785e0e437b48ad417", "size": 2528, "ext": "py", "lang": "Python", "max_stars_repo_path": "clbp/lbp_model_utils.py", "max_stars_repo_name": "luizgh/adversarial_signatures", "max_stars_repo_head_hexsha": "01daa8050f64c70d75bb2b81b0dbcc0ece9860e5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-01-20T12:31:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-17T11:12:15.000Z", "max_issues_repo_path": "clbp/lbp_model_utils.py", "max_issues_repo_name": "luizgh/adversarial_signatures", "max_issues_repo_head_hexsha": "01daa8050f64c70d75bb2b81b0dbcc0ece9860e5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-30T13:23:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-14T09:49:02.000Z", "max_forks_repo_path": "clbp/lbp_model_utils.py", "max_forks_repo_name": "luizgh/adversarial_signatures", "max_forks_repo_head_hexsha": "01daa8050f64c70d75bb2b81b0dbcc0ece9860e5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-08-14T04:31:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-23T09:53:24.000Z", "avg_line_length": 27.4782608696, "max_line_length": 74, "alphanum_fraction": 0.5628955696, "include": true, "reason": "import numpy", "num_tokens": 517}
|
#####################################################################
# basic scrapping code (scrapes from basketball-reference.com) #
# utilizes beautiful soup framework & panda framework to quickly #
# and easily scrape all stats and stores stats in a excel db #
#####################################################################
from urllib import urlopen
from bs4 import BeautifulSoup
import pandas as pd
import lxml.html
import numpy as np
import math
import operator
import collections
from collections import OrderedDict
from datetime import date
# NBA seasons we will be analyzing
# note: leave out 2012 because of nba lockout
year = [2011, 2013, 2014, 2015, 2016, 2017, 2018, 2019]
month = ["november", "december", "january", "february", "march"]
searchValues = []  # box-score ids, plugged into the box-score page URLs
temp = []          # schedule cell keys ('ABC.YYYYMMDD...'), used for rest days
dates = []         # rest gap (datetime.timedelta) between a team's games
for x in year:
    for y in month:
        # URL of the monthly schedule page we will be scraping
        url = "https://www.basketball-reference.com/leagues/NBA_{}_games-{}.html".format(x, y)
        print(url)
        html = urlopen(url)
        soup = BeautifulSoup(html)
        results = soup.find(id="schedule")
        gameIDS = results.find_all('th')
        # find all the search values (to plug into the url)
        for gIDS in gameIDS:
            link = gIDS.get('csk')
            if link is not None:
                searchValues.append(link)
        # find all the team abbreviations used to calculate rest days
        gameIDS = results.find_all('td')
        for gIDS in gameIDS:
            link = gIDS.get('csk')
            if link is not None:
                temp.append(link)
# calculate rest days: scan backwards for the same team's previous entry
# NOTE(review): when a team has no earlier entry nothing is appended, so
# `dates` can be shorter than `searchValues`; preserved as-is -- confirm
# whether dates[indexNum] below is really meant to line up one-per-game.
for game_idx in range(len(temp)):
    team_abbr = temp[game_idx][:3]
    for back in range(game_idx):
        prev = temp[game_idx - back - 1]
        if prev[:3] == team_abbr:
            holder = temp[game_idx - back - 1]
            # entries embed the game date as YYYYMMDD at characters 4:12
            fdate = date(int(holder[4:8]), int(holder[8:10]), int(holder[10:12]))
            holder = temp[game_idx]
            ldate = date(int(holder[4:8]), int(holder[8:10]), int(holder[10:12]))
            dates.append(ldate - fdate)
            break
# stats that we want for our future machine learning
stat_columns = ["teamName", "oppoTeam", "minutes", "fgm", "fga", "fgp", "fg3m", "fg3a",
                "fg3p", "ftm", "fta", "ftp", "orb", "drb", "trb", "ast", "stl", "blk",
                "tov", "fouls", "pts", "result", "game_location", "date", "year",
                "gameID", "prev_game"]
# collect rows in a plain list: DataFrame.append was removed in pandas 2.0,
# and building the frame once at the end is also much faster
rows = []
indexNum = 0
# the total minutes played tells how many overtimes there were; each OT
# shifts the index of the second team's stats table within the page markup
ot_section_index = {"240": 9, "265": 10, "290": 11, "315": 12,
                    "340": 13, "365": 14, "390": 15}
# for every single nba game (defined above) get all the defined stats
for Y in searchValues:
    url = "https://www.basketball-reference.com/boxscores/{}.html".format(Y)
    print(url)
    html = urlopen(url)
    soup = BeautifulSoup(html)
    results = soup.find_all(class_="table_outer_container")[1]
    nameA = results.find('caption').get_text()
    resultsA = results.find('tfoot')
    nameA = nameA[:-11]
    nameA = nameA.upper()
    # needs to check for OT because the html slightly changes
    OT = resultsA.find_all("td")[0].get_text()
    # default to the regulation offset instead of raising NameError on a
    # game longer than 7 overtimes or an unexpected minutes total
    OTChecker = ot_section_index.get(OT, 9)
    resultsB = soup.findAll(class_="section_content")[OTChecker]
    nameB = resultsB.find('caption').get_text()
    resultsB = resultsB.find("tfoot")
    nameB = nameB[:-11]
    nameB = nameB.upper()
    # add data to two arrays for away (A) and home (B), in column order
    teamA = np.array([])
    teamB = np.array([])
    # teamName
    teamA = np.append(teamA, nameA)
    teamB = np.append(teamB, nameB)
    # oppoName
    teamA = np.append(teamA, nameA if False else nameB)
    teamB = np.append(teamB, nameA)
    for i in range(19):
        if i == 18:
            # points: compare numerically -- comparing the raw page text as
            # strings gets results like "99" > "100" wrong
            a = resultsA.find_all("td")[i].get_text()
            b = resultsB.find_all("td")[i].get_text()
            teamA = np.append(teamA, a)
            teamB = np.append(teamB, b)
            if int(a) > int(b):
                teamA = np.append(teamA, "won")
                teamB = np.append(teamB, "lost")
            else:
                teamB = np.append(teamB, "won")
                teamA = np.append(teamA, "lost")
        else:
            teamA = np.append(teamA, resultsA.find_all("td")[i].get_text())
            teamB = np.append(teamB, resultsB.find_all("td")[i].get_text())
    # game_location
    teamA = np.append(teamA, "away")
    teamB = np.append(teamB, "home")
    # date of game (use a fresh name: the original rebound `date` here,
    # shadowing the datetime.date class imported above)
    ID = Y
    game_date = ID[:-4]
    teamA = np.append(teamA, game_date)
    teamB = np.append(teamB, game_date)
    # year of game
    game_year = game_date[:-4]
    teamA = np.append(teamA, game_year)
    teamB = np.append(teamB, game_year)
    # unique game ID
    teamA = np.append(teamA, ID)
    teamB = np.append(teamB, ID)
    # rest days (calculated above)
    teamA = np.append(teamA, dates[indexNum])
    teamB = np.append(teamB, dates[indexNum])
    rows.append(dict(zip(stat_columns, teamA)))
    rows.append(dict(zip(stat_columns, teamB)))
    indexNum += 1
stats = pd.DataFrame(rows, columns=stat_columns)
# print(stats)
# save all data to my local nba folder
# if copying this code to a different machine you must change the below save location
stats.to_excel(r"C:/Users/Parsons/Desktop/nba/test.xlsx", sheet_name='sheet1', index=False)
print("Completed Scrape to Test Excel File.")
|
{"hexsha": "47076d34bc4d9a977a3ed159508cff669a54f2a5", "size": 6215, "ext": "py", "lang": "Python", "max_stars_repo_path": "scrapeNBAGames.py", "max_stars_repo_name": "lparsons00/scrapeNBA", "max_stars_repo_head_hexsha": "00093e41ae92b4495f92fb617a63046278ed620f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scrapeNBAGames.py", "max_issues_repo_name": "lparsons00/scrapeNBA", "max_issues_repo_head_hexsha": "00093e41ae92b4495f92fb617a63046278ed620f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scrapeNBAGames.py", "max_forks_repo_name": "lparsons00/scrapeNBA", "max_forks_repo_head_hexsha": "00093e41ae92b4495f92fb617a63046278ed620f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2352941176, "max_line_length": 108, "alphanum_fraction": 0.6111021722, "include": true, "reason": "import numpy", "num_tokens": 2016}
|
from scripts.train_script import ModelTrainer
from rllab.misc.instrument import stub, run_experiment_lite
import itertools
from rllab import config
stub(globals())
from distutils.dir_util import copy_tree
import numpy as np
import os, shutil
# Copy pretrained checkpoints into a local 'model/' directory that the
# trainer expects, rebuilding it from scratch on every run.
srcmodeldirs = ['../train/strikebig/']
modeldir = 'model/'
if os.path.exists(modeldir):
    shutil.rmtree(modeldir)
for srcdir in srcmodeldirs:
    copy_tree(srcdir, modeldir)
# EC2 launch configuration consumed by rllab's remote experiment runner.
config.AWS_IMAGE_ID = "ami-5ce4944a"
config.AWS_INSTANCE_TYPE = "p2.xlarge"
config.AWS_SPOT_PRICE = "1.903"
subnet = 'us-east-1c'
config.AWS_NETWORK_INTERFACES = [
    dict(
        SubnetId=config.ALL_SUBNET_INFO[subnet]["SubnetID"],
        Groups=[config.ALL_SUBNET_INFO[subnet]["Groups"]],
        DeviceIndex=0,
        AssociatePublicIpAddress=True,
    )
]
# Earlier, smaller training configuration kept for reference.
# trainer = ModelTrainer(idims=(299, 299), nvideos=100,
#     ntrain = 80, batch_size=25, model='ContextAEInception',
#     nitr = 1000, save_every = 600, nlen=25, nskip=2,
#     rescale=False, inception=True,
#     strides=[1,2,1,2], kernels=[3,3,3,3], filters=[1024, 1024, 512, 512])
# Full-scale training configuration actually launched below.
trainer = ModelTrainer(idims=(299, 299), nvideos=2500,
    ntrain = 2300, batch_size=25, model='ContextAEInception',
    nitr = 100000, save_every = 5000, nlen=25, nskip=2,
    rescale=False, inception=True,
    strides=[1,2,1,2], kernels=[3,3,3,3], filters=[1024, 1024, 512, 512])
# Launch the (stubbed) training call on EC2 via rllab, syncing pickled
# results back to S3.
run_experiment_lite(
    trainer.train(),
    exp_prefix="r-strike-big-inception-train7c",
    # n_parallel=4,
    # dry=True,
    # snapshot_mode="all",
    # seed=seed,
    mode="ec2_mujoco",
    sync_s3_pkl=True,
    # terminate_machine=False
)
|
{"hexsha": "ffa25c8fe1b75385a46e6cbe2cd4deef7abc0a0b", "size": 1605, "ext": "py", "lang": "Python", "max_stars_repo_path": "sandbox/andrew/run_train_strike_inception.py", "max_stars_repo_name": "leopauly/Observation-Learning-Simulations", "max_stars_repo_head_hexsha": "462c04a87c45aae51537b8ea5b44646afa31d3a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 49, "max_stars_repo_stars_event_min_datetime": "2017-12-11T11:00:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T05:19:31.000Z", "max_issues_repo_path": "sandbox/andrew/run_train_strike_inception.py", "max_issues_repo_name": "leopauly/Observation-Learning-Simulations", "max_issues_repo_head_hexsha": "462c04a87c45aae51537b8ea5b44646afa31d3a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-01-01T17:39:56.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-24T04:49:08.000Z", "max_forks_repo_path": "sandbox/andrew/run_train_strike_inception.py", "max_forks_repo_name": "leopauly/Observation-Learning-Simulations", "max_forks_repo_head_hexsha": "462c04a87c45aae51537b8ea5b44646afa31d3a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2017-12-13T11:52:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-03T00:53:29.000Z", "avg_line_length": 29.1818181818, "max_line_length": 75, "alphanum_fraction": 0.6965732087, "include": true, "reason": "import numpy", "num_tokens": 493}
|
from data import Data
from learning_machine import LearningMachine, LDA, Logistic_regression
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
if __name__ == "__main__":
    # Load and preprocess the training data.
    data = Data('project_train.csv')
    data._preprocess()
    # data.show_scattermatrix()

    # Fit a decision tree on the full training set and report its
    # in-sample predictions against the true labels.
    from sklearn.tree import DecisionTreeClassifier
    tree = DecisionTreeClassifier(random_state=0)
    print(data.df.columns)
    tree.fit(data.df, data.labels)
    predicted = tree.predict(data.df)
    correct_mask = np.array(predicted == data.labels)
    print(correct_mask)
    print(predicted)
    print(data.labels)

    # ### COLOUR WHEEL FOR OUPUT
    # color_labels = [None for i in LM.output]
    # print(LM.output,LM.data.labels)
    # print("\n")
    # for i in range(0,sizeOfdata):
    #     if LM.output[i] == LM.data.labels[i]:
    #         if LM.output[i] == 1:
    #             color_labels[i] = 1
    #         else:
    #             color_labels[i] = 4
    #     else:
    #         if LM.output[i] == 1:
    #             color_labels[i] = 2
    #         else:
    #             color_labels[i] = 3
    #
    # color_wheel = {1: "green", 2: "yellow", 3: "purple", 4: "red"}
    # colors = [color_wheel.get(x) for x in color_labels]
    #
    # list_of_numerical_column_names = list(LM.data.num_bound_col)
    #
    # print(list_of_numerical_column_names)
    #
    # pd.plotting.scatter_matrix(LM.data.df[list_of_numerical_column_names],
    #                            alpha=0.5,
    #                            c=colors,
    #                            s=7.5,
    #                            diagonal='kde')
    # plt.savefig('scatter_matrix_numerical_features_LR.png', dpi=1600)
    # plt.show()
    #
    # exit()
|
{"hexsha": "f4d8f95a9f332a63b3c97e5fda763229a35c0173", "size": 1843, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "sergi-andreu/MMSL", "max_stars_repo_head_hexsha": "1f15095000606733400b1c737906f983eca4f09b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "sergi-andreu/MMSL", "max_issues_repo_head_hexsha": "1f15095000606733400b1c737906f983eca4f09b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-09-24T11:47:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-24T11:48:40.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "sergi-andreu/MMSL", "max_forks_repo_head_hexsha": "1f15095000606733400b1c737906f983eca4f09b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7166666667, "max_line_length": 76, "alphanum_fraction": 0.5854584916, "include": true, "reason": "import numpy", "num_tokens": 465}
|
from __future__ import print_function
import numpy as np
def delta(x, scale=1, center=0):
    r""" Dirac Delta function

    It is equal to zero except for the value of `x` closest to `center`.

    Parameters
    ----------
    x: list or :class:`~numpy:numpy.ndarray`
        domain of the function
    scale: float
        integrated intensity of the curve. Default to 1.
    center: float
        position of the peak. Default to 0.

    Return
    ------
    :class:`~numpy:numpy.ndarray`
        output array containing an impulse signal

    Examples
    --------
    >>> delta([0, 1, 2], 1, 0)
    array([1., 0., 0.])

    >>> delta([0, 1, 2, 3, 4], 5, 2)
    array([0., 0., 5., 0., 0.])

    Notes
    -----
    * A Delta (Dirac) function is defined as

    .. math::

       \text{Delta}(x, \text{scale}, \text{center}) = \text{scale}\
       \delta(x- \text{center})

    * For non-zero values, the amplitude of the Delta function is divided by
      the x-spacing.

    * **Equivalence between different implementations**

      +-------------+--------------------+
      | QENSmodels  | Mantid             |
      +=============+====================+
      | ``delta``   | ``DeltaFunction``  |
      +-------------+--------------------+
      | ``scale``   | Height             |
      +-------------+--------------------+
      | ``center``  | Centre             |
      +-------------+--------------------+
    """
    # Input validation
    if isinstance(x, (float, int)):
        x = [float(x)]
    x = np.asarray(x)
    model = np.zeros(x.size)
    # The original wrapped this in `try ... finally: return model`, which
    # silently swallowed *every* exception (a `return` in `finally`
    # suppresses in-flight exceptions). Check the conditions explicitly:
    # an empty domain, or a center outside the x-range, yields all zeros.
    if x.size > 0 and x.min() <= center <= x.max():
        # center within x-range: delta is non-zero at the closest sample
        idx = np.argmin(np.abs(x - center))
        if x.size > 1:
            dx = (x.max() - x.min()) / (x.size - 1)  # domain spacing
        else:
            dx = 1.
        model[idx] = scale / dx
    return model
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
{"hexsha": "06578941a9c8a3c1b75b0920559fed0c85cc2ec6", "size": 2067, "ext": "py", "lang": "Python", "max_stars_repo_path": "QENSmodels/delta.py", "max_stars_repo_name": "celinedurniak/test_nbsphinx", "max_stars_repo_head_hexsha": "f4bf376b933d5958cb921965cfb1430926fb10a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "QENSmodels/delta.py", "max_issues_repo_name": "celinedurniak/test_nbsphinx", "max_issues_repo_head_hexsha": "f4bf376b933d5958cb921965cfb1430926fb10a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-07-09T05:43:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-21T08:29:42.000Z", "max_forks_repo_path": "QENSmodels/delta.py", "max_forks_repo_name": "celinedurniak/test_nbsphinx", "max_forks_repo_head_hexsha": "f4bf376b933d5958cb921965cfb1430926fb10a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7586206897, "max_line_length": 76, "alphanum_fraction": 0.4625060474, "include": true, "reason": "import numpy", "num_tokens": 539}
|
# Human-readable titles for each simulation data file.
file_labels = Dict(
    "free_convection" => "Free convection",
    "strong_wind" => "Strong wind",
    "strong_wind_no_coriolis" => "Strong wind, no rotation",
    "weak_wind_strong_cooling" => "Weak wind, strong cooling",
    "strong_wind_weak_cooling" => "Strong wind, weak cooling",
    "strong_wind_weak_heating" => "Strong wind, weak heating"
)
# Accessors returning the z-grid each profile/flux field is defined on.
zs = Dict(
    "u" => 𝒟 -> 𝒟.u.z, "v" => 𝒟 -> 𝒟.v.z, "T" => 𝒟 -> 𝒟.T.z,
    "uw" => 𝒟 -> 𝒟.uw.z, "vw" => 𝒟 -> 𝒟.vw.z, "wT" => 𝒟 -> 𝒟.wT.z
)
# Multiplicative factor applied before plotting (fluxes are scaled by 1e4).
# The original listed "T" twice; in a Dict literal the last entry wins, so
# the shadowed duplicate is dropped here without changing the result.
scaling_factor = Dict(
    "u" => 1,
    "v" => 1,
    "T" => 1,
    "uw" => 1e4,
    "vw" => 1e4,
    "wT" => 1e4
)
# X-axis labels per field. The original's duplicate "T" => "T (m/s)" entry
# was shadowed by the later "T (C)" entry; only the effective one is kept.
x_labels = Dict(
    "u" => "U (m/s)",
    "v" => "V (m/s)",
    "uw" => "U'W' x 10⁴ (m²/s²)",
    "vw" => "V'W' x 10⁴ (m²/s²)",
    "wT" => "W'T' x 10⁴ (C⋅m/s)",
    "T" => "T (C)"
)
# titles = Dict(
#     "uw" => "Zonal momentum flux, U'W'",
#     "vw" => "Meridional momentum flux, V'W'",
#     "wT" => "Temperature flux, W'T'",
#     "T" => "Temperature, T",
# )
# Animate the field `name` from `xs` -- a vector of (depth × time) arrays,
# e.g. ground truth alongside one or more predictions -- one frame per time
# step, and save the animation as <directory>/<filename>.gif.
# `test_file` selects the plot title via `file_labels`; `legend_labels`
# names each series in `xs` (defaults to blank labels).
function animate_prediction(xs, name, 𝒟, test_file; filename=name, legend_labels=["" for i in 1:length(xs)], directory="Output")
    filepath = pwd() * "/" * directory * "/"
    isdir(dirname(filepath)) || mkpath(filepath)
    anim = @animate for n in 1:size(xs[1],2)
        # Fix the x-limits across all series and frames so they compare.
        x_max = maximum([maximum(x) for x in xs]).*scaling_factor[name]
        x_min = minimum([minimum(x) for x in xs]).*scaling_factor[name]
        fig = plot(xlim=(x_min, x_max), legend=:bottom, size=(400,400), xlabel=x_labels[name], ylabel="Depth (m)")
        # Plot in reverse so the first series in `xs` is drawn on top.
        for i in reverse(1:length(xs))
            plot!(fig, xs[i][:,n].*scaling_factor[name], zs[name](𝒟), label=legend_labels[i], title=file_labels[test_file]*", $(round(𝒟.t[n]/86400, digits=1)) days", linewidth=4, la=0.5, palette=:Set1_3)
        end
    end
    gif(anim, pwd() * "/$(directory)/$(filename).gif", fps=20)
end
|
{"hexsha": "d2cfc9d452078c6dfa49ee8c7fe32dfd944fe17c", "size": 1884, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "wind_mixing/src/animate_prediction.jl", "max_stars_repo_name": "CliMA/ClimateParameterizations.jl", "max_stars_repo_head_hexsha": "1263e2edefced4e03e925d6bfa60ba1f1940e8c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2020-12-23T06:55:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T20:05:51.000Z", "max_issues_repo_path": "wind_mixing/src/animate_prediction.jl", "max_issues_repo_name": "CliMA/OceanParameterizations.jl", "max_issues_repo_head_hexsha": "5942c66ba8724b9661db170acb239ca3a2abd5c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2020-12-05T02:43:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-26T14:27:03.000Z", "max_forks_repo_path": "wind_mixing/src/animate_prediction.jl", "max_forks_repo_name": "ali-ramadhan/ClimateParameterizations.jl", "max_forks_repo_head_hexsha": "1263e2edefced4e03e925d6bfa60ba1f1940e8c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-17T18:06:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-17T18:06:40.000Z", "avg_line_length": 31.9322033898, "max_line_length": 203, "alphanum_fraction": 0.5392781316, "num_tokens": 694}
|
[STATEMENT]
lemma Ord_succ_vsusbset_Vfrom_succ:
assumes "Transset A" and "Ord a" and "a \<in>\<^sub>\<circ> Vfrom A i"
shows "succ a \<subseteq>\<^sub>\<circ> Vfrom A (succ i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ZFC_in_HOL.succ a \<subseteq>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
proof(intro vsubsetI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
from Vfrom_in_mono[OF vsubset_reflexive]
[PROOF STATE]
proof (chain)
picking this:
?i \<in>\<^sub>\<circ> ?j \<Longrightarrow> Vfrom ?A ?i \<in>\<^sub>\<circ> Vfrom ?A ?j
[PROOF STEP]
have i_succi:
"Vfrom A i \<in>\<^sub>\<circ> Vfrom A (succ i)"
[PROOF STATE]
proof (prove)
using this:
?i \<in>\<^sub>\<circ> ?j \<Longrightarrow> Vfrom ?A ?i \<in>\<^sub>\<circ> Vfrom ?A ?j
goal (1 subgoal):
1. Vfrom A i \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Vfrom A i \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
assume prems: "x \<in>\<^sub>\<circ> succ a"
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a
[PROOF STEP]
consider \<open>x \<in>\<^sub>\<circ> a\<close> | \<open>x = a\<close>
[PROOF STATE]
proof (prove)
using this:
x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a
goal (1 subgoal):
1. \<lbrakk>x \<in>\<^sub>\<circ> a \<Longrightarrow> thesis; x = a \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
unfolding succ_def
[PROOF STATE]
proof (prove)
using this:
x \<in>\<^sub>\<circ> vinsert a a
goal (1 subgoal):
1. \<lbrakk>x \<in>\<^sub>\<circ> a \<Longrightarrow> thesis; x = a \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<lbrakk>x \<in>\<^sub>\<circ> a \<Longrightarrow> ?thesis; x = a \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. \<And>x. x \<in>\<^sub>\<circ> ZFC_in_HOL.succ a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>x \<in>\<^sub>\<circ> a \<Longrightarrow> ?thesis; x = a \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
[PROOF STEP]
show "x \<in>\<^sub>\<circ> Vfrom A (succ i)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>x \<in>\<^sub>\<circ> a \<Longrightarrow> ?thesis; x = a \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
proof cases
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. x \<in>\<^sub>\<circ> a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
2. x = a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> a
goal (2 subgoals):
1. x \<in>\<^sub>\<circ> a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
2. x = a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
have "x \<in>\<^sub>\<circ> Vfrom A i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in>\<^sub>\<circ> Vfrom A i
[PROOF STEP]
by (rule Vfrom_trans[OF assms(1) 1 assms(3)])
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> Vfrom A i
goal (2 subgoals):
1. x \<in>\<^sub>\<circ> a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
2. x = a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in>\<^sub>\<circ> Vfrom A i
[PROOF STEP]
show "x \<in>\<^sub>\<circ> Vfrom A (succ i)"
[PROOF STATE]
proof (prove)
using this:
x \<in>\<^sub>\<circ> Vfrom A i
goal (1 subgoal):
1. x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
by (rule Vfrom_trans[OF assms(1) _ i_succi])
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
goal (1 subgoal):
1. x = a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x = a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
case 2
[PROOF STATE]
proof (state)
this:
x = a
goal (1 subgoal):
1. x = a \<Longrightarrow> x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
from assms(3)
[PROOF STATE]
proof (chain)
picking this:
a \<in>\<^sub>\<circ> Vfrom A i
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
a \<in>\<^sub>\<circ> Vfrom A i
goal (1 subgoal):
1. x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
unfolding 2
[PROOF STATE]
proof (prove)
using this:
a \<in>\<^sub>\<circ> Vfrom A i
goal (1 subgoal):
1. a \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
[PROOF STEP]
by (intro Vfrom_trans[OF assms(1) _ i_succi])
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<in>\<^sub>\<circ> Vfrom A (ZFC_in_HOL.succ i)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2491, "file": "CZH_Foundations_czh_sets_ex_CZH_EX_Replacement", "length": 27}
|
[STATEMENT]
lemma approximating_bigstep_fun_induct[case_names Empty Decision Nomatch Match] : "
(\<And>\<gamma> p s. P \<gamma> p [] s) \<Longrightarrow>
(\<And>\<gamma> p r rs X. P \<gamma> p (r # rs) (Decision X)) \<Longrightarrow>
(\<And>\<gamma> p m a rs.
\<not> matches \<gamma> m a p \<Longrightarrow> P \<gamma> p rs Undecided \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided) \<Longrightarrow>
(\<And>\<gamma> p m a rs.
matches \<gamma> m a p \<Longrightarrow> (a = Log \<Longrightarrow> P \<gamma> p rs Undecided) \<Longrightarrow> (a = Empty \<Longrightarrow> P \<gamma> p rs Undecided) \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided) \<Longrightarrow>
P \<gamma> p rs s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<And>\<gamma> p s. P \<gamma> p [] s; \<And>\<gamma> p r rs X. P \<gamma> p (r # rs) (Decision X); \<And>\<gamma> p m a rs. \<lbrakk>\<not> matches \<gamma> m a p; P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided; \<And>\<gamma> p m a rs. \<lbrakk>matches \<gamma> m a p; a = Log \<Longrightarrow> P \<gamma> p rs Undecided; a = Empty \<Longrightarrow> P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p rs s
[PROOF STEP]
apply (rule approximating_bigstep_fun.induct[of P \<gamma> p rs s])
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>\<gamma> p s. \<lbrakk>\<And>\<gamma> p s. P \<gamma> p [] s; \<And>\<gamma> p r rs X. P \<gamma> p (r # rs) (Decision X); \<And>\<gamma> p m a rs. \<lbrakk>\<not> matches \<gamma> m a p; P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided; \<And>\<gamma> p m a rs. \<lbrakk>matches \<gamma> m a p; a = Log \<Longrightarrow> P \<gamma> p rs Undecided; a = Empty \<Longrightarrow> P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p [] s
2. \<And>\<gamma> p v va X. \<lbrakk>\<And>\<gamma> p s. P \<gamma> p [] s; \<And>\<gamma> p r rs X. P \<gamma> p (r # rs) (Decision X); \<And>\<gamma> p m a rs. \<lbrakk>\<not> matches \<gamma> m a p; P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided; \<And>\<gamma> p m a rs. \<lbrakk>matches \<gamma> m a p; a = Log \<Longrightarrow> P \<gamma> p rs Undecided; a = Empty \<Longrightarrow> P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (v # va) (Decision X)
3. \<And>\<gamma> p m a rs. \<lbrakk>\<And>\<gamma> p s. P \<gamma> p [] s; \<And>\<gamma> p r rs X. P \<gamma> p (r # rs) (Decision X); \<And>\<gamma> p m a rs. \<lbrakk>\<not> matches \<gamma> m a p; P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided; \<And>\<gamma> p m a rs. \<lbrakk>matches \<gamma> m a p; a = Log \<Longrightarrow> P \<gamma> p rs Undecided; a = Empty \<Longrightarrow> P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided; \<not> matches \<gamma> m a p \<Longrightarrow> P \<gamma> p rs Undecided; \<lbrakk>\<not> \<not> matches \<gamma> m a p; a = Log\<rbrakk> \<Longrightarrow> P \<gamma> p rs Undecided; \<lbrakk>\<not> \<not> matches \<gamma> m a p; a = Empty\<rbrakk> \<Longrightarrow> P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided
[PROOF STEP]
apply (simp_all)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>\<gamma> p m a rs. \<lbrakk>\<And>\<gamma> p s. P \<gamma> p [] s; \<And>\<gamma> p r rs X. P \<gamma> p (r # rs) (Decision X); \<And>\<gamma> p m a rs. \<lbrakk>\<not> matches \<gamma> m a p; P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided; \<And>\<gamma> p m a rs. \<lbrakk>matches \<gamma> m a p; a = Log \<Longrightarrow> P \<gamma> p rs Undecided; a = Empty \<Longrightarrow> P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided; \<not> matches \<gamma> m a p \<Longrightarrow> P \<gamma> p rs Undecided; \<lbrakk>matches \<gamma> m Log p; a = Log\<rbrakk> \<Longrightarrow> P \<gamma> p rs Undecided; \<lbrakk>matches \<gamma> m Empty p; a = Empty\<rbrakk> \<Longrightarrow> P \<gamma> p rs Undecided\<rbrakk> \<Longrightarrow> P \<gamma> p (Rule m a # rs) Undecided
[PROOF STEP]
by metis
|
{"llama_tokens": 1609, "file": "Iptables_Semantics_Semantics_Ternary_Semantics_Ternary", "length": 3}
|
import cvxpy as cvx
import gym
import energym
import numpy as np
import pandas as pd
import os
import logging
import datetime
class EmptyDataException(Exception):
    """Custom exception carrying no message; the name suggests it signals
    missing/empty input data (not observed raised in this module)."""

    def __init__(self):
        Exception.__init__(self)
class OptimizationException(Exception):
    """Custom exception carrying no message; the name suggests it signals an
    optimization/solver failure (not observed raised in this module)."""

    def __init__(self):
        Exception.__init__(self)
logging.getLogger().setLevel(logging.INFO)
class ExpertAgent(object):
    """Receding-horizon expert controller for the energy-market + battery env.

    Builds a "perfect forecast" of cleared prices by replaying the market
    environment without a battery (price-taker assumption), then repeatedly
    solves a CVXPY linear program over a fixed horizon and executes the first
    few planned power bids in the combined market/battery environment.
    """

    def __init__(self):
        # this is the environment on which the controller will be applied
        self.env = gym.make('energy_market_battery-v0')
        # we create those environments to get some info (clearing prices + battery dynamic)
        self.market = gym.make('energy_market-v0')
        self.battery = gym.make('battery-v0')
        # to create the prediction prices (perfect forecast)
        self.data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
        self.price_prediction_file_path = os.path.join(self.data_path, "price_prediction.csv")
        self.get_prediction_cleared_prices()
        self.price_prediction_df = self.load_price_predictions().dropna()
        # parameters for the online controller
        self.reset_memory_dict()
        self.planning_frequency = 1   # number of planned steps executed before re-planning
        self.time_horizon = 16        # planning horizon (number of time steps in the LP)
        self.max_soe, self.min_soe, self.max_power, self.min_power, self.battery_efficiency = self.battery.get_parameters_battery()
        # create the optimization problem
        self.problem = self.create_optimization_problem()

    def get_prediction_cleared_prices(self):
        """Replay the whole simulation without the battery (as a price taker we
        do not influence the market) and cache the cleared prices to CSV.

        This only needs to run once; subsequent runs reuse the cached file.
        """
        if not os.path.exists(self.price_prediction_file_path):
            logging.info('---- Create Prediction Prices ----')
            done = False
            # bid zero power at a very high price so the battery never clears
            action = np.array([0, 100000])
            i = 0
            price_prediction_dict = {'time_step': [], 'values': []}
            while not done:
                ob, reward, done, info_dict = self.market.step(action)
                price_prediction_dict['values'].append(info_dict['price_cleared'])
                price_prediction_dict['time_step'].append(info_dict['date'])
                if i % 100 == 0:
                    logging.info('----> Step %s' % (info_dict['date']))
                i += 1
            price_prediction_df = pd.DataFrame.from_dict(price_prediction_dict)
            price_prediction_df.to_csv(self.price_prediction_file_path)

    def load_price_predictions(self):
        """Load the cached cleared-price forecast.

        Returns:
            pd.DataFrame with at least 'time_step' and 'values' columns.
        """
        logging.info('---- Load Prediction Prices ----')
        # NOTE: previously this also computed df.mean()/df.cov() into unused
        # locals; that dead code was removed (mean() on a frame containing a
        # string 'time_step' column is fragile anyway).
        return pd.read_csv(self.price_prediction_file_path)

    def create_optimization_problem(self):
        """Create the generic LP solved at each planning step.

        Maximizes forecast revenue subject to battery state-of-energy dynamics
        and power/SOE bounds. Prices and initial SOE are cvx.Parameters so the
        same problem can be re-solved with new data.
        """
        self.price_predictions_interval = cvx.Parameter(self.time_horizon)
        self.initial_soe = cvx.Parameter()
        self.soe = cvx.Variable(self.time_horizon)
        self.planned_power = cvx.Variable(self.time_horizon)
        opt = cvx.Maximize(self.price_predictions_interval * self.planned_power)
        constraints = [self.soe[0] == self.initial_soe]
        for i in range(self.time_horizon-1):
            # discharge (positive power) reduces stored energy by efficiency factor
            constraints += [self.soe[i+1] == self.soe[i] - self.battery_efficiency * self.planned_power[i]]
        constraints += [self.soe <= self.max_soe] + [self.min_soe <= self.soe]
        constraints += [self.planned_power <= self.max_power] + [self.min_power <= self.planned_power]
        return cvx.Problem(opt, constraints)

    def planning(self, step, initial_soc):
        """Solve the LP from time `step` with initial state-of-energy `initial_soc`.

        Returns:
            np.ndarray of planned power bids over the horizon.
        """
        self.price_prediction_df['time_step'] = pd.to_datetime(self.price_prediction_df['time_step'])
        # normalize `step` to a plain datetime for comparison with the column
        step = datetime.datetime.strptime(step.strftime("%Y-%m-%d %H:%M:%S.%f"), "%Y-%m-%d %H:%M:%S.%f")
        values_planning_horizon = self.price_prediction_df[self.price_prediction_df['time_step'] >= step]['values']
        # np.resize pads by repetition if fewer than time_horizon values remain
        self.price_predictions_interval.value = np.resize(values_planning_horizon.values[:self.time_horizon], (self.time_horizon,))
        self.initial_soe.value = initial_soc
        # logging.info('---- Solve Optimization ----')
        self.problem.solve(solver=cvx.CVXOPT, verbose=False)
        # logging.info('---- Status: %s ----' % self.problem.status)
        planned_actions = self.planned_power.value
        return planned_actions

    def running(self, planned_actions):
        """Execute planned bids until it is time to re-plan; record the same
        outputs as the RL agent into self.memory_dict.

        Returns:
            bool: True if the environment episode terminated.
        """
        done = False
        for i in range(self.time_horizon):
            # execute at most `planning_frequency` steps of the plan
            if i >= self.planning_frequency or done:
                break
            action = [planned_actions[i], self.price_predictions_interval.value[i]]
            ob, reward, done, info_dict = self.env.step(action)
            self.memory_dict['soe'].append(ob[0])
            self.memory_dict['power_cleared'].append(ob[1])
            self.memory_dict['price_bid'].append(self.price_predictions_interval.value[i])
            self.memory_dict['reward'].append(reward)
            self.memory_dict['time_step'].append(info_dict['date'])
            self.memory_dict['done'].append(done)
            self.memory_dict['power_bid'].append(planned_actions[i])
        return done

    def reset_memory_dict(self):
        """Reset the environment and (re-)initialize the trajectory memory."""
        ob = self.env.reset()
        self.memory_dict = {'soe': [ob[0]],
                            'power_cleared': [0],
                            'price_bid': [0],
                            'reward': [0],
                            'done': [0],
                            'time_step': [0],
                            'power_bid': [0],
                            }
|
{"hexsha": "fe6a0a6dd5d4352c3035e65cdae6f64a2c87df0b", "size": 5871, "ext": "py", "lang": "Python", "max_stars_repo_path": "energym/envs/grid_scale/utils.py", "max_stars_repo_name": "mathildebadoual/energym", "max_stars_repo_head_hexsha": "bcdba783ea50a2c3adb9e6c86ecdfb1949bd59a5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-11-20T23:21:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-18T08:43:15.000Z", "max_issues_repo_path": "energym/envs/grid_scale/utils.py", "max_issues_repo_name": "mathildebadoual/energym", "max_issues_repo_head_hexsha": "bcdba783ea50a2c3adb9e6c86ecdfb1949bd59a5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-26T20:49:18.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-26T20:51:32.000Z", "max_forks_repo_path": "energym/envs/grid_scale/utils.py", "max_forks_repo_name": "mathildebadoual/energym", "max_forks_repo_head_hexsha": "bcdba783ea50a2c3adb9e6c86ecdfb1949bd59a5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1691176471, "max_line_length": 131, "alphanum_fraction": 0.638903083, "include": true, "reason": "import numpy,import cvxpy", "num_tokens": 1252}
|
__author__ = 'feurerm'
import copy
import unittest
import numpy as np
import sklearn.datasets
import sklearn.metrics
from autosklearn.pipeline.components.data_preprocessing.balancing.balancing \
import Balancing
from autosklearn.pipeline.classification import SimpleClassificationPipeline
from autosklearn.pipeline.components.classification.adaboost import AdaboostClassifier
from autosklearn.pipeline.components.classification.decision_tree import DecisionTree
from autosklearn.pipeline.components.classification.extra_trees import ExtraTreesClassifier
from autosklearn.pipeline.components.classification.gradient_boosting import GradientBoostingClassifier
from autosklearn.pipeline.components.classification.random_forest import RandomForest
from autosklearn.pipeline.components.classification.liblinear_svc import LibLinear_SVC
from autosklearn.pipeline.components.classification.libsvm_svc import LibSVM_SVC
from autosklearn.pipeline.components.classification.sgd import SGD
from autosklearn.pipeline.components.feature_preprocessing\
.extra_trees_preproc_for_classification import ExtraTreesPreprocessorClassification
from autosklearn.pipeline.components.feature_preprocessing.liblinear_svc_preprocessor import LibLinear_Preprocessor
class BalancingComponentTest(unittest.TestCase):
    """Tests for the Balancing data-preprocessing component.

    Covers two mechanisms: tree-based estimators receive per-sample weights
    via fit_params ('classifier:sample_weight'), while SVM/SGD estimators
    receive class weights via init_params ('classifier:class_weight').
    """

    def test_balancing_get_weights_treed_single_label(self):
        """Weighting for a 80/20 single-label problem: minority samples get
        proportionally larger weights (0.4 vs 1.6, mean weight 1)."""
        Y = np.array([0] * 80 + [1] * 20)
        balancing = Balancing(strategy='weighting')
        init_params, fit_params = balancing.get_weights(
            Y, 'adaboost', None, None, None)
        self.assertTrue(np.allclose(fit_params['classifier:sample_weight'],
                                    np.array([0.4] * 80 + [1.6] * 20)))
        #init_params, fit_params = balancing.get_weights(
        #    Y, None, 'extra_trees_preproc_for_classification', None, None)
        #self.assertTrue(np.allclose(fit_params['preprocessor:sample_weight'],
        #                            np.array([0.4] * 80 + [1.6] * 20)))

    def test_balancing_get_weights_treed_multilabel(self):
        """Weighting for a multilabel problem: rows are weighted by inverse
        frequency of their label combination (rare combination gets 4.0)."""
        Y = np.array([[0, 0, 0]] * 100 + [[1, 0, 0]] * 100 + [[0, 1, 0]] * 100 +
                     [[1, 1, 0]] * 100 + [[0, 0, 1]] * 100 + [[1, 0, 1]] * 10)
        balancing = Balancing(strategy='weighting')
        init_params, fit_params = balancing.get_weights(
            Y, 'adaboost', None, None, None)
        self.assertTrue(np.allclose(fit_params['classifier:sample_weight'],
                                    np.array([0.4] * 500 + [4.0] * 10)))
        #init_params, fit_params = balancing.get_weights(
        #    Y, None, 'extra_trees_preproc_for_classification', None, None)
        #self.assertTrue(np.allclose(fit_params['preprocessor:sample_weight'],
        #                            np.array([0.4] * 500 + [4.0] * 10)))

    def test_balancing_get_weights_svm_sgd(self):
        """SVM/SGD components are balanced via class_weight='auto' in
        init_params rather than per-sample weights."""
        Y = np.array([0] * 80 + [1] * 20)
        balancing = Balancing(strategy='weighting')
        init_params, fit_params = balancing.get_weights(
            Y, 'libsvm_svc', None, None, None)
        self.assertEqual(("classifier:class_weight", "auto"),
                         list(init_params.items())[0])
        init_params, fit_params = balancing.get_weights(
            Y, None, 'liblinear_svc_preprocessor', None, None)
        self.assertEqual(("preprocessor:class_weight", "auto"),
                         list(init_params.items())[0])

    def test_weighting_effect(self):
        """End-to-end regression test: fitting each classifier/preprocessor on
        an imbalanced dataset with strategy 'none' vs 'weighting' must
        reproduce the recorded F1 scores to 3 decimal places.

        NOTE(review): sklearn.metrics.f1_score expects (y_true, y_pred) but is
        called here as (predictions, Y_test); the recorded expected values were
        evidently produced with this order, so it is kept as-is.
        """
        data = sklearn.datasets.make_classification(
            n_samples=200, n_features=10, n_redundant=2, n_informative=2,
            n_repeated=2, n_clusters_per_class=2, weights=[0.8, 0.2],
            random_state=1)
        # Classifiers: (component name, class, F1 without weighting, F1 with weighting)
        for name, clf, acc_no_weighting, acc_weighting in \
                [('adaboost', AdaboostClassifier, 0.810, 0.735),
                 ('decision_tree', DecisionTree, 0.780, 0.643),
                 ('extra_trees', ExtraTreesClassifier, 0.75, 0.800),
                 ('gradient_boosting', GradientBoostingClassifier,
                  0.789, 0.762),
                 ('random_forest', RandomForest, 0.75, 0.821),
                 ('libsvm_svc', LibSVM_SVC, 0.769, 0.706),
                 ('liblinear_svc', LibLinear_SVC, 0.762, 0.72),
                 ('sgd', SGD, 0.739, 0.735)
                 ]:
            for strategy, acc in [('none', acc_no_weighting),
                                  ('weighting', acc_weighting)]:
                # Fit
                data_ = copy.copy(data)
                X_train = data_[0][:100]
                Y_train = data_[1][:100]
                X_test = data_[0][100:]
                Y_test = data_[1][100:]
                include = {'classifier': [name],
                           'preprocessor': ['no_preprocessing']}
                classifier = SimpleClassificationPipeline(
                    random_state=1, include=include)
                cs = classifier.get_hyperparameter_search_space()
                default = cs.get_default_configuration()
                default._values['balancing:strategy'] = strategy
                classifier = SimpleClassificationPipeline(
                    default, random_state=1, include=include)
                predictor = classifier.fit(X_train, Y_train)
                predictions = predictor.predict(X_test)
                self.assertAlmostEqual(acc,
                    sklearn.metrics.f1_score(predictions, Y_test),
                    places=3)
                # pre_transform and fit_estimator: the split fit path must give
                # the same score as the one-shot fit above
                data_ = copy.copy(data)
                X_train = data_[0][:100]
                Y_train = data_[1][:100]
                X_test = data_[0][100:]
                Y_test = data_[1][100:]
                classifier = SimpleClassificationPipeline(
                    default, random_state=1, include=include)
                classifier.set_hyperparameters(configuration=default)
                Xt, fit_params = classifier.pre_transform(X_train, Y_train)
                classifier.fit_estimator(Xt, Y_train, **fit_params)
                predictions = classifier.predict(X_test)
                self.assertAlmostEqual(acc,
                    sklearn.metrics.f1_score(
                        predictions, Y_test),
                    places=3)
        # Feature preprocessors paired with an SGD classifier
        for name, pre, acc_no_weighting, acc_weighting in \
                [('extra_trees_preproc_for_classification',
                  ExtraTreesPreprocessorClassification, 0.625, 0.634),
                 ('liblinear_svc_preprocessor', LibLinear_Preprocessor,
                  0.75, 0.706)]:
            for strategy, acc in [('none', acc_no_weighting),
                                  ('weighting', acc_weighting)]:
                data_ = copy.copy(data)
                X_train = data_[0][:100]
                Y_train = data_[1][:100]
                X_test = data_[0][100:]
                Y_test = data_[1][100:]
                include = {'classifier': ['sgd'], 'preprocessor': [name]}
                classifier = SimpleClassificationPipeline(
                    random_state=1, include=include)
                cs = classifier.get_hyperparameter_search_space()
                default = cs.get_default_configuration()
                default._values['balancing:strategy'] = strategy
                classifier.set_hyperparameters(default)
                predictor = classifier.fit(X_train, Y_train)
                predictions = predictor.predict(X_test)
                self.assertAlmostEqual(acc,
                    sklearn.metrics.f1_score(
                        predictions, Y_test),
                    places=3)
                # pre_transform and fit_estimator
                data_ = copy.copy(data)
                X_train = data_[0][:100]
                Y_train = data_[1][:100]
                X_test = data_[0][100:]
                Y_test = data_[1][100:]
                default._values['balancing:strategy'] = strategy
                classifier = SimpleClassificationPipeline(
                    default, random_state=1, include=include)
                Xt, fit_params = classifier.pre_transform(X_train, Y_train)
                classifier.fit_estimator(Xt, Y_train, **fit_params)
                predictions = classifier.predict(X_test)
                self.assertAlmostEqual(acc,
                    sklearn.metrics.f1_score(
                        predictions, Y_test),
                    places=3)
|
{"hexsha": "4c69804a03ebba980ae82324c095d91fea8cfbeb", "size": 8560, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_pipeline/components/data_preprocessing/test_balancing.py", "max_stars_repo_name": "wsyjwps1983/autosklearn", "max_stars_repo_head_hexsha": "2e29ebaca6bc26fa838f7c3b8b13960c600884e4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_pipeline/components/data_preprocessing/test_balancing.py", "max_issues_repo_name": "wsyjwps1983/autosklearn", "max_issues_repo_head_hexsha": "2e29ebaca6bc26fa838f7c3b8b13960c600884e4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_pipeline/components/data_preprocessing/test_balancing.py", "max_forks_repo_name": "wsyjwps1983/autosklearn", "max_forks_repo_head_hexsha": "2e29ebaca6bc26fa838f7c3b8b13960c600884e4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-04-01T11:53:20.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-01T11:53:20.000Z", "avg_line_length": 50.9523809524, "max_line_length": 115, "alphanum_fraction": 0.5789719626, "include": true, "reason": "import numpy", "num_tokens": 1784}
|
import astropy.units as u
import json
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.io import fits
from datetime import datetime as dt
from datetime import timedelta as tdelta
# Generate Fake Postage Stamp Cube (FITS cube)
# A (nt, ny, nx) cube of Gaussian sky noise plus per-frame timestamps in the header.
sky_background = 1000.
sky_sigma = 5.
nx = 12
ny = 16
nt = 42
data_cube = np.random.normal(sky_background, sky_sigma, (nt, ny, nx))
obstime = dt.utcnow()
unit = 'PAN000'  # I've given this the ID of PAN000 just in case it gets confused for data from a real unit.
camera = '0x2A'
target_name = 'faketarg'
seq_id = '{}{}_{}'.format(unit, camera, obstime.strftime('%Y%m%d_%H%M%SUT'))
xpixorg = 1042
ypixorg = 2042
exptime = 100.  # seconds
# NOTE: resolves the target name via an online lookup (requires network access)
c = SkyCoord.from_name('HR8799', frame='fk5')
hdu = fits.PrimaryHDU(data_cube)
metadata = {'SEQID': seq_id,
            'FIELD': target_name,
            'RA': c.ra.to(u.degree).value,
            'DEC': c.dec.to(u.degree).value,
            'EQUINOX': c.equinox.value,
            'OBSTIME': obstime.isoformat(),
            'XPIXORG': xpixorg,
            'YPIXORG': ypixorg,
            }
for t in range(nt):
    # slightly randomize time gap between images
    gap = tdelta(0, exptime + np.random.normal(5, 1))
    obstime = obstime + gap
    metadata['TIME{:04d}'.format(t)] = obstime.isoformat()
hdu.header.extend(metadata)
print(metadata)
# Fix: 'clobber' was deprecated in astropy 1.3 and removed in 2.0; use 'overwrite'.
hdu.writeto('PSC_0002.fits', overwrite=True)
# Generate Fake Lightcurve: one JSON record per frame with normalized
# R/G/B fluxes drawn around 1 with fixed per-channel scatter.
with open('PSC_0002.json', 'w') as FO:
    data = []
    for t in range(nt):
        time = hdu.header['TIME{:04d}'.format(t)]
        sig_r = 0.010
        sig_g = 0.006
        sig_b = 0.017
        r = np.random.normal(1, sig_r)
        g = np.random.normal(1, sig_g)
        b = np.random.normal(1, sig_b)
        entry = {
            'Time': time,
            'R': r,
            'G': g,
            'B': b,
            'sig_r': sig_r,
            'sig_g': sig_g,
            'sig_b': sig_b
        }
        data.append(entry)
    json.dump(data, FO)
|
{"hexsha": "c99bd1a98389386321640743dbd3e16584b651a9", "size": 1968, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/generate_PSC.py", "max_stars_repo_name": "wtgee/panoptes-pipeline", "max_stars_repo_head_hexsha": "3e7398698d5ce97aa6b40a11aa7af4dc480eb3af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/generate_PSC.py", "max_issues_repo_name": "wtgee/panoptes-pipeline", "max_issues_repo_head_hexsha": "3e7398698d5ce97aa6b40a11aa7af4dc480eb3af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/generate_PSC.py", "max_forks_repo_name": "wtgee/panoptes-pipeline", "max_forks_repo_head_hexsha": "3e7398698d5ce97aa6b40a11aa7af4dc480eb3af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1142857143, "max_line_length": 108, "alphanum_fraction": 0.5985772358, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 583}
|
import tensorflow as tf
import numpy as np
import re
from utils.bert import bert_utils
try:
from .trf_gpt_noise import model_fn_builder as noise_dist
from .trf_ebm_bert import model_fn_builder as ebm_dist
from .trf_classifier import (get_ebm_loss,
get_residual_ebm_loss,
get_ebm_mlm_adv_loss,
get_noise_loss,
ebm_noise_train_metric,
ebm_noise_eval_metric,
ebm_eval_metric,
ebm_train_metric,
get_ebm_mlm_adv_softmax_loss)
from .trf_ebm_noise_mlm_sample import model_fn_builder as mlm_noise_dist
except:
from trf_gpt_noise import model_fn_builder as noise_dist
from trf_ebm_bert import model_fn_builder as ebm_dist
from trf_ebm_noise_mlm_sample import model_fn_builder as mlm_noise_dist
from trf_classifier import (get_ebm_loss,
get_residual_ebm_loss,
get_ebm_mlm_adv_loss,
get_noise_loss,
ebm_noise_train_metric,
ebm_noise_eval_metric,
ebm_eval_metric,
ebm_train_metric,
get_ebm_mlm_adv_softmax_loss)
import tensorflow as tf
import numpy as np
from optimizer import optimizer
from optimizer import distributed_optimizer
from model_io import model_io
import tensorflow as tf
from metric import tf_metrics
from collections import OrderedDict
def get_train_op(model_cls, optimizer_fn, opt_config,
                ebm_dist_config, noise_dist_config,
                mlm_dist_config,
                features, labels, mode, params,
                **kargs):
    """Build the training op for joint/alternating EBM + generator optimization.

    Depending on kargs['train_op_type'] ('alternate', 'joint' or 'mlm_nce'),
    builds either a chain of dependent update ops that alternate between the
    EBM and the generator, or a single combined update. Returns the op that
    increments optimizer_fn.global_step after all updates.

    NOTE(review): in the 'alternate' branch only 'ebm' and 'generator' orders
    assign loss/tvars/opt; an 'ebm_logz' entry in the schedule would leave
    them unbound (NameError) — confirm the intended alternate_order.
    """
    # Per-submodel learning rates / optimizer types / inner-loop step counts.
    init_lr_dict = OrderedDict(zip(['ebm', 'ebm_logz', 'generator'], [ebm_dist_config['init_lr'], ebm_dist_config.get('logz_init_lr', ebm_dist_config['init_lr']), mlm_dist_config['init_lr']]))
    optimizer_type_dict = OrderedDict(zip(['ebm', 'ebm_logz', 'generator'], [ebm_dist_config['optimizer_type'], ebm_dist_config['logz_optimizer_type'], mlm_dist_config['optimizer_type']]))
    loop_step_dict = OrderedDict(zip(['ebm', 'ebm_logz', 'generator'], [ebm_dist_config.get("steps", 1), ebm_dist_config.get("logz_steps", 1), mlm_dist_config.get("steps", 1)]))
    if_grad_clip_dict = OrderedDict(zip(['ebm', 'ebm_logz', 'generator'], [True, True, True]))
    use_tpu = 1 if kargs.get('use_tpu', False) else 0

    def get_train_op(optimizer, loss, tvars, grad_name, if_grad_clip, **kargs):
        # Inner helper (intentionally shadows the outer name): compute
        # (optionally clipped) gradients and apply them under a per-model scope.
        if if_grad_clip:
            if use_tpu:
                grads = optimizer_fn.grad_clip_fn(loss, tvars, **kargs)
                grads_and_vars = zip(grads, tvars)
            else:
                grads_and_vars = optimizer_fn.grad_clip_fn(optimizer, loss, tvars, grad_name=grad_name, **kargs)
        else:
            if use_tpu:
                grads = tf.gradients(loss, tvars)
                grads_and_vars = zip(grads, tvars)
            else:
                grads_and_vars = optimizer.compute_gradients(loss, tvars)
                grads = [grad for grad, var in grads_and_vars]
                # per-variable and total gradient-norm summaries (CPU/GPU only)
                use_norm = tf.global_norm(grads)
                tf.summary.scalar(grad_name+'/total_grad_norm', use_norm)
                for grad, var in grads_and_vars:
                    if grad is not None:
                        var_grad_norm = tf.global_norm([grad])
                        tf.summary.scalar(grad_name+"/"+var.name, var_grad_norm)
        with tf.variable_scope(grad_name+"/"+"optimizer", reuse=tf.AUTO_REUSE):
            op = optimizer.apply_gradients(
                grads_and_vars)
        return op

    alternate_order = kargs.get("alternate_order", list(loop_step_dict.keys()))
    ebm_logz_update_circle = kargs.get("ebm_logz_update_circle", True)
    ebm_logz_update = kargs.get("ebm_logz_update", 1)
    model_cls.get_opt(optimizer_fn, init_lr_dict, optimizer_type_dict,
                      alternate_order=alternate_order,
                      ebm_logz_update_circle=ebm_logz_update_circle,
                      ebm_logz_update=ebm_logz_update,
                      use_tpu=kargs.get('use_tpu', False))
    # Map each global inner step index to the submodel trained at that step.
    step2order = OrderedDict({})
    cumsum_steps = [0]+np.cumsum([loop_step_dict[key] for key in alternate_order], axis=0).tolist()
    for step, order in enumerate(alternate_order):
        step_range = list(range(cumsum_steps[step], cumsum_steps[step+1]))
        for i in step_range:
            step2order[i] = order
    print("==step2order==", step2order)
    train_op = kargs.get('train_op_type', 'joint')
    if train_op == 'alternate':
        tf.logging.info("****** alternate optimization *******")
        prev_op = tf.no_op()
        for step in range(cumsum_steps[-1]):
            # Fix: removed dead `order = alternate_order[step]` — it was
            # immediately overwritten by step2order[step] below and raised
            # IndexError whenever cumsum_steps[-1] > len(alternate_order).
            with tf.control_dependencies([prev_op]):
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                with tf.control_dependencies(update_ops):
                    model_cls.get_loss(features, labels, mode, params, **kargs)
                order = step2order[step]
                if order == 'ebm':
                    loss = model_cls.ebm_opt_dict['loss']
                    tvars = model_cls.ebm_opt_dict['tvars']+model_cls.ebm_opt_dict['logz_tvars']
                    opt = model_cls.optimizer_dict['ebm']
                elif order == 'generator':
                    loss = model_cls.ebm_opt_dict['mlm_adv_loss']
                    tvars = model_cls.ebm_opt_dict['mlm_tvars']
                    opt = model_cls.optimizer_dict['generator']
                prev_op = get_train_op(opt, loss, tvars, order, if_grad_clip_dict[order])
    elif train_op == 'joint':
        tf.logging.info("****** joint optimization *******")
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            model_cls.get_loss(features, labels, mode, params, **kargs)
        loss = model_cls.ebm_opt_dict['loss']
        tvars = model_cls.ebm_opt_dict['tvars']+model_cls.ebm_opt_dict['logz_tvars']
        print(model_cls.ebm_opt_dict['logz_tvars'], '====logz_tvars=====')
        opt = model_cls.optimizer_dict['ebm']
        order = 'ebm'
        ebm_op = get_train_op(opt, loss, tvars, order, if_grad_clip_dict[order])
        prev_op = ebm_op
    elif train_op == 'mlm_nce':
        tf.logging.info("****** mlm_nce *******")
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            model_cls.get_loss(features, labels, mode, params, **kargs)
        # combined EBM + MLM loss over the union of both variable sets
        loss = model_cls.ebm_opt_dict['loss']+model_cls.ebm_opt_dict['mlm_loss']
        tvars = model_cls.ebm_opt_dict['tvars']+model_cls.ebm_opt_dict['logz_tvars']+model_cls.ebm_opt_dict['mlm_tvars']
        tvars = list(set(tvars))
        print(model_cls.ebm_opt_dict['logz_tvars'], '====logz_tvars=====')
        print(tvars, '===train tvars===')
        opt = model_cls.optimizer_dict['ebm']
        order = 'ebm'
        ebm_op = get_train_op(opt, loss, tvars, order, if_grad_clip_dict[order])
        prev_op = ebm_op
    with tf.control_dependencies([prev_op]):
        train_op = optimizer_fn.global_step.assign_add(1)
    return train_op
def token_seq_truncted(token_seq, finished_index, max_length):
    """Clip each sequence to max_length, append a finished_index column, then
    zero out everything after the first occurrence of finished_index.

    Returns an int32 tensor of shape [batch, max_length + 1].
    """
    dims = bert_utils.get_shape_list(token_seq, expected_rank=[2, 3])
    n_rows = dims[0]
    clipped = token_seq[:, :max_length]
    # guarantee every row contains at least one finished_index
    end_column = finished_index * tf.cast(tf.ones((n_rows, 1)), tf.int32)
    clipped = tf.cast(tf.concat([clipped, end_column], axis=-1), tf.int32)
    dims = bert_utils.get_shape_list(clipped, expected_rank=[2, 3])
    seq_len = dims[1]
    # position index where the token equals finished_index, else seq_len
    index_grid = tf.range(seq_len) * tf.ones_like(clipped)
    hit_or_len = tf.where(
        tf.equal(finished_index, clipped),
        x=index_grid,
        y=seq_len * tf.ones_like(clipped))
    first_hit = tf.reduce_min(hit_or_len, axis=1)
    keep_mask = tf.sequence_mask(first_hit + 1, maxlen=seq_len)
    kept = tf.cast(keep_mask, tf.float32) * tf.cast(clipped, tf.float32)
    return tf.cast(kept, tf.int32)
def mixed_sample(features, mix_ratio=0.2):
    """Draw an independent Bernoulli(mix_ratio) indicator per batch example.

    Returns a float32 tensor of shape [batch] with entries in {0.0, 1.0}.
    """
    dims = bert_utils.get_shape_list(features['input_mask'], expected_rank=[2, 3])
    keep_probs = mix_ratio * tf.cast(tf.ones((dims[0])), tf.float32)
    bernoulli = tf.distributions.Bernoulli(probs=keep_probs, dtype=tf.float32)
    return tf.cast(bernoulli.sample(), tf.float32)
def get_finised_pos_v1(token_seq, finished_index, max_length):
    """One-hot mask [batch, max_length] marking the FIRST position where
    token_seq equals finished_index (rows without a match one-hot at index
    seq_len, which falls outside max_length when seq_len >= max_length)."""
    dims = bert_utils.get_shape_list(token_seq, expected_rank=[2, 3])
    index_grid = tf.range(dims[1]) * tf.ones_like(token_seq)
    sentinel = dims[1] * tf.ones_like(token_seq)
    hit_or_len = tf.where(
        tf.equal(finished_index, token_seq),
        x=index_grid,
        y=sentinel)
    first_hit = tf.reduce_min(hit_or_len, axis=1)
    return tf.cast(tf.one_hot(first_hit, max_length), tf.float32)
def transfer2lm(input_ids, input_mask, finished_index=102, sentence_end_index=105):
    """Replace the first occurrence of finished_index in each row of input_ids
    with sentence_end_index; all other positions are untouched.

    Default token ids presumably correspond to a BERT-style vocabulary
    ([SEP]=102) — TODO confirm against the tokenizer in use.
    """
    dims = bert_utils.get_shape_list(input_ids, expected_rank=[2, 3])
    end_one_hot = tf.cast(get_finised_pos_v1(input_ids, finished_index, dims[1]), tf.float32)
    # subtract the old id and add the new one only at the one-hot position
    swapped = (tf.cast(input_ids, tf.float32)
               - float(finished_index) * end_one_hot
               + end_one_hot * sentence_end_index)
    return tf.cast(swapped, tf.int32)
def ebm_gen_distance(ebm_model_dict, gen_model_dict, **kargs):
    """Measure the mean-absolute parameter gap between EBM and generator
    variables whose names match, log it as a scalar summary, and return it.

    Fix: the computed gap was previously discarded (no return); it is now
    returned. Existing callers that ignore the result are unaffected.
    """
    gen_tvars = gen_model_dict['tvars']
    ebm_tvars = ebm_model_dict['tvars']
    loss = tf.constant(0.0)
    # de-duplicate generator variables by full name
    gen_var_dict = {}
    for var in gen_tvars:
        if var.name not in gen_var_dict:
            gen_var_dict[var.name] = var
    for key in gen_var_dict:
        print(key, gen_var_dict[key], '==========gen var====')
    for var in ebm_tvars:
        print(var, var.name, '==========ebm var====')
        for name in gen_var_dict:
            print(name, gen_var_dict[name], '==========gen var====')
            # NOTE(review): substring match on full variable names — an EBM
            # name that is a substring of several generator names pairs with
            # the first hit only; confirm scoping makes this unambiguous.
            if var.name in name:
                loss += tf.reduce_mean(tf.abs(var - gen_var_dict[name]))
                print(name, '==========gen var match ebm====', var.name)
                break
    if not kargs.get('use_tpu', False):
        tf.summary.scalar('ebm_gen_loss_gap', loss)
    return loss
class EBM_NOISE_NCE(object):
def __init__(self, model_config_dict,
num_labels_dict,
init_checkpoint_dict,
load_pretrained_dict,
model_io_config={},
opt_config={},
exclude_scope_dict={},
not_storage_params_dict={},
target_dict={},
**kargs):
self.model_config_dict = model_config_dict
self.init_checkpoint_dict = init_checkpoint_dict
self.load_pretrained_dict = load_pretrained_dict
self.exclude_scope_dict = exclude_scope_dict
self.target_dict = target_dict
self.not_storage_params_dict = not_storage_params_dict
self.model_io_config = model_io_config
self.opt_config = opt_config
self.num_labels_dict = num_labels_dict
self.train_op_type = kargs.get('train_op_type', 'joint')
self.ebm_prob_ln = False
self.stop_gradient_mlm = True
self.ebm_dist_fn = ebm_dist(self.model_config_dict['ebm_dist'],
self.num_labels_dict['ebm_dist'],
self.init_checkpoint_dict['ebm_dist'],
model_reuse=None,
load_pretrained=self.load_pretrained_dict['ebm_dist'],
model_io_config=self.model_io_config,
opt_config=self.opt_config,
exclude_scope=self.exclude_scope_dict.get('ebm_dist', ""),
not_storage_params=self.not_storage_params_dict.get('ebm_dist', []),
target=self.target_dict['ebm_dist'],
prob_ln=self.ebm_prob_ln,
transform=False,
transformer_activation="linear",
logz_mode='none',
normalized_constant="logv_constant_ln",
energy_pooling="cls",
softplus_features=False,
use_token_type=self.model_config_dict['ebm_dist'].get('use_token_type', True),
**kargs)
tf.logging.info("****** using bert mlm for noise dist sample *******")
global_step = tf.train.get_or_create_global_step()
# self.noise_sample_ratio = tf.train.polynomial_decay(
# 0.25,
# global_step,
# self.opt_config.num_train_steps,
# end_learning_rate=0.10,
# power=1.0,
# cycle=False)
gap = int(opt_config.num_train_steps / 5)
boundaries = [gap, 2*gap, 3*gap, 4*gap ]
values = [0.25, 0.20, 0.15, 0.1, 0.1]
tf.logging.info("==piecewise_constant==", boundaries)
tf.logging.info("==piecewise_constant==", values)
# self.noise_sample_ratio = tf.train.piecewise_constant(
# global_step,
# boundaries,
# values)
self.noise_sample_ratio = 0.15
self.mlm_noise_dist_fn = mlm_noise_dist(self.model_config_dict['generator'],
self.num_labels_dict['generator'],
self.init_checkpoint_dict['generator'],
model_reuse=None,
load_pretrained=self.load_pretrained_dict['generator'],
model_io_config=self.model_io_config,
opt_config=self.opt_config,
exclude_scope=self.exclude_scope_dict.get('generator', ""),
not_storage_params=self.not_storage_params_dict.get('generator', []),
target=self.target_dict['generator'],
mask_probability=self.noise_sample_ratio,
replace_probability=0.0,
original_probability=0.0,
use_token_type=self.model_config_dict['generator'].get('use_token_type', True),
stop_gradient_mlm=self.stop_gradient_mlm,
**kargs)
    def get_opt(self, optimizer_fn, init_lr_dict, optimizer_type_dict, **kargs):
        """Create one optimizer per submodel key in self.alternate_order.

        Regular keys get a decayed + warmed-up learning rate; the 'ebm_logz'
        key instead uses a constant lr that is optionally gated on/off every
        kargs['ebm_logz_update'] global steps. On TPU each optimizer is
        wrapped in a CrossShardOptimizer. Populates self.optimizer_dict.
        """
        self.init_lr_dict = init_lr_dict
        self.optimizer_type_dict = optimizer_type_dict
        self.optimizer_dict = {}
        self.alternate_order = kargs.get('alternate_order', list(self.init_lr_dict.keys()))
        print("==alternate order==", self.alternate_order)
        for key in self.alternate_order:
            init_lr = self.init_lr_dict[key]
            optimizer_type = self.optimizer_type_dict[key]
            # NOTE(review): a 'radam' optimizer for a non-logz key falls
            # through both branches, leaving learning_rate from the previous
            # loop iteration (or unbound on the first) — confirm intended.
            if optimizer_type != 'radam' and key not in ['ebm_logz']:
                learning_rate = optimizer_fn.lr_decay_fn(init_lr, self.opt_config.num_train_steps, **kargs)
                learning_rate = optimizer_fn.warm_up(learning_rate, init_lr, **kargs)
                tf.logging.info("****** leanring rate warm up:%s ******", key)
            elif key == 'ebm_logz':
                tf.logging.info("****** ebm logz learning rate ******")
                if kargs.get('ebm_logz_update_circle', False):
                    # lr_ratio is 1.0 only when global_step % ebm_logz_update == 0,
                    # effectively updating log-Z every N-th step.
                    lr_ratio = tf.floormod(
                                tf.train.get_or_create_global_step(),
                                kargs.get('ebm_logz_update', 5),
                                name="ebm_logz_update"
                                )
                    lr_ratio = tf.cast(tf.equal(tf.cast(lr_ratio, tf.int32), 0), tf.float32)
                    tf.logging.info("****** learning_rate circle update ****** with %s circle", kargs.get('ebm_logz_update', 5))
                else:
                    lr_ratio = 1.0
                    tf.logging.info("****** normal learning_rate ******")
                if not kargs.get("use_tpu", False):
                    tf.summary.scalar('{}_lr_ratio'.format(key), lr_ratio)
                learning_rate = init_lr * lr_ratio
            if not kargs.get("use_tpu", False):
                tf.summary.scalar('{}_learning_rate'.format(key), learning_rate)
            tf.logging.info("****** model:%s, optimizer: %s, learning_rate:%s", key, optimizer_type, str(init_lr))
            opt = optimizer_fn.optimizer_op(learning_rate, train_op=optimizer_type, **kargs)
            if kargs.get("use_tpu", False):
                tf.logging.info("***** Using tpu cross shard optimizer *****")
                opt = tf.contrib.tpu.CrossShardOptimizer(opt)
            self.optimizer_dict[key] = opt
def get_loss(self, features, labels, mode, params, **kargs):
    """Build the EBM and generator (MLM) losses for one step.

    Populates: self.mlm_noise_dist_dict, self.true_ebm_dist_dict,
    self.fake_ebm_dist_dict, self.ebm_loss, self.mlm_adv_loss,
    self.ebm_opt_dict, self.loss and self.tvars.

    Fix vs. original: the gan-type log call had no %s placeholder, so
    self.gan_type was silently dropped from the log line.
    """
    # "True" inputs: the original (un-noised) token ids plus masks.
    true_features = {}
    for key in features:
        if key == 'input_ori_ids':
            true_features["input_ids"] = tf.cast(features['input_ori_ids'], tf.int32)
        if key in ['input_mask', 'segment_ids']:
            true_features[key] = tf.cast(features[key], tf.int32)
    # First, sample noise ("fake") sequences from the MLM generator.
    self.mlm_noise_dist_dict = self.mlm_noise_dist_fn(features, labels, mode, params)
    # "Fake" inputs: generator samples with the generator's own mask.
    fake_features = {}
    fake_features["input_ids"] = self.mlm_noise_dist_dict['sampled_ids']
    fake_features["input_mask"] = self.mlm_noise_dist_dict['sampled_mask']
    fake_features['segment_ids'] = tf.zeros_like(features['input_mask'])
    tf.logging.info("****** using bert mlm stop gradient *******")
    # Score both real and sampled sequences with the energy model.
    self.true_ebm_dist_dict = self.ebm_dist_fn(true_features, labels, mode, params)
    self.fake_ebm_dist_dict = self.ebm_dist_fn(fake_features, labels, mode, params)
    self.gan_type = kargs.get('gan_type', 'JS')
    # Fixed: include the placeholder so the chosen gan type is logged.
    tf.logging.info("****** gan type:%s *******", self.gan_type)
    # Adversarial softmax loss couples the EBM loss and the generator's
    # adversarial loss (valid_mask restricts scoring to sampled positions).
    self.ebm_loss, self.mlm_adv_loss = get_ebm_mlm_adv_softmax_loss(
        self.true_ebm_dist_dict['logits'],
        self.fake_ebm_dist_dict['logits'],
        gan_type=self.gan_type,
        use_tpu=kargs.get('use_tpu', False),
        valid_mask=self.mlm_noise_dist_dict.get('valid_mask', None))
    # The log-partition head is trained with the same EBM objective.
    self.true_ebm_dist_dict['logz_loss'] = self.ebm_loss
    tf.logging.info("****** logging ebm gen diff *******")
    ebm_gen_distance(self.true_ebm_dist_dict,
                     self.mlm_noise_dist_dict,
                     **kargs)
    if self.model_config_dict['ebm_dist'].get('fix_embeddings', False):
        # Optionally freeze embedding tables by excluding them from the
        # EBM's trainable-variable list.
        tf.logging.info("****** generator parameter *******")
        self.true_ebm_dist_vars = []
        for var in self.true_ebm_dist_dict['tvars']:
            if 'embeddings' in var.name:
                print(var, "==emebdding vars==")
            else:
                self.true_ebm_dist_vars.append(var)
    else:
        self.true_ebm_dist_vars = self.true_ebm_dist_dict['tvars']
    # Scale the EBM loss once and reuse it for both entries below.
    ebm_loss_ratio = self.model_config_dict['ebm_dist'].get("ebm_loss_ratio", 10)
    self.ebm_opt_dict = {
        "ebm_loss": self.ebm_loss,
        "loss": self.ebm_loss * ebm_loss_ratio,
        "tvars": self.true_ebm_dist_vars,
        "logz_tvars": self.true_ebm_dist_dict['logz_tvars'],
        "logz_loss": self.true_ebm_dist_dict['logz_loss'],
        "mlm_adv_loss": self.mlm_adv_loss,
        "mlm_tvars": self.mlm_noise_dist_dict['tvars'],
        "mlm_loss": self.mlm_noise_dist_dict['loss'],
        "all_loss": self.mlm_noise_dist_dict['loss'] + self.ebm_loss * ebm_loss_ratio
    }
    self.loss = self.ebm_loss
    self.tvars = self.true_ebm_dist_dict['logz_tvars'] + self.true_ebm_dist_dict['tvars'] + self.mlm_noise_dist_dict['tvars']
def load_pretrained_model(self, **kargs):
"""Collect checkpoint-restore specs for each sub-model flagged "yes".

Builds self.var_checkpoint_dict_list with one entry per restorable
sub-model ('ebm_dist' and 'generator'); each entry carries the variables
to restore, the checkpoint path, an optional scope prefix to strip and
per-model restore-name overrides.
"""
self.var_checkpoint_dict_list = []
for key in self.init_checkpoint_dict:
if self.load_pretrained_dict[key] == "yes":
if key == 'ebm_dist':
tmp = {
# Restore both the energy model and its log-partition head.
"tvars":self.true_ebm_dist_dict['tvars']+self.true_ebm_dist_dict['logz_tvars'],
"init_checkpoint":self.init_checkpoint_dict['ebm_dist'],
"exclude_scope":self.exclude_scope_dict[key],
"restore_var_name":self.model_config_dict['ebm_dist'].get('restore_var_name', [])
}
# When weights are shared across sub-models, no scope needs stripping.
if kargs.get("sharing_mode", "none") != "none":
tmp['exclude_scope'] = ''
self.var_checkpoint_dict_list.append(tmp)
elif key == 'generator':
tmp = {
"tvars":self.mlm_noise_dist_dict['tvars'],
"init_checkpoint":self.init_checkpoint_dict['generator'],
"exclude_scope":self.exclude_scope_dict[key],
"restore_var_name":self.model_config_dict['generator'].get('restore_var_name', [])
}
if kargs.get("sharing_mode", "none") != "none":
tmp['exclude_scope'] = ''
self.var_checkpoint_dict_list.append(tmp)
def classifier_model_fn_builder(
model_config_dict,
num_labels_dict,
init_checkpoint_dict,
load_pretrained_dict,
model_io_config={},
opt_config={},
exclude_scope_dict={},
not_storage_params_dict={},
target_dict={},
**kargs):
"""Return a tf.estimator model_fn wiring the EBM + noise-generator model.

NOTE(review): the {} defaults are shared mutable objects across calls;
safe only if callers never mutate them — verify.
"""
def model_fn(features, labels, mode, params):
# 'joint' trains EBM and generator together (see get_train_op).
train_op_type = kargs.get('train_op_type', 'joint')
ebm_noise_fce = EBM_NOISE_NCE(model_config_dict,
num_labels_dict,
init_checkpoint_dict,
load_pretrained_dict,
model_io_config=model_io_config,
opt_config=opt_config,
exclude_scope_dict=exclude_scope_dict,
not_storage_params_dict=not_storage_params_dict,
target_dict=target_dict,
**kargs)
model_io_fn = model_io.ModelIO(model_io_config)
use_tpu = 1 if kargs.get('use_tpu', False) else 0
if mode == tf.estimator.ModeKeys.TRAIN:
# TPU and distributed-GPU paths use different optimizer wrappers.
if kargs.get('use_tpu', False):
optimizer_fn = optimizer.Optimizer(opt_config)
use_tpu = 1
else:
optimizer_fn = distributed_optimizer.Optimizer(opt_config)
use_tpu = 0
# NOTE(review): get_train_op presumably invokes ebm_noise_fce.get_loss
# internally — load_pretrained_model below reads the dicts that
# get_loss populates; confirm in get_train_op's definition.
train_op = get_train_op(
ebm_noise_fce,
optimizer_fn,
opt_config,
model_config_dict['ebm_dist'],
model_config_dict['noise_dist'],
model_config_dict['generator'],
features, labels, mode, params,
use_tpu=use_tpu,
train_op_type=train_op_type,
alternate_order=['ebm', 'generator'])
ebm_noise_fce.load_pretrained_model(**kargs)
var_checkpoint_dict_list = ebm_noise_fce.var_checkpoint_dict_list
loss = ebm_noise_fce.loss
tvars = ebm_noise_fce.tvars
# Restore pretrained weights (if any) via a scaffold.
if len(var_checkpoint_dict_list) >= 1:
scaffold_fn = model_io_fn.load_multi_pretrained(
var_checkpoint_dict_list,
use_tpu=use_tpu)
else:
scaffold_fn = None
metric_dict = ebm_train_metric(
ebm_noise_fce.true_ebm_dist_dict['logits'],
ebm_noise_fce.fake_ebm_dist_dict['logits']
)
# Summaries are host-side ops, not supported under TPU training.
if not kargs.get('use_tpu', False):
for key in metric_dict:
tf.summary.scalar(key, metric_dict[key])
tf.summary.scalar("ebm_loss", ebm_noise_fce.ebm_opt_dict['ebm_loss'])
tf.summary.scalar("mlm_loss", ebm_noise_fce.ebm_opt_dict['mlm_loss'])
tf.summary.scalar("all_loss", ebm_noise_fce.ebm_opt_dict['all_loss'])
model_io_fn.print_params(tvars, string=", trainable params")
if kargs.get('use_tpu', False):
estimator_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
else:
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op)
return estimator_spec
elif mode == tf.estimator.ModeKeys.EVAL:
# In EVAL the losses must be built explicitly before restoring weights.
ebm_noise_fce.get_loss(features, labels, mode, params, **kargs)
ebm_noise_fce.load_pretrained_model(**kargs)
var_checkpoint_dict_list = ebm_noise_fce.var_checkpoint_dict_list
loss = ebm_noise_fce.loss
if len(var_checkpoint_dict_list) >= 1:
scaffold_fn = model_io_fn.load_multi_pretrained(
var_checkpoint_dict_list,
use_tpu=use_tpu)
else:
scaffold_fn = None
# TPU metrics are passed as (fn, args); GPU metrics as a plain dict of ops.
tpu_eval_metrics = (ebm_eval_metric,
[
ebm_noise_fce.true_ebm_dist_dict['logits'],
ebm_noise_fce.fake_ebm_dist_dict['logits']
])
gpu_eval_metrics = ebm_eval_metric(
ebm_noise_fce.true_ebm_dist_dict['logits'],
ebm_noise_fce.fake_ebm_dist_dict['logits']
)
if kargs.get('use_tpu', False):
estimator_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=tpu_eval_metrics,
scaffold_fn=scaffold_fn)
else:
estimator_spec = tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops=gpu_eval_metrics)
return estimator_spec
else:
# PREDICT (and any other mode) is not supported by this builder.
raise NotImplementedError()
return model_fn
|
{"hexsha": "5865b8711a83133f6583774bc499d78da52352a8", "size": 23050, "ext": "py", "lang": "Python", "max_stars_repo_path": "t2t_bert/pretrain_finetuning/trf_bert_ebm_residual_estimator.py", "max_stars_repo_name": "yyht/bert", "max_stars_repo_head_hexsha": "480c909e0835a455606e829310ff949c9dd23549", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2018-12-19T01:00:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-26T09:36:37.000Z", "max_issues_repo_path": "t2t_bert/pretrain_finetuning/trf_bert_ebm_residual_estimator.py", "max_issues_repo_name": "yyht/bert", "max_issues_repo_head_hexsha": "480c909e0835a455606e829310ff949c9dd23549", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-12-25T03:37:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T14:43:58.000Z", "max_forks_repo_path": "t2t_bert/pretrain_finetuning/trf_bert_ebm_residual_estimator.py", "max_forks_repo_name": "yyht/bert", "max_forks_repo_head_hexsha": "480c909e0835a455606e829310ff949c9dd23549", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-12-27T08:00:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-08T03:05:14.000Z", "avg_line_length": 38.3527454243, "max_line_length": 189, "alphanum_fraction": 0.7005639913, "include": true, "reason": "import numpy", "num_tokens": 6364}
|
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is part of a kth fold optimization tool
#pandas_datarader is deprecated, use YahooGrabber
#Import modules
import numpy as np
import pandas as pd
from pandas_datareader import data
#Request/read in data (pandas_datareader 'yahoo' is deprecated — see header)
s1 = data.DataReader('^GSPC', 'yahoo', start='01/01/1950', end='01/01/1972')
#Pickled grid of RSI parameter sets: rows are [window, long_th, short_th, exit_hi, exit_lo]
testset1 = pd.read_pickle('SP500RSI50_72_4M')
#Drop duplicate columns, keeping the first occurrence
testset1 = testset1.loc[:,~testset1.columns.duplicated()]
#For all param sets
for i in testset1:
#Load params
a = testset1[i].iloc[0]
aa = a.astype(int)
b = testset1[i].iloc[1]
c = testset1[i].iloc[2]
d = testset1[i].iloc[3]
e = testset1[i].iloc[4]
#Empty structures
# NOTE(review): openspace/openseries are re-created every iteration and
# never read afterwards — appears to be dead scaffolding.
openspace = []
openseries = pd.Series()
#Calculate log returns
s1['LogRet'] = np.log(s1['Adj Close']/s1['Adj Close'].shift(1))
s1['LogRet'] = s1['LogRet'].fillna(0)
#RSI calculation (simple-moving-average variant, window = aa)
close = s1['Adj Close']
window = aa
delta = close.diff()
delta = delta[1:]
up, down = delta.copy(), delta.copy()
up[up < 0] = 0
down[down > 0] = 0
AvgGain = up.rolling(window).mean()
AvgLoss = down.abs().rolling(window).mean()
RS = AvgGain/AvgLoss
RSI = 100 - (100/(1.0+RS))
s1['RSI'] = RSI
s1['RSI'] = s1['RSI'].fillna(0)
#Directional methodology: Touch opens positions, Sustain holds them
s1['Touch'] = np.where(s1['RSI'] < b, 1, 0) #long signal
s1['Touch'] = np.where(s1['RSI'] > c, -1, s1['Touch']) #short signal
s1['Sustain'] = np.where(s1['Touch'].shift(1) == 1, 1, 0) # never true when optimized
s1['Sustain'] = np.where(s1['Sustain'].shift(1) == 1, 1,
s1['Sustain'])
# NOTE(review): the next assignment overwrites the long-side Sustain values
# computed above with 0 wherever the short condition fails — the long hold
# logic is therefore discarded; the inline comments suggest this is known.
s1['Sustain'] = np.where(s1['Touch'].shift(1) == -1, -1, 0) #true when previous day touch is -1, and current RSI is > line 37 threshold
s1['Sustain'] = np.where(s1['Sustain'].shift(1) == -1, -1,
s1['Sustain'])
s1['Sustain'] = np.where(s1['RSI'] > d, 0, s1['Sustain']) #if RSI is greater than threshold, sustain is forced to 0
s1['Sustain'] = np.where(s1['RSI'] < e, 0, s1['Sustain']) #never actually true when optimized
s1['Regime'] = s1['Touch'] + s1['Sustain']
#Apply direction to returns (shifted one day to avoid look-ahead)
s1['Strategy'] = (s1['Regime'][window:]).shift(1)*s1['LogRet'][window:]
s1['Strategy'] = s1['Strategy'].fillna(0)
#Compound strategy vs log returns - use np.exp // np.cumsum
endgains = 1
endreturns = 1
# NOTE(review): var/avar/intvar are never populated or read.
var = []
avar = []
intvar = []
for g in s1['LogRet']:
slate = endreturns * (1+g)
endreturns = slate
for q in s1['Strategy']:
otherslate = endgains * (1+q)
endgains = otherslate
# if endreturns > endgains:
# continue
#Performance metric
# NOTE(review): sharpe is reassigned on every parameter set without being
# stored, so only the last set's value survives the loop — confirm intent.
sharpe = (s1['Strategy'].mean()-s1['LogRet'].mean())/s1['Strategy'].std()
|
{"hexsha": "b9df5b4766279baef32ac75a3cfa99aac5e27cc4", "size": 2951, "ext": "py", "lang": "Python", "max_stars_repo_path": "KthFold+RSII.py", "max_stars_repo_name": "adamrvfisher/TechnicalAnalysisLibrary", "max_stars_repo_head_hexsha": "38a22b2b2b5052623f81edb11b3c5460fc254e45", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-04-26T11:13:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-10T05:58:16.000Z", "max_issues_repo_path": "KthFold+RSII.py", "max_issues_repo_name": "adamrvfisher/TechnicalAnalysisLibrary", "max_issues_repo_head_hexsha": "38a22b2b2b5052623f81edb11b3c5460fc254e45", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "KthFold+RSII.py", "max_forks_repo_name": "adamrvfisher/TechnicalAnalysisLibrary", "max_forks_repo_head_hexsha": "38a22b2b2b5052623f81edb11b3c5460fc254e45", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8875, "max_line_length": 141, "alphanum_fraction": 0.5737038292, "include": true, "reason": "import numpy", "num_tokens": 978}
|
import numpy as np
from fnc_common import (get_unique_2d)
from fnc_data import (load_examined_coords)
def calculate_PCF(coords, r_max, eu_side):
# calculate Pair Correlation Function (PCF) for evidence units represented by their coordinates
"""
Compute the two-dimensional pair correlation function, also known
as the radial distribution function, for a set of circular particles
contained in a square region of a plane. This simple function finds
reference particles such that a circle of radius r_max drawn around the
particle will fit entirely within the square, eliminating the need to
compensate for edge effects. If no such particles exist, an error is
returned
Modified after Craig Finch (https://github.com/cfinch/Shocksolution_Examples/tree/master/PairCorrelation)
"""
# inputs:
# coords = [[X, Y], ...]; unique coordinates of evidence units
# r_max = maximum radius to calculate for
# eu_side = evidence unit square side (m)
# returns a numpy array: pcf = [[r, g], ...]; where r = radius of the annulus used to compute g(r), g = average correlation function g(r) based on all solutions
sx = coords[:, 0].max() - coords[:, 0].min()
sy = coords[:, 1].max() - coords[:, 1].min()
if r_max is None:
r_max = min(sx, sy) / 4
# Number of particles in ring/area of ring/number of reference particles/number density
# area of ring = pi*(r_outer**2 - r_inner**2)
edges = np.arange(0., r_max + 1.1 * eu_side, eu_side)
num_increments = len(edges) - 1
radii = np.zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i + 1]) / 2.
r_outer = edges[i + 1]
r_inner = edges[i]
x, y = (coords - coords.min(axis=0)).T
# Find particles which are close enough to the box center that a circle of radius
# r_max will not cross any edge of the box
bools1 = x > r_max
bools2 = x < (sx - r_max)
bools3 = y > r_max
bools4 = y < (sy - r_max)
interior_indices, = np.where(bools1 * bools2 * bools3 * bools4)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
return None
g = np.zeros([num_interior_particles, num_increments])
number_density = len(x) / (sx * sy)
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = np.sqrt((x[index] - x) ** 2 + (y[index] - y) ** 2)
d[index] = 2 * r_max
d[np.isnan(d)] = 0
(result, bins) = np.histogram(d, bins=edges, normed=False)
g[p, :] = result / number_density
# Average g(r) for all interior particles and compute radii
g_average = np.zeros(num_increments)
for i in range(num_increments):
g_average[i] = np.mean(g[:, i]) / (np.pi * (r_outer ** 2 - r_inner ** 2))
return np.vstack((radii, g_average)).T
def calculate_PCF_solutions(solutions, coords, eu_side):
# calculate PCF for the whole set of solutions
# inputs:
# solutions[si, i, pi] = True/False; where si = index of solution, i = index in coords and pi = index of phase
# coords = [[X, Y], ...]; unique coordinates of evidence units
# eu_side = evidence unit square side (m)
# returns a numpy array: pcf[pi] = [r, g]; where pi = index of phase, r = radius of the annulus used to compute g(r), g = average correlation function g(r)
"""
Modified after Craig Finch (https://github.com/cfinch/Shocksolution_Examples/tree/master/PairCorrelation)
"""
def calculate_PCF_phase(solutions, pi, coords, sx, sy, r_max, eu_side):
# returns a numpy array: pcf = [[r, g], ...]; where r = radius of the annulus used to compute g(r), g = average correlation function g(r) based on all solutions
# Number of particles in ring/area of ring/number of reference particles/number density
# area of ring = pi*(r_outer**2 - r_inner**2)
edges = np.arange(0., r_max + 1.1 * eu_side, eu_side)
num_increments = len(edges) - 1
radii = np.zeros(num_increments)
for i in range(num_increments):
radii[i] = (edges[i] + edges[i + 1]) / 2.
r_outer = edges[i + 1]
r_inner = edges[i]
g = {} # {si: g, ...}
for si in range(solutions.shape[0]):
coords_si = coords[solutions[si, :, pi]]
coords_si -= coords_si.min(axis=0)
x, y = coords_si.T
# Find particles which are close enough to the box center that a circle of radius
# r_max will not cross any edge of the box
bools1 = x > r_max
bools2 = x < (sx - r_max)
bools3 = y > r_max
bools4 = y < (sy - r_max)
interior_indices, = np.where(bools1 * bools2 * bools3 * bools4)
num_interior_particles = len(interior_indices)
if num_interior_particles < 1:
g[si] = None
continue
g[si] = np.zeros([num_interior_particles, num_increments])
number_density = len(x) / (sx * sy)
# Compute pairwise correlation for each interior particle
for p in range(num_interior_particles):
index = interior_indices[p]
d = np.sqrt((x[index] - x) ** 2 + (y[index] - y) ** 2)
d[index] = 2 * r_max
d[np.isnan(d)] = 0
(result, bins) = np.histogram(d, bins=edges, normed=False)
g[si][p, :] = result / number_density
# Average g(r) for all interior particles and compute radii
g_average = np.zeros(num_increments)
for i in range(num_increments):
g_i = np.array([])
for si in g:
if not g[si] is None:
g_i = np.hstack((g_i, g[si][:, i]))
g_i = g_i[~np.isnan(g_i)]
if g_i.size:
g_average[i] = np.mean(g_i) / (np.pi * (r_outer ** 2 - r_inner ** 2))
else:
g_average[i] = np.nan
return np.vstack((radii, g_average)).T
# find maximum search radius for PCF
sx = coords[:, 0].max() - coords[:, 0].min()
sy = coords[:, 1].max() - coords[:, 1].min()
r_max = min(sx, sy) / 4
pcf = []
for pi in range(solutions.shape[2]):
pcf.append(calculate_PCF_phase(solutions, pi, coords, sx, sy, r_max, eu_side))
pcf = np.array(
pcf) # pcf[pi] = [r, g]; where pi = index of phase, r = radius of the annulus used to compute g(r), g = average correlation function g(r)
return pcf
def calculate_PCF_randomized(solutions, path_coords_examined, extent, eu_side, randomize_n):
# calculate PCF for a randomized set of solutions, generated based on the actual solutions
# inputs:
# solutions[si, i, pi] = True/False; where si = index of solution, i = index in coords and pi = index of phase
# path_coords_examined = path in string format to a CSV file containing all examined coordinates
# extent = [xmin, xmax, ymin, ymax] analysis window (inferred from the cropping below)
# eu_side = evidence unit square side (m)
# randomize_n = number of randomized solutions to generate when calculating the PCF
# returns a list: pcf_randomized
# pcf_randomized[pi] = [[radii, g_lower, g_upper], ...]; where pi = index of phase; radii = [r, ...] and g_lower, g_upper = [g, ...]; in order of radii
# g = correlation function g(r)
# r = radius of the annulus used to compute g(r)
# g_lower, g_upper = 5th and 95th percentiles of randomly generated values of g for phase pi
# NOTE(review): np.random.choice below is unseeded, so results differ between runs.
solutions_n = solutions.shape[0]
phases_n = solutions.shape[2]
# load coordinates of all examined units (walked fields and excavation sites)
coords_examined = load_examined_coords(path_coords_examined) # [[X, Y], ...]
# reduce resolution of coords_examined to eu_side
coords_examined = get_unique_2d((np.round(coords_examined / eu_side) * eu_side).astype(int))
# crop coords_examined to extent of analysis
coords_examined = coords_examined[(coords_examined[:, 0] > extent[0]) & (coords_examined[:, 0] < extent[1]) & (
coords_examined[:, 1] > extent[2]) & (coords_examined[:, 1] < extent[3])]
# find maximum search radius for PCF
dx = coords_examined[:, 0].max() - coords_examined[:, 0].min()
dy = coords_examined[:, 1].max() - coords_examined[:, 1].min()
r_max = min(dx, dy) / 4
# generate randomized solutions
pcf_randomized = [] # pcf_randomized[pi] = [[radii, g_lower, g_upper], ...]; where radii = [r, ...] and g_lower, g_upper = [g, ...]; in order of radii
# mean number of units per phase across solutions -> size of each random draw
coords_n = dict([(pi, int(round(sum([solutions[si, :, pi].sum() for si in range(solutions_n)]) / solutions_n))) for pi in range(phases_n)])
# coords_n = {pi: n, ...}
for pi in range(phases_n):
print("\rrandomizing phase %d/%d " % (pi + 1, phases_n), end = "")
res = {} # {radius: g_r, ...}
for ri in range(randomize_n):
# PCF of a random subset of examined units with the same unit count as phase pi
pcf_rnd = calculate_PCF(
coords_examined[np.random.choice(coords_examined.shape[0], coords_n[pi], replace=False)], r_max,
eu_side) # [[r, g], ...]; where r = radius of the annulus used to compute g(r), g = average correlation function g(r) based on all solutions
if pcf_rnd is not None:
# collect g samples per (integer-rounded) radius
for r, g in pcf_rnd:
if r is not None:
r = int(round(r))
if r not in res:
res[r] = []
res[r].append(g)
# 5th/95th percentile envelope of g over all random draws, per radius
radii = np.unique(list(res.keys())).astype(int)
g_lower = np.zeros(radii.shape[0])
g_upper = np.zeros(radii.shape[0])
for i, r in enumerate(radii):
g_lower[i] = np.percentile(res[r], 5)
g_upper[i] = np.percentile(res[r], 95)
pcf_randomized.append([radii, g_lower, g_upper])
return pcf_randomized
|
{"hexsha": "2ea38883a77057e70f16c9e975be53f9156170fb", "size": 8912, "ext": "py", "lang": "Python", "max_stars_repo_path": "fnc_pcf.py", "max_stars_repo_name": "demjanp/chrono_spatial_modelling", "max_stars_repo_head_hexsha": "14fde811ebdfaa156238ce0cb9da84274877496e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-07-10T17:00:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-22T07:34:51.000Z", "max_issues_repo_path": "fnc_pcf.py", "max_issues_repo_name": "demjanp/chrono_spatial_modelling", "max_issues_repo_head_hexsha": "14fde811ebdfaa156238ce0cb9da84274877496e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fnc_pcf.py", "max_forks_repo_name": "demjanp/chrono_spatial_modelling", "max_forks_repo_head_hexsha": "14fde811ebdfaa156238ce0cb9da84274877496e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2599118943, "max_line_length": 162, "alphanum_fraction": 0.6763913824, "include": true, "reason": "import numpy", "num_tokens": 2725}
|
import numpy as np
import pandas as pd
# Tutorial script: build a small country DataFrame and demonstrate
# selection, dropping, arithmetic and index manipulation.
# manually creating dataframe
df = pd.DataFrame({
'Population': [35.467, 63.951, 80.94 , 60.665, 127.061, 64.511, 318.523],
'GDP': [
1785387,
2833687,
3874437,
2167744,
4602367,
2950039,
17348075
],
'Surface Area': [
9984670,
640679,
357114,
301336,
377930,
242495,
9525067
],
'HDI': [
0.913,
0.888,
0.916,
0.873,
0.891,
0.907,
0.915
],
'Continent': [
'America',
'Europe',
'Europe',
'Europe',
'Asia',
'Europe',
'America'
]
}, columns=['Population', 'GDP', 'Surface Area', 'HDI', 'Continent'])
# Creating index for the dataframe (one label per row, in order)
df.index = [
'Canada',
'France',
'Germany',
'Italy',
'Japan',
'United Kingdom',
'United States',
]
# Selecting a row based on the index label
df.loc['Canada']
# Selecting a row based on sequential (positional) index
df.iloc[0]
# Selecting a column (returns a Series)
df['Population']
# Selecting multiple columns (returns a DataFrame)
df[['Population', 'GDP']]
# Selecting multiple rows by position; iloc slices EXCLUDE the stop (rows 1-2)
df.iloc[1:3]
# Selecting multiple rows by label; loc slices INCLUDE both endpoints (3 rows)
df.loc['France': 'Italy']
# Second argument can be added for columns
df.loc['France': 'Italy', 'Population']
df.loc['France': 'Italy', ['Population', 'GDP']]
df.iloc[1:3, [0, 3]]
# Conditional (boolean-mask) selection
df.loc[df['Population'] > 70]
df.loc[df['Population'] > 70, ['Population', 'GDP']]
# Dropping rows; drop returns a NEW DataFrame, df itself is unchanged
df.drop(['Canada', 'Japan'])
# Given that df.index returns all indices value, any of them
# can be selected with [] operator
df.drop(df.index[1])
# Dropping columns (axis=1 means column labels)
df.drop(['Population', 'HDI'], axis=1)
# Dropping rows explicitly via axis=0 (the default)
df.drop(['Italy', 'Canada'], axis=0)
# Element-wise arithmetic on selected columns
df[['Population', 'GDP']] / 100
# Radical index changes (both return new DataFrames)
df.reset_index()
df.set_index('Population')
# Creating columns from other columns (this one DOES modify df in place)
df['GDP Per Capita'] = df['GDP'] / df['Population']
|
{"hexsha": "493da892ad2764ae69b6621cb17b84759aff4ed0", "size": 2043, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pandas/pandas_dataframe.py", "max_stars_repo_name": "barnwalp/machine_learning", "max_stars_repo_head_hexsha": "d2ec23001a10b8f3bd70c821374fa1ab91a9f599", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Pandas/pandas_dataframe.py", "max_issues_repo_name": "barnwalp/machine_learning", "max_issues_repo_head_hexsha": "d2ec23001a10b8f3bd70c821374fa1ab91a9f599", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pandas/pandas_dataframe.py", "max_forks_repo_name": "barnwalp/machine_learning", "max_forks_repo_head_hexsha": "d2ec23001a10b8f3bd70c821374fa1ab91a9f599", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.43, "max_line_length": 77, "alphanum_fraction": 0.5814977974, "include": true, "reason": "import numpy", "num_tokens": 614}
|
[STATEMENT]
lemma coeffs_poly_of_vec:
"coeffs (poly_of_vec v) = rev (dropWhile ((=) 0) (list_of_vec v))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. coeffs (poly_of_vec v) = rev (dropWhile ((=) (0::'a)) (list_of_vec v))
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. coeffs (poly_of_vec v) = rev (dropWhile ((=) (0::'a)) (list_of_vec v))
[PROOF STEP]
obtain n f where v: "v = vec n f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>n f. v = vec n f \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by transfer auto
[PROOF STATE]
proof (state)
this:
v = vec n f
goal (1 subgoal):
1. coeffs (poly_of_vec v) = rev (dropWhile ((=) (0::'a)) (list_of_vec v))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. coeffs (poly_of_vec v) = rev (dropWhile ((=) (0::'a)) (list_of_vec v))
[PROOF STEP]
by (simp add: v poly_of_vec_vec)
[PROOF STATE]
proof (state)
this:
coeffs (poly_of_vec v) = rev (dropWhile ((=) (0::'a)) (list_of_vec v))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 489, "file": "LLL_Basis_Reduction_Missing_Lemmas", "length": 6}
|
import os
import logging
import numpy as np
def rename_dir(url, reverse=True, static=True):
    """
    Rename every entry inside `url` to a timestamp-based name
    (YYYYMMDDHHMMSS + 3-digit counter, per the original spec
    "YYYYMMDDhhmmsss+3[001]") and record each old-name -> new-name
    mapping in directory.ini via _store_().

    :param url: directory whose entries are renamed (relative or absolute)
    :param reverse: reserved flag for reverse vs. forward renaming
        (mentioned in the original docstring; currently unused — TODO confirm)
    :param static: accepted for backward compatibility — the existing
        call site passes static=True, which previously raised TypeError
    :return: None

    Fixes vs. original: the missing `static` parameter crashed the call
    site; os.renames() was called with bare relative names (renaming
    everything to "b" in the CWD); `++a` was a no-op instead of an
    increment.
    """
    import time  # local import: the surrounding file only imports os/logging/numpy
    if not _exist_(url):
        # _exist_() already reported the problem
        return
    url = os.path.abspath(url)
    logging.info("renaming entries under %s", url)
    stamp = time.strftime("%Y%m%d%H%M%S")
    for counter, name in enumerate(sorted(os.listdir(url))):
        # New name: shared timestamp prefix + zero-padded 3-digit counter.
        new_name = "%s%03d" % (stamp, counter)
        os.renames(os.path.join(url, name), os.path.join(url, new_name))
        _store_(name, new_name)
def _exist_(url):
""" s=os.path.abspath('../ssdfdssdf')
print(s)
print(os.path.exists(s),os.path.isdir(s))"""
if os.path.exists(url):
s=url
else:
s = os.path.abspath(url)
print(s)
print(os.path.exists(s),os.path.isdir(s))
if os.path.exists(s) and os.path.isdir(s):
return True
else:
print(url + " don't exist or isn't a dir")
def _store_(doc_name, a):
store = open("directory.ini", "w+")
list = [doc_name, '=', a]
store.write(list)
store.append("\n")
store.close()
if __name__ == "__main__":
rename_dir(url='D:\Work\Workspace\liber\src\\python\\ssdfdssdf\\abc',static=True,reverse=True)
|
{"hexsha": "7c69cb1afd8f5e1e920ab69147c283e6ac126cb9", "size": 1711, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/main/python/qxy/rename_test.py", "max_stars_repo_name": "gwdgithubnom/ox-patient", "max_stars_repo_head_hexsha": "cddf4fe381cb4506db8e0d62803dd2044cf7ad92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/main/python/qxy/rename_test.py", "max_issues_repo_name": "gwdgithubnom/ox-patient", "max_issues_repo_head_hexsha": "cddf4fe381cb4506db8e0d62803dd2044cf7ad92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main/python/qxy/rename_test.py", "max_forks_repo_name": "gwdgithubnom/ox-patient", "max_forks_repo_head_hexsha": "cddf4fe381cb4506db8e0d62803dd2044cf7ad92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-14T00:45:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-14T00:45:38.000Z", "avg_line_length": 23.1216216216, "max_line_length": 99, "alphanum_fraction": 0.5733489188, "include": true, "reason": "import numpy", "num_tokens": 551}
|
[STATEMENT]
lemma CONSTRAINT_D:
assumes "CONSTRAINT (P::'a => bool) x"
shows "P x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P x
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
CONSTRAINT P x
goal (1 subgoal):
1. P x
[PROOF STEP]
unfolding CONSTRAINT_def
[PROOF STATE]
proof (prove)
using this:
P x
goal (1 subgoal):
1. P x
[PROOF STEP]
by simp
|
{"llama_tokens": 177, "file": "Refine_Imperative_HOL_Sepref_Constraints", "length": 3}
|
\subsection{Semirings}\label{subsec:semirings}
We will start by defining semirings, and to do that we will first motivate distributivity.
\begin{proposition}\label{thm:monoid_distributivity}
Fix an \hyperref[rem:additive_magma/multiplication]{additive} \hyperref[def:monoid]{monoid} \( (R, +, \cdot) \), where \( +: R \times R \to R \) is the monoid operation and \( \cdot: \BbbN \times R \to R \) is defined via \eqref{eq:rem:additive_magma/multiplication}.
We have the following property, which we call \term{distributivity} of \( \cdot \) over \( + \):
\begin{equation}\label{eq:thm:monoid_distributivity}
n \cdot (x + y) = n \cdot x + n \cdot y.
\end{equation}
\end{proposition}
\begin{proof}
We use induction on \( n \). The case \( n = 0 \) is trivial. Suppose that \eqref{eq:thm:monoid_distributivity} holds. Then
\begin{equation*}
(n + 1) \cdot (x + y)
\reloset {\eqref{eq:def:magma/exponentiation}} =
n \cdot (x + y) + (x + y)
\reloset {\T{ind.}} =
n \cdot x + n \cdot y + (x + y)
\reloset {\eqref{eq:def:magma/exponentiation}} =
(n + 1) \cdot x + (n + 1) \cdot y.
\end{equation*}
\end{proof}
\begin{definition}\label{def:semiring}\mcite[1]{Golan2010}
A \term{semiring} is a \hyperref[def:magma/commutative]{commutative} \hyperref[def:monoid]{monoid} \( (R, +) \) with a second \hyperref[def:magma/associative]{associative} operation \( \cdot: R \times R \to R \) called \term{multiplication}, which extends multiplication with natural numbers. The precise compatibility axioms are listed in \fullref{def:semiring/theory} because they fit nicely into first-order logic (unlike the \hyperref[def:semimodule/theory]{theory of semimodules}, for example, for which we prefer expressing these conditions in the metalogic).
Although not strictly necessary, it will be convenient for us to assume that multiplication has an identity. If a multiplicative identity does not exist, we call \( (R, +, \cdot) \) a \term{nonunital semiring}. A canonical example of a nonunital semiring is a \hyperref[def:semiring_ideal]{semiring ideal}. We will not use nonunital semirings, but it is important to acknowledge their existence. In this context, if an identity exists, we will sometimes call \( (R, +, \cdot) \) a \term{unital semiring}.
We call \( (R, +) \) the \term{additive monoid} and \( (R, \cdot) \) the \term{multiplicative monoid} of the semiring. We also consider the \term{additive group} and the \term{multiplicative group} as the subsets of \hyperref[def:monoid_inverse]{invertible} elements. Both are instances of \fullref{thm:invertible_submonoid_is_group}. The multiplicative group is denoted by \( R^\times \); it is discussed further in \hyperref[def:divisibility/unit]{units}.
Semirings have the following metamathematical properties:
\begin{thmenum}
\thmitem{def:semiring/theory} The \hyperref[def:first_order_theory]{first-order theory} for semirings extends the \hyperref[def:monoid/theory]{theory of monoids}.
First, we add another \hyperref[rem:first_order_formula_conventions/infix]{infix} binary functional symbol \( \cdot \) and a constant \( 1 \). The notation for the constant is justified by \fullref{thm:semiring_characteristic_homomorphism}.
We then extend the theory of monoids with \hyperref[def:magma/commutative]{commutativity} for \( + \), \hyperref[def:magma/associative]{associativity} for \( \cdot \), and the following axioms:
\begin{thmenum}
\thmitem{def:semiring/left_distributivity} Multiplication on the left distributes over addition:
\begin{equation}\label{eq:def:semiring/left_distributivity}
\xi \cdot (\eta + \zeta) \doteq \xi \cdot \eta + \xi \cdot \zeta.
\end{equation}
\thmitem{def:semiring/right_distributivity} Multiplication on the right also distributes over addition:
\begin{equation}\label{eq:def:semiring/right_distributivity}
(\xi + \eta) \cdot \zeta \doteq \xi \cdot \zeta + \eta \cdot \zeta.
\end{equation}
If multiplication is commutative, right distributivity follows from left distributivity.
\thmitem{def:semiring/absorption} Zero is an absorbing element:
\begin{equation}\label{eq:def:semiring/absorption}
\xi \cdot 0 \doteq 0 \wedge 0 \cdot \xi \doteq 0.
\end{equation}
\thmitem{def:semiring/identity} We also restate the identity axiom \eqref{eq:def:monoid/theory/identity} for the multiplicative unit \( 1 \) to highlight its connection with \eqref{eq:def:semiring/absorption}:
\begin{equation}\label{eq:def:semiring/identity}
\xi \cdot 1 \doteq \xi \wedge 1 \cdot \xi \doteq \xi.
\end{equation}
\end{thmenum}
\thmitem{def:semiring/homomorphism} A \hyperref[def:first_order_homomorphism]{first-order homomorphism} from the semiring \( R \) to \( T \) is a function \( \varphi: R \to T \) that is a \hyperref[def:monoid/homomorphism]{monoid homomorphism} both for their additive monoids and for their multiplicative monoids.
\thmitem{def:semiring/submodel} The set \( A \subseteq R \) is a \hyperref[thm:substructure_is_model]{submodel} of \( R \) if it is both a \hyperref[def:monoid/submodel]{submonoid} of the additive monoid and also of the multiplicative monoid. We call \( A \) a \term{sub-semiring}.
As a consequence of \fullref{thm:positive_formulas_preserved_under_homomorphism}, the \hyperref[def:multi_valued_function/image]{image} of a homomorphism \( \varphi: R \to T \) is a sub-semiring of \( T \).
For an arbitrary set \( A \), we denote the \hyperref[def:first_order_generated_substructure]{generated submodel} by \( \braket{ A } \).
\thmitem{def:semiring/trivial} The \hyperref[thm:substructures_form_complete_lattice/bottom]{trivial} semiring is the \hyperref[def:pointed_set/trivial]{trivial pointed set} \( \set{ 0 } \).
See \fullref{ex:def:semiring/trivial} for some properties of the trivial semiring.
\thmitem{def:semiring/exponentiation} As we shall see in \fullref{thm:semiring_characteristic_homomorphism}, multiplication in \( \cdot \) extends left multiplication with natural numbers in the monoid \( (R, +) \). We do have a third operation, however --- \hyperref[def:monoid/exponentiation]{monoid exponentiation} in \( (R, \cdot) \).
For any integer \( n \), we have the fundamental property \( 1^n = 1 \).
\thmitem{def:semiring/commutative} If multiplication is commutative, we call the semiring itself \term{commutative}. Unless multiplication corresponds to function composition, most semirings we will encounter will be commutative.
Notable exceptions to this rule are \hyperref[def:ordinal]{ordinals}. A \hyperref[def:successor_and_limit_ordinal]{limit ordinal} \( \alpha \), regarded as the set of all smaller ordinals, is a semiring. It is not commutative, however, as shown in \fullref{ex:ordinal_addition}.
\thmitem{def:semiring/power_set} Similarly to power set magmas defined in \fullref{def:magma/power_set}, the power set \( \pow(R) \) of a semiring is also a semiring with the operations
\begin{align*}
A \oplus B &\coloneqq \set{ x + y \given x \in A \T{and} y \in B } \\
A \odot B &\coloneqq \set{ x \cdot y \given x \in A \T{and} y \in B }
\end{align*}
\thmitem{def:semiring/category} The corresponding \hyperref[def:category_of_small_first_order_models]{category of \( \mscrU \)-small models} \( \ucat{SRing} \) is \hyperref[def:concrete_category]{concrete} over \hyperref[def:monoid]{\( \ucat{CMon} \)} with the forgetful functor taking the additive monoids. We denote the category of commutative semirings by \( \cat{CSRing} \).
\thmitem{def:semiring/opposite}\mcite[555]{Knapp2016BasicAlgebra} The \term{opposite semiring} of \( (R, +, \cdot) \) is the semiring \( (R, +, \star) \), with multiplication defined as \( x \star y = y \cdot x \).
\end{thmenum}
\end{definition}
\begin{remark}\label{rem:semiring_etymology}
In \fullref{def:semiring}, we require semirings to have both an additive identity and a multiplicative identity. This is not consistent with semigroups defined in \fullref{def:magma/associative}, which in general do not have identities.
\cite[ch. 3]{GondranMinoux1984Graphs} suggest using \enquote{dioid} (short for \enquote{double monoid}) instead of \enquote{semiring}. \cite[xi]{Golan2010} describes how the term \enquote{dioid} may refer to semirings with idempotent addition, i.e. a general form of the tropical semirings defined in \fullref{def:tropical_semiring}.
We thus prefer using the term \enquote{semiring} as we have defined it in \fullref{def:semiring}.
\end{remark}
\begin{example}\label{ex:def:semiring}
We list several examples of \hyperref[def:semiring]{semirings} that are not \hyperref[def:ring]{rings}.
\begin{thmenum}
\thmitem{ex:def:semiring/trivial} A \hyperref[def:semiring/homomorphism]{semiring} is trivial if and only if \( 0_R = 1_R \). This follows from \eqref{eq:def:semiring/absorption} and \eqref{eq:def:semiring/identity}.
As a consequence, if \( \varphi: \set{ 0 } \to R \) is a \hyperref[def:semiring/homomorphism]{semiring} homomorphism, \( R \) is a trivial semiring. This is further strengthened by \fullref{thm:semiring_embedding_preserves_characterstic}.
\thmitem{ex:def:semiring/natural_numbers} The \hyperref[def:set_of_natural_numbers]{natural numbers} are the quintessential example of a semiring. We prove in \fullref{thm:natural_number_multiplication_properties} that they are a semiring.
\thmitem{ex:def:semiring/ordinals} Every \hyperref[def:successor_and_limit_ordinal]{limit ordinal} is a monoid under addition, as discussed in \fullref{ex:def:semiring/ordinals}, however it is not commutative.
\hyperref[def:cardinal_arithmetic/addition]{Cardinal addition} is commutative, however, and hence for every \hyperref[def:successor_and_limit_cardinal/weak_limit]{limit cardinal} \( \kappa \), the set of all cardinals smaller than \( \kappa \) are a semiring.
\thmitem{ex:def:semiring/lattice} We discussed in \fullref{ex:def:monoid/semilattice} that in a \hyperref[def:semilattice/bounded]{bounded lattice} \( (X, \vee, \wedge, \top, \bot) \), both \( (X, \vee, \bot) \) and \( (X, \wedge, \top) \) are monoids.
As a consequence of \fullref{thm:bounded_lattice_absorbing}, \( \bot \) is absorbing with respect to \( \wedge \) and \( \top \) with respect to \( \vee \). Therefore, if the lattice is \hyperref[def:semilattice/distributive_lattice]{distributive}, as a consequence of \fullref{thm:bounded_lattice_absorbing}, both \( (X, \vee, \wedge) \) and \( (X, \wedge, \vee) \) are semirings.
We refer to these semirings as the positive and negative semiring of the lattice. This terminology comes from \fullref{ex:def:ordered_semiring/lattice}.
\end{thmenum}
\end{example}
\begin{definition}\label{def:tropical_semiring}\mcite[exmpl. 1.12]{Golan2010}
Consider the additive monoid \( (\BbbN, +) \) of natural numbers or, more generally, an \hyperref[def:ordered_magma]{ordered} \hyperref[def:magma/commutative]{commutative} \hyperref[def:monoid]{monoid} \( (M, +, \leq) \).
We adjoin a \hyperref[def:partially_ordered_set_extremal_points/top_and_bottom]{top element} \( \infty \) to \( M \) that is absorbing with respect to addition. That is, \( x + \infty = \infty \) for every \( x \in M \).
The \( \min \)-plus semiring over \( M \) is the triple \( (M \cup \set{ \infty }, \min, +) \). The \hyperref[def:partially_ordered_set_extremal_points/maximum_and_minimum]{minimum} as a binary operation plays the role of semiring addition, with \( \infty \) as the zero element. The usual addition in \( M \) extended with \( \infty \) plays the role of semiring multiplication, with \( 0 \) as the multiplicative identity.
We analogously define the \( \max \)-plus semiring, adjoining a \hyperref[def:partially_ordered_set_extremal_points/top_and_bottom]{bottom element} \( -\infty \) rather than a top element \( \infty \).
We will sometimes use \enquote{tropical semiring} to refer to either type of semirings. See \fullref{rem:tropical_semiring_etymology}.
\end{definition}
\begin{defproof}
We will only show \hyperref[def:semiring/left_distributivity]{distributivity}. If \( x \leq y \), since \( \leq \) is compatible with \( + \), we have
\begin{equation*}
\underbrace{\min\set{ x, y }}_{x} + z = x + z \leq y + z.
\end{equation*}
Therefore,
\begin{equation*}
\min\set{ x, y } + z = \min\set{ x + z , y + z }.
\end{equation*}
\end{defproof}
\begin{remark}\label{rem:tropical_semiring_etymology}
\hyperref[def:tropical_semiring]{\( \min \)-plus} and \( \max \)-plus semirings are sometimes referred to as the \term{tropical semirings}. This term is ambiguous, unfortunately, but it gives rise to the terms \enquote{tropical geometry} and \enquote{tropical optimization}.
According to \cite{Pin1994}, the name \enquote{tropical semiring} is a dedication to the Brazilian-born Imre Simon. The paper also introduces the terms \enquote{tropical integers}, \enquote{tropical reals}, etc. \cite[3]{Golan2010} refers to the more general notion of additively-idempotent semirings. Both reserve the term \enquote{tropical semiring} for the case where \( M = \BbbN \). \cite[ch. 3]{GondranMinoux1984Graphs} does not explicitly use the word \enquote{tropical}, but instead refers to semirings as \enquote{dioids}, and the latter term sometimes refers to additively-idempotent semirings.
\end{remark}
\begin{proposition}\label{thm:semiring_characteristic_homomorphism}
For every \hyperref[def:semiring/identity]{semiring}, multiplication extends the abelian group multiplication.
More precisely, denote the additive identity by \( 0_R \) and the multiplicative identity by \( 1_R \). Define the following semiring homomorphism:
\begin{equation}\label{eq:thm:semiring_characteristic_homomorphism}
\begin{aligned}
&\iota: \BbbN \to R \\
&\iota(n) \coloneqq \begin{cases}
0_R &n = 0, \\
\iota(n - 1) + 1_R &n > 0.
\end{cases}
\end{aligned}
\end{equation}
This is the unique homomorphism from \( \BbbN \) to \( R \). Furthermore, we have the following analogue to \eqref{eq:def:magma/exponentiation}:
\begin{equation}\label{eq:thm:semiring_characteristic_homomorphism/multiplication}
\iota(n) \cdot x \coloneqq \begin{cases}
0_R, &n = 0, \\
\iota(n - 1) \cdot x + x, &n > 0.
\end{cases}
\end{equation}
\end{proposition}
\begin{proof}
First note that \eqref{eq:thm:semiring_characteristic_homomorphism/multiplication} follows from \eqref{eq:thm:semiring_characteristic_homomorphism} via \hyperref[def:semiring/right_distributivity]{right distributivity}.
It remains to show that \( \iota \) is a monoid homomorphism, and that it is unique. Clearly \( \iota(0) = 0_R \) and \( \iota(1) = 1_R \). Proving \( \iota(n + m) = \iota(n) + \iota(m) \) and \( \iota(nm) = \iota(n) \cdot \iota(m) \) can be done via nested induction.
Now suppose \( \varphi: \BbbN \to R \) is a homomorphism. It is clear that \( \varphi(0) = 0_R \) and \( \varphi(1) = 1_R \), and also
\begin{equation*}
\varphi(n + 1) = \varphi(n) + \varphi(1) = \varphi(n) + 1_R.
\end{equation*}
This implies \( \iota = \varphi \).
\end{proof}
\begin{proposition}\label{thm:category_of_semirings_properties}
The \hyperref[def:semiring/category]{category of semirings} has the following basic properties:
\begin{thmenum}
\thmitem{thm:category_of_semirings_properties/initial} The \hyperref[def:set_of_integers]{ring of integers} \( \BbbZ \) is an \hyperref[def:universal_objects/initial]{initial object}.
\thmitem{thm:category_of_semirings_properties/terminal} The trivial semiring \( \set{ 0 } \) is an \hyperref[def:universal_objects/terminal]{terminal object}.
\end{thmenum}
\end{proposition}
\begin{proof}
\SubProofOf{thm:category_of_semirings_properties/initial} Follows from \fullref{thm:semiring_characteristic_homomorphism}.
\SubProofOf{thm:category_of_semirings_properties/terminal} Follows from \fullref{ex:def:semiring/trivial}.
\end{proof}
\begin{definition}\label{def:ordered_semiring}\mcite[224]{Golan2010}
An \term{ordered semiring} is a \hyperref[def:magma/commutative]{commutative} semiring \( R \) with a \hyperref[def:partially_ordered_set]{partial order} \( \leq \) such that \( (R, +) \) is an \hyperref[def:ordered_magma]{ordered magma} and, additionally, \( x \leq y \) and \( 0 \leq z \) imply \( xz \leq yz \).
As in \fullref{def:ordered_magma}, the commutativity condition can be avoided, but then we would need to also require \( zx \leq zy \).
If the semiring is \hyperref[def:totally_ordered_set]{totally ordered}, we can use the usual terminology that is conventional for real numbers:
\begin{itemize}
\item \( x \) is \term{positive} if \( x > 0 \).
\item \( x \) is \term{nonnegative} if \( x \geq 0 \).
\item \( x \) is \term{negative} if \( x < 0 \).
\item \( x \) is \term{nonpositive} if \( x \leq 0 \).
\end{itemize}
\end{definition}
\begin{example}\label{ex:def:ordered_semiring}
We list several examples of \hyperref[def:ordered_semiring]{ordered semirings}.
\begin{thmenum}
\thmitem{ex:def:ordered_semiring/natural_numbers} The \hyperref[def:set_of_natural_numbers]{natural numbers} form an ordered semiring as shown in \fullref{thm:natural_numbers_are_well_ordered}.
\thmitem{ex:def:ordered_semiring/lattice} We discussed in \fullref{ex:def:semiring/lattice} that a \hyperref[def:semilattice/bounded]{bounded} \hyperref[def:semilattice/distributive_lattice]{distributive} \hyperref[def:semilattice/lattice]{lattice} \( (X, \vee, \wedge) \) can be regarded as a semiring, and so can its opposite lattice.
We discussed in \fullref{ex:def:ordered_magma/semilattice} that both \( (X, \vee) \) and \( (X, \wedge) \) are \hyperref[def:ordered_magma]{ordered magmas}. Both \( (X, \vee, \wedge) \) and \( (X, \wedge, \vee) \) vacuously satisfy the condition from \fullref{def:ordered_semiring}, which makes them ordered semirings.
All elements of the ordered semiring \( (X, \vee, \wedge) \) are nonnegative and all elements of \( (X, \wedge, \vee) \) are nonpositive. With a slight abuse of notation, we refer to them as the \term{positive} and \term{negative} semirings of the lattice.
\end{thmenum}
\end{example}
\begin{definition}\label{def:divisibility}\mimprovised
Fix an arbitrary element \( x \) in a \hyperref[def:semiring]{semiring}. If there exist elements \( l \) and \( r \) such that \( x = lr \), we say that \( l \) is a \term{left divisor} of \( x \), and that \( r \) is a \term{right divisor}.
In a \hyperref[def:semiring/commutative]{commutative semiring}, the two notions coincide, and we simply use the term \enquote{divisor}. If \( x \) is a divisor of \( y \), we write \( x \mid y \) and say that \( y \) is a \term{multiple} of \( x \). Most rings we will encounter will be commutative, but it is useful to have the weaker notions of left and right divisors.
\begin{thmenum}
\thmitem{def:divisibility/zero}\mcite[4]{Golan2010} Divisors of \( 0 \) are called \term{zero divisors}. Due to \hyperref[def:semiring/absorption]{absorption}, every semiring element is a zero divisor. If \( lr = 0 \) for nonzero \( l \) and \( r \), we say that \( l \) (resp. \( r \)) is a \term{nontrivial} left (resp. right) zero divisor.
\thmitem{def:divisibility/unit} Divisors of \( 1 \) are called \term{invertible}, since they are precisely the \hyperref[def:monoid_inverse]{monoid inverses} under multiplication. They are also sometimes called \term{units}.
The set of all two-sided units of \( R \) is precisely the \hyperref[def:semiring]{multiplicative group} \( R^\times \).
\end{thmenum}
\end{definition}
\begin{example}\label{ex:def:divisibility}
\hfill
\begin{thmenum}
\thmitem{ex:def:divisibility/integers} The positive integers are commutative and their left and right divisors coincide. They have no \hyperref[def:divisibility/zero]{nontrivial zero divisors} as a consequence of \fullref{thm:natural_number_multiplication_properties}.
\thmitem{ex:def:divisibility/matrix_zero_divisors} A simple example of nontrivial zero divisors is given by the \hyperref[def:matrix_algebra]{matrix algebra} \( \BbbZ^{2 \times 2} \). We have
\begin{equation*}
\underbrace
{
\begin{pmatrix}
0 & 1 \\
0 & 0
\end{pmatrix}
}_{L}
\underbrace
{
\begin{pmatrix}
0 & 0 \\
0 & 1
\end{pmatrix}
}_{R}
=
\begin{pmatrix}
0 & 0 \\
0 & 0
\end{pmatrix}.
\end{equation*}
Therefore, \( L \) is a left zero divisor and \( R \) is a right zero divisor. The two do not commute because
\begin{equation*}
\underbrace
{
\begin{pmatrix}
0 & 0 \\
0 & 1
\end{pmatrix}
}_{R}
\underbrace
{
\begin{pmatrix}
0 & 1 \\
0 & 0
\end{pmatrix}
}_{L}
=
\begin{pmatrix}
0 & 0 \\
1 & 0
\end{pmatrix}.
\end{equation*}
Nevertheless, \( RLRL \) is the zero matrix, so \( R \) is a left zero divisor and \( L \) is a right zero divisor.
\end{thmenum}
\end{example}
\begin{proposition}\label{thm:divisibility_and_isomorphisms}
Suppose that \( R \) and \( S \) are \hyperref[def:semiring/commutative]{commutative semirings}.
\begin{thmenum}
\thmitem{thm:divisibility_and_isomorphisms/divisibility} If \( \varphi: R \to S \) is any homomorphism, then \( x \mid y \) implies \( \varphi(x) \mid \varphi(y) \). The converse holds if \( \varphi \) is an isomorphism.
\thmitem{thm:divisibility_and_isomorphisms/zero} If \( R \) and \( S \) are isomorphic, the \hyperref[def:divisibility/zero]{zero divisors} of \( R \) are precisely the zero divisors of \( S \).
\thmitem{thm:divisibility_and_isomorphisms/unit} If \( R \) and \( S \) are isomorphic, the \hyperref[def:divisibility/unit]{units} of \( R \) are precisely the units of \( S \).
\end{thmenum}
\end{proposition}
\begin{proof}
\SubProofOf{thm:divisibility_and_isomorphisms/divisibility} If \( x \mid y \), then \( xr = y \) for some \( r \in R \). Then \( \varphi(x) \varphi(r) = \varphi(y) \), hence \( \varphi(x) \mid \varphi(y) \). If \( \varphi \) is an isomorphism, the converse follows by using \( \varphi^{-1}: S \to R \).
\SubProofOf{thm:divisibility_and_isomorphisms/zero} Follows from \fullref{thm:divisibility_and_isomorphisms/divisibility} by noting that homomorphisms preserve zeros.
\SubProofOf{thm:divisibility_and_isomorphisms/unit} Follows from \fullref{thm:divisibility_and_isomorphisms/divisibility} by noting that homomorphisms preserve ones.
\end{proof}
\begin{proposition}\label{thm:semiring_cancellative_iff_no_zero_divisors}
An element of a \hyperref[def:semiring/commutative]{commutative semiring} is cancellable if and only if it is not a \hyperref[def:divisibility]{zero divisor}. That is, \( x \mid 0 \) if and only if \( xy = xz \) does not imply \( y = z \).
\end{proposition}
\begin{proof}
Let \( x \) be a nonzero element.
\SufficiencySubProof Suppose that \( x \) is a zero divisor and let \( y \) be such that \( xy = 0 \). For any element \( z \), we have
\begin{equation*}
xy = 0 = x(yz).
\end{equation*}
But \( y \neq yz \) unless \( z = 1 \). Thus, \( x \) is not cancellable.
\NecessitySubProof Suppose that \( x \) is cancellable.
Suppose also that \( xy = 0 \) for some nonzero \( y \). Then \( xy = x0 \), which implies \( y = 0 \). But this contradicts our choice of \( y \).
Thus, \( x \) is not a zero divisor.
\end{proof}
\begin{definition}\label{def:entire_semiring}
We say that the \hyperref[def:semiring]{semiring} \( R \) is \term{entire} if any of the following equivalent conditions hold:
\begin{thmenum}
\thmitem{def:entire_semiring/zero_divisors}\mcite[4]{Golan2010} \( R \) has no \hyperref[def:divisibility/zero]{nontrivial zero divisors}.
\thmitem{def:entire_semiring/cancellation} \( R \setminus \set{ 0_R } \) is a \hyperref[def:magma/cancellative]{cancellative} \hyperref[def:monoid]{monoid} with respect to multiplication.
\end{thmenum}
\end{definition}
\begin{defproof}
The equivalence follows from \fullref{thm:semiring_cancellative_iff_no_zero_divisors}.
\end{defproof}
\begin{proposition}\label{thm:semiring_divisibility_order}
In an \hyperref[def:entire_semiring]{entire} \hyperref[def:semiring/commutative]{commutative semiring}, the \hyperref[def:divisibility]{divisibility} relation is a \hyperref[def:preordered_set]{preorder}.
It is not a partial order in general. To avoid the nonuniqueness problems described in \fullref{ex:preorder_nonuniqueness}, we instead prefer working with ideals. See \fullref{rem:lattice_of_principal_ideals} for the general approach.
\end{proposition}
\begin{proof}
Fix a semiring \( R \).
\SubProofOf[def:binary_relation/reflexive]{reflexivity} Clearly every element of \( R \) divides itself.
\SubProofOf[def:binary_relation/transitive]{transitivity} Let \( x \mid y \mid z \). Then there exist elements \( a \) and \( b \) such that \( y = a x \) and \( z = b y \). Hence, \( z = (ba) x \) and \( x \mid z \).
\end{proof}
\begin{definition}\label{def:zerosumfree}\mcite[4]{Golan2010}
We say that an \hyperref[rem:additive_magma]{additive} \hyperref[def:monoid]{monoid} is \term{zerosumfree} if the \hyperref[thm:invertible_submonoid_is_group]{additive group} is trivial. That is, if \( x + y = 0 \) implies \( x = y = 0 \).
\end{definition}
\begin{example}\label{ex:def:zerosumfree}
We list several examples of \hyperref[def:zerosumfree]{zerosumfree} semirings:
\begin{thmenum}
\thmitem{ex:def:zerosumfree/natural_numbers} By \fullref{thm:natural_number_addition_properties}, the natural numbers are zerosumfree.
\thmitem{ex:def:zerosumfree/lattice} We discussed in \fullref{ex:def:semiring/lattice} that every bounded distributive lattice \( (X, \vee, \wedge) \) has two associated semirings.
We will show that the positive semiring \( (X, \vee, \wedge) \) is zerosumfree. The proof only relies on \( \vee \) being idempotent. Suppose that \( x \vee y = \bot \). Then
\begin{equation*}
\bot
=
x \vee y
\reloset {\eqref{eq:def:magma/idempotent}} =
(x \vee x) \vee y
\reloset {\eqref{eq:def:magma/associative}} =
x \vee (x \vee y)
=
x \vee \bot
\reloset {\eqref{eq:thm:binary_lattice_operations/identity/join}} =
x.
\end{equation*}
Therefore, \( x = \bot \). But \( \bot \vee y = y \), hence \( x \vee y = \bot \) implies \( y = \bot \).
This demonstrates that the positive semiring is zerosumfree.
\thmitem{ex:def:zerosumfree/tropical} The \hyperref[def:tropical_semiring]{\( \min \)-plus semiring} \( (\BbbN \cup \set{ \infty }, \min, +) \) discussed in \fullref{def:tropical_semiring} is also zerosumfree. Indeed, \( \min \) is idempotent, and the proof is analogous to the one for lattices in \fullref{ex:def:zerosumfree/lattice}.
\end{thmenum}
\end{example}
|
{"hexsha": "cf077421fa1eea643df8b1425149891398a68944", "size": 27232, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/semirings.tex", "max_stars_repo_name": "v--/anthology", "max_stars_repo_head_hexsha": "89a91b5182f187bc1aa37a2054762dd0078a7b56", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/semirings.tex", "max_issues_repo_name": "v--/anthology", "max_issues_repo_head_hexsha": "89a91b5182f187bc1aa37a2054762dd0078a7b56", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/semirings.tex", "max_forks_repo_name": "v--/anthology", "max_forks_repo_head_hexsha": "89a91b5182f187bc1aa37a2054762dd0078a7b56", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 68.7676767677, "max_line_length": 606, "alphanum_fraction": 0.7072194477, "num_tokens": 8598}
|
'Create a dual VAE-HMM model.'
import argparse
import logging
import pickle
import yaml
import numpy as np
import torch
import beer
# Plain "LEVEL: message" log lines for this command-line tool.
logging.basicConfig(format='%(levelname)s: %(message)s')
# Maps the --encoder-cov-type CLI choice to the beer layer class that
# parameterizes the Normal distribution produced by the encoder.
encoder_normal_layer = {
    'isotropic': beer.nnet.NormalIsotropicCovarianceLayer,
    'diagonal': beer.nnet.NormalDiagonalCovarianceLayer
}
def main():
    """Assemble a dual VAE-HMM from pickled parts and pickle the result.

    Loads the encoder, two normalizing flows, two latent models and the
    decoder from the paths given on the command line, wires them into a
    ``beer.DualVAEGlobalMeanVariance`` and writes the model to ``args.out``.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    # Options first, then positional arguments; the order of the positional
    # add_argument calls is the CLI contract and must not change.
    parser.add_argument('--encoder-cov-type',
                        choices=['isotropic', 'diagonal'],
                        help='type of covariance for the encoder.')
    parser.add_argument('--decoder-cov-type',
                        choices=list(encoder_normal_layer.keys()),
                        help='type of covariance for the decoder.')
    parser.add_argument('stats', help='training data stats for ' \
                        'initialization')
    parser.add_argument('encoder_out_dim', type=int,
                        help='dimension of output of the encoder.')
    parser.add_argument('latent_dim', type=int,
                        help='dimension of the latent space')
    parser.add_argument('encoder', help='encoder network')
    parser.add_argument('nflow1', help='first normalizing flow network')
    parser.add_argument('nflow2', help='second normalizing flow network')
    parser.add_argument('latent_model1', help='model over the fist latent space')
    parser.add_argument('latent_model2', help='model over the second latent space')
    parser.add_argument('decoder', help='decoder network')
    parser.add_argument('out', help='output model')
    args = parser.parse_args()

    def load_pickle(path):
        # Deserialize one pickled object from `path`.
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    # Data statistics used to initialize the decoder's observation model.
    stats = np.load(args.stats)
    encoder = load_pickle(args.encoder)
    nnet_flow1, flow_params_dim1 = load_pickle(args.nflow1)
    nnet_flow2, flow_params_dim2 = load_pickle(args.nflow2)
    latent_model1 = load_pickle(args.latent_model1)
    latent_model2 = load_pickle(args.latent_model2)
    decoder = load_pickle(args.decoder)

    def build_flow(flow_params_dim, nnet_flow):
        # One inverse autoregressive flow mapping the encoder output onto a
        # latent-space distribution; both flows share the encoder cov type.
        layer_cls = encoder_normal_layer[args.encoder_cov_type]
        normal_layer = layer_cls(args.encoder_out_dim, args.latent_dim)
        return beer.nnet.InverseAutoRegressiveFlow(
            dim_in=args.encoder_out_dim,
            flow_params_dim=flow_params_dim,
            normal_layer=normal_layer,
            nnet_flow=nnet_flow
        )

    nflow1 = build_flow(flow_params_dim1, nnet_flow1)
    nflow2 = build_flow(flow_params_dim2, nnet_flow2)

    # Observation model: a Normal initialized from the training statistics.
    data_mean = torch.from_numpy(stats['mean']).float()
    data_var = torch.from_numpy(stats['var']).float()
    normal = beer.Normal.create(data_mean, data_var,
                                cov_type=args.decoder_cov_type)

    vae = beer.DualVAEGlobalMeanVariance(encoder, nflow1, nflow2, decoder,
                                         normal, latent_model1, latent_model2)

    with open(args.out, 'wb') as fh:
        pickle.dump(vae, fh)


if __name__ == '__main__':
    main()
|
{"hexsha": "41996795978e4d25427a60dc6faa64f7614ce763", "size": 3386, "ext": "py", "lang": "Python", "max_stars_repo_path": "recipes/timit_v2/utils/dual-vae-hmm-create.py", "max_stars_repo_name": "RobinAlgayres/beer", "max_stars_repo_head_hexsha": "15ad0dad5a49f98e658e948724e05df347ffe3b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2018-02-27T18:15:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T22:10:55.000Z", "max_issues_repo_path": "recipes/timit_v2/utils/dual-vae-hmm-create.py", "max_issues_repo_name": "RobinAlgayres/beer", "max_issues_repo_head_hexsha": "15ad0dad5a49f98e658e948724e05df347ffe3b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2018-01-26T14:18:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-05T09:34:00.000Z", "max_forks_repo_path": "recipes/timit_v2/utils/dual-vae-hmm-create.py", "max_forks_repo_name": "RobinAlgayres/beer", "max_forks_repo_head_hexsha": "15ad0dad5a49f98e658e948724e05df347ffe3b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2018-03-12T14:03:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-24T21:15:01.000Z", "avg_line_length": 32.2476190476, "max_line_length": 83, "alphanum_fraction": 0.6582988777, "include": true, "reason": "import numpy", "num_tokens": 772}
|
# The incredible pressures at this depth are starting to put a strain on your
# submarine. The submarine has polymerization equipment that would produce
# suitable materials to reinforce the submarine, and the nearby
# volcanically-active caves should even have the necessary input elements in
# sufficient quantities.
#
# The submarine manual contains instructions for finding the optimal polymer
# formula; specifically, it offers a polymer template and a list of pair
# insertion rules (your puzzle input). You just need to work out what polymer
# would result after repeating the pair insertion process a few times.
#
# For example:
#
# NNCB
#
# CH -> B
# HH -> N
# CB -> H
# NH -> C
# HB -> C
# HC -> B
# HN -> C
# NN -> C
# BH -> H
# NC -> B
# NB -> B
# BN -> B
# BB -> N
# BC -> B
# CC -> N
# CN -> C
#
# The first line is the polymer template - this is the starting point of the
# process.
#
# The following section defines the pair insertion rules. A rule like AB -> C
# means that when elements A and B are immediately adjacent, element C should
# be inserted between them. These insertions all happen simultaneously.
#
# So, starting with the polymer template NNCB, the first step simultaneously
# considers all three pairs:
#
# - The first pair (NN) matches the rule NN -> C, so element C is inserted
# between the first N and the second N.
# - The second pair (NC) matches the rule NC -> B, so element B is inserted
# between the N and the C.
# - The third pair (CB) matches the rule CB -> H, so element H is inserted
# between the C and the B.
#
# Note that these pairs overlap: the second element of one pair is the first
# element of the next pair. Also, because all pairs are considered
# simultaneously, inserted elements are not considered to be part of a pair
# until the next step.
#
# After the first step of this process, the polymer becomes NCNBCHB.
#
# Here are the results of a few steps using the above rules:
#
# Template: NNCB
# After step 1: NCNBCHB
# After step 2: NBCCNBBBCBHCB
# After step 3: NBBBCNCCNBBNBNBBCHBHHBCHB
# After step 4: NBBNBNBBCCNBCNCCNBBNBBNBBBNBBNBBCBHCBHHNHCBBCBHCB
#
# This polymer grows quickly. After step 5, it has length 97; After step 10, it
# has length 3073. After step 10, B occurs 1749 times, C occurs 298 times, H
# occurs 161 times, and N occurs 865 times; taking the quantity of the most
# common element (B, 1749) and subtracting the quantity of the least common
# element (H, 161) produces 1749 - 161 = 1588.
#
# Apply 10 steps of pair insertion to the polymer template and find the most
# and least common elements in the result. What do you get if you take the
# quantity of the most common element and subtract the quantity of the least
# common element?
const steps = 10

# read input: first line is the polymer template, then a blank line,
# then the "XY -> Z" pair-insertion rules
iter = eachline("input.txt")
(template, _) = iterate(iter)
iterate(iter) # blank line
rules = Dict([split(l, " -> ") for l in iter])

# Count elements in the starting template.
# BUG FIX: the previous `Dict([(c, 1) for c in template])` silently collapsed
# repeated characters to a count of 1, because a Dict built from duplicate
# keys keeps only the last entry. Tally incrementally instead so e.g. the
# template NNCB starts with N => 2.
cnts = Dict{Char,Int}()
for c in template
    cnts[c] = get(cnts, c, 0) + 1
end
# Elements that only ever appear as rule outputs start at 0
for (_, c) in rules
    c = c[1]
    if !haskey(cnts, c)
        cnts[c] = 0
    end
end

# Recursive function to expand one adjacent pair (l, r): insert the rule's
# middle element, count it, and recurse on both new pairs until `steps`
# insertions have been simulated for this pair.
function step(l, r, depth=1)
    depth > steps && return
    m = rules[l*r][1]
    cnts[m] += 1
    step(l, m, depth+1)
    step(m, r, depth+1)
end

# do eet - iterate over the adjacent index pairs (1,2), (2,3), ..., (len-1, len)
foreach(zip(1:(length(template)-1), 2:length(template))) do (i,j)
    step(template[i], template[j])
end

# extrema over the counts gives (least, most); -(min, max) = min - max, abs flips it
println("Result: ", abs(-(extrema(last, cnts)...)))
|
{"hexsha": "00da95fa0150f7100ae81192ffb8819b7fdc6801", "size": 3484, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "day14/part1.jl", "max_stars_repo_name": "bmatcuk/adventofcode2021", "max_stars_repo_head_hexsha": "57b9297213acd271b73784fbbe38c5cc248d7c28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-07T14:21:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-07T14:21:53.000Z", "max_issues_repo_path": "day14/part1.jl", "max_issues_repo_name": "bmatcuk/adventofcode2021", "max_issues_repo_head_hexsha": "57b9297213acd271b73784fbbe38c5cc248d7c28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "day14/part1.jl", "max_forks_repo_name": "bmatcuk/adventofcode2021", "max_forks_repo_head_hexsha": "57b9297213acd271b73784fbbe38c5cc248d7c28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5607476636, "max_line_length": 79, "alphanum_fraction": 0.7086681975, "num_tokens": 991}
|
[STATEMENT]
lemma iso_char:
shows "iso \<mu> \<longleftrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)"
and "iso \<mu> \<Longrightarrow> inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>)) &&& (local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
2. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
have 1: "iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
[PROOF STEP]
assume \<mu>: "iso \<mu>"
[PROOF STATE]
proof (state)
this:
local.iso \<mu>
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
[PROOF STEP]
obtain \<nu> where \<nu>: "inverse_arrows \<mu> \<nu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>\<nu>. inverse_arrows \<mu> \<nu> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using \<mu>
[PROOF STATE]
proof (prove)
using this:
local.iso \<mu>
goal (1 subgoal):
1. (\<And>\<nu>. inverse_arrows \<mu> \<nu> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
inverse_arrows \<mu> \<nu>
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
[PROOF STEP]
have "B.inverse_arrows (Map \<mu>) (Map \<nu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B.inverse_arrows (Map \<mu>) (Map \<nu>)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
2. B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
[PROOF STEP]
show "B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
[PROOF STEP]
have "Map \<mu> \<cdot>\<^sub>B Map \<nu> = Map (\<mu> \<cdot> \<nu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Map \<mu> \<cdot>\<^sub>B Map \<nu> = Map (\<mu> \<cdot> \<nu>)
[PROOF STEP]
using \<mu> \<nu> inverse_arrows_def Map_comp arr_char seq_char
[PROOF STATE]
proof (prove)
using this:
local.iso \<mu>
inverse_arrows \<mu> \<nu>
inverse_arrows ?f ?g \<equiv> ide (?g \<cdot> ?f) \<and> ide (?f \<cdot> ?g)
\<lbrakk>arr ?f; arr ?g; Dom ?g = Cod ?f\<rbrakk> \<Longrightarrow> Map (?g \<cdot> ?f) = Map ?g \<cdot>\<^sub>B Map ?f
arr ?F = (E.Nml (Dom ?F) \<and> E.Ide (Dom ?F) \<and> E.Nml (Cod ?F) \<and> E.Ide (Cod ?F) \<and> E.Src (Dom ?F) = E.Src (Cod ?F) \<and> E.Trg (Dom ?F) = E.Trg (Cod ?F) \<and> \<guillemotleft>Map ?F : EVAL (Dom ?F) \<rightarrow>\<^sub>B EVAL (Cod ?F)\<guillemotright> \<and> ?F \<noteq> Null)
seq ?g ?f = (arr ?f \<and> arr ?g \<and> Dom ?g = Cod ?f)
goal (1 subgoal):
1. Map \<mu> \<cdot>\<^sub>B Map \<nu> = Map (\<mu> \<cdot> \<nu>)
[PROOF STEP]
by (metis (no_types, lifting) ide_compE)
[PROOF STATE]
proof (state)
this:
Map \<mu> \<cdot>\<^sub>B Map \<nu> = Map (\<mu> \<cdot> \<nu>)
goal (1 subgoal):
1. B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Map \<mu> \<cdot>\<^sub>B Map \<nu> = Map (\<mu> \<cdot> \<nu>)
goal (1 subgoal):
1. B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
[PROOF STEP]
have "B.ide ..."
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B.ide (Map (\<mu> \<cdot> \<nu>))
[PROOF STEP]
using \<nu> ide_char
[PROOF STATE]
proof (prove)
using this:
inverse_arrows \<mu> \<nu>
ide ?F = (endo ?F \<and> B.ide (Map ?F))
goal (1 subgoal):
1. B.ide (Map (\<mu> \<cdot> \<nu>))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
B.ide (Map (\<mu> \<cdot> \<nu>))
goal (1 subgoal):
1. B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Map \<mu> \<cdot>\<^sub>B Map \<nu> = Map (\<mu> \<cdot> \<nu>)
B.ide (Map (\<mu> \<cdot> \<nu>))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Map \<mu> \<cdot>\<^sub>B Map \<nu> = Map (\<mu> \<cdot> \<nu>)
B.ide (Map (\<mu> \<cdot> \<nu>))
goal (1 subgoal):
1. B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
B.ide (Map \<mu> \<cdot>\<^sub>B Map \<nu>)
goal (1 subgoal):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
[PROOF STEP]
show "B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
[PROOF STEP]
have "Map \<nu> \<cdot>\<^sub>B Map \<mu> = Map (\<nu> \<cdot> \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Map \<nu> \<cdot>\<^sub>B Map \<mu> = Map (\<nu> \<cdot> \<mu>)
[PROOF STEP]
using \<mu> \<nu> inverse_arrows_def comp_char [of \<nu> \<mu>]
[PROOF STATE]
proof (prove)
using this:
local.iso \<mu>
inverse_arrows \<mu> \<nu>
inverse_arrows ?f ?g \<equiv> ide (?g \<cdot> ?f) \<and> ide (?f \<cdot> ?g)
\<nu> \<cdot> \<mu> = (if seq \<nu> \<mu> then MkArr (Dom \<mu>) (Cod \<nu>) (Map \<nu> \<cdot>\<^sub>B Map \<mu>) else null)
goal (1 subgoal):
1. Map \<nu> \<cdot>\<^sub>B Map \<mu> = Map (\<nu> \<cdot> \<mu>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Map \<nu> \<cdot>\<^sub>B Map \<mu> = Map (\<nu> \<cdot> \<mu>)
goal (1 subgoal):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
Map \<nu> \<cdot>\<^sub>B Map \<mu> = Map (\<nu> \<cdot> \<mu>)
goal (1 subgoal):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
[PROOF STEP]
have "B.ide ..."
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B.ide (Map (\<nu> \<cdot> \<mu>))
[PROOF STEP]
using \<nu> ide_char
[PROOF STATE]
proof (prove)
using this:
inverse_arrows \<mu> \<nu>
ide ?F = (endo ?F \<and> B.ide (Map ?F))
goal (1 subgoal):
1. B.ide (Map (\<nu> \<cdot> \<mu>))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
B.ide (Map (\<nu> \<cdot> \<mu>))
goal (1 subgoal):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
Map \<nu> \<cdot>\<^sub>B Map \<mu> = Map (\<nu> \<cdot> \<mu>)
B.ide (Map (\<nu> \<cdot> \<mu>))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
Map \<nu> \<cdot>\<^sub>B Map \<mu> = Map (\<nu> \<cdot> \<mu>)
B.ide (Map (\<nu> \<cdot> \<mu>))
goal (1 subgoal):
1. B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
B.ide (Map \<nu> \<cdot>\<^sub>B Map \<mu>)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
B.inverse_arrows (Map \<mu>) (Map \<nu>)
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
[PROOF STEP]
thus "arr \<mu> \<and> B.iso (Map \<mu>)"
[PROOF STATE]
proof (prove)
using this:
B.inverse_arrows (Map \<mu>) (Map \<nu>)
goal (1 subgoal):
1. arr \<mu> \<and> B.iso (Map \<mu>)
[PROOF STEP]
using \<mu>
[PROOF STATE]
proof (prove)
using this:
B.inverse_arrows (Map \<mu>) (Map \<nu>)
local.iso \<mu>
goal (1 subgoal):
1. arr \<mu> \<and> B.iso (Map \<mu>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
arr \<mu> \<and> B.iso (Map \<mu>)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
goal (2 subgoals):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
2. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
let ?\<nu> = "MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
2. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
have 2: "arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> iso \<mu> \<and> inv \<mu> = ?\<nu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu> \<and> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
2. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
assume \<mu>: "arr \<mu> \<and> B.iso (Map \<mu>)"
[PROOF STATE]
proof (state)
this:
arr \<mu> \<and> B.iso (Map \<mu>)
goal (2 subgoals):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
2. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
have \<nu>: "\<guillemotleft>?\<nu> : cod \<mu> \<Rightarrow> dom \<mu>\<guillemotright>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<guillemotleft>MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) : cod \<mu> \<Rightarrow> local.dom \<mu>\<guillemotright>
[PROOF STEP]
using \<mu> arr_char dom_char cod_char
[PROOF STATE]
proof (prove)
using this:
arr \<mu> \<and> B.iso (Map \<mu>)
arr ?F = (E.Nml (Dom ?F) \<and> E.Ide (Dom ?F) \<and> E.Nml (Cod ?F) \<and> E.Ide (Cod ?F) \<and> E.Src (Dom ?F) = E.Src (Cod ?F) \<and> E.Trg (Dom ?F) = E.Trg (Cod ?F) \<and> \<guillemotleft>Map ?F : EVAL (Dom ?F) \<rightarrow>\<^sub>B EVAL (Cod ?F)\<guillemotright> \<and> ?F \<noteq> Null)
local.dom ?f = (if arr ?f then MkIde (Dom ?f) else null)
cod ?f = (if arr ?f then MkIde (Cod ?f) else null)
goal (1 subgoal):
1. \<guillemotleft>MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) : cod \<mu> \<Rightarrow> local.dom \<mu>\<guillemotright>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<guillemotleft>MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) : cod \<mu> \<Rightarrow> local.dom \<mu>\<guillemotright>
goal (2 subgoals):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
2. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
have 4: "inverse_arrows \<mu> ?\<nu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inverse_arrows \<mu> (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
2. ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
show "ide (?\<nu> \<cdot> \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
[PROOF STEP]
have "?\<nu> \<cdot> \<mu> = dom \<mu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu> = local.dom \<mu>
[PROOF STEP]
using \<mu> \<nu> MkArr_Map comp_char seq_char B.comp_inv_arr' dom_char
[PROOF STATE]
proof (prove)
using this:
arr \<mu> \<and> B.iso (Map \<mu>)
\<guillemotleft>MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) : cod \<mu> \<Rightarrow> local.dom \<mu>\<guillemotright>
arr ?f \<Longrightarrow> MkArr (Dom ?f) (Cod ?f) (Map ?f) = ?f
?g \<cdot> ?f = (if seq ?g ?f then MkArr (Dom ?f) (Cod ?g) (Map ?g \<cdot>\<^sub>B Map ?f) else null)
seq ?g ?f = (arr ?f \<and> arr ?g \<and> Dom ?g = Cod ?f)
B.iso ?f \<Longrightarrow> B.inv ?f \<cdot>\<^sub>B ?f = B.dom ?f
local.dom ?f = (if arr ?f then MkIde (Dom ?f) else null)
goal (1 subgoal):
1. MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu> = local.dom \<mu>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu> = local.dom \<mu>
goal (1 subgoal):
1. ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu> = local.dom \<mu>
goal (1 subgoal):
1. ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
[PROOF STEP]
using \<mu>
[PROOF STATE]
proof (prove)
using this:
MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu> = local.dom \<mu>
arr \<mu> \<and> B.iso (Map \<mu>)
goal (1 subgoal):
1. ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
ide (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) \<cdot> \<mu>)
goal (1 subgoal):
1. ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
show "ide (\<mu> \<cdot> ?\<nu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
have "\<mu> \<cdot> ?\<nu> = cod \<mu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) = cod \<mu>
[PROOF STEP]
using \<mu> \<nu> MkArr_Map comp_char seq_char B.comp_arr_inv' cod_char
[PROOF STATE]
proof (prove)
using this:
arr \<mu> \<and> B.iso (Map \<mu>)
\<guillemotleft>MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) : cod \<mu> \<Rightarrow> local.dom \<mu>\<guillemotright>
arr ?f \<Longrightarrow> MkArr (Dom ?f) (Cod ?f) (Map ?f) = ?f
?g \<cdot> ?f = (if seq ?g ?f then MkArr (Dom ?f) (Cod ?g) (Map ?g \<cdot>\<^sub>B Map ?f) else null)
seq ?g ?f = (arr ?f \<and> arr ?g \<and> Dom ?g = Cod ?f)
B.iso ?f \<Longrightarrow> ?f \<cdot>\<^sub>B B.inv ?f = B.cod ?f
cod ?f = (if arr ?f then MkIde (Cod ?f) else null)
goal (1 subgoal):
1. \<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) = cod \<mu>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) = cod \<mu>
goal (1 subgoal):
1. ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) = cod \<mu>
goal (1 subgoal):
1. ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
using \<mu>
[PROOF STATE]
proof (prove)
using this:
\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)) = cod \<mu>
arr \<mu> \<and> B.iso (Map \<mu>)
goal (1 subgoal):
1. ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
ide (\<mu> \<cdot> MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
inverse_arrows \<mu> (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
goal (2 subgoals):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
2. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
thus "iso \<mu>"
[PROOF STATE]
proof (prove)
using this:
inverse_arrows \<mu> (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
goal (1 subgoal):
1. local.iso \<mu>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
local.iso \<mu>
goal (1 subgoal):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
show "inv \<mu> = ?\<nu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
using 4 inverse_unique
[PROOF STATE]
proof (prove)
using this:
inverse_arrows \<mu> (MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>)))
inverse_arrows ?f ?g \<Longrightarrow> local.inv ?f = ?g
goal (1 subgoal):
1. local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu> \<and> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
goal (2 subgoals):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
2. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
have 3: "arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> iso \<mu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
[PROOF STEP]
using 2
[PROOF STATE]
proof (prove)
using this:
arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu> \<and> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
goal (1 subgoal):
1. arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
goal (2 subgoals):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
2. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
show "iso \<mu> \<longleftrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
[PROOF STEP]
using 1 3
[PROOF STATE]
proof (prove)
using this:
local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu>
goal (1 subgoal):
1. local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
local.iso \<mu> = (arr \<mu> \<and> B.iso (Map \<mu>))
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
show "iso \<mu> \<Longrightarrow> inv \<mu> = ?\<nu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
using 1 2
[PROOF STATE]
proof (prove)
using this:
local.iso \<mu> \<Longrightarrow> arr \<mu> \<and> B.iso (Map \<mu>)
arr \<mu> \<and> B.iso (Map \<mu>) \<Longrightarrow> local.iso \<mu> \<and> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
goal (1 subgoal):
1. local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
local.iso \<mu> \<Longrightarrow> local.inv \<mu> = MkArr (Cod \<mu>) (Dom \<mu>) (B.inv (Map \<mu>))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 8825, "file": "Bicategory_Strictness", "length": 84}
|
"""
Process the data downloaded from original source
"""
import h5py
import os
import pickle
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def process_data():
    """
    Extracts the SBP and DBP values of 10 seconds long episodes
    while taking new episodes 5 seconds apart
    and stores them as .csv files (one csv per record, rows are
    `episode_start_sample,SBP,DBP`).

    This function is likely to take 6-7 days to run on a Intel Core i7-7700 CPU
    """
    fs = 125                            # sampling frequency (Hz)
    t = 10                              # length of ppg episodes (seconds)
    dt = 5                              # step size of taking the next episode (seconds)
    samples_in_episode = round(fs * t)  # number of samples in an episode
    d_samples = round(fs * dt)          # number of samples in a step

    try:  # create the processed_data directory (prints and continues if it exists)
        os.makedirs('processed_data')
    except Exception as e:
        print(e)

    for k in range(1, 5):  # process for the 4 different parts of the data
        print("Processing file part {} out of 4".format(k))
        # Context manager guarantees the hdf5 handle is released even if a
        # record raises midway (the original leaked one handle per part).
        with h5py.File(os.path.join('raw_data', 'Part_{}.mat'.format(k)), 'r') as f:
            ky = 'Part_' + str(k)  # key of the dataset inside the .mat file
            for i in tqdm(range(len(f[ky])), desc='Reading Records'):  # reading the records
                bp = []  # abp signal
                output_str = '10s,SBP,DBP\n'  # csv header for a new file
                # Read only the abp channel; the ppg channel was collected into
                # an unused list in the original and is not needed here.
                for j in tqdm(range(len(f[f[ky][i][0]])), desc='Reading Samples from Record {}/3000'.format(i+1)):
                    bp.append(f[f[ky][i][0]][j][1])  # abp signal
                # computing the sbp (max of abp) and dbp (min of abp) per episode
                for j in tqdm(range(0, len(f[f[ky][i][0]])-samples_in_episode, d_samples), desc='Processing Episodes from Record {}/3000'.format(i+1)):
                    sbp = max(bp[j:j+samples_in_episode])  # sbp value
                    dbp = min(bp[j:j+samples_in_episode])  # dbp value
                    output_str += '{},{},{}\n'.format(j, sbp, dbp)  # append row
                # with-block closes the csv file even on error
                with open(os.path.join('processed_data', 'Part_{}_{}.csv'.format(k, i)), 'w') as fp:
                    fp.write(output_str)
def observe_processed_data():
    """
    Observe the sbp and dbps of the 10s long episodes:
    reads every csv in `processed_data` and plots histograms
    of the SBP and DBP distributions.
    """
    files = next(os.walk('processed_data'))[2]  # all csv file names
    sbps = []
    dbps = []
    for fl in tqdm(files, desc='Browsing through Files'):
        # with-block closes the handle (the original left it to the GC);
        # [1:-1] skips the header row and the trailing blank line
        with open(os.path.join('processed_data', fl), 'r') as fp:
            lines = fp.read().split('\n')[1:-1]
        for line in tqdm(lines, desc='Browsing through Episodes from File'):
            values = line.split(',')
            sbp = int(float(values[1]))  # systolic value of the episode
            dbp = int(float(values[2]))  # diastolic value of the episode
            sbps.append(sbp)
            dbps.append(dbp)
    plt.subplot(2, 1, 1)
    plt.hist(sbps, bins=180)
    plt.title('SBP')
    plt.subplot(2, 1, 2)
    plt.hist(dbps, bins=180)
    plt.title('DBP')
    plt.show()
def downsample_data(minThresh=2500, ratio=0.25):
    """
    Downsamples the data based on the scheme proposed in the manuscript.

    Episodes are first binned by DBP value and a capped random sample is
    drawn from every DBP bin; the SBP bins are then topped up the same way,
    using a look up table of episodes seen during DBP binning.  The selected
    [file_no, record_no, episode_start] triplets are pickled to
    'candidates.p' and histograms of the retained SBP/DBP values are shown.

    Keyword Arguments:
        minThresh {int} -- maximum number of episodes to take per bin (default: {2500})
        ratio {float} -- ratio of total signals of certain bin to take (default: {0.25})
    """
    files = next(os.walk('processed_data'))[2]  # load all csv files
    sbps_dict = {}  # sbp value -> list of (file_no, record_no, episode_st)
    dbps_dict = {}  # dbp value -> list of (file_no, record_no, episode_st, sbp)
    sbps_cnt = {}  # dictionary containing count of specific sbp and dbp values
    dbps_cnt = {}
    dbps_taken = {}  # dictionary containing count of specific sbp and dbp taken
    sbps_taken = {}
    sbps = []  # per-bin counts, filled only for the plots at the end
    dbps = []
    candidates = []  # list of candidate episodes
    lut = {}  # look up table: file_no -> record_no -> episode_st -> 1
    for fl in tqdm(files, desc='Browsing Files'):  # iterating over the csv files
        # NOTE(review): handle is never closed explicitly; relies on GC
        lines = open(os.path.join('processed_data', fl), 'r').read().split('\n')[1:-1]  # fetching the episodes (skip header and trailing blank)
        for line in tqdm(lines, desc='Reading Episodes'):  # iterating over the episodes
            values = line.split(',')
            file_no = int(fl.split('_')[1])  # id of the file (name is 'Part_<file>_<record>.csv')
            record_no = int(fl.split('.')[0].split('_')[2])  # id of the record
            episode_st = int(values[0])  # start of the episode (sample index)
            sbp = int(float(values[1]))  # sbp of that episode
            dbp = int(float(values[2]))  # dbp of that episode
            if(sbp not in sbps_dict):  # new sbp found
                sbps_dict[sbp] = []  # initialize
                sbps_cnt[sbp] = 0
            sbps_dict[sbp].append((file_no, record_no, episode_st))  # add the file, record and episode info
            sbps_cnt[sbp] += 1  # increment
            if(dbp not in dbps_dict):  # new dbp found
                dbps_dict[dbp] = []  # initialize
                dbps_cnt[dbp] = 0
            dbps_dict[dbp].append((file_no, record_no, episode_st, sbp))  # the sbp rides along so sbps_taken can be updated below
            dbps_cnt[dbp] += 1  # increment
    sbp_keys = list(sbps_dict)  # all the different sbp values
    dbp_keys = list(dbps_dict)  # all the different dbp values
    sbp_keys.sort()  # sorting the sbp values
    dbp_keys.sort()  # sorting the dbp values
    for dbp in tqdm(dbp_keys, desc='DBP Binning'):  # iterating through the dbp values
        cnt = min(int(dbps_cnt[dbp]*ratio), minThresh)  # how many episodes of this dbp to take
        for i in tqdm(range(cnt), desc='Picking Random Indices'):
            indix = np.random.randint(len(dbps_dict[dbp]))  # picking a random index
            candidates.append([dbps_dict[dbp][indix][0], dbps_dict[dbp][indix][1], dbps_dict[dbp][indix][2]])  # add the file, record and episode info in the candidates list
            if(dbp not in dbps_taken):  # this dbp has not been taken
                dbps_taken[dbp] = 0  # initialize
            dbps_taken[dbp] += 1  # increment
            if(dbps_dict[dbp][indix][3] not in sbps_taken):  # checking if the sbp of that episode has been taken or not
                sbps_taken[dbps_dict[dbp][indix][3]] = 0  # initialize
            sbps_taken[dbps_dict[dbp][indix][3]] += 1  # increment
            if(dbps_dict[dbp][indix][0] not in lut):  # this file is not in look up table
                lut[dbps_dict[dbp][indix][0]] = {}  # add the file in look up table
            if(dbps_dict[dbp][indix][1] not in lut[dbps_dict[dbp][indix][0]]):  # this record is not in look up table
                lut[dbps_dict[dbp][indix][0]][dbps_dict[dbp][indix][1]] = {}  # add the record in look up table
            if(dbps_dict[dbp][indix][2] not in lut[dbps_dict[dbp][indix][0]][dbps_dict[dbp][indix][1]]):  # this episode is not in look up table
                lut[dbps_dict[dbp][indix][0]][dbps_dict[dbp][indix][1]][dbps_dict[dbp][indix][2]] = 1  # add this episode in look up table
            dbps_dict[dbp].pop(indix)  # remove this episode, so that this episode is not randomly selected again
    for sbp in tqdm(sbp_keys, desc='SBP Binning'):  # iterating on the sbps
        if sbp not in sbps_taken:  # this sbp has not yet been taken
            sbps_taken[sbp] = 0  # initialize
        cnt = min(int(sbps_cnt[sbp]*ratio), minThresh) - sbps_taken[sbp]  # how many episodes of this sbp to take, removed the count already included during dbp based binning
        for i in tqdm(range(cnt), desc='Picking Random Indices'):  # iterate over how many episodes to take
            while len(sbps_dict[sbp]) > 0:  # while there are some episodes with that sbp left
                try:
                    indix = np.random.randint(len(sbps_dict[sbp]))  # picking a random episode
                except:
                    # NOTE(review): randint cannot raise here because of the loop
                    # guard; if it ever did, a stale `indix` from a previous
                    # iteration would be reused below — confirm intent
                    pass
                try:  # see if that episode is contained in the look up table
                    dumi = lut[sbps_dict[sbp][indix][0]][sbps_dict[sbp][indix][1]][sbps_dict[sbp][indix][2]]
                except:
                    # KeyError -> episode is NOT in the lut; it is discarded here
                    # NOTE(review): this keeps only episodes already recorded during
                    # DBP binning and drops unseen ones — looks inverted; verify
                    # against the manuscript's sampling scheme
                    sbps_dict[sbp].pop(indix)
                    continue
                candidates.append([sbps_dict[sbp][indix][0], sbps_dict[sbp][indix][1], sbps_dict[sbp][indix][2]])  # add new candidate
                sbps_taken[sbp] += 1  # increment
                sbps_dict[sbp].pop(indix)  # remove that episode
                break  # repeat the process
    sbps_dict = {}  # garbage collection
    dbps_dict = {}
    sbps_cnt = {}  # garbage collection
    dbps_cnt = {}
    sbps = []  # garbage collection (reused below for plotting)
    dbps = []
    lut = {}  # garbage collection
    print('Total {} episodes have been selected'.format(len(candidates)))
    pickle.dump(candidates, open('candidates.p', 'wb'))  # save the candidates
    '''
    plotting the downsampled episodes
    '''
    sbp_keys = list(sbps_taken)
    dbp_keys = list(dbps_taken)
    sbp_keys.sort()
    dbp_keys.sort()
    for sbp in sbp_keys:
        sbps.append(sbps_taken[sbp])  # number of episodes retained per sbp bin
    for dbp in dbp_keys:
        dbps.append(dbps_taken[dbp])  # number of episodes retained per dbp bin
    plt.figure()
    plt.subplot(2, 1, 1)
    plt.bar(sbp_keys, sbps)
    plt.title('SBP')
    plt.subplot(2, 1, 2)
    plt.bar(dbp_keys, dbps)
    plt.title('DBP')
    plt.show()
def extract_episodes(candidates):
    """
    Extracts the episodes from the raw data and saves each one as a pair
    of pickle files ('ppgs/<id>.p' and 'abps/<id>.p').

    Arguments:
        candidates {list} -- entries of the form [file_no, record_no,
            episode_start] as produced by downsample_data()

    This function is likely to take 3-4 days to run on a Intel Core i7-7700 CPU
    """
    try:  # making the necessary directories (prints and continues if present)
        os.makedirs('ppgs')
    except Exception as e:
        print(e)
    try:
        os.makedirs('abps')
    except Exception as e:
        print(e)

    # loop-invariant constants, hoisted out of the per-file loop
    fs = 125                            # sampling frequency (Hz)
    t = 10                              # length of ppg episodes (seconds)
    samples_in_episode = round(fs * t)  # number of samples in an episode

    for k in tqdm(range(1, 5), desc='Reading from Files'):  # iterating through the files
        # context manager releases the hdf5 handle after each part
        with h5py.File('./raw_data/Part_{}.mat'.format(k), 'r') as f:
            ky = 'Part_' + str(k)  # key of the dataset inside the .mat file
            for indix in tqdm(range(len(candidates)), desc='Reading from File {}/4'.format(k)):  # iterating through the candidates
                if(candidates[indix][0] != k):  # this candidate is from a different file
                    continue
                record_no = int(candidates[indix][1])  # record no of the episode
                episode_st = int(candidates[indix][2])  # start of that episode
                ppg = []  # ppg signal
                abp = []  # abp signal
                for j in tqdm(range(episode_st, episode_st+samples_in_episode), desc='Reading Episode Id {}'.format(indix)):
                    ppg.append(f[f[ky][record_no][0]][j][0])  # ppg signal
                    abp.append(f[f[ky][record_no][0]][j][1])  # abp signal
                # with-blocks close the pickle files even on error
                with open(os.path.join('ppgs', '{}.p'.format(indix)), 'wb') as fp:
                    pickle.dump(np.array(ppg), fp)  # saving the ppg signal
                with open(os.path.join('abps', '{}.p'.format(indix)), 'wb') as fp:
                    pickle.dump(np.array(abp), fp)  # saving the abp signal
def merge_episodes():
    """
    Merges the extracted episodes and saves them as a hdf5 file
    ('data/data.hdf5', dataset name 'data', entries are [abp, ppg] pairs).
    """
    try:  # creates the necessary directory (prints and continues if present)
        os.makedirs('data')
    except Exception as e:
        print(e)
    files = next(os.walk('abps'))[2]  # all the extracted episodes
    # random shuffling is performed now so that the data can be split
    # straightforwardly in the next step
    np.random.shuffle(files)
    data = []  # accumulated [abp, ppg] pairs
    for fl in tqdm(files):
        # with-blocks close the pickle handles (the original left them to the GC)
        with open(os.path.join('abps', fl), 'rb') as fp:
            abp = pickle.load(fp)  # abp signal
        with open(os.path.join('ppgs', fl), 'rb') as fp:
            ppg = pickle.load(fp)  # ppg signal
        data.append([abp, ppg])  # adding the signals
    # context manager flushes and closes the hdf5 file deterministically
    with h5py.File(os.path.join('data', 'data.hdf5'), 'w') as f:
        f.create_dataset('data', data=data)
def main():
    """Runs the full processing pipeline end to end."""
    process_data()            # raw .mat parts -> per-record SBP/DBP csv files
    observe_processed_data()  # visualize the raw SBP/DBP distributions
    downsample_data()         # select a balanced subset, writes candidates.p
    # with-block closes the handle (the original leaked it)
    with open('./candidates.p', 'rb') as fp:
        candidates = pickle.load(fp)
    extract_episodes(candidates)  # dump each selected episode to disk
    merge_episodes()              # combine everything into data/data.hdf5


if __name__ == '__main__':
    main()
|
{"hexsha": "3d374a5bc462d2cdcc518ddfbae9e6fd6075e1aa", "size": 11095, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/data_processing.py", "max_stars_repo_name": "nguyenngocsang1410/PPG2ABP", "max_stars_repo_head_hexsha": "6109d4d0c213655486a17dd25900675b746beabd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2020-05-07T13:52:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:50:15.000Z", "max_issues_repo_path": "codes/data_processing.py", "max_issues_repo_name": "shangjianshizhe/PPG2ABP", "max_issues_repo_head_hexsha": "6109d4d0c213655486a17dd25900675b746beabd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-05-18T18:18:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:38:27.000Z", "max_forks_repo_path": "codes/data_processing.py", "max_forks_repo_name": "shangjianshizhe/PPG2ABP", "max_forks_repo_head_hexsha": "6109d4d0c213655486a17dd25900675b746beabd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2020-05-10T15:15:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T09:33:40.000Z", "avg_line_length": 30.2316076294, "max_line_length": 171, "alphanum_fraction": 0.6452456061, "include": true, "reason": "import numpy", "num_tokens": 3396}
|
# Best-basis selection tests: build a small random multichannel signal,
# decompose every column with the Haar wavelet, and check that each
# best-basis routine returns a tree that is valid for the signal length.
x = randn(16,5)
wt = wavelet(WT.haar)
# one decomposition per column, stacked along the 3rd (channel) dimension:
# standard, stationary, and autocorrelation wavelet packet decompositions
xw = cat([wpd(x[:,i], wt) for i in axes(x,2)]..., dims=3)
xsw = cat([swpd(x[:,i], wt) for i in axes(x,2)]..., dims=3)
xacw = cat([acwpd(x[:,i], wt) for i in axes(x,2)]..., dims=3)
# bb: individual best basis, via positional and keyword APIs, with a
# non-default cost, and on the redundant (stationary/autocorrelation) transforms
@test isvalidtree(x[:,1], bestbasistree(xw[:,:,1], BB()))
@test isvalidtree(x[:,1], bestbasistree(xw, method=BB())[:,1])
@test isvalidtree(x[:,1],
    bestbasistree(xw, BB(LogEnergyEntropyCost(), false))[:,1])
@test isvalidtree(x[:,1], bestbasistree(xsw, BB(redundant=true))[:,1])
@test isvalidtree(x[:,1], bestbasistree(xacw, BB(redundant=true))[:,1])
# jbb: joint best basis (the default method), custom cost, redundant variant
@test isvalidtree(x[:,1], bestbasistree(xw))
@test isvalidtree(x[:,1], bestbasistree(xw, JBB(NormCost(), false)))
@test isvalidtree(x[:,1], bestbasistree(xsw, JBB(redundant=true)))
# lsdb: least statistically dependent basis, keyword and positional forms
@test isvalidtree(x[:,1], bestbasistree(xw, method=LSDB()))
@test isvalidtree(x[:,1], bestbasistree(xw, LSDB()))
@test isvalidtree(x[:,1], bestbasistree(xsw, LSDB(redundant=true)))
# siwpd_bb: the shift-invariant best basis should be invariant under a
# circular shift of the input, so the per-node sums of both trees must match
x = randn(16)
xw0 = siwpd(x, wt)
xw4 = siwpd(circshift(x,4), wt)
bt0 = map(node -> sum(node), bestbasistree(xw0, 4, SIBB()))
bt4 = map(node -> sum(node), bestbasistree(xw4, 4, SIBB()))
@test bt0 == bt4
# misc: invalid selection symbols must raise (second call fails the
# length assertion first, since a depth-3 tree needs n=4 coefficients)
@test_throws ArgumentError bestbasis_treeselection(randn(15), 8, :fail)
@test_throws AssertionError bestbasis_treeselection(randn(7), 3, :fail) # true n=4
|
{"hexsha": "5afb695eeaa71a8a7aa7f2452e93cf96f6631c9c", "size": 1360, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/bestbasis.jl", "max_stars_repo_name": "ShozenD/WaveletsExt.jl", "max_stars_repo_head_hexsha": "602c26c239c925b1de3f174c2a7b50aab991153a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/bestbasis.jl", "max_issues_repo_name": "ShozenD/WaveletsExt.jl", "max_issues_repo_head_hexsha": "602c26c239c925b1de3f174c2a7b50aab991153a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/bestbasis.jl", "max_forks_repo_name": "ShozenD/WaveletsExt.jl", "max_forks_repo_head_hexsha": "602c26c239c925b1de3f174c2a7b50aab991153a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7777777778, "max_line_length": 88, "alphanum_fraction": 0.6470588235, "num_tokens": 498}
|
import numpy as np
import cv2
# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of pixels whose scaled absolute Sobel derivative is
    strictly above thresh[0] and at or below thresh[1].

    img          : RGB image (converted via cv2.COLOR_RGB2GRAY, i.e. for
                   mpimg.imread() input; use BGR2GRAY for cv2.imread()).
    orient       : 'x' or 'y' -- axis of the derivative.
    sobel_kernel : Sobel aperture size (odd integer).
    thresh       : (low, high) bounds on the 0-255 scaled gradient.

    Raises TypeError for any orient other than 'x' or 'y'.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        derivative = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        derivative = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    else:
        raise TypeError("Only 'x' and 'y' orientations supported!")
    magnitude = np.absolute(derivative)
    # Rescale to 8-bit. NOTE(review): divides by the image-wide maximum,
    # so a perfectly flat image would divide by zero -- confirm inputs.
    scaled = np.uint8(255*magnitude/np.max(magnitude))
    mask = np.zeros_like(scaled)
    mask[(scaled > thresh[0]) & (scaled <= thresh[1])] = 1
    return mask
# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose scaled gradient magnitude lies inside
    the inclusive range mag_thresh.

    img          : RGB image (cv2.COLOR_RGB2GRAY is applied, i.e. for
                   mpimg.imread() input; use BGR2GRAY for cv2.imread()).
    sobel_kernel : Sobel aperture size (odd integer).
    mag_thresh   : (low, high) inclusive bounds on the 0-255 scaled
                   magnitude.  NOTE(review): this parameter shadows the
                   function name; kept for caller compatibility.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # x and y derivatives, then the Euclidean magnitude
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx**2 + gy**2)
    # Rescale to 8-bit. NOTE(review): divides by the image-wide maximum,
    # so a gradient-free image would divide by zero -- confirm inputs.
    scaled = np.uint8(255*magnitude/np.max(magnitude))
    mask = np.zeros_like(scaled)
    in_range = (scaled >= mag_thresh[0]) & (scaled <= mag_thresh[1])
    mask[in_range] = 1
    return mask
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction lies inside thresh.

    The direction is arctan2(|dy|, |dx|), so values fall in [0, pi/2];
    0 is a horizontal gradient, pi/2 a vertical one.

    img          : RGB image (cv2.COLOR_RGB2GRAY is applied, i.e. for
                   mpimg.imread() input; use BGR2GRAY for cv2.imread()).
    sobel_kernel : Sobel aperture size (odd integer).
    thresh       : (low, high) inclusive bounds in radians.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # absolute x/y derivatives feed arctan2 so direction is sign-free
    gx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    gy = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    direction = np.arctan2(gy, gx)
    mask = np.zeros_like(direction)
    mask[(direction >= thresh[0]) & (direction <= thresh[1])] = 1
    return mask
|
{"hexsha": "976f3d5a0297dbac8dcf677039f922bda932b96d", "size": 3885, "ext": "py", "lang": "Python", "max_stars_repo_path": "HelperProjects/Gradients-and-Color-Spaces/thresholds.py", "max_stars_repo_name": "luk6xff/SelfDrivingCarND", "max_stars_repo_head_hexsha": "1ad0a203f3c1ebd8ee3c114d8efc0d0cf99ddc42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-06-05T18:32:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-29T07:41:24.000Z", "max_issues_repo_path": "HelperProjects/Gradients-and-Color-Spaces/thresholds.py", "max_issues_repo_name": "luk6xff/SelfDrivingCarND", "max_issues_repo_head_hexsha": "1ad0a203f3c1ebd8ee3c114d8efc0d0cf99ddc42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HelperProjects/Gradients-and-Color-Spaces/thresholds.py", "max_forks_repo_name": "luk6xff/SelfDrivingCarND", "max_forks_repo_head_hexsha": "1ad0a203f3c1ebd8ee3c114d8efc0d0cf99ddc42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-05T18:33:08.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-05T18:33:08.000Z", "avg_line_length": 36.308411215, "max_line_length": 91, "alphanum_fraction": 0.6738738739, "include": true, "reason": "import numpy", "num_tokens": 1149}
|
[STATEMENT]
lemma measure_space_measure_of_st_vec': "measure_space UNIV UNIV (measure_of_st_vec' x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. measure_space UNIV UNIV (measure_of_st_vec' x)
[PROOF STEP]
unfolding measure_space_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sigma_algebra UNIV UNIV \<and> positive UNIV (measure_of_st_vec' x) \<and> countably_additive UNIV (measure_of_st_vec' x)
[PROOF STEP]
proof (simp, simp add: countably_additive_def measure_of_st_vec'_def disjoint_family_on_def,
clarify, goal_cases)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
case (1 A)
[PROOF STATE]
proof (state)
this:
\<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {}
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
let ?x = "st_vec x"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
define N where "N = {i. A i \<noteq> {}}"
[PROOF STATE]
proof (state)
this:
N = {i. A i \<noteq> {}}
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
let ?A = "\<Union>(A ` N)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
have "finite B \<Longrightarrow> B \<subseteq> ?A \<Longrightarrow> \<exists> K. finite K \<and> K \<subseteq> N \<and> B \<subseteq> \<Union>(A ` K)" for B
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>finite B; B \<subseteq> \<Union> (A ` N)\<rbrakk> \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> B \<subseteq> \<Union> (A ` K)
[PROOF STEP]
proof (induct rule: finite_induct)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. {} \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> {} \<subseteq> \<Union> (A ` K)
2. \<And>x F. \<lbrakk>finite F; x \<notin> F; F \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> F \<subseteq> \<Union> (A ` K); insert x F \<subseteq> \<Union> (A ` N)\<rbrakk> \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> insert x F \<subseteq> \<Union> (A ` K)
[PROOF STEP]
case (insert b B)
[PROOF STATE]
proof (state)
this:
finite B
b \<notin> B
B \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> B \<subseteq> \<Union> (A ` K)
insert b B \<subseteq> \<Union> (A ` N)
goal (2 subgoals):
1. {} \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> {} \<subseteq> \<Union> (A ` K)
2. \<And>x F. \<lbrakk>finite F; x \<notin> F; F \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> F \<subseteq> \<Union> (A ` K); insert x F \<subseteq> \<Union> (A ` N)\<rbrakk> \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> insert x F \<subseteq> \<Union> (A ` K)
[PROOF STEP]
from insert(3-4)
[PROOF STATE]
proof (chain)
picking this:
B \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> B \<subseteq> \<Union> (A ` K)
insert b B \<subseteq> \<Union> (A ` N)
[PROOF STEP]
obtain K where K: "finite K" "K \<subseteq> N" "B \<subseteq> \<Union>(A ` K)"
[PROOF STATE]
proof (prove)
using this:
B \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> B \<subseteq> \<Union> (A ` K)
insert b B \<subseteq> \<Union> (A ` N)
goal (1 subgoal):
1. (\<And>K. \<lbrakk>finite K; K \<subseteq> N; B \<subseteq> \<Union> (A ` K)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
finite K
K \<subseteq> N
B \<subseteq> \<Union> (A ` K)
goal (2 subgoals):
1. {} \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> {} \<subseteq> \<Union> (A ` K)
2. \<And>x F. \<lbrakk>finite F; x \<notin> F; F \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> F \<subseteq> \<Union> (A ` K); insert x F \<subseteq> \<Union> (A ` N)\<rbrakk> \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> insert x F \<subseteq> \<Union> (A ` K)
[PROOF STEP]
from insert(4)
[PROOF STATE]
proof (chain)
picking this:
insert b B \<subseteq> \<Union> (A ` N)
[PROOF STEP]
obtain a where a: "a \<in> N" "b \<in> A a"
[PROOF STATE]
proof (prove)
using this:
insert b B \<subseteq> \<Union> (A ` N)
goal (1 subgoal):
1. (\<And>a. \<lbrakk>a \<in> N; b \<in> A a\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
a \<in> N
b \<in> A a
goal (2 subgoals):
1. {} \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> {} \<subseteq> \<Union> (A ` K)
2. \<And>x F. \<lbrakk>finite F; x \<notin> F; F \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> F \<subseteq> \<Union> (A ` K); insert x F \<subseteq> \<Union> (A ` N)\<rbrakk> \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> insert x F \<subseteq> \<Union> (A ` K)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>K. finite K \<and> K \<subseteq> N \<and> insert b B \<subseteq> \<Union> (A ` K)
[PROOF STEP]
by (intro exI[of _ "insert a K"], insert a K, auto)
[PROOF STATE]
proof (state)
this:
\<exists>K. finite K \<and> K \<subseteq> N \<and> insert b B \<subseteq> \<Union> (A ` K)
goal (1 subgoal):
1. {} \<subseteq> \<Union> (A ` N) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> {} \<subseteq> \<Union> (A ` K)
[PROOF STEP]
qed auto
[PROOF STATE]
proof (state)
this:
\<lbrakk>finite ?B; ?B \<subseteq> \<Union> (A ` N)\<rbrakk> \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> ?B \<subseteq> \<Union> (A ` K)
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
from this[OF _ subset_refl]
[PROOF STATE]
proof (chain)
picking this:
finite (\<Union> (A ` N)) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> \<Union> (A ` N) \<subseteq> \<Union> (A ` K)
[PROOF STEP]
obtain K where *: "finite K" "K \<subseteq> N" "\<Union>(A ` K) = ?A"
[PROOF STATE]
proof (prove)
using this:
finite (\<Union> (A ` N)) \<Longrightarrow> \<exists>K. finite K \<and> K \<subseteq> N \<and> \<Union> (A ` N) \<subseteq> \<Union> (A ` K)
goal (1 subgoal):
1. (\<And>K. \<lbrakk>finite K; K \<subseteq> N; \<Union> (A ` K) = \<Union> (A ` N)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
finite K
K \<subseteq> N
\<Union> (A ` K) = \<Union> (A ` N)
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
finite K
K \<subseteq> N
\<Union> (A ` K) = \<Union> (A ` N)
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
assume "K \<subset> N"
[PROOF STATE]
proof (state)
this:
K \<subset> N
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
K \<subset> N
[PROOF STEP]
obtain n where **: "n \<in> N" "n \<notin> K"
[PROOF STATE]
proof (prove)
using this:
K \<subset> N
goal (1 subgoal):
1. (\<And>n. \<lbrakk>n \<in> N; n \<notin> K\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
n \<in> N
n \<notin> K
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
from this[unfolded N_def]
[PROOF STATE]
proof (chain)
picking this:
n \<in> {i. A i \<noteq> {}}
n \<notin> K
[PROOF STEP]
obtain a where a: "a \<in> A n"
[PROOF STATE]
proof (prove)
using this:
n \<in> {i. A i \<noteq> {}}
n \<notin> K
goal (1 subgoal):
1. (\<And>a. a \<in> A n \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
a \<in> A n
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
with ** *
[PROOF STATE]
proof (chain)
picking this:
n \<in> N
n \<notin> K
finite K
K \<subseteq> N
\<Union> (A ` K) = \<Union> (A ` N)
a \<in> A n
[PROOF STEP]
obtain k where ***: "k \<in> K" "a \<in> A k"
[PROOF STATE]
proof (prove)
using this:
n \<in> N
n \<notin> K
finite K
K \<subseteq> N
\<Union> (A ` K) = \<Union> (A ` N)
a \<in> A n
goal (1 subgoal):
1. (\<And>k. \<lbrakk>k \<in> K; a \<in> A k\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
k \<in> K
a \<in> A k
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
from ** ***
[PROOF STATE]
proof (chain)
picking this:
n \<in> N
n \<notin> K
k \<in> K
a \<in> A k
[PROOF STEP]
have "n \<noteq> k"
[PROOF STATE]
proof (prove)
using this:
n \<in> N
n \<notin> K
k \<in> K
a \<in> A k
goal (1 subgoal):
1. n \<noteq> k
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
n \<noteq> k
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
from 1[rule_format, OF this]
[PROOF STATE]
proof (chain)
picking this:
A n \<inter> A k = {}
[PROOF STEP]
have "A n \<inter> A k = {}"
[PROOF STATE]
proof (prove)
using this:
A n \<inter> A k = {}
goal (1 subgoal):
1. A n \<inter> A k = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
A n \<inter> A k = {}
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
with *** a
[PROOF STATE]
proof (chain)
picking this:
k \<in> K
a \<in> A k
a \<in> A n
A n \<inter> A k = {}
[PROOF STEP]
have False
[PROOF STATE]
proof (prove)
using this:
k \<in> K
a \<in> A k
a \<in> A n
A n \<inter> A k = {}
goal (1 subgoal):
1. False
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
False
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
K \<subset> N \<Longrightarrow> False
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
with *
[PROOF STATE]
proof (chain)
picking this:
finite K
K \<subseteq> N
\<Union> (A ` K) = \<Union> (A ` N)
K \<subset> N \<Longrightarrow> False
[PROOF STEP]
have fin: "finite N"
[PROOF STATE]
proof (prove)
using this:
finite K
K \<subseteq> N
\<Union> (A ` K) = \<Union> (A ` N)
K \<subset> N \<Longrightarrow> False
goal (1 subgoal):
1. finite N
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
finite N
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
have id: "\<Union>(A ` UNIV) = ?A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Union> (range A) = \<Union> (A ` N)
[PROOF STEP]
unfolding N_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Union> (range A) = \<Union> (A ` {i. A i \<noteq> {}})
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> (range A) = \<Union> (A ` N)
goal (1 subgoal):
1. \<And>A. \<forall>m n. m \<noteq> n \<longrightarrow> A m \<inter> A n = {} \<Longrightarrow> (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
show "(\<Sum>i. ennreal (sum (($h) ?x) (A i))) =
ennreal (sum (($h) ?x) (\<Union>(A ` UNIV)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
[PROOF STEP]
unfolding id
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (A ` N)))
[PROOF STEP]
apply (subst suminf_finite[OF fin], (auto simp: N_def)[1])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>n\<in>N. ennreal (sum (($h) (st_vec x)) (A n))) = ennreal (sum (($h) (st_vec x)) (\<Union> (A ` N)))
[PROOF STEP]
apply (subst sum_ennreal, (insert non_neg_vec_st_vec[of x], auto simp: non_neg_vec_def intro!: sum_nonneg)[1])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ennreal (\<Sum>n\<in>N. sum (($h) (st_vec x)) (A n)) = ennreal (sum (($h) (st_vec x)) (\<Union> (A ` N)))
[PROOF STEP]
apply (rule arg_cong[of _ _ ennreal])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>n\<in>N. sum (($h) (st_vec x)) (A n)) = sum (($h) (st_vec x)) (\<Union> (A ` N))
[PROOF STEP]
apply (subst sum.UNION_disjoint[OF fin], insert 1, auto)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
(\<Sum>i. ennreal (sum (($h) (st_vec x)) (A i))) = ennreal (sum (($h) (st_vec x)) (\<Union> (range A)))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 6389, "file": "Stochastic_Matrices_Stochastic_Vector_PMF", "length": 56}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''macf.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Oct 2017
This contains the ACF period-finding algorithm from McQuillan+ 2013a and
McQuillan+ 2014.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False
def set_logger_parent(parent_name):
    """Rebind this module's LOGGER under the given parent logger name."""
    global LOGGER
    LOGGER = logging.getLogger('{0}.{1}'.format(parent_name, LOGMOD))
def LOGDEBUG(message):
    """Log a debug message via LOGGER, or print it only if DEBUG is set."""
    if LOGGER:
        LOGGER.debug(message)
    elif DEBUG:
        stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        print('[%s - DBUG] %s' % (stamp, message))
def LOGINFO(message):
    """Log an info message via LOGGER, or print it as a fallback."""
    if LOGGER:
        LOGGER.info(message)
    else:
        stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        print('[%s - INFO] %s' % (stamp, message))
def LOGERROR(message):
    """Log an error message via LOGGER, or print it as a fallback."""
    if LOGGER:
        LOGGER.error(message)
    else:
        stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        print('[%s - ERR!] %s' % (stamp, message))
def LOGWARNING(message):
    """Log a warning message via LOGGER, or print it as a fallback."""
    if LOGGER:
        LOGGER.warning(message)
    else:
        stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        print('[%s - WRN!] %s' % (stamp, message))
def LOGEXCEPTION(message):
    """Log a message plus the current traceback via LOGGER, or print both."""
    if LOGGER:
        LOGGER.exception(message)
    else:
        stamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
        print(
            '[%s - EXC!] %s\nexception was: %s' % (stamp, message,
                                                   format_exc())
        )
#############
## IMPORTS ##
#############
from multiprocessing import Pool, cpu_count
import numpy as np
# import these to avoid lookup overhead
from numpy import nan as npnan, sum as npsum, abs as npabs, \
roll as nproll, isfinite as npisfinite, std as npstd, \
sign as npsign, sqrt as npsqrt, median as npmedian, \
array as nparray, percentile as nppercentile, \
polyfit as nppolyfit, var as npvar, max as npmax, min as npmin, \
log10 as nplog10, arange as nparange, pi as MPI, floor as npfloor, \
argsort as npargsort, cos as npcos, sin as npsin, tan as nptan, \
where as npwhere, linspace as nplinspace, \
zeros_like as npzeros_like, full_like as npfull_like, \
arctan as nparctan, nanargmax as npnanargmax, nanargmin as npnanargmin, \
empty as npempty, ceil as npceil, mean as npmean, \
digitize as npdigitize, unique as npunique, \
argmax as npargmax, argmin as npargmin
from scipy.signal import argrelmax, argrelmin, savgol_filter
from astropy.convolution import convolve, Gaussian1DKernel
###################
## LOCAL IMPORTS ##
###################
from ..lcmath import phase_magseries, sigclip_magseries, time_bin_magseries, \
phase_bin_magseries, fill_magseries_gaps
from ..varbase.autocorr import autocorr_magseries
############
## CONFIG ##
############
NCPUS = cpu_count()
######################
## HELPER FUNCTIONS ##
######################
def _smooth_acf(acf, windowfwhm=7, windowsize=21):
    '''Return the ACF smoothed by convolution with a Gaussian kernel.

    The kernel is sampled over windowsize points and convolve() is called
    with boundary='extend' to pad the ACF at its edges.

    NOTE(review): windowfwhm is passed as Gaussian1DKernel's first
    positional argument, which is the standard deviation, not the FWHM --
    confirm the intended width semantics.
    '''
    kernel = Gaussian1DKernel(windowfwhm, x_size=windowsize)
    return convolve(acf, kernel, boundary='extend')
def _smooth_acf_savgol(acf, windowsize=21, polyorder=2):
'''
This returns a smoothed version of the ACF.
This version uses the Savitsky-Golay smoothing filter
'''
smoothed = savgol_filter(acf, windowsize, polyorder)
return smoothed
def _get_acf_peakheights(lags, acf, npeaks=20, searchinterval=1):
'''This calculates the relative peak heights for first npeaks in ACF.
Usually, the first peak or the second peak (if its peak height > first peak)
corresponds to the correct lag. When we know the correct lag, the period is
then:
bestperiod = time[lags == bestlag] - time[0]
'''
maxinds = argrelmax(acf, order=searchinterval)[0]
maxacfs = acf[maxinds]
maxlags = lags[maxinds]
mininds = argrelmin(acf, order=searchinterval)[0]
minacfs = acf[mininds]
minlags = lags[mininds]
relpeakheights = np.zeros(npeaks)
relpeaklags = np.zeros(npeaks,dtype=np.int64)
peakindices = np.zeros(npeaks,dtype=np.int64)
for peakind, mxi in enumerate(maxinds[:npeaks]):
# check if there are no mins to the left
# throw away this peak because it's probably spurious
# (FIXME: is this OK?)
if np.all(mxi < mininds):
continue
leftminind = mininds[mininds < mxi][-1] # the last index to the left
rightminind = mininds[mininds > mxi][0] # the first index to the right
relpeakheights[peakind] = (
acf[mxi] - (acf[leftminind] + acf[rightminind])/2.0
)
relpeaklags[peakind] = lags[mxi]
peakindices[peakind] = peakind
# figure out the bestperiod if possible
if relpeakheights[0] > relpeakheights[1]:
bestlag = relpeaklags[0]
bestpeakheight = relpeakheights[0]
bestpeakindex = peakindices[0]
else:
bestlag = relpeaklags[1]
bestpeakheight = relpeakheights[1]
bestpeakindex = peakindices[1]
return {'maxinds':maxinds,
'maxacfs':maxacfs,
'maxlags':maxlags,
'mininds':mininds,
'minacfs':minacfs,
'minlags':minlags,
'relpeakheights':relpeakheights,
'relpeaklags':relpeaklags,
'peakindices':peakindices,
'bestlag':bestlag,
'bestpeakheight':bestpeakheight,
'bestpeakindex':bestpeakindex}
def plot_acf_results(acfp, outfile, maxlags=5000, yrange=[-0.4,0.4]):
    '''
    This plots the unsmoothed/smoothed ACF vs lag.

    acfp is the resultdict from macf_period_find below.
    outfile is the path the figure is saved to (returned on success).
    maxlags limits how many leading lags are plotted; yrange sets the
    y-axis limits.  NOTE(review): yrange is a mutable default list; it
    is not mutated here, but a tuple default would be safer.
    '''
    # imported lazily so matplotlib is only required when plotting
    import matplotlib.pyplot as plt

    lags = acfp['acfresults']['lags'][:maxlags]
    smoothedacf = acfp['acf'][:maxlags]
    unsmoothedacf = acfp['acfresults']['acf'][:maxlags]

    # parameters shown in the title: the smoothing kwargs plus the
    # peak-search interval (half of the smoothing window size)
    acfparams = acfp['kwargs']['smoothfunckwargs'].copy()
    acfparams.update({'peakinterval': int(acfp['kwargs']['smoothacf']/2.0)})

    # plot the ACFs
    fig, ax1 = plt.subplots()

    # this is lags vs acf
    ax1.plot(lags, unsmoothedacf, label='unsmoothed ACF',color='#1f77b4')
    ax1.plot(lags, smoothedacf, label='smoothed ACF', color='#ff7f0e')

    ax1.set_xlim((0,maxlags))
    ax1.set_xlabel('lags')

    # overplot the identified peaks as short vertical ticks
    acfmaxinds = acfp['acfpeaks']['maxinds']

    for i, maxind in enumerate(acfmaxinds):
        # only the first tick carries a label so the legend shows one entry
        if i == 0:
            ax1.axvline(maxind,
                        linewidth=2.0,
                        color='red',
                        ymin=0.2, ymax=0.3,
                        label='identified ACF peaks')
        else:
            ax1.axvline(maxind,
                        linewidth=2.0,
                        color='red',
                        ymin=0.2, ymax=0.3)

    plt.ylabel('ACF')
    plt.ylim(yrange)
    ax1.legend()
    plt.title('%s' % repr(acfparams))
    plt.tight_layout()
    plt.savefig(outfile)
    plt.close('all')

    return outfile
############################
## PERIOD FINDER FUNCTION ##
############################
def macf_period_find(
times,
mags,
errs,
fillgaps=0.0,
filterwindow=11,
forcetimebin=None,
maxlags=None,
maxacfpeaks=10,
smoothacf=21, # set for Kepler-type LCs, see details below
smoothfunc=_smooth_acf_savgol,
smoothfunckwargs={},
magsarefluxes=False,
sigclip=3.0,
verbose=True,
periodepsilon=0.1, # doesn't do anything, for consistent external API
nworkers=None, # doesn't do anything, for consistent external API
startp=None, # doesn't do anything, for consistent external API
endp=None, # doesn't do anything, for consistent external API
autofreq=None, # doesn't do anything, for consistent external API
stepsize=None, # doesn't do anything, for consistent external API
):
'''This finds periods using the McQuillan+ (2013a, 2014) method.
Args
----
times, mags, errs are np.arrays.
fillgaps is what to use to fill in gaps in the time series. If this is
'noiselevel', will smooth the light curve using a point window size of
filterwindow (this should be an odd integer), subtract the smoothed LC from
the actual LC and estimate the RMS. This RMS will be used to fill in the
gaps. Other useful values here are 0.0, and np.nan.
forcetimebin is used to force a particular cadence in the light curve other
than the automatically determined cadence. This effectively rebins the light
curve to this cadence.
maxlags is maximum number of lags to calculate. If None, will calculate all
lags.
maxacfpeaks is the maximum number of ACF peaks to use when finding the
highest peak and obtaining a fit period.
smoothacf is the number of points to use as the window size when smoothing
the acf with the smoothfunc. This should be an odd integer value. If this is
None, will not smooth the ACF, but this will probably lead to finding
spurious peaks in a generally noisy ACF.
For Kepler, a value between 21 and 51 seems to work fine. For ground based
data, much larger values may be necessary: between 1001 and 2001 seem to
work best for the HAT surveys. This is dependent on cadence, RMS of the
light curve, the periods of the objects you're looking for, and finally, any
correlated noise in the light curve. Make a plot of the smoothed/unsmoothed
ACF vs. lag using the result dict of this function and the plot_acf_results
function above to see the identified ACF peaks and what kind of smoothing
might be needed.
The value of smoothacf will also be used to figure out the interval to use
when searching for local peaks in the ACF: this interval is 1/2 of the
smoothacf value.
smoothfunc is a function to use when smoothing the ACF. This should take at
least one kwarg: 'windowsize'. Other kwargs can be passed in using a dict
provided in smoothfunckwargs. By default, this uses a Savitsky-Golay filter,
a Gaussian filter is also provided but not used. Another good option would
be an actual low-pass filter (generated using scipy.signal?) to remove all
high frequency noise from the ACF.
magsarefluxes is True if the measurements provided in mags are actually
fluxes, False otherwise.
sigclip is the sigma to use when sigma-clipping the magnitude time series.
Returns
-------
Returns a dictionary with results. dict['bestperiod'] is the estimated best
period and dict['fitperiodrms'] is its estimated error. Other interesting
things in the output include:
- dict['acfresults']: all results from calculating the ACF. in particular,
the unsmoothed ACF might be of interest: dict['acfresults']['acf'] and
dict['acfresults']['lags'].
- dict['lags'] and dict['acf'] contain the ACF after smoothing was applied.
- dict['periods'] and dict['lspvals'] can be used to construct a
pseudo-periodogram.
- dict['naivebestperiod'] is obtained by multiplying the lag at the highest
ACF peak with the cadence. This is usually close to the fit period
(dict['fitbestperiod']), which is calculated by doing a fit to the lags
vs. peak index relation as in McQuillan+ 2014.
'''
# get the ACF
acfres = autocorr_magseries(
times,
mags,
errs,
maxlags=maxlags,
fillgaps=fillgaps,
forcetimebin=forcetimebin,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
filterwindow=filterwindow,
verbose=verbose
)
xlags = acfres['lags']
# smooth the ACF if requested
if smoothacf and isinstance(smoothacf, int) and smoothacf > 0:
sfkwargs = smoothfunckwargs.copy()
sfkwargs.update({'windowsize':smoothacf})
xacf = smoothfunc(acfres['acf'], **sfkwargs)
else:
xacf = acfres['acf']
# get the relative peak heights and fit best lag
peakres = _get_acf_peakheights(xlags, xacf, npeaks=maxacfpeaks,
searchinterval=int(smoothacf/2))
# this is the best period's best ACF peak height
bestlspval = peakres['bestpeakheight']
try:
# get the fit best lag from a linear fit to the peak index vs time(peak
# lag) function as in McQuillan+ (2014)
fity = np.concatenate((
[0.0, peakres['bestlag']],
peakres['relpeaklags'][peakres['relpeaklags'] > peakres['bestlag']]
))
fity = fity*acfres['cadence']
fitx = np.arange(fity.size)
fitcoeffs, fitcovar = np.polyfit(fitx, fity, 1, cov=True)
# fit best period is the gradient of fit
fitbestperiod = fitcoeffs[0]
bestperiodrms = np.sqrt(fitcovar[0,0]) # from the covariance matrix
except:
LOGWARNING('linear fit to time at each peak lag '
'value vs. peak number failed, '
'naively calculated ACF period may not be accurate')
fitcoeffs = np.array([np.nan, np.nan])
fitcovar = np.array([[np.nan, np.nan], [np.nan, np.nan]])
fitbestperiod = np.nan
bestperiodrms = np.nan
raise
# calculate the naive best period using delta_tau = lag * cadence
naivebestperiod = peakres['bestlag']*acfres['cadence']
if fitbestperiod < naivebestperiod:
LOGWARNING('fit bestperiod = %.5f may be an alias, '
'naively calculated bestperiod is = %.5f' %
(fitbestperiod, naivebestperiod))
if np.isfinite(fitbestperiod):
bestperiod = fitbestperiod
else:
bestperiod = naivebestperiod
return {'bestperiod':bestperiod,
'bestlspval':bestlspval,
'nbestpeaks':maxacfpeaks,
# for compliance with the common pfmethod API
'nbestperiods':np.concatenate([
[fitbestperiod],
peakres['relpeaklags'][1:maxacfpeaks]*acfres['cadence']
]),
'nbestlspvals':peakres['maxacfs'][:maxacfpeaks],
'lspvals':xacf,
'periods':xlags*acfres['cadence'],
'acf':xacf,
'lags':xlags,
'method':'acf',
'naivebestperiod':naivebestperiod,
'fitbestperiod':fitbestperiod,
'fitperiodrms':bestperiodrms,
'periodfitcoeffs':fitcoeffs,
'periodfitcovar':fitcovar,
'kwargs':{'maxlags':maxlags,
'maxacfpeaks':maxacfpeaks,
'fillgaps':fillgaps,
'filterwindow':filterwindow,
'smoothacf':smoothacf,
'smoothfunckwargs':sfkwargs,
'magsarefluxes':magsarefluxes,
'sigclip':sigclip},
'acfresults':acfres,
'acfpeaks':peakres}
|
{"hexsha": "bcd27194ea87e848aff0c6d332fb70fc52371173", "size": 15206, "ext": "py", "lang": "Python", "max_stars_repo_path": "astrobase/periodbase/macf.py", "max_stars_repo_name": "adrn/astrobase", "max_stars_repo_head_hexsha": "7af71167deec58dffc8f668c0b34cb75ed44ae6a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "astrobase/periodbase/macf.py", "max_issues_repo_name": "adrn/astrobase", "max_issues_repo_head_hexsha": "7af71167deec58dffc8f668c0b34cb75ed44ae6a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "astrobase/periodbase/macf.py", "max_forks_repo_name": "adrn/astrobase", "max_forks_repo_head_hexsha": "7af71167deec58dffc8f668c0b34cb75ed44ae6a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6133056133, "max_line_length": 80, "alphanum_fraction": 0.6215967381, "include": true, "reason": "import numpy,from numpy,from scipy,from astropy", "num_tokens": 3968}
|
import joblib
import numpy as np
from tqdm import tqdm
import torch
from torch.utils.data import TensorDataset, DataLoader
from torch import nn, optim
import matplotlib.pyplot as plt

# Load pre-computed feature matrices and label vectors for each split.
X_train = joblib.load('ch08/X_train.joblib')
y_train = joblib.load('ch08/y_train.joblib')
X_train = torch.from_numpy(X_train.astype(np.float32)).clone()
y_train = torch.from_numpy(y_train.astype(np.int64)).clone()
X_valid = joblib.load('ch08/X_valid.joblib')
y_valid = joblib.load('ch08/y_valid.joblib')
X_valid = torch.from_numpy(X_valid.astype(np.float32)).clone()
y_valid = torch.from_numpy(y_valid.astype(np.int64)).clone()
X_test = joblib.load('ch08/X_test.joblib')
y_test = joblib.load('ch08/y_test.joblib')
X_test = torch.from_numpy(X_test.astype(np.float32)).clone()
y_test = torch.from_numpy(y_test.astype(np.int64)).clone()

X = X_train
y = y_train
X = X.to('cuda:0')
y = y.to('cuda:0')
# BUG FIX: the model is moved to cuda:0 below, but the validation tensors were
# left on the CPU, so net(X_valid) raised a device-mismatch RuntimeError.
X_valid = X_valid.to('cuda:0')
y_valid = y_valid.to('cuda:0')

ds = TensorDataset(X, y)
# Single-layer linear classifier over 4 classes.
net = nn.Linear(X.size()[1], 4)
net = net.to('cuda:0')
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01)

batchSize = [1, 2, 4, 8]
for bs in batchSize:
    # NOTE(review): net/optimizer are NOT re-initialised per batch size, so
    # each run continues from the previous run's weights — confirm intended.
    loader = DataLoader(ds, batch_size=bs, shuffle=True)
    train_losses = []
    valid_losses = []
    train_accs = []
    valid_accs = []
    for epoc in tqdm(range(100)):
        train_running_loss = 0.0
        valid_running_loss = 0.0
        for xx, yy in loader:
            y_pred = net(xx)
            loss = loss_fn(y_pred, yy)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_running_loss += loss.item()
            # Validation loss accumulated once per mini-batch (as originally).
            valid_running_loss += loss_fn(net(X_valid), y_valid).item()
        # Snapshot the weights after every epoch.
        joblib.dump(net.state_dict(), f'ch08/state_dict_{epoc}.joblib')
        train_losses.append(train_running_loss)
        valid_losses.append(valid_running_loss)
        _, y_pred_train = torch.max(net(X), 1)
        train_accs.append((y_pred_train == y).sum().item() / len(y))
        _, y_pred_valid = torch.max(net(X_valid), 1)
        valid_accs.append((y_pred_valid == y_valid).sum().item() / len(y_valid))
    # Per-batch-size learning curves.
    plt.plot(train_losses, label='train loss')
    plt.plot(valid_losses, label='valid loss')
    plt.legend()
    plt.show()
    plt.plot(train_accs, label='train acc')
    plt.plot(valid_accs, label='valid acc')
    plt.legend()
    plt.show()
|
{"hexsha": "f4c1da7a7651fcab65ce74b12790b7c67627aeda", "size": 2282, "ext": "py", "lang": "Python", "max_stars_repo_path": "ch08/ans78.py", "max_stars_repo_name": "upura/nlp100v2020", "max_stars_repo_head_hexsha": "37d4d208d5d527d163356793b630f36eb7595779", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 66, "max_stars_repo_stars_event_min_datetime": "2020-04-07T13:27:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-10T10:43:08.000Z", "max_issues_repo_path": "ch08/ans78.py", "max_issues_repo_name": "upura/nlp100v2020", "max_issues_repo_head_hexsha": "37d4d208d5d527d163356793b630f36eb7595779", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-30T21:11:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-13T02:33:08.000Z", "max_forks_repo_path": "ch08/ans78.py", "max_forks_repo_name": "upura/nlp100v2020", "max_forks_repo_head_hexsha": "37d4d208d5d527d163356793b630f36eb7595779", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-04-10T16:26:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T06:17:22.000Z", "avg_line_length": 29.2564102564, "max_line_length": 80, "alphanum_fraction": 0.67572305, "include": true, "reason": "import numpy", "num_tokens": 603}
|
import numpy as np
import pandas as pd
from pathlib import Path
import libs.dirs as dirs
import libs.utils as utils
import libs.dataset_utils as dutils
from libs.index import IndexManager

# Add FrameHash and FramePath columns to an interface-style csv annotations file.
iterFolder = Path(dirs.iter_folder)
datasetPath = iterFolder / "full_dataset/iteration_1/sampled_images"
sampledIndexPath = iterFolder / "full_dataset/iteration_1/olavo_uniformsampling_4676_corrections_train_val_split.csv"
newLabeledIndexPath = iterFolder / "full_dataset/iteration_1/sampled_images_iteration_1.csv"


def _add_folder_path(path):
    # Prefix the dataset folder onto a relative image path.
    return str(datasetPath / Path(path))


# Load the sampled-images index and derive absolute frame paths.
indexSampled = IndexManager(sampledIndexPath)
indexSampled.index["FramePath"] = indexSampled.index["imagem"].map(_add_folder_path)

# Hash every frame (returns elapsed time) and show a quick sanity preview.
eTime = indexSampled.compute_frame_hashes(reference_column="FramePath")
print(eTime)
print(indexSampled.index.loc[:20, "FrameHash"])
print(indexSampled.index.shape)
print(indexSampled.index.head())

# indexSampled.index.set_index('FrameHash', drop=False, inplace=True)
# indexSampled.index.reset_index(drop=True, inplace=True)

indexSampled.write_index(dest_path=newLabeledIndexPath, make_backup=False, prompt=False)
|
{"hexsha": "891b02926e7d74cc4a12ba05108743e149510151", "size": 1373, "ext": "py", "lang": "Python", "max_stars_repo_path": "run/old_scripts/process_annotations_file.py", "max_stars_repo_name": "olavosamp/semiauto-video-annotation", "max_stars_repo_head_hexsha": "b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run/old_scripts/process_annotations_file.py", "max_issues_repo_name": "olavosamp/semiauto-video-annotation", "max_issues_repo_head_hexsha": "b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2019-07-15T21:49:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-09T14:35:03.000Z", "max_forks_repo_path": "run/old_scripts/process_annotations_file.py", "max_forks_repo_name": "olavosamp/semiauto-video-annotation", "max_forks_repo_head_hexsha": "b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3823529412, "max_line_length": 131, "alphanum_fraction": 0.7625637291, "include": true, "reason": "import numpy", "num_tokens": 309}
|
import os
import collections
import yaml
import numpy as np
import torch
import gtn
from mathtools import utils, metrics, torchutils
from seqtools import fstutils_gtn as libfst
def sampleGT(transition_probs, initial_probs):
cur_state = np.random.choice(initial_probs.shape[0], p=initial_probs)
gt_seq = [cur_state]
while True:
transitions = transition_probs[cur_state, :]
cur_state = np.random.choice(transitions.shape[0], p=transitions)
if cur_state == transitions.shape[0] - 1:
return np.array(gt_seq)
gt_seq.append(cur_state)
def sampleScores(gt_seq, num_states):
    """Draw uniform-random transition scores for a sampled label sequence.

    score[i, j, k] := weight(sample i | state j -> state k)

    :param gt_seq: ground-truth state sequence (only its length is used).
    :param num_states: number of states k.
    :return: ndarray of shape (len(gt_seq) - 1, num_states, num_states) in [0, 1).
    """
    shape = (len(gt_seq) - 1, num_states, num_states)
    return np.random.random_sample(size=shape)
def samplePair(transition_probs, initial_probs):
    """Draw one (label sequence, score tensor) training pair.

    :param transition_probs: (k, k+1) row-stochastic transition matrix.
    :param initial_probs: (k,) initial state distribution.
    :return: tuple (gt_seq, score_seq) from sampleGT / sampleScores.
    """
    labels = sampleGT(transition_probs, initial_probs)
    scores = sampleScores(labels, initial_probs.shape[0])
    return labels, scores
def simulate(num_samples, transition, initial, final):
    """Build a simulated dataset of (label sequence, score tensor) pairs.

    Appends ``final`` as an extra stop column to ``transition`` and normalises
    both the transition rows and the initial distribution before sampling.

    :param num_samples: number of pairs to draw.
    :param transition: (k, k) unnormalised transition weights.
    :param initial: (k,) unnormalised initial weights.
    :param final: (k,) unnormalised stop weights.
    :return: tuple of ``num_samples`` (gt_seq, score_seq) pairs.
    """
    augmented = np.hstack((transition, final[:, None]))
    augmented = augmented / augmented.sum(axis=1)[:, None]
    start = initial.copy()
    start = start / start.sum()
    pairs = []
    for _ in range(num_samples):
        pairs.append(samplePair(augmented, start))
    return tuple(pairs)
def main(
        out_dir=None, gpu_dev_id=None,
        num_samples=10, random_seed=None,
        learning_rate=1e-3, num_epochs=500,
        dataset_kwargs={}, dataloader_kwargs={}, model_kwargs={}):
    """Train a GTN lattice CRF on a small simulated Markov-chain dataset.

    Simulates label/score sequences, builds a ``libfst.LatticeCrf`` model,
    draws its FSTs before and after training, and writes figures and logs
    under ``out_dir``.

    :param out_dir: output root; defaults to ~/data/output/seqtools/test_gtn.
    :param gpu_dev_id: unused in this function (accepted for config parity).
    :param num_samples: number of simulated sequences to generate.
    :param random_seed: unused in this function (accepted for config parity).
    :param learning_rate: SGD learning rate.
    :param num_epochs: number of training epochs.
    :param dataset_kwargs: forwarded to torchutils.SequenceDataset.
    :param dataloader_kwargs: forwarded to torch DataLoader.
    :param model_kwargs: forwarded to libfst.LatticeCrf.
    """
    # Resolve and create the output directories.
    if out_dir is None:
        out_dir = os.path.join('~', 'data', 'output', 'seqtools', 'test_gtn')
    out_dir = os.path.expanduser(out_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    fig_dir = os.path.join(out_dir, 'figures')
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir)
    # Ground-truth chain: 5 symbols, hand-written transition/initial/final weights.
    vocabulary = ['a', 'b', 'c', 'd', 'e']
    transition = np.array(
        [[0, 1, 0, 0, 0],
         [0, 0, 1, 1, 0],
         [0, 0, 0, 0, 1],
         [0, 1, 0, 0, 1],
         [0, 0, 0, 0, 0]], dtype=float
    )
    initial = np.array([1, 0, 1, 0, 0], dtype=float)
    final = np.array([0, 1, 0, 0, 1], dtype=float) / 10
    seq_params = (transition, initial, final)
    simulated_dataset = simulate(num_samples, *seq_params)
    label_seqs, obsv_seqs = tuple(zip(*simulated_dataset))
    # Convert probabilities to negative log weights (tropical semiring).
    seq_params = tuple(map(lambda x: -np.log(x), seq_params))
    dataset = torchutils.SequenceDataset(obsv_seqs, label_seqs, **dataset_kwargs)
    data_loader = torch.utils.data.DataLoader(dataset, **dataloader_kwargs)
    # Train and validate on the same loader (this is a smoke test, not an eval).
    train_loader = data_loader
    val_loader = data_loader
    # Initialise the CRF's weights from the true chain parameters (log-space).
    transition_weights = torch.tensor(transition, dtype=torch.float).log()
    initial_weights = torch.tensor(initial, dtype=torch.float).log()
    final_weights = torch.tensor(final, dtype=torch.float).log()
    model = libfst.LatticeCrf(
        vocabulary,
        transition_weights=transition_weights,
        initial_weights=initial_weights, final_weights=final_weights,
        debug_output_dir=fig_dir,
        **model_kwargs
    )
    # Render the model's FSTs before training for visual inspection.
    gtn.draw(
        model._transition_fst, os.path.join(fig_dir, 'transitions-init.png'),
        isymbols=model._arc_symbols, osymbols=model._arc_symbols
    )
    gtn.draw(
        model._duration_fst, os.path.join(fig_dir, 'durations-init.png'),
        isymbols=model._arc_symbols, osymbols=model._arc_symbols
    )
    # Debug pass: manually compose the FSTs for the first batch and inspect
    # the loss; drops into pdb on every iteration (intentional debug scaffold).
    if True:
        for i, (inputs, targets, seq_id) in enumerate(train_loader):
            arc_scores = model.scores_to_arc(inputs)
            arc_labels = model.labels_to_arc(targets)
            # NOTE: this shadows the ``num_samples`` parameter inside the loop.
            batch_size, num_samples, num_classes = arc_scores.shape
            obs_fst = libfst.linearFstFromArray(arc_scores[0].reshape(num_samples, -1))
            gt_fst = libfst.fromSequence(arc_labels[0])
            # Denominator lattice: observations o durations o transitions.
            d1_fst = gtn.compose(obs_fst, model._duration_fst)
            d1_fst = gtn.project_output(d1_fst)
            denom_fst = gtn.compose(d1_fst, model._transition_fst)
            # denom_fst = gtn.project_output(denom_fst)
            # Numerator lattice: denominator constrained to the gold labels.
            num_fst = gtn.compose(denom_fst, gt_fst)
            viterbi_fst = gtn.viterbi_path(denom_fst)
            pred_fst = gtn.remove(gtn.project_output(viterbi_fst))
            # Negative log-likelihood: forward(numerator) - forward(denominator).
            loss = gtn.subtract(gtn.forward_score(num_fst), gtn.forward_score(denom_fst))
            loss = torch.tensor(loss.item())
            if torch.isinf(loss).any():
                # Infinite loss: rebuild alternative lattices and dump every
                # intermediate FST to disk for debugging.
                denom_alt = gtn.compose(obs_fst, model._transition_fst)
                d1_min = gtn.remove(gtn.project_output(d1_fst))
                denom_alt = gtn.compose(d1_min, model._transition_fst)
                num_alt = gtn.compose(denom_alt, gt_fst)
                gtn.draw(
                    obs_fst, os.path.join(fig_dir, 'observations-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    gt_fst, os.path.join(fig_dir, 'labels-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    d1_fst, os.path.join(fig_dir, 'd1-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    d1_min, os.path.join(fig_dir, 'd1-min-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    denom_fst, os.path.join(fig_dir, 'denominator-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    denom_alt, os.path.join(fig_dir, 'denominator-alt-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    num_fst, os.path.join(fig_dir, 'numerator-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    num_alt, os.path.join(fig_dir, 'numerator-alt-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    viterbi_fst, os.path.join(fig_dir, 'viterbi-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
                gtn.draw(
                    pred_fst, os.path.join(fig_dir, 'pred-init.png'),
                    isymbols=model._arc_symbols, osymbols=model._arc_symbols
                )
            import pdb; pdb.set_trace()
    # Train the model
    train_epoch_log = collections.defaultdict(list)
    val_epoch_log = collections.defaultdict(list)
    metric_dict = {
        'Avg Loss': metrics.AverageLoss(),
        'Accuracy': metrics.Accuracy()
    }
    criterion = model.nllLoss
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    # gamma=1.00 keeps the learning rate constant.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=1.00)
    model, last_model_wts = torchutils.trainModel(
        model, criterion, optimizer, scheduler, train_loader,
        val_loader,
        metrics=metric_dict,
        test_metric='Avg Loss',
        train_epoch_log=train_epoch_log,
        val_epoch_log=val_epoch_log,
        num_epochs=num_epochs
    )
    # Render the trained FSTs and the training curves.
    gtn.draw(
        model._transition_fst, os.path.join(fig_dir, 'transitions-trained.png'),
        isymbols=model._arc_symbols, osymbols=model._arc_symbols
    )
    gtn.draw(
        model._duration_fst, os.path.join(fig_dir, 'durations-trained.png'),
        isymbols=model._arc_symbols, osymbols=model._arc_symbols
    )
    torchutils.plotEpochLog(
        train_epoch_log, title="Train Epoch Log",
        fn=os.path.join(fig_dir, "train-log.png")
    )
if __name__ == "__main__":
    # Parse command-line args and the config file.
    cl_args = utils.parse_args(main)
    config, config_fn = utils.parse_config(cl_args, script_name=__file__)

    # Create the output directory, write the resolved config next to the
    # results, and keep a copy of this script for provenance.
    out_dir = os.path.expanduser(config['out_dir'])
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, config_fn), 'w') as outfile:
        yaml.dump(config, outfile)
    utils.copyFile(__file__, out_dir)

    main(**config)
|
{"hexsha": "02b59eb8245537d9a79b84ff0d2bd0fbf1011328", "size": 8491, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_gtn.py", "max_stars_repo_name": "jd-jones/seqtools", "max_stars_repo_head_hexsha": "280e2fe1d8a925e03f436d73ff81ab638a4ce7b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-19T17:00:18.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-19T17:00:18.000Z", "max_issues_repo_path": "tests/test_gtn.py", "max_issues_repo_name": "jd-jones/seqtools", "max_issues_repo_head_hexsha": "280e2fe1d8a925e03f436d73ff81ab638a4ce7b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_gtn.py", "max_forks_repo_name": "jd-jones/seqtools", "max_forks_repo_head_hexsha": "280e2fe1d8a925e03f436d73ff81ab638a4ce7b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7575757576, "max_line_length": 89, "alphanum_fraction": 0.6254858085, "include": true, "reason": "import numpy", "num_tokens": 2033}
|
import copy
import os
from argparse import ArgumentParser
import xml.etree.ElementTree as ET
import numpy as np
from pycocotools.coco import COCO
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from mmdet.datasets.builder import build_dataset
from tqdm import tqdm
import mmcv
from mmcv import Config, DictAction
def iou(box, clusters):
    """Compute the IoU between one ground-truth box and k anchor boxes.

    Boxes are compared as if sharing a corner, so the overlap per anchor is
    min(widths) * min(heights).

    Args:
        box: sequence (w, h) of the ground-truth box.
        clusters: ndarray of shape (k, 2) holding anchor widths and heights.

    Returns:
        ndarray of shape (k,) with the IoU against each anchor.

    Raises:
        ValueError: if any overlap has zero width or height.
    """
    overlap_w = np.minimum(clusters[:, 0], box[0])
    overlap_h = np.minimum(clusters[:, 1], box[1])
    if np.count_nonzero(overlap_w == 0) > 0 or np.count_nonzero(overlap_h == 0) > 0:
        raise ValueError("Box has no area")
    inter = overlap_w * overlap_h
    union = box[0] * box[1] + clusters[:, 0] * clusters[:, 1] - inter
    return inter / union
def avg_iou(boxes, clusters):
    """Mean, over all ground-truth boxes, of the best IoU against the anchors.

    Args:
        boxes: ndarray (r, 2) of ground-truth (w, h) pairs.
        clusters: ndarray (k, 2) of anchor (w, h) pairs.

    Returns:
        float mean of the per-box maximum IoU.
    """
    best_per_box = [np.max(iou(boxes[i], clusters)) for i in range(boxes.shape[0])]
    return np.mean(best_per_box)
class kMean_parse:
    """K-means helper that clusters ground-truth box aspect ratios and sizes.

    ``parse_data`` runs two independent 1-D K-means fits over (w, h) data:
    one on the aspect ratio h/w and one on the diagonal size sqrt(w^2 + h^2).
    """

    def __init__(self, ration_n_clusters, size_n_clusters, data):
        # data: ndarray of shape (N, 2) holding (w, h) per ground-truth box.
        self.ration_n_clusters = ration_n_clusters
        self.size_n_clusters = size_n_clusters
        self.ratio_km = KMeans(n_clusters=self.ration_n_clusters, init="k-means++", n_init=10, max_iter=3000000, tol=1e-3, random_state=0)
        self.size_km = KMeans(n_clusters=self.size_n_clusters, init="k-means++", n_init=10, max_iter=3000000, tol=1e-3, random_state=0)
        self.data = data

    def parse_data(self):
        """Fit both 1-D clusterings and print the sorted cluster centres."""
        # Cluster aspect ratios h/w.
        self.one_data = (self.data[:, 1] / self.data[:, 0]).reshape(-1, 1)
        # print(self.data.shape, self.one_data.shape)
        self.y_k = self.ratio_km.fit_predict(self.one_data)
        print('ratio: ', sorted(self.ratio_km.cluster_centers_))
        # Cluster diagonal sizes sqrt(w^2 + h^2); the printed value converts a
        # diagonal s back to the side of a square anchor (w = h = s / sqrt(2)).
        self.one_data = np.sqrt((self.data[:, 1] ** 2 + self.data[:, 0] ** 2).reshape(-1, 1))
        self.y_k = self.size_km.fit_predict(self.one_data)
        print('size: ', sorted(np.sqrt(self.size_km.cluster_centers_ ** 2 / 2)))

    def plot_data(self):
        """Scatter the (w, h) points coloured by the size-cluster assignment.

        BUG FIX: the original referenced ``self.n_clusters`` and ``self.km``,
        neither of which is ever defined, so this method always raised
        AttributeError. It now uses the size clustering, which is the fit
        whose labels ``parse_data`` leaves in ``self.y_k``.
        """
        cValue = ['orange', 'r', 'y', 'green', 'b', 'gray', 'black', 'purple', 'brown', 'tan']
        for i in range(self.size_n_clusters):
            plt.scatter(self.data[self.y_k == i, 0], self.data[self.y_k == i, 1], s=50, c=cValue[i % len(cValue)], marker="o",
                        label="cluster " + str(i))
        # Centres are 1-D diagonal sizes; place them on the w == h diagonal,
        # using the same s/sqrt(2) convention that parse_data prints.
        centers = np.sqrt(self.size_km.cluster_centers_[:, 0] ** 2 / 2)
        plt.scatter(centers, centers, s=250, marker="*", c="red", label="cluster center")
        plt.legend()
        plt.grid()
        plt.show()
def Iou_Kmeans(boxes, k, dist=np.median):
    """K-means clustering of ground-truth boxes with 1 - IoU as the distance.

    Args:
        boxes: ndarray (r, 2) of ground-truth (w, h) pairs, r = box count.
        k: number of anchors (clusters) to produce.
        dist: aggregation used to update each cluster centre (default median).

    Returns:
        ndarray (k, 2) of anchor (w, h) pairs.
    """
    num_boxes = boxes.shape[0]
    # distances[i, c] = 1 - IoU(box i, cluster c).
    distances = np.empty((num_boxes, k))
    # Cluster assignment from the previous iteration.
    last_assignment = np.zeros((num_boxes,))
    # Re-seed the RNG (no fixed seed, as in the original).
    np.random.seed()
    # Initialise centres by drawing k distinct ground-truth boxes.
    clusters = boxes[np.random.choice(num_boxes, k, replace=False)]
    while True:
        for i in range(num_boxes):
            distances[i] = 1 - iou(boxes[i], clusters)
        # Assign each box to its nearest cluster.
        assignment = np.argmin(distances, axis=1)
        # Converged once no assignment changed.
        if (last_assignment == assignment).all():
            return clusters
        # Update each centre with dist() over its member boxes.
        # NOTE(review): an empty cluster would make dist() return NaN — same
        # as the original; confirm k is small enough that this cannot happen.
        for c in range(k):
            clusters[c] = dist(boxes[assignment == c], axis=0)
        last_assignment = assignment
def id2name(coco):
    """Build the category-id -> name mapping from a COCO-style object.

    Args:
        coco: object exposing ``dataset['categories']`` as a list of dicts
            with 'id' and 'name' keys.

    Returns:
        (classes, classes_id): dict {id: name} and the list of ids in
        insertion order.
    """
    classes = {cat['id']: cat['name'] for cat in coco.dataset['categories']}
    classes_id = list(classes.keys())
    return classes, classes_id
def retrieve_data_cfg(config_path, skip_type, cfg_options):
    """Load an mmcv config and strip selected steps from the train pipeline.

    Args:
        config_path: path to the mmcv config file.
        skip_type: collection of pipeline-step type names to remove.
        cfg_options: optional dict of overrides merged into the config.

    Returns:
        The loaded (and modified in place) Config object.
    """
    cfg = Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    # Import any modules the config requests by name.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    train_data_cfg = cfg.data.train
    # RepeatDataset wraps the real dataset config; unwrap it.
    if train_data_cfg.type == 'RepeatDataset':
        train_data_cfg = train_data_cfg.dataset
    train_data_cfg['pipeline'] = [
        step for step in train_data_cfg.pipeline if step['type'] not in skip_type
    ]
    return cfg
def load_dataset(cfg):
    """Collect the (w, h) of every ground-truth box in the training dataset.

    Args:
        cfg: mmcv Config whose ``data.train`` describes the dataset.

    Returns:
        ndarray of shape (total_boxes, 2) with per-box width and height.
    """
    dataset = build_dataset(cfg.data.train)
    all_wh = np.zeros((0, 2))
    for idx in tqdm(range(len(dataset))):
        sample = dataset.__getitem__(idx)
        # gt_bboxes rows are (x1, y1, x2, y2); convert to (w, h).
        wh = sample['gt_bboxes'][:, 2:] - sample['gt_bboxes'][:, :2]
        all_wh = np.concatenate((all_wh, wh), axis=0)
    return all_wh
def main():
    """CLI driver: cluster dataset box ratios and sizes with K-means."""
    parser = ArgumentParser(description='COCO Dataset Analysis Tool')
    parser.add_argument(
        '--config',
        default='configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py',
        help='config file path')
    parser.add_argument(
        '--ratio_clusters',
        default=3,
        help='config file path')
    parser.add_argument(
        '--size_clusters',
        default=5,
        help='config file path')
    parser.add_argument(
        '--skip-type',
        type=str,
        nargs='+',
        default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some useless pipeline')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()

    # Load the config, gather box (w, h) pairs, and run both clusterings.
    cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
    box_wh = load_dataset(cfg)
    km = kMean_parse(args.ratio_clusters, args.size_clusters, box_wh)
    km.parse_data()
    # km.plot_data()
    # print('ratio : ', out)
    # print('size', size)
    # anchor = np.array(out) * Inputdim
    # print("Boxes: {} ".format(anchor))
    # print("Accuracy: {:.2f}%".format(avg_iou(data, out) * 100))
    # final_anchors = np.around(out[:, 0] / out[:, 1], decimals=2).tolist()
    # print("Before Sort Ratios:\n {}".format(final_anchors))
    # print("After Sort Ratios:\n {}".format(sorted(final_anchors)))
# Entry point: run the anchor analysis when executed as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "21265edf472462c5af3930f7ef0542a96023e28f", "size": 6951, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/analysis_tools/analyze_dataset.py", "max_stars_repo_name": "jiangwenj02/mmdetection", "max_stars_repo_head_hexsha": "cdc0b7937cd23ee3ab2eef50d002d6cac6956cac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/analysis_tools/analyze_dataset.py", "max_issues_repo_name": "jiangwenj02/mmdetection", "max_issues_repo_head_hexsha": "cdc0b7937cd23ee3ab2eef50d002d6cac6956cac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/analysis_tools/analyze_dataset.py", "max_forks_repo_name": "jiangwenj02/mmdetection", "max_forks_repo_head_hexsha": "cdc0b7937cd23ee3ab2eef50d002d6cac6956cac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.755, "max_line_length": 135, "alphanum_fraction": 0.6439361243, "include": true, "reason": "import numpy", "num_tokens": 2045}
|
import numpy as np
import PIL
from PIL import Image
import os
from torch.utils.data import Dataset
from torchvision import transforms
import torchvision.transforms.functional as TF
def read_labeled_image_list(data_dir, data_list):
    """Reads txt file containing paths to images and ground truth masks.

    Args:
        data_dir: path to the directory with images and masks; it is
            prepended verbatim to every path in the list file.
        data_list: path to the file with lines of the form
            '/path/to/image /path/to/mask'.

    Returns:
        Two lists with all file names for images and masks, respectively.
    """
    images = []
    masks = []
    # BUG FIX: the original opened the file without ever closing it;
    # 'with' guarantees the handle is released.
    with open(data_list, 'r') as f:
        for line in f:
            try:
                image, mask = line.strip("\n").split(' ')
            except ValueError:  # Adhoc for test: line has no separate mask path.
                image = mask = line.strip("\n")
            images.append(data_dir + image)
            masks.append(data_dir + mask)
    return images, masks
class NYUD(Dataset):
    """MultiTaskDataset: NYUD images with two per-pixel label maps
    (label_1: segmentation-style single-channel mask; label_2: 3-channel
    mask, e.g. surface normals), with shared random augmentation."""

    def __init__(self, data_dir, data_list_1, data_list_2, output_size,
                 random_scale, random_mirror, random_crop, color_jitter, ignore_label):
        """
        Initialise an Multitask Dataloader.
        :param data_dir: path to the directory with images and masks.
        :param data_list_1: path to the file with lines of the form '/path/to/image /path/to/mask'.
        :param data_list_2: path to the file with lines of the form '/path/to/image /path/to/mask'.
        :param output_size: a tuple with (height, width) values, to which all the images will be resized to.
        :param random_scale: whether to randomly scale the images.
        :param random_mirror: whether to randomly mirror the images.
        :param random_crop: whether to randomly crop the images.
        :param color_jitter: whether to randomly jitter hue/saturation of the input image.
        :param ignore_label: index of label to ignore during the training.
        """
        super(NYUD, self).__init__()
        self.data_dir = data_dir
        self.data_list_1 = data_list_1
        self.data_list_2 = data_list_2
        self.output_size = output_size
        self.random_scale = random_scale
        self.random_mirror = random_mirror
        self.random_crop = random_crop
        self.color_jitter = color_jitter
        if self.color_jitter:
            self.cj = transforms.ColorJitter(hue=.05, saturation=.05)
        self.ignore_label = ignore_label
        image_list_1, self.label_list_1 = read_labeled_image_list(self.data_dir, self.data_list_1)
        image_list_2, self.label_list_2 = read_labeled_image_list(self.data_dir, self.data_list_2)
        # Both list files must reference the same images (only masks differ).
        assert (image_list_1 == image_list_2)
        self.image_list = image_list_1
        self.to_tensor = transforms.ToTensor()
        # Means are in the 0-255 range; std of 1 leaves values unscaled.
        self.normalize = transforms.Normalize((122.67891434, 116.66876762, 104.00698793), (1., 1., 1.))

    def __len__(self):
        # Number of image/mask triples.
        return len(self.image_list)

    def __getitem__(self, idx):
        # Load the RGB image and the two label maps for this sample.
        image = Image.open(self.image_list[idx])
        label_1 = Image.open(self.label_list_1[idx])
        label_2 = Image.open(self.label_list_2[idx])
        w, h = image.size
        if self.random_scale:
            # Random scale of the shorter side in [0.5x, 1.5x); NEAREST
            # interpolation keeps label values unchanged.
            scale = int(min(w, h) * (np.random.uniform() + 0.5))
            resize_bl = transforms.Resize(size=scale, interpolation=PIL.Image.BILINEAR)
            resize_nn = transforms.Resize(size=scale, interpolation=PIL.Image.NEAREST)
            image = resize_bl(image)
            label_1 = resize_nn(label_1)
            label_2 = resize_nn(label_2)
        if self.random_mirror:
            # Horizontal flip with probability 0.5, applied to all three.
            if np.random.uniform() < 0.5:
                image = TF.hflip(image)
                label_1 = TF.hflip(label_1)
                label_2 = TF.hflip(label_2)
        if self.color_jitter:
            # Jitter only the input image, never the labels.
            image = self.cj(image)
        if self.random_crop:
            # pad the width if needed (labels are padded with ignore_label)
            if image.size[0] < self.output_size[1]:
                image = TF.pad(image, (self.output_size[1] - image.size[0], 0))
                label_1 = TF.pad(label_1, (self.output_size[1] - label_1.size[0], 0), self.ignore_label, 'constant')
                label_2 = TF.pad(label_2, (self.output_size[1] - label_2.size[0], 0),
                                 tuple([self.ignore_label] * 3), 'constant')
            # pad the height if needed
            if image.size[1] < self.output_size[0]:
                image = TF.pad(image, (0, self.output_size[0] - image.size[1]))
                label_1 = TF.pad(label_1, (0, self.output_size[0] - label_1.size[1]), self.ignore_label, 'constant')
                label_2 = TF.pad(label_2, (0, self.output_size[0] - label_2.size[1]),
                                 tuple([self.ignore_label] * 3), 'constant')
            # Same crop window for image and both labels.
            i, j, h, w = transforms.RandomCrop.get_params(
                image, output_size=self.output_size)
            image = TF.crop(image, i, j, h, w)
            label_1 = TF.crop(label_1, i, j, h, w)
            label_2 = TF.crop(label_2, i, j, h, w)
        # NOTE(review): the -255/+255 round-trip keeps values in the 0-255
        # range (plain ToTensor on uint8 would rescale to [0, 1]); the
        # Normalize above uses 0-255-range means accordingly — confirm this
        # matches the pretrained backbone's expectations.
        image = self.normalize(self.to_tensor(np.array(image) - 255.).float() + 255.)
        label_1 = self.to_tensor(np.array(label_1) - 255.) + 255.
        label_2 = self.to_tensor(np.array(label_2) - 255.) + 255.
        return image, label_1.long(), label_2.float()
if __name__ == '__main__':
    # Smoke test: build the dataset and fetch one sample.
    # BUG FIX: NYUD.__init__ requires color_jitter (no default), but the
    # original call omitted it and always crashed with TypeError.
    dataset = NYUD(
        data_dir='../datasets/nyud',
        data_list_1='../datasets/nyud/list/testing_seg.txt',
        data_list_2='../datasets/nyud/list/testing_normal_mask.txt',
        output_size=(321, 321),
        random_scale=True,
        random_mirror=True,
        random_crop=True,
        color_jitter=False,
        ignore_label=255,
    )
    img, lb_1, lb_2 = dataset[0]
    print(img.size())
|
{"hexsha": "3ddc882ddf68c547e0a77eb0bf1a8cb492269ace", "size": 5583, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/nyud/NYUD.py", "max_stars_repo_name": "slyviacassell/Multi-taks-UNITE", "max_stars_repo_head_hexsha": "a010a92c94c0ee0f1ffed27df6d89da58d6d34c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/nyud/NYUD.py", "max_issues_repo_name": "slyviacassell/Multi-taks-UNITE", "max_issues_repo_head_hexsha": "a010a92c94c0ee0f1ffed27df6d89da58d6d34c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/nyud/NYUD.py", "max_forks_repo_name": "slyviacassell/Multi-taks-UNITE", "max_forks_repo_head_hexsha": "a010a92c94c0ee0f1ffed27df6d89da58d6d34c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5957446809, "max_line_length": 116, "alphanum_fraction": 0.6181264553, "include": true, "reason": "import numpy", "num_tokens": 1397}
|
'''Example: comparing OLS and RLM.

Robust estimators and outliers: RLM is less influenced by outliers than
OLS, so its estimated slope stays close to the true slope instead of
being tilted by the contaminated points.
'''

import numpy as np
#from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std

# Fix a seed so the example is reproducible.
np.random.seed(98765789)

n_obs = 50
x = np.linspace(0, 20, n_obs)
design = np.c_[x, np.ones(n_obs)]
noise_sd = 0.3  # smaller error variance makes OLS<->RLM contrast bigger
true_beta = [0.5, 5.]
y_true = np.dot(design, true_beta)
y = y_true + noise_sd * 1. * np.random.normal(size=n_obs)
y[[39, 41, 43, 45, 48]] -= 5  # contaminate 10% of the sample with outliers

# Plot the data together with the true regression line.
plt.figure()
plt.plot(x, y, 'o', x, y_true, 'b-')

# Ordinary least squares fit.
ols_fit = sm.OLS(y, design).fit()
print("OLS: parameter estimates: slope, constant")
print(ols_fit.params)
print("standard deviation of parameter estimates")
print(ols_fit.bse)
prstd, iv_l, iv_u = wls_prediction_std(ols_fit)
plt.plot(x, ols_fit.fittedvalues, 'r-')
plt.plot(x, iv_u, 'r--')
plt.plot(x, iv_l, 'r--')

# Robust fit for comparison.
rlm_fit = sm.RLM(y, design).fit()
print("\nRLM: parameter estimates: slope, constant")
print(rlm_fit.params)
print("standard deviation of parameter estimates")
print(rlm_fit.bse)
plt.plot(x, rlm_fit.fittedvalues, 'g.-')
plt.title('Data with Outliers; blue: true, red: OLS, green: RLM')

# see also help(sm.RLM.fit) for more options and
# module sm.robust.scale for scale options
plt.show()
|
{"hexsha": "107ad720850224085b3096258a2a3c273861470e", "size": 1611, "ext": "py", "lang": "Python", "max_stars_repo_path": "statsmodels/examples/tut_ols_rlm_short.py", "max_stars_repo_name": "madhushree14/statsmodels", "max_stars_repo_head_hexsha": "04f00006a7aeb1c93d6894caa420698400da6c33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6931, "max_stars_repo_stars_event_min_datetime": "2015-01-01T11:41:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:03:24.000Z", "max_issues_repo_path": "statsmodels/examples/tut_ols_rlm_short.py", "max_issues_repo_name": "madhushree14/statsmodels", "max_issues_repo_head_hexsha": "04f00006a7aeb1c93d6894caa420698400da6c33", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6137, "max_issues_repo_issues_event_min_datetime": "2015-01-01T00:33:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:53:17.000Z", "max_forks_repo_path": "statsmodels/examples/tut_ols_rlm_short.py", "max_forks_repo_name": "madhushree14/statsmodels", "max_forks_repo_head_hexsha": "04f00006a7aeb1c93d6894caa420698400da6c33", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2608, "max_forks_repo_forks_event_min_datetime": "2015-01-02T21:32:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T07:38:30.000Z", "avg_line_length": 25.5714285714, "max_line_length": 69, "alphanum_fraction": 0.7262569832, "include": true, "reason": "import numpy,from scipy,import statsmodels,from statsmodels", "num_tokens": 490}
|
#' @title get_day
#' @description This function provides the number of days in each month, given a year and month. This script was originally part of the date.picker() function, but was separated since it might be useful on its own
#' @note Leap years are determined with the full Gregorian rule (divisible by 4,
#' except centuries not divisible by 400), so the function is valid for any
#' Gregorian year, not just 1904-2096 as in the original lookup table.
#' @param \code{the.year} YYYY
#' @param \code{the.month} MM
#' @author Mike McMahon, \email{Mike.McMahon@@dfo-mpo.gc.ca}
#' @family date functions
#' @export
get_day<-function(the.year,the.month){
  # Full Gregorian leap-year rule (replaces the hard-coded 1904-2096 table,
  # with identical results inside that range).
  is.leap = (the.year %% 4 == 0 & the.year %% 100 != 0) | (the.year %% 400 == 0)
  if (the.month %in% c(1,3,5,7,8,10,12)){
    the.days = c(1:31)
  } else if (the.month %in% c(4,6,9,11)){
    the.days = c(1:30)
  } else if (is.leap){
    the.days = c(1:29)
  }else{
    the.days = c(1:28)
  }
  return(the.days)
}
|
{"hexsha": "cacfc3bb953177dbcf9849a7ecd1f90851d616b5", "size": 882, "ext": "r", "lang": "R", "max_stars_repo_path": "R/get_day.r", "max_stars_repo_name": "AtlanticR/bio.utilities", "max_stars_repo_head_hexsha": "aaa52cf86afa4ee9e6f46c4516a48d27cc0bfed9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/get_day.r", "max_issues_repo_name": "AtlanticR/bio.utilities", "max_issues_repo_head_hexsha": "aaa52cf86afa4ee9e6f46c4516a48d27cc0bfed9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/get_day.r", "max_forks_repo_name": "AtlanticR/bio.utilities", "max_forks_repo_head_hexsha": "aaa52cf86afa4ee9e6f46c4516a48d27cc0bfed9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.0, "max_line_length": 213, "alphanum_fraction": 0.679138322, "num_tokens": 281}
|
[STATEMENT]
lemma le_multiset_empty_right[simp]: "\<not> M < {#}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> M < {#}
[PROOF STEP]
using subset_mset.le_zero_eq less_multiset_def multp_def less_multiset\<^sub>D\<^sub>M
[PROOF STATE]
proof (prove)
using this:
(?n \<subseteq># {#}) = (?n = {#})
(?M < ?N) = multp (<) ?M ?N
multp ?r ?M ?N = ((?M, ?N) \<in> mult {(x, y). ?r x y})
(?M < ?N) = (\<exists>X Y. X \<noteq> {#} \<and> X \<subseteq># ?N \<and> ?M = ?N - X + Y \<and> (\<forall>k. k \<in># Y \<longrightarrow> (\<exists>a. a \<in># X \<and> k < a)))
goal (1 subgoal):
1. \<not> M < {#}
[PROOF STEP]
by blast
|
{"llama_tokens": 296, "file": null, "length": 2}
|
using JuMP, EAGO
# Machine-generated MINLPLib benchmark instance: a small gelu-activated
# neural network encoded as a JuMP nonlinear constraint (below).
m = Model()
# Register EAGO's extended nonlinear operators (e.g. `gelu`) so they can be
# used inside add_NL_constraint expressions.
EAGO.register_eago_operators!(m)
# Five box-constrained network inputs.
@variable(m, -1 <= x[i=1:5] <= 1)
# Auxiliary variable bounding the network output; its bounds bracket the
# output range of the generated network.
@variable(m, -6.148474362391325 <= q <= 10.677081718106185)
add_NL_constraint(m, :(gelu(-0.2518902526786948 + 0.9847866884384731*gelu(0.2752793536861313 + -0.1568397657479923*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1062303426643516*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + 0.5642457669318222*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + -0.7649756572019379*gelu(0.9918790053549298 + 0.85981905523253*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + 0.9639515148457134*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.08816238542180388*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + 0.10087195663512549*gelu(-0.8202416682813327 + -0.17766510212211006*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1793020696087071*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.41892665263312834*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5])))) + gelu(0.638435553115452 + 
0.09453389081965424*gelu(0.2752793536861313 + -0.1568397657479923*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1062303426643516*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + 0.5642457669318222*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + -0.07927014075141203*gelu(0.9918790053549298 + 0.85981905523253*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + 0.9639515148457134*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.08816238542180388*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + 0.2624391124261729*gelu(-0.8202416682813327 + -0.17766510212211006*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1793020696087071*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.41892665263312834*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5])))) + gelu(0.5273095272255199 + 0.4025366346817978*gelu(0.2752793536861313 + 
-0.1568397657479923*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1062303426643516*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + 0.5642457669318222*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + -0.585290488444234*gelu(0.9918790053549298 + 0.85981905523253*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + 0.9639515148457134*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.08816238542180388*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5]))) + 0.1647823489958915*gelu(-0.8202416682813327 + -0.17766510212211006*gelu(0.860587138591963 + -0.8175452953351838*$(x[1]) + -0.2911974841119127*$(x[2]) + -0.03381280038289569*$(x[3]) + -0.021805451939043596*$(x[4]) + 0.5056820468199099*$(x[5])) + -0.1793020696087071*gelu(0.40215662414311426 + 0.47796901525165936*$(x[1]) + -0.8905174133817706*$(x[2]) + 0.3815447257070974*$(x[3]) + -0.7235559998197827*$(x[4]) + -0.43821768660130234*$(x[5])) + -0.41892665263312834*gelu(-0.371366893645773 + 0.03574445051584929*$(x[1]) + 0.07916257211879385*$(x[2]) + 0.5096612381208421*$(x[3]) + 0.1651890232922466*$(x[4]) + -0.4691295251335359*$(x[5])))) - $q <= 0.0))
# Minimize q, which the nonlinear constraint above forces to upper-bound
# the network output.
@objective(m, Min, q)
# The instance file is `include`d by the benchmark harness; the top-level
# `return` hands the finished model back to the caller.
return m
|
{"hexsha": "ebe7bac421c5b31d6389428baaa9fb92293fa6fe", "size": 6213, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/06_gelu_5_3_3.jl", "max_stars_repo_name": "PSORLab/RSActivationFunctions", "max_stars_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/06_gelu_5_3_3.jl", "max_issues_repo_name": "PSORLab/RSActivationFunctions", "max_issues_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "solver_benchmarking/MINLPLib.jl/instances/ANN_Env/06_gelu_5_3_3.jl", "max_forks_repo_name": "PSORLab/RSActivationFunctions", "max_forks_repo_head_hexsha": "0bf8b4500b21144c076ea958ce93dbdd19a53314", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 365.4705882353, "max_line_length": 5871, "alphanum_fraction": 0.6761628843, "num_tokens": 3065}
|
__author__ = 'ferrard'
# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
import math

import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
# ---------------------------------------------------------------
# Class
# ---------------------------------------------------------------
class GraphPlotter:
    """Collects named single-variable functions and plots them with matplotlib.

    Registered functions can be shown together in one plot, one per window,
    or on a grid of subplots inside a single window.
    """

    # ---------------------------------------------------------------
    # Constants
    # ---------------------------------------------------------------
    # One distinct curve color per registered function.
    COLORS = [
        'gray',
        'blue',
        'red',
        'green',
        'orange',
        'brown',
        'violet'
    ]
    # Every function needs its own color, so the palette size caps the count.
    MAX_FUNCTIONS = len(COLORS)
    # Number of x-samples used to draw each curve.
    PTS_PER_PLOT = 100

    # ---------------------------------------------------------------
    # Initialisation
    # ---------------------------------------------------------------
    def __init__(self):
        # List of (callable, name) pairs, in registration order.
        self._functions = []

    # ---------------------------------------------------------------
    # Interface
    # ---------------------------------------------------------------
    def add_function(self, function, name="untitled"):
        """Adds a new function (with some name) to the graph plotter.

        Raises:
            Exception: if MAX_FUNCTIONS functions are already registered.
        """
        if len(self._functions) >= self.MAX_FUNCTIONS:
            raise Exception("Reached max. number of functions")
        self._functions.append((function, name))

    def plot_on_grid(self, x_from=-1, x_to=1, y_from=None, y_to=None):
        """Plots each function in a separate plot, but displays all the plots in one window"""
        # Smallest square grid that fits all functions ...
        grid_cols = grid_rows = math.ceil(math.sqrt(len(self._functions)))
        # ... minus one row when the last row would be completely empty.
        if grid_cols*(grid_rows - 1) >= len(self._functions):
            grid_rows -= 1
        f = plt.figure(figsize=(40/grid_cols, 30/grid_rows))  # change the size of the picture
        f.subplotpars.update(wspace=0.5, hspace=0.5)  # put some more space between plots on the grid
        for i in range(len(self._functions)):
            plt.subplot(grid_rows, grid_cols, i + 1)
            self.__plot(i, x_from, x_to, y_from, y_to)
        plt.show()

    def plot_together(self, x_from=-1, x_to=1, y_from=None, y_to=None):
        """Plots all the functions together in one plot and one window"""
        for i in range(len(self._functions)):
            self.__plot(i, x_from, x_to, y_from, y_to)
        plt.show()

    def plot_one_by_one(self, x_from=-1, x_to=1, y_from=None, y_to=None):
        """Plots each function in a separate plot, displayed, one by one, each in its own window"""
        for i in range(len(self._functions)):
            self.__plot(i, x_from, x_to, y_from, y_to)
            plt.show()

    # ---------------------------------------------------------------
    # Implementation
    # ---------------------------------------------------------------
    def __plot(self, j, x_from, x_to, y_from, y_to):
        """Makes a plot for the j-th function on the interval x_from .. x_to.

        y_from and y_to may be specified or None, in which case optimal values
        will be determined automatically by matplotlib.
        """
        # get the function and its name
        function = self._functions[j][0]
        name = self._functions[j][1]

        # Sample the x-range, but tolerate points where the function cannot be
        # evaluated: each evaluable run is drawn as its own line segment.
        # np.linspace replaces sp.linspace — the top-level scipy aliases of
        # NumPy functions were deprecated and removed from SciPy.
        x_points_to_try = np.linspace(x_from, x_to, self.PTS_PER_PLOT)
        x_points = []
        y_points = []
        first_plot = True

        def plot_what_we_have():
            # Flush the points gathered so far as one segment; only the first
            # segment carries the label, so the legend shows each name once.
            nonlocal first_plot
            if len(x_points) != 0:
                plt.plot(x_points, y_points, '-', color=self.COLORS[j], label=(name if first_plot else None))
                if first_plot:
                    first_plot = False

        for x in x_points_to_try:
            # noinspection PyBroadException
            try:
                y = function(x)
                x_points.append(x)
                y_points.append(y)
            except Exception:
                # Function undefined here: close off the current segment.
                plot_what_we_have()
                x_points.clear()
                y_points.clear()
        plot_what_we_have()

        # set limits, labels, grid, legend ...
        if y_from is not None and y_to is not None:
            plt.ylim(y_from, y_to)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.axhline(y=0, color='black')  # show the X-axis
        plt.axvline(x=0, color='black')  # show the Y-axis
        plt.legend(loc=4)  # bottom right corner
        plt.xlim(x_from, x_to + (x_to - x_from)/2)  # extra room on the right for the legend
        plt.grid()
# ---------------------------------------------------------------
# Neuroscience example
# ---------------------------------------------------------------
# Ion species identifiers, used as keys into the per-ion dicts below.
POTASSIUM = 'K'
SODIUM = 'Na'
CHLORIDE = 'Cl'
IONS = [POTASSIUM, SODIUM, CHLORIDE]

# Physical constants.
F = 9.648*(10**4)  # Faraday's constant (C mol^-1)
R = 8.314  # Gas constant (J K^-1 mol^-1)
T = 279.45  # Temperature (K)

# Per-ion membrane/solution parameters for the GHK current-density example.
VALENCY = {POTASSIUM: 1, SODIUM: 1, CHLORIDE: -1}  # Valency (no units)
PERM = {POTASSIUM: 1*10**(-2), SODIUM: 0.03*10**(-2), CHLORIDE: 0.001}  # Permeability (mol cm^-2 s^-1)
CONC_IN = {POTASSIUM: 400*10**(-6), SODIUM: 50*10**(-6), CHLORIDE: 40*10**(-6)}  # Inside concentration (mol cm^-3)
CONC_OUT = {POTASSIUM: 20*10**(-6), SODIUM: 440*10**(-6), CHLORIDE: 560*10**(-6)}  # Outside concentration (mol cm^-3)
def current_density(ion, voltage):
    """Goldman-Hodgkin-Katz flux for `ion` at membrane potential `voltage` (V).

    Uses the module-level constants (PERM, VALENCY, CONC_IN, CONC_OUT, F, R, T).
    """
    if np.isscalar(voltage) and voltage == 0:
        # The GHK expression is 0/0 at v = 0; return its analytic limit
        # P*z*F*(Cin - Cout) instead of nan. (Array inputs are left to the
        # general formula, preserving the original behavior.)
        return PERM[ion]*VALENCY[ion]*F*(CONC_IN[ion] - CONC_OUT[ion])
    left = PERM[ion]*(VALENCY[ion]**2)*(F**2)*voltage/(R*T)
    # np.exp replaces sp.exp — the scipy top-level NumPy aliases were removed.
    exp_part = np.exp(-VALENCY[ion]*F*voltage/(R*T))
    nominator = CONC_IN[ion] - CONC_OUT[ion]*exp_part
    denominator = 1 - exp_part
    return left*(nominator/denominator)
def neuroscience():
    """Plot the GHK current density of K+, Na+ and Cl- over a +/-0.1 V range."""
    plotter = GraphPlotter()
    for ion in (POTASSIUM, SODIUM, CHLORIDE):
        # Bind `ion` as a default argument so each lambda keeps its own ion.
        plotter.add_function(lambda x, ion=ion: current_density(ion, x), ion)
    plotter.plot_together(-0.1, 0.1)
    plotter.plot_one_by_one(-0.1, 0.1)
    plotter.plot_on_grid(-0.1, 0.1)
# ---------------------------------------------------------------
# Other example
# ---------------------------------------------------------------
def square(x):
    """Return x squared."""
    return x * x
def log_10_x(x):
    # NOTE(review): despite the name, this computes exp(x**(1/5)), not
    # log10(x). It is only referenced from commented-out demo code, so the
    # mismatch is harmless, but confirm intent before reusing it.
    return math.exp(x**(1/5))
def primes(x):
    """Approximate the prime-counting function: pi(x) ~ x / ln(x)."""
    return x / math.log(x)
def example():
    """Demo: register a handful of functions and show them on one grid."""
    plotter = GraphPlotter()
    # plotter.add_function(square, "square of x")
    # plotter.add_function(log_10_x, "log_10(x)")
    plotter.add_function(primes, "~π(x)")
    plotter.add_function(lambda x: 1/x, "1/x")
    plotter.add_function(lambda x: 1/math.log(abs(x) - 5), "1/log(|x| - 5)")
    plotter.add_function(lambda x: x**3, "x cubed")
    plotter.add_function(lambda x: x**4, "x^4")
    plotter.add_function(lambda x: math.exp(x), "exp(x)")
    plotter.add_function(lambda x: math.exp(math.sqrt(abs(x))), "e^(sqrt(|x|))")
    # plotter.plot_together(-10, 10)
    # plotter.plot_together(-1, 1)
    # plotter.plot_one_by_one(-10, 10)
    plotter.plot_on_grid(-10, 10)
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
def main():
    # Entry point: run the generic plotting demo. Swap the comment below to
    # run the neuroscience (GHK current density) demo instead.
    example()
    #neuroscience()

if __name__ == "__main__":
    main()
|
{"hexsha": "f4d2fc354c457864e91ce2adb523f822e6c6ffba", "size": 7191, "ext": "py", "lang": "Python", "max_stars_repo_path": "s07_graph_plotter/solutions/sol_graph_plotter.py", "max_stars_repo_name": "silverfield/pythonsessions", "max_stars_repo_head_hexsha": "bf5d82dded7616a5d6998da4eb445708c728794f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "s07_graph_plotter/solutions/sol_graph_plotter.py", "max_issues_repo_name": "silverfield/pythonsessions", "max_issues_repo_head_hexsha": "bf5d82dded7616a5d6998da4eb445708c728794f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "s07_graph_plotter/solutions/sol_graph_plotter.py", "max_forks_repo_name": "silverfield/pythonsessions", "max_forks_repo_head_hexsha": "bf5d82dded7616a5d6998da4eb445708c728794f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7605633803, "max_line_length": 118, "alphanum_fraction": 0.4995132805, "include": true, "reason": "import scipy", "num_tokens": 1825}
|
#pragma once
#define GIF_FRAME_LENGTH 33
#include "concurrentmap.hpp"
#include "emojis.hpp"
#include "messages/lazyloadedimage.hpp"
#include "signalvector.hpp"
#include "twitch/emotevalue.hpp"
#include <QMap>
#include <QMutex>
#include <QRegularExpression>
#include <QString>
#include <QTimer>
#include <boost/signals2.hpp>
namespace chatterino {
class WindowManager;
// Lightweight handle to a lazily loaded emote image. A null `image` means
// "no data" (default-constructed entries).
struct EmoteData {
    EmoteData()
    {
    }

    EmoteData(messages::LazyLoadedImage *_image)
        : image(_image)
    {
    }

    // Non-owning pointer to the image; lifetime managed elsewhere.
    messages::LazyLoadedImage *image = nullptr;
};

// Emote code/name -> image data, safe for concurrent access.
typedef ConcurrentMap<QString, EmoteData> EmoteMap;
// Owns and caches every emote resource used by the client: Twitch emotes,
// BTTV/FFZ global and per-channel emotes, emoji, and chatterino-specific
// images. Implementations live in the corresponding .cpp.
class EmoteManager
{
public:
    explicit EmoteManager(WindowManager &_windowManager);

    // Global singleton instance (set elsewhere — see the .cpp).
    static EmoteManager *instance;

    void loadGlobalEmotes();

    // Re-fetch a channel's emote set; the result is written through the
    // weak_ptr only if the channel's map is still alive.
    void reloadBTTVChannelEmotes(const QString &channelName,
                                 std::weak_ptr<EmoteMap> channelEmoteMap);
    void reloadFFZChannelEmotes(const QString &channelName,
                                std::weak_ptr<EmoteMap> channelEmoteMap);

    // Accessors for the individual provider caches.
    ConcurrentMap<QString, twitch::EmoteValue *> &getTwitchEmotes();
    EmoteMap &getFFZEmotes();
    EmoteMap &getChatterinoEmotes();
    EmoteMap &getBTTVChannelEmoteFromCaches();
    EmoteMap &getEmojis();
    ConcurrentMap<int, EmoteData> &getFFZChannelEmoteFromCaches();
    ConcurrentMap<long, EmoteData> &getTwitchEmoteFromCache();

    EmoteData getCheerImage(long long int amount, bool animated);
    EmoteData getTwitchEmoteById(long int id, const QString &emoteName);

    // Generation counter, incremented via incGeneration(); how callers use
    // the generation is not visible from this header.
    int getGeneration()
    {
        return _generation;
    }

    void incGeneration()
    {
        _generation++;
    }

    // Signal presumably fired on a timer to advance animated (GIF) emotes
    // (see GIF_FRAME_LENGTH and _gifUpdateTimer below) — confirm in the .cpp.
    boost::signals2::signal<void()> &getGifUpdateSignal();

    // Bit badge/emotes?
    ConcurrentMap<QString, messages::LazyLoadedImage *> miscImageCache;

private:
    WindowManager &windowManager;

    /// Emojis
    QRegularExpression findShortCodesRegex;
    // shortCodeToEmoji maps strings like "sunglasses" to its emoji
    QMap<QString, EmojiData> emojiShortCodeToEmoji;
    // Maps the first character of the emoji unicode string to a vector of possible emojis
    QMap<QChar, QVector<EmojiData>> emojiFirstByte;
    // url Emoji-one image
    EmoteMap emojiCache;
    EmoteMap emojis;

    void loadEmojis();

public:
    // Tokenizes `text` into (emote image, word) tuples, resolving emoji.
    void parseEmojis(std::vector<std::tuple<EmoteData, QString>> &parsedWords, const QString &text);

    // Replaces ":shortcode:" occurrences in `text` with the emoji characters.
    QString replaceShortCodes(const QString &text);

    std::vector<std::string> emojiShortCodes;

    /// Twitch emotes
    void refreshTwitchEmotes(const std::string &roomID);

    // Emotes available to the logged-in account, grouped by emote set.
    struct TwitchAccountEmoteData {
        struct TwitchEmote {
            std::string id;
            std::string code;
        };

        // emote set
        std::map<std::string, std::vector<TwitchEmote>> emoteSets;
        std::vector<std::string> emoteCodes;
        // true once the data has been fetched and filled in
        bool filled = false;
    };

    std::map<std::string, TwitchAccountEmoteData> twitchAccountEmotes;

private:
    // emote code
    ConcurrentMap<QString, twitch::EmoteValue *> _twitchEmotes;
    // emote id
    ConcurrentMap<long, EmoteData> _twitchEmoteFromCache;

    /// BTTV emotes
    EmoteMap bttvChannelEmotes;

public:
    ConcurrentMap<QString, EmoteMap> bttvChannels;
    EmoteMap bttvGlobalEmotes;
    SignalVector<std::string> bttvGlobalEmoteCodes;
    // roomID
    std::map<std::string, SignalVector<std::string>> bttvChannelEmoteCodes;
    EmoteMap _bttvChannelEmoteFromCaches;

private:
    void loadBTTVEmotes();

    /// FFZ emotes
    EmoteMap ffzChannelEmotes;

public:
    ConcurrentMap<QString, EmoteMap> ffzChannels;
    EmoteMap ffzGlobalEmotes;
    SignalVector<std::string> ffzGlobalEmoteCodes;
    std::map<std::string, SignalVector<std::string>> ffzChannelEmoteCodes;

private:
    ConcurrentMap<int, EmoteData> _ffzChannelEmoteFromCaches;

    void loadFFZEmotes();

    /// Chatterino emotes
    EmoteMap _chatterinoEmotes;

    boost::signals2::signal<void()> _gifUpdateTimerSignal;
    QTimer _gifUpdateTimer;
    bool _gifUpdateTimerInitiated = false;

    int _generation = 0;

    // methods
    static QString getTwitchEmoteLink(long id, qreal &scale);
};
} // namespace chatterino
|
{"hexsha": "b64cbd85dc8ff48041ead5ec786eedbdad086af3", "size": 4207, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/emotemanager.hpp", "max_stars_repo_name": "chrisduerr/chatterino2", "max_stars_repo_head_hexsha": "6df531017123570c0f43127bbdc2517dde4071a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-03-22T21:48:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-22T21:48:27.000Z", "max_issues_repo_path": "src/emotemanager.hpp", "max_issues_repo_name": "chrisduerr/chatterino2", "max_issues_repo_head_hexsha": "6df531017123570c0f43127bbdc2517dde4071a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/emotemanager.hpp", "max_forks_repo_name": "chrisduerr/chatterino2", "max_forks_repo_head_hexsha": "6df531017123570c0f43127bbdc2517dde4071a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6023391813, "max_line_length": 100, "alphanum_fraction": 0.6926550986, "num_tokens": 1083}
|
"""Train an LSTM language model on a preprocessed one-hot sentence dataset."""
import numpy as np
from LSTM_language_model.model import LSTM_language_model
from LSTM_language_model.utility import onehot, make_input_output
from pathlib import Path
from argparse import ArgumentParser

# Command-line interface.
parser = ArgumentParser()
parser.add_argument("--model", type=Path, required=True)
parser.add_argument("--dataset", type=Path, required=True)
parser.add_argument("--batch_size", type=int, required=True)
parser.add_argument("--latent_node", type=int, required=True)
parser.add_argument("--epoch", type=int, required=True)
parser.add_argument("--continue_train", action="store_true")
args = parser.parse_args()

# Load the preprocessed dataset archive.
dataset = np.load(args.dataset)
N, T = dataset["shape"]
D = dataset["depth"]
sentence_matrix = dataset["sentences"]

# Build shifted input/target pairs and one-hot encode both.
inputs, targets = make_input_output(sentence_matrix)
input_matrix = onehot(inputs, depth=D)
output_matrix = onehot(targets, depth=D)

words = list(dataset["words"])
BOS_index = 0
EOS_index = D - 1

batch_size = args.batch_size
epochs = args.epoch
latent_node = args.latent_node

# Make sure the output directory exists before training starts.
args.model.parent.mkdir(exist_ok=True, parents=True)

if args.continue_train:
    # Resume: the architecture comes from the saved model, not --latent_node.
    lstmlm = LSTM_language_model(D, None, BOS_index=BOS_index, EOS_index=EOS_index, load_model_path=args.model)
else:
    lstmlm = LSTM_language_model(D, latent_node, BOS_index=BOS_index, EOS_index=EOS_index)

lstmlm.fit(
    input_matrix, output_matrix,
    batch_size=batch_size, epochs=epochs,
    # validation_split=0.1
)
lstmlm.save_model(args.model)
|
{"hexsha": "eb6c5951e7f6fa809b4b9fea6dc0cc1e6c50e650", "size": 1440, "ext": "py", "lang": "Python", "max_stars_repo_path": "sample/LSTMLM_train.py", "max_stars_repo_name": "RyoOzaki/LSTM_language_model", "max_stars_repo_head_hexsha": "7f4382783a317c52b9f9053cb2fe5bd1a64571a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sample/LSTMLM_train.py", "max_issues_repo_name": "RyoOzaki/LSTM_language_model", "max_issues_repo_head_hexsha": "7f4382783a317c52b9f9053cb2fe5bd1a64571a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sample/LSTMLM_train.py", "max_forks_repo_name": "RyoOzaki/LSTM_language_model", "max_forks_repo_head_hexsha": "7f4382783a317c52b9f9053cb2fe5bd1a64571a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6382978723, "max_line_length": 111, "alphanum_fraction": 0.7805555556, "include": true, "reason": "import numpy", "num_tokens": 353}
|
[STATEMENT]
lemma (in encoding) indRelRPO_is_preorder:
shows "preorder indRelRPO"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. preorder indRelRPO
[PROOF STEP]
unfolding preorder_on_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. refl indRelRPO \<and> trans indRelRPO
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. refl indRelRPO
2. trans indRelRPO
[PROOF STEP]
show "refl indRelRPO"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. refl indRelRPO
[PROOF STEP]
by (rule indRelRPO_refl)
[PROOF STATE]
proof (state)
this:
refl indRelRPO
goal (1 subgoal):
1. trans indRelRPO
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. trans indRelRPO
[PROOF STEP]
show "trans indRelRPO"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. trans indRelRPO
[PROOF STEP]
unfolding trans_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x y z. x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R y \<longrightarrow> y \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z \<longrightarrow> x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z
[PROOF STEP]
proof clarify
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y z. x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R y \<and> y \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z \<Longrightarrow> x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z
[PROOF STEP]
fix P Q R
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y z. x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R y \<and> y \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z \<Longrightarrow> x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z
[PROOF STEP]
assume "P \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R Q" and "Q \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R R"
[PROOF STATE]
proof (state)
this:
P \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R Q
Q \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R R
goal (1 subgoal):
1. \<And>x y z. x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R y \<and> y \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z \<Longrightarrow> x \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R z
[PROOF STEP]
thus "P \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R R"
[PROOF STATE]
proof (prove)
using this:
P \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R Q
Q \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R R
goal (1 subgoal):
1. P \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R R
[PROOF STEP]
by (rule indRelRPO.trans)
[PROOF STATE]
proof (state)
this:
P \<lesssim>\<lbrakk>\<cdot>\<rbrakk>R R
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
trans indRelRPO
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1062, "file": "Encodability_Process_Calculi_SourceTargetRelation", "length": 14}
|
"""Link functions related to log."""
# author: Benjamin Cross
# email: btcross26@yahoo.com
# created: 2019-08-26
import numpy as np
from .base_class import BaseLink
class LogLink(BaseLink):
    """Log link function: eta = log(y + summand)."""

    def __init__(self, summand: float = 0.0):
        """
        Class initializer.

        Extends the BaseLink class initializer.

        Parameters
        ----------
        summand: float
            Constant added to y inside the log, i.e.
            link = log(y + summand).
        """
        super().__init__()
        self.summand_ = summand

    def _link(self, y: np.ndarray) -> np.ndarray:
        """
        Map the target y to the linear predictor eta.

        Overrides BaseLink._link.
        """
        shifted = y + self.summand_
        return np.log(shifted)

    def _inverse_link(self, eta: np.ndarray) -> np.ndarray:
        """
        Map the linear predictor eta back to the target y.

        Overrides BaseLink._inverse_link.
        """
        return np.exp(eta) - self.summand_

    def dydeta(self, y: np.ndarray) -> np.ndarray:
        """
        First derivative dy/deta as a function of y.

        For this link dy/deta = exp(eta) = y + summand.
        Overrides BaseLink.dydeta.
        """
        return self.summand_ + y

    def d2ydeta2(self, y: np.ndarray) -> np.ndarray:
        """
        Second derivative d2y/deta2 as a function of y.

        For the log link it coincides with dy/deta: both equal y + summand.
        Overrides BaseLink.d2ydeta2.
        """
        return self.summand_ + y
class Logp1Link(LogLink):
    """log(1 + y) link: a LogLink with the summand fixed at 1.0."""

    def __init__(self) -> None:
        """Initialize the parent LogLink with summand equal to 1.0."""
        super().__init__(summand=1.0)
|
{"hexsha": "1ec2aaeefece20082e40d4cca922b4ee10ebe21f", "size": 1747, "ext": "py", "lang": "Python", "max_stars_repo_path": "genestboost/link_functions/log_links.py", "max_stars_repo_name": "btcross26/forward_stagewise_regression", "max_stars_repo_head_hexsha": "be14503ea253cb8b72bb168608c581f238c57d50", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-05-04T01:25:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T00:54:15.000Z", "max_issues_repo_path": "genestboost/link_functions/log_links.py", "max_issues_repo_name": "btcross26/forward_stagewise_regression", "max_issues_repo_head_hexsha": "be14503ea253cb8b72bb168608c581f238c57d50", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2020-08-13T14:47:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T02:31:45.000Z", "max_forks_repo_path": "genestboost/link_functions/log_links.py", "max_forks_repo_name": "btcross26/genestboost", "max_forks_repo_head_hexsha": "be14503ea253cb8b72bb168608c581f238c57d50", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9571428571, "max_line_length": 90, "alphanum_fraction": 0.5872925014, "include": true, "reason": "import numpy", "num_tokens": 427}
|
import numpy as np
import matplotlib.pyplot as plt
import keyboard as kb
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import SGD, Adam
from keras.datasets import fashion_mnist
def load_data():
    """Load Fashion-MNIST, shuffle, scale to [0, 1], and build bias-augmented
    flat design matrices.

    Returns (Xtrain, Ytrain, Xtest, Ytest, XtrainMat, XtestMat), where the
    `Mat` variants keep the 28x28 image shape and the flat variants have a
    leading column of ones.
    """
    (XtrainMat, Ytrain), (XtestMat, Ytest) = fashion_mnist.load_data()
    n_train, n_test = len(Ytrain), len(Ytest)
    # Shuffle both splits with independent random permutations.
    p_train = np.random.permutation(n_train)
    p_test = np.random.permutation(n_test)
    XtrainMat, Ytrain = XtrainMat[p_train] / 255, Ytrain[p_train]
    XtestMat, Ytest = XtestMat[p_test] / 255, Ytest[p_test]
    # Flatten each 28x28 image into a row vector.
    Xtrain = XtrainMat.reshape(n_train, -1)
    Xtest = XtestMat.reshape(n_test, -1)
    # Prepend a column of ones (bias term).
    Xtrain = np.hstack((np.ones((n_train, 1)), Xtrain))
    Xtest = np.hstack((np.ones((n_test, 1)), Xtest))
    return Xtrain, Ytrain, Xtest, Ytest, XtrainMat, XtestMat
def build_model_1(lr=0.001):
    """Small CNN (16->8 conv filters, 64-unit dense head) compiled with SGD.

    Parameters
    ----------
    lr: float
        Learning rate for the SGD optimizer.
    """
    layers = [
        Conv2D(16, (3, 3), padding='same', activation='relu',
               input_shape=(28, 28, 1)),
        MaxPooling2D(pool_size=(2, 2), padding='same'),
        Conv2D(8, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), padding='same'),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(optimizer=SGD(lr=lr),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    return model
def build_model_2(lr=0.001):
    """Small CNN (16->8 conv filters, 128-unit dense head) compiled with Adam.

    Parameters
    ----------
    lr: float
        Learning rate for the Adam optimizer.
    """
    layers = [
        Conv2D(16, (3, 3), padding='same', activation='relu',
               input_shape=(28, 28, 1)),
        MaxPooling2D(pool_size=(2, 2), padding='same'),
        Conv2D(8, (3, 3), padding='same', activation='relu'),
        MaxPooling2D(pool_size=(2, 2), padding='same'),
        Flatten(),
        Dense(128, activation='relu'),
        Dense(10, activation='softmax'),
    ]
    model = Sequential(layers)
    model.compile(optimizer=Adam(lr=lr),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    return model
def train_model(model, Xtrain, Ytrain, bs=10, e=5):
    """Fit `model` on the given data; returns (loss history, accuracy history),
    one entry per epoch."""
    history = model.fit(Xtrain, Ytrain, batch_size=bs, epochs=e).history
    return (history['loss'], history['acc'])
def test_model(model, Xtest, Ytest):
    """Evaluate `model` on the test data as one batch; returns (loss, accuracy)."""
    loss, acc = model.test_on_batch(Xtest, Ytest)
    return loss, acc
def test_lrs(Xtrain, Ytrain, lrs):
    """Train model 1 once per learning rate and plot loss/accuracy side by side.

    Parameters
    ----------
    Xtrain: training images shaped (N, 28, 28, 1).
    Ytrain: integer class labels.
    lrs: iterable of learning rates to compare (one subplot each).
    """
    loss = []
    accuracy = []
    # create figure; squeeze=False guarantees an axes *array* even when
    # len(lrs) == 1, so .ravel() below is always valid
    fig, plots = plt.subplots(1, len(lrs), squeeze=False)
    fig.tight_layout()
    axes = plots.ravel()
    # train one fresh model per learning rate
    for i, lr in enumerate(lrs):
        print('\nLearning Rate:', lr)
        new_loss, new_acc = train_model(build_model_1(lr), Xtrain, Ytrain)
        loss.append(new_loss)
        accuracy.append(new_acc)
        # plot loss (red) & accuracy (green)
        axes[i].set_title(str(lr))
        axes[i].plot(loss[i], c='R')
        axes[i].plot(accuracy[i], c='G')
    # BUG FIX: plt.show(fig) passed the figure as pyplot.show's `block`
    # argument (the modern signature is show(block=...)); call with no args
    plt.show()
def test_train_size(Xtrain, Ytrain, Xtest, Ytest, train_sizes):
    """Train both models on growing training subsets and plot test loss and
    accuracy against training-set size (one subplot per model)."""
    loss = [[], []]
    accuracy = [[], []]
    for train_size in train_sizes:
        # rebuild fresh models for every training-set size
        models = [build_model_1(), build_model_2()]
        print('\nTraining Set Size:', train_size)
        # train & test both models on the current subset
        for n, model in enumerate(models):
            print(f"\nModel: {n + 1}")
            train_model(model, Xtrain[:train_size], Ytrain[:train_size], e=10)
            new_loss, new_acc = test_model(model, Xtest, Ytest)
            loss[n].append(new_loss)
            accuracy[n].append(new_acc)
    # one subplot per model: loss in red, accuracy in green
    fig, plots = plt.subplots(1, 2)
    fig.tight_layout()
    for n, axis in enumerate(plots.ravel()):
        axis.set_title(f"Model {n + 1}")
        axis.set_ylim(0, 1)
        axis.plot(train_sizes, loss[n], c='R')
        axis.plot(train_sizes, accuracy[n], c='G')
    plt.show()
# Experiment parameters.
lrs = [1, 0.1, 0.01, 0.001, 0.0001]  # learning rates compared in experiment 1
train_sizes = [500, 2500, 15000, 30000]  # training-set sizes compared in experiment 2
Xtrain, Ytrain, Xtest, Ytest, XtrainMat, XtestMat = load_data()
# ----- 1 -----
# test_lrs(XtrainMat[:20000].reshape((-1, 28, 28, 1)), Ytrain[:20000], lrs)
# ----- 2 -----
# test_train_size(XtrainMat.reshape((-1, 28, 28, 1)), Ytrain,
#                 XtestMat.reshape((-1, 28, 28, 1)), Ytest, train_sizes)
|
{"hexsha": "53bffd00a10d0c23270720ac8d22585a1441ccbd", "size": 4509, "ext": "py", "lang": "Python", "max_stars_repo_path": "11.py", "max_stars_repo_name": "mifimigahna/ANN", "max_stars_repo_head_hexsha": "e4476ff29ff017ad0e49f99c4d428a0dd23cdc8a", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "11.py", "max_issues_repo_name": "mifimigahna/ANN", "max_issues_repo_head_hexsha": "e4476ff29ff017ad0e49f99c4d428a0dd23cdc8a", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "11.py", "max_forks_repo_name": "mifimigahna/ANN", "max_forks_repo_head_hexsha": "e4476ff29ff017ad0e49f99c4d428a0dd23cdc8a", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8835616438, "max_line_length": 78, "alphanum_fraction": 0.630738523, "include": true, "reason": "import numpy", "num_tokens": 1281}
|
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for defining and visualizing workspaces for manipulation tasks.
Workspaces define distributions from which the initial positions and/or
orientations of the hand and prop(s) are sampled, plus other task-specific
spatial parameters such as target sizes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from dm_control.composer.variation import distributions
from dm_control.composer.variation import rotations
from dm_control.entities.manipulators import base
from dm_control.manipulation.shared import constants
import numpy as np
_MIN_SITE_DIMENSION = 1e-6  # Ensures that all site dimensions are positive.
_VISIBLE_GROUP = 0
# NOTE(review): the comment below says group 4 but the value is 3 — confirm
# which one the rendering convention actually uses.
_INVISIBLE_GROUP = 3  # Invisible sensor sites live in group 4 by convention.
DOWN_QUATERNION = base.DOWN_QUATERNION
# Axis-aligned bounding box described by its lower and upper corner coordinates.
BoundingBox = collections.namedtuple('BoundingBox', ['lower', 'upper'])
# Variation yielding a uniformly random rotation about the world z-axis.
uniform_z_rotation = rotations.QuaternionFromAxisAngle(
    axis=(0., 0., 1.),
    # NB: We must specify `single_sample=True` here otherwise we will sample a
    # length-4 array of angles rather than a scalar. This happens because
    # `PropPlacer` passes in the previous quaternion as `initial_value`,
    # and by default `distributions.Distribution` assumes that the shape
    # of the output array should be the same as that of `initial_value`.
    angle=distributions.Uniform(-np.pi, np.pi, single_sample=True))
def add_bbox_site(body, lower, upper, visible=False, **kwargs):
    """Attach a box-shaped site visualizing an axis-aligned bounding box.

    Args:
      body: An `mjcf.Element`, the (world)body to which the site should be added.
      lower: A sequence of lower x,y,z bounds.
      upper: A sequence of upper x,y,z bounds.
      visible: Whether the site should be visible by default.
      **kwargs: Keyword arguments used to set other attributes of the newly
        created site.

    Returns:
      An `mjcf.Element` representing the newly created site.
    """
    lower = np.array(lower)
    upper = np.array(upper)
    # Site position is the box center; size holds the half-extents, clipped
    # away from zero so no site dimension is ever degenerate.
    center = (lower + upper) / 2.
    half_extents = np.maximum((upper - lower) / 2., _MIN_SITE_DIMENSION)
    group = None if visible else constants.TASK_SITE_GROUP
    return body.add('site', type='box', pos=center, size=half_extents,
                    group=group, **kwargs)
def add_target_site(body, radius, visible=False, **kwargs):
    """Attach a spherical site visualizing a target location.

    Args:
      body: An `mjcf.Element`, the (world)body to which the site should be added.
      radius: The radius of the target.
      visible: Whether the site should be visible by default.
      **kwargs: Keyword arguments used to set other attributes of the newly
        created site.

    Returns:
      An `mjcf.Element` representing the newly created site.
    """
    if visible:
        group = None
    else:
        group = constants.TASK_SITE_GROUP
    return body.add('site', type='sphere', size=[radius], group=group, **kwargs)
|
{"hexsha": "84e111e8b3e373953b795b8a2eee184830c4abf5", "size": 3564, "ext": "py", "lang": "Python", "max_stars_repo_path": "custom_dmcontrol/dm_control/manipulation/shared/workspaces.py", "max_stars_repo_name": "haorang/285", "max_stars_repo_head_hexsha": "3b7369b8eb4433952c9cdf27d4feaa015a6c40e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2021-11-05T08:46:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T05:53:57.000Z", "max_issues_repo_path": "dm_control/manipulation/shared/workspaces.py", "max_issues_repo_name": "zzlqwq/dm_control", "max_issues_repo_head_hexsha": "fb0199e61db3323a17f56fd089af61f7c949f41e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-19T11:13:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-30T09:08:04.000Z", "max_forks_repo_path": "dm_control/manipulation/shared/workspaces.py", "max_forks_repo_name": "zzlqwq/dm_control", "max_forks_repo_head_hexsha": "fb0199e61db3323a17f56fd089af61f7c949f41e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-11-05T08:46:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T21:56:58.000Z", "avg_line_length": 38.7391304348, "max_line_length": 79, "alphanum_fraction": 0.7264309764, "include": true, "reason": "import numpy", "num_tokens": 823}
|
# sets the objective function according to the arguments provided in obj_dic
"""
Set the objective of the model's underlying optimization problem.
```julia
setObjective!(obj_dic::Union{Dict{Symbol,Float64},Symbol},anyM::anyModel)
```
`obj_dic` is a key-word argument that specifies the respective objective. To enable multi-criteria optimization, it can also be a dictionary assigning a weight to each objective. So far, the only supported key-word is `costs`.
"""
function setObjective!(obj_dic::Union{Dict{Symbol,Float64},Symbol},anyM::anyModel,minimize::Bool=true)

	# converts input into a dictionary if only a symbol was provided; the :none keyword installs a constant dummy objective and returns early
	if typeof(obj_dic) == Symbol
		if obj_dic == :none
			@objective(anyM.optModel, Min, 1)
			# BUG FIX: previously written as `return, produceMessage(...)`, so the
			# report message was never produced before leaving the function
			produceMessage(anyM.options,anyM.report, 1," - Set an empty objective function")
			return
		end
		obj_dic = Dict(obj_dic => 1.0)
	end

	# create empty variables table for objective variables; if another objective was defined before, its variables and equations are re-used from the model
	partObj = anyM.parts.obj
	if !(:objVar in keys(partObj.var))
		partObj.var[:objVar] = DataFrame(name = Symbol[], group = Symbol[], var = AffExpr[])
		partObj.cns[:objEqn] = DataFrame(name = Symbol[], group = Symbol[], cns = ConstraintRef[])
	end

	# create variables and equations required for specified objectives (skip groups already present)
	for objGrp in setdiff(keys(obj_dic),unique(partObj.var[:objVar][!,:group]))
		createObjective!(objGrp,partObj,anyM)
	end

	# sets overall objective variable with upper limits and according to weights provided in dictionary
	objBd_flt = anyM.options.bound.obj |> (x -> isnan(x) ? NaN : x / anyM.options.scaFac.obj)
	obj_var = JuMP.add_variable(anyM.optModel, JuMP.build_variable(error, VariableInfo(false, NaN, !isnan(objBd_flt), objBd_flt, false, NaN, false, NaN, false, false)),"obj") * anyM.options.scaFac.obj
	obj_eqn = @constraint(anyM.optModel, obj_var == sum(map(x -> sum(filter(r -> r.group == x,partObj.var[:objVar])[!,:var])*obj_dic[x], collectKeys(keys(obj_dic)))))

	if minimize
		@objective(anyM.optModel, Min, obj_var / anyM.options.scaFac.obj)
	else
		@objective(anyM.optModel, Max, obj_var / anyM.options.scaFac.obj)
	end

	produceMessage(anyM.options,anyM.report, 1," - Set objective function according to inputs")
end
# dispatch helper: converts the objective group symbol (e.g. :costs) into a
# value type so the matching createObjective! method is selected
createObjective!(objGrp::Symbol, partObj::OthPart,anyM::anyModel) = createObjective!(Val{objGrp}(), partObj::OthPart,anyM::anyModel)
# XXX create variables and equations for cost objective
"""
Create all cost-related variables and constraints for the overall objective:
annualized expansion costs, operating costs of (commissioned) capacity,
variable dispatch costs (incl. emission prices), curtailment / loss-of-load
costs, and trade costs. Each cost group is aggregated, scaled, and written
into `partObj` via `transferCostEle!`; finally one objective variable per
cost group is created and tied to the group's summed cost expression.
"""
function createObjective!(objGrp::Val{:costs},partObj::OthPart,anyM::anyModel)
parObj_arr = collectKeys(keys(partObj.par))
techIdx_arr = collect(keys(anyM.parts.tech))
varToPart_dic = Dict(:exc => :exc, :ctr => :bal,:trdSell => :trd, :trdBuy => :trd)
# computes discount factors from discount rate provided and saves them as new parameter elements
computeDisFac!(partObj,anyM)
# XXX add elements for expansion costs of technologies
for va in (:Conv, :StIn, :StOut, :StSize, :Exc)
# XXX compute expansion costs
var_sym = Symbol(:exp,va)
costPar_sym = Symbol(:costExp,va)
if !(costPar_sym in parObj_arr) continue end
# get all variables
allExp_df = getAllVariables(var_sym,anyM)
if isempty(allExp_df)
continue
else
allExp_df = rename(allExp_df,:var => :exp)
end
# add economic lifetime to table where it is defined
if Symbol(:lifeEco,va) in parObj_arr
ecoLife_df = matchSetParameter(allExp_df,partObj.par[Symbol(:lifeEco,va)],anyM.sets,newCol = :life)
noEcoLife_df = antijoin(allExp_df,ecoLife_df, on = intCol(allExp_df))
noEcoLife_df[!,:life] .= nothing
allExp_df = vcat(ecoLife_df,noEcoLife_df)
else
allExp_df[!,:life] .= nothing
end
techFilt_arr = filter(y -> var_sym in keys(anyM.parts.tech[y].var), techIdx_arr)
# use technical lifetime where no economic lifetime could be obtained
if va != :Exc
# collect technical-lifetime parameter data of all relevant technologies and align their index columns before merging
allPar_arr = filter(w -> !(isempty(w)),map(x -> anyM.parts.tech[x].par[Symbol(:life,va)].data,filter(y -> var_sym in keys(anyM.parts.tech[y].var), techFilt_arr)))
union(intCol.(allPar_arr)...) |> (z -> map(x -> map(y -> insertcols!(x,1,(y => fill(0,size(x,1)))) , setdiff(z,intCol(x)) ) ,allPar_arr))
lifePar_obj = copy(anyM.parts.tech[techFilt_arr[1]].par[Symbol(:life,va)],vcat(allPar_arr...))
else
lifePar_obj = anyM.parts.exc.par[:lifeExc]
end
techLife_df = matchSetParameter(filter(x -> isnothing(x.life),allExp_df)[!,Not(:life)],lifePar_obj,anyM.sets,newCol = :life)
allExp_df = vcat(techLife_df,filter(x -> !isnothing(x.life),allExp_df))
# gets expansion costs and interest rate to compute annuity
allExp_df = matchSetParameter(convertExcCol(allExp_df),partObj.par[costPar_sym],anyM.sets,newCol = :costExp)
if isempty(allExp_df) continue end
# uses tech specific discount rate and fall back on general discount rate as default
if Symbol(:rateExp,va) in keys(partObj.par)
techRate_df = matchSetParameter(allExp_df,partObj.par[Symbol(:rateExp,va)],anyM.sets,newCol = :rate)
else
techRate_df = filter(x -> false,allExp_df); techRate_df[!,:rate] .= Float64[]
end
# obtains general discount rate
generRate_df = rename(antijoin(allExp_df,techRate_df,on = intCol(techRate_df)),:Ts_expSup => :Ts_disSup, :Ts_disSup => :Ts_expSup)
if va != :Exc
generRate_df = matchSetParameter(generRate_df, partObj.par[:rateDisc],anyM.sets,newCol = :rate)
else
# for exchange, use the mean of the discount rates of both connected regions
rateB_arr = matchSetParameter(rename(generRate_df,:R_a => :R_exp), partObj.par[:rateDisc],anyM.sets,newCol = :rateA)[!,:rateA]
rateA_arr = matchSetParameter(rename(generRate_df,:R_b => :R_exp), partObj.par[:rateDisc],anyM.sets,newCol = :rateB)[!,:rateB]
generRate_df[!,:rate] = 0.5 .* (rateA_arr .+ rateB_arr)
end
allExp_df = vcat(techRate_df,rename(generRate_df, :Ts_expSup => :Ts_disSup, :Ts_disSup => :Ts_expSup))
# compute annuity costs (standard annuity formula; degenerates to costExp/life for a zero rate)
allExp_df[!,:costAnn] = map(x -> x.costExp * (x.rate == 0.0 ? 1/x.life : (x.rate * (1 + x.rate)^x.life) / ((1 + x.rate)^x.life-1)), eachrow(allExp_df))
select!(allExp_df,Not([:costExp,:life,:rate]))
allExp_df = flatten(allExp_df,:Ts_disSup)
# adds discount factor and computes cost expression
allExp_df = matchSetParameter(convertExcCol(allExp_df),partObj.par[va != :Exc ? :disFac : :disFacExc],anyM.sets,newCol = :disFac)
# XXX groups cost expressions by technology, scales groups expression and creates a variables for each grouped entry
allExp_df = rename(combine(x -> (expr = sum(x.disFac .* x.exp .* x.costAnn),) ,groupby(allExp_df,va != :Exc ? [:Ts_disSup,:R_exp,:Te] : [:Ts_disSup,:C])),:Ts_disSup => :Ts_exp)
transferCostEle!(allExp_df, partObj,costPar_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costCapa,anyM.options.checkRng)
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for expansion costs")
# XXX add elements for operational costs of technologies
# if decommissioning is enabled, capacity costs depend on commissioned and not on installed capacities
capaTyp_sym = anyM.options.decomm != :none ? :oprCapa : :capa
for va in (:Conv, :StIn, :StOut, :StSize, :Exc)
var_sym = Symbol(capaTyp_sym,va)
costPar_sym = Symbol(:costOpr,va)
if !(costPar_sym in parObj_arr) continue end
# get all variables
allCapa_df = getAllVariables(var_sym,anyM)
if isempty(allCapa_df)
continue
else
allCapa_df = rename(allCapa_df,:var => :capa)
end
# joins costs and discount factors to create cost expression
allCapa_df = matchSetParameter(convertExcCol(allCapa_df),partObj.par[costPar_sym],anyM.sets,newCol = :costOpr)
allCapa_df = matchSetParameter(convertExcCol(allCapa_df),partObj.par[va != :Exc ? :disFac : :disFacExc],anyM.sets,newCol = :disFac)
if isempty(allCapa_df) continue end
# XXX groups cost expressions by technology, scales groups expression and creates a variables for each grouped entry
allCapa_df = combine(x -> (expr = sum(x.disFac .* x.capa .* x.costOpr),), groupby(allCapa_df,va != :Exc ? [:Ts_disSup,:R_exp,:Te] : [:Ts_disSup,:C]))
transferCostEle!(allCapa_df, partObj,costPar_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costCapa,anyM.options.checkRng)
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for capacity costs")
# XXX add elements for variable costs of technologies
for va in (:use,:gen,:stIn,:stOut,:exc)
costPar_sym = string(va) |> (x -> Symbol(:costVar,uppercase(x[1]),x[2:end]))
if !(costPar_sym in parObj_arr || (va == :use && :emissionPrc in parObj_arr && :emissionFac in keys(anyM.parts.lim.par))) continue end
# obtain all variables
allDisp_df = getAllVariables(va,anyM)
if isempty(allDisp_df)
continue
else
allDisp_df = rename(allDisp_df,:var => :disp)
end
# special case for variable costs of exchange (direct and symmetric values need to be considered both) and of use (emission price needs to be considered)
if va == :exc
if :costVarExcDir in parObj_arr
dirCost_df = matchSetParameter(convertExcCol(allDisp_df),anyM.parts.obj.par[:costVarExcDir],anyM.sets,newCol = :costVar)
else
dirCost_df = convertExcCol(allDisp_df[[],:])
end
noDirCost_df = matchSetParameter(antijoin(convertExcCol(allDisp_df),dirCost_df, on = intCol(dirCost_df)),anyM.parts.obj.par[costPar_sym],anyM.sets,newCol = :costVar)
allDisp_df = rename(convertExcCol(vcat(dirCost_df,noDirCost_df)),:var => :disp)
elseif va == :use && :emissionPrc in parObj_arr && :emissionFac in keys(anyM.parts.lim.par)
# get emission prices as a costs entry
emPrc_df = matchSetParameter(select(allDisp_df,Not(:disp)),partObj.par[:emissionPrc],anyM.sets, newCol = :prc)
emPrc_df = matchSetParameter(emPrc_df,anyM.parts.lim.par[:emissionFac],anyM.sets, newCol = :fac)
emPrc_df[!,:costEms] = emPrc_df[!,:prc] .* emPrc_df[!,:fac] ./ 1000
select!(emPrc_df,Not([:prc,:fac]))
# merge emission costs with other variable costs or just use emission costs if there are not any other
if costPar_sym in parObj_arr
otherVar_df = matchSetParameter(select(allDisp_df,Not(:disp)),anyM.parts.obj.par[costPar_sym],anyM.sets,newCol = :costVar)
allCost_df = joinMissing(otherVar_df,emPrc_df,intCol(emPrc_df),:outer,merge(Dict{Symbol,Any}(:costVar => 0.0, :costEms => 0.0),Dict{Symbol,Any}(x => 0 for x in intCol(emPrc_df))) )
allCost_df[!,:costVar] = allCost_df[!,:costVar] .+ allCost_df[!,:costEms]
select!(allCost_df,Not(:costEms))
else
allCost_df = emPrc_df
rename!(allCost_df,:costEms => :costVar)
end
allDisp_df = innerjoin(allCost_df,allDisp_df, on = intCol(allDisp_df))
else
allDisp_df = matchSetParameter(allDisp_df,anyM.parts.obj.par[costPar_sym],anyM.sets,newCol = :costVar)
end
if isempty(allDisp_df) continue end
# renames dispatch regions to enable join with discount factors
if va != :exc rename!(allDisp_df,:R_dis => :R_exp) end
allDisp_df = matchSetParameter(allDisp_df,partObj.par[va != :exc ? :disFac : :disFacExc],anyM.sets,newCol = :disFac)
# XXX groups cost expressions by technology, scales groups expression and creates a variables for each grouped entry
allDisp_df = combine(x -> (expr = sum(x.disFac .* x.disp .* x.costVar) ./ 1000.0 .* anyM.options.redStep,) ,groupby(allDisp_df,va != :exc ? [:Ts_disSup,:R_exp,:Te] : [:Ts_disSup,:C]))
transferCostEle!(allDisp_df, partObj,costPar_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costDisp,anyM.options.checkRng)
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for variables costs")
# XXX add elements for curtailment and loss of load costs of energy carriers
for varType in [:crt,:lss]
if varType in keys(anyM.parts.bal.var)
cost_sym = varType == :crt ? :costCrt : :costLss
# compute discounted curtailment costs
allVar_df = rename(matchSetParameter(anyM.parts.bal.var[varType],anyM.parts.bal.par[cost_sym],anyM.sets,newCol = :cost),:var => varType)
allVar_df = matchSetParameter(rename(allVar_df,:R_dis => :R_exp),partObj.par[:disFac],anyM.sets,newCol = :disFac)
# groups cost expressions by carrier, scales groups expression and creates a variables for each grouped entry
allVar_df = combine(x -> (expr = sum(x.disFac .* x[!,varType] .* x.cost) ./ 1000.0 .* anyM.options.redStep,) ,groupby(allVar_df, [:Ts_disSup,:R_exp,:C]))
transferCostEle!(allVar_df, partObj,cost_sym,anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costDisp,anyM.options.checkRng, NaN)
end
end
# XXX add elements for trade costs of energy carriers (buy and sell)
for va in (:trdBuy, :trdSell)
if va in keys(anyM.parts.trd.var)
# compute discounted trade costs (sell revenues enter with a negative sign)
allTrd_df = rename(matchSetParameter(anyM.parts.trd.var[va],anyM.parts.trd.par[Symbol(va,:Prc)],anyM.sets,newCol = :costTrd),:var => :trd)
allTrd_df = matchSetParameter(rename(allTrd_df,:R_dis => :R_exp),partObj.par[:disFac],anyM.sets,newCol = :disFac)
# groups cost expressions by carrier, scales groups expression and creates a variables for each grouped entry
allTrd_df = combine(x -> (expr = sum(x.disFac .* x.trd .* x.costTrd) ./ (va == :trdSell ? -1000.0 : 1000.0) .* anyM.options.redStep,), groupby(allTrd_df, [:Ts_disSup,:R_exp,:C]))
transferCostEle!(allTrd_df, partObj,Symbol(:cost,uppercase(string(va)[1]),string(va)[2:end]),anyM.optModel,anyM.lock,anyM.sets,anyM.options.coefRng,anyM.options.scaFac.costDisp,anyM.options.checkRng,(va == :trdSell ? NaN : 0.0))
end
end
produceMessage(anyM.options,anyM.report, 3," - Created expression for curtailment and trade costs")
# XXX creates overall costs variable considering scaling parameters
relBla = filter(x -> x != :objVar, collectKeys(keys(partObj.var)))
objVar_arr = map(relBla) do varName
# sets lower limit of zero, except for curtailment and revenues from selling, because these can incure "negative" costs
lowBd_tup = !(varName in (:costCrt,:costTrdSell)) |> (x -> (x,x ? 0.0 : NaN))
info = VariableInfo(lowBd_tup[1], lowBd_tup[2], false, NaN, false, NaN, false, NaN, false, false)
return JuMP.add_variable(anyM.optModel, JuMP.build_variable(error, info),string(varName))
end
# create dataframe with for overall cost equations and scales it
objExpr_arr = [objVar_arr[idx] - sum(partObj.var[name][!,:var]) for (idx, name) in enumerate(relBla)]
cns_df = DataFrame(group = fill(:costs,length(objExpr_arr)), name = relBla, cnsExpr = objExpr_arr)
# add variables and equations to overall objective dataframes
partObj.cns[:objEqn] = vcat(partObj.cns[:objEqn],createCns(cnsCont(cns_df,:equal),anyM.optModel))
partObj.var[:objVar] = vcat(partObj.var[:objVar],DataFrame(group = fill(:costs,length(objVar_arr)), name = relBla, var = objVar_arr))
end
# transfers provided cost dataframe into dataframe of overall objective variables and equations (and scales them)
"""
Create one variable per row of `cost_df`, constrain each variable to equal the
row's cost expression, scale the resulting constraints, and store variables and
constraints in `partObj` under `costPar_sym`. `lowBd` is the lower bound applied
to the created cost variables (NaN for unbounded).
"""
function transferCostEle!(cost_df::DataFrame, partObj::OthPart,costPar_sym::Symbol,optModel::Model,lock_::ReentrantLock,sets_dic::Dict{Symbol,Tree},
							coefRng_tup::NamedTuple{(:mat,:rhs),Tuple{Tuple{Float64,Float64},Tuple{Float64,Float64}}}, scaCost_fl::Float64, checkRng_fl::Float64, lowBd::Float64 = 0.0)

	# create variables for cost entry and builds corresponding expression for equations controlling them
	cost_df = createVar(cost_df,string(costPar_sym),NaN,optModel,lock_,sets_dic, scaFac = scaCost_fl, lowBd = lowBd)
	cost_df[!,:cnsExpr] = map(x -> x.expr - x.var, eachrow(cost_df))
	select!(cost_df,Not(:expr))

	# scales cost expression
	scaleCnsExpr!(cost_df,coefRng_tup,checkRng_fl)
	# (removed the no-op self-assignment `cost_df[!,:var] = cost_df[!,:var]`)

	# writes equations and variables
	partObj.cns[costPar_sym] = createCns(cnsCont(select(cost_df,Not(:var)),:equal),optModel)
	partObj.var[costPar_sym] = select(cost_df,Not(:cnsExpr))
end
|
{"hexsha": "71155d8e2832646800c8302508c49bfb21f03bcc", "size": 15865, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/optModel/objective.jl", "max_stars_repo_name": "wookay/AnyMOD.jl", "max_stars_repo_head_hexsha": "14fdae26d6c8dd88001b2b5e4aadb468a3856b42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/optModel/objective.jl", "max_issues_repo_name": "wookay/AnyMOD.jl", "max_issues_repo_head_hexsha": "14fdae26d6c8dd88001b2b5e4aadb468a3856b42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/optModel/objective.jl", "max_forks_repo_name": "wookay/AnyMOD.jl", "max_forks_repo_head_hexsha": "14fdae26d6c8dd88001b2b5e4aadb468a3856b42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.4590747331, "max_line_length": 231, "alphanum_fraction": 0.7297195084, "num_tokens": 4827}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 14:18:39 2017
@author: mitchell
"""
import config
import PyQt5
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
import matplotlib.patches as patches
# Apply the project-wide matplotlib style (defined in config).
plt.style.use(config.plot_style)
#-------------------------------------------------------#
# SET OUTLIERS, REFERENCE COMPOUNDS, AESTHETIC SETTINGS #
#-------------------------------------------------------#
# Materials Project entries excluded from plots/analysis.
outliers = ['mp-14134', 'mp-769844', 'mp-11585', 'mp-758317', 'mp-20526']
# Reference compounds: Materials Project numeric id -> display formula (LaTeX).
ref = {
5238:'CuGaS$_2$',
406:'CdTe',
149:'Si',
2534:'GaAs',
804:'GaN',
2133:'ZnO',
2624:'AlSb'
}
scaleDot = 1e2  # default marker-size multiplier for size-scaled scatter plots
c1, c2, c3, c4, c5 = ('#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd')  # matplotlib default-cycle colors
fs = 24 # annotation fontsize
ms = 50  # default marker size
def gtrans(txt):
    """Translate the k-point label '\\Gamma' into its LaTeX form r'$\\Gamma$';
    all other labels are returned unchanged."""
    return r'$\Gamma$' if txt == '\\Gamma' else txt
def sizeplot(xdata, ydata, sizedata, scale = 1):
    """Scatter plot with hollow blue markers whose areas are
    `sizedata * scale`. Returns (figure, axes, PathCollection)."""
    fig, axis = plt.subplots()
    areas = np.array(sizedata) * scale
    points = axis.scatter(xdata, ydata, s = areas,
                          facecolors = 'none', edgecolors = 'b')
    return (fig, axis, points)
def nan2str(arg):
    """Map NaN values to the string 'nan'; return every other value unchanged.

    BUG FIX: the original used `arg is np.nan`, an identity check that only
    matches the `np.nan` singleton object and silently misses equal-but-distinct
    NaN values such as `float('nan')` or scalars extracted from numpy arrays
    (np.float64 subclasses float, so `math.isnan` covers both).
    """
    import math  # local import: not guaranteed in the file header
    if isinstance(arg, float) and math.isnan(arg):
        return('nan')
    else:
        return(arg)
def nan2nan(arg):
    """Collapse the placeholder strings 'nan', 'none', and 'none5' to np.nan;
    every other value is passed through unchanged."""
    if arg in ('nan', 'none', 'none5'):
        return np.nan
    return arg
def scatter(xdata, ydata, c = 'k', ax = None, linewidth = None, alpha = None,
            marker = 'o'):
    """Hollow-marker scatter plot.

    When `ax` is None a new figure/axes pair is created and
    (figure, axes, PathCollection) is returned; otherwise the points are drawn
    on the given axes and only the PathCollection is returned.
    """
    if ax is None:
        fig, ax = plt.subplots()
        points = ax.scatter(xdata, ydata, marker = marker, facecolors = 'none',
                            edgecolors = c, linewidth = linewidth, alpha = alpha)
        return fig, ax, points
    return ax.scatter(xdata, ydata, marker = marker, facecolors = 'none',
                      edgecolors = c, linewidth = linewidth, alpha = alpha)
def zoomscatter(xdata, ydata, zoom = 2, limits = None, zlimits = None, marker = 'o', loc = 1, size = None, scale = 1):
    """Scatter plot with a zoomed inset axes.

    `limits`/`zlimits` are [xmin, xmax, ymin, ymax] for the main plot and the
    inset; when omitted the main limits come from the data range and the inset
    covers the central 20% of each axis span. Returns (figure, axes, inset axes).
    """
    if scale != 1:
        size = np.array([s * scale for s in size])
    if limits is None:
        limits = [min(xdata), max(xdata), min(ydata), max(ydata)]
    fig, ax = plt.subplots()
    ax.scatter(xdata, ydata, s = size)
    ax.set_xlim(limits[0], limits[1])
    ax.set_ylim(limits[2], limits[3])
    # zoomed inset; default window is the middle fifth of each axis range
    axins = zoomed_inset_axes(ax, zoom, loc = loc)
    if zlimits is None:
        xspan = limits[1] - limits[0]
        yspan = limits[3] - limits[2]
        zlimits = [limits[0] + .4 * xspan, limits[0] + .6 * xspan,
                   limits[2] + .4 * yspan, limits[2] + .6 * yspan]
    axins.set_xlim(zlimits[0], zlimits[1])
    axins.set_ylim(zlimits[2], zlimits[3])
    plt.yticks(visible=False)
    plt.xticks(visible=False)
    axins.tick_params(axis = 'both', which = 'both', left = 'off', bottom = 'off')
    axins.scatter(xdata, ydata, marker = marker, s = size)
    # connector lines between the inset and its source region
    mark_inset(ax, axins, loc1=2, loc2=4, fc = 'none', ec='0.5')
    return (fig, ax, axins)
def zoomscatterbicolor(xdata, ydata, select, zoom = 2, limits = None, zlimits = None, marker = 'o', loc = 1, size = None, scale = 1, c = 'b', cs = 'r'): #WIP
    # Work-in-progress two-color variant of zoomscatter: points where `select`
    # is True are drawn in `cs`, the rest in `c`, with a zoomed inset.
    # NOTE(review): `xdata[select]` needs inputs supporting boolean indexing
    # (numpy arrays / pandas Series), unlike zoomscatter — confirm callers.
    # NOTE(review): `size` is passed unsliced to both subset scatters; a
    # per-point size array will not match the subset lengths — confirm.
    if scale != 1:
        size = np.array([i * scale for i in size])
    if limits is None:
        limits = [min(xdata), max(xdata), min(ydata), max(ydata)]
    f, ax = plt.subplots()
    # return values of the two scatters are kept but never used afterwards
    sel = ax.scatter(xdata[select], ydata[select], s = size, c = cs)
    unsel = ax.scatter(xdata[~select], ydata[~select], s = size, c = c)
    ax.set_xlim(limits[0], limits[1])
    ax.set_ylim(limits[2], limits[3])
    axins = zoomed_inset_axes(ax, zoom, loc = loc)
    if zlimits is None:
        xdis = limits[1] - limits[0]
        ydis = limits[3] - limits[2]
        zlimits = [limits[0] + .4*xdis, limits[0] + .6*xdis,
                   limits[2] + .4*ydis, limits[2] + .6*ydis]
    axins.set_xlim(zlimits[0], zlimits[1])
    axins.set_ylim(zlimits[2], zlimits[3])
    plt.yticks(visible=False)
    plt.xticks(visible=False)
    axins.tick_params(axis = 'both', which = 'both', left = 'off', bottom = 'off')
    # NOTE(review): the inset replots all points in the default color, ignoring
    # the c/cs split used in the main axes — likely part of the WIP status.
    axins.scatter(xdata, ydata, marker = marker, s = size)
    mark_inset(ax, axins, loc1=2, loc2=4, fc = 'none', ec='0.5')
    return(f, ax, axins)
def bandplot(kdist, energy, mpid, formula, dirChange, pathBreak, xrange = None, yrange = None):
    """Plot a band structure along a k-path.

    kdist     : cumulative k-path distances (x axis values).
    energy    : dict of band arrays keyed by spin channel (1, plus -1 if spin-polarized).
    mpid, formula : identifiers shown in the plot title.
    dirChange : k-distances where the path changes direction (thin vertical lines).
    pathBreak : k-distances where the path is discontinuous (thick vertical lines).
    xrange, yrange : optional (min, max) axis limits; default to the data range.

    Returns (figure, axes).
    """
    if len(energy) == 1:
        energyfull = energy[1]
    elif len(energy) == 2:
        # Removed leftover debug print(energy).
        energyfull = np.array([energy[1], energy[-1]])
    if xrange is None:
        xrange = (np.min(kdist), np.max(kdist))
    if yrange is None:
        yrange = (np.min(energyfull), np.max(energyfull))
    ymin, ymax = yrange
    xmin, xmax = xrange
    f, ax = plt.subplots()
    # Spin-up (or only) channel in blue; spin-down channel dashed red.
    ax.plot(kdist, energy[1], '-' , c=( 0.121569 , 0.466667 , 0.705882 ) , lw=3)
    if len(energy) == 2:
        ax.plot(kdist, energy[-1], 'r--', lw=3)
    for k in dirChange:
        ax.plot( (k,k) , (ymin,ymax) , 'k-' , lw=1 )
    for k in pathBreak:
        ax.plot( (k,k) , (ymin,ymax) , 'k-' , lw=2 )
    ax.plot( (xmin,xmax) , (0,0) , 'k--' , lw=1 ) #fermi level
    ax.set_xlim( xmin , xmax )
    # yrange is guaranteed non-None by this point (defaulted above), so the old
    # dead `if yrange is None` branch is gone; both paths set the same limits.
    ax.set_ylim( ymin , ymax )
    ax.set_ylabel(r'$E-E_f$ (eV)')
    ax.tick_params( axis='x' , which='both' , bottom='off' , top='off' , labelbottom='off' )
    ax.set_title('{} {}'.format(mpid, formula))
    return(f, ax)
def get_kdist(BS):
    """Extract cumulative k-path distances and Fermi-shifted bands from a band
    structure object (presumably pymatgen's BandStructureSymmLine -- TODO confirm).

    Returns (kdist, energy, dirChange, pathBreak) where `energy` maps spin
    channel (1, and -1 when spin-polarized) to a transposed, Fermi-shifted band
    array, and dirChange/pathBreak are k-distances of direction changes and
    path discontinuities.
    """
    k = BS.kpoints
    cart = [i.cart_coords for i in k]
    kdist = [0.]
    dirChange, pathBreak = [], []
    for i in range(1, len(cart)):
        kstep = np.linalg.norm(cart[i] - cart[i-1])
        if k[i].label is not None and k[i-1].label is not None and i != 1:
            # Two consecutive labelled points: either a discontinuous jump
            # (different labels -> pathBreak) or a repeated high-symmetry point
            # where the path turns (same label -> dirChange). Either way the
            # cumulative distance does not advance.
            if k[i].label != k[i-1].label:
                pathBreak.append( kdist[-1] )
                kdist.append( kdist[-1] )
            else:
                dirChange.append( kdist[-1] )
                kdist.append( kdist[-1] )
        else:
            kdist.append( kdist[-1] + kstep )
    pathBreak = np.array(pathBreak)
    pathBreak = pathBreak[np.where(pathBreak>0)]
    pathBreak = np.array(list(set(pathBreak)))
    kdist = np.array(kdist)
    dirChange = np.array(list(set(dirChange)))
    if not BS.is_spin_polarized:
        # Clearer parenthesization; the asserted expression is unchanged.
        assert len(BS.bands) == 1
        key = list(BS.bands.keys())[0]
        # Bug fix: the old `energy += -BS.efermi` shifted BS.bands in place,
        # corrupting the caller's object on repeated calls. Shift a copy.
        energy = {1: (BS.bands[key] - BS.efermi).T}
    else:
        assert len(BS.bands) == 2
        keys = list(BS.bands.keys())
        energy = {1: BS.bands[keys[0]], -1 : BS.bands[keys[1]]}
        energy = {i: energy[i].T - BS.efermi for i in energy}
    return(kdist, energy, dirChange, pathBreak)
def labelBZ( ax , kdist , dirChange , pathBreak , labels, fdy = 0.03):
    """Write high-symmetry point labels below the x axis of a band plot.

    ax        : axes to annotate.
    kdist     : cumulative k-distances (only min/max are used).
    dirChange : k-distances of direction changes (one label each).
    pathBreak : k-distances of path breaks (two labels each, joined with '|').
    labels    : label strings in path order.
    fdy       : fractional y offset below the axis for the text.

    Bug fix: operates on a copy of `labels` so the caller's list is no longer
    consumed by pop().
    """
    labels = list(labels)
    yl = ax.get_ylim()
    joined = np.sort( np.concatenate((
        np.array([np.min(kdist)]),
        dirChange,pathBreak,
        np.array([np.max(kdist)]))) )
    for j , v in enumerate(joined):
        # Exact float membership is safe here: `joined` is built from the very
        # same pathBreak values being tested.
        if v in pathBreak:
            # A path break consumes two labels: end of one segment | start of next.
            s = '|'.join((labels.pop(0),labels.pop(0)))
        else:
            s = labels.pop(0)
        ax.text( v , yl[0]-(yl[1]-yl[0])*fdy , s , horizontalalignment='center' )
def getlabels(BS):
    """Collect the sequence of distinct consecutive k-point labels from BS,
    translated through `gtrans` (defined elsewhere in this module).

    Returns the translated labels in path order, with immediate repeats removed.
    """
    out = []
    kold = ''
    for k in BS.kpoints:
        if k.label is not None:
            # Bug fix: compare label text with !=, not `is not` -- identity
            # comparison on strings is unreliable (interning-dependent).
            if k.label != kold:
                out.append(k.label)
            kold = k.label
    out = [gtrans(i) for i in out]
    return(out)
def print_full_wide(x):
    """Print a DataFrame with every column shown, restoring pandas' display
    option afterwards.

    x : pandas DataFrame (x.shape[1] is used as the column budget).
    """
    pd.set_option('display.max_columns', x.shape[1])
    try:
        print(x)
    finally:
        # Restore even if printing raises, so the global option never leaks.
        pd.reset_option('display.max_columns')
def colon_sep_nums(x):
    """Parse a colon-separated value (e.g. '1:2.5:3') into a list of floats."""
    return list(map(float, str(x).split(':')))
def round2(x):
    """Round to 2 decimal places (numpy-aware, works on scalars and arrays)."""
    return np.round(x, decimals=2)
def round3(x):
    """Round to 3 decimal places (numpy-aware, works on scalars and arrays)."""
    return np.round(x, decimals=3)
def round4(x):
    """Round to 4 decimal places (numpy-aware, works on scalars and arrays)."""
    return np.round(x, decimals=4)
def plot_eg_dk_m(df):
    '''plot dk vs. E_g with 1/mean(m_e) and 1/mean(m_h) dot size'''
    # df columns used: 'eg' (band gap), 'dk' (k-space offset), 'memean'/'mhmean'
    # (mean effective masses). scaleDot, c1, c2 are module-level style constants
    # defined elsewhere in this file -- TODO confirm they are set before calling.
    fig, ax = plt.subplots()
    # Marker area scales with 1/m*, so lighter carriers draw bigger dots.
    ax.scatter(df['eg'], df['dk'], s=1./df['memean']*scaleDot, c=c1, label='', lw=1, alpha=0.2)
    ax.scatter(df['eg'] ,df['dk'] ,s=1./df['mhmean']*scaleDot, c=c2, label='', lw=1, alpha=0.1)
    # Off-screen dummy points give the legend fixed-size, fully opaque markers.
    ax.scatter(-10, -10, c=c1, s=100, label='electrons', lw=1, alpha=1)
    ax.scatter(-10, -10, c=c2, s=100, label='holes', lw=1, alpha=1)
    ax.set_xlabel('$E_g$ (eV)')
    ax.set_ylabel('$\Delta k$ ($\AA^{-1}$)')
    ax.set_ylim(-0.2,2)
    ax.set_xlim(-0.5,5)
    ax.set_title('marker size inversely proportional to $m^*$')
    ax.legend()
    return fig , ax
def plot_mh_me(DF):
    '''plot mean(m_h) vs. mean(m_e) with E_g dot size'''
    # .loc replaces the long-deprecated (and now removed) DataFrame.ix indexer;
    # boolean-mask .loc behaves identically here.
    df = DF.loc[DF.eg > 0]  # remove metals
    fig, ax = plt.subplots()
    # Diagonal reference line: m_e* == m_h*.
    ax.loglog((1e-2,1e5), (1e-2,1e5), 'k--')
    ax.scatter(df['memean'], df['mhmean'], s=df['eg']*scaleDot, c=c1, label='', lw=1, alpha=0.2)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel('avg($m_e^*$) $m_e^{-1}$')
    ax.set_ylabel('avg($m_h^*$) $m_e^{-1}$')
    ax.set_xlim(1e-2,1e5)
    ax.set_ylim(1e-2,1e5)
    ax.text(4e2, 3e-2, '{:.0f}% have $m_e^*$ < $m_h^*$'.format(len(df.loc[df.memean<df.mhmean])/len(df)*100), size=fs)
    ax.set_title('marker size proportional to bandgap')
    return fig , ax
def plot_eg_m(DF):
    '''plot mean(m_e) and mean(m_h) vs. E_g'''
    # .loc replaces the long-deprecated (and now removed) DataFrame.ix indexer;
    # boolean-mask .loc behaves identically here.
    df = DF.loc[DF.eg > 0]  # remove metals
    fig , ax = plt.subplots()
    ax.scatter(df['eg'], df['memean'], c=c1, s=ms, label='', lw=1, alpha=.4)
    ax.scatter(df['eg'], df['mhmean'], c=c2, s=ms, label='', lw=1, alpha=.4)
    # Off-screen dummy points give the legend fixed-size, fully opaque markers.
    ax.scatter(-1, -1, c=c1, s=100, label='$m_e^*$', lw=1, alpha=1)
    ax.scatter(-1, -1, c=c2, s=100, label='$m_h^*$', lw=1, alpha=1)
    ax.set_yscale('log')
    ax.set_xlabel('$E_g$ (eV)')
    ax.set_ylabel('avg($m^*$) $m_e^{-1}$')
    ax.set_xlim(0, 10)
    ax.set_ylim(1e-1,1e2)
    # Dashed box marking the region of transparent-conductor candidates.
    p = ax.add_patch(patches.Rectangle((2, 1.5e-1), 9.5-2, 1-1.5e-1, fc='none', ec='k', ls='--'))
    ax.text(4, 1.8e-1, 'transparent conductor candidates', size=fs)
    p.set_zorder(10)
    ax.legend(loc=1)
    return fig , ax
def old2newmpid(x):
    """Prefix a bare Materials Project id with 'mp-'."""
    return 'mp-' + str(x)
def dict_from_semi(x):
    """Parse a semicolon-separated dict literal string into a dict.

    NaN inputs (e.g. missing pandas cells) pass through as NaN unchanged.
    """
    try:
        if np.isnan(x):
            return(np.nan)
    except (TypeError, ValueError):
        # np.isnan rejects non-numeric input (strings etc.); fall through to
        # parsing. Previously a bare `except:` swallowed everything, including
        # KeyboardInterrupt.
        pass
    # NOTE(review): eval on external data is unsafe; if inputs are untrusted,
    # prefer ast.literal_eval.
    return(eval(x.replace(';',',')))
def read_POSCAR(inlines):
    """Parse a whitespace-delimited numeric text block into a 2-D numpy array.

    inlines : string of newline-separated rows of numbers (trailing newline ok).
    """
    rows = []
    for line in inlines.rstrip('\n').split('\n'):
        rows.append([float(tok) for tok in line.strip(' ').split()])
    return np.array(rows)
def vol(x):
    """Scalar triple product: volume of the cell spanned by the three row
    vectors of x (signed)."""
    cross = np.cross(x[0], x[1])
    return np.dot(cross, x[2])
|
{"hexsha": "d44e7d591e7d93379996e967c5dd5bb55cb95bb1", "size": 10263, "ext": "py", "lang": "Python", "max_stars_repo_path": "plotfuncs.py", "max_stars_repo_name": "mfkoerner/icarus", "max_stars_repo_head_hexsha": "eb480596be127f760d10531d27569290df3e8ff9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-02-21T23:23:53.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-22T11:05:03.000Z", "max_issues_repo_path": "plotfuncs.py", "max_issues_repo_name": "mfkoerner/icarus", "max_issues_repo_head_hexsha": "eb480596be127f760d10531d27569290df3e8ff9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plotfuncs.py", "max_forks_repo_name": "mfkoerner/icarus", "max_forks_repo_head_hexsha": "eb480596be127f760d10531d27569290df3e8ff9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7849462366, "max_line_length": 159, "alphanum_fraction": 0.5668907727, "include": true, "reason": "import numpy", "num_tokens": 3409}
|
import numpy as np
import torch
import torch.nn as nn
import random
import os
# custom weights initialization called on netG and netD
def weights_init(m):
    """DCGAN weight init: N(0, 0.02) for conv layers; N(1, 0.02) weights and
    zero bias for batch-norm layers. Other modules are left untouched."""
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# Dimension of the latent z vector fed to the generator.
nz = 100
# Base width of the generator's feature maps.
ngf = 64
# Number of output image channels (RGB).
nc=3
# All inference in this module runs on CPU.
device = torch.device("cpu")
class Generator(nn.Module):
    """DCGAN generator: maps an (N, nz, 1, 1) latent tensor to an
    (N, nc, 64, 64) image with values in [-1, 1] (Tanh output)."""
    def __init__(self):
        super(Generator, self).__init__()
        # Stack of transposed convolutions; each stride-2 layer doubles the
        # spatial size while halving the channel width.
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )
    def forward(self, input):
        # input: (N, nz, 1, 1) latent batch; returns the generated image batch.
        output = self.main(input)
        return output
class Model:
    """Wrapper exposing a pretrained DCGAN generator through a simple
    latent-space sampling interface (encoding is not implemented)."""
    def __init__(self, filename=None, model=None):
        """Build the generator and optionally load weights from `filename`.

        model : unused; kept for interface compatibility -- TODO confirm callers.
        """
        # Removed an unused `manSeed = random.randint(...)` local: the seed was
        # computed but never applied, so it had no effect.
        self.netG = Generator().to(device)
        self.netG.apply(weights_init)
        if filename is not None:
            # map_location='cpu' lets GPU-trained checkpoints load on CPU-only hosts.
            self.netG.load_state_dict(torch.load(filename, map_location='cpu'))
        print(self.netG)
    def encode_images(self, images):
        """
        Encode images x => z
        images is an n x 3 x s x s numpy array where:
          n = number of images
          3 = R G B channels
          s = size of image (eg: 64, 128, etc)
          pixels values for each channel are encoded [0,1]
        returns an n x z numpy array where:
          n = len(images)
          z = dimension of latent space
        """
        # todo: not implemented yet; currently returns None.
        pass
    def get_zdim(self):
        """
        Returns the integer dimension of the latent z space
        """
        return 100
    def sample_at(self, z):
        """
        Decode images z => x
        z is an n x z numpy array where:
          n = len(images)
          z = dimension of latent space
        return images as an n x 3 x s x s numpy array where:
          n = number of images
          3 = R G B channels
          s = size of image (eg: 64, 128, etc)
          pixels values for each channel are encoded [0,1]
        """
        z_len, nz = z.shape
        # The generator expects (n, nz, 1, 1) latent tensors.
        z = z.reshape(z_len, nz, 1, 1)
        fake = self.netG(torch.from_numpy(z).float())
        # Map the generator's Tanh range [-1, 1] to pixel range [0, 1].
        # (Removed leftover debug prints of shape / min / max.)
        return (fake.detach().numpy() + 1.0) / 2.0
|
{"hexsha": "15953a38c337f0760edd43d8712fdfb2a739ae98", "size": 3640, "ext": "py", "lang": "Python", "max_stars_repo_path": "plat/interface/pytorchdcgan.py", "max_stars_repo_name": "dribnet/plat", "max_stars_repo_head_hexsha": "0963c75d460c153183c1d414b02d4b5dc0b2208f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 332, "max_stars_repo_stars_event_min_datetime": "2016-09-16T23:26:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T07:12:14.000Z", "max_issues_repo_path": "plat/interface/pytorchdcgan.py", "max_issues_repo_name": "dribnet/plat", "max_issues_repo_head_hexsha": "0963c75d460c153183c1d414b02d4b5dc0b2208f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2016-12-16T02:56:33.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-07T10:39:39.000Z", "max_forks_repo_path": "plat/interface/pytorchdcgan.py", "max_forks_repo_name": "dribnet/plat", "max_forks_repo_head_hexsha": "0963c75d460c153183c1d414b02d4b5dc0b2208f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2016-10-06T14:21:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-15T10:36:56.000Z", "avg_line_length": 30.8474576271, "max_line_length": 79, "alphanum_fraction": 0.5368131868, "include": true, "reason": "import numpy", "num_tokens": 1024}
|
"""
Evaluating Video-QAP
Use eval metrics from Pycocoevalcap
Can be used standalone
Requires Bertscore
"""
from pathlib import Path
import fire
from yacs.config import CfgNode as CN
import yaml
import pickle
import numpy as np
from collections import namedtuple
import json
import bert_score as bs
import time
from collections import defaultdict
from tqdm import tqdm
from typing import List
from bert_score.utils import get_bert_embedding, pad_sequence, greedy_cos_idf
import torch
import sys
sys.path.append("./coco-caption")
def remove_nonascii(text):
    """Replace every non-ASCII character in `text` with a single space."""
    cleaned = []
    for ch in text:
        cleaned.append(ch if ord(ch) < 128 else " ")
    return "".join(cleaned)
def bert_cos_score_idf(
    model,
    refs,
    hyps,
    tokenizer,
    idf_dict,
    verbose=False,
    batch_size=64,
    device="cuda:0",
    all_layers=False,
):
    """
    Compute BERTScore.
    Args:
        - :param: `model` : a BERT model in `pytorch_pretrained_bert`
        - :param: `refs` (list of str): reference sentences
        - :param: `hyps` (list of str): candidate sentences
        - :param: `tokenzier` : a BERT tokenizer corresponds to `model`
        - :param: `idf_dict` : a dictionary mapping a word piece index to its
                               inverse document frequency
        - :param: `verbose` (bool): turn on intermediate status update
        - :param: `batch_size` (int): bert score processing batch size
        - :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
    Returns:
        a tensor of stacked (P, R, F1) triples, one row per (ref, hyp) pair
        (extra leading layer dimension when all_layers is set).
    """
    preds = []
    # Deduplicate sentences and process longest-first so padding waste within a
    # batch is reduced.
    def dedup_and_sort(l1):
        return sorted(list(set(l1)), key=lambda x: len(x.split(" ")), reverse=True)
    sentences = dedup_and_sort(refs + hyps)
    embs = []
    iter_range = range(0, len(sentences), batch_size)
    if verbose:
        print("computing bert embedding.")
        iter_range = tqdm(iter_range)
    # Cache per-sentence (embedding, idf) so each unique sentence is embedded once.
    stats_dict = dict()
    for batch_start in iter_range:
        sen_batch = sentences[batch_start : batch_start + batch_size]
        embs, masks, padded_idf = get_bert_embedding(
            sen_batch, model, tokenizer, idf_dict, device=device, all_layers=all_layers
        )
        # Move to CPU so the cache does not hold GPU memory.
        embs = embs.cpu()
        masks = masks.cpu()
        padded_idf = padded_idf.cpu()
        for i, sen in enumerate(sen_batch):
            # Trim each cached entry to its true (unpadded) length.
            sequence_len = masks[i].sum().item()
            emb = embs[i, :sequence_len]
            idf = padded_idf[i, :sequence_len]
            stats_dict[sen] = (emb, idf)
    def pad_batch_stats(sen_batch, stats_dict, device):
        # Re-pad a batch of cached (embedding, idf) entries and build the
        # corresponding length mask on `device`.
        stats = [stats_dict[s] for s in sen_batch]
        emb, idf = zip(*stats)
        emb = [e.to(device) for e in emb]
        idf = [i.to(device) for i in idf]
        lens = [e.size(0) for e in emb]
        emb_pad = pad_sequence(emb, batch_first=True, padding_value=2.0)
        idf_pad = pad_sequence(idf, batch_first=True)
        def length_to_mask(lens):
            lens = torch.tensor(lens, dtype=torch.long)
            max_len = max(lens)
            base = torch.arange(max_len, dtype=torch.long).expand(len(lens), max_len)
            return base < lens.unsqueeze(1)
        pad_mask = length_to_mask(lens).to(device)
        return emb_pad, pad_mask, idf_pad
    device = next(model.parameters()).device
    iter_range = range(0, len(refs), batch_size)
    if verbose:
        print("computing greedy matching.")
        iter_range = tqdm(iter_range)
    with torch.no_grad():
        for batch_start in iter_range:
            batch_refs = refs[batch_start : batch_start + batch_size]
            batch_hyps = hyps[batch_start : batch_start + batch_size]
            ref_stats = pad_batch_stats(batch_refs, stats_dict, device)
            hyp_stats = pad_batch_stats(batch_hyps, stats_dict, device)
            P, R, F1 = greedy_cos_idf(*ref_stats, *hyp_stats, all_layers)
            preds.append(torch.stack((P, R, F1), dim=-1).cpu())
    preds = torch.cat(preds, dim=1 if all_layers else 0)
    return preds
class BertScoreOrig(bs.BERTScorer):
    """BERTScorer variant whose `score` supports multiple references per
    candidate: each candidate keeps the best score over its reference group."""
    def score(self, cands, refs, verbose=False, batch_size=64, return_hash=False):
        """Score candidates against references.

        cands : list of candidate strings.
        refs  : list of reference strings, or list of reference-string groups.
        Returns (P, R, F) tensors, optionally paired with the scorer hash.
        """
        ref_group_boundaries = None
        if not isinstance(refs[0], str):
            # Flatten grouped references, remembering each group's span so the
            # max over the group can be taken afterwards.
            ref_group_boundaries = []
            ori_cands, ori_refs = cands, refs
            cands, refs = [], []
            count = 0
            for cand, ref_group in zip(ori_cands, ori_refs):
                cands += [cand] * len(ref_group)
                refs += ref_group
                ref_group_boundaries.append((count, count + len(ref_group)))
                count += len(ref_group)
        if verbose:
            print("calculating scores...")
        start = time.perf_counter()
        if self.idf:
            assert self._idf_dict, "IDF weights are not computed"
            idf_dict = self._idf_dict
        else:
            # Uniform weighting, with special tokens zeroed out.
            idf_dict = defaultdict(lambda: 1.0)
            idf_dict[self._tokenizer.sep_token_id] = 0
            idf_dict[self._tokenizer.cls_token_id] = 0
        all_preds = bert_cos_score_idf(
            self._model,
            refs,
            cands,
            self._tokenizer,
            idf_dict,
            verbose=verbose,
            device=self.device,
            batch_size=batch_size,
            all_layers=self.all_layers,
        ).cpu()
        if ref_group_boundaries is not None:
            max_preds = []
            # Bug fix: this loop previously unpacked into `start`, clobbering
            # the perf_counter timestamp taken above and corrupting the verbose
            # timing report below.
            for beg, end in ref_group_boundaries:
                max_preds.append(all_preds[beg:end].max(dim=0)[0])
            all_preds = torch.stack(max_preds, dim=0)
        if self.rescale_with_baseline:
            all_preds = (all_preds - self.baseline_vals) / (1 - self.baseline_vals)
        out = all_preds[..., 0], all_preds[..., 1], all_preds[..., 2]  # P, R, F
        if verbose:
            time_diff = time.perf_counter() - start
            print(
                f"done in {time_diff:.2f} seconds, {len(refs) / time_diff:.2f} sentences/sec"
            )
        if return_hash:
            out = tuple([out, self.hash])
        return out
class BertScoreSimple:
    """Minimal pycocoevalcap-style wrapper around BERTScore so it can be used
    interchangeably with Bleu/Meteor/etc. via compute_score(gts, res)."""
    def __init__(
        self, lang="en", verbose=False, rescale_baseline: bool = True, idf: bool = True
    ):
        import logging
        # Quieten noisy loggers pulled in by bert_score's backends.
        logging.getLogger("pytorch_pretrained_bert").setLevel(logging.WARNING)
        logging.getLogger("langid").setLevel(logging.WARNING)
        self.verbose = verbose
        self.bert_score_oop = BertScoreOrig(
            lang=lang, rescale_with_baseline=rescale_baseline, nthreads=4, idf=idf
        )
    def compute_score(
        self, gts, res, force_recompute_idf: bool = False, return_prf: bool = False
    ):
        """Score hypotheses `res` against references `gts` (dicts keyed by id,
        each value a single-element list of caption strings).

        Returns (corpus_score, sentence_scores); when return_prf is set, the
        raw (P, R, F) tensors from the scorer are returned instead.
        """
        assert gts.keys() == res.keys()
        imgIds = sorted(list(gts.keys()))
        # scores = []
        assert all([len(res[i]) == 1 for i in imgIds])
        assert all(
            [len(gts[i]) == 1 for i in imgIds]
        ), "Only single references supported in bert score atm"
        hypothesis = [res[i][0] for i in imgIds]
        references = [gts[i] for i in imgIds]
        # NOTE(review): relies on BERTScorer's private _idf / _idf_dict
        # attributes -- verify against the installed bert_score version.
        if self.bert_score_oop._idf:
            if self.bert_score_oop._idf_dict is None or force_recompute_idf:
                refs_idf = [r for ref in references for r in ref]
                self.bert_score_oop.compute_idf(sents=refs_idf)
        if return_prf:
            return self.bert_score_oop.score(
                hypothesis, references, verbose=self.verbose
            )
        # Per-sentence F1 (index 2); the corpus score is the mean F1.
        sent_scores = self.bert_score_oop.score(
            hypothesis, references, verbose=self.verbose
        )[2].tolist()
        corpus_scores = np.mean(sent_scores)
        return corpus_scores, sent_scores
class EvalFnQAP:
    """Evaluation harness for Video-QAP predictions.

    Wraps pycocoevalcap scorers plus BERTScore and computes both plain metric
    scores and baseline-rescaled, contrast-set-balanced variants."""
    def __init__(self, cfg, comm, met_keys, read_val_file: bool = True):
        # cfg: yacs config node; comm: shared communication object (unused here);
        # met_keys: subset of self.scorer_dict keys to evaluate with.
        self.cfg = cfg
        self.comm = comm
        self.met_keys = met_keys
        self.get_scorers()
        self.scorers = {}
        ScorerE = namedtuple("ScorerE", ["fn", "out_str"])
        for k in self.met_keys:
            scorer_tuple = self.scorer_dict[k]
            if scorer_tuple.to_init:
                # Entries flagged to_init hold a class; instantiate it here
                # (only BERTScore, which is expensive to construct).
                scorer = scorer_tuple.cls_fn()
            else:
                scorer = scorer_tuple.cls_fn
            self.scorers[k] = ScorerE(scorer, scorer_tuple.out_str)
        # if read_val_file:
        #     if not self.cfg.ds.val_full_bal:
        #         val_set = json.load(open(self.cfg.ds.val_qa_flat_bal_trim))
        #     else:
        #         val_set = json.load(open(self.cfg.ds.val_qa_flat_full_bal_trim))
        #     self.val_srl_annots = {x["qsrl_ind"]: x for x in val_set}
    def read_gt(self, gt_file):
        """Load ground-truth annotations from a JSON file, indexed by qsrl_ind."""
        val_set = json.load(open(gt_file))
        self.val_srl_annots = {x["qsrl_ind"]: x for x in val_set}
    def get_scorers(self):
        """Build self.scorer_dict (metric key -> (class/instance, to_init,
        output names)) and the PTB tokenizer used to normalize captions."""
        # from pycoco_scorers_vizseq import BLEUScorerAll
        from pycocoevalcap.bleu.bleu import Bleu
        # from pycocoevalcap.spice.spice import Spice
        from pycocoevalcap.cider.cider import Cider
        from pycocoevalcap.rouge.rouge import Rouge
        from pycocoevalcap.meteor.meteor import Meteor
        from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
        import logging
        import transformers
        # Silence noisy transformers logging during scoring.
        transformers.tokenization_utils.logger.setLevel(logging.ERROR)
        transformers.configuration_utils.logger.setLevel(logging.ERROR)
        transformers.modeling_utils.logger.setLevel(logging.ERROR)
        Scorer_ = namedtuple("Scorer_", ["cls_fn", "to_init", "out_str"])
        self.scorer_dict = {
            "bleu": Scorer_(
                Bleu(4, verbose=0), False, ["bleu@1", "bleu@2", "bleu@3", "bleu@4"]
            ),
            "meteor": Scorer_(Meteor(), False, ["meteor"]),
            "cider": Scorer_(Cider("corpus"), False, ["cider"]),
            "rouge": Scorer_(Rouge(), False, ["rouge"]),
            # "spice": Scorer_(Spice(), False, ["spice"]),
            "bert_score": Scorer_(BertScoreSimple, True, ["bert_score"]),
        }
        self.tokenizer = PTBTokenizer()
    def get_fin_scores(self, tok_hypo_dct, tok_gts_ref_dct, met_keys: List[str] = None):
        """Run each requested scorer on tokenized hypothesis/reference dicts
        and return {output_name: corpus_score}."""
        out_score_dict = {}
        if met_keys is None:
            met_keys = self.met_keys
        for k in met_keys:
            scorer = self.scorers[k]
            corpus_score, sent_score = scorer.fn.compute_score(
                tok_gts_ref_dct, tok_hypo_dct
            )
            if isinstance(corpus_score, float):
                assert len(scorer.out_str) == 1
                out_score_dict[scorer.out_str[0]] = corpus_score
            else:
                # Multi-output scorers (BLEU) return one score per n-gram order.
                for oi, ostr in enumerate(scorer.out_str):
                    out_score_dict[ostr] = corpus_score[oi]
        return out_score_dict
    def get_fin_scores_rescaled(self, tok_hypo_dct, tok_gts_ref_dct, full_bal=False):
        """Compute baseline-rescaled, contrast-set-balanced scores.

        Each phrase prediction is re-inserted into its question template and
        scored against (a) the reference-filled question and (b) an empty-slot
        baseline; scores are rescaled between baseline and reference ceiling,
        then balanced across contrast-set pairs. Requires read_gt() first.
        """
        def retokenize(dct):
            # Wrap each caption in a single-element list (pycoco scorer format).
            return {k: [v] for k, v in dct.items()}
        def get_ref_hyp_bas(ref_dct, hyp_dct):
            # Build three caption dicts from the question templates: reference
            # answer filled in, slot left empty (baseline), hypothesis filled in.
            qsrl_inds = [k for k in ref_dct]
            qa_pair1 = {k: self.val_srl_annots[k]["qa_pair"] for k in qsrl_inds}
            ref_new_dct = {
                k: qa_pair1[k]["question"].replace(
                    qa_pair1[k]["question_type"], qa_pair1[k]["answer"]
                )
                for k in qa_pair1
            }
            bas_new_dct = {
                k: qa_pair1[k]["question"].replace(qa_pair1[k]["question_type"], "")
                for k in qa_pair1
            }
            hyp_new_dct = {
                k: qa_pair1[k]["question"].replace(
                    qa_pair1[k]["question_type"], " ".join(hyp_dct[k])
                )
                for k in qa_pair1
            }
            return (
                retokenize(ref_new_dct),
                retokenize(bas_new_dct),
                retokenize(hyp_new_dct),
            )
        def rescore_one(
            ref_base_score1, ref_hyp_score1, refref_score1=1, sc_key: str = None
        ):
            # Rescale the hypothesis score linearly between the empty-slot
            # baseline (-> 0) and the reference-vs-reference ceiling (-> 1).
            if sc_key is not None:
                # Only CIDEr's self-score differs from 1; all others must be ~1.
                if sc_key != "cider":
                    assert abs(refref_score1 - 1) < 0.01
                    refref_score1 = 1
            if ref_base_score1 < 0.98 * refref_score1:
                return (ref_hyp_score1 - ref_base_score1) / (
                    refref_score1 - ref_base_score1
                )
            else:
                # Degenerate case: baseline already matches the ceiling.
                print("Pain")
                return 1
        def get_score_from_cs(score1, score2):
            # Balance a score over a contrast-set pair; zero out if either
            # member is near-zero.
            if score1 < 0.1 or score2 < 0.1:
                return 0
            else:
                return (score1 + score2) / 2
        def get_cons(score1, score2, cons_thresh=0.1):
            # Consistency indicator: 1 when both pair members fall on the same
            # side of the threshold.
            if score1 < cons_thresh and score2 < cons_thresh:
                return 1
            elif score1 > cons_thresh and score2 > cons_thresh:
                return 1
            else:
                return 0
        def get_out_dct_scores_from_ref_hyp_base(
            scorer_key: str,
            sent_score_only_phr,
            sent_score_ref_bas,
            sent_score_ref_hyp,
            sent_score_ref_ref,
        ):
            # Combine the four raw sentence-score lists into the full set of
            # adjusted / balanced / consistency outputs for one metric.
            assert len(sent_score_ref_bas) == len(sent_score_ref_hyp)
            sent_adj_scores = [
                rescore_one(rb_score1, rh_score1, rrscore1, scorer_key)
                for rb_score1, rh_score1, rrscore1 in zip(
                    sent_score_ref_bas, sent_score_ref_hyp, sent_score_ref_ref
                )
            ]
            sent_adj_scores_dct = {
                k: sent_adj_scores[i] for i, k in enumerate(qsrl_ids)
            }
            sent_only_phr_dct = {
                k: sent_score_only_phr[i] for i, k in enumerate(qsrl_ids)
            }
            sent_new_adj_scores = {}
            sent_only_phr_bal_scores = {}
            sent_fin_adj_scores = {}
            cons_adj_score = {}
            for k in sent_adj_scores_dct:
                # Balance each sample against its contrast-set partner (when one
                # exists); samples without partners are skipped here.
                if len(self.val_srl_annots[k]["cs_qsrl_inds"]) > 0:
                    if not full_bal:
                        other_ind = self.val_srl_annots[k]["cs_qsrl_inds"][0]
                    else:
                        assert "matched_ind" in self.val_srl_annots[k]
                        other_ind = self.val_srl_annots[k]["matched_ind"]
                    new_score_adj = get_score_from_cs(
                        sent_adj_scores_dct[k], sent_adj_scores_dct[other_ind]
                    )
                    cons_adj_score[k] = get_cons(
                        sent_adj_scores_dct[k], sent_adj_scores_dct[other_ind]
                    )
                    sent_new_adj_scores[k] = new_score_adj
                    new_score_simp = get_score_from_cs(
                        sent_only_phr_dct[k], sent_only_phr_dct[other_ind]
                    )
                    sent_only_phr_bal_scores[k] = new_score_simp
                    # Final per-sample score: the stricter of adjusted/simple.
                    sent_fin_adj_scores[k] = min(new_score_adj, new_score_simp)
            corpus_adj_scores = sum(sent_adj_scores) / len(sent_adj_scores)
            corpus_only_phr_scores = sum(sent_score_only_phr) / len(sent_score_only_phr)
            corpus_adj_scores_new = sum(
                [sent_new_adj_scores[k] for k in sent_new_adj_scores]
            ) / len(sent_new_adj_scores)
            corpus_fin_adj_scores = sum(
                [sent_fin_adj_scores[k] for k in sent_fin_adj_scores]
            ) / len(sent_fin_adj_scores)
            # (sic) "corpush" -- kept to preserve the variable name.
            corpush_bal_only_phr = sum(
                [sent_only_phr_bal_scores[k] for k in sent_only_phr_bal_scores]
            ) / len(sent_only_phr_bal_scores)
            corpus_cons = sum([cons_adj_score[k] for k in cons_adj_score]) / len(
                cons_adj_score
            )
            sc_str = scorer_key
            out_dct = {}
            # Headline score plus every intermediate, keyed by metric name.
            out_dct[sc_str] = corpus_fin_adj_scores
            out_dct[f"{sc_str}_cons_sent"] = cons_adj_score
            out_dct[f"{sc_str}_cons_corpus"] = corpus_cons
            out_dct[f"{sc_str}_corpus_fin_adj_scores"] = corpus_fin_adj_scores
            out_dct[f"{sc_str}_sent_fin_adj_scores"] = sent_fin_adj_scores
            out_dct[f"{sc_str}_corpus_adj_bal"] = corpus_adj_scores_new
            out_dct[f"{sc_str}_sent_adj_bal"] = sent_new_adj_scores
            out_dct[f"{sc_str}_corpus_onlyphr_bal"] = corpush_bal_only_phr
            out_dct[f"{sc_str}_sent_onlyphr_bal"] = sent_only_phr_bal_scores
            out_dct[f"{sc_str}_corpus_adj_notbal"] = corpus_adj_scores
            out_dct[f"{sc_str}_sent_adj_scores_not_bal"] = sent_adj_scores_dct
            out_dct[f"{sc_str}_corpus_only_phr_scores_not_bal"] = corpus_only_phr_scores
            out_dct[f"{sc_str}_sent_only_phr_scores_not_bal"] = sent_only_phr_dct
            out_dct[f"{sc_str}_sent_ref_bas"] = sent_score_ref_bas
            out_dct[f"{sc_str}_sent_ref_hyp"] = sent_score_ref_hyp
            out_dct[f"{sc_str}_sent_ref_hyp_adj"] = sent_adj_scores
            out_dct[f"{sc_str}_sent_ref_ref"] = sent_score_ref_ref
            return out_dct
        out_score_dict = {}
        refnew, basnew, hypnew = get_ref_hyp_bas(tok_gts_ref_dct, tok_hypo_dct)
        qsrl_ids = sorted(list(tok_gts_ref_dct.keys()))
        for k in self.met_keys:
            print("Starting Scorer", k)
            scorer = self.scorers[k]
            # Four scoring passes: phrase-only, ref-vs-baseline, ref-vs-hyp,
            # and ref-vs-ref (the ceiling used for rescaling).
            corpus_score_only_phr, sent_score_only_phr = scorer.fn.compute_score(
                tok_gts_ref_dct, tok_hypo_dct
            )
            corpus_score_ref_bas, sent_score_ref_bas = scorer.fn.compute_score(
                refnew, basnew
            )
            corpus_score_ref_hyp, sent_score_ref_hyp = scorer.fn.compute_score(
                refnew, hypnew
            )
            corpus_score_ref_ref, sent_score_ref_ref = scorer.fn.compute_score(
                refnew, refnew
            )
            if k != "bleu":
                out_dct = get_out_dct_scores_from_ref_hyp_base(
                    scorer.out_str[0],
                    sent_score_only_phr,
                    sent_score_ref_bas,
                    sent_score_ref_hyp,
                    sent_score_ref_ref,
                )
                out_score_dict.update(out_dct)
            else:
                # BLEU returns per-n-gram-order lists; only orders 1-2 are kept.
                for bl_ix, scstr in enumerate(scorer.out_str):
                    if bl_ix >= 2:
                        continue
                    out_dct = get_out_dct_scores_from_ref_hyp_base(
                        scstr,
                        sent_score_only_phr[bl_ix],
                        sent_score_ref_bas[bl_ix],
                        sent_score_ref_hyp[bl_ix],
                        sent_score_ref_ref[bl_ix],
                    )
                    out_score_dict.update(out_dct)
        return out_score_dict
    def eval_vidqap_met(
        self,
        fname: str,
        split_type: str = "valid",
        do_rescaling: bool = False,
        gt_file=None,
    ):
        """End-to-end evaluation of a predictions pickle.

        fname : path to a pickled list of {idx_qsrl, pred_tokens, tgt_tokens}.
        do_rescaling : when True, also computes per-question-type rescaled
        scores and dumps them next to fname as <stem>_evl_outs.pkl.
        Returns the score dict (plus hypo/ref dicts when rescaling)."""
        assert split_type in ["valid", "test"]
        assert Path(fname).exists()
        pred_file = pickle.load(open(fname, "rb"))
        if gt_file is None:
            gt_file = self.cfg.ds.val_qa_trim
        self.read_gt(gt_file=gt_file)
        hypo_dct = {}
        gts_ref_dct = {}
        for i, p in enumerate(pred_file):
            qsrl_inds = p["idx_qsrl"]
            assert len(qsrl_inds) == 1
            qsrl_ind = qsrl_inds[0]
            hypo_dct[qsrl_ind] = [{"caption": remove_nonascii(p["pred_tokens"][0])}]
            gts_ref_dct[qsrl_ind] = [{"caption": remove_nonascii(p["tgt_tokens"])}]
        assert len(hypo_dct) == len(gts_ref_dct)
        print("Num vids", len(hypo_dct))
        tok_hypo_dct = self.tokenizer.tokenize(hypo_dct)
        tok_gts_ref_dct = self.tokenizer.tokenize(gts_ref_dct)
        if not do_rescaling:
            out_score_dict = self.get_fin_scores(
                tok_hypo_dct=tok_hypo_dct, tok_gts_ref_dct=tok_gts_ref_dct
            )
        else:
            # if not self.cfg.ds.val_full_bal:
            full_bal = False
            # else:
            #     full_bal = True
            out_score_dict_by_qtype = {}
            # Question types evaluated separately (semantic-role slots).
            qtype_lst = ["<Q-ARG0>", "<Q-V>", "<Q-ARG1>", "<Q-ARG2>", "<Q-ARGM-LOC>"]
            if self.cfg.ds_to_use == "ch":
                qtype_lst = qtype_lst[1:]
            for qtype in qtype_lst:
                key_lst = [
                    k
                    for k, x in self.val_srl_annots.items()
                    if (x["qa_pair"]["question_type"] == qtype) and (k in tok_hypo_dct)
                ]
                hypo_dct = {k: tok_hypo_dct[k] for k in key_lst}
                refo_dct = {k: tok_gts_ref_dct[k] for k in key_lst}
                out_score_dict_by_qtype[qtype] = self.get_fin_scores_rescaled(
                    tok_hypo_dct=hypo_dct, tok_gts_ref_dct=refo_dct, full_bal=full_bal
                )
            out_score_dict = self.get_fin_scores_rescaled(
                tok_hypo_dct=tok_hypo_dct,
                tok_gts_ref_dct=tok_gts_ref_dct,
                full_bal=full_bal,
            )
            out_score_dict_by_qtype["full_dct"] = out_score_dict
            fname = Path(fname)
            out_file = fname.parent / f"{fname.stem}_evl_outs.pkl"
            pickle.dump(out_score_dict_by_qtype, open(out_file, "wb"))
        out_score_dict.update({"hypo_dct": tok_hypo_dct, "refo_dct": tok_gts_ref_dct})
        return out_score_dict
def main(pred_file, split_type="valid"):
    """CLI entry point: score a predictions pickle with rescaled QAP metrics
    and print the headline metric values."""
    from vidqa_code.eval_vidqap import get_met_keys_
    cfg = CN(yaml.safe_load(open("./configs/ivd_asrl_cfg.yml")))
    cfg.ds.val_full_bal = False
    met_keys = ["meteor", "rouge", "bert_score"]
    evaluator = EvalFnQAP(cfg, None, met_keys)
    out = evaluator.eval_vidqap_met(pred_file, do_rescaling=True)
    out_met_keys = get_met_keys_(met_keys)
    headline = {k: v for k, v in out.items() if k in out_met_keys}
    print(headline)
    return evaluator
if __name__ == "__main__":
    # fire exposes main()'s signature as a command-line interface.
    evl_fn = fire.Fire(main)
|
{"hexsha": "4bb1fa132ab391e7d67af18d685cd8c309f9fca5", "size": 21594, "ext": "py", "lang": "Python", "max_stars_repo_path": "vidqa_code/eval_fn_vidqap.py", "max_stars_repo_name": "TheShadow29/Video-QAP", "max_stars_repo_head_hexsha": "ffd60758c3426e593b04651c1071279bcb9912fb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-04-09T11:24:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T07:19:24.000Z", "max_issues_repo_path": "vidqa_code/eval_fn_vidqap.py", "max_issues_repo_name": "TheShadow29/Video-QAP", "max_issues_repo_head_hexsha": "ffd60758c3426e593b04651c1071279bcb9912fb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-28T13:38:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-28T17:13:52.000Z", "max_forks_repo_path": "vidqa_code/eval_fn_vidqap.py", "max_forks_repo_name": "TheShadow29/Video-QAP", "max_forks_repo_head_hexsha": "ffd60758c3426e593b04651c1071279bcb9912fb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1669535284, "max_line_length": 93, "alphanum_fraction": 0.5777067704, "include": true, "reason": "import numpy", "num_tokens": 5344}
|
import numpy as np
import pytest
from frispy import Disc
def test_smoke():
    """A Disc can be constructed with defaults."""
    disc = Disc()
    assert disc is not None
def test_disc_has_properties():
    """A fresh Disc exposes its core components."""
    disc = Disc()
    for attr in ("model", "environment", "eom"):
        assert hasattr(disc, attr)
def test_initial_conditions():
    """default_initial_conditions mirrors the private defaults exactly."""
    disc = Disc()
    defaults = disc._default_initial_conditions
    for key in defaults:
        assert disc.default_initial_conditions[key] == defaults[key]
def test_initial_conditions_kwarg():
    """An overridden initial condition is stored; all others keep defaults."""
    disc = Disc(vz=2.0)
    for key, value in disc._default_initial_conditions.items():
        expected = 2.0 if key == "vz" else value
        assert disc.default_initial_conditions[key] == expected
def test_physical_attribute_kwarg():
    """Physical attributes given as kwargs propagate to the disc and its EOM."""
    disc = Disc(mass=12345, area=0.1234)
    assert disc.mass == 12345
    assert disc.area == 0.1234
    # The EOM diameter is derived from the area of a circle.
    assert disc.eom.diameter == 2 * np.sqrt(disc.area / np.pi)
def test_compute_trajectory_basics():
    """Each coordinate array has one entry per time step."""
    disc = Disc()
    traj = disc.compute_trajectory()
    for name in disc.coordinate_names:
        assert len(traj.times) == len(getattr(traj, name))
def test_compute_trajectory_repeatable():
    """Two identical computations give identical times and coordinate lengths."""
    disc = Disc()
    first = disc.compute_trajectory()
    for name in disc.coordinate_names:
        assert len(first.times) == len(getattr(first, name))
    second = disc.compute_trajectory()
    assert all(first.times == second.times)
    for name in disc.coordinate_names:
        assert len(getattr(first, name)) == len(getattr(second, name))
def test_compute_trajectory_return_results():
    """compute_trajectory(return_scipy_results=True) returns the trajectory
    plus the raw scipy results, without changing the trajectory itself."""
    d = Disc()
    result = d.compute_trajectory()
    result2, scipy_results = d.compute_trajectory(return_scipy_results=True)
    for x in d.coordinate_names:
        assert len(result.times) == len(getattr(result, x))
    # Bug fix: result2 was previously overwritten by a recomputation without
    # the flag, so the tuple-returned trajectory was never actually compared.
    assert all(result.times == result2.times)
    for x in d.coordinate_names:
        assert len(getattr(result, x)) == len(getattr(result2, x))
    assert "status" in scipy_results
    assert scipy_results.status >= 0  # -1 is failure
def test_compute_trajectory_t_span_vs_flight_time():
    """flight_time=T and t_span=(0, T) produce the same trajectory."""
    disc = Disc()
    by_flight_time = disc.compute_trajectory(flight_time=3)
    by_t_span = disc.compute_trajectory(t_span=(0, 3), flight_time=None)
    assert all(by_flight_time.times == by_t_span.times)
    for name in disc.coordinate_names:
        assert len(getattr(by_flight_time, name)) == len(getattr(by_t_span, name))
|
{"hexsha": "be7aa22b761831df7a843d24a5709b90ccea355d", "size": 2333, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/disc_test.py", "max_stars_repo_name": "McCannDahl/FrisPy", "max_stars_repo_head_hexsha": "1583cf24dcf64eab2a4dfdbd79b7652a76b0c95f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/disc_test.py", "max_issues_repo_name": "McCannDahl/FrisPy", "max_issues_repo_head_hexsha": "1583cf24dcf64eab2a4dfdbd79b7652a76b0c95f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/disc_test.py", "max_forks_repo_name": "McCannDahl/FrisPy", "max_forks_repo_head_hexsha": "1583cf24dcf64eab2a4dfdbd79b7652a76b0c95f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1625, "max_line_length": 76, "alphanum_fraction": 0.6750964423, "include": true, "reason": "import numpy", "num_tokens": 567}
|
#INCLUDE 'MR_H_ALIGN_PADDING.H'
!***********************************************************************************************************************************
! UNIT:
!
! (MODULE)
!
! PURPOSE:
!
!
!
! DEFINITION OF VARIABLES:
!
!
!
! RECORD OF REVISIONS:
!
! DATE | PROGRAMMER | DESCRIPTION OF CHANGE
! ==== | ========== | =====================
! 20XX-XX-XX | DR. HYDE | ORIGINAL CODE.
!
!***********************************************************************************************************************************
MODULE MR_MOD_GET_DSET_UNIT
USE XMDF
USE MR_KINDS
IMPLICIT NONE
PRIVATE
PUBLIC :: MR_GET_DSET_UNIT
!***********************************************************************************************************************************
CONTAINS
!***********************************************************************************************************************************
! UNIT:
!
! (SUBROUTINE)
!
! PURPOSE:
!
!
!
! DEFINITION OF VARIABLES:
!
!
!
! RECORD OF REVISIONS:
!
! DATE | PROGRAMMER | DESCRIPTION OF CHANGE
! ==== | ========== | =====================
! 20XX-XX-XX | DR. HYDE | ORIGINAL CODE.
!
!***********************************************************************************************************************************
SUBROUTINE MR_GET_DSET_UNIT( MULTI_DSETS_ID , PATH_DSET_IN_MULTI_DSETS , DSET_UNIT , ERROR , ERRMSG )
IMPLICIT NONE
INTEGER , INTENT(IN ) :: MULTI_DSETS_ID
CHARACTER( * ) , INTENT(IN ) :: PATH_DSET_IN_MULTI_DSETS
INTEGER :: DSET_ID
CHARACTER( * ) , INTENT(OUT) :: DSET_UNIT
INTEGER , INTENT(OUT) :: ERROR
CHARACTER( * ) , INTENT(OUT) :: ERRMSG
INTEGER :: ERROR_DUMMY
ERRMSG = ""
CALL XF_OPEN_GROUP( MULTI_DSETS_ID , TRIM(PATH_DSET_IN_MULTI_DSETS) , DSET_ID , ERROR )
IF( ERROR < 0 ) THEN
ERRMSG = "Error in openning dataset group"
ELSE
CALL XF_GET_DATASET_UNITS( DSET_ID , DSET_UNIT , ERROR )
IF( ERROR < 0 ) THEN
ERRMSG = "Error in getting unit from dataset group"
END IF
CALL XF_CLOSE_GROUP( DSET_ID , ERROR_DUMMY )
IF( ERROR_DUMMY < 0 .AND. ERROR >= 0 ) THEN
ERROR = ERROR_DUMMY
ERRMSG = "Error in closing dataset group"
END IF
END IF
IF( ERROR < 0 ) THEN
ERRMSG = TRIM(ERRMSG)//" /"//TRIM(PATH_DSET_IN_MULTI_DSETS)//" in multiple datasets"
RETURN
END IF
END SUBROUTINE MR_GET_DSET_UNIT
END MODULE MR_MOD_GET_DSET_UNIT
|
{"hexsha": "f04b6c56047c84cdf0e38fe32e543680c18661f9", "size": 2675, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "__Sources/__POST_MOD_FILE_MANIPULATIONS/__POST_MOD_FILE_XMDF_MANIPULATIONS/MR_MOD_GET_DSET_UNIT.f90", "max_stars_repo_name": "zht9947/Mr_Reds", "max_stars_repo_head_hexsha": "e9ce791e855aa6caa1213db9c0df63374529f586", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-06-07T08:48:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-24T01:04:15.000Z", "max_issues_repo_path": "__Sources/__POST_MOD_FILE_MANIPULATIONS/__POST_MOD_FILE_XMDF_MANIPULATIONS/MR_MOD_GET_DSET_UNIT.f90", "max_issues_repo_name": "zht9947/Mr_Reds", "max_issues_repo_head_hexsha": "e9ce791e855aa6caa1213db9c0df63374529f586", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "__Sources/__POST_MOD_FILE_MANIPULATIONS/__POST_MOD_FILE_XMDF_MANIPULATIONS/MR_MOD_GET_DSET_UNIT.f90", "max_forks_repo_name": "zht9947/Mr_Reds", "max_forks_repo_head_hexsha": "e9ce791e855aa6caa1213db9c0df63374529f586", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.75, "max_line_length": 132, "alphanum_fraction": 0.4160747664, "num_tokens": 600}
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
XC functional, the interface to libxc
(http://www.tddft.org/programs/octopus/wiki/index.php/Libxc)
'''
import sys
import copy
import ctypes
import math
import numpy
import pyscf.lib
_itrf = pyscf.lib.load_library('libxc_itrf')
# xc_code from libxc
XC = XC_CODES = {
'XC_LDA_X' : 1, # Exchange
'XC_LDA_C_WIGNER' : 2, # Wigner parametrization
'XC_LDA_C_RPA' : 3, # Random Phase Approximation
'XC_LDA_C_HL' : 4, # Hedin & Lundqvist
'XC_LDA_C_GL' : 5, # Gunnarson & Lundqvist
'XC_LDA_C_XALPHA' : 6, # Slater Xalpha
'XC_LDA_C_VWN' : 7, # Vosko, Wilk, & Nussair
'XC_LDA_C_VWNRPA' : 8, # Vosko, Wilk, & Nussair (RPA)
'XC_LDA_C_PZ' : 9, # Perdew & Zunger
'XC_LDA_C_PZMOD' : 10, # Perdew & Zunger (Modified)
'XC_LDA_C_OBPZ' : 11, # Ortiz & Ballone (PZ)
'XC_LDA_C_PW' : 12, # Perdew & Wang
'XC_LDA_C_PWMOD' : 13, # Perdew & Wang (Modified)
'XC_LDA_C_OBPW' : 14, # Ortiz & Ballone (PW)
'XC_LDA_C_2DAMGB' : 15, # Attacalite et al
'XC_LDA_C_2DPRM' : 16, # Pittalis, Rasanen & Marques correlation in 2D
'XC_LDA_C_VBH' : 17, # von Barth & Hedin
'XC_LDA_C_1DCSC' : 18, # Casula, Sorella, and Senatore 1D correlation
'XC_LDA_X_2D' : 19, # Exchange in 2D
'XC_LDA_XC_TETER93' : 20, # Teter 93 parametrization
'XC_LDA_X_1D' : 21, # Exchange in 1D
'XC_LDA_C_ML1' : 22, # Modified LSD (version 1) of Proynov and Salahub
'XC_LDA_C_ML2' : 23, # Modified LSD (version 2) of Proynov and Salahub
'XC_LDA_C_GOMBAS' : 24, # Gombas parametrization
'XC_LDA_C_PWRPA' : 25, # Perdew & Wang fit of the RPA
'XC_LDA_C_1DLOOS' : 26, # P-F Loos correlation LDA
'XC_LDA_C_RC04' : 27, # Ragot-Cortona
'XC_LDA_C_VWN1' : 28, # Vosko, Wilk, & Nussair (1)
'XC_LDA_C_VWN2' : 29, # Vosko, Wilk, & Nussair (2)
'XC_LDA_C_VWN3' : 30, # Vosko, Wilk, & Nussair (3)
'XC_LDA_C_VWN4' : 31, # Vosko, Wilk, & Nussair (4)
'XC_LDA_K_TF' : 50, # Thomas-Fermi kinetic energy functional
'XC_LDA_K_LP' : 51, # Lee and Parr Gaussian ansatz
'XC_GGA_C_Q2D' : 47, # Chiodo et al
'XC_GGA_X_Q2D' : 48, # Chiodo et al
'XC_GGA_X_PBEMOL' : 49, # Del Campo, Gazquez, Trickey and Vela (PBE-like)
'XC_GGA_K_TFVW' : 52, # Thomas-Fermi plus von Weiszaecker correction
'XC_GGA_K_REVAPBEINT' : 53, # interpolated version of REVAPBE
'XC_GGA_K_APBEINT' : 54, # interpolated version of APBE
'XC_GGA_K_REVAPBE' : 55, # revised APBE
'XC_GGA_X_AK13' : 56, # Armiento & Kuemmel 2013
'XC_GGA_K_MEYER' : 57, # Meyer, Wang, and Young
'XC_GGA_X_LVRPW86' : 58, # Berland and Hyldgaard
'XC_GGA_X_PBETCA' : 59, # PBE revised by Tognetti et al
'XC_GGA_X_PBEINT' : 60, # PBE for hybrid interfaces
'XC_GGA_C_ZPBEINT' : 61, # spin-dependent gradient correction to PBEint
'XC_GGA_C_PBEINT' : 62, # PBE for hybrid interfaces
'XC_GGA_C_ZPBESOL' : 63, # spin-dependent gradient correction to PBEsol
'XC_GGA_XC_OPBED' : 65, # oPBE_D functional of Goerigk and Grimme
'XC_GGA_XC_OPWLYPD' : 66, # oPWLYP-D functional of Goerigk and Grimme
'XC_GGA_XC_OBLYPD' : 67, # oBLYP-D functional of Goerigk and Grimme
'XC_GGA_X_VMT84GE' : 68, # VMT{8,4} with constraint satisfaction with mu = mu_GE
'XC_GGA_X_VMT84PBE' : 69, # VMT{8,4} with constraint satisfaction with mu = mu_PBE
'XC_GGA_X_VMTGE' : 70, # Vela, Medel, and Trickey with mu = mu_GE
'XC_GGA_X_VMTPBE' : 71, # Vela, Medel, and Trickey with mu = mu_PBE
'XC_GGA_C_N12SX' : 79, # N12-SX functional from Minnesota
'XC_GGA_C_N12' : 80, # N12 functional from Minnesota
'XC_GGA_X_N12' : 82, # N12 functional from Minnesota
'XC_GGA_C_VPBE' : 83, # variant PBE
'XC_GGA_C_OPXALPHA' : 84, # one-parameter progressive functional (XALPHA version)
'XC_GGA_C_OPG96' : 85, # one-parameter progressive functional (G96 version)
'XC_GGA_C_OPPBE' : 86, # one-parameter progressive functional (PBE version)
'XC_GGA_C_OPB88' : 87, # one-parameter progressive functional (B88 version)
'XC_GGA_C_FT97' : 88, # Filatov & Thiel correlation
'XC_GGA_C_SPBE' : 89, # PBE correlation to be used with the SSB exchange
'XC_GGA_X_SSBSW' : 90, # Swarta, Sola and Bickelhaupt correction to PBE
'XC_GGA_X_SSB' : 91, # Swarta, Sola and Bickelhaupt
'XC_GGA_X_SSBD' : 92, # Swarta, Sola and Bickelhaupt dispersion
'XC_GGA_XC_HCTH407P' : 93, # HCTH/407+
'XC_GGA_XC_HCTHP76' : 94, # HCTH p=7/6
'XC_GGA_XC_HCTHP14' : 95, # HCTH p=1/4
'XC_GGA_XC_B97GGA1' : 96, # Becke 97 GGA-1
'XC_GGA_XC_HCTHA' : 97, # HCTH-A
'XC_GGA_X_BPCCAC' : 98, # BPCCAC (GRAC for the energy)
'XC_GGA_C_REVTCA' : 99, # Tognetti, Cortona, Adamo (revised)
'XC_GGA_C_TCA' : 100, # Tognetti, Cortona, Adamo
'XC_GGA_X_PBE' : 101, # Perdew, Burke & Ernzerhof exchange
'XC_GGA_X_PBER' : 102, # Perdew, Burke & Ernzerhof exchange (revised)
'XC_GGA_X_B86' : 103, # Becke 86 Xalfa,beta,gamma
'XC_GGA_X_HERMAN' : 104, # Herman et al original GGA
'XC_GGA_X_B86MGC' : 105, # Becke 86 Xalfa,beta,gamma (with mod. grad. correction)
'XC_GGA_X_B88' : 106, # Becke 88
'XC_GGA_X_G96' : 107, # Gill 96
'XC_GGA_X_PW86' : 108, # Perdew & Wang 86
'XC_GGA_X_PW91' : 109, # Perdew & Wang 91
'XC_GGA_X_OPTX' : 110, # Handy & Cohen OPTX 01
'XC_GGA_X_DK87R1' : 111, # dePristo & Kress 87 (version R1)
'XC_GGA_X_DK87R2' : 112, # dePristo & Kress 87 (version R2)
'XC_GGA_X_LG93' : 113, # Lacks & Gordon 93
'XC_GGA_X_FT97A' : 114, # Filatov & Thiel 97 (version A)
'XC_GGA_X_FT97B' : 115, # Filatov & Thiel 97 (version B)
'XC_GGA_X_PBESOL' : 116, # Perdew, Burke & Ernzerhof exchange (solids)
'XC_GGA_X_RPBE' : 117, # Hammer, Hansen & Norskov (PBE-like)
'XC_GGA_X_WC' : 118, # Wu & Cohen
'XC_GGA_X_MPW91' : 119, # Modified form of PW91 by Adamo & Barone
'XC_GGA_X_AM05' : 120, # Armiento & Mattsson 05 exchange
'XC_GGA_X_PBEA' : 121, # Madsen (PBE-like)
'XC_GGA_X_MPBE' : 122, # Adamo & Barone modification to PBE
'XC_GGA_X_XPBE' : 123, # xPBE reparametrization by Xu & Goddard
'XC_GGA_X_2DB86MGC' : 124, # Becke 86 MGC for 2D systems
'XC_GGA_X_BAYESIAN' : 125, # Bayesian best fit for the enhancement factor
'XC_GGA_X_PBEJSJR' : 126, # JSJR reparametrization by Pedroza, Silva & Capelle
'XC_GGA_X_2DB88' : 127, # Becke 88 in 2D
'XC_GGA_X_2DB86' : 128, # Becke 86 Xalfa,beta,gamma
'XC_GGA_X_2DPBE' : 129, # Perdew, Burke & Ernzerhof exchange in 2D
'XC_GGA_C_PBE' : 130, # Perdew, Burke & Ernzerhof correlation
'XC_GGA_C_LYP' : 131, # Lee, Yang & Parr
'XC_GGA_C_P86' : 132, # Perdew 86
'XC_GGA_C_PBESOL' : 133, # Perdew, Burke & Ernzerhof correlation SOL
'XC_GGA_C_PW91' : 134, # Perdew & Wang 91
'XC_GGA_C_AM05' : 135, # Armiento & Mattsson 05 correlation
'XC_GGA_C_XPBE' : 136, # xPBE reparametrization by Xu & Goddard
'XC_GGA_C_LM' : 137, # Langreth and Mehl correlation
'XC_GGA_C_PBEJRGX' : 138, # JRGX reparametrization by Pedroza, Silva & Capelle
'XC_GGA_X_OPTB88VDW' : 139, # Becke 88 reoptimized to be used with vdW functional of Dion et al
'XC_GGA_X_PBEK1VDW' : 140, # PBE reparametrization for vdW
'XC_GGA_X_OPTPBEVDW' : 141, # PBE reparametrization for vdW
'XC_GGA_X_RGE2' : 142, # Regularized PBE
'XC_GGA_C_RGE2' : 143, # Regularized PBE
'XC_GGA_X_RPW86' : 144, # refitted Perdew & Wang 86
'XC_GGA_X_KT1' : 145, # Keal and Tozer version 1
'XC_GGA_XC_KT2' : 146, # Keal and Tozer version 2
'XC_GGA_C_WL' : 147, # Wilson & Levy
'XC_GGA_C_WI' : 148, # Wilson & Ivanov
'XC_GGA_X_MB88' : 149, # Modified Becke 88 for proton transfer
'XC_GGA_X_SOGGA' : 150, # Second-order generalized gradient approximation
'XC_GGA_X_SOGGA11' : 151, # Second-order generalized gradient approximation 2011
'XC_GGA_C_SOGGA11' : 152, # Second-order generalized gradient approximation 2011
'XC_GGA_C_WI0' : 153, # Wilson & Ivanov initial version
'XC_GGA_XC_TH1' : 154, # Tozer and Handy v. 1
'XC_GGA_XC_TH2' : 155, # Tozer and Handy v. 2
'XC_GGA_XC_TH3' : 156, # Tozer and Handy v. 3
'XC_GGA_XC_TH4' : 157, # Tozer and Handy v. 4
'XC_GGA_X_C09X' : 158, # C09x to be used with the VdW of Rutgers-Chalmers
'XC_GGA_C_SOGGA11X' : 159, # To be used with hyb_gga_x_SOGGA11-X
'XC_GGA_X_LB' : 160, # van Leeuwen & Baerends
'XC_GGA_XC_HCTH93' : 161, # HCTH functional fitted to 93 molecules
'XC_GGA_XC_HCTH120' : 162, # HCTH functional fitted to 120 molecules
'XC_GGA_XC_HCTH147' : 163, # HCTH functional fitted to 147 molecules
'XC_GGA_XC_HCTH407' : 164, # HCTH functional fitted to 407 molecules
'XC_GGA_XC_EDF1' : 165, # Empirical functionals from Adamson, Gill, and Pople
'XC_GGA_XC_XLYP' : 166, # XLYP functional
'XC_GGA_XC_B97' : 167, # Becke 97
'XC_GGA_XC_B971' : 168, # Becke 97-1
'XC_GGA_XC_B972' : 169, # Becke 97-2
'XC_GGA_XC_B97D' : 170, # Grimme functional to be used with C6 vdW term
'XC_GGA_XC_B97K' : 171, # Boese-Martin for Kinetics
'XC_GGA_XC_B973' : 172, # Becke 97-3
'XC_GGA_XC_PBE1W' : 173, # Functionals fitted for water
'XC_GGA_XC_MPWLYP1W' : 174, # Functionals fitted for water
'XC_GGA_XC_PBELYP1W' : 175, # Functionals fitted for water
'XC_GGA_XC_SB981A' : 176, # Schmider-Becke 98 parameterization 1a
'XC_GGA_XC_SB981B' : 177, # Schmider-Becke 98 parameterization 1b
'XC_GGA_XC_SB981C' : 178, # Schmider-Becke 98 parameterization 1c
'XC_GGA_XC_SB982A' : 179, # Schmider-Becke 98 parameterization 2a
'XC_GGA_XC_SB982B' : 180, # Schmider-Becke 98 parameterization 2b
'XC_GGA_XC_SB982C' : 181, # Schmider-Becke 98 parameterization 2c
'XC_GGA_X_LBM' : 182, # van Leeuwen & Baerends modified
'XC_GGA_X_OL2' : 183, # Exchange form based on Ou-Yang and Levy v.2
'XC_GGA_X_APBE' : 184, # mu fixed from the semiclassical neutral atom
'XC_GGA_K_APBE' : 185, # mu fixed from the semiclassical neutral atom
'XC_GGA_C_APBE' : 186, # mu fixed from the semiclassical neutral atom
'XC_GGA_K_TW1' : 187, # Tran and Wesolowski set 1 (Table II)
'XC_GGA_K_TW2' : 188, # Tran and Wesolowski set 2 (Table II)
'XC_GGA_K_TW3' : 189, # Tran and Wesolowski set 3 (Table II)
'XC_GGA_K_TW4' : 190, # Tran and Wesolowski set 4 (Table II)
'XC_GGA_X_HTBS' : 191, # Haas, Tran, Blaha, and Schwarz
'XC_GGA_X_AIRY' : 192, # Constantin et al based on the Airy gas
'XC_GGA_X_LAG' : 193, # Local Airy Gas
'XC_GGA_XC_MOHLYP' : 194, # Functional for organometallic chemistry
'XC_GGA_XC_MOHLYP2' : 195, # Functional for barrier heights
'XC_GGA_XC_THFL' : 196, # Tozer and Handy v. FL
'XC_GGA_XC_THFC' : 197, # Tozer and Handy v. FC
'XC_GGA_XC_THFCFO' : 198, # Tozer and Handy v. FCFO
'XC_GGA_XC_THFCO' : 199, # Tozer and Handy v. FCO
'XC_GGA_C_OPTC' : 200, # Optimized correlation functional of Cohen and Handy
'XC_GGA_K_VW' : 500, # von Weiszaecker functional
'XC_GGA_K_GE2' : 501, # Second-order gradient expansion (l = 1/9)
'XC_GGA_K_GOLDEN' : 502, # TF-lambda-vW form by Golden (l = 13/45)
'XC_GGA_K_YT65' : 503, # TF-lambda-vW form by Yonei and Tomishima (l = 1/5)
'XC_GGA_K_BALTIN' : 504, # TF-lambda-vW form by Baltin (l = 5/9)
'XC_GGA_K_LIEB' : 505, # TF-lambda-vW form by Lieb (l = 0.185909191)
'XC_GGA_K_ABSP1' : 506, # gamma-TFvW form by Acharya et al [g = 1 - 1.412/N^(1/3)]
'XC_GGA_K_ABSP2' : 507, # gamma-TFvW form by Acharya et al [g = 1 - 1.332/N^(1/3)]
'XC_GGA_K_GR' : 508, # gamma-TFvW form by Gazquez and Robles
'XC_GGA_K_LUDENA' : 509, # gamma-TFvW form by Ludena
'XC_GGA_K_GP85' : 510, # gamma-TFvW form by Ghosh and Parr
'XC_GGA_K_PEARSON' : 511, # Pearson
'XC_GGA_K_OL1' : 512, # Ou-Yang and Levy v.1
'XC_GGA_K_OL2' : 513, # Ou-Yang and Levy v.2
'XC_GGA_K_FRB88' : 514, # Fuentealba & Reyes (B88 version)
'XC_GGA_K_FRPW86' : 515, # Fuentealba & Reyes (PW86 version)
'XC_GGA_K_DK' : 516, # DePristo and Kress
'XC_GGA_K_PERDEW' : 517, # Perdew
'XC_GGA_K_VSK' : 518, # Vitos, Skriver, and Kollar
'XC_GGA_K_VJKS' : 519, # Vitos, Johansson, Kollar, and Skriver
'XC_GGA_K_ERNZERHOF' : 520, # Ernzerhof
'XC_GGA_K_LC94' : 521, # Lembarki & Chermette
'XC_GGA_K_LLP' : 522, # Lee, Lee & Parr
'XC_GGA_K_THAKKAR' : 523, # Thakkar 1992
'XC_GGA_X_WPBEH' : 524, # short-range version of the PBE
'XC_GGA_X_HJSPBE' : 525, # HJS screened exchange PBE version
'XC_GGA_X_HJSPBESOL' : 526, # HJS screened exchange PBE_SOL version
'XC_GGA_X_HJSB88' : 527, # HJS screened exchange B88 version
'XC_GGA_X_HJSB97X' : 528, # HJS screened exchange B97x version
'XC_GGA_X_ITYH' : 529, # short-range recipe for exchange GGA functionals
'XC_GGA_X_SFAT' : 530, # short-range recipe for exchange GGA functionals
'XC_HYB_GGA_X_N12SX' : 81, # N12-SX functional from Minnesota
'XC_HYB_GGA_XC_B3PW91' : 401, # The original hybrid proposed by Becke
'XC_HYB_GGA_XC_B3LYP' : 402, # The (in)famous B3LYP
'XC_HYB_GGA_XC_B3P86' : 403, # Perdew 86 hybrid similar to B3PW91
'XC_HYB_GGA_XC_O3LYP' : 404, # hybrid using the optx functional
'XC_HYB_GGA_XC_MPW1K' : 405, # mixture of mPW91 and PW91 optimized for kinetics
'XC_HYB_GGA_XC_PBEH' : 406, # aka PBE0 or PBE1PBE
'XC_HYB_GGA_XC_B97' : 407, # Becke 97
'XC_HYB_GGA_XC_B971' : 408, # Becke 97-1
'XC_HYB_GGA_XC_B972' : 410, # Becke 97-2
'XC_HYB_GGA_XC_X3LYP' : 411, # maybe the best hybrid
'XC_HYB_GGA_XC_B1WC' : 412, # Becke 1-parameter mixture of WC and PBE
'XC_HYB_GGA_XC_B97K' : 413, # Boese-Martin for Kinetics
'XC_HYB_GGA_XC_B973' : 414, # Becke 97-3
'XC_HYB_GGA_XC_MPW3PW' : 415, # mixture with the mPW functional
'XC_HYB_GGA_XC_B1LYP' : 416, # Becke 1-parameter mixture of B88 and LYP
'XC_HYB_GGA_XC_B1PW91' : 417, # Becke 1-parameter mixture of B88 and PW91
'XC_HYB_GGA_XC_MPW1PW' : 418, # Becke 1-parameter mixture of mPW91 and PW91
'XC_HYB_GGA_XC_MPW3LYP' : 419, # mixture of mPW and LYP
'XC_HYB_GGA_XC_SB981A' : 420, # Schmider-Becke 98 parameterization 1a
'XC_HYB_GGA_XC_SB981B' : 421, # Schmider-Becke 98 parameterization 1b
'XC_HYB_GGA_XC_SB981C' : 422, # Schmider-Becke 98 parameterization 1c
'XC_HYB_GGA_XC_SB982A' : 423, # Schmider-Becke 98 parameterization 2a
'XC_HYB_GGA_XC_SB982B' : 424, # Schmider-Becke 98 parameterization 2b
'XC_HYB_GGA_XC_SB982C' : 425, # Schmider-Becke 98 parameterization 2c
'XC_HYB_GGA_X_SOGGA11X' : 426, # Hybrid based on SOGGA11 form
'XC_HYB_GGA_XC_HSE03' : 427, # the 2003 version of the screened hybrid HSE
'XC_HYB_GGA_XC_HSE06' : 428, # the 2006 version of the screened hybrid HSE
'XC_HYB_GGA_XC_HJSPBE' : 429, # HJS hybrid screened exchange PBE version
'XC_HYB_GGA_XC_HJSPBESOL' : 430, # HJS hybrid screened exchange PBE_SOL version
'XC_HYB_GGA_XC_HJSB88' : 431, # HJS hybrid screened exchange B88 version
'XC_HYB_GGA_XC_HJSB97X' : 432, # HJS hybrid screened exchange B97x version
'XC_HYB_GGA_XC_CAMB3LYP' : 433, # CAM version of B3LYP
'XC_HYB_GGA_XC_TUNEDCAMB3LYP': 434, # CAM version of B3LYP tunes for excitations
'XC_HYB_GGA_XC_BHANDH' : 435, # Becke half-and-half
'XC_HYB_GGA_XC_BHANDHLYP' : 436, # Becke half-and-half with B88 exchange
'XC_HYB_GGA_XC_MB3LYPRC04': 437, # B3LYP with RC04 LDA
'XC_HYB_GGA_XC_MPWLYP1M' : 453, # MPW with 1 par. for metals/LYP
'XC_HYB_GGA_XC_REVB3LYP' : 454, # Revised B3LYP
'XC_HYB_GGA_XC_CAMYBLYP' : 455, # BLYP with yukawa screening
'XC_HYB_GGA_XC_PBE013' : 456, # PBE0-1/3
'XC_MGGA_XC_OTPSSD' : 64, # oTPSS_D functional of Goerigk and Grimme
'XC_MGGA_C_CS' : 72, # Colle and Salvetti
'XC_MGGA_C_MN12SX' : 73, # MN12-SX functional of Minnesota
'XC_MGGA_C_MN12L' : 74, # MN12-L functional of Minnesota
'XC_MGGA_C_M11L' : 75, # M11-L functional of Minnesota
'XC_MGGA_C_M11' : 76, # M11 functional of Minnesota
'XC_MGGA_C_M08SO' : 77, # M08-SO functional of Minnesota
'XC_MGGA_C_M08HX' : 78, # M08-HX functional of Minnesota
'XC_MGGA_X_LTA' : 201, # Local tau approximation of Ernzerhof & Scuseria
'XC_MGGA_X_TPSS' : 202, # Perdew, Tao, Staroverov & Scuseria exchange
'XC_MGGA_X_M06L' : 203, # M06-Local functional of Minnesota
'XC_MGGA_X_GVT4' : 204, # GVT4 from Van Voorhis and Scuseria
'XC_MGGA_X_TAUHCTH' : 205, # tau-HCTH from Boese and Handy
'XC_MGGA_X_BR89' : 206, # Becke-Roussel 89
'XC_MGGA_X_BJ06' : 207, # Becke & Johnson correction to Becke-Roussel 89
'XC_MGGA_X_TB09' : 208, # Tran & Blaha correction to Becke & Johnson
'XC_MGGA_X_RPP09' : 209, # Rasanen, Pittalis, and Proetto correction to Becke & Johnson
'XC_MGGA_X_2DPRHG07' : 210, # Pittalis, Rasanen, Helbig, Gross Exchange Functional
'XC_MGGA_X_2DPRHG07PRP10' : 211, # PRGH07 with PRP10 correction
'XC_MGGA_X_REVTPSS' : 212, # revised Perdew, Tao, Staroverov & Scuseria exchange
'XC_MGGA_X_PKZB' : 213, # Perdew, Kurth, Zupan, and Blaha
'XC_MGGA_X_M05' : 214, # M05 functional of Minnesota
'XC_MGGA_X_M052X' : 215, # M05-2X functional of Minnesota
'XC_MGGA_X_M06HF' : 216, # M06-HF functional of Minnesota
'XC_MGGA_X_M06' : 217, # M06 functional of Minnesota
'XC_MGGA_X_M062X' : 218, # M06-2X functional of Minnesota
'XC_MGGA_X_M08HX' : 219, # M08-HX functional of Minnesota
'XC_MGGA_X_M08SO' : 220, # M08-SO functional of Minnesota
'XC_MGGA_X_MS0' : 221, # MS exchange of Sun, Xiao, and Ruzsinszky
'XC_MGGA_X_MS1' : 222, # MS1 exchange of Sun, et al
'XC_MGGA_X_MS2' : 223, # MS2 exchange of Sun, et al
'XC_MGGA_X_MS2H' : 224, # MS2 hybrid exchange of Sun, et al
'XC_MGGA_X_M11L' : 226, # M11-L functional of Minnesota
'XC_MGGA_X_MN12L' : 227, # MN12-L functional from Minnesota
'XC_MGGA_X_MN12SX' : 228, # MN12-SX functional from Minnesota
'XC_MGGA_C_CC06' : 229, # Cancio and Chou 2006
'XC_MGGA_X_MK00' : 230, # Exchange for accurate virtual orbital energies
'XC_MGGA_C_TPSS' : 231, # Perdew, Tao, Staroverov & Scuseria correlation
'XC_MGGA_C_VSXC' : 232, # VSxc from Van Voorhis and Scuseria (correlation part)
'XC_MGGA_C_M06L' : 233, # M06-Local functional of Minnesota
'XC_MGGA_C_M06HF' : 234, # M06-HF functional of Minnesota
'XC_MGGA_C_M06' : 235, # M06 functional of Minnesota
'XC_MGGA_C_M062X' : 236, # M06-2X functional of Minnesota
'XC_MGGA_C_M05' : 237, # M05 functional of Minnesota
'XC_MGGA_C_M052X' : 238, # M05-2X functional of Minnesota
'XC_MGGA_C_PKZB' : 239, # Perdew, Kurth, Zupan, and Blaha
'XC_MGGA_C_BC95' : 240, # Becke correlation 95
'XC_MGGA_C_REVTPSS' : 241, # revised TPSS correlation
'XC_MGGA_XC_TPSSLYP1W' : 242, # Functionals fitted for water
'XC_MGGA_X_MK00B' : 243, # Exchange for accurate virtual orbital energies (v. B)
'XC_MGGA_X_BLOC' : 244, # functional with balanced localization
'XC_MGGA_X_MODTPSS' : 245, # Modified Perdew, Tao, Staroverov & Scuseria exchange
'XC_HYB_MGGA_X_M11' : 225, # M11 functional of Minnesota
'XC_HYB_MGGA_XC_M05' : 438, # M05 functional of Minnesota
'XC_HYB_MGGA_XC_M052X' : 439, # M05-2X functional of Minnesota
'XC_HYB_MGGA_XC_B88B95' : 440, # Mixture of B88 with BC95 (B1B95)
'XC_HYB_MGGA_XC_B86B95' : 441, # Mixture of B86 with BC95
'XC_HYB_MGGA_XC_PW86B95' : 442, # Mixture of PW86 with BC95
'XC_HYB_MGGA_XC_BB1K' : 443, # Mixture of B88 with BC95 from Zhao and Truhlar
'XC_HYB_MGGA_XC_M06HF' : 444, # M06-HF functional of Minnesota
'XC_HYB_MGGA_XC_MPW1B95' : 445, # Mixture of mPW91 with BC95 from Zhao and Truhlar
'XC_HYB_MGGA_XC_MPWB1K' : 446, # Mixture of mPW91 with BC95 for kinetics
'XC_HYB_MGGA_XC_X1B95' : 447, # Mixture of X with BC95
'XC_HYB_MGGA_XC_XB1K' : 448, # Mixture of X with BC95 for kinetics
'XC_HYB_MGGA_XC_M06' : 449, # M06 functional of Minnesota
'XC_HYB_MGGA_XC_M062X' : 450, # M06-2X functional of Minnesota
'XC_HYB_MGGA_XC_PW6B95' : 451, # Mixture of PW91 with BC95 from Zhao and Truhlar
'XC_HYB_MGGA_XC_PWB6K' : 452, # Mixture of PW91 with BC95 from Zhao and Truhlar for kinetics
'XC_HYB_MGGA_XC_TPSSH' : 457, # TPSS hybrid
'XC_HYB_MGGA_XC_REVTPSSH' : 458, # revTPSS hybrid
#
# alias
#
'LDA' : 1 ,
'SLATER' : 1 ,
'VWN3' : 'VWNRPA' ,
'VWN5' : 'VWN' ,
'BLYP' : 'B88,LYP',
'BP86' : 'B88,P86',
'PBE0' : 406,
'PBE1PBE' : 406,
'B3LYP' : 'B3LYP5', # VWN5 version
'B3LYP5' : '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN',
'B3LYPG' : 402, # VWN3, used by Gaussian
'B3P86' : 'B3P865', # VWN5 version
'B3P865' : '.2*HF + .08*LDA + .72*B88, .81*P86 + .19*VWN',
'B3P86G' : 403, # VWN3, used by Gaussian
'MPW3PW' : 'MPW3PW5', # VWN5 version
'MPW3PW5' : '.2*HF + .08*LDA + .72*MPW91, .81*PW91 + .19*VWN',
'MPW3PWG' : 415, # VWN3, used by Gaussian
'MPW3LYP' : 'MPW3LYP5', # VWN5 version
'MPW3LYP5' : '.218*HF + .073*LDA + .709*MPW91, .871*LYP + .129*VWN',
'MPW3LYPG' : 419, # VWN3, used by Gaussian
'REVB3LYP' : 'REVB3LYP5', # VWN5 version
'REVB3LYP5' : '.2*HF + .13*LDA + .67*B88, .84*LYP + .16*VWN',
'REVB3LYPG' : 454, # VWN3, used by Gaussian
'X3LYP' : 'X3LYP5', # VWN5 version
'X3LYP5' : '.218*HF + .073*LDA + .478575*B88 + .166615*PW91, .871*LYP + .129*VWN',
'X3LYPG' : 411, # VWN3, used by Gaussian
}
XC_KEYS = set(XC_CODES.keys())
# libxc functional IDs grouped by family.  These tables drive the is_lda /
# is_gga / is_meta_gga / is_hybrid_xc classifiers and hybrid_coeff below.
# Duplicate entries present in the original listings (17 in LDA_IDS, 433 in
# HYB_IDS) were harmless inside a set literal and have been removed.
LDA_IDS = set((  1,   2,   3,   4,   5,   6,   7,   8,   9,  10,
                11,  12,  13,  14,  15,  16,  17,  18,  19,  20,
                21,  22,  23,  24,  25,  26,  27,  28,  29,  30,
                31,  50,  51,))
# GGA IDs, including the hybrid GGA range (81, 401-456).
GGA_IDS = set(( 47,  48,  49,  52,  53,  54,  55,  56,  57,  58,
                59,  60,  61,  62,  63,  65,  66,  67,  68,  69,
                70,  71,  79,  80,  82,  83,  84,  85,  86,  87,
                88,  89,  90,  91,  92,  93,  94,  95,  96,  97,
                98,  99, 100, 101, 102, 103, 104, 105, 106, 107,
               108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
               118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
               128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
               138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
               148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
               158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
               168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
               178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
               188, 189, 190, 191, 192, 193, 194, 195, 196, 197,
               198, 199, 200, 500, 501, 502, 503, 504, 505, 506,
               507, 508, 509, 510, 511, 512, 513, 514, 515, 516,
               517, 518, 519, 520, 521, 522, 523, 524, 525, 526,
               527, 528, 529, 530,
               # hybrid GGA
                81, 401, 402, 403, 404, 405, 406, 407, 408, 410,
               411, 412, 413, 414, 415, 416, 417, 418, 419, 420,
               421, 422, 423, 424, 425, 426, 427, 428, 429, 430,
               431, 432, 433, 434, 435, 436, 437, 453, 454, 455,
               456,))
# meta-GGA IDs, including hybrid meta-GGA (225, 438-452, 457, 458).
MGGA_IDS = set(( 64,  72,  73,  74,  75,  76,  77,  78, 201, 202,
                203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
                213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
                223, 224, 226, 227, 228, 229, 230, 231, 232, 233,
                234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
                244, 245,
                # hybrid meta-GGA
                225, 438, 439, 440, 441, 442, 443, 444, 445, 446,
                447, 448, 449, 450, 451, 452, 457, 458,))
# IDs carrying a built-in exact-exchange (HF) component.
HYB_IDS = set((401, 402, 403, 404, 405, 406, 407, 408, 410, 411,
               412, 413, 414, 415, 416, 417, 418, 419, 420, 421,
               422, 423, 424, 425, 427, 428, 429, 430, 431, 432,
               433, 434, 435, 436, 437, 453, 454, 455, 456,
               # hybrid meta-GGA
               438, 439, 440, 441, 442, 443, 444, 445, 446, 447,
               448, 449, 450, 451, 452, 457, 458,))
# IDs that bundle exchange and correlation into a single functional.
X_AND_C_IDS = set(( 20,  65,  66,  67,  93,  94,  95,  96,  97, 146,
                   154, 155, 156, 157, 161, 162, 163, 164, 165, 166,
                   167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
                   177, 178, 179, 180, 181, 194, 195, 196, 197, 198,
                   199, 401, 402, 403, 404, 405, 406, 407, 408, 410,
                   411, 412, 413, 414, 415, 416, 417, 418, 419, 420,
                   421, 422, 423, 424, 425, 427, 428, 429, 430, 431,
                   432, 433, 434, 435, 436, 437, 453, 454, 455, 456,
                    64, 242, 438, 439, 440, 441, 442, 443, 444, 445,
                   446, 447, 448, 449, 450, 451, 452, 457, 458,))
def is_lda(xc_code):
    """True if every functional referenced by ``xc_code`` is a pure LDA.

    ``xc_code`` may be a libxc integer ID, a description string understood
    by :func:`parse_xc` (including a digit string), or an iterable of such
    codes.
    """
    if isinstance(xc_code, int):
        return xc_code in LDA_IDS
    if isinstance(xc_code, str):
        if xc_code.isdigit():
            return int(xc_code) in LDA_IDS
        fn_facs = parse_xc(xc_code)[1]
        return all(xid in LDA_IDS for xid, _ in fn_facs)
    # Otherwise treat it as an iterable of xc codes.
    return all(is_lda(code) for code in xc_code)
def is_hybrid_xc(xc_code):
    """True if ``xc_code`` involves any exact (HF) exchange component.

    Accepts a libxc integer ID, a description string (including a digit
    string), or an iterable of such codes.
    """
    if isinstance(xc_code, int):
        return xc_code in HYB_IDS
    if isinstance(xc_code, str):
        if xc_code.isdigit():
            return int(xc_code) in HYB_IDS
        if 'HF' in xc_code:
            return True
        hyb, fn_facs = parse_xc(xc_code)
        return abs(hyb) > 1e-14 or any(xid in HYB_IDS for xid, _ in fn_facs)
    return any(is_hybrid_xc(code) for code in xc_code)
def is_meta_gga(xc_code):
    """True if ``xc_code`` references any meta-GGA functional.

    Accepts a libxc integer ID, a description string (including a digit
    string), or an iterable of such codes.
    """
    if isinstance(xc_code, int):
        return xc_code in MGGA_IDS
    if isinstance(xc_code, str):
        if xc_code.isdigit():
            return int(xc_code) in MGGA_IDS
        return any(xid in MGGA_IDS for xid, _ in parse_xc(xc_code)[1])
    return any(is_meta_gga(code) for code in xc_code)
def is_gga(xc_code):
    """True if ``xc_code`` is a (non-meta) GGA functional.

    A description counts as GGA when every component is either GGA or LDA
    and the whole description is not itself pure LDA.
    """
    if isinstance(xc_code, int):
        return xc_code in GGA_IDS
    if isinstance(xc_code, str):
        if xc_code.isdigit():
            return int(xc_code) in GGA_IDS
        components = parse_xc(xc_code)[1]
        only_gga_or_lda = all(xid in GGA_IDS or xid in LDA_IDS
                              for xid, _ in components)
        return only_gga_or_lda and not is_lda(xc_code)
    return (all(is_gga(code) or is_lda(code) for code in xc_code)
            and not is_lda(xc_code))
def hybrid_coeff(xc_code, spin=1):
    '''Return the exact-exchange (HF) coefficient of a hybrid functional.

    Supports recursively defined hybrid functionals: the explicit HF weight
    parsed from the description is combined with the built-in weight libxc
    reports for each hybrid functional ID.

    Args:
        xc_code : int, str or iterable
            Functional description understood by :func:`parse_xc`.
        spin : int
            Spin polarization flag forwarded to libxc.

    Returns:
        float : total exact-exchange coefficient.
    '''
    hyb, fn_facs = parse_xc(xc_code)
    # Declare the C return type once, instead of on every loop iteration.
    _itrf.LIBXC_hybrid_coeff.restype = ctypes.c_double
    for xid, fac in fn_facs:
        if xid in HYB_IDS:
            hyb += _itrf.LIBXC_hybrid_coeff(ctypes.c_int(xid), ctypes.c_int(spin))
    return hyb
def parse_xc_name(xc_name='LDA,VWN'):
    '''Convert an XC functional name to the pair of libxc internal IDs
    (exchange ID, correlation ID).
    '''
    fn_facs = parse_xc(xc_name)[1]
    x_id = fn_facs[0][0]
    c_id = fn_facs[1][0]
    return x_id, c_id
def parse_xc(description):
    '''Parse a functional description into ``(hyb, fn_facs)``.

    Rules to input functional description:

    * The given functional description must be a one-line string.
    * The functional description is case-insensitive.
    * The functional description string has two parts, separated by ",".  The
      first part describes the exchange functional, the second is the
      correlation functional.

      - If "," not appeared in string, the entire string is considered as X
        functional.
      - To neglect X functional (just apply C functional), leave blank in the
        first part, eg description=',vwn' for pure VWN functional

    * The functional name can be placed in arbitrary order.  Two name needs to
      be separated by operators "+" or "-".  Blank spaces are ignored.
      NOTE the parser only reads operators "+" "-" "*".  / is not in support.
    * A functional name is associated with one factor.  If the factor is not
      given, it is assumed equaling 1.
    * String "HF" stands for exact exchange (HF K matrix).  It is allowed to
      put in C functional part.
    * Be careful with the libxc convention on GGA functional, in which the LDA
      contribution is included.

    Returns:
        (hyb, fn_facs): ``hyb`` is the exact-exchange coefficient and
        ``fn_facs`` is a sequence of ``(libxc_id, factor)`` pairs.
    '''
    if isinstance(description, int):
        # Bare libxc ID: no exact exchange, unit weight.  The trailing comma
        # is essential: callers iterate fn_facs and unpack (id, fac) pairs,
        # so this must be a 1-tuple of pairs, not a single pair.
        return 0, ((description, 1.),)
    elif not isinstance(description, str): # e.g. an (x_code, c_code) tuple/list
        return parse_xc('%s,%s' % tuple(description))

    hyb = [0]       # one-element list so the nested parser can accumulate into it
    fn_facs = []
    def parse_token(token, possible_xc_for):
        # Translate one "fac*NAME" (or bare "NAME") token into (id, factor)
        # entries, resolving abbreviated names through possible_xc_for.
        if token:
            if '*' in token:
                fac, key = token.split('*')
                if fac[0].isalpha():  # accept "NAME*fac" as well as "fac*NAME"
                    fac, key = key, fac
                fac = float(fac)
            else:
                fac, key = 1, token
            if key == 'HF':
                hyb[0] += fac
            else:
                if key in XC_CODES:
                    x_id = XC_CODES[key]
                else:
                    possible_xc = XC_KEYS.intersection(possible_xc_for(key))
                    if possible_xc:
                        if len(possible_xc) > 1:
                            sys.stderr.write('Possible xc_code %s matches %s. '
                                             % (possible_xc, key))
                            x_id = possible_xc.pop()
                            sys.stderr.write('Take %s\n' % x_id)
                        else:
                            x_id = possible_xc.pop()
                        x_id = XC_CODES[x_id]
                    else:
                        raise KeyError('Unknown key %s' % key)
                if isinstance(x_id, str):
                    # Alias that expands to another description: recurse.
                    hyb1, acc1 = parse_xc(x_id)
                    hyb[0] += hyb1
                    fn_facs.extend(acc1)
                elif x_id is None:
                    raise NotImplementedError(key)
                else:
                    fn_facs.append((x_id, fac))
    def possible_x_for(key):
        return set(('XC_'+key,
                    'XC_LDA_X_'+key, 'XC_GGA_X_'+key, 'XC_MGGA_X_'+key,
                    'XC_HYB_GGA_X_'+key, 'XC_HYB_MGGA_X_'+key))
    def possible_xc_for(key):
        return set(('XC_LDA_XC_'+key, 'XC_GGA_XC_'+key, 'XC_MGGA_XC_'+key,
                    'XC_HYB_GGA_XC_'+key, 'XC_HYB_MGGA_XC_'+key))
    def possible_k_for(key):          # currently unused (kinetic functionals)
        return set(('XC_'+key,
                    'XC_LDA_K_'+key, 'XC_GGA_K_'+key,))
    def possible_c_for(key):
        return set(('XC_'+key,
                    'XC_LDA_C_'+key, 'XC_GGA_C_'+key, 'XC_MGGA_C_'+key))
    def remove_dup(fn_facs):
        # Merge repeated functional IDs, summing their factors while keeping
        # first-seen order.
        fn_ids = []
        facs = []
        for key, val in fn_facs:
            if key in fn_ids:
                facs[fn_ids.index(key)] += val
            else:
                fn_ids.append(key)
                facs.append(val)
        return list(zip(fn_ids, facs))

    # Normalize: drop spaces/underscores, uppercase; "-" means "plus negative".
    if ',' in description:
        x_code, c_code = description.replace(' ','').replace('_','').upper().split(',')
        for token in x_code.replace('-', '+-').split('+'):
            parse_token(token, possible_x_for)
        for token in c_code.replace('-', '+-').split('+'):
            parse_token(token, possible_c_for)
    else:
        x_code = description.replace(' ','').replace('_','').upper()
        try:
            # First try compound (XC) names, then fall back to pure X names.
            for token in x_code.replace('-', '+-').split('+'):
                parse_token(token, possible_xc_for)
        except KeyError:
            for token in x_code.replace('-', '+-').split('+'):
                parse_token(token, possible_x_for)
    return hyb[0], remove_dup(fn_facs)
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
    r'''Interface to call the libxc library to evaluate XC functional, potential
    and functional derivatives.

    * The given functional xc_code must be a one-line string.
    * The functional xc_code is case-insensitive.
    * The functional xc_code string has two parts, separated by ",".  The
      first part describes the exchange functional, the second is the
      correlation functional.

      - If "," does not appear in the string, the entire string is treated as
        the X functional.
      - To neglect the X functional (just apply the C functional), leave the
        first part blank, e.g. description=',vwn' for the pure VWN functional.

    * The functional names can be placed in arbitrary order.  Two names need
      to be separated by the operators "+" or "-".  Blank spaces are ignored.
      NOTE the parser only reads the operators "+" "-" "*".  / is not
      supported.
    * A functional name is associated with one factor.  If the factor is not
      given, it is assumed to equal 1.
    * The string "HF" stands for exact exchange (HF K matrix).  It is allowed
      to be put in the C functional part.
    * Be careful with the libxc convention for GGA functionals, in which the
      LDA contribution is included.

    Args:
        xc_code : str
            A string to describe the linear combination of different XC
            functionals.  The X and C functional are separated by comma like
            '.8*LDA+.2*B86,VWN'.  If "HF" appears in the string, it stands
            for the exact exchange.
        rho : ndarray
            Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
            Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
            where N is number of grids.
            rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
            where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
            In spin unrestricted case,
            rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
                    (den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))

    Kwargs:
        spin : int
            spin polarized if spin > 0
        relativity : int
            No effects.
        verbose : int or object of :class:`Logger`
            No effects.

    Returns:
        ex, vxc, fxc, kxc

        where

        * vxc = (vrho, vsigma, vlapl, vtau) for restricted case

        * vxc for unrestricted case
          | vrho[:,2]   = (u, d)
          | vsigma[:,3] = (uu, ud, dd)
          | vlapl[:,2]  = (u, d)
          | vtau[:,2]   = (u, d)

        * fxc for restricted case:
          (v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)

        * fxc for unrestricted case:
          | v2rho2[:,3]     = (u_u, u_d, d_d)
          | v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
          | v2sigma2[:,6]   = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
          | v2lapl2[:,3]
          | vtau2[:,3]
          | v2rholapl[:,4]
          | v2rhotau[:,4]
          | v2lapltau[:,4]
          | v2sigmalapl[:,6]
          | v2sigmatau[:,6]

        * kxc for restricted case:
          (v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3)

        * kxc for unrestricted case:
          | v3rho3[:,4]       = (u_u_u, u_u_d, u_d_d, d_d_d)
          | v3rho2sigma[:,9]  = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
          | v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
          | v3sigma3[:,10]    = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)

        see also libxc_itrf.c
    '''
    # parse_xc yields (hybrid HF-exchange coefficient, [(fn_id, factor), ...]).
    # NOTE(review): the hybrid coefficient is discarded here -- presumably
    # callers obtain it separately (e.g. via hybrid_coeff); confirm intended.
    hyb, fn_facs = parse_xc(xc_code)
    return _eval_xc(fn_facs, rho, spin, relativity, deriv, verbose)
# Functional IDs whose higher-order derivatives are singular at vanishing
# density; for these, grid points with (near-)zero density are screened out
# before the libxc call when deriv > 1 (see _eval_xc).
SINGULAR_IDS = {
    131,                      # LYP functional
    402, 404, 411, 416, 419,  # hybrid LYP functionals
    74, 75, 226, 227,         # M11L and MN12L functionals
}
def _eval_xc(fn_facs, rho, spin=0, relativity=0, deriv=1, verbose=None):
    '''Evaluate a weighted sum of libxc functionals on a grid.

    Args:
        fn_facs : list of (functional_id, factor) pairs, as produced by parse_xc
        rho : density (and derivatives); see eval_xc for the expected layout
        spin : spin-polarized if > 0
        relativity, verbose : unused
        deriv : highest derivative order to evaluate (at most 3)

    Returns:
        (exc, vxc, fxc, kxc); entries beyond `deriv` are None.
    '''
    assert(deriv <= 3)
    if spin == 0:
        nspin = 1
        # restricted: alpha and beta channels share the same density array
        rho_u = rho_d = numpy.asarray(rho, order='C')
    else:
        nspin = 2
        rho_u = numpy.asarray(rho[0], order='C')
        rho_d = numpy.asarray(rho[1], order='C')
    if rho_u.ndim == 1:
        # promote a bare density vector to the (ncomp, ngrids) layout
        rho_u = rho_u.reshape(1,-1)
        rho_d = rho_d.reshape(1,-1)
    ngrids = rho_u.shape[1]
    fn_ids = [x[0] for x in fn_facs]
    facs = [x[1] for x in fn_facs]
    # nvar = number of independent variables per grid point; together with
    # deriv it fixes the length of the libxc output record (outlen below)
    if all((is_lda(x) for x in fn_ids)):
        if spin == 0:
            nvar = 1
        else:
            nvar = 2
    elif any((is_meta_gga(x) for x in fn_ids)):
        if spin == 0:
            nvar = 4
        else:
            nvar = 9
    else: # GGA
        if spin == 0:
            nvar = 2
        else:
            nvar = 5
    # number of distinct derivative components up to order `deriv` of a
    # function of nvar variables: binomial(nvar+deriv, deriv)
    outlen = (math.factorial(nvar+deriv) //
              (math.factorial(nvar) * math.factorial(deriv)))
    if SINGULAR_IDS.intersection(fn_ids) and deriv > 1:
        # these functionals have singular high-order derivatives at zero
        # density: evaluate only where both spin densities are non-negligible
        non0idx = (rho_u[0] > 1e-10) & (rho_d[0] > 1e-10)
        rho_u = numpy.asarray(rho_u[:,non0idx], order='C')
        rho_d = numpy.asarray(rho_d[:,non0idx], order='C')
        outbuf = numpy.empty((outlen,non0idx.sum()))
    else:
        outbuf = numpy.empty((outlen,ngrids))
    n = len(fn_ids)
    # the C driver accumulates facs[i]-weighted results of each functional
    # into outbuf (see libxc_itrf.c)
    _itrf.LIBXC_eval_xc(ctypes.c_int(n),
                        (ctypes.c_int*n)(*fn_ids), (ctypes.c_double*n)(*facs),
                        ctypes.c_int(nspin),
                        ctypes.c_int(deriv), ctypes.c_int(rho_u.shape[1]),
                        rho_u.ctypes.data_as(ctypes.c_void_p),
                        rho_d.ctypes.data_as(ctypes.c_void_p),
                        outbuf.ctypes.data_as(ctypes.c_void_p))
    if outbuf.shape[1] != ngrids:
        # screened evaluation above: scatter results back to full grid size,
        # zero-padding the screened points
        out = numpy.zeros((outlen,ngrids))
        out[:,non0idx] = outbuf
        outbuf = out
    exc = outbuf[0]
    vxc = fxc = kxc = None
    # Unpack the flat output records.  The slice boundaries depend on
    # (nvar, spin); the component ordering follows libxc_itrf.c.
    if nvar == 1:  # LDA
        if deriv > 0:
            vxc = (outbuf[1], None, None, None)
        if deriv > 1:
            fxc = (outbuf[2],) + (None,)*9
        if deriv > 2:
            kxc = (outbuf[3], None, None, None)
    elif nvar == 2:
        if spin == 0:  # GGA
            if deriv > 0:
                vxc = (outbuf[1], outbuf[2], None, None)
            if deriv > 1:
                fxc = (outbuf[3], outbuf[4], outbuf[5],) + (None,)*7
            if deriv > 2:
                kxc = outbuf[6:10]
        else:  # LDA, spin-polarized
            if deriv > 0:
                vxc = (outbuf[1:3].T, None, None, None)
            if deriv > 1:
                fxc = (outbuf[3:6].T,) + (None,)*9
            if deriv > 2:
                kxc = (outbuf[6:10].T, None, None, None)
    elif nvar == 5:  # GGA, spin-polarized
        if deriv > 0:
            vxc = (outbuf[1:3].T, outbuf[3:6].T, None, None)
        if deriv > 1:
            fxc = (outbuf[6:9].T, outbuf[9:15].T, outbuf[15:21].T) + (None,)*7
        if deriv > 2:
            kxc = (outbuf[21:25].T, outbuf[25:34].T, outbuf[34:46].T, outbuf[46:56].T)
    elif nvar == 4:  # MGGA, restricted
        if deriv > 0:
            vxc = outbuf[1:5]
        if deriv > 1:
            fxc = outbuf[5:15]
    elif nvar == 9:  # MGGA, spin-polarized
        if deriv > 0:
            vxc = (outbuf[1:3].T, outbuf[3:6].T, outbuf[6:8].T, outbuf[8:10].T)
        if deriv > 1:
            fxc = (outbuf[10:13].T, outbuf[13:19].T, outbuf[19:25].T,
                   outbuf[25:28].T, outbuf[28:31].T, outbuf[31:35].T,
                   outbuf[35:39].T, outbuf[39:43].T, outbuf[43:49].T,
                   outbuf[49:55].T)
    return exc, vxc, fxc, kxc
def define_xc_(ni, description):
    '''Define the XC functional of a numerical integrator in place.  See also
    :func:`eval_xc` for the rules of the input description.

    Args:
        ni : an instance of :class:`_NumInt`
        description : str
            A string to describe the linear combination of different XC
            functionals.  The X and C functional are separated by comma like
            '.8*LDA+.2*B86,VWN'.  If "HF" appears in the string, it stands
            for the exact exchange.

    Examples:

    >>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')
    >>> mf = dft.RKS(mol)
    >>> define_xc_(mf._numint, '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN')
    >>> mf.kernel()
    -76.3783361189611
    >>> define_xc_(mf._numint, 'LDA*.08 + .72*B88 + .2*HF, .81*LYP + .19*VWN')
    >>> mf.kernel()
    -76.3783361189611
    '''
    def _eval_defined_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
        # the xc_code argument is deliberately ignored: the functional fixed
        # at definition time is always evaluated
        return eval_xc(description, rho, spin, relativity, deriv, verbose)

    def _hybrid_coeff_defined(*args, **kwargs):
        return hybrid_coeff(description)

    def _xc_type_defined(*args):
        if is_lda(description):
            return 'LDA'
        if is_meta_gga(description):
            raise NotImplementedError('meta-GGA')
        return 'GGA'

    ni.eval_xc = _eval_defined_xc
    ni.hybrid_coeff = _hybrid_coeff_defined
    ni._xc_type = _xc_type_defined
    return ni
def define_xc(ni, description):
    # Non-destructive variant of define_xc_: operates on a shallow copy of ni
    # so the caller's numerical integrator is left untouched.
    return define_xc_(copy.copy(ni), description)
define_xc.__doc__ = define_xc_.__doc__
if __name__ == '__main__':
    # Smoke test: check that a user-defined composite functional reproduces
    # the equivalent built-in functional on a water molecule.
    from pyscf import gto, dft
    mol = gto.M(
        atom = [
        ["O" , (0. , 0.     , 0.)],
        [1   , (0. , -0.757 , 0.587)],
        [1   , (0. , 0.757  , 0.587)] ],
        basis = '6311g*',)
    # reference energy with the built-in 'b88,lyp' functional
    mf = dft.RKS(mol)
    mf.xc = 'b88,lyp'
    eref = mf.kernel()
    # user-defined 'BLYP' should agree with the reference (difference ~ 0)
    mf = dft.RKS(mol)
    mf._numint = define_xc(mf._numint, 'BLYP')
    e1 = mf.kernel()
    print(e1 - eref)
    # B3LYP5 compared against a hard-coded reference energy
    mf = dft.RKS(mol)
    mf._numint = define_xc(mf._numint, 'B3LYP5')
    e1 = mf.kernel()
    print(e1 - -76.4102840115744)
|
{"hexsha": "45b991884195c67cbee554fb417654dfd352aa37", "size": 42660, "ext": "py", "lang": "Python", "max_stars_repo_path": "dft/libxc.py", "max_stars_repo_name": "gmwang18/pyscf", "max_stars_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dft/libxc.py", "max_issues_repo_name": "gmwang18/pyscf", "max_issues_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dft/libxc.py", "max_forks_repo_name": "gmwang18/pyscf", "max_forks_repo_head_hexsha": "fcd6877751661c8a9743c1c872a4a2b65f6dd7ac", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.8364485981, "max_line_length": 140, "alphanum_fraction": 0.587177684, "include": true, "reason": "import numpy", "num_tokens": 15823}
|
```python
from __future__ import division
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
sns.set_style('whitegrid')
sns.set_palette('colorblind')
np.random.seed(40997)
```
```python
import datagenerators as dg
```
```python
observed_data_0 = dg.generate_dataset_0()
observed_data_0.tail()
len(observed_data_0[observed_data_0.x == 0]), len(observed_data_0[observed_data_0.x == 1])
# X = wearing hat, Y = productivity
```
(252, 248)
### Estimate Conditional
```latex
%%latex
\begin{align}
\mathbb{E}[Y=1|X=1] - \mathbb{E}[Y=1|X=0]
\end{align}
```
\begin{align}
\mathbb{E}[Y=1|X=1] - \mathbb{E}[Y=1|X=0]
\end{align}
```python
def estimate_uplift(ds):
c, t = ds[ds.x == 0], ds[ds.x==1]
delta = t.y.mean() - c.y.mean()
# 95% confidence interval for standard normal is [-1.96, 1.96]
err = 1.96*np.sqrt( (t.y.var() / t.shape[0] + c.y.var() /c.shape[0]) )
return {'estimated_effect': delta, 'standard_err': err}
```
```python
estimate_uplift(observed_data_0)
```
{'estimated_effect': -0.10816692268305167,
'standard_err': 0.087296361223711094}
### Chi2 test for categorical data
```python
from scipy.stats import chi2_contingency
contingency_table = (
observed_data_0
.assign(placeholder=1)
.pivot_table(index='x', columns='y', values='placeholder', aggfunc='sum')
.values
)
contingency_table
_, p, _, _ = chi2_contingency(contingency_table, lambda_='log-likelihood')
p
# p > .05, say and so Null hypothesis holds. ????
# There is a significant relationship between wearing hat (X) and productivity (Y) ???
```
0.019712554881038489
```python
def run_ab_test(datagenerator, nsamples=10000, filter_=None):
'''
Generate nsamples using datagenerator with 50% X=1 and the other X=0.
Run the results through uplift to get the estimated conditional effect
(avg treatment effect)
'''
nsamples_a = int(nsamples/2)
nsamples_b = nsamples - nsamples_a
setX = np.concatenate([np.ones(nsamples_a), np.zeros(nsamples_b)]).astype(np.int64)
ds = datagenerator(nsamples, set_X=setX)
if (filter_ != None):
ds = ds[filter_(ds)].copy()
return estimate_uplift(ds)
run_ab_test(dg.generate_dataset_0)
# low p-value < .05 indicates null hypothesis needs to be rejected - that is there is no relationship between
# wearing a hat and productivity. Why this reversal? Because in our A/B test we randomized the X's, and it teases
# out the relation between X, Y
```
{'estimated_effect': 0.19500000000000006, 'standard_err': 0.019224274761603016}
## Causality
Average Treatment Effect: $ \Delta = E[Y_1] - E[Y_0] $, where $Y_1$ is the outcome when the entire population is treated, and $Y_0$ is the outcome when no one is treated (control). In RCT, $Y_1, Y_0 \perp X$. However, in Observational studies, we assume $ Y_1, Y_0 \perp X \text{ } | \text{ }Z$, where $Z$ are the control variates. Therefore, $P(X, Y_0, Y_1, Z) = P(Z)P(X|Z)P(Y_0,Y_1|Z)$
```python
observed_with_confounders = dg.generate_dataset_0(show_z=True)
print(estimate_uplift(observed_with_confounders.loc[lambda df: df.z==0]))
print(estimate_uplift(observed_with_confounders.loc[lambda df: df.z==1]))
#
# We see positive effects of hat and productivity in both splits, with Z=1 (skilled) and Z=0 (unskilled)
#
```
{'estimated_effect': 0.25333333333333335, 'standard_err': 0.052167040467244345}
{'estimated_effect': 0.12639553429027117, 'standard_err': 0.072451436661551447}
### Approach 1: Modeling Counter factual
Use estimators of $Y_0, Y_1$
$\hat{Y_0}(Z) = E[Y|Z, X=0] $
$\hat{Y_1}(Z) = E[Y|Z, X=1] $
ATE = $\Delta = \frac{1}{N} \sum_i(\hat{Y_1}(z_i) - \hat{Y_0}(z_i))$
```python
observed_1 = dg.generate_dataset_1()
observed_1.plot.scatter(x='z', y='y', c='x', cmap='rainbow_r', colorbar=True)
plt.title('Controls (RED) vs. Treated')
# set title color
# tobj = plt.title('Controls vs. Treated')
# plt.setp(tobj, color='r')
```
```python
observed_1.head() #, observed_1.tail()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>x</th>
<th>y</th>
<th>z</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1</td>
<td>0.958249</td>
<td>0.717579</td>
</tr>
<tr>
<th>1</th>
<td>1</td>
<td>0.669238</td>
<td>0.483711</td>
</tr>
<tr>
<th>2</th>
<td>1</td>
<td>1.375049</td>
<td>0.943936</td>
</tr>
<tr>
<th>3</th>
<td>0</td>
<td>1.218325</td>
<td>0.503694</td>
</tr>
<tr>
<th>4</th>
<td>1</td>
<td>1.210632</td>
<td>0.993055</td>
</tr>
</tbody>
</table>
</div>
```python
sns.kdeplot(observed_1.loc[lambda df: df.x == 0].y, color='r', label='control')
sns.kdeplot(observed_1.loc[lambda df: df.x == 1].y, color='g', label='treated')
```
```python
print("Observed ATE: {estimated_effect:.3f}, ({standard_err:.3f})".format(**estimate_uplift(observed_1)))
```
Observed ATE: 0.085, (0.037)
```python
sns.kdeplot(observed_1[lambda df: df.x == 0].z, color='r', label='control')
sns.kdeplot(observed_1[lambda df: df.x == 1].z, color='g', label='treated')
```
```python
# Real ATE by running A/B test
print('Real ATE: {estimated_effect:.3f}, ({standard_err:.3f})'.format(**run_ab_test(dg.generate_dataset_1)))
```
Real ATE: -0.520, (0.009)
#### Linear Model for the counterfactual
$Y_0 = \alpha + \beta Z + \epsilon $
$Y_1 = Y_0 + \gamma $
pip install causalinference
```python
from causalinference import CausalModel
cm = CausalModel(
Y=observed_1.y.values, # y, outcome
D=observed_1.x.values, # x, treatment
X=observed_1.z.values # z confounders
)
cm.est_via_ols(adj=1)
print(cm.estimates)
#
# The package does really well because the input data is linear and designed to
# meet the expectations
#
```
Treatment Effect Estimates: OLS
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE -0.510 0.030 -16.917 0.000 -0.569 -0.451
```python
#
# Check against a data that is not designed to such assumptions
#
observed_2 = dg.generate_dataset_2()
observed_2.plot.scatter(x='z',y='y',c='x', cmap='rainbow_r', colorbar=True)
```
```python
sns.kdeplot(observed_2[lambda df: df.x==0].y, color='r', label='control')
sns.kdeplot(observed_2[lambda df: df.x==1].y, color='g', label='treated')
```
```python
sns.kdeplot(observed_2[lambda df: df.x==0].z, color='r', label='control')
sns.kdeplot(observed_2[lambda df: df.x==1].z, color='g', label='treated')
```
```python
#
# Here the effects are not additive
#
print('Observed ATE: {estimated_effect:.3f} ({standard_err:.3f})'.format(**estimate_uplift(observed_2)))
print('Real ATE: {estimated_effect:.3f} ({standard_err:.3f})'.format(**run_ab_test(dg.generate_dataset_2)))
```
Observed ATE: 0.641 (0.040)
Real ATE: 0.569 (0.011)
```python
cm = CausalModel(
Y=observed_2.y.values,
D=observed_2.x.values,
X=observed_2.z.values
)
cm.est_via_ols(adj=1)
print(cm.estimates)
```
Treatment Effect Estimates: OLS
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 0.244 0.074 3.292 0.001 0.099 0.390
```python
#
# Estimates by matching
#
cm = CausalModel(
Y=observed_2.y.values,
D=observed_2.x.values,
X=observed_2.z.values
)
cm.est_via_matching()
print(cm.estimates)
```
Treatment Effect Estimates: Matching
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 0.535 0.139 3.863 0.000 0.264 0.807
ATC 0.979 0.170 5.759 0.000 0.646 1.312
ATT 0.074 0.168 0.439 0.661 -0.256 0.404
### Approach 2: Covariate Imbalance (matching?)
```python
#
# Imbalanced control vs treatment
#
observed_3 = dg.generate_dataset_3()
observed_3.plot.scatter(x='z',y='y',c='x', cmap='rainbow_r', colorbar=False)
#plt.xticks(np.arange(observed_3.z.min(),observed_3.z.max(), .1))
plt.xlabel('skill')
plt.ylabel('productivity')
# plt.show()
#print(observed_3.z.min(),observed_3.z.max())
# actual response curves (data generators)
z = np.linspace(0,1,100)
y0 = np.where(z>=0.4, -4*(z-0.4), 0)
y1 = np.where(z<0.6, -4*(z-0.6), 0) + 1.
plt.plot(z, y0, color='r' )
plt.plot(z, y1, color='b' )
print("Observed ATE: {estimated_effect:.3f}, ({standard_err:.3f})".format(**estimate_uplift(observed_3)))
print("Real ATE: {estimated_effect:.3f}, ({standard_err:.3f})".format(**run_ab_test(dg.generate_dataset_3)))
```
```python
# OLS estimator
cm = CausalModel(
Y=observed_3.y.values,
D=observed_3.x.values,
X=observed_3.z.values
)
cm.est_via_ols()
print(cm.estimates)
```
Treatment Effect Estimates: OLS
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 1.933 0.052 37.451 0.000 1.832 2.034
ATC 1.940 0.055 35.303 0.000 1.832 2.047
ATT 1.929 0.066 29.189 0.000 1.799 2.058
```python
# Matching
cm = CausalModel(
Y=observed_3.y.values,
D=observed_3.x.values,
X=observed_3.z.values
)
cm.est_via_matching()
print(cm.estimates)
```
Treatment Effect Estimates: Matching
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 1.877 0.163 11.521 0.000 1.557 2.196
ATC 1.830 0.277 6.597 0.000 1.286 2.374
ATT 1.906 0.196 9.734 0.000 1.522 2.290
```python
print(cm.summary_stats)
```
Summary Statistics
Controls (N_c=194) Treated (N_t=306)
Variable Mean S.d. Mean S.d. Raw-diff
--------------------------------------------------------------------------------
Y -0.109 0.450 1.214 0.453 1.323
Controls (N_c=194) Treated (N_t=306)
Variable Mean S.d. Mean S.d. Nor-diff
--------------------------------------------------------------------------------
X0 0.268 0.203 0.686 0.199 2.078
### Approach 3: Inverse Propensity Score Weighting
How likely is it for a subject to have received treatment?
$\hat{p}(Z) = P[X|Z] $
Probability of potential outcome, $Y_i$:
$P(Y_i) = P(Y_i|X=i) P(X=i)$
So we can estimate the ATE with the inverse propensity score weighted (IPS) estimator:
$\Delta_{IPS} = \frac{1}{N}\Big(\sum_{x_i = 1}\frac{y_i}{\hat{p}(z_i)} - \sum_{x_i = 0}\frac{y_i}{1 - \hat{p}(z_i)}\Big)$
```python
cm = CausalModel(
Y=observed_1.y.values,
D=observed_1.x.values,
X=observed_1.z.values
)
cm.est_propensity_s()
propensity = cm.propensity['fitted']
#print(propensity)
df = observed_1
def compute_ipse(df, propensity):
df['ips'] = np.where(df.x == 1, 1./propensity, 1./(1.-propensity))
df['ipsw'] = df.y * df.ips
ipse = (df[df.x==1].ipsw.sum() - df[df.x==0].ipsw.sum())/(1. * df.shape[0])
return ipse
# print(ipse)
#
# Compare this to the real ATE for observed_1 from previously (way above)
# Real ATE: -0.520, (0.009)
#
print(compute_ipse(df, propensity))
```
-0.486394457147
```python
#
# If we used logistic regression to estimate the propensity score, the outcome is slightly poorer
#
def compute_propensity_lg(df):
from sklearn.linear_model import LogisticRegression
lg = LogisticRegression()
X = df.z.values[:, np.newaxis]
#print(X)
#X = df.z.values.reshape(-1,1)
y = df.x.values
lg.fit(X,y)
return lg.predict_proba(X)[:,1] # probability of predicting class '1'
df = dg.generate_dataset_1()
print(compute_ipse(df, compute_propensity_lg(df)))
# The propensity computed by logistic regression is lower? (because it is not inversely weighted?)
```
-0.435559813357
### Approach 4: Doubly Robust Weighted Estimator
This technique combines the inverse propensity score weighted and the linear estimators
```python
observed_1 = dg.generate_dataset_1()
cm = CausalModel(
Y=observed_1.y.values,
D=observed_1.x.values,
X=observed_1.z.values
)
cm.est_propensity_s()
cm.est_via_weighting()
print(cm.estimates)
```
Treatment Effect Estimates: Weighting
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE -0.453 0.038 -12.013 0.000 -0.527 -0.379
### Unconfoundedness and Propensity Score
A stronger assumption than $Y_1, Y_0 \perp X \text{ }| \text{ } Z$ is
$ Y_1, Y_0 \perp X \text{ } | \text{ } \hat{p}(Z)$, that is given propensity
### Approach 5: Trimming (Calipers?)
Imbalances in covariates can cause issues. A solution is to make predictions for counterfactuals only in regions where there is good overlap, and to "trim" away points where there is no overlap. For high-dimensional data it is difficult to define overlap directly; trimming based on the propensity score is one practical approach.
```python
# Look at dataset 3, where there is poor overlap
cm = CausalModel(
Y=observed_3.y.values,
D=observed_3.x.values,
X=observed_3.z.values
)
cm.est_propensity_s()
cm.trim_s()
cm.est_via_matching()
print(cm.estimates)
#
# Real ATE: 2.456, (0.012)
#
```
Treatment Effect Estimates: Matching
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 1.824 0.113 16.199 0.000 1.604 2.045
ATC 1.848 0.218 8.473 0.000 1.420 2.275
ATT 1.807 0.094 19.183 0.000 1.623 1.992
```python
# Visualize the trimmed data
propensity = cm.propensity['fitted']
#print(cm.cutoff)
#print(propensity[:20])
mask = (propensity > cm.cutoff) & ( propensity < 1. - cm.cutoff)
#print(mask[:20])
def plot_actuals(plt):
z = np.linspace(0,1,100)
y0 = np.where(z >=0.4, -4*(z-.4), 0 )
y1 = np.where(z < 0.6, -4*(z-.6), 0) + 1
plt.plot(z,y0,'r')
plt.plot(z,y1,'b')
plt = observed_3[mask].plot.scatter(x='z',y='y',c='x', cmap='rainbow_r', colorbar=False)
plot_actuals(plt)
filter_ = lambda df: (df.z > 0.2) & (df.z < 0.7)
print("Observed ATE[mask]: {estimated_effect:.3f}, ({standard_err:.3f})".format(**estimate_uplift(observed_3[mask])))
print("Real ATE[mask]: {estimated_effect:.3f}, ({standard_err:.3f})".format(**run_ab_test(dg.generate_dataset_3,
filter_=filter_)))
```
### Approach 6: Stratification (or blocking estimator)
Another use of the propensity score is in the stratified or blocking estimator. It consists of grouping data points into groups of similar propensity and to estimate the ATE within these groups.
```python
# Use stratify to specify the boundaries or stratify_s to have it picked automatically
observed_1 = dg.generate_dataset_1()
cm = CausalModel(
Y=observed_1.y.values,
D=observed_1.x.values,
X=observed_1.z.values
)
cm.est_propensity_s()
cm.stratify_s()
print(cm.strata)
```
Stratification Summary
Propensity Score Sample Size Ave. Propensity Outcome
Stratum Min. Max. Controls Treated Controls Treated Raw-diff
--------------------------------------------------------------------------------
1 0.108 0.162 58 6 0.133 0.127 -0.428
2 0.162 0.209 25 6 0.185 0.194 -0.463
3 0.209 0.269 19 12 0.227 0.234 -0.467
4 0.269 0.508 82 43 0.374 0.385 -0.507
5 0.512 0.782 41 84 0.655 0.669 -0.453
6 0.784 0.909 18 106 0.838 0.852 -0.332
```python
# Compute overall ATE
cm.est_via_blocking()
print(cm.estimates)
```
Treatment Effect Estimates: Blocking
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE -0.458 0.029 -15.966 0.000 -0.514 -0.401
ATC -0.473 0.033 -14.338 0.000 -0.537 -0.408
ATT -0.443 0.033 -13.478 0.000 -0.508 -0.379
```python
# Just an aside
# Compute a confidence interval from sample
# https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data
#
import numpy as np
import scipy.stats
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, h, m-h, m+h
a = np.random.randn(1000,)
sns.distplot(a)
# import matplotlib.pyplot as plt
# plt.scatter(a, range(len(a)))
# plt.show()
# print(a[:10])
#print(scipy.stats.norm.stats(a, 'mvsk'))
print(np.mean(a), np.var(a))
print(mean_confidence_interval(a,.95))
```
### Various Methods
Let's try all these
```python
data_gen = dg.generate_exercise_dataset_2
ds = data_gen()
print("Observed ATE: {estimated_effect:.3f} ({standard_err:.3f})".format(**estimate_uplift(ds)))
print("Real ATE: {estimated_effect:.3f} ({standard_err:.3f})".format(**run_ab_test(data_gen)))
zs = [c for c in ds.columns if c.startswith("z")]
cm = CausalModel(
Y=ds.y.values,
D=ds.x.values,
X=ds[zs].values # confounders
)
print(zs)
cm.est_via_ols()
cm.est_via_matching()
#ipsw
cm.est_propensity_s()
cm.est_via_weighting()
#stratified
cm.stratify_s()
cm.est_via_blocking()
print(cm.estimates)
```
Observed ATE: 0.118 (0.665)
Real ATE: 4.504 (0.186)
['z_0', 'z_1', 'z_2', 'z_3', 'z_4']
Treatment Effect Estimates: OLS
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 0.474 0.321 1.476 0.140 -0.155 1.104
ATC 0.750 0.332 2.260 0.024 0.100 1.401
ATT 0.346 0.335 1.030 0.303 -0.312 1.003
Treatment Effect Estimates: Matching
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 1.768 0.498 3.552 0.000 0.792 2.743
ATC 1.598 0.625 2.556 0.011 0.373 2.824
ATT 1.846 0.548 3.367 0.001 0.771 2.922
Treatment Effect Estimates: Weighting
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 3.654 0.956 3.821 0.000 1.780 5.528
Treatment Effect Estimates: Blocking
Est. S.e. z P>|z| [95% Conf. int.]
--------------------------------------------------------------------------------
ATE 1.201 0.333 3.605 0.000 0.548 1.854
ATC -0.054 0.468 -0.115 0.908 -0.972 0.864
ATT 1.787 0.351 5.086 0.000 1.098 2.475
```python
cm.estimates
```
{'ols': {'ate': 0.47419777090420356, 'ate_se': 0.32116657752282168, 'atc': 0.75020996241911453, 'att': 0.34550000418610732, 'atc_se': 0.33195237063060334, 'att_se': 0.33546747808044225}, 'matching': {'atc': 1.5983416141984819, 'att': 1.8464926008999762, 'ate': 1.7675805871289012, 'atc_se': 0.62526736068696021, 'att_se': 0.54847357409903819, 'ate_se': 0.49758450947551408}, 'weighting': {'ate': 3.6538297598650313, 'ate_se': 0.95614235242183243}, 'blocking': {'ate': 1.2013569062060201, 'atc': -0.053889561123241191, 'att': 1.7866477810017753, 'ate_se': 0.33320254112880332, 'atc_se': 0.46840424000538011, 'att_se': 0.35128984048406586}}
```python
y = []
yerr = []
x_label = []
for method, result in dict(cm.estimates).items():
y.append(result['ate'])
yerr.append(result['ate_se'])
x_label.append(method)
x = np.arange(len(y))
plt.errorbar(x=x, y=y, yerr=yerr, linestyle='none', capsize=5, marker='o')
plt.xticks(x, x_label)
plt.title('Estimated Effect Size', fontsize=18)
plt.hlines(4.5, -.5, 3.5, linestyles='dashed')
plt.xlim(-.5,3.5)
```
```python
```
|
{"hexsha": "f9d0d2a366ef70a9c87f065f337f7a49f3f43ec4", "size": 315544, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "framework/causal_note1.ipynb", "max_stars_repo_name": "mullachv/causal_notes", "max_stars_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "framework/causal_note1.ipynb", "max_issues_repo_name": "mullachv/causal_notes", "max_issues_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "framework/causal_note1.ipynb", "max_forks_repo_name": "mullachv/causal_notes", "max_forks_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 228.8208846991, "max_line_length": 46924, "alphanum_fraction": 0.9023876226, "converted": true, "num_tokens": 6825}
|
[STATEMENT]
lemma of_hypnat_0_le_iff [simp]: "\<And>n. 0 \<le> (of_hypnat n::'a::linordered_semidom star)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. 0 \<le> of_hypnat n
[PROOF STEP]
by transfer (rule of_nat_0_le_iff)
|
{"llama_tokens": 112, "file": null, "length": 1}
|
module ProgressMeter
using Printf: @sprintf
using Distributed
export Progress, ProgressThresh, ProgressUnknown, BarGlyphs, next!, update!, cancel, finish!, @showprogress, progress_map, progress_pmap, ijulia_behavior
"""
`ProgressMeter` contains a suite of utilities for displaying progress
in long-running computations. The major functions/types in this module
are:
- `@showprogress`: an easy interface for straightforward situations
- `Progress`: an object for managing progress updates with a predictable number of iterations
- `ProgressThresh`: an object for managing progress updates where termination is governed by a threshold
- `next!` and `update!`: report that progress has been made
- `cancel` and `finish!`: early termination
"""
ProgressMeter
abstract type AbstractProgress end
"""
Holds the five characters that will be used to generate the progress bar.
"""
mutable struct BarGlyphs
leftend::Char
fill::Char
front::Union{Vector{Char}, Char}
empty::Char
rightend::Char
end
"""
String constructor for BarGlyphs - will split the string into 5 chars
"""
function BarGlyphs(s::AbstractString)
glyphs = (s...,)
if !isa(glyphs, NTuple{5,Char})
error("""
Invalid string in BarGlyphs constructor.
You supplied "$s".
Note: string argument must be exactly 5 characters long, e.g. "[=> ]".
""")
end
return BarGlyphs(glyphs...)
end
"""
`prog = Progress(n; dt=0.1, desc="Progress: ", color=:green,
output=stderr, barlen=tty_width(desc), start=0)` creates a progress meter for a
task with `n` iterations or stages starting from `start`. Output will be
generated at intervals at least `dt` seconds apart, and perhaps longer if each
iteration takes longer than `dt`. `desc` is a description of
the current task.
"""
mutable struct Progress <: AbstractProgress
n::Int
reentrantlocker::Threads.ReentrantLock
dt::Float64
counter::Int
tfirst::Float64
tlast::Float64
printed::Bool # true if we have issued at least one status update
desc::String # prefix to the percentage, e.g. "Computing..."
barlen::Union{Int,Nothing} # progress bar size (default is available terminal width)
barglyphs::BarGlyphs # the characters to be used in the bar
color::Symbol # default to green
output::IO # output stream into which the progress is written
offset::Int # position offset of progress bar (default is 0)
numprintedvalues::Int # num values printed below progress in last iteration
start::Int # which iteration number to start from
enabled::Bool # is the output enabled
function Progress(n::Integer;
dt::Real=0.1,
desc::AbstractString="Progress: ",
color::Symbol=:green,
output::IO=stderr,
barlen=nothing,
barglyphs::BarGlyphs=BarGlyphs('|','█', Sys.iswindows() ? '█' : ['▏','▎','▍','▌','▋','▊','▉'],' ','|',),
offset::Integer=0,
start::Integer=0,
enabled::Bool = true
)
reentrantlocker = Threads.ReentrantLock()
counter = start
tfirst = tlast = time()
printed = false
new(n, reentrantlocker, dt, counter, tfirst, tlast, printed, desc, barlen, barglyphs, color, output, offset, 0, start, enabled)
end
end
# Legacy positional-argument constructors, kept for backward compatibility;
# they simply forward to the keyword constructor.
Progress(n::Integer, dt::Real, desc::AbstractString="Progress: ",
         barlen=nothing, color::Symbol=:green, output::IO=stderr;
         offset::Integer=0) =
    Progress(n, dt=dt, desc=desc, barlen=barlen, color=color, output=output, offset=offset)

Progress(n::Integer, desc::AbstractString, offset::Integer=0) = Progress(n, desc=desc, offset=offset)
"""
`prog = ProgressThresh(thresh; dt=0.1, desc="Progress: ",
color=:green, output=stderr)` creates a progress meter for a task
which will terminate once a value less than or equal to `thresh` is
reached. Output will be generated at intervals at least `dt` seconds
apart, and perhaps longer if each iteration takes longer than
`dt`. `desc` is a description of the current task.
"""
mutable struct ProgressThresh{T<:Real} <: AbstractProgress
thresh::T
reentrantlocker::Threads.ReentrantLock
dt::Float64
val::T
counter::Int
triggered::Bool
tfirst::Float64
tlast::Float64
printed::Bool # true if we have issued at least one status update
desc::String # prefix to the percentage, e.g. "Computing..."
color::Symbol # default to green
output::IO # output stream into which the progress is written
numprintedvalues::Int # num values printed below progress in last iteration
offset::Int # position offset of progress bar (default is 0)
enabled::Bool # is the output a file or not
function ProgressThresh{T}(thresh;
dt::Real=0.1,
desc::AbstractString="Progress: ",
color::Symbol=:green,
output::IO=stderr,
offset::Integer=0,
enabled = true) where T
reentrantlocker = Threads.ReentrantLock()
tfirst = tlast = time()
printed = false
new{T}(thresh, reentrantlocker, dt, typemax(T), 0, false, tfirst, tlast, printed, desc, color, output, 0, offset, enabled)
end
end
# Convenience constructor: infer the type parameter from `thresh`.
ProgressThresh(thresh::Real; kwargs...) = ProgressThresh{typeof(thresh)}(thresh; kwargs...)

# Legacy constructor calls
ProgressThresh(thresh::Real, dt::Real, desc::AbstractString="Progress: ",
               color::Symbol=:green, output::IO=stderr;
               offset::Integer=0) =
    ProgressThresh(thresh; dt=dt, desc=desc, color=color, output=output, offset=offset)

ProgressThresh(thresh::Real, desc::AbstractString, offset::Integer=0) = ProgressThresh(thresh; desc=desc, offset=offset)
"""
`prog = ProgressUnknown(; dt=0.1, desc="Progress: ",
color=:green, output=stderr)` creates a progress meter for a task
which has a non-deterministic termination criterion.
Output will be generated at intervals at least `dt` seconds
apart, and perhaps longer if each iteration takes longer than
`dt`. `desc` is a description of the current task.
"""
mutable struct ProgressUnknown <: AbstractProgress
done::Bool
reentrantlocker::Threads.ReentrantLock
dt::Float64
counter::Int
triggered::Bool
tfirst::Float64
tlast::Float64
printed::Bool # true if we have issued at least one status update
desc::String # prefix to the percentage, e.g. "Computing..."
color::Symbol # default to green
output::IO # output stream into which the progress is written
numprintedvalues::Int # num values printed below progress in last iteration
enabled::Bool
end
# Keyword constructor with the standard defaults.
function ProgressUnknown(;dt::Real=0.1, desc::AbstractString="Progress: ", color::Symbol=:green, output::IO=stderr, enabled::Bool = true)
    reentrantlocker = Threads.ReentrantLock()
    tfirst = tlast = time()
    printed = false
    ProgressUnknown(false, reentrantlocker, dt, 0, false, tfirst, tlast, printed, desc, color, output, 0, enabled)
end

# Legacy positional constructors, forwarding to the keyword constructor.
ProgressUnknown(dt::Real, desc::AbstractString="Progress: ",
                color::Symbol=:green, output::IO=stderr; kwargs...) =
    ProgressUnknown(dt=dt, desc=desc, color=color, output=output; kwargs...)

ProgressUnknown(desc::AbstractString) = ProgressUnknown(desc=desc)
# Width available for the bar itself: terminal columns minus the description
# and the status suffix (the percentage/ETA string with days is 29 characters).
function tty_width(desc, output)
    _, cols = displaysize(output)::Tuple{Int,Int}
    return max(0, cols - length(desc) - 29)
end
# Package level behavior of IJulia clear output
@enum IJuliaBehavior IJuliaWarned IJuliaClear IJuliaAppend
const IJULIABEHAVIOR = Ref(IJuliaWarned)

# Configure how meters refresh inside IJulia: :warn (default, warn once),
# :clear (use IJulia.clear_output silently), or :append (plain appending).
function ijulia_behavior(b)
    @assert b in [:warn, :clear, :append]
    b == :warn && (IJULIABEHAVIOR[] = IJuliaWarned)
    b == :clear && (IJULIABEHAVIOR[] = IJuliaClear)
    b == :append && (IJULIABEHAVIOR[] = IJuliaAppend)
end

# Whether or not to use IJulia.clear_output
running_ijulia_kernel() = isdefined(Main, :IJulia) && Main.IJulia.inited
# True when refreshes should go through IJulia.clear_output instead of ANSI cursor moves.
clear_ijulia() = (IJULIABEHAVIOR[] != IJuliaAppend) && running_ijulia_kernel()
# update progress display
# Redraws the bar for a `Progress` meter: prints the final summary once the
# counter reaches `n`, otherwise refreshes at most every `dt` seconds.
# `showvalues` are extra (name, value) pairs printed below the bar.
function updateProgress!(p::Progress; showvalues = (), truncate_lines = false, valuecolor = :blue, offset::Integer = p.offset, keep = (offset == 0), desc::Union{Nothing,AbstractString} = nothing)
    # Do nothing when disabled (unless inside an IJulia kernel).
    (!running_ijulia_kernel() & !p.enabled) && return
    if desc !== nothing
        if p.barlen !== nothing
            p.barlen += length(p.desc) - length(desc) #adjust bar length to accommodate new description
        end
        p.desc = desc
    end
    p.offset = offset
    t = time()
    if p.counter >= p.n
        # Finished: print the final "Time:" summary, but only exactly at
        # counter == n and only if an intermediate bar was ever shown.
        if p.counter == p.n && p.printed
            barlen = p.barlen isa Nothing ? tty_width(p.desc, p.output) : p.barlen
            percentage_complete = 100.0 * p.counter / p.n
            bar = barstring(barlen, percentage_complete, barglyphs=p.barglyphs)
            dur = durationstring(t-p.tfirst)
            msg = @sprintf "%s%3u%%%s Time: %s" p.desc round(Int, percentage_complete) bar dur
            !clear_ijulia() && print(p.output, "\n" ^ (p.offset + p.numprintedvalues))
            move_cursor_up_while_clearing_lines(p.output, p.numprintedvalues)
            printover(p.output, msg, p.color)
            printvalues!(p, showvalues; color = valuecolor, truncate = truncate_lines)
            if keep
                println(p.output)
            else
                # Move the cursor back up so the next meter can overwrite.
                print(p.output, "\r\u1b[A" ^ (p.offset + p.numprintedvalues))
            end
            flush(p.output)
        end
        return nothing
    end
    # Rate-limit intermediate updates to one per `dt` seconds.
    if t > p.tlast+p.dt
        barlen = p.barlen isa Nothing ? tty_width(p.desc, p.output) : p.barlen
        percentage_complete = 100.0 * p.counter / p.n
        bar = barstring(barlen, percentage_complete, barglyphs=p.barglyphs)
        elapsed_time = t - p.tfirst
        # ETA extrapolated from the average time per completed iteration.
        est_total_time = elapsed_time * (p.n - p.start) / (p.counter - p.start)
        if 0 <= est_total_time <= typemax(Int)
            eta_sec = round(Int, est_total_time - elapsed_time )
            eta = durationstring(eta_sec)
        else
            eta = "N/A"
        end
        msg = @sprintf "%s%3u%%%s ETA: %s" p.desc round(Int, percentage_complete) bar eta
        !clear_ijulia() && print(p.output, "\n" ^ (p.offset + p.numprintedvalues))
        move_cursor_up_while_clearing_lines(p.output, p.numprintedvalues)
        printover(p.output, msg, p.color)
        printvalues!(p, showvalues; color = valuecolor, truncate = truncate_lines)
        !clear_ijulia() && print(p.output, "\r\u1b[A" ^ (p.offset + p.numprintedvalues))
        flush(p.output)
        # Compensate for any overhead of printing. This can be
        # especially important if you're running over a slow network
        # connection.
        p.tlast = t + 2*(time()-t)
        p.printed = true
    end
    return nothing
end
# Redraw the display for a threshold-based meter: prints the final summary
# once `val` drops to `thresh` (at most once, via the `triggered` latch),
# otherwise refreshes the "(thresh = ..., value = ...)" line at most every
# `dt` seconds. `showvalues` are extra (name, value) pairs printed below.
function updateProgress!(p::ProgressThresh; showvalues = (), truncate_lines = false, valuecolor = :blue, offset::Integer = p.offset, keep = (offset == 0), desc = p.desc)
    # Do nothing when disabled (unless inside an IJulia kernel).
    (!running_ijulia_kernel() & !p.enabled) && return
    p.offset = offset
    p.desc = desc
    t = time()
    if p.val <= p.thresh && !p.triggered
        p.triggered = true  # latch so the summary is printed exactly once
        # (the redundant second `p.triggered = true` inside the branch was removed)
        if p.printed
            dur = durationstring(t-p.tfirst)
            msg = @sprintf "%s Time: %s (%d iterations)" p.desc dur p.counter
            print(p.output, "\n" ^ (p.offset + p.numprintedvalues))
            move_cursor_up_while_clearing_lines(p.output, p.numprintedvalues)
            printover(p.output, msg, p.color)
            printvalues!(p, showvalues; color = valuecolor, truncate = truncate_lines)
            if keep
                println(p.output)
            else
                # Move the cursor back up so the next meter can overwrite.
                print(p.output, "\r\u1b[A" ^ (p.offset + p.numprintedvalues))
            end
            flush(p.output)
        end
        return
    end
    # Rate-limit intermediate updates to one per `dt` seconds.
    if t > p.tlast+p.dt && !p.triggered
        msg = @sprintf "%s (thresh = %g, value = %g)" p.desc p.thresh p.val
        print(p.output, "\n" ^ (p.offset + p.numprintedvalues))
        move_cursor_up_while_clearing_lines(p.output, p.numprintedvalues)
        printover(p.output, msg, p.color)
        printvalues!(p, showvalues; color = valuecolor, truncate = truncate_lines)
        print(p.output, "\r\u1b[A" ^ (p.offset + p.numprintedvalues))
        flush(p.output)
        # Compensate for any overhead of printing. This can be
        # especially important if you're running over a slow network
        # connection.
        p.tlast = t + 2*(time()-t)
        p.printed = true
    end
end
# Redraw the display for an open-ended meter: prints the final
# "counter / Time" summary once `done` is set, otherwise refreshes
# the running counter line at most every `dt` seconds.
function updateProgress!(p::ProgressUnknown; showvalues = (), truncate_lines = false, valuecolor = :blue, desc = p.desc)
    # Do nothing when disabled (unless inside an IJulia kernel).
    (!running_ijulia_kernel() & !p.enabled) && return
    p.desc = desc
    t = time()
    if p.done
        # Final summary, only if an intermediate line was ever shown.
        if p.printed
            dur = durationstring(t-p.tfirst)
            msg = @sprintf "%s %d \t Time: %s" p.desc p.counter dur
            move_cursor_up_while_clearing_lines(p.output, p.numprintedvalues)
            printover(p.output, msg, p.color)
            printvalues!(p, showvalues; color = valuecolor, truncate = truncate_lines)
            println(p.output)
            flush(p.output)
        end
        return
    end
    # Rate-limit intermediate updates to one per `dt` seconds.
    if t > p.tlast+p.dt
        dur = durationstring(t-p.tfirst)
        msg = @sprintf "%s %d \t Time: %s" p.desc p.counter dur
        move_cursor_up_while_clearing_lines(p.output, p.numprintedvalues)
        printover(p.output, msg, p.color)
        printvalues!(p, showvalues; color = valuecolor, truncate = truncate_lines)
        flush(p.output)
        # Compensate for any overhead of printing. This can be
        # especially important if you're running over a slow network
        # connection.
        p.tlast = t + 2*(time()-t)
        p.printed = true
        return
    end
end
# update progress display
"""
`next!(prog, [color], step = 1)` reports that `step` units of progress have been
made. Depending on the time interval since the last update, this may
or may not result in a change to the display.
You may optionally change the color of the display. See also `update!`.
"""
function next!(p::Union{Progress, ProgressUnknown}; step::Int = 1, options...)
    # The lock makes next! safe to call from multiple threads.
    lock(p.reentrantlocker) do
        p.counter += step
        updateProgress!(p; options...)
    end
end

function next!(p::Union{Progress, ProgressUnknown}, color::Symbol; step::Int = 1, options...)
    lock(p.reentrantlocker) do
        p.color = color
        p.counter += step
        updateProgress!(p; options...)
    end
end
"""
`update!(prog, counter, [color])` sets the progress counter to
`counter`, relative to the `n` units of progress specified when `prog`
was initialized. Depending on the time interval since the last
update, this may or may not result in a change to the display.
If `prog` is a `ProgressThresh`, `update!(prog, val, [color])` specifies
the current value.
You may optionally change the color of the display. See also `next!`.
"""
function update!(p::Union{Progress, ProgressUnknown}, counter::Int=p.counter, color::Symbol=p.color; options...)
lock(p.reentrantlocker) do
p.counter = counter
p.color = color
updateProgress!(p; options...)
end
end
function update!(p::ProgressThresh, val=p.val, color::Symbol=p.color; increment::Bool = true, options...)
lock(p.reentrantlocker) do
p.val = val
if increment
p.counter += 1
end
p.color = color
updateProgress!(p; options...)
end
end
"""
`cancel(prog, [msg], [color=:red])` cancels the progress display
before all tasks were completed. Optionally you can specify the
message printed and its color.
See also `finish!`.
"""
function cancel(p::AbstractProgress, msg::AbstractString = "Aborted before all tasks were completed", color = :red; showvalues = (), truncate_lines = false, valuecolor = :blue, offset = p.offset, keep = (offset == 0))
lock(p.reentrantlocker) do
p.offset = offset
if p.printed
print(p.output, "\n" ^ (p.offset + p.numprintedvalues))
move_cursor_up_while_clearing_lines(p.output, p.numprintedvalues)
printover(p.output, msg, color)
printvalues!(p, showvalues; color = valuecolor, truncate = truncate_lines)
if keep
println(p.output)
else
print(p.output, "\r\u1b[A" ^ (p.offset + p.numprintedvalues))
end
end
end
return
end
"""
`finish!(prog)` indicates that all tasks have been completed.
See also `cancel`.
"""
function finish!(p::Progress; options...)
while p.counter < p.n
next!(p; options...)
end
end
function finish!(p::ProgressThresh; options...)
update!(p, p.thresh; options...)
end
function finish!(p::ProgressUnknown; options...)
lock(p.reentrantlocker) do
p.done = true
updateProgress!(p; options...)
end
end
# Internal method to print additional values below progress bar.
# `showvalues` is an iterable of (name, value) pairs; names are right-padded
# so the values line up. Updates `p.numprintedvalues` with the number of
# terminal lines used so later redraws can clear them.
function printvalues!(p::AbstractProgress, showvalues; color = :normal, truncate = false)
    length(showvalues) == 0 && return
    # width of the longest name, used for alignment
    maxwidth = maximum(Int[length(string(name)) for (name, _) in showvalues])
    p.numprintedvalues = 0
    for (name, value) in showvalues
        msg = "\n " * rpad(string(name) * ": ", maxwidth+2+1) * string(value)
        max_len = (displaysize(p.output)::Tuple{Int,Int})[2]
        # I don't understand why the minus 1 is necessary here, but empirically
        # it is needed.
        msg_lines = ceil(Int, (length(msg)-1) / max_len)
        if truncate && msg_lines >= 2
            # For multibyte characters, need to index with nextind.
            printover(p.output, msg[1:nextind(msg, 1, max_len-1)] * "…", color)
            p.numprintedvalues += 1
        else
            printover(p.output, msg, color)
            p.numprintedvalues += msg_lines
        end
    end
    p
end

# Internal method to print additional values below progress bar (lazy-showvalues version)
printvalues!(p::AbstractProgress, showvalues::Function; kwargs...) = printvalues!(p, showvalues(); kwargs...)
# Clear `numlinesup` lines above the cursor using ANSI escapes, or delegate
# to IJulia.clear_output when running inside an IJulia kernel (which cannot
# do cursor movement).
function move_cursor_up_while_clearing_lines(io, numlinesup)
    if numlinesup > 0 && clear_ijulia()
        Main.IJulia.clear_output(true)
        # Warn once (package default) that clear_output wipes the whole cell.
        if IJULIABEHAVIOR[] == IJuliaWarned
            @warn "ProgressMeter by default refresh meters with additional information in IJulia via `IJulia.clear_output`, which clears all outputs in the cell. \n - To prevent this behaviour, do `ProgressMeter.ijulia_behavior(:append)`. \n - To disable this warning message, do `ProgressMeter.ijulia_behavior(:clear)`."
        end
    else
        for _ in 1:numlinesup
            # \r to column 0, \u1b[K clear line, \u1b[A move up one line
            print(io, "\r\u1b[K\u1b[A")
        end
    end
end
# Print `s` over the current terminal line: carriage-return, styled text,
# then clear the remainder of the line (except under IJulia/ESS/Atom, whose
# consoles handle overwriting differently).
# NOTE(review): the default `:color_normal` is not a standard printstyled
# color name; all visible callers pass an explicit color -- confirm the
# default is intentional before relying on it.
function printover(io::IO, s::AbstractString, color::Symbol = :color_normal)
    print(io, "\r")
    printstyled(io, s; color=color)
    if isdefined(Main, :IJulia)
        Main.IJulia.stdio_bytes[] = 0 # issue #76: circumvent IJulia I/O throttling
    elseif isdefined(Main, :ESS) || isdefined(Main, :Atom)
        # these consoles need no explicit line clearing
    else
        print(io, "\u1b[K") # clear the rest of the line
    end
end
# Select the glyph drawn at the bar's leading edge. When `front` is a vector,
# `frac_solid` (the fractional part of the partially filled cell) picks a
# glyph; rounding past the last glyph yields `fill`, rounding to 0 yields `empty`.
function compute_front(barglyphs::BarGlyphs, frac_solid::AbstractFloat)
    front = barglyphs.front
    front isa Char && return front
    idx = round(Int, frac_solid * (length(front) + 1))
    if idx > length(front)
        return barglyphs.fill
    elseif idx == 0
        return barglyphs.empty
    else
        return front[idx]
    end
end
# Render the textual progress bar of width `barlen` for the given completion
# percentage; returns "" when there is no room for a bar.
function barstring(barlen, percentage_complete; barglyphs)
    barlen > 0 || return ""
    if percentage_complete == 100 # if we're done, don't use the "front" character
        return string(barglyphs.leftend, repeat(string(barglyphs.fill), barlen), barglyphs.rightend)
    end
    cells = barlen * percentage_complete / 100
    nsolid = trunc(Int, cells)
    nempty = barlen - nsolid - 1   # one cell is reserved for the front glyph
    return string(barglyphs.leftend,
                  repeat(string(barglyphs.fill), max(0, nsolid)),
                  compute_front(barglyphs, cells - nsolid),
                  repeat(string(barglyphs.empty), max(0, nempty)),
                  barglyphs.rightend)
end
# Format a duration of `nsec` seconds as "H:MM:SS", as "<d> days, H:MM:SS"
# for 1-9 whole days, or as "<x.xx> days" beyond 9 days.
function durationstring(nsec)
    days, remainder = divrem(nsec, 60*60*24)
    hours, remainder = divrem(remainder, 60*60)
    minutes, remainder = divrem(remainder, 60)
    seconds = floor(remainder)
    hhmmss = @sprintf "%u:%02u:%02u" hours minutes seconds
    days > 9 && return @sprintf "%.2f days" nsec/(60*60*24)
    days > 0 && return @sprintf "%u days, %s" days hhmmss
    return hhmmss
end
# Walk an expression tree and insert a `finish!(meter)` call immediately
# before every `break` and `return`, so the meter completes on early exit.
# Inner `for`/`while` loops are left untouched (their `break` targets the
# inner loop, not the instrumented one).
function showprogress_process_expr(node, metersym)
    if !isa(node, Expr)
        node
    elseif node.head === :break || node.head === :return
        # special handling for break and return statements
        quote
            ($finish!)($metersym)
            $node
        end
    elseif node.head === :for || node.head === :while
        # do not process inner loops
        #
        # FIXME: do not process break and return statements in inner functions
        # either
        node
    else
        # process each subexpression recursively
        Expr(node.head, [showprogress_process_expr(a, metersym) for a in node.args]...)
    end
end
# Iterator wrapper used by @showprogress: advances `meter` each time an
# element is consumed and finishes it when `obj` is exhausted.
struct ProgressWrapper{T}
    obj::T
    meter::Progress
end

Base.length(wrap::ProgressWrapper) = Base.length(wrap.obj)

function Base.iterate(wrap::ProgressWrapper, state...)
    ir = iterate(wrap.obj, state...)
    if ir === nothing
        finish!(wrap.meter)
    elseif !isempty(state)
        # `state` is empty on the very first call, so the meter only advances
        # once an element has actually been consumed.
        next!(wrap.meter)
    end
    ir
end
"""
Equivalent of @showprogress for a distributed for loop.
```
result = @showprogress dt "Computing..." @distributed (+) for i = 1:50
sleep(0.1)
i^2
end
```
"""
function showprogressdistributed(args...)
if length(args) < 1
throw(ArgumentError("@showprogress @distributed requires at least 1 argument"))
end
progressargs = args[1:end-1]
expr = Base.remove_linenums!(args[end])
if expr.head != :macrocall || expr.args[1] != Symbol("@distributed")
throw(ArgumentError("malformed @showprogress @distributed expression"))
end
distargs = filter(x -> !(x isa LineNumberNode), expr.args[2:end])
na = length(distargs)
if na == 1
loop = distargs[1]
elseif na == 2
reducer = distargs[1]
loop = distargs[2]
else
println("$distargs $na")
throw(ArgumentError("wrong number of arguments to @distributed"))
end
if loop.head !== :for
throw(ArgumentError("malformed @distributed loop"))
end
var = loop.args[1].args[1]
r = loop.args[1].args[2]
body = loop.args[2]
setup = quote
n = length($(esc(r)))
p = Progress(n, $([esc(arg) for arg in progressargs]...))
ch = RemoteChannel(() -> Channel{Bool}(n))
end
if na == 1
# would be nice to do this with @sync @distributed but @sync is broken
# https://github.com/JuliaLang/julia/issues/28979
compute = quote
display = @async let i = 0
while i < n
take!(ch)
next!(p)
i += 1
end
end
@distributed for $(esc(var)) = $(esc(r))
$(esc(body))
put!(ch, true)
end
nothing
end
else
compute = quote
display = @async while take!(ch) next!(p) end
results = @distributed $(esc(reducer)) for $(esc(var)) = $(esc(r))
x = $(esc(body))
put!(ch, true)
x
end
put!(ch, false)
results
end
end
quote
$setup
results = $compute
wait(display)
results
end
end
"""
```
@showprogress dt "Computing..." for i = 1:50
# computation goes here
end
@showprogress dt "Computing..." pmap(x->x^2, 1:50)
```
displays progress in performing a computation. `dt` is the minimum
interval between updates to the user. You may optionally supply a
custom message to be printed that specifies the computation being
performed.
`@showprogress` works for loops, comprehensions, map, reduce, and pmap.
"""
macro showprogress(args...)
showprogress(args...)
end
# Implementation of @showprogress: rewrites the final argument (a for loop,
# comprehension, or map/asyncmap/reduce/pmap call) so that it drives a
# Progress meter. All leading arguments are forwarded to the Progress
# constructor.
function showprogress(args...)
    if length(args) < 1
        throw(ArgumentError("@showprogress requires at least one argument."))
    end
    progressargs = args[1:end-1]
    expr = args[end]
    # @showprogress @distributed ... has its own dedicated transformation.
    if expr.head == :macrocall && expr.args[1] == Symbol("@distributed")
        return showprogressdistributed(args...)
    end
    orig = expr = copy(expr)
    if expr.args[1] == :|> # e.g. map(x->x^2) |> sum
        # Instrument only the left-hand side of the pipe.
        expr.args[2] = showprogress(progressargs..., expr.args[2])
        return expr
    end
    metersym = gensym("meter")
    mapfuns = (:map, :asyncmap, :reduce, :pmap)
    kind = :invalid # :invalid, :loop, or :map
    # Classify the expression and record where the iteration spec and the
    # body live inside expr.args (they differ per expression head).
    if isa(expr, Expr)
        if expr.head == :for
            outerassignidx = 1
            loopbodyidx = lastindex(expr.args)
            kind = :loop
        elseif expr.head == :comprehension
            outerassignidx = lastindex(expr.args)
            loopbodyidx = 1
            kind = :loop
        elseif expr.head == :typed_comprehension
            outerassignidx = lastindex(expr.args)
            loopbodyidx = 2
            kind = :loop
        elseif expr.head == :call && expr.args[1] in mapfuns
            kind = :map
        elseif expr.head == :do
            call = expr.args[1]
            if call.head == :call && call.args[1] in mapfuns
                kind = :map
            end
        end
    end
    if kind == :invalid
        throw(ArgumentError("Final argument to @showprogress must be a for loop, comprehension, map, reduce, or pmap; got $expr"))
    elseif kind == :loop
        # As of julia 0.5, a comprehension's "loop" is actually one level deeper in the syntax tree.
        if expr.head !== :for
            @assert length(expr.args) == loopbodyidx
            expr = expr.args[outerassignidx] = copy(expr.args[outerassignidx])
            @assert expr.head === :generator
            outerassignidx = lastindex(expr.args)
            loopbodyidx = 1
        end
        # Transform the first loop assignment
        loopassign = expr.args[outerassignidx] = copy(expr.args[outerassignidx])
        if loopassign.head === :block # this will happen in a for loop with multiple iteration variables
            for i in 2:length(loopassign.args)
                loopassign.args[i] = esc(loopassign.args[i])
            end
            loopassign = loopassign.args[1] = copy(loopassign.args[1])
        end
        @assert loopassign.head === :(=)
        @assert length(loopassign.args) == 2
        obj = loopassign.args[2]
        loopassign.args[1] = esc(loopassign.args[1])
        # Iterate over a ProgressWrapper so each iteration advances the meter.
        loopassign.args[2] = :(ProgressWrapper(iterable, $(esc(metersym))))
        # Transform the loop body break and return statements
        if expr.head === :for
            expr.args[loopbodyidx] = showprogress_process_expr(expr.args[loopbodyidx], metersym)
        end
        # Escape all args except the loop assignment, which was already appropriately escaped.
        for i in 1:length(expr.args)
            if i != outerassignidx
                expr.args[i] = esc(expr.args[i])
            end
        end
        if orig !== expr
            # We have additional escaping to do; this will occur for comprehensions with julia 0.5 or later.
            for i in 1:length(orig.args)-1
                orig.args[i] = esc(orig.args[i])
            end
        end
        setup = quote
            iterable = $(esc(obj))
            $(esc(metersym)) = Progress(length(iterable), $([esc(arg) for arg in progressargs]...))
        end
        if expr.head === :for
            return quote
                $setup
                $expr
            end
        else
            # We're dealing with a comprehension
            return quote
                begin
                    $setup
                    rv = $orig
                    next!($(esc(metersym)))
                    rv
                end
            end
        end
    else # kind == :map
        # isolate call to map
        if expr.head == :do
            call = expr.args[1]
        else
            call = expr
        end
        # get args to map to determine progress length
        mapargs = collect(Any, filter(call.args[2:end]) do a
            return isa(a, Symbol) || !(a.head in (:kw, :parameters))
        end)
        if expr.head == :do
            insert!(mapargs, 1, :nothing) # to make args for ncalls line up
        end
        # change call to progress_map
        mapfun = call.args[1]
        call.args[1] = :progress_map
        # escape args as appropriate
        for i in 2:length(call.args)
            call.args[i] = esc(call.args[i])
        end
        if expr.head == :do
            expr.args[2] = esc(expr.args[2])
        end
        # create appropriate Progress expression
        lenex = :(ncalls($(esc(mapfun)), ($([esc(a) for a in mapargs]...),)))
        progex = :(Progress($lenex, $([esc(a) for a in progressargs]...)))
        # insert progress and mapfun kwargs
        push!(call.args, Expr(:kw, :progress, progex))
        push!(call.args, Expr(:kw, :mapfun, esc(mapfun)))
        return expr
    end
end
"""
progress_map(f, c...; mapfun=map, progress=Progress(...), kwargs...)
Run a `map`-like function while displaying progress.
`mapfun` can be any function, but it is only tested with `map`, `reduce` and `pmap`.
"""
function progress_map(args...; mapfun=map,
progress=Progress(ncalls(mapfun, args)),
channel_bufflen=min(1000, ncalls(mapfun, args)),
kwargs...)
f = first(args)
other_args = args[2:end]
channel = RemoteChannel(()->Channel{Bool}(channel_bufflen), 1)
local vals
@sync begin
# display task
@async while take!(channel)
next!(progress)
end
# map task
@sync begin
vals = mapfun(other_args...; kwargs...) do x...
val = f(x...)
put!(channel, true)
yield()
return val
end
put!(channel, false)
end
end
return vals
end
"""
progress_pmap(f, [::AbstractWorkerPool], c...; progress=Progress(...), kwargs...)
Run `pmap` while displaying progress.
"""
progress_pmap(args...; kwargs...) = progress_map(args...; mapfun=pmap, kwargs...)
"""
Infer the number of calls to the mapped function (i.e. the length of the returned array) given the input arguments to map, reduce or pmap.
"""
function ncalls(mapfun::Function, map_args)
if mapfun == pmap && length(map_args) >= 2 && isa(map_args[2], AbstractWorkerPool)
relevant = map_args[3:end]
else
relevant = map_args[2:end]
end
if isempty(relevant)
error("Unable to determine number of calls in $mapfun. Too few arguments?")
else
return maximum(length(arg) for arg in relevant)
end
end
end
|
{"hexsha": "cfbdcbd92711a9e0c37a2552d674680fbafff73c", "size": 31578, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ProgressMeter.jl", "max_stars_repo_name": "palday/ProgressMeter.jl", "max_stars_repo_head_hexsha": "e2b7ccdd681450474ca4a919b1e507701bc6de3e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ProgressMeter.jl", "max_issues_repo_name": "palday/ProgressMeter.jl", "max_issues_repo_head_hexsha": "e2b7ccdd681450474ca4a919b1e507701bc6de3e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ProgressMeter.jl", "max_forks_repo_name": "palday/ProgressMeter.jl", "max_forks_repo_head_hexsha": "e2b7ccdd681450474ca4a919b1e507701bc6de3e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6410835214, "max_line_length": 321, "alphanum_fraction": 0.6079865729, "num_tokens": 7981}
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from scipy.signal import periodogram
from .misc import get_equivalent_days
import re
#%% plotting functions
def adjust_bright(color, amount=1.2):
    """
    Adjust color brightness in plots for use.

    Inputs
    ------
    color: str | list,
        a basic color name known to matplotlib, or an RGB triple.
    amount: float,
        brightness multiplier; values above 1 brighten the color,
        values below 1 darken it.

    Returns
    -------
    An RGB tuple with the lightness scaled by ``amount`` (clamped to [0, 1]).
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        base = mc.cnames[color]
    except:
        base = color
    hue, lightness, saturation = colorsys.rgb_to_hls(*mc.to_rgb(base))
    clamped = min(1, max(0, amount * lightness))
    return colorsys.hls_to_rgb(hue, clamped, saturation)
def missingval_plot(df, figsize=(20, 6), show=True):
    """
    Visualize index location of missing values of each feature.
    Doesn't work for 1-dim df.

    df: pd.DataFrame -- either an all-bool missingness mask, or raw data
        (any non-bool frame is converted via ``df.reset_index().T.isna()``).
    figsize: figure size passed to ``plt.subplots``.
    show: whether to call ``plt.show()`` at the end.
    """
    # check all are bool
    if (df.dtypes != bool).any():
        df = df.reset_index().T.isna()
    f, ax = plt.subplots(nrows=1, ncols=1, figsize=figsize)
    g = sns.heatmap(df, cmap='Blues', cbar=True,
                    yticklabels=df.index.values)
    # customize colorbar: only two states, missing / non-missing
    colorbar = g.collections[0].colorbar
    colorbar.set_ticks([0, 1])
    colorbar.set_ticklabels(['non-missing', 'missing'])
    # customize title
    ax.set_title('Distribution of Missing Values', fontsize=16)
    # customize font size in ticks
    # NOTE(review): `tick.label` was removed in matplotlib 3.8 (use
    # `tick.label1` instead) -- confirm against the pinned matplotlib version.
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(12)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(12)
    if show:
        plt.show()
def plot_cv_indices(cv, X, y, ax, n_splits, lw=10):
    """
    Create a sample plot for indices of a cross-validation object.

    cv: cross-validation object exposing a sklearn-style ``split(X, y)``.
    X, y: data forwarded to ``cv.split``.
    ax: matplotlib Axes to draw on.
    n_splits: number of CV iterations labelled on the y-axis.
    lw: marker line width.

    Returns the Axes, with one horizontal row of train/test markers per split.
    """
    # Generate the training/testing visualizations for each CV split
    for ii, (tr, tt) in enumerate(cv.split(X=X, y=y)):
        # Fill in indices with the training/test groups
        indices = np.array([np.nan] * len(X))
        indices[tt] = 1   # test indices
        indices[tr] = 0   # train indices
        # Visualize the results
        ax.scatter(range(len(indices)), [ii + .5] * len(indices),
                   c=indices, marker='_', lw=lw,
                   cmap=plt.cm.coolwarm, vmin=-.2, vmax=1.2)
    # Formatting
    yticklabels = list(range(n_splits))
    ax.set(yticks=np.arange(n_splits)+.5, yticklabels=yticklabels,
           xlabel='Sample index', ylabel="CV iteration",
           ylim=[n_splits+.2, -.2], xlim=[0, len(X)])
    ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
    return ax
def corrtri_plot(df, figsize=(10, 10)):
    """Lower-triangular correlation heatmap of ``df``.

    df: pd.DataFrame whose pairwise column correlations are plotted.
    figsize: figure size passed to ``plt.figure``.
    """
    # sns.set() #: will cause plt warning later in lag_plot
    c = df.corr()
    # FIX: the mask must simply be the strict upper triangle. The previous
    # `np.triu(c.corr(), k=1)` took the correlation OF the correlation matrix,
    # producing float mask entries (and failing to mask any cell that happened
    # to be exactly zero).
    mask = np.triu(np.ones_like(c, dtype=bool), k=1)
    plt.figure(figsize=figsize)
    plt.tick_params(axis='both', which='major', labelsize=10,
                    bottom=False, labelbottom=False,
                    top=False, labeltop=True)
    sns.heatmap(c, annot=True, fmt='.1f', cmap='coolwarm',
                square=True, mask=mask, linewidths=1, cbar=False)
    plt.show()
def acpac_plot(data, features=None, figsize=(10, 5)):
    """Autocorrelation and Partial-autocorrelation plots (30 lags each).

    data: pd.DataFrame of time series columns.
    features: optional sequence of column names; defaults to all columns.
        (Replaces the previous mutable ``[]`` default sentinel; passing an
        empty list still selects all columns, so callers are unaffected.)
    figsize: per-column figure size.
    """
    from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
    if features is None or len(features) == 0:
        features = data.columns
    # one figure per feature: ACF on the left, PACF on the right
    for col in features:
        fig, ax = plt.subplots(1, 2, figsize=figsize)
        plot_acf(data[col], lags=30,
                 title='AC: ' + data[col].name,
                 ax=ax[0])  # missing='drop'
        plot_pacf(data[col], lags=30,
                  title='PAC: ' + data[col].name,
                  ax=ax[1])
def residac_plot(model, cols=None, figsize=(16, 8), ylim=(-.3, .3)):
    """Residual autocorrelation plot for each equation of a VAR/VECM model.

    model: fitted var/vecm model (from statsmodels) exposing ``.resid``
        (and ``.names`` when ``cols`` is omitted).
    cols: optional list of int/str labels for the residual columns;
        defaults to ``model.names``.
    figsize: overall figure size.
    ylim: y-axis limits shared by all subplots.

    Returns the last Axes drawn.
    """
    # set up
    if cols is not None:
        cols = list(cols)
        assert len(cols) == pd.DataFrame(model.resid).shape[1], \
            "cols length not matched with model.resid columns."
    else:
        cols = list(model.names)
    # make sure DataFrame type
    resid = pd.DataFrame(model.resid)
    if isinstance(model.resid, np.ndarray):
        resid = pd.DataFrame(resid, columns=cols)
    # plot -- three subplots per row
    plt.figure(figsize=figsize)
    # FIX: DataFrame.iteritems() was removed in pandas 2.0; items() is the
    # long-standing equivalent.
    for count, (name, series) in enumerate(resid[cols].items()):
        ax = plt.subplot(len(cols)//3 + 1, 3, count+1)
        ax.set(ylim=ylim)
        pd.plotting.autocorrelation_plot(series, ax=ax)
        ax.set_ylabel('')
        plt.title(f'Residual Autocorrelation: {name}')
        ax.figure.tight_layout(pad=0.5)
    return ax
# periodogram plots
def rfft_plot(series, ylim=(0, 400), figsize=(15, 10)):
    """plot real valued fourier transform to find most important
    frequency/periodicity

    NOTE(review): the x-axis conversion assumes ``series`` is sampled daily
    (365.2524 samples per year) -- confirm for other sampling frequencies.
    """
    import tensorflow as tf
    fft = tf.signal.rfft(series)
    f_per_dataset = np.arange(0, len(fft))  # frequency bins: cycles per dataset
    n_samples_d = len(series)
    d_per_year = 365.2524
    years_per_dataset = n_samples_d/(d_per_year)
    f_per_year = f_per_dataset/years_per_dataset  # convert to cycles per year
    plt.figure(figsize=figsize)
    plt.step(f_per_year, np.abs(fft))
    plt.xscale('log')
    plt.ylim(*ylim)
    plt.xlim([0.1, max(plt.xlim())])
    # annotate common periodicities on the log-frequency axis
    plt.xticks([1, 4, 12, 52, 365.2524],
               labels=['1/Year', '1/quarter',
                       '1/month', '1/week', '1/day'])
    _ = plt.xlabel('Frequency (log scale)')
    plt.show()
def seasonal_plot(data, y, period, freq, ax=None, figsize=(20, 10)):
    """
    Plot every ``period`` of ``y`` on ``freq``.

    data: pd.DataFrame containing ``y`` and the ``period``/``freq`` columns.
    y: name of the value column.
    period: column whose unique values become one line each (hue).
    freq: column used as the x-axis.
    ax: optional Axes; a new figure of ``figsize`` is created when None.

    Example:
    >>> X = pd.read_csv(...)
    >>> X['day'] = X.index.dayofweek
    >>> X['week'] = X.index.week
    >>> seasonal_plot(X, y='sales', period='week', freq='day')
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    # one distinct color per unique period value
    palette = sns.color_palette("husl", n_colors=data[period].nunique(),)
    # NOTE(review): `ci=False` is deprecated in recent seaborn (replaced by
    # `errorbar=None`) -- confirm against the pinned seaborn version.
    ax = sns.lineplot(
        x=freq,
        y=y,
        hue=period,
        data=data,
        ci=False,
        ax=ax,
        palette=palette,
        legend=False,
    )
    ax.set_title(f"Seasonal Plot ({period}/{freq})")
    # loop over the unique periods
    for line, name in zip(ax.lines, data[period].unique()):
        # annotate besides the last pt of each curve
        y_ = line.get_ydata()[-1]
        ax.annotate(
            name,
            xy=(1, y_),
            xytext=(6, 0),
            color=line.get_color(),
            xycoords=ax.get_yaxis_transform(),
            textcoords="offset points",
            size=14,
            va="center",
        )
    return ax
def plot_periodogram(ts: pd.Series, ts_freq=None, detrend='linear', ax=None):
    """Plot the periodogram of ``ts`` with periods-per-year tick labels.

    ts: pd.Series with a date or period index.
    ts_freq: optional frequency string (e.g. '2D'); inferred from the index
        when None.
    detrend: passed through to ``scipy.signal.periodogram``.
    ax: optional Axes; a new one is created when None.

    Returns the Axes containing the periodogram.
    """
    # extract relevant freq
    assert isinstance(ts_freq, str) or ts_freq is None, "ts_freq type not valid."
    if ts_freq is None:
        dates = ts.index
        if isinstance(dates, pd.PeriodIndex):
            ts_freq = dates.freqstr  # may contain numbers
        else:
            ts_freq = pd.infer_freq(dates)
        # FIX: the concatenated message previously read "...index toperiod";
        # a trailing space was missing.
        assert ts_freq is not None, (
            "Cannot decide the ts freq. Please either transform the date index to " +
            "period or set a value for ts_freq instead of the default value None."
        )
    # convert ts_freq to Timedelta
    # FIX: raw string for the regex -- "\d" in a plain string is an invalid
    # escape sequence (SyntaxWarning on Python >= 3.12).
    value = re.search(r"^\d*", ts_freq)[0]  # only match head string numbers
    if value == '':
        value = 1.0
    denominator = get_equivalent_days(float(value), unit=ts_freq)
    # get power spectrum
    # default is use 1Y as numerator
    fs = pd.Timedelta(365.2425, unit='D') / denominator
    frequencies, spectrum = periodogram(
        ts,
        fs=fs,
        detrend=detrend,
        window="boxcar",
        scaling='spectrum',
    )
    # plot
    if ax is None:
        _, ax = plt.subplots()
    ax.step(frequencies, spectrum, color="purple")
    ax.set_xscale("log")
    # periods = 1/w = fs/w', where w' is the number for annotation in x-axis.
    ## Eg, if fs=365 and ts_freq is daily, then period =1Y if w' is 1, =Quarter if w' is 4.
    ## Eg, if fs=365 and ts_freq is 2D, then period =Biannual if w' is 1, =Annual if w' is 2, =2/3Y if w' is 3, =Quarter if w' is 8.
    ## Eg, if fs=12 and ts_freq is monthly, then period =1Y if w' is 1, =Quarter if w' is 4.
    # I intentionally define fs = 365 / ts_freq in the above so that second eg never happens.
    ax.set_xticks([1, 2, 4, 6, 12, 26, 52, 104])
    ax.set_xticklabels(
        [
            "Annual (1)",
            "Semiannual (2)",
            "Quarterly (4)",
            "Bimonthly (6)",
            "Monthly (12)",
            "Biweekly (26)",
            "Weekly (52)",
            "Semiweekly (104)",
        ],
        rotation=90,
    )
    ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    ax.set_ylabel("Variance")
    ax.set_title("Periodogram")
    return ax
def _lagplot(x, y=None, lag=1, standardize=False, ax=None, **kwargs):
    """
    Helper for lags_plot: scatter of ``y`` (or ``x`` itself) against ``x``
    shifted by ``lag``, with a lowess trend line and the correlation
    coefficient shown in the upper-left corner.

    `Ref`_: https://www.kaggle.com/ryanholbrook/time-series-as-features
    """
    from matplotlib.offsetbox import AnchoredText
    lagged = x.shift(lag)
    if standardize:
        lagged = (lagged - lagged.mean()) / lagged.std()
    if y is None:
        target = x
    else:
        target = (y - y.mean()) / y.std() if standardize else y
    corr = target.corr(lagged)
    if ax is None:
        fig, ax = plt.subplots()
    ax = sns.regplot(
        x=lagged,
        y=target,
        scatter_kws=dict(alpha=0.75, s=3),
        line_kws=dict(color='C3'),
        lowess=True,
        ax=ax,
        **kwargs,
    )
    # annotate the correlation in a framed box
    box = AnchoredText(
        f"{corr:.2f}",
        prop=dict(size="large"),
        frameon=True,
        loc="upper left",
    )
    box.patch.set_boxstyle("square, pad=0.0")
    ax.add_artist(box)
    ax.set(title=f"Lag {lag}", xlabel=lagged.name, ylabel=target.name)
    return ax
def lags_plot(x, y=None, lags=6, nrows=1, lagplot_kwargs=None, **kwargs):
    """
    Grid of lag scatter plots of ``y`` (or ``x`` itself) against lagged ``x``.

    :param x: series whose lags are shown on the x-axes.
    :param y: optional target series; defaults to ``x``.
    :param lags: number of lag plots to draw (lags 1..``lags``).
    :param nrows: number of rows of the subplot grid.
    :param lagplot_kwargs: extra keyword arguments forwarded to ``_lagplot``.
    :param kwargs: forwarded to ``plt.subplots``; ``nrows``/``ncols``/``figsize``
        receive sensible defaults.
    :return: the created figure.

    `Ref`_: https://www.kaggle.com/ryanholbrook/time-series-as-features
    """
    import math
    # None default avoids the shared-mutable-default-argument pitfall of the
    # previous `lagplot_kwargs={}` signature.
    if lagplot_kwargs is None:
        lagplot_kwargs = {}
    kwargs.setdefault('nrows', nrows)
    kwargs.setdefault('ncols', math.ceil(lags / nrows))
    kwargs.setdefault('figsize', (kwargs['ncols'] * 2, nrows * 2 + 0.5))
    fig, axs = plt.subplots(sharex=True, sharey=True, squeeze=False, **kwargs)
    for ax, k in zip(fig.get_axes(), range(kwargs['nrows'] * kwargs['ncols'])):
        if k + 1 <= lags:
            ax = _lagplot(x, y, lag=k + 1, ax=ax, **lagplot_kwargs)
            ax.set_title(f"Lag {k + 1}", fontdict=dict(fontsize=14))
            ax.set(xlabel="", ylabel="")
        else:
            # more grid cells than requested lags: hide the leftovers
            ax.axis('off')
    plt.setp(axs[-1, :], xlabel=x.name)
    plt.setp(axs[:, 0], ylabel=y.name if y is not None else x.name)
    fig.tight_layout(w_pad=0.1, h_pad=0.1)
    return fig
def pca_plot(data, n_comp=None, regex=None, figsize=(5, 3)):
    """
    Plot the fraction of variance explained by each principal component.

    data: pd.DataFrame whose columns are the features.
        NOTE(review): ``filter``/``columns`` are used below, so a plain
        np.ndarray only works without ``regex`` — confirm intended input type.
    regex: string pattern to filter data columns.
        Use all data if not specified.
    n_comp: number of components desired in PCA.
        Default to data column numbers if not specified.
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import PCA
    fig = plt.figure(figsize=figsize)
    ax = fig.add_axes([0, 0, 1, 1])
    x = data
    if regex is not None:
        x = x.filter(regex=regex)
    # standardize the features so PCA is not dominated by large-scale columns
    x_scaled = StandardScaler().fit_transform(x)
    if n_comp is None:
        n_comp = x_scaled.shape[1]
    pca = PCA(n_components=n_comp)
    pca.fit(x_scaled)
    explained = pca.explained_variance_ratio_.round(2)
    xtick = range(1, n_comp + 1)
    ax.bar(xtick, explained)
    # slice the labels so their number matches the ticks when n_comp is
    # smaller than the column count (the original raised a ValueError then)
    plt.xticks(xtick, x.columns[:n_comp], rotation='vertical')
    plt.xlabel("PCA components")
    plt.title("Variance Explained by each dimension")
    plt.show()
def predict_gt_plot(at, y_true_tra, y_pred_tra, y_true_tes, y_pred_tes,
                    figsize=(25, 15), freq='MS'):
    """
    Plot the ground truth and prediction curves on both train and test set.
    Ground-truth inputs should have a timestamp index.

    :param at: prediction horizon; used to shift the timestamp index of the
        ground truth data (``y_true_tra`` and ``y_true_tes``).
    :type at: int
    :param y_true_tra: training set ground truth time series
    :type y_true_tra: pd.DataFrame, pd.Series
    :param y_pred_tra: training set prediction time series
    :type y_pred_tra: pd.DataFrame, pd.Series, np.array
    :param y_true_tes: testing set ground truth time series
    :type y_true_tes: pd.DataFrame, pd.Series
    :param y_pred_tes: testing set prediction time series
    :type y_pred_tes: pd.DataFrame, pd.Series, np.array
    :param figsize: figure size, defaults to (25,15)
    :type figsize: tuple, optional
    :param freq: freq of the input time series data, defaults to 'MS'
    :type freq: str, optional
    :return: 1-D array of axes containing the data content of the figure.
    :rtype: np.ndarray of plt.Axes
    """
    # --- normalize inputs: frames for truths, 2-D arrays for predictions
    y_true_tra, y_true_tes = pd.DataFrame(y_true_tra), pd.DataFrame(y_true_tes)
    y_pred_tra, y_pred_tes = np.array(y_pred_tra), np.array(y_pred_tes)
    if y_pred_tra.ndim == 1:
        y_pred_tra = y_pred_tra.reshape(-1, 1)
    if y_pred_tes.ndim == 1:
        y_pred_tes = y_pred_tes.reshape(-1, 1)
    # --- one subplot per target column
    num_targets = y_true_tra.shape[1]
    targets = y_true_tra.columns
    # squeeze=False keeps `ax` an array even for a single target; the original
    # plt.subplots(1, 1) returned a bare Axes and ax[j] raised TypeError.
    fig, ax = plt.subplots(num_targets, 1, figsize=figsize, sharex=True,
                           squeeze=False)
    ax = ax.ravel()
    for j, ta in enumerate(targets):
        ax[j].plot(y_true_tra.asfreq(freq).index.shift(at), y_pred_tra[:, j], c='b', label='train-predict')
        ax[j].plot(y_true_tra.asfreq(freq).index.shift(at), y_true_tra.values[:, j], c='k', label='gt-tra')
        ax[j].plot(y_true_tes.asfreq(freq).index.shift(at), y_pred_tes[:, j], c='orange', label='test-predict')
        ax[j].plot(y_true_tes.asfreq(freq).index.shift(at), y_true_tes.values[:, j], c='k', label='gt-tes')
        ax[j].set_title(f'prediction of {ta} at {at}th-horizon', fontdict={'fontsize': 20})
    plt.legend()
    plt.tight_layout()
    return ax
|
{"hexsha": "f0de38db6f8c1e7102498b6a065e194f2fb2e496", "size": 14729, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/plot.py", "max_stars_repo_name": "wkCircle/myPythonLibrary", "max_stars_repo_head_hexsha": "3b37568c658ba237d3ca32d01c82fd3049b459f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-07T08:45:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-07T08:45:18.000Z", "max_issues_repo_path": "Utils/plot.py", "max_issues_repo_name": "wkCircle/PythonToolKit", "max_issues_repo_head_hexsha": "3b37568c658ba237d3ca32d01c82fd3049b459f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Utils/plot.py", "max_forks_repo_name": "wkCircle/PythonToolKit", "max_forks_repo_head_hexsha": "3b37568c658ba237d3ca32d01c82fd3049b459f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8369829684, "max_line_length": 151, "alphanum_fraction": 0.5831353113, "include": true, "reason": "import numpy,from scipy,from statsmodels", "num_tokens": 3966}
|
/*
* Copyright © 2014-2015 Klaus Reuter
* Copyright © 2014 Felix Höfling
* Copyright © 2014 Manuel Dibak
* All rights reserved.
*
* This file is part of h5xx — a C++ wrapper for the HDF5 library.
*
* This software may be modified and distributed under the terms of the
* 3-clause BSD license. See accompanying file LICENSE for details.
*/
#ifndef H5XX_DATASET_MULTI_ARRAY
#define H5XX_DATASET_MULTI_ARRAY
#include <algorithm>
#include <h5xx/ctype.hpp>
#include <h5xx/dataset/dataset.hpp>
#include <h5xx/dataspace.hpp>
#include <h5xx/error.hpp>
#include <h5xx/policy/storage.hpp>
#include <h5xx/utility.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/mpl/and.hpp>
#include <boost/multi_array.hpp>
#include <boost/array.hpp>
#include <boost/type_traits.hpp>
#include <boost/utility/enable_if.hpp>
namespace h5xx {
/**
 * Create and return a dataset of multi-dimensional array type;
 * properties of the dataset can be set using storage policies.
 *
 * \param object          parent file or group the dataset is created in
 * \param name            name of the new dataset
 * \param value           Boost.MultiArray whose element type and shape
 *                        determine the dataset's HDF5 type and extents
 * \param storage_policy  layout/storage policy; note the default argument is
 *                        not reachable by template argument deduction, which
 *                        is why the two-policy-free overload below exists
 * \returns the newly created dataset
 */
template <typename h5xxObject, typename T, typename StoragePolicy>
inline typename boost::enable_if<is_multi_array<T>, dataset>::type
create_dataset(h5xxObject const& object, std::string const& name, T const& value,
               StoragePolicy const& storage_policy = StoragePolicy())
{
    typedef typename T::element value_type;
    hid_t type_id = ctype<value_type>::hid(); // this ID must not be closed
    enum { rank = T::dimensionality };
    // --- create a temporary dataspace based on the input array dimensions
    boost::array<hsize_t, rank> dims;
    std::copy(value.shape(), value.shape() + rank, dims.begin());
    return dataset(object, name, type_id, dataspace(dims), storage_policy);
}
/**
 * Create and return a dataset of multi-dimensional array type,
 * using the default storage policy (contiguous layout).
 * Convenience overload forwarding to the policy-taking version.
 */
template <typename h5xxObject, typename T>
inline typename boost::enable_if<is_multi_array<T>, dataset>::type
create_dataset(h5xxObject const& object, std::string const& name, T const& value)
{
    return create_dataset(object, name, value, h5xx::policy::storage::contiguous());
}
/**
 * Write multiarray data to an existing dataset specified by location and name.
 * Convenience wrapper: opens the dataset and forwards to
 * write_dataset(dataset&, T const&).
 */
template <typename h5xxObject, typename T>
inline typename boost::enable_if<is_multi_array<T>, void>::type
write_dataset(h5xxObject const& object, std::string const& name, T const& value)
{
    dataset dset(object, name);
    write_dataset(dset, value);
}
/**
 * Write multiarray data to dataset.
 * The complete array (starting at value.origin()) is written; the element
 * type is mapped to its HDF5 counterpart via ctype<>.
 */
template <typename T>
inline typename boost::enable_if<is_multi_array<T>, void>::type
write_dataset(dataset& dset, T const& value)
{
    typedef typename T::element value_type;
    hid_t type_id = ctype<value_type>::hid();
    dset.write(type_id, value.origin());
}
/**
 * Write multiarray data to an existing dataset specified by its location and
 * name; memory and file locations (hyperslabs) are passed via the dataspace
 * objects. Convenience wrapper around the dataset-based overload.
 */
template <typename h5xxObject, typename T>
inline typename boost::enable_if<is_multi_array<T>, void>::type
write_dataset(h5xxObject const& object, std::string const& name, T const& value,
              dataspace const& memspace, dataspace const& filespace)
{
    dataset dset(object, name);
    write_dataset(dset, value, memspace, filespace);
}
/**
 * Write multiarray data to dataset; memory and file locations (hyperslabs)
 * are passed via the dataspace objects.
 * The default transfer property list (H5P_DEFAULT) is used.
 */
template <typename T>
inline typename boost::enable_if<is_multi_array<T>, void>::type
write_dataset(dataset& dset, T const& value, dataspace const& memspace, dataspace const& filespace)
{
    typedef typename T::element value_type;
    hid_t type_id = ctype<value_type>::hid();
    hid_t mem_space_id = memspace.hid(); //H5S_ALL;
    hid_t file_space_id = filespace.hid();
    hid_t xfer_plist_id = H5P_DEFAULT;
    dset.write(type_id, value.origin(), mem_space_id, file_space_id, xfer_plist_id);
}
/**
 * Write multiarray data to a dataset specified by its location and name; only
 * the file location (hyperslab) is given via a slice object.
 * Convenience wrapper around the dataset-based slice overload.
 */
template <typename h5xxObject, typename T>
inline typename boost::enable_if<is_multi_array<T>, void>::type
write_dataset(h5xxObject const& object, std::string const& name, T const& value, slice const& file_slice)
{
    dataset dset(object, name);
    write_dataset(dset, value, file_slice);
}
/**
 * Write multiarray data to dataset; only the file location (hyperslab) is
 * given via a slice object, while the complete input array is used as the
 * memory selection.
 */
template <typename T>
inline typename boost::enable_if<is_multi_array<T>, void>::type
write_dataset(dataset& dset, T const& value, slice const& file_slice)
{
    // --- create memory dataspace for the complete input array
    h5xx::dataspace memspace = h5xx::create_dataspace(value);
    // --- create file dataspace and select the slice (hyperslab) from it
    h5xx::dataspace filespace(dset);
    filespace.select(file_slice);
    write_dataset(dset, value, memspace, filespace);
}
/**
 * Read multiarray data from an existing dataset specified by location and name.
 * The array data is resized and overwritten internally.
 * Convenience wrapper around the dataset-based overload.
 */
template <typename h5xxObject, typename T>
typename boost::enable_if<is_multi_array<T>, void>::type
read_dataset(h5xxObject const& object, std::string const& name, T & array)
{
    dataset dset(object, name);
    read_dataset(dset, array);
}
/**
 * Read multiarray data from an existing dataset.
 * The array is resized to the dataset's extents and overwritten internally;
 * dataset and array ranks must match, otherwise an exception is thrown.
 */
template <typename T>
typename boost::enable_if<is_multi_array<T>, void>::type
read_dataset(dataset & data_set, T & array)
{
    const int array_rank = T::dimensionality;
    typedef typename T::element value_type;
    // --- use temporary dataspace object to get the shape of the dataset
    dataspace file_space(data_set);
    if (!(file_space.rank() == array_rank))
        H5XX_THROW("dataset \"" + get_name(data_set) + "\" and target array have mismatching dimensions");
    boost::array<hsize_t, array_rank> file_dims = file_space.extents<array_rank>();
    // --- clear array - TODO check if this feature is necessary/wanted
    boost::array<size_t, array_rank> array_zero;
    array_zero.assign(0);
    array.resize(array_zero);
    // --- resize array to match the dataset - TODO check if this feature is necessary/wanted
    boost::array<size_t, array_rank> array_shape;
    std::copy(file_dims.begin(), file_dims.begin() + array_rank, array_shape.begin());
    array.resize(array_shape);
    // --- read the complete dataset into the freshly resized array
    hid_t mem_space_id = H5S_ALL;
    hid_t file_space_id = H5S_ALL;
    hid_t xfer_plist_id = H5P_DEFAULT;
    data_set.read(ctype<value_type>::hid(), array.origin(), mem_space_id, file_space_id, xfer_plist_id);
}
/**
 * Read multiarray data from an existing dataset specified by location and name;
 * a slice specifies the data locations to be read in file space. The array
 * is not resized internally, the user must resize it in advance to fit the slice.
 * Convenience wrapper around the dataset-based slice overload.
 */
template <typename h5xxObject, typename T>
typename boost::enable_if<is_multi_array<T>, void>::type
read_dataset(h5xxObject const& object, std::string const& name, T & array, slice const& file_slice)
{
    dataset data_set(object, name);
    read_dataset(data_set, array, file_slice);
}
/**
 * Read multiarray data from an existing dataset; a slice specifies the data
 * locations to be read in file space, while the complete array is used as the
 * memory selection. The array is not resized internally, the user must resize
 * it in advance to fit the slice.
 */
template <typename T>
typename boost::enable_if<is_multi_array<T>, void>::type
read_dataset(dataset & data_set, T & array, slice const& file_slice)
{
    // --- create memory dataspace for the complete input array
    h5xx::dataspace memspace = h5xx::create_dataspace(array);
    // --- create file dataspace and select the slice (hyperslab) from it
    h5xx::dataspace filespace(data_set);
    filespace.select(file_slice);
    // ---
    read_dataset(data_set, array, memspace, filespace);
}
/**
 * Read multiarray data from an existing dataset; dataspace objects for both
 * memory and file allow to specify the locations of the data. The array is
 * not resized internally, the user must resize it in advance to fit the
 * dataspace; only the total element count of the file selection is verified.
 */
template <typename T>
typename boost::enable_if<is_multi_array<T>, void>::type
read_dataset(dataset & data_set, T & array, dataspace const& memspace, dataspace const& filespace)
{
    // --- disabled this check, it is orthogonal to a useful feature (eg read from 2D dataset into 1D array)
    // const int array_rank = T::dimensionality;
    // if (!(memspace.rank() == array_rank)) {
    //     throw error("memory dataspace and array rank do not match");
    // }
    if (static_cast<hsize_t>(filespace.get_select_npoints()) > array.num_elements())
        H5XX_THROW("target array does not provide enough space to store selected dataspace elements");
    hid_t mem_space_id = memspace.hid(); //H5S_ALL;
    hid_t file_space_id = filespace.hid();
    hid_t xfer_plist_id = H5P_DEFAULT;
    typedef typename T::element value_type;
    data_set.read(ctype<value_type>::hid(), array.origin(), mem_space_id, file_space_id, xfer_plist_id);
}
} // namespace h5xx
#endif // ! H5XX_DATASET_MULTI_ARRAY
|
{"hexsha": "ba2910a236288f1cf2a59d90d2b5dd658f596d1c", "size": 9171, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "h5xx/dataset/boost_multi_array.hpp", "max_stars_repo_name": "halmd-org/h5xx", "max_stars_repo_head_hexsha": "fedf38a0bc58ff5ff30c46819d64eb7b8c644c60", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17.0, "max_stars_repo_stars_event_min_datetime": "2016-04-02T09:05:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-16T15:10:18.000Z", "max_issues_repo_path": "h5xx/dataset/boost_multi_array.hpp", "max_issues_repo_name": "halmd-org/h5xx", "max_issues_repo_head_hexsha": "fedf38a0bc58ff5ff30c46819d64eb7b8c644c60", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2016-06-03T15:38:55.000Z", "max_issues_repo_issues_event_max_datetime": "2019-06-28T11:56:47.000Z", "max_forks_repo_path": "h5xx/dataset/boost_multi_array.hpp", "max_forks_repo_name": "halmd-org/h5xx", "max_forks_repo_head_hexsha": "fedf38a0bc58ff5ff30c46819d64eb7b8c644c60", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2016-03-31T11:13:06.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-02T12:00:36.000Z", "avg_line_length": 35.2730769231, "max_line_length": 108, "alphanum_fraction": 0.7278377494, "num_tokens": 2263}
|
[STATEMENT]
lemma times_inf [simp]:
"x * y = x \<sqinter> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x * y = x \<sqinter> y
[PROOF STEP]
by simp
|
{"llama_tokens": 71, "file": "Stone_Relation_Algebras_Relation_Algebras", "length": 1}
|
#########################################################################
# File Name: test.py
# Author: Walker
# mail:qngskk@gmail.com
# Created Time: Thu Dec 9 11:25:59 2021
#########################################################################
# !/usr/bin/env python3
import numpy as np
import pandas as pd
import datetime

# load the CSV weather data, one file per city
df_ferrara = pd.read_csv('data/WeatherData/ferrara_270615.csv')
df_milano = pd.read_csv('data/WeatherData/milano_270615.csv')
df_mantova = pd.read_csv('data/WeatherData/mantova_270615.csv')
df_ravenna = pd.read_csv('data/WeatherData/ravenna_270615.csv')
df_torino = pd.read_csv('data/WeatherData/torino_270615.csv')
df_asti = pd.read_csv('data/WeatherData/asti_270615.csv')
df_bologna = pd.read_csv('data/WeatherData/bologna_270615.csv')
df_piacenza = pd.read_csv('data/WeatherData/piacenza_270615.csv')
df_cesena = pd.read_csv('data/WeatherData/cesena_270615.csv')
df_faenza = pd.read_csv('data/WeatherData/faenza_270615.csv')

# "%matplotlib inline" is an IPython magic; it is a SyntaxError in a plain
# .py script and only belongs in notebooks, so it is disabled here.
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from dateutil import parser

# temperature data for Milan
y1 = df_milano['temp']
x1 = df_milano['day']
# convert the date strings to datetime objects
# (bug fix: the correct function is parser.parse, not parser.parser)
day_milano = [parser.parse(x) for x in x1]
# create figure and axes
fig, ax = plt.subplots()
# rotate the x tick labels
plt.xticks(rotation=70)
# show hours and minutes on the time axis
hours = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(hours)
# draw
ax.plot(day_milano, y1, 'r')

# read temperature and date data for six cities
y1 = df_ravenna['temp']
x1 = df_ravenna['day']
y2 = df_faenza['temp']
x2 = df_faenza['day']
y3 = df_cesena['temp']
x3 = df_cesena['day']
y4 = df_milano['temp']
x4 = df_milano['day']
y5 = df_asti['temp']
x5 = df_asti['day']
y6 = df_torino['temp']
x6 = df_torino['day']
# convert the date strings to standard datetime objects
day_ravenna = [parser.parse(x) for x in x1]
day_faenza = [parser.parse(x) for x in x2]
day_cesena = [parser.parse(x) for x in x3]
day_milano = [parser.parse(x) for x in x4]
day_asti = [parser.parse(x) for x in x5]
day_torino = [parser.parse(x) for x in x6]
# call subplots() again to get a fresh fig, ax pair
fig, ax = plt.subplots()
plt.xticks(rotation=70)
hours = mdates.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(hours)
# three lines per call: 'r' (red) = coastal cities, 'g' (green) = inland cities
ax.plot(day_ravenna,y1,'r',day_faenza,y2,'r',day_cesena,y3,'r')
ax.plot(day_milano,y4,'g',day_asti,y5,'g',day_torino,y6,'g')

# dist: distance of each city from the sea
dist = [df_ravenna['dist'][0],
    df_cesena['dist'][0],
    df_faenza['dist'][0],
    df_ferrara['dist'][0],
    df_bologna['dist'][0],
    df_mantova['dist'][0],
    df_piacenza['dist'][0],
    df_milano['dist'][0],
    df_asti['dist'][0],
    df_torino['dist'][0]
]
# temp_max: maximum temperature of each city
temp_max = [df_ravenna['temp'].max(),
    df_cesena['temp'].max(),
    df_faenza['temp'].max(),
    df_ferrara['temp'].max(),
    df_bologna['temp'].max(),
    df_mantova['temp'].max(),
    df_piacenza['temp'].max(),
    df_milano['temp'].max(),
    df_asti['temp'].max(),
    df_torino['temp'].max()
]
# temp_min: minimum temperature of each city
temp_min = [df_ravenna['temp'].min(),
    df_cesena['temp'].min(),
    df_faenza['temp'].min(),
    df_ferrara['temp'].min(),
    df_bologna['temp'].min(),
    df_mantova['temp'].min(),
    df_piacenza['temp'].min(),
    df_milano['temp'].min(),
    df_asti['temp'].min(),
    df_torino['temp'].min()
]
fig, ax = plt.subplots()
ax.plot(dist,temp_max,'ro')

from sklearn.svm import SVR
# dist1: cities close to the sea; dist2: cities far from the sea
dist1 = dist[0:5]
dist2 = dist[5:10]
# reshape the lists into column vectors (lists of one-element lists);
# numpy's reshape() achieves the same, as shown further below
dist1 = [[x] for x in dist1]
dist2 = [[x] for x in dist2]
# temp_max1: max temperatures of the cities in dist1
temp_max1 = temp_max[0:5]
# temp_max2: max temperatures of the cities in dist2
temp_max2 = temp_max[5:10]
# linear SVR with C=1000 so it fits the data tightly
# (no out-of-sample prediction is needed, so overfitting is not a concern)
svr_lin1 = SVR(kernel='linear', C=1e3)
svr_lin2 = SVR(kernel='linear', C=1e3)
# fit the data (this step may take a while)
svr_lin1.fit(dist1, temp_max1)
svr_lin2.fit(dist2, temp_max2)
# build column vectors of query distances with numpy's reshape()
xp1 = np.arange(10,100,10).reshape((9,1))
xp2 = np.arange(50,400,50).reshape((7,1))
yp1 = svr_lin1.predict(xp1)
yp2 = svr_lin2.predict(xp2)
# limit the x-axis range
fig, ax = plt.subplots()
ax.set_xlim(0,400)
# draw the fitted lines and the raw points
ax.plot(xp1, yp1, c='b', label='Strong sea effect')
ax.plot(xp2, yp2, c='g', label='Light sea effect')
ax.plot(dist,temp_max,'ro')
|
{"hexsha": "bfc12a9d6ea671729f40df32ada9b93a53e70506", "size": 4273, "ext": "py", "lang": "Python", "max_stars_repo_path": "learn/1_weather/src/test.py", "max_stars_repo_name": "PuddingWalker/py_small_projects", "max_stars_repo_head_hexsha": "d039fec99b2d42ff577ada59a91e95d97d692214", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "learn/1_weather/src/test.py", "max_issues_repo_name": "PuddingWalker/py_small_projects", "max_issues_repo_head_hexsha": "d039fec99b2d42ff577ada59a91e95d97d692214", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learn/1_weather/src/test.py", "max_forks_repo_name": "PuddingWalker/py_small_projects", "max_forks_repo_head_hexsha": "d039fec99b2d42ff577ada59a91e95d97d692214", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2840236686, "max_line_length": 73, "alphanum_fraction": 0.6704891177, "include": true, "reason": "import numpy", "num_tokens": 1666}
|
import pickle
import os
import numpy as np
import viewer3d
from viewer3d import plot3d, inte_to_rgb, show_pillar_cuboid
from msic import get_corners_3d
from kitti import Object3d
car_th = 0.5  # score threshold for keeping car detections
ped_th = 0.5  # score threshold for keeping pedestrian detections
data_dir = '/data/Machine_Learning/ImageSet/KITTI/object/training/'

# `with` guarantees the files are closed even if unpickling fails
with open('./results/car/step_296960/result.pkl', 'rb') as f:
    res_cars = pickle.load(f)
print(len(res_cars))
with open('./results/ped/step_194880/result.pkl', 'rb') as f:
    res_peds = pickle.load(f)
print(len(res_peds))
with open('./ImageSets/val.txt') as f:
    ids = f.readlines()
print(len(ids))

show_sets = ['002565']


def _collect_detections(res, threshold):
    """Convert one frame's raw detections above `threshold` into KITTI-style
    dicts; returns (results, class names). Factors out the car/ped duplication."""
    # output dict key -> key in the raw result structure
    field_map = [
        ('type', 'name'),
        ('alpha', 'alpha'),
        ('truncated', 'truncated'),
        ('occluded', 'occluded'),
        ('bbox', 'bbox'),
        ('dimensions', 'dimensions'),
        ('location', 'location'),
        ('rotation_y', 'rotation_y'),
    ]
    results = []
    cls_list = []
    for j, score in enumerate(res['score']):
        if score > threshold:
            result = {field: res[key][j] for field, key in field_map}
            results.append(result)
            cls_list.append(result['type'])
    return results, cls_list


for i, id in enumerate(ids):
    id = id.replace('\n', '')
    # if id not in show_sets:
    #     continue
    pc_path = os.path.join(data_dir, 'velodyne', id + '.bin')
    print(pc_path)
    # KITTI point clouds: flat float32 binary, 4 values (x, y, z, intensity) per point
    pc_velo = np.fromfile(pc_path, dtype=np.float32).reshape(-1, 4)
    print(pc_velo.shape)
    car_results, car_cls = _collect_detections(res_cars[i], car_th)
    ped_results, ped_cls = _collect_detections(res_peds[i], ped_th)
    results = car_results + ped_results
    cls_list = car_cls + ped_cls
    # p3d = plot3d()
    # points = pc_velo[:, 0:3]
    # pc_inte = pc_velo[:, 3]
    # pc_color = inte_to_rgb(pc_inte)
    # p3d.add_points(points, pc_color)
    # p3d.show()
    show_pillar_cuboid(pc_velo, pc_path, results, id=id)
|
{"hexsha": "e6d03a3461fffb36974c49e53220f2ab667d136c", "size": 2472, "ext": "py", "lang": "Python", "max_stars_repo_path": "display3d/display3d.py", "max_stars_repo_name": "leon-liangwu/PillarsRNN", "max_stars_repo_head_hexsha": "b6e7d64af4e2819098ae9a87a9dd676ee8288874", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-30T08:09:24.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-30T08:09:24.000Z", "max_issues_repo_path": "display3d/display3d.py", "max_issues_repo_name": "leon-liangwu/second.pytorch", "max_issues_repo_head_hexsha": "b6e7d64af4e2819098ae9a87a9dd676ee8288874", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "display3d/display3d.py", "max_forks_repo_name": "leon-liangwu/second.pytorch", "max_forks_repo_head_hexsha": "b6e7d64af4e2819098ae9a87a9dd676ee8288874", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4285714286, "max_line_length": 67, "alphanum_fraction": 0.5865695793, "include": true, "reason": "import numpy", "num_tokens": 686}
|
#
# Copyright (C) 2020 by The Board of Trustees of Stanford University
# This program is free software: you can redistribute it and/or modify it under
# the terms of the Modified BSD-3 License as published by the Open Source
# Initiative.
# If you use this program in your research, we request that you reference the
# Illusion paper, and that you send us a citation of your work.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the BSD-3 License for more details.
# You should have received a copy of the Modified BSD-3 License along with this
# program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
#
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
import numpy as np
from sklearn.metrics import f1_score
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy
import random
import multiprocessing as mp
import pdb
from torch_models import D2NN, Q
from qtorch import FixedPoint, FloatingPoint
from qtorch.quant import Quantizer, quantizer
from qtorch.optim import OptimLP
import pdb
from tqdm import tqdm
best_result = 0  # best accuracy tracker (not updated in the code below)
path = os.path.join("./data")
batchSize = 128

# MNIST train/test loaders; pixels are scaled to [-1, 1] by Normalize(0.5, 0.5).
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root=path, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.5,), (0.5,))
                   ])),
    batch_size=batchSize, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root=path, train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])),
    batch_size=batchSize, shuffle=True)

# Quantization number formats: 8-bit fixed point with 4 fractional bits, and a
# wider 16-bit format with 8 fractional bits (floating-point variants kept below
# as commented-out alternatives).
ubit_8 = FixedPoint(8, 4)
ubit_16 = FixedPoint(16, 8)
#ubit_8 = FloatingPoint(exp=5, man=2)
#ubit_16 = FloatingPoint(exp=6, man=9)
# Weights are quantized to 8-bit on the forward pass; gradients, momentum and
# accumulators keep full precision (forward_number=None).
weight_quant = quantizer(forward_number=ubit_8,
                         forward_rounding="nearest")
grad_quant = quantizer(forward_number=None,
                       forward_rounding="nearest")
momentum_quant = quantizer(forward_number=None,
                           forward_rounding="stochastic")
acc_quant = quantizer(forward_number=None,
                      forward_rounding="stochastic")
# Activation quantizer factories passed into the models (8-bit and 16-bit).
act_error_quant = lambda : Quantizer(forward_number= ubit_8, backward_number=None,
                forward_rounding="nearest", backward_rounding="stochastic")
act2_error_quant = lambda : Quantizer(forward_number=ubit_16, backward_number=None,
                forward_rounding="nearest", backward_rounding="stochastic")

device = 'cuda' # torch.device("cuda" if torch.cuda.is_available() else "cpu")
# state_dict = torch.load('../checkpoints/mnist_model.pth')
# D2NN: multi-exit network restored from a checkpoint (kept in eval mode during
# training below); Q: the gating network that is actually trained.
model = D2NN(act_error_quant, act2_error_quant)
# model.load_state_dict(state_dict)
model = model.to(device=device)
state_dict = torch.load('./checkpoints/mnist_d2nn_quant.pth')
model.load_state_dict(state_dict)
model_q = Q(act_error_quant, act2_error_quant)
model_q = model_q.to(device=device)
def run_epoch(loader, model, model_q, lamb, eps, criterion, optimizer=None, phase="train"):
    """
    Run one epoch of the routing network ``model_q`` against the frozen
    multi-exit network ``model``.

    For every batch, ``model`` produces three head outputs (N1, N2, N3); the
    gate ``model_q`` looks at the detached N1 features and routes each sample
    to the cheap head (N2) or the expensive head (N3). The gate is regressed
    towards a target that blends branch accuracy with a relative compute cost
    (weighted by ``lamb``), with epsilon-greedy exploration controlled by ``eps``.

    :param loader: DataLoader yielding (input, target) batches.
    :param model: frozen network returning (N1_out, N2_out, N3_out).
    :param model_q: gating network being trained.
    :param lamb: weight of accuracy vs. compute cost in the gate target.
    :param eps: probability of flipping the gate's routing decision.
    :param criterion: loss between mean gate output and target cost.
    :param optimizer: required when ``phase == "train"``.
    :param phase: "train" or "eval".
    :return: dict with routing fractions, losses and accuracies per branch;
        accuracies are NaN when a branch received no samples.
    """
    assert phase in ["train", "eval"], "Invalid Phase"
    num_n2 = 0
    num_n3 = 0
    loss_sum_q = 0.0
    correct_q = 0.0
    loss_sum_n2 = 0.0
    correct_n2 = 0.0
    loss_sum_n3 = 0.0
    correct_n3 = 0.0
    # Only the gate is ever trained; the backbone stays in eval mode.
    if phase == "train":
        model_q.train()
        model.eval()
    elif phase == "eval":
        model_q.eval()
        model.eval()
    ttl = 0
    with torch.autograd.set_grad_enabled(phase == "train"):
        for i, (input, target) in tqdm(enumerate(loader), total=len(loader)):
            input = input.to(device=device)
            target = target.to(device=device)
            N1_out, N2_out, N3_out = model(input)
            Q_out = model_q(N1_out.detach())
            # epsilon-greedy flip of the argmax routing decision
            r_mask = torch.rand_like(Q_out.detach()[:, 0]) < eps
            N3_mask = torch.argmax(Q_out.detach(), dim=1).bool() != r_mask
            N2_mask = ~N3_mask
            N2_masked = N2_out.detach()[N2_mask, :]
            N3_masked = N3_out.detach()[N3_mask, :]
            target_N2 = target.detach()[N2_mask]
            target_N3 = target.detach()[N3_mask]
            Q_out_N2 = Q_out[N2_mask, 0]
            Q_out_N3 = Q_out[N3_mask, 1]
            Q_out.retain_grad()
            Q_out_N2.retain_grad()
            Q_out_N3.retain_grad()
            if len(Q_out_N2) > 0:
                # branch accuracy on the routed samples
                # (dead f1_score computation removed: it was overwritten immediately)
                pred_n2 = N2_masked.data.argmax(1)
                score_N2 = torch.sum(pred_n2.eq(target_N2)).float() / len(target_N2)
                cost_N2 = torch.Tensor([lamb * score_N2 + (1 - lamb) * 0.2]).to(device)  # N1 is 5x cheaper
                mean_Q_N2 = torch.mean(Q_out_N2, dim=0, keepdim=True)
                mean_Q_N2.retain_grad()
                loss_N2 = criterion(mean_Q_N2, cost_N2)
                loss_sum_n2 += loss_N2.cpu().item() * input.size(0)
                num_n2 += len(Q_out_N2)
                pred_n2 = N2_masked.data.argmax(1, keepdim=True)
                correct_n2 += pred_n2.eq(target_N2.data.view_as(pred_n2)).sum().cpu().item()
            else:
                loss_N2 = torch.FloatTensor([0]).to(device)
            if len(Q_out_N3) > 0:
                pred_n3 = N3_masked.data.argmax(1)
                score_N3 = torch.sum(pred_n3.eq(target_N3)).float() / len(target_N3)
                cost_N3 = torch.Tensor([lamb * score_N3 + (1 - lamb) * 1]).to(device)  # N1 is 5x cheaper
                mean_Q_N3 = torch.mean(Q_out_N3, dim=0, keepdim=True)
                mean_Q_N3.retain_grad()
                loss_N3 = criterion(mean_Q_N3, cost_N3)
                loss_sum_n3 += loss_N3.cpu().item() * input.size(0)
                num_n3 += len(Q_out_N3)
                pred_n3 = N3_masked.data.argmax(1, keepdim=True)
                correct_n3 += pred_n3.eq(target_N3.data.view_as(pred_n3)).sum().cpu().item()
            else:
                loss_N3 = torch.FloatTensor([0]).to(device)
            if phase == "train":
                optimizer.zero_grad()
                # x1000 loss scaling; undone via grad_scaling=1/1000 in OptimLP
                loss_N2 = loss_N2 * 1000
                loss_N3 = loss_N3 * 1000
                (loss_N2 + loss_N3).backward()
                optimizer.step()
            ttl += input.size()[0]
    # Guard the 0/0 case explicitly: the original emitted a NumPy
    # RuntimeWarning and produced NaN implicitly when a branch was empty.
    return {
        'percent_n2': num_n2 / float(ttl),
        'loss_n2': loss_sum_n2 / float(ttl),
        'accuracy_n2': np.float64(correct_n2) / np.float64(num_n2) * 100 if num_n2 else float('nan'),
        'percent_n3': num_n3 / float(ttl),
        'loss_n3': loss_sum_n3 / float(ttl),
        'accuracy_n3': np.float64(correct_n3) / np.float64(num_n3) * 100 if num_n3 else float('nan')
    }
# Low-precision SGD: wrap the base optimizer in OptimLP so weights, gradients,
# momentum and accumulators are quantized according to the quantizers above.
optimizer = optim.SGD(model_q.parameters(), lr=0.000001, momentum=0.9)
optimizer = OptimLP(optimizer,
                    weight_quant=weight_quant,
                    grad_quant=grad_quant,
                    momentum_quant=momentum_quant,
                    acc_quant=acc_quant,
                    grad_scaling=1/1000)  # undoes the x1000 loss scaling in run_epoch
exp_lr_scheduler = lr_scheduler.StepLR(optimizer,step_size=5,gamma=0.1)
eps = 0.5    # initial exploration rate, decayed by 0.9 each epoch
lamb = 0.55  # accuracy-vs-cost trade-off weight passed to run_epoch
for epoch in range(20):
    eps = eps*0.9
    train_res = run_epoch(train_loader, model, model_q, lamb, eps, F.mse_loss, optimizer=optimizer, phase="train")
    print(train_res)
    # evaluation uses eps=0, i.e. pure greedy routing
    test_res = run_epoch(test_loader, model, model_q, lamb, 0, F.mse_loss, optimizer=optimizer, phase="eval")
    print(test_res)
    exp_lr_scheduler.step()
torch.save(model_q.state_dict(),'./checkpoints/mnist_d2nn_q_quant.pth')
|
{"hexsha": "dc7858a82a31268654fee296899a575f144a377d", "size": 8626, "ext": "py", "lang": "Python", "max_stars_repo_path": "illusion_testing/training/train_d2nn_Q2.py", "max_stars_repo_name": "robust-systems-group/illusion_system", "max_stars_repo_head_hexsha": "f142cc1dacb02312ad78b0ec613061fe84e6648c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-01-22T14:28:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-21T10:12:35.000Z", "max_issues_repo_path": "illusion_testing/training/train_d2nn_Q2.py", "max_issues_repo_name": "robust-systems-group/illusion_system", "max_issues_repo_head_hexsha": "f142cc1dacb02312ad78b0ec613061fe84e6648c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "illusion_testing/training/train_d2nn_Q2.py", "max_forks_repo_name": "robust-systems-group/illusion_system", "max_forks_repo_head_hexsha": "f142cc1dacb02312ad78b0ec613061fe84e6648c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9351851852, "max_line_length": 115, "alphanum_fraction": 0.6005100858, "include": true, "reason": "import numpy", "num_tokens": 2112}
|
import numpy as np
from .utils import parallel_talismane, lemmatize
from .textometry import match_lexique_to_responses_texts
from .constant import *
WINDOW_SIZE = 4
import re
def get_data_from_texts(texts, batch_size=1000, lemmas_only=False):
    """Run Talismane over `texts` and return per-text token data.

    Args:
        texts: iterable of raw text strings to analyse.
        batch_size: number of texts sent to Talismane per batch.
        lemmas_only: if True, return only the LEMMA column values for each
            text; otherwise return the full token table values.

    Returns:
        list with one entry per input text: an array of lemmas, or of full
        token rows, depending on `lemmas_only`.
    """
    def transform_and_return(df):
        # Talismane marks unknown lemmas as "_"; fall back to the lowercased
        # surface form (FORM) in that case.
        df.LEMMA = df.apply(lambda x: x.FORM.lower() if x.LEMMA == "_" else x.LEMMA, axis=1)
        return df
    # Single pass over the Talismane output; only the projection
    # (LEMMA column vs. full table) differs between the two modes.
    res = []
    for talismane_data in parallel_talismane(data=texts, batch_size=batch_size):
        df = transform_and_return(talismane_data)
        res.append(df.LEMMA.values if lemmas_only else df.values)
    return res
def get_matching_data(lemmas):
    """Match the three lexicons against lemmatized texts.

    Args:
        lemmas: list of per-text lemma sequences.

    Returns:
        list with one dict per text, mapping lexicon name
        ("transport_term" / "change_verb" / "attribut") to a
        {position: term index} dict.
    """
    def positions_to_dict(match_res):
        # match_res yields (term_index, position) pairs; index them by
        # position.  (The original helper also took an unused `lexiques`
        # argument, dropped here.)
        return {m[1]: m[0] for m in match_res}
    transp_terms_matched = match_lexique_to_responses_texts(lemmas, lexiques["transport_term"], return_pos=True)
    change_verbs_matched = match_lexique_to_responses_texts(lemmas, lexiques["change_verb"], return_pos=True)
    attribut_matched = match_lexique_to_responses_texts(lemmas, lexiques["attribut"], return_pos=True)
    final_res = []
    for i in range(len(transp_terms_matched)):
        final_res.append({
            "transport_term": positions_to_dict(transp_terms_matched[i]),
            "change_verb": positions_to_dict(change_verbs_matched[i]),
            "attribut": positions_to_dict(attribut_matched[i]),
        })
    return final_res
class Contribution(object):
    """One respondent contribution, split into sentences with their matches.

    Splits `text_data` on `phrase_separator` tokens and builds one Sentence
    per segment, re-basing the lexicon match positions to be
    sentence-relative.
    """

    def __init__(self, text_data, matching_data_dict, column=None, phrase_separator=[".", "?", "!"],
                 erase_temp_data=True):
        # NOTE(review): mutable default for phrase_separator is only read
        # (membership tests), never mutated, so it is safe as written.
        self.__text = text_data  # raw token data; deleted after splitting if erase_temp_data
        self.matching_data_dict = matching_data_dict  # lexicon name -> {position: term index}
        self.column = column  # column holding the token when __text is 2-D; falsy for flat token lists
        self.phrase_separator = phrase_separator
        self.erase_temp_data = erase_temp_data
        self.split_in_sentences()

    def get_matched_for_a_sentence(self, begin_pos, end_pos):
        """Return lexicon matches falling in [begin_pos, end_pos], with
        positions shifted to be relative to begin_pos."""
        new_dict = {}
        for lexique in self.matching_data_dict:
            new_dict[lexique] = {}
            for pos in self.matching_data_dict[lexique]:
                if pos <= end_pos and pos >= begin_pos:
                    new_dict[lexique][pos - begin_pos] = self.matching_data_dict[lexique][pos]
        return new_dict

    def split_in_sentences(self):
        """Cut the stored text into Sentence objects at separator tokens.

        NOTE(review): tokens after the last separator are silently dropped
        unless no separator was found at all — confirm this is intended.
        """
        self.sentences = []
        index_start = 0
        for i in range(len(self.__text)):
            if self.column and self.__text[i, self.column] in self.phrase_separator:
                matched_data = self.get_matched_for_a_sentence(index_start, i)
                s = Sentence(self.__text[index_start:i + 1], matched_data)
                self.sentences.append(s)
                index_start = i + 1
            elif not self.column and self.__text[i] in self.phrase_separator:
                matched_data = self.get_matched_for_a_sentence(index_start, i)
                s = Sentence(self.__text[index_start:i + 1], matched_data)
                self.sentences.append(s)
                index_start = i + 1
        if index_start == 0 and not self.sentences:
            # No separator found: the whole text is a single sentence.
            self.sentences.append(Sentence(self.__text, self.matching_data_dict, True))
        if len(self.sentences) == 1:
            self.sentences[0].is_alone = True
        if self.erase_temp_data:
            # Free the raw text once sentences have been built.
            del self.__text

    def apply(self, treatment, *args):
        """Run `treatment` on every sentence and collect result rows,
        appending any extra *args to each row."""
        results = []
        for sentence in self.sentences:
            sentence_results = treatment(sentence)
            for res in sentence_results:
                res.extend([*args])
            results.extend(sentence_results)
        return results
class Sentence():
    """A single sentence: its token data plus the lexicon matches local to it."""

    def __init__(self, data, matched_data, alone=False):
        # True when the sentence is the only one in its contribution.
        self.is_alone = alone
        # Lexicon name -> {position: term index}; positions are sentence-relative.
        self.lexique_matched = matched_data
        # Token rows (or raw tokens) belonging to this sentence only.
        self.data = data
class Treatement():
    """Base class for sentence-level extraction rules.

    Subclasses implement parseSentence; instances are callable so they can
    be passed directly to Contribution.apply.
    """

    def __init__(self, name):
        self.name = name  # rule label attached to every extracted result row

    def __call__(self, a):
        # Delegate so an instance can be used as a plain function on a sentence.
        return self.parseSentence(a)

    def parseSentence(self, sentence):
        # Subclass responsibility: return a list of result rows.
        raise NotImplementedError()
class TransportAdjectif(Treatement):
    """Rule "TRAN + ADJ": pair each matched transport term with adjectives
    appearing within WINDOW_SIZE tokens after it."""

    def __init__(self):
        Treatement.__init__(self, "TRAN + ADJ")

    def parseSentence(self, sentence):
        """Return rows [transport term, None, adjective, rule name, sentence text]."""
        results = []
        # Positions of all ADJ-tagged tokens (column 3 = POS tag).
        pos_adj = np.argwhere(sentence.data[:, 3] == "ADJ").flatten()
        N = len(pos_adj)
        for pos in sentence.lexique_matched["transport_term"]:
            term_index = sentence.lexique_matched["transport_term"][pos]
            term_str = lexiques["transport_term"].iloc[term_index].word
            A = np.array([pos] * N)
            diff = pos_adj - A
            # Adjectives strictly after the term and within the window
            # (column 2 = lemma).
            adj_selected = sentence.data[:, 2][pos_adj[(diff > 0) & (diff < WINDOW_SIZE)]]
            if len(adj_selected) > 0:
                # Bug fix: raw string for the regex — "\d" is an invalid
                # escape sequence in a normal string literal.
                results.extend(
                    [[term_str, None, adj, self.name, " ".join(sentence.data[:, 1])] for adj in adj_selected if
                     not re.match(r"\d+", adj) and adj not in term_str])
        return results
class AttributAdjectif(Treatement):
    """Rule "ATTR + ADJ": pair each matched attribute term with adjectives
    appearing within WINDOW_SIZE tokens after it."""

    def __init__(self):
        Treatement.__init__(self, "ATTR + ADJ")

    def parseSentence(self, sentence):
        """Return rows ["Q_SUBJ", attribute, adjective, rule name, sentence text]."""
        results = []
        # Positions of all ADJ-tagged tokens (column 3 = POS tag).
        pos_adj = np.argwhere(sentence.data[:, 3] == "ADJ").flatten()
        N = len(pos_adj)
        for pos in sentence.lexique_matched["attribut"]:
            term_index = sentence.lexique_matched["attribut"][pos]
            term_str = lexiques["attribut"].iloc[term_index].word
            A = np.array([pos] * N)
            diff = pos_adj - A
            # Adjectives strictly after the attribute, inside the window
            # (column 2 = lemma).
            adj_selected = sentence.data[:, 2][pos_adj[(diff > 0) & (diff < WINDOW_SIZE)]]
            if len(adj_selected) > 0:
                # Bug fix: raw string for the regex — "\d" is an invalid
                # escape sequence in a normal string literal.
                results.extend(
                    [["Q_SUBJ", term_str, adj, self.name, " ".join(sentence.data[:, 1])] for adj in adj_selected if
                     not re.match(r"\d+", adj)])
        return results
class AttributTransport(Treatement):
    """Rule "ATTR + TRAN": an attribute term followed within WINDOW_SIZE
    tokens by a transport term."""

    def __init__(self):
        # Bug fix: the rule label was "ATTR + ADJ", copy-pasted from
        # AttributAdjectif; this rule pairs attributes with transport terms.
        Treatement.__init__(self, "ATTR + TRAN")

    def parseSentence(self, sentence):
        """Return rows [attribute, None, transport term, rule name, sentence text]."""
        results = []
        for pos1 in sentence.lexique_matched["attribut"]:
            chang_word = lexiques["attribut"].iloc[sentence.lexique_matched["attribut"][pos1]].word
            for pos2 in sentence.lexique_matched["transport_term"]:
                transp_word = lexiques["transport_term"].iloc[sentence.lexique_matched["transport_term"][pos2]].word
                diff = pos2 - pos1
                # Transport term must follow the attribute within the window.
                if diff > 0 and diff < WINDOW_SIZE:
                    results.append([chang_word, None, transp_word, self.name, " ".join(sentence.data[:, 1])])
        return results
class ChangementAttributTransport(Treatement):
    """Rule "CHG + ATTR + TRAN": a change verb followed within WINDOW_SIZE
    tokens by a transport term, with an attribute term strictly between them."""

    def __init__(self):
        Treatement.__init__(self, "CHG + ATTR + TRAN")

    def parseSentence(self, sentence):
        """Return rows [change verb, attribute, transport term, rule name, sentence text]."""
        results = []
        for pos1 in sentence.lexique_matched["change_verb"]:
            chang_word = lexiques["change_verb"].iloc[sentence.lexique_matched["change_verb"][pos1]].word
            for pos2 in sentence.lexique_matched["transport_term"]:
                transp_word = lexiques["transport_term"].iloc[sentence.lexique_matched["transport_term"][pos2]].word
                diff = pos2 - pos1
                # Transport term must follow the verb within the window.
                if diff > 0 and diff < WINDOW_SIZE:
                    # The attribute must sit strictly between verb and term.
                    for pos3 in sentence.lexique_matched["attribut"]:
                        if pos3 > pos1 and pos3 < pos2:
                            attr_word = lexiques["attribut"].iloc[sentence.lexique_matched["attribut"][pos3]].word
                            results.append(
                                [chang_word, attr_word, transp_word, self.name, " ".join(sentence.data[:, 1])])
        return results
class ChangementTransport(Treatement):
    """Rule "CHG + TRAN": a change verb followed within WINDOW_SIZE tokens
    by a transport term."""

    def __init__(self):
        Treatement.__init__(self, "CHG + TRAN")

    def parseSentence(self, sentence):
        """Return rows [change verb, None, transport term, rule name, sentence text]."""
        results = []
        change_hits = sentence.lexique_matched["change_verb"]
        transp_hits = sentence.lexique_matched["transport_term"]
        sentence_text = " ".join(sentence.data[:, 1])
        for verb_pos, verb_idx in change_hits.items():
            verb_word = lexiques["change_verb"].iloc[verb_idx].word
            for term_pos, term_idx in transp_hits.items():
                term_word = lexiques["transport_term"].iloc[term_idx].word
                # Transport term must follow the verb within the window.
                if 0 < term_pos - verb_pos < WINDOW_SIZE:
                    results.append([verb_word, None, term_word, self.name, sentence_text])
        return results
class ChangementTransportAdjectif(Treatement):
    """Rule pairing a change verb with an adjective within WINDOW_SIZE
    tokens, requiring a transport term strictly between them.

    NOTE(review): the label "CHG + ATTR + ADJ" matches neither the class
    name (ChangementTransportAdjectif) nor the logic (transport term, not
    attribute); it is kept unchanged so downstream label consumers are
    unaffected — confirm which one is intended.
    """

    def __init__(self):
        Treatement.__init__(self, "CHG + ATTR + ADJ")

    def parseSentence(self, sentence):
        """Return rows [change verb, transport term, adjective, rule name, sentence text]."""
        results = []
        pos_adj = np.argwhere(sentence.data[:, 3] == "ADJ").flatten()
        # Side effect kept from the original: record adjective positions in
        # the sentence's match dict under "adjectif".
        sentence.lexique_matched["adjectif"] = {}
        for pos in pos_adj:
            sentence.lexique_matched["adjectif"][pos] = pos  # sentence.data[:,2][pos]
        for pos1 in sentence.lexique_matched["change_verb"]:
            chang_word = lexiques["change_verb"].iloc[sentence.lexique_matched["change_verb"][pos1]].word
            for pos2 in sentence.lexique_matched["adjectif"]:
                adj_word = sentence.data[:, 2][pos2]
                # Bug fix: raw string for the regex — "\d" is an invalid
                # escape sequence in a normal string literal.
                if re.match(r"\d+", adj_word):
                    continue
                diff = pos2 - pos1
                if diff > 0 and diff < WINDOW_SIZE:
                    for pos3 in sentence.lexique_matched["transport_term"]:
                        if pos3 > pos1 and pos3 < pos2:
                            transp_word = lexiques["transport_term"].iloc[
                                sentence.lexique_matched["transport_term"][pos3]].word
                            # Skip adjectives that are part of the transport term itself.
                            if not adj_word in transp_word:
                                results.append(
                                    [chang_word, transp_word, adj_word, self.name, " ".join(sentence.data[:, 1])])
        return results
class GratuiteTransport(Treatement):
    """Rule "GRATUITE + TRAN": the lemma "gratuité" followed within
    WINDOW_SIZE tokens by a transport term."""

    def __init__(self):
        Treatement.__init__(self, "GRATUITE + TRAN")

    def parseSentence(self, sentence):
        """Return rows ["gratuité", None, transport term, rule name, sentence text]."""
        results = []
        # Record every occurrence of the lemma "gratuité" (column 2 = lemma).
        # This mutates sentence.lexique_matched, as the original code did.
        gratuite_positions = np.argwhere(sentence.data[:, 2] == "gratuité").flatten()
        sentence.lexique_matched["gratuité"] = {p: p for p in gratuite_positions}
        sentence_text = " ".join(sentence.data[:, 1])
        for grat_pos in sentence.lexique_matched["gratuité"]:
            for term_pos, term_idx in sentence.lexique_matched["transport_term"].items():
                term_word = lexiques["transport_term"].iloc[term_idx].word
                # Transport term must follow "gratuité" within the window.
                if 0 < term_pos - grat_pos < WINDOW_SIZE:
                    results.append(["gratuité", None, term_word, self.name, sentence_text])
        return results
class ShortPhrases(Treatement):
    """Rule "ADD TRAN": for single-sentence contributions with neither
    attribute nor change-verb matches, propose adding each matched
    transport term."""

    def __init__(self):
        Treatement.__init__(self, "ADD TRAN")

    def parseSentence(self, sentence):
        """Return rows ["ajouter", None, transport term, rule name, sentence text]."""
        results = []
        matched = sentence.lexique_matched
        has_context = matched["attribut"] or matched["change_verb"]
        if sentence.is_alone and not has_context:
            sentence_text = " ".join(sentence.data[:, 1])
            for term_idx in matched["transport_term"].values():
                term_word = lexiques["transport_term"].iloc[term_idx].word
                results.append(["ajouter", None, term_word, self.name, sentence_text])
        return results
def is_between(pos1, pos2, pos3):
    """Return True when pos2 lies strictly between pos1 and pos3."""
    return pos1 < pos2 < pos3
class IsThereSomeone(Treatement):
    """Rule "ISTHERESOMEONE?": change verb + transport term within the
    window, optionally with an attribute strictly between them; also emits
    a counter-example row for sentences with neither lexicon matched.

    Unlike the other rules, result rows additionally carry the three token
    positions before the rule name.
    """

    def __init__(self):
        Treatement.__init__(self, "ISTHERESOMEONE?")

    def parseSentence(self, sentence):
        """Return rows [verb, attr|None, term, pos1, pos3|None, pos2, name, text],
        or a "CONTRE-EXEMPLE" row when no verb and no term matched."""
        results = []
        if sentence.lexique_matched["change_verb"] and sentence.lexique_matched["transport_term"]:
            for pos1 in sentence.lexique_matched["change_verb"]:
                change_word = lexiques["change_verb"].iloc[sentence.lexique_matched["change_verb"][pos1]].word
                for pos2 in sentence.lexique_matched["transport_term"]:
                    transp_word = lexiques["transport_term"].iloc[sentence.lexique_matched["transport_term"][pos2]].word
                    # Transport term must follow the verb within the window.
                    if pos2 - pos1 < WINDOW_SIZE and pos2 - pos1 > 0:
                        if sentence.lexique_matched["attribut"]:
                            for pos3 in sentence.lexique_matched["attribut"]:
                                attr_word = lexiques["attribut"].iloc[sentence.lexique_matched["attribut"][pos3]].word
                                # Keep only attributes strictly between verb and term.
                                if is_between(pos1, pos3, pos2):
                                    results.append([change_word, attr_word, transp_word, pos1, pos3, pos2, self.name,
                                                    " ".join(sentence.data[:, 1])])
                        else:
                            results.append([change_word, None, transp_word, pos1, None, pos2, self.name,
                                            " ".join(sentence.data[:, 1])])
        if not sentence.lexique_matched["change_verb"] and not sentence.lexique_matched["transport_term"]:
            # Counter-example: neither a change verb nor a transport term matched.
            results.append([None, None, None, None, None, None, "CONTRE-EXEMPLE", " ".join(sentence.data[:, 1])])
        return results
|
{"hexsha": "cf7fe62f0b1690bfbc664b653a9248b925539f4e", "size": 13283, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/cooc.py", "max_stars_repo_name": "Make-the-Debat-Great-Again/grand_debat_nlp", "max_stars_repo_head_hexsha": "6be7211492e10aae97a9d6e001f87af1d499de1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/cooc.py", "max_issues_repo_name": "Make-the-Debat-Great-Again/grand_debat_nlp", "max_issues_repo_head_hexsha": "6be7211492e10aae97a9d6e001f87af1d499de1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/cooc.py", "max_forks_repo_name": "Make-the-Debat-Great-Again/grand_debat_nlp", "max_forks_repo_head_hexsha": "6be7211492e10aae97a9d6e001f87af1d499de1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-15T16:20:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-17T13:42:22.000Z", "avg_line_length": 41.509375, "max_line_length": 120, "alphanum_fraction": 0.6052096665, "include": true, "reason": "import numpy", "num_tokens": 3047}
|
import h5py
import pandas as pd
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import model_from_json
# NOTE(review): h5py and pandas are imported but never used in this script.
# The maximum number of words to be used. (most frequent)
max_top_words = 50000
# Max number of words in each complaint.
max_tweet_lenght = 300
# NOTE(review): this Tokenizer is created fresh and never fitted on the
# training corpus, so texts_to_sequences below has no word index and the
# padded sequence will be empty/zeros. The tokenizer used at training time
# should be loaded (or re-fitted on the same vocabulary) — confirm.
tokenizer = Tokenizer(num_words=max_top_words, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
#loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# Read a single tweet from stdin, tokenize, pad and classify it.
tweet = input("Enter your tweet here: ")
tweet = [str(tweet)]
seq = tokenizer.texts_to_sequences(tweet)
padded = pad_sequences(seq, maxlen=max_tweet_lenght)
pred = loaded_model.predict(padded)
labels=['populistic','technocratic']
# Print raw prediction scores and the argmax label.
print(pred, labels[np.argmax(pred)])
|
{"hexsha": "9d9cccfb99ff887989bd417f101c9e68a411553d", "size": 1107, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorscript_RNN_ideology_recognition.py", "max_stars_repo_name": "Pijanes/Party_Mobilisation_Twitter", "max_stars_repo_head_hexsha": "59181b442fa16c7e96cbabfcd28798b473aad2c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorscript_RNN_ideology_recognition.py", "max_issues_repo_name": "Pijanes/Party_Mobilisation_Twitter", "max_issues_repo_head_hexsha": "59181b442fa16c7e96cbabfcd28798b473aad2c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorscript_RNN_ideology_recognition.py", "max_forks_repo_name": "Pijanes/Party_Mobilisation_Twitter", "max_forks_repo_head_hexsha": "59181b442fa16c7e96cbabfcd28798b473aad2c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-18T08:32:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-18T08:32:12.000Z", "avg_line_length": 34.59375, "max_line_length": 101, "alphanum_fraction": 0.7705510388, "include": true, "reason": "import numpy", "num_tokens": 262}
|
import numpy as np
from collections.abc import Sequence, Iterable
from numbers import Number
from .type import str_to_dtype, is_arr, is_dict, is_seq_of, is_type, scalar_type, is_str
def astype(x, dtype):
    """Cast an array-like `x` to `dtype`, given as a dtype name string.

    A None dtype is a no-op. Dispatches to numpy ``.astype`` or torch
    ``.to`` depending on the array backend.
    """
    if dtype is None:
        return x
    # Both conditions are hard requirements: x must be an array and dtype a string.
    assert is_arr(x) and is_str(dtype), (type(x), type(dtype))
    if is_arr(x, 'np'):
        return x.astype(str_to_dtype(dtype, 'np'))
    elif is_arr(x, 'torch'):
        return x.to(str_to_dtype(dtype, 'torch'))
    elif is_type(dtype):
        # NOTE(review): likely unreachable — the assert above requires dtype
        # to be a string, which presumably never satisfies is_type; confirm.
        return dtype(x)
    else:
        raise NotImplementedError(f"As type {type(x)}")
def to_torch(x, dtype=None, device=None, non_blocking=False):
    """Recursively convert `x` to torch tensors.

    Handles None, dicts, sequences (recursing into each element), existing
    tensors (detached), numbers/sequences (via numpy) and numpy arrays.
    dtype/device, when given, are applied to every produced tensor.

    Raises:
        NotImplementedError: for unsupported input types.
    """
    import torch
    if x is None:
        return None
    elif is_dict(x):
        return {k: to_torch(x[k], dtype, device, non_blocking) for k in x}
    elif is_seq_of(x):
        # Preserve the concrete sequence type (list, tuple, subclass, ...).
        return type(x)([to_torch(y, dtype, device, non_blocking) for y in x])
    if isinstance(x, torch.Tensor):
        ret = x.detach()
    elif isinstance(x, (Sequence, Number)):
        ret = torch.from_numpy(np.array(x))
    elif isinstance(x, np.ndarray):
        ret = torch.from_numpy(x)
    else:
        raise NotImplementedError(f"{x} {dtype}")
    if device is not None:
        ret = ret.to(device, non_blocking=non_blocking)
    if dtype is not None:
        ret = astype(ret, dtype)
    return ret
def to_np(x, dtype=None):
    """Recursively convert `x` to numpy values.

    Handles None, strings (as numpy byte strings), dicts, sequences
    (recursing into each element), numbers, numpy arrays and torch tensors.
    `dtype`, when given, is applied to every produced array.

    Raises:
        NotImplementedError: for unsupported input types.
    """
    if x is None:
        return None
    elif isinstance(x, str):
        # Fix: np.string_ is a deprecated alias removed in NumPy 2.0;
        # np.bytes_ is the identical canonical name.
        return np.bytes_(x)
    elif is_dict(x):
        return {k: to_np(x[k], dtype) for k in x}
    elif is_seq_of(x):
        # Preserve the concrete sequence type (list, tuple, subclass, ...).
        return type(x)([to_np(y, dtype) for y in x])
    elif isinstance(x, (Number, Sequence)):
        ret = np.array(x, dtype=scalar_type(x))
    elif isinstance(x, np.ndarray):
        ret = x
    else:
        import torch
        if isinstance(x, torch.Tensor):
            ret = x.cpu().detach().numpy()
        else:
            raise NotImplementedError(f"{dtype}")
    if dtype is not None:
        ret = astype(ret, dtype)
    return ret
def iter_cast(inputs, dst_type, return_type=None):
    """Cast elements of an iterable object into some type.

    Args:
        inputs (Iterable): The input object.
        dst_type (type): Destination type.
        return_type (type, optional): If specified, the output object will be
            converted to this type, otherwise an iterator is returned.

    Returns:
        iterator or specified type: The converted object.

    Raises:
        TypeError: If ``inputs`` is not iterable or ``dst_type`` is not a type.
    """
    if not isinstance(inputs, Iterable):
        raise TypeError('inputs must be an iterable object')
    if not isinstance(dst_type, type):
        raise TypeError('"dst_type" must be a valid type')
    casted = (dst_type(item) for item in inputs)
    return casted if return_type is None else return_type(casted)
def list_cast(inputs, dst_type):
    """Cast every element of `inputs` to `dst_type`, returned as a list."""
    return iter_cast(inputs, dst_type, return_type=list)
def tuple_cast(inputs, dst_type):
    """Cast every element of `inputs` to `dst_type`, returned as a tuple."""
    return iter_cast(inputs, dst_type, return_type=tuple)
def dict_to_seq(inputs, num_output=2):
    """Flatten a dict into key-sorted sequences.

    Args:
        inputs (dict): Mapping to flatten.
        num_output (int): 2 -> return (keys, values) as two lists;
            1 -> return a single tuple of (key, value) pairs.

    Returns:
        tuple: Two lists, or one tuple of pairs, with keys sorted ascending.

    Raises:
        ValueError: If ``num_output`` is neither 1 nor 2.
    """
    sorted_keys = sorted(inputs.keys())
    sorted_values = [inputs[key] for key in sorted_keys]
    if num_output == 2:
        return sorted_keys, sorted_values
    if num_output == 1:
        return tuple(zip(sorted_keys, sorted_values))
    raise ValueError(f"num_output is {num_output}, which is not 1 or 2")
def seq_to_dict(*args):
    """Build a dict from sequences.

    Accepts either two parallel sequences (keys, values) of equal length,
    or a single sequence of (key, value) pairs.

    Returns:
        dict: Mapping built from the inputs.

    Raises:
        AssertionError: If lengths mismatch or a pair is not length 2.
        ValueError: If the number of arguments is not 1 or 2.
    """
    args = list(args)
    if len(args) == 2:
        keys, values = args
        assert len(keys) == len(values)
        return {keys[i]: values[i] for i in range(len(keys))}
    elif len(args) == 1:
        ret = {}
        # Bug fix: iterate over the pairs inside args[0]; the original
        # looped over args itself and never returned ret.
        for item in args[0]:
            assert len(item) == 2
            ret[item[0]] = item[1]
        return ret
    else:
        raise ValueError(f"len(args) is {len(args)}, which is not 1 or 2")
def dict_to_str(inputs):
    """Render a dict as space-separated 'key:value' tokens.

    Float values with magnitude below 1e-2 are shown in scientific
    notation (4 digits); other floats with six decimal places; everything
    else via plain formatting.
    """
    parts = []
    for key, value in inputs.items():
        if isinstance(value, (float, np.float32, np.float64)):
            if np.abs(value).min() < 1E-2:
                parts.append(f'{key}:{value:.4e}')
            else:
                parts.append(f'{key}:{value:.6f}')
        else:
            parts.append(f'{key}:{value}')
    return " ".join(parts)
def number_to_str(x, num):
    """Format a value for display.

    Args:
        x: A string (returned unchanged) or a scalar number.
        num (int): Decimal places used for real numbers.

    Returns:
        str: `x` itself, a fixed-point rendering for real scalars, or
        str(x) for non-real scalars (e.g. complex).

    Raises:
        TypeError: If ``x`` is neither a string nor a scalar.
    """
    if isinstance(x, str):
        return x
    elif np.isscalar(x):
        if np.isreal(x):
            return f'{x:.{num}f}'
        else:
            return str(x)
    else:
        # Fix: the original printed type(x) to stdout as a debug leftover;
        # include the type in the exception message instead.
        raise TypeError(f"Type of {x} ({type(x)}) is not a number")
|
{"hexsha": "1cc5bcf8ca8c4d233aac92bd258ca8f60bcc4dbe", "size": 4456, "ext": "py", "lang": "Python", "max_stars_repo_path": "mani_skill_learn/utils/data/converter.py", "max_stars_repo_name": "Zed-Wu/ManiSkill-Learn", "max_stars_repo_head_hexsha": "8056fe327752cd0863f8730672fe62bd85a0ec12", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mani_skill_learn/utils/data/converter.py", "max_issues_repo_name": "Zed-Wu/ManiSkill-Learn", "max_issues_repo_head_hexsha": "8056fe327752cd0863f8730672fe62bd85a0ec12", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mani_skill_learn/utils/data/converter.py", "max_forks_repo_name": "Zed-Wu/ManiSkill-Learn", "max_forks_repo_head_hexsha": "8056fe327752cd0863f8730672fe62bd85a0ec12", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5099337748, "max_line_length": 101, "alphanum_fraction": 0.5908886894, "include": true, "reason": "import numpy", "num_tokens": 1121}
|
import numpy as np
def sample(env,
           controller,
           num_paths=10,
           horizon=1000,
           render=False,
           verbose=False):
    """
    Samples paths in an environment with a provided controller.

    Args:
        env: Gym-style environment exposing reset()/step()/render().
        controller: object exposing get_action(state).
        num_paths: number of episodes to collect.
        horizon: maximum number of steps per episode.
        render: if True, render every 10th episode.
        verbose: if True, print a per-episode length/mean-reward summary.

    Returns:
        list of dicts with keys 'observations', 'next_observations',
        'actions' and 'rewards', each a numpy array stacked over the episode.
    """
    paths = []
    for i_episode in range(num_paths):
        state = env.reset()
        state_t_array, action_array, state_t1_array, reward_array = [], [], [], []
        for t in range(horizon):
            if render and (i_episode % 10 == 0):
                env.render()
            action = controller.get_action(state)
            prev_state = state
            state, reward, done, info = env.step(action)
            # append triples (obs_t, act_t, obs_t1) to dataset
            state_t_array.append(prev_state)
            action_array.append(action)
            state_t1_array.append(state)
            reward_array.append(reward)
            if done:
                break
        if verbose:
            print("Generated new episode {} - Length: {}, Mean-Reward: {}".format(i_episode, len(reward_array),
                                                                                  np.mean(reward_array)))
        path_dict = {
            'observations': np.stack(state_t_array, axis=0),
            'next_observations': np.stack(state_t1_array, axis=0),
            'actions': np.stack(action_array, axis=0),
            'rewards': np.stack(reward_array, axis=0)
        }
        paths.append(path_dict)
    return paths
def path_reward(reward_fn, path):
    """Total reward of a sampled path: reward_fn summed over its transitions."""
    return trajectory_cost_fn(reward_fn, path['observations'], path['actions'], path['next_observations'])
def trajectory_cost_fn(reward_fn, states, actions, next_states):
    """Sum reward_fn over each (state, action, next_state) transition."""
    return sum(
        reward_fn(states[i], actions[i], next_states[i])
        for i in range(len(actions))
    )
|
{"hexsha": "87c04a8ce6a62f9c34823f66d5fceb55cafa4720", "size": 1967, "ext": "py", "lang": "Python", "max_stars_repo_path": "sandbox/ours/model_based_rl/helpers.py", "max_stars_repo_name": "jackwilkinson255/mbmpo_master", "max_stars_repo_head_hexsha": "e9e0eaf542c7895764dcb0bfee28752818124ff2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2018-11-15T14:14:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-10T01:53:43.000Z", "max_issues_repo_path": "sandbox/ours/model_based_rl/helpers.py", "max_issues_repo_name": "hongzimao/model_ensemble_meta_learning", "max_issues_repo_head_hexsha": "8b1351df94dfe530efaff1118022315c8d877774", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-05-05T23:39:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-15T15:28:06.000Z", "max_forks_repo_path": "sandbox/ours/model_based_rl/helpers.py", "max_forks_repo_name": "hongzimao/model_ensemble_meta_learning", "max_forks_repo_head_hexsha": "8b1351df94dfe530efaff1118022315c8d877774", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-11-15T16:47:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T14:58:01.000Z", "avg_line_length": 33.9137931034, "max_line_length": 111, "alphanum_fraction": 0.5775292323, "include": true, "reason": "import numpy", "num_tokens": 409}
|
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import sys
from operator import sub
def get_aspect(ax):
    """Return the effective aspect ratio of a matplotlib axes.

    Computed as the display-unit ratio (axes height over width, in inches)
    divided by the data-unit ratio (y span over x span).
    """
    # Total figure size in inches.
    fig_w, fig_h = ax.get_figure().get_size_inches()
    # Fraction of the figure occupied by the axes.
    _, _, frac_w, frac_h = ax.get_position().bounds
    # Ratio of display units.
    disp_ratio = (fig_h * frac_h) / (fig_w * frac_w)
    # Ratio of data units; both spans are lim[0]-lim[1] (negative over
    # negative), so the signs cancel.
    y_lo, y_hi = ax.get_ylim()
    x_lo, x_hi = ax.get_xlim()
    data_ratio = (y_lo - y_hi) / (x_lo - x_hi)
    return disp_ratio / data_ratio
plt.style.use("classic")
# Band-structure file: column 0 is the k-path coordinate, each remaining
# column holds one state's energies.
dat = np.loadtxt(sys.argv[1])
Nkpt = dat.shape[0]
Nstates = dat.shape[1] - 1
print("Nkpt = ", Nkpt)
print("Nstates = ", Nstates)
plt.clf()
fig = plt.gcf()
scal = 1.2
fig.set_size_inches(6*scal,8*scal)
x = dat[:,0]
# One curve per state, plotted against the k-path coordinate.
for ist in range(1,Nstates+1):
    plt.plot( x, dat[:,ist], marker='o' )
#w = plt.xlim()[1] - plt.xlim()[0]
#h = plt.ylim()[1] - plt.ylim()[0]
#aspect_orig = get_aspect(plt.axes())
#h_w = h/w
#print("h_w = ", h_w)
#plt.axes().set_aspect(h_w/aspect_orig*aspect_orig)
# Output file: same name as the input with .dat replaced by .pdf.
filplot = sys.argv[1].replace(".dat",".pdf")
plt.savefig(filplot)
#import os
#os.system("pdfcrop " + filplot + " " + filplot)
|
{"hexsha": "c9439ff625dd23f69d19e671177d20262f52e3ba", "size": 1186, "ext": "py", "lang": "Python", "max_stars_repo_path": "PW/sch_03/plot_bandstructure.py", "max_stars_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_stars_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-01-03T02:19:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-29T13:30:20.000Z", "max_issues_repo_path": "PW/sch_03/plot_bandstructure.py", "max_issues_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_issues_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PW/sch_03/plot_bandstructure.py", "max_forks_repo_name": "f-fathurrahman/ffr-ElectronicStructure.jl", "max_forks_repo_head_hexsha": "35dca9831bfc6a3e49bb0f3a5872558ffce4b211", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-03-23T06:58:47.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-03T00:54:28.000Z", "avg_line_length": 22.8076923077, "max_line_length": 64, "alphanum_fraction": 0.655143339, "include": true, "reason": "import numpy", "num_tokens": 365}
|
import torch
import copy
from torch.utils.data import Dataset
import numpy as np
from pathlib import Path
class PPODataset(Dataset):
    """Minibatch view over one flat rollout batch for PPO training.

    The collected values (obs, actions, advantages, ...) are sliced into
    ``batch_size // minibatch_size`` minibatches. In RNN mode the batch is
    treated as ``batch_size // seq_len`` games of ``seq_len`` steps and
    minibatches are cut on game boundaries so hidden states stay aligned.
    """

    def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len):
        self.is_rnn = is_rnn
        self.seq_len = seq_len
        self.batch_size = batch_size
        self.minibatch_size = minibatch_size
        self.device = device
        # Number of minibatches per epoch.
        self.length = self.batch_size // self.minibatch_size
        self.is_discrete = is_discrete
        self.is_continuous = not is_discrete
        total_games = self.batch_size // self.seq_len
        self.num_games_batch = self.minibatch_size // self.seq_len
        self.game_indexes = torch.arange(total_games, dtype=torch.long, device=self.device)
        self.flat_indexes = torch.arange(total_games * self.seq_len, dtype=torch.long, device=self.device).reshape(
            total_games, self.seq_len)
        # Keys needing special (non-slicing) handling in __getitem__.
        self.special_names = ['rnn_states']

    def update_values_dict(self, values_dict):
        """Install the rollout tensors this dataset slices from."""
        self.values_dict = values_dict

    def update_mu_sigma(self, mu, sigma):
        """Write updated policy mu/sigma back into the rollout for the most
        recently served minibatch (requires a prior __getitem__ to have set
        self.last_range)."""
        start = self.last_range[0]
        end = self.last_range[1]
        self.values_dict['mu'][start:end] = mu
        self.values_dict['sigma'][start:end] = sigma

    def __len__(self):
        return self.length

    def _get_item_rnn(self, idx):
        """Minibatch `idx` in RNN mode: slice whole games plus the matching
        per-game slice of the RNN states."""
        gstart = idx * self.num_games_batch
        gend = (idx + 1) * self.num_games_batch
        start = gstart * self.seq_len
        end = gend * self.seq_len
        self.last_range = (start, end)
        input_dict = {}
        for k, v in self.values_dict.items():
            if k not in self.special_names:
                # Bug fix: the original wrote `if v is dict:`, comparing the
                # value against the dict type object itself (always False);
                # use the same check as _get_item below.
                if type(v) is dict:
                    v_dict = {kd: vd[start:end] for kd, vd in v.items()}
                    input_dict[k] = v_dict
                else:
                    if v is not None:
                        input_dict[k] = v[start:end]
                    else:
                        input_dict[k] = None
        rnn_states = self.values_dict['rnn_states']
        # Slice the game dimension (dim 1) of each state tensor.
        input_dict['rnn_states'] = [s[:, gstart:gend, :].contiguous() for s in rnn_states]
        return input_dict

    def _get_item(self, idx):
        """Minibatch `idx` in feed-forward mode: plain flat slicing."""
        start = idx * self.minibatch_size
        end = (idx + 1) * self.minibatch_size
        self.last_range = (start, end)
        input_dict = {}
        for k, v in self.values_dict.items():
            if k not in self.special_names and v is not None:
                if type(v) is dict:
                    v_dict = {kd: vd[start:end] for kd, vd in v.items()}
                    input_dict[k] = v_dict
                else:
                    input_dict[k] = v[start:end]
        return input_dict

    def __getitem__(self, idx):
        if self.is_rnn:
            sample = self._get_item_rnn(idx)
        else:
            sample = self._get_item(idx)
        return sample
class StateBasedDataset:
    """Demonstration dataset of (observation, action) pairs.

    Loads either a pickle of trajectories (each with 'observations' and
    'actions') or an HDF5 file with a "data" group, and serves batches for
    behavior cloning.
    """

    def __init__(self, dataset_path: str):
        # A missing/empty path yields an empty dataset; get_random_batch
        # raises RuntimeError in that case.
        if not Path(dataset_path).exists() or dataset_path == "":
            print(f"Dataset file {dataset_path} not exist")
            self.length = 0
            return
        self.is_hdf5 = False
        if dataset_path.endswith(("pkl", "pickle")):
            data = np.load(dataset_path, allow_pickle=True)
            data_obs = []
            data_action = []
            for k, v in data.items():
                data_obs.append(v['observations'])
                data_action.append(v['actions'])
            self.data_obs = np.concatenate(data_obs, axis=0)
            self.data_action = np.concatenate(data_action, axis=0)
            assert len(self.data_obs) == len(self.data_action), "Demo Dataset Error: Obs num does not match Action num."
            self.length = len(self.data_obs)
        elif dataset_path.endswith("hdf5"):
            import h5py
            self.is_hdf5 = True
            # Keep the file handle open; datasets are read lazily per batch.
            self.file = h5py.File(dataset_path)
            self.data = self.file["data"]
            self.data_obs = self.data["observations"]
            self.data_action = self.data["actions"]
            self.length = np.prod(self.data_obs.shape[:-1])
        self.data_ind = 0  # read cursor used by the sequential HDF5 path
        print("Demo dataset loaded, length = {}".format(self.length))

    def __len__(self):
        return self.length

    def get_random_batch(self, batchsize):
        """Return (obs, actions) arrays of size `batchsize`.

        Pickle data is sampled uniformly at random; HDF5 data is read
        sequentially with wrap-around (the random `idxes` are unused on
        that path).
        """
        if self.length == 0:
            raise RuntimeError
        idxes = np.random.randint(0, self.length, batchsize)
        if not self.is_hdf5:
            batch_obs = self.data_obs[idxes]
            batch_actions = self.data_action[idxes]
        else:
            ind_after = self.data_ind + batchsize
            if ind_after <= self.length:
                batch_obs = self.data_obs[self.data_ind: ind_after, ...]
                batch_actions = self.data_action[self.data_ind: ind_after, ...]
                self.data_ind = ind_after
            else:
                # Wrap around: take the tail, then the head of the dataset.
                addition_ind = batchsize - (self.length - self.data_ind)
                batch_obs_before = self.data_obs[self.data_ind:, ...]
                batch_obs_after = self.data_obs[:addition_ind, ...]
                batch_obs = np.concatenate([batch_obs_before, batch_obs_after])
                batch_actions_before = self.data_action[self.data_ind:, ...]
                batch_actions_after = self.data_action[:addition_ind, ...]
                batch_actions = np.concatenate([batch_actions_before, batch_actions_after])
                self.data_ind = addition_ind
        return batch_obs, batch_actions
class DemoAugmentedPPODataset(Dataset):
    """PPO dataset augmented with demonstration batches (DAPG-style).

    Wraps a PPODataset and, for every minibatch, attaches a batch of
    (demo_obs, demo_actions) drawn from a StateBasedDataset.
    """

    def __init__(self, ppo_dataset, dataset_path, demo_batch_size):
        self.demo_dataset = StateBasedDataset(dataset_path)
        self.ppo_dataset = ppo_dataset
        self.dapg_batch_size = demo_batch_size
        self.device = self.ppo_dataset.device
        self.is_rnn = self.ppo_dataset.is_rnn

    def update_values_dict(self, values_dict):
        """Forward the rollout tensors to the wrapped PPO dataset."""
        self.ppo_dataset.values_dict = values_dict

    def update_mu_sigma(self, mu, sigma):
        self.ppo_dataset.update_mu_sigma(mu, sigma)

    def __len__(self):
        return self.ppo_dataset.length

    @property
    def demo_len(self):
        return self.demo_dataset.length

    def get_discrim_batch(self):
        """Batch for a discriminator: demo pairs plus an equally sized
        random sample of on-policy (obs, actions) pairs."""
        input_dict = self.get_bc_batch(self.dapg_batch_size)
        # Bug fix: the rollout values live on the wrapped PPO dataset, and
        # the attribute is dapg_batch_size — the original referenced
        # self.values_dict and self.dapg_batchsize, neither of which exists.
        obs = self.ppo_dataset.values_dict['obs']
        actions = self.ppo_dataset.values_dict['actions']
        idxes = np.random.randint(0, len(obs), self.dapg_batch_size)
        input_dict['obs'] = obs[idxes]
        input_dict['actions'] = actions[idxes]
        return input_dict

    def get_bc_batch(self, batch_size):
        """Draw a behavior-cloning batch of demo observations/actions as
        float tensors on the target device."""
        input_dict = {}
        demo_obs, demo_actions = self.demo_dataset.get_random_batch(batch_size)
        input_dict['demo_obs'] = torch.from_numpy(demo_obs).float().to(self.device)
        input_dict['demo_actions'] = torch.from_numpy(demo_actions).float().to(self.device)
        return input_dict

    def _get_item(self, idx):
        # PPO minibatch plus an attached demonstration batch.
        input_dict = self.ppo_dataset._get_item(idx)
        demo_dict = self.get_bc_batch(self.dapg_batch_size)
        input_dict.update(demo_dict)
        return input_dict

    def __getitem__(self, idx):
        if self.is_rnn:
            raise NotImplementedError
        else:
            sample = self._get_item(idx)
        return sample
class DatasetList(Dataset):
    """Concatenation of equally sized datasets, interleaved by index."""

    def __init__(self):
        self.dataset_list = []

    def __len__(self):
        # All stored datasets are assumed to share the first one's length.
        return self.dataset_list[0].length * len(self.dataset_list)

    def add_dataset(self, dataset):
        # Deep-copy so later mutation of the source dataset does not leak in.
        self.dataset_list.append(copy.deepcopy(dataset))

    def clear(self):
        self.dataset_list = []

    def __getitem__(self, idx):
        # Interleave: consecutive indices cycle through the datasets.
        in_idx, ds_idx = divmod(idx, len(self.dataset_list))
        return self.dataset_list[ds_idx][in_idx]
|
{"hexsha": "c2251bf37c21b5ae2fabc77dc550f0a58129024c", "size": 7775, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl_games/common/datasets.py", "max_stars_repo_name": "yzqin/rl_games", "max_stars_repo_head_hexsha": "6e09fec1e60d70c1dc1934ec65ed3265950a8c34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rl_games/common/datasets.py", "max_issues_repo_name": "yzqin/rl_games", "max_issues_repo_head_hexsha": "6e09fec1e60d70c1dc1934ec65ed3265950a8c34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl_games/common/datasets.py", "max_forks_repo_name": "yzqin/rl_games", "max_forks_repo_head_hexsha": "6e09fec1e60d70c1dc1934ec65ed3265950a8c34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3317757009, "max_line_length": 120, "alphanum_fraction": 0.6083601286, "include": true, "reason": "import numpy", "num_tokens": 1752}
|
\chapter{Physical Interaction}\label{ch:interaction}
\index{interaction!physical}
As pointed out before, grounding an ontology and lexicon is supposed to be influenced to a great deal by agents' physical interaction with their environment. In this chapter several influences of this physical interaction are investigated. The robots' interaction schemes are varied in two cases, the physical body has been changed in one experiment, and in yet another experiment both the environment and the physical body have been changed. Naturally, all experiments are compared with the basic experiment.
\p
In the basic experiment, the robots decided to stop after two rotations based on finding a maximum intensity of IR on one back IR sensor. In the description of the model the robots stopped after they aligned their backs towards each other using IR taxis (section \ref{s:robots:PDL}). In section \ref{s:int:taxis} two experiments are shown where taxis is used to align the robots. In the first experiment it was observed that the gearings of the robots were worn off. In the second experiment the gearings were replaced by new ones. These experiments show how coordination abilities and physical fitness may influence the quality of interactions.
In \cite{steelsvogt:1997} the robots did not rotate twice aligning back-to-back while doing the perception, but only once aligning face-to-face. This experiment has been repeated in section \ref{s:int:original} to see what the differences are.
The adaptation of an agent to its environment and the agent's ability to perceive the environment with enough precision is likely to be very important. In section \ref{s:int:close} the environment and robots are changed such that the resolution of the robots decreases.
In all the above experiments there were constantly 4 light sources present in the robots' environment. What happens when in each situation there are only 3 light sources present, while the robots' niche has 4 light sources? This question shall be investigated in section \ref{s:int:3refs}.
\section{Alignment Using IR Taxis}\label{s:int:taxis}
\index{infrared taxis|see{phototaxis}}
\index{phototaxis}
Applying IR taxis (or taxis for short) for alignment has been explained in the original description of the model in chapter \ref{ch:lg}, so it will not be repeated here. When the robots use taxis their alignment is better than when they use one IR sensor and finding the maximum intensity. However, the physical behavior is more complex and less games succeed in performing the complete physical behavior. Thus the acquisition of sensory data takes much more time. A reason why for other data sets the maximum intensity condition was used.
Although the robots are better at aligning each other, it is not likely that the performance increases because the perception is done in the middle of the two rotations. Perhaps the influence of worn off gearings can be observed.
\index{gearings|(}
\subsection{Worn off gearings}
The data in this experiment has been recorded using taxis. During the recording it was found that the gearings were worn off, causing the robots to move less smoothly than they would when the gearings are brand new. A data set of 606 situations has been recorded\footnote{The fact that the robots had problems in rotating was the reason why the data recording has been abandoned at this early stage.}. The average context size per situation was 3.64 for robot r0 and 3.83 for r1, so the a priori communicative success would be 0.268. The potential understandability is 0.897 for r0 and 0.824 for robot r1. It seems as if the slower rotation of the robots allowed them to perceive more coherent contexts. Off-board processing of the data is done exactly as in the basic experiment.
\p
Table \ref{t:int:taxis} shows the averaged measures of 10 runs of 5,000 language games. The experiment is in most ways similar to the basic experiment. Only the discrimination game is more successful (approximately 2 \%, $p=0.0004$). Specificity is higher and consistency is lower, but their significance is low ($p=0.1704$ and $p=0.2798$ resp.).
\begin{table}
\centering
\begin{tabular}{||l|c|c||}
\hline\hline
Score & Avg & Std\\\hline
CS & 0.350 & 0.005\\\hline
DS0 & 0.945 & 0.006\\\hline
DS1 & 0.944 & 0.001\\\hline
D0 & 0.956 & 0.000\\\hline
D1 & 0.960 & 0.000\\\hline
P0 & 0.852 & 0.006\\\hline
P1 & 0.880 & 0.007\\\hline
S0 & 0.849 & 0.006\\\hline
S1 & 0.869 & 0.011\\\hline
C0 & 0.802 & 0.001\\\hline
C1 & 0.828 & 0.006\\\hline
\hline
\end{tabular}
\caption{The average results of the experiment involving taxis and old gearings.}
\label{t:int:taxis}
\end{table}
So, although the gearings of the robots were really at their ends, the communication system that emerges is not worse than the basic experiment. Question is if this result is biased by the fact that this data set only consists of 606 situations rather than 1,000. Table \ref{t:int:basis606} presents the results of the basic experiment using 606 situations taken from the basic data set used. The table shows that using only 606 situations does not alter the results of the basic experiment very much, so the smaller data set does not really bias the experiment.
\begin{table}
\centering
\begin{tabular}{||l|c|c||}
\hline\hline
Score & Avg & Std\\\hline
CS & 0.354 & 0.016\\\hline
DS0 & 0.794 & 0.009\\\hline
DS1 & 0.816 & 0.008\\\hline
D0 & 0.959 & 0.000\\\hline
D1 & 0.960 & 0.001\\\hline
P0 & 0.869 & 0.004\\\hline
P1 & 0.877 & 0.002\\\hline
S0 & 0.849 & 0.019\\\hline
S1 & 0.853 & 0.007\\\hline
C0 & 0.820 & 0.014\\\hline
C1 & 0.831 & 0.014\\\hline
\hline
\end{tabular}
\caption{The results of the basic experiments using only 606 situations from the basic data set.}
\label{t:int:basis606}
\end{table}
\subsection{The Repaired Robots: New Gearings}
\index{data set!taxis \& gearing}
\index{a priori success}
\index{understandability}
The results of the experiment where the robots controlled their physical behavior is shown in table \ref{t:int:gearing}. The rest of the experimental setup is like the previous experiment. 934 situations have been recorded in the so-called {\em taxis \& gearing data set}. From this data set it is found that the average context sizes of the two robots are 3.283 and 3.485, thus the a priori communicative success is: 0.296. The potential understandability is 0.808 for robot r0 and 0.779 for r1. This is lower than in the previous experiment and approximately equal to the basic data set. It is important to note that the robot now moves at a higher speed than with worn off gearings. Rotating faster reduces the resolution of the perception. Hence the robots may miss a small perturbation of a distant light source. The basic data set has been recorded immediately after this experiment, so there the gearings were still okay.
The communicative success is 2.8 \% better than the basic experiment ($p=0.1230$). It is also better than the taxis experiment with old gearings with a significance of $p=0.0752$. The discriminative success is more or less equal compared with the basic experiment and is $\pm 2.5$ \% lower for the old gearings ($p=0.0008$). There are no significant differences when comparing the distinctiveness, parsimony, specificity and consistency with the two other experiments. So, also using new gearings does not influence the ability for the robots to construct ground a language very much. Slight improvement is found when comparing to the basic experiment, although this is not significant.
\begin{table}
\centering
\begin{tabular}{||l|c|c||}
\hline\hline
Score & Avg & Std\\\hline
CS & 0.379 & 0.013\\\hline
DS0 & 0.918 & 0.003\\\hline
DS1 & 0.917 & 0.003\\\hline
D0 & 0.959 & 0.000\\\hline
D1 & 0.960 & 0.001\\\hline
P0 & 0.864 & 0.001\\\hline
P1 & 0.858 & 0.002\\\hline
S0 & 0.837 & 0.014\\\hline
S1 & 0.824 & 0.018\\\hline
C0 & 0.803 & 0.006\\\hline
C1 & 0.794 & 0.004\\\hline
\hline
\end{tabular}
\caption{Results of 10 runs of 5,000 language games in which the robots got new gearings.}
\label{t:int:gearing}
\end{table}
\p
So, it seems that when the robots can better control their movements by using new gearings, their ability is slightly better than when they use old gearings. It is striking, however, that in comparison to the basic experiment the worn off gearings experiment outperforms the basic experiment in some ways. The first important difference is the discrimination success. The second difference is that the potential understandability is lower in the `new gearings' data set than in the `worn off gearings' data set.
\index{gearings|)}
\section{Face-to-face alignment}\label{s:int:original}
In the original implementation the robots rotate only once starting face-to-face \cite{steelsvogt:1997} rather than rotating twice and starting back-to-back. When the robots rotate once they immediately start the perception, while when rotating twice they start perception when the rotating robot faces its opponent. This way the robot is already moving at a constant speed, whereas in the original implementation the robots first have to accelerate. When the robots first have to accelerate, the landscape view initially is somewhat warped.
Statistics of the recorded data set yielded the following: The data set has 1360 recorded situations. The average context sizes are 3.546 for r0 and 3.354 for r1, yielding an a priori communicative success of 0.290. Robot r0 has a potential understandability of 0.722 and r1's potential understandability is 0.764. The context size is almost the same as in the basic experiment, but the potential understandability is pretty much lower. This latter finding is likely to be caused by the fact that it is not assured that the robots really rotate $360^o$. However, it may also be caused by the initial acceleration phase.
This lower understandability seems to have little effect on the results (table \ref{t:int:original}). The distinctive success is about 3 \% lower, which is significant ($p=0.0000$). Also the communicative success is lower: 2 \%, but with $p=0.0770$. All other differences are insignificant. So, the onset of acceleration cannot be observed as an important difference. However, when pointing is involved using the physical method this has much influence (see next chapter).
\begin{table}
\centering
\begin{tabular}{||l|c|c||}
\hline\hline
Score & Avg & Std\\\hline
CS & 0.331 & 0.008\\\hline
DS0 & 0.883 & 0.002\\\hline
DS1 & 0.891 & 0.001\\\hline
D0 & 0.957 & 0.011\\\hline
D1 & 0.956 & 0.011\\\hline
P0 & 0.861 & 0.012\\\hline
P1 & 0.855 & 0.009\\\hline
S0 & 0.826 & 0.009\\\hline
S1 & 0.823 & 0.009\\\hline
C0 & 0.818 & 0.007\\\hline
C1 & 0.809 & 0.009\\\hline
\hline
\end{tabular}
\caption{The average results of 10 runs of 5,000 language games where the robots rotated only once during the perception.}
\label{t:int:original}
\end{table}
\section{Reducing environmental distinctiveness}\label{s:int:close}
In the environment used so far, the light sources and sensors were placed at different heights with a difference of 3.9 cm. This way the environment was made rather distinctive as can be seen in figure \ref{f:robots:calibration} on page \pageref{f:robots:calibration}. What happens if the difference in heights are made smaller? Naturally it is expected that the robots have more difficulty in discriminating and identifying the light sources.
\index{sensors!white light|(}
In this experiment the difference in heights were reduced to 1.9 cm. Figure \ref{f:int:calibration} shows the characteristics of the sensors as measured for different distances when facing a light source. It is obvious that the further a robot gets away from the light source, the closer the different sensor readings are. Furthermore, it should be clear that when the distance between robot and light source is larger, correspondence between sensor and light source is unreliable. Hence, the feedback mechanism is unreliable. Interesting to see is that when the robot is close to the light source the non-corresponding sensors hardly sense light, but up to 40 cm the intensities increase. This is because at close distance the light source is invisible for these sensors and at larger distance the divergent light emission falls on the sensors.
\begin{figure}
\subfigure[L0]{\psfig{figure=physical//sensors0.eps,width=5.6cm}}
\subfigure[L1]{\psfig{figure=physical//sensors1.eps,width=5.6cm}}\\
\subfigure[L2]{\psfig{figure=physical//sensors2.eps,width=5.6cm}}
\subfigure[L3]{\psfig{figure=physical//sensors3.eps,width=5.6cm}}
\caption{The characteristics of sensors s0, s1, s2 and s3 of robot r0 while looking at light sources (a) L0, (b) L1, (c) L2 and (d) L3. The light sources are placed at heights with a difference of 1.9 cm in between. Note that the characteristics of L3 may be inaccurate since the characteristics is quite different from all other characteristics.}
\label{f:int:calibration}
\end{figure}
\p
The statistics of the data set reveal the following: The context sizes are 3.530 (r0) and 3.483 (r1), thus the a priori communicative success is 0.285. Potential understandability is $0.639\pm 0.292$ (r0) and $0.679 \pm 0.321$ (r1). Again this is much smaller than in the basic experiment. Note that these are unreliable statistics since the method for relating an observation to a referent is not correct when the robots are at larger distances (see figure \ref{f:int:calibration}). The recorded data set has 953 situations.
Again 10 runs of 5,000 language games are done. The results are presented in table \ref{t:int:close}. The communicative success is around the a priori value; its significance in comparison to the basic experiment is $p=0.0000$. The discriminative success is similar to the basic experiment.
The distinctiveness seems approximately the same as in the basic experiment, but its $p$-value is $p=0.0114$, which is not very high. So, it seems likely that the two experiments yield different distinctiveness, but its difference is not large ($\leq 0.002$). Since the difference is so small, no further implications will be made.
Besides the specificity which does not show a significant difference, the parsimony and consistency ($p=0.0068$ and $p=0.0028$ resp.) are significantly different and lower than in the basic experiment. Obviously this has to do with the large overlap in the sensory characteristics.
\begin{table}
\centering
\begin{tabular}{||l|c|c||}
\hline\hline
Score & Avg & Std\\\hline
CS & 0.281 & 0.007\\\hline
DS0 & 0.913 & 0.004\\\hline
DS1 & 0.917 & 0.004\\\hline
D0 & 0.954 & 0.002\\\hline
D1 & 0.955 & 0.002\\\hline
P0 & 0.823 & 0.001\\\hline
P1 & 0.822 & 0.001\\\hline
S0 & 0.829 & 0.015\\\hline
S1 & 0.840 & 0.014\\\hline
C0 & 0.778 & 0.008\\\hline
C1 & 0.778 & 0.007\\\hline
\hline
\end{tabular}
\caption{The results of an experiment where the distance between light source heights have been made closer.}
\label{t:int:close}
\end{table}
\index{sensors!white light|)}
\section{A dynamic environment}\label{s:int:3refs}
This section presents an experiment where in every situation the robots recorded there were only three light sources present. The height of the light sources were the same as in the basic experiment. After every few games, one of the light sources was removed and the one that was already out of the environment has been placed back. Whereas in the other experiments all light sources stayed roughly at the same place, the position of the light sources changed in this experiment as well. This way a dynamic environment was created.
A data set of 980 situations has been recorded. The average context sizes were measured to be 2.857 for r0 and 2.899 for r1, yielding an a priori communicative success of 0.347. The potential understandability was $0.757 \pm 0.314$ for $r0$ and $0.738 \pm 0.308$ for r1.
Table \ref{t:int:3refs} shows the results of an experiment of 10 runs of 5,000 language games. The communicative success is about 2.5 \% higher than the a priori value, and about 2 \% higher than the basic experiment, but this latter result is not very significant ($p=0.0892$). Distinctiveness, specificity, parsimony and consistency show no significant difference with the basic experiment. Discriminative success looks higher than in the basic experiment, but its significance is low: $p=0.0630$.
\p
The fact that the CS is only slightly higher than the a priori success makes it hard to draw a meaningful conclusion. Nevertheless, it seems that the robots perform as if there are 4 referents. This is of course correct, there are 4 referents in the world, but in each situation there are only 3. This may be why the robots have some difficulty in performing with the same specificity and consistency as when all referents are continuously in their proximity.
\begin{table}
\centering
\begin{tabular}{||l|c|c||}
\hline\hline
Score & Avg & Std\\\hline
CS & 0.372 & 0.018\\\hline
DS0 & 0.927 & 0.005\\\hline
DS1 & 0.932 & 0.003\\\hline
D0 & 0.959 & 0.000\\\hline
D1 & 0.958 & 0.000\\\hline
P0 & 0.858 & 0.003\\\hline
P1 & 0.847 & 0.001\\\hline
S0 & 0.807 & 0.009\\\hline
S1 & 0.812 & 0.007\\\hline
C0 & 0.814 & 0.008\\\hline
C1 & 0.812 & 0.008\\\hline
\hline
\end{tabular}
\caption{Results of 10 runs of 5,000 language games in which the environment consists of only 3 referents.}
\label{t:int:3refs}
\end{table}
\section{Summary}
In this chapter the influence of different types physical interactions have been explored. In the first experiment IR taxis was used to let the robots align to each other after perception. In this experiment it has been observed while recording the data that the gearings were worn out. In the second experiment these have been replaced by new ones, still using taxis. The third experiment was setup like the original implementation \cite{steelsvogt:1997} rotating only once to do the perception. The environment and the robots were changed in the fourth experiment. The light sources were placed at heights that are closer to one another; the sensors were adjusted accordingly. In the last experiment the robots played language games where for each situation there were only three referents. Figure \ref{f:int:results} shows an overview of the results.
\begin{figure}
\subfigure[CS]{\psfig{figure=physical//cs.eps,width=5.6cm}}
\subfigure[DS]{\psfig{figure=physical//ds.eps,width=5.6cm}}\\
\subfigure[S]{\psfig{figure=physical//spec.eps,width=5.6cm}}
\subfigure[D]{\psfig{figure=physical//dist.eps,width=5.6cm}}\\
\subfigure[C]{\psfig{figure=physical//cons.eps,width=5.6cm}}
\subfigure[P]{\psfig{figure=physical//pars.eps,width=5.6cm}}
\caption{An overview of the results of the experiments presented in this chapter. The experiments investigating the influence of taxis with old gearings (T) and new gearings (G), one rotation (O), different heights (H) and 3 referents (3) are compared with the basic experiment (B).}
\label{f:int:results}
\end{figure}
\p
A striking result has been observed when the robots use taxis with or without new gearings. These experiments are qualitatively more or less similar to the basic experiment. Differences in discrimination success in the taxis experiment with old gearings may lie in the fact that this was the first experiment after the sensors have been calibrated. It is not unlikely that the accuracy of the sensors become less reliable through time. That taxis as such has no influence on the language games is because the perception is completed before the robots start taxis. Beginning and end point of the perception are the same, so the robots are well capable of doing perception for $360^o$ in experiments with or without taxis.
The experiments where the robots rotate only $360^o$ and where there are only three referents present are also qualitatively similar to the basic experiment. So, the slow onset of movement has little impact on the robots' performance in these experiments. Furthermore, the robots seem to be well capable of dealing with a dynamic environment. Although the a priori success is higher, the robots appear to perform as if there are four referents. All these experiments show that the data recording can be repeated without influencing the experiments very much.
When the environment is changed such that it is less distinctive the performance is significantly worse than the basic experiment. Surprising is that this does not hold for the discrimination success. It seems to have more impact on the ability to provide reliable feedback. However, the results might indicate the importance of agents' physical adaptation to their environment as a basis for language origins.
\p
Physical interactions are also a part of how joint attention and feedback can be provided to the agents. However, these processes additionally require cognitive capabilities. Experiments investigating the influence of these interaction strategies are presented in the next chapter.
|
{"hexsha": "17b801b47d28914d6a93080f37851ea83b074087", "size": 20858, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "physical/physical.tex", "max_stars_repo_name": "langsci/Vogt", "max_stars_repo_head_hexsha": "bbec105485e4641c61e0df6157f62dccf61d6f93", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-04-13T13:08:09.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-13T13:08:09.000Z", "max_issues_repo_path": "physical/physical.tex", "max_issues_repo_name": "langsci/Vogt", "max_issues_repo_head_hexsha": "bbec105485e4641c61e0df6157f62dccf61d6f93", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "physical/physical.tex", "max_forks_repo_name": "langsci/Vogt", "max_forks_repo_head_hexsha": "bbec105485e4641c61e0df6157f62dccf61d6f93", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 83.7670682731, "max_line_length": 928, "alphanum_fraction": 0.7736120433, "num_tokens": 5452}
|
# -*- coding: utf-8 -*-
#############################################################
# IMPORTS #
#############################################################
## --> GUI
from PySide6 import QtCore, QtGui, QtWidgets
from Order_data.ui_main import Ui_MainWindow
## --> GLOBAL IMPORTS
import os
import sys
import re
from time import sleep
from PIL import Image, ImageFile, ImageDraw, ImageChops, ImageFont
import numpy as np
from numpy import linalg
#############################################################
# PATH #
#############################################################
# Run everything relative to the directory containing this script so that
# relative paths (input images, Order_data assets) resolve correctly.
PATH = os.path.dirname(os.path.abspath(__file__))
os.chdir(PATH)
#############################################################
# CONTENT #
#############################################################
# Tolerate partially written / corrupt image files instead of raising on load.
ImageFile.LOAD_TRUNCATED_IMAGES = True
EXTENSION = (".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG")
# All candidate images in the working directory, excluding the watermark file.
FOLDER = [file for file in sorted(os.listdir()) if file.endswith(EXTENSION) and not file == "watermark.png"]
TOTAL = len(FOLDER)
Y_OFFSET = 4 ## Amount of offset to get people faces on the combined images.
PIXEL_SIZE = 720 ## Maximum pixel width if image is reduced before processing (line 321).
## Size and position of each element with their mask then on the order sheet.
## For this to work, the background image MUST have a bit of transparency !
## Here each background has at least a 1px thick transparent border (generally at the bottom)
############################################
###### FICHE ######
## Order sheet ("fiche") template, watermark overlay, and layout anchors.
## Positions are derived from the template's own pixel size.
FICHE = Image.open(f"{PATH}/Order_data/FICHE.png")
WATERMARK = Image.open(f"{PATH}/Order_data/watermark.png")
THUMB_SIZE = 155 #px
# Position and font size of the order number printed on the sheet.
ORDER_X = int(FICHE.width * 0.016)
ORDER_Y = int(FICHE.height * 0.94)
ORDER_FONT_SIZE = 50
ORDER_START = 1
# Box of the large photo thumbnail on the order sheet.
BIG_THUMB_LEFT = int(FICHE.width * 0.34)
BIG_THUMB_UP = int(FICHE.height * 0.69)
BIG_THUMB_RIGHT = int(FICHE.width * 0.66)
BIG_THUMB_DOWN = int(FICHE.height * 0.98)
####### MUG #######
## Mug template + alpha mask: photo paste box (px) on the template, plus
## where the finished mock-up is placed on the order sheet.
MUG = Image.open(f"{PATH}/Order_data/MUG.png")
MUG_ALPHA = Image.open(f"{PATH}/Order_data/MUG_ALPHA.png")
MUG_LEFT = 100
MUG_UP = 420
MUG_RIGHT = 1510
MUG_DOWN = 2080
MUG_THUMB_UP = int(FICHE.height * 0.45)
MUG_THUMB_LEFT = int(FICHE.width * 0.35)
####### CALENDRIER #######
## Calendar template (photo box expressed relative to the template size).
CALENDRIER = Image.open(f"{PATH}/Order_data/CALENDRIER.png")
CALENDRIER_ALPHA = Image.open(f"{PATH}/Order_data/CALENDRIER_ALPHA.png")
CALENDRIER_LEFT = int(CALENDRIER.width * 0.03)
CALENDRIER_UP = int(CALENDRIER.height * 0.02)
CALENDRIER_RIGHT = int(CALENDRIER.width * 0.96)
CALENDRIER_DOWN = int(CALENDRIER.height * 0.42)
CALENDRIER_THUMB_UP = int(FICHE.height * 0.18)
CALENDRIER_THUMB_LEFT = int(FICHE.width * 0.675)
####### MAGNET #######
## Round magnet template.
MAGNET = Image.open(f"{PATH}/Order_data/MAGNET_ROND.png")
MAGNET_ALPHA = Image.open(f"{PATH}/Order_data/MAGNET_ROND_ALPHA.png")
MAGNET_LEFT = 78
MAGNET_UP = 157
MAGNET_RIGHT = 1708
MAGNET_DOWN = 1634
MAGNET_THUMB_UP = int(FICHE.height * 0.15)
MAGNET_THUMB_LEFT = int(FICHE.width * 0.35)
####### PORTE-CLEF #######
## Key-ring ("porte-clef") template.
ID = Image.open(f"{PATH}/Order_data/PORTE-CLEF.png")
ID_ALPHA = Image.open(f"{PATH}/Order_data/PORTE-CLEF_ALPHA.png")
ID_LEFT = 467
ID_UP = 451
ID_RIGHT = 1310
ID_DOWN = 1498
ID_THUMB_UP = int(FICHE.height * 0.3)
ID_THUMB_LEFT = int(FICHE.width * 0.35)
####### PLUMIER #######
## Pen box ("plumier") template (no order-sheet thumbnail position defined).
PLUMIER = Image.open(f"{PATH}/Order_data/PLUMIER.png")
PLUMIER_ALPHA = Image.open(f"{PATH}/Order_data/PLUMIER_ALPHA.png")
PLUMIER_LEFT = 153
PLUMIER_UP = 302
PLUMIER_RIGHT = 1980
PLUMIER_DOWN = 734
####### CADRE #######
## Photo frame ("cadre") template.
CADRE = Image.open(f"{PATH}/Order_data/CADRE.png")
CADRE_ALPHA = Image.open(f"{PATH}/Order_data/CADRE_ALPHA.png")
CADRE_LEFT = 575
CADRE_UP = 390
CADRE_RIGHT = 1585
CADRE_DOWN = 1800
CADRE_THUMB_UP = 1105
CADRE_THUMB_LEFT = 107
####### PASSE #######
## Passe-partout mount template.
PASSE = Image.open(f"{PATH}/Order_data/PASSE.png")
PASSE_ALPHA = Image.open(f"{PATH}/Order_data/PASSE_ALPHA.png")
PASSE_LEFT = 316
PASSE_UP = 541
PASSE_RIGHT = 1138
PASSE_DOWN = 1647
PASSE_THUMB_UP = 1916
PASSE_THUMB_LEFT = 107
####### SUPPORT_BOIS #######
## Wooden stand ("support bois") template.
BOIS = Image.open(f"{PATH}/Order_data/SUPPORT_BOIS.png")
BOIS_ALPHA = Image.open(f"{PATH}/Order_data/SUPPORT_BOIS_ALPHA.png")
BOIS_LEFT = 442
BOIS_UP = 442
BOIS_RIGHT = 1348
BOIS_DOWN = 1601
BOIS_THUMB_UP = 1510
BOIS_THUMB_LEFT = 107
####### BOULE A NEIGE #######
## Snow globe ("boule à neige") template.
NEIGE = Image.open(f"{PATH}/Order_data/NEIGE.png")
NEIGE_ALPHA = Image.open(f"{PATH}/Order_data/NEIGE_ALPHA.png")
NEIGE_LEFT = 176
NEIGE_UP = 235
NEIGE_RIGHT = 1610
NEIGE_DOWN = 1483
NEIGE_THUMB_UP = int(FICHE.height * 0.1)
NEIGE_THUMB_LEFT = int(FICHE.width * 0.675)
####### VOEUX 01 #######
## Greeting card ("voeux") template, variant 1.
VOEUX01 = Image.open(f"{PATH}/Order_data/VOEUX01.png")
VOEUX01_ALPHA = Image.open(f"{PATH}/Order_data/VOEUX01_ALPHA.png")
VOEUX01_LEFT = 130
VOEUX01_UP = 118
VOEUX01_RIGHT = 1075
VOEUX01_DOWN = 1064
VOEUX01_THUMB_UP = int(FICHE.height * 0.25)
VOEUX01_THUMB_LEFT = int(FICHE.width * 0.675)
###### VOEUX 02 #######
## Greeting card template, variant 2 (photo box relative to template size).
VOEUX02 = Image.open(f"{PATH}/Order_data/VOEUX02.png")
VOEUX02_ALPHA = Image.open(f"{PATH}/Order_data/VOEUX02_ALPHA.png")
VOEUX02_LEFT = int(VOEUX02.width * 0.06)
VOEUX02_UP = int(VOEUX02.height * 0.09)
VOEUX02_RIGHT = int(VOEUX02.width * 0.94)
VOEUX02_DOWN = int(VOEUX02.height * 0.91)
VOEUX02_THUMB_UP = int(FICHE.height * 0.32)
VOEUX02_THUMB_LEFT = int(FICHE.width * 0.675)
# ####### GOURDE #######
# GOURDE = Image.open(f"{PATH}/Order_data/GOURDE.png")
# GOURDE_ALPHA = Image.open(f"{PATH}/Order_data/GOURDE_ALPHA.png")
# GOURDE_LEFT = 613
# GOURDE_UP = 910
# GOURDE_RIGHT = 1538
# GOURDE_DOWN = 2106
# GOURDE_THUMB_UP = int(FICHE.height * 0.2)
# GOURDE_THUMB_LEFT = int(FICHE.width * 0.675)
# ####### PORTEFEUILLE #######
# PORTEFEUILLE = Image.open(f"{PATH}/Order_data/PORTEFEUILLE.png")
# PORTEFEUILLE_ALPHA = Image.open(f"{PATH}/Order_data/PORTEFEUILLE_ALPHA.png")
# PORTEFEUILLE_LEFT = 280
# PORTEFEUILLE_UP = 534
# PORTEFEUILLE_RIGHT = 2023
# PORTEFEUILLE_DOWN = 1574
# PORTEFEUILLE_THUMB_UP = int(FICHE.height * 0.3)
# PORTEFEUILLE_THUMB_LEFT = int(FICHE.width * 0.675)
#############################################################
# GUI CLASS #
#############################################################
class Order(QtWidgets.QMainWindow):
    def __init__(self):
        """Build the main window: frameless and translucent with a drop
        shadow, wire up the labels/progress bar/button, then show it."""
        QtWidgets.QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        ## UI --> INTERFACE CODE
        ############################################
        ## REMOVE TITLE BAR
        self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        ## DROP SHADOW EFFECT
        self.shadow = QtWidgets.QGraphicsDropShadowEffect(self)
        self.shadow.setBlurRadius(21)
        self.shadow.setXOffset(0)
        self.shadow.setYOffset(0)
        self.shadow.setColor(QtGui.QColor(0, 0, 0, 64))
        self.ui.drop_shadow_frame.setGraphicsEffect(self.shadow)
        ## LABEL DESCRIPTION
        # self.ui.label_description.setText(f"Recadrage en <strong>{MAXSIZE[0]}</strong>px")
        ## LABEL COUNTER
        self.ui.label_counter.setText(f"<strong>{TOTAL}</strong> FICHIERS TROUVES")
        ## PROGRESS BAR
        # Hidden until processing starts; maximum is set by the worker.
        self.ui.progressBar.setVisible(False)
        self.ui.progressBar.setValue(0)
        self.ui.progressBar.setMaximum(20)
        ## BUTTON
        # NOTE(review): the button currently triggers debug() instead of
        # main() — presumably a leftover from development; confirm intent.
        # self.ui.pushButton.clicked.connect(lambda: self.main())
        self.ui.pushButton.clicked.connect(lambda: self.debug())
        self.ui.pushButton.setGraphicsEffect(self.shadow)
        ## NO FILES ?
        # With nothing to process, repurpose the UI into a close dialog.
        if TOTAL == 0 :
            self.no_files("Aucun fichier trouvé")
        ## SHOW --> MAIN WINDOW
        ############################################
        self.show()
        ## --> END
## --> APP FUNCTIONS
############################################
    def no_files(self, message):
        """Switch the UI into a terminal state: show `message`, turn the
        action button into a close button, and hide the progress bar."""
        # Drop the previous handler before rebinding, otherwise both the old
        # and the new slot would fire on click.
        self.ui.pushButton.clicked.disconnect()
        self.ui.label_counter.setText(message)
        self.ui.pushButton.setText("Fermer")
        self.ui.pushButton.clicked.connect(lambda: self.close())
        self.ui.progressBar.setVisible(False)
        self.ui.pushButton.setVisible(True)
def fit_in(self, max_size, primary_size, secondary_size):
primary_ratio = (max_size/float(primary_size))
secondary_ratio = int((float(secondary_size)*float(primary_ratio)))
return secondary_ratio
def perspective_transform(self,
xyA1, xyA2, xyA3, xyA4,
xyB1, xyB2, xyB3, xyB4):
A = np.array([
[xyA1[0], xyA1[1], 1, 0, 0, 0, -xyB1[0] * xyA1[0], -xyB1[0] * xyA1[1]],
[0, 0, 0, xyA1[0], xyA1[1], 1, -xyB1[1] * xyA1[0], -xyB1[1] * xyA1[1]],
[xyA2[0], xyA2[1], 1, 0, 0, 0, -xyB2[0] * xyA2[0], -xyB2[0] * xyA2[1]],
[0, 0, 0, xyA2[0], xyA2[1], 1, -xyB2[1] * xyA2[0], -xyB2[1] * xyA2[1]],
[xyA3[0], xyA3[1], 1, 0, 0, 0, -xyB3[0] * xyA3[0], -xyB3[0] * xyA3[1]],
[0, 0, 0, xyA3[0], xyA3[1], 1, -xyB3[1] * xyA3[0], -xyB3[1] * xyA3[1]],
[xyA4[0], xyA4[1], 1, 0, 0, 0, -xyB4[0] * xyA4[0], -xyB4[0] * xyA4[1]],
[0, 0, 0, xyA4[0], xyA4[1], 1, -xyB4[1] * xyA4[0], -xyB4[1] * xyA4[1]],
], dtype=np.float32)
B = np.array([
xyB1[0],
xyB1[1],
xyB2[0],
xyB2[1],
xyB3[0],
xyB3[1],
xyB4[0],
xyB4[1],
], dtype=np.float32)
return linalg.solve(A, B)
def combine_images(self, IMAGE, LEFT, UP, RIGHT, DOWN, BG, ALPHA=None, perspective = False, perspective_coefficient = 60, FIT = False, height_multiplier = 1.0, orientation = False):
    """Resize IMAGE into the (LEFT, UP, RIGHT, DOWN) box and composite it
    onto BG, optionally darkening through the ALPHA overlay.

    Parameters:
        IMAGE: PIL image to place.
        LEFT, UP, RIGHT, DOWN: target box coordinates inside BG.
        BG: background image the result is composited onto.
        ALPHA: optional overlay combined with ImageChops.darker before
            compositing.
        perspective: if True, apply a perspective warp to the placed image.
        perspective_coefficient: horizontal skew amount for the warp.
        FIT: True = letterbox ("fit in"), False = cover ("fill in").
        height_multiplier: extra vertical stretch used on the perspective path.
        orientation: if True and IMAGE is landscape, rotate BG/ALPHA 90° and
            swap the box axes so the product mockup matches the photo.

    Returns:
        A new RGBA image: BG with IMAGE composited into the box.
    """
    ## ORIENTATION
    ##############
    if orientation == True :
        if IMAGE.width > IMAGE.height :
            BG = BG.rotate(90, expand=True)
            # NOTE(review): ALPHA is rotated unconditionally here; if a caller
            # passes orientation=True with ALPHA=None this raises — confirm all
            # orientation=True call sites supply an ALPHA.
            ALPHA = ALPHA.rotate(90, expand=True)
            # There is probably a more efficient way to do this but it works. :D
            # Swap the box's horizontal and vertical extents to follow the
            # rotated background.
            temp_left = LEFT
            temp_right = RIGHT
            temp_up = UP
            temp_down = DOWN
            LEFT = temp_up
            UP = temp_left
            RIGHT = temp_down
            DOWN = temp_right
    WIDTH = RIGHT - LEFT
    HEIGHT = DOWN - UP
    # Transparent canvas exactly the size of the target box.
    result = Image.new('RGBA', (WIDTH, HEIGHT), (255, 255, 255, 0))
    ## FIT -IN
    ##############
    if FIT == True :
        # Letterbox: scale to the box width first, then shrink to the box
        # height if the result is too tall.
        cropped_image = IMAGE.resize((WIDTH, self.fit_in(WIDTH, IMAGE.width, IMAGE.height)), Image.LANCZOS)
        if cropped_image.height > HEIGHT :
            cropped_image = IMAGE.resize((self.fit_in(HEIGHT, IMAGE.height, IMAGE.width), HEIGHT), Image.LANCZOS)
    ## FILL-IN
    ##############
    else :
        # Cover: scale to the box height first, then widen if the result is
        # narrower than the box.
        cropped_image = IMAGE.resize((self.fit_in(HEIGHT, IMAGE.height, IMAGE.width), HEIGHT), Image.LANCZOS)
        if cropped_image.width < WIDTH :
            cropped_image = IMAGE.resize((WIDTH, self.fit_in(WIDTH, IMAGE.width, IMAGE.height)), Image.LANCZOS)
    ## PERSPECTIVE
    ##############
    if perspective == True :
        # Map the box corners onto a wider top edge to fake a 3D tilt.
        coeff = self.perspective_transform(
            (0, 0),
            (WIDTH, 0),
            (WIDTH,HEIGHT),
            (0, HEIGHT),
            # =>
            (- perspective_coefficient, 0),
            (WIDTH + perspective_coefficient, 0),
            (WIDTH, HEIGHT),
            (0, HEIGHT),
        )
        cropped_image = cropped_image.transform((cropped_image.width, cropped_image.height), method=Image.PERSPECTIVE, data=coeff)
        # NOTE(review): the next resize rebuilds cropped_image from IMAGE and
        # appears to discard the perspective-transformed image produced just
        # above — confirm whether the transform result was meant to be kept.
        cropped_image = IMAGE.resize((self.fit_in(HEIGHT, IMAGE.height, IMAGE.width), int(HEIGHT * height_multiplier)), Image.LANCZOS)
        if cropped_image.width < WIDTH :
            cropped_image = IMAGE.resize((WIDTH, int(self.fit_in(WIDTH, IMAGE.width, IMAGE.height) * height_multiplier)), Image.LANCZOS)
        # if cropped_image.height > HEIGHT :
        #     cropped_image = IMAGE.resize((int(self.fit_in(HEIGHT, IMAGE.height, IMAGE.width) * HEIGHT), HEIGHT), Image.LANCZOS)
    # Center horizontally; Y_OFFSET is a module-level divisor controlling the
    # vertical placement (2 would center — TODO confirm its configured value).
    offset = (result.width - cropped_image.width) // 2, (result.height - cropped_image.height) // Y_OFFSET
    result.paste(cropped_image, offset)
    # Expand the box-sized result onto a BG-sized transparent canvas, using
    # result itself as the paste mask to keep transparency.
    sized_result = Image.new("RGBA", BG.size, (255, 255, 255, 0))
    sized_result.paste(result, (LEFT, UP, RIGHT, DOWN), result)
    if ALPHA :
        # Darken through the overlay (shadows/highlights of the mockup).
        alpha_blend = ImageChops.darker(sized_result, ALPHA)
        out = Image.alpha_composite(BG, alpha_blend)
    else :
        out = Image.alpha_composite(BG, sized_result)
    return out
def order_number(self, image, filename):
    """Stamp `filename` onto `image` at the configured order-number position
    and return the (mutated) image."""
    draw = ImageDraw.Draw(image)
    font = ImageFont.truetype(f"{PATH}/Order_data/Montserrat-Regular.ttf", ORDER_FONT_SIZE)
    draw.text((ORDER_X, ORDER_Y), filename, font=font, fill=(0, 0, 0))
    return image
## MAIN FUNCTION
############################################
def main(self) :
    """Build one order sheet per input image: render every product mockup
    via combine_images, place the mockups onto the FICHE template, stamp the
    order number, and save the sheet as a JPEG under PATH/Fiches.

    Progress and status are reported through the progress bar and counter
    label; processEvents() keeps the UI responsive between stages.
    """
    self.ui.progressBar.setVisible(True)
    self.ui.pushButton.setVisible(False)
    ## Create a new folder for the order sheets to be saved on.
    if not os.path.exists(PATH + "/Fiches") :
        os.makedirs(PATH + "/Fiches")
    for i, file in enumerate(FOLDER):
        base_image = Image.open(file)
        current_thumb = base_image
        ## --> COMMENT THIS LINE FOR FULL QUALITY (BUT SLOWER) RESULTS
        current_thumb.thumbnail((PIXEL_SIZE, PIXEL_SIZE), Image.LANCZOS)
        base_image = current_thumb.convert("RGBA")
        self.ui.progressBar.setValue(0)
        # Start each sheet from the blank FICHE template.
        current_fiche = FICHE
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création miniature")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        if WATERMARK :
            # NOTE(review): Image.paste expects a position/box as its second
            # argument; passing the WATERMARK image itself looks wrong —
            # confirm the intended call signature.
            current_thumb.paste(WATERMARK, WATERMARK)
        ## --- Product mockup rendering stages -------------------------------
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création porte-clef")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_ID = self.combine_images(base_image, ID_LEFT, ID_UP, ID_RIGHT, ID_DOWN, ID, ID_ALPHA)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création cadre")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_cadre = self.combine_images(base_image, CADRE_LEFT, CADRE_UP, CADRE_RIGHT, CADRE_DOWN, CADRE, CADRE_ALPHA, orientation=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création magnet")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_magnet = self.combine_images(base_image, MAGNET_LEFT, MAGNET_UP, MAGNET_RIGHT, MAGNET_DOWN, MAGNET, MAGNET_ALPHA)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création mug")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_mug = self.combine_images(base_image, MUG_LEFT, MUG_UP, MUG_RIGHT, MUG_DOWN, MUG, MUG_ALPHA)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création calendrier")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_cal = self.combine_images(base_image, CALENDRIER_LEFT, CALENDRIER_UP, CALENDRIER_RIGHT, CALENDRIER_DOWN, CALENDRIER, CALENDRIER_ALPHA, FIT=True)
        # self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création gourde")
        # self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        # QtWidgets.QApplication.processEvents()
        # current_gourde = self.combine_images(base_image, GOURDE_LEFT, GOURDE_UP, GOURDE_RIGHT, GOURDE_DOWN, GOURDE, GOURDE_ALPHA, FIT=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création passe partout")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_passe = self.combine_images(base_image, PASSE_LEFT, PASSE_UP, PASSE_RIGHT, PASSE_DOWN, PASSE, PASSE_ALPHA, orientation=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création support bois")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_bois = self.combine_images(base_image, BOIS_LEFT, BOIS_UP, BOIS_RIGHT, BOIS_DOWN, BOIS, BOIS_ALPHA, orientation=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création boule a neige")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_neige = self.combine_images(base_image, NEIGE_LEFT, NEIGE_UP, NEIGE_RIGHT, NEIGE_DOWN, NEIGE, NEIGE_ALPHA)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création carte de voeux 01")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_voeux01 = self.combine_images(base_image, VOEUX01_LEFT, VOEUX01_UP, VOEUX01_RIGHT, VOEUX01_DOWN, VOEUX01, VOEUX01_ALPHA)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création carte de voeux 02")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_voeux02 = self.combine_images(base_image, VOEUX02_LEFT, VOEUX02_UP, VOEUX02_RIGHT, VOEUX02_DOWN, VOEUX02, VOEUX02_ALPHA)
        # self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création portefeuille")
        # self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        # QtWidgets.QApplication.processEvents()
        # current_portefeuille = self.combine_images(base_image, PORTEFEUILLE_LEFT, PORTEFEUILLE_UP, PORTEFEUILLE_RIGHT, PORTEFEUILLE_DOWN, PORTEFEUILLE, PORTEFEUILLE_ALPHA, FIT=True)
        #######
        ## --- Placement of the rendered mockups onto the sheet --------------
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement cadre")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_cadre, CADRE_THUMB_LEFT, CADRE_THUMB_UP, CADRE_THUMB_LEFT+THUMB_SIZE, CADRE_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement support bois")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_bois, BOIS_THUMB_LEFT, BOIS_THUMB_UP, BOIS_THUMB_LEFT+THUMB_SIZE, BOIS_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement passe partout")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_passe, PASSE_THUMB_LEFT, PASSE_THUMB_UP, PASSE_THUMB_LEFT+THUMB_SIZE, PASSE_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement magnet")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_magnet, MAGNET_THUMB_LEFT, MAGNET_THUMB_UP, MAGNET_THUMB_LEFT+THUMB_SIZE, MAGNET_THUMB_UP+THUMB_SIZE, current_fiche)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement porte-clef")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_ID, ID_THUMB_LEFT, ID_THUMB_UP, ID_THUMB_LEFT+THUMB_SIZE, ID_THUMB_UP+THUMB_SIZE, current_fiche)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement mug")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_mug, MUG_THUMB_LEFT, MUG_THUMB_UP, MUG_THUMB_LEFT+THUMB_SIZE, MUG_THUMB_UP+THUMB_SIZE, current_fiche)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement calendrier")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_cal, CALENDRIER_THUMB_LEFT, CALENDRIER_THUMB_UP, CALENDRIER_THUMB_LEFT+THUMB_SIZE, CALENDRIER_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        # self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement gourde")
        # self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        # QtWidgets.QApplication.processEvents()
        # current_fiche = self.combine_images(current_gourde, GOURDE_THUMB_LEFT, GOURDE_THUMB_UP, GOURDE_THUMB_LEFT+THUMB_SIZE, GOURDE_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement boule à neige")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_neige, NEIGE_THUMB_LEFT, NEIGE_THUMB_UP, NEIGE_THUMB_LEFT+THUMB_SIZE, NEIGE_THUMB_UP+THUMB_SIZE, current_fiche)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement carte voeux 01")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_voeux01, VOEUX01_THUMB_LEFT, VOEUX01_THUMB_UP, VOEUX01_THUMB_LEFT+THUMB_SIZE, VOEUX01_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement carte voeux 02")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_voeux02, VOEUX02_THUMB_LEFT, VOEUX02_THUMB_UP, VOEUX02_THUMB_LEFT+THUMB_SIZE, VOEUX02_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        # self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement portefeuille")
        # self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        # QtWidgets.QApplication.processEvents()
        # current_fiche = self.combine_images(current_portefeuille, PORTEFEUILLE_THUMB_LEFT, PORTEFEUILLE_THUMB_UP, PORTEFEUILLE_THUMB_LEFT+THUMB_SIZE, PORTEFEUILLE_THUMB_UP+THUMB_SIZE, current_fiche, FIT=True)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Placement miniature")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        current_fiche = self.combine_images(current_thumb, BIG_THUMB_LEFT, BIG_THUMB_UP, BIG_THUMB_RIGHT, BIG_THUMB_DOWN, current_fiche, FIT=True)
        # JPEG has no alpha channel, so flatten to RGB before saving.
        current_fiche = current_fiche.convert("RGB")
        filename = os.path.splitext(file)[0]
        current_fiche = self.order_number(current_fiche, filename)
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Enregistré !")
        self.ui.progressBar.setValue(self.ui.progressBar.value() + 1)
        QtWidgets.QApplication.processEvents()
        # current_fiche.show()
        # NOTE(review): the output name is a constant, so every iteration
        # overwrites the same file — presumably the per-image `filename` was
        # meant to appear here; confirm.
        current_fiche.save(f"{PATH}/Fiches/(unknown).jpg", format='JPEG', subsampling=0, quality=100)
        # NOTE(review): this expression has no effect — probably intended to
        # increment a counter (e.g. ORDER_START += 1); confirm intent.
        ORDER_START + 1
    self.no_files("Terminé")
def debug(self):
    """Development helper: render only the VOEUX02 composite for each input
    file and save it next to PATH as V_<name>.jpg."""
    bar = self.ui.progressBar
    bar.setValue(0)
    bar.setMaximum(TOTAL)
    bar.setVisible(True)
    self.ui.pushButton.setVisible(False)
    for i, file in enumerate(FOLDER):
        base_image = Image.open(file)
        current_thumb = base_image
        ### COMMENT THIS LINE FOR FULL QUALITY (BUT SLOWER) RESULTS
        # current_thumb.thumbnail((PIXEL_SIZE, PIXEL_SIZE), Image.LANCZOS)
        base_image = current_thumb.convert("RGBA")
        self.ui.label_counter.setText(f"{i+1} / {TOTAL} : Création en cours...")
        bar.setValue(bar.value() + 1)
        combined = self.combine_images(base_image, VOEUX02_LEFT, VOEUX02_UP, VOEUX02_RIGHT, VOEUX02_DOWN, VOEUX02, VOEUX02_ALPHA)
        # JPEG cannot carry alpha: flatten before saving.
        combined = combined.convert("RGB")
        combined.save(f"{PATH}/V_{os.path.splitext(file)[0]}.jpg", format='JPEG', subsampling=0, quality=100)
        # combined.show()
    self.no_files("Terminé")
#############################################################
# MAIN #
#############################################################
if __name__ == '__main__':
    # High-DPI scaling must be enabled before the QApplication is constructed.
    QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    app = QtWidgets.QApplication(sys.argv)
    order = Order()
    order.show()
    # Run the Qt event loop and propagate its exit code to the shell.
    sys.exit(app.exec_())
|
{"hexsha": "799301ef243cf11f3a91255ef92b57628e093589", "size": 26296, "ext": "pyw", "lang": "Python", "max_stars_repo_path": "COMMANDE/Order.pyw", "max_stars_repo_name": "PictorSomni/Image_manipulations", "max_stars_repo_head_hexsha": "7b91dd8514a2bb4383308c199e03e26539cef430", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "COMMANDE/Order.pyw", "max_issues_repo_name": "PictorSomni/Image_manipulations", "max_issues_repo_head_hexsha": "7b91dd8514a2bb4383308c199e03e26539cef430", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "COMMANDE/Order.pyw", "max_forks_repo_name": "PictorSomni/Image_manipulations", "max_forks_repo_head_hexsha": "7b91dd8514a2bb4383308c199e03e26539cef430", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.972027972, "max_line_length": 215, "alphanum_fraction": 0.6078491025, "include": true, "reason": "import numpy,from numpy", "num_tokens": 6762}
|
#Julia Gadfly Histogram
# Plot a 10-bin histogram of 113 samples drawn from the standard normal
# distribution.
using Gadfly
plot(x=randn(113), Geom.histogram(bincount=10))
|
{"hexsha": "f23f28b972bd916fbc0f32674e561ecf6d6b738d", "size": 88, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Chapter04/B10526-04_julia_code/6-gadfly-histogram.jl", "max_stars_repo_name": "suwarnarajput/Learning-Jupyter-5-Second-Edition", "max_stars_repo_head_hexsha": "77f6e04f9cc86fb3490978e0b34a804c47965a65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-12-23T15:55:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-03T05:33:29.000Z", "max_issues_repo_path": "Chapter04/B10526-04_julia_code/6-gadfly-histogram.jl", "max_issues_repo_name": "suwarnarajput/Learning-Jupyter-5-Second-Edition", "max_issues_repo_head_hexsha": "77f6e04f9cc86fb3490978e0b34a804c47965a65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter04/B10526-04_julia_code/6-gadfly-histogram.jl", "max_forks_repo_name": "suwarnarajput/Learning-Jupyter-5-Second-Edition", "max_forks_repo_head_hexsha": "77f6e04f9cc86fb3490978e0b34a804c47965a65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-09-16T05:40:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T09:58:01.000Z", "avg_line_length": 22.0, "max_line_length": 47, "alphanum_fraction": 0.7613636364, "num_tokens": 26}
|
# ---
# title: 1347. Minimum Number of Steps to Make Two Strings Anagram
# id: problem1347
# author: Tian Jun
# date: 2020-10-31
# difficulty: Medium
# categories: String
# link: <https://leetcode.com/problems/minimum-number-of-steps-to-make-two-strings-anagram/description/>
# hidden: true
# ---
#
# Given two equal-size strings `s` and `t`. In one step you can choose **any
# character** of `t` and replace it with **another character**.
#
# Return _the minimum number of steps_ to make `t` an anagram of `s`.
#
# An **Anagram** of a string is a string that contains the same characters
# with a different (or the same) ordering.
#
#
#
# **Example 1:**
#
#
#
# Input: s = "bab", t = "aba"
# Output: 1
# Explanation: Replace the first 'a' in t with b, t = "bba" which is anagram of s.
#
#
# **Example 2:**
#
#
#
# Input: s = "leetcode", t = "practice"
# Output: 5
# Explanation: Replace 'p', 'r', 'a', 'i' and 'c' from t with proper characters to make t anagram of s.
#
#
# **Example 3:**
#
#
#
# Input: s = "anagram", t = "mangaar"
# Output: 0
# Explanation: "anagram" and "mangaar" are anagrams.
#
#
# **Example 4:**
#
#
#
# Input: s = "xxyyzz", t = "xxyyzz"
# Output: 0
#
#
# **Example 5:**
#
#
#
# Input: s = "friend", t = "family"
# Output: 4
#
#
#
#
# **Constraints:**
#
# * `1 <= s.length <= 50000`
# * `s.length == t.length`
# * `s` and `t` contain lower-case English letters only.
#
#
## @lc code=start
using LeetCode

"""
    min_steps_to_anagram(s::String, t::String) -> Int

Return the minimum number of single-character replacements needed to make
`t` an anagram of `s`.

Counts each character's surplus in `s` relative to `t`; every surplus
character must replace one of `t`'s excess characters, so the answer is the
total positive surplus.
"""
function min_steps_to_anagram(s::String, t::String)::Int
    balance = Dict{Char,Int}()
    for c in s
        balance[c] = get(balance, c, 0) + 1
    end
    for c in t
        balance[c] = get(balance, c, 0) - 1
    end
    steps = 0
    for v in values(balance)
        if v > 0
            steps += v
        end
    end
    return steps
end
## @lc code=end
|
{"hexsha": "ccff61583ac994677c9b965bf4610b8082e450d3", "size": 1625, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/unresolved/1347.minimum-number-of-steps-to-make-two-strings-anagram.jl", "max_stars_repo_name": "jmmshn/LeetCode.jl", "max_stars_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-10-27T18:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T13:27:49.000Z", "max_issues_repo_path": "src/unresolved/1347.minimum-number-of-steps-to-make-two-strings-anagram.jl", "max_issues_repo_name": "jmmshn/LeetCode.jl", "max_issues_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2020-11-01T07:26:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T11:57:53.000Z", "max_forks_repo_path": "src/unresolved/1347.minimum-number-of-steps-to-make-two-strings-anagram.jl", "max_forks_repo_name": "jmmshn/LeetCode.jl", "max_forks_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-10-30T11:52:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T10:35:11.000Z", "avg_line_length": 20.5696202532, "max_line_length": 107, "alphanum_fraction": 0.5575384615, "num_tokens": 545}
|
import cv2
import numpy as np
from base_camera import BaseCamera
def merge(left_image, right_image):
    """Join two equally-sized frames side by side (left | right) along the
    horizontal axis."""
    side_by_side = np.concatenate((left_image, right_image), axis=1)
    return side_by_side
class Camera(BaseCamera):
    """Stereo camera source: captures frames from two devices and yields
    them merged side by side as JPEG bytes."""

    # Default OpenCV device indices for the left and right cameras.
    video_source_1 = 1
    video_source_2 = 2

    @staticmethod
    def set_video_sources(source_1, source_2):
        """Select which capture device indices the stream uses."""
        Camera.video_source_1 = source_1
        Camera.video_source_2 = source_2

    @staticmethod
    def frames():
        """Yield JPEG-encoded stereo frames indefinitely.

        Raises:
            RuntimeError: if either capture device cannot be opened.
        """
        camera_1 = cv2.VideoCapture(Camera.video_source_1)
        camera_2 = cv2.VideoCapture(Camera.video_source_2)
        # Bug fix: the original condition `not camera_1.isOpened() and
        # camera_2.isOpened()` only fired when camera 1 failed AND camera 2
        # succeeded; fail whenever either camera is unavailable.
        if not (camera_1.isOpened() and camera_2.isOpened()):
            raise RuntimeError('Could not start the cameras.')

        while True:
            # read current frame
            _, img_1 = camera_1.read()
            _, img_2 = camera_2.read()

            img = merge(img_1, img_2)

            # encode as a jpeg image and return it
            yield cv2.imencode('.jpg', img)[1].tobytes()
|
{"hexsha": "163b68114d711f8dac5fb4c84823dbc8a793c20e", "size": 943, "ext": "py", "lang": "Python", "max_stars_repo_path": "camera_opencv_stereo.py", "max_stars_repo_name": "taraprasad73/flask-video-streaming", "max_stars_repo_head_hexsha": "bb123db41857f90224fa4025334c04f73fa52419", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "camera_opencv_stereo.py", "max_issues_repo_name": "taraprasad73/flask-video-streaming", "max_issues_repo_head_hexsha": "bb123db41857f90224fa4025334c04f73fa52419", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "camera_opencv_stereo.py", "max_forks_repo_name": "taraprasad73/flask-video-streaming", "max_forks_repo_head_hexsha": "bb123db41857f90224fa4025334c04f73fa52419", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5757575758, "max_line_length": 62, "alphanum_fraction": 0.6458112407, "include": true, "reason": "import numpy", "num_tokens": 235}
|
import numpy as np
import torchvision.datasets as datasets
from pathlib import Path
import libs.dirs as dirs
import libs.utils as utils
import libs.dataset_utils as dutils
import models.utils as mutils
import libs.commons as commons
from libs.vis_functions import plot_confusion_matrix
def wrapper_train(epochs, model_path, history_path, dataset_path):
    """Train a network on `dataset_path` for `epochs` epochs and report the
    metrics of the best validation epoch.

    Returns:
        (best val loss, best val accuracy, confusion matrix at best epoch).
    """
    seed = None
    device_id = 0
    numImgBatch = 256
    use_weights = True

    # ImageNet statistics
    dataTransforms = mutils.resnet_transforms(commons.IMAGENET_MEAN, commons.IMAGENET_STD)

    # Materialise ImageFolder datasets for both splits (this also scans and
    # validates the on-disk folder layout).
    imageDataset = {
        phase: datasets.ImageFolder(str(dataset_path / phase),
                                    transform=dataTransforms[phase],
                                    is_valid_file=utils.check_empty_file)
        for phase in ('train', 'val')
    }

    history, _ = mutils.train_network(dataset_path, dataTransforms, epochs=epochs,
                                      batch_size=numImgBatch,
                                      model_path=model_path,
                                      history_path=history_path,
                                      seed=seed,
                                      weighted_loss=use_weights,
                                      device_id=device_id)

    # Pick the epoch with the lowest validation loss and report its metrics.
    best = np.argmin(history['loss-val'])
    return history['loss-val'][best], history['acc-val'][best], history['conf-val'][best]
if __name__ == "__main__":
    numEvals = 5

    net_type = dutils.get_input_network_type(commons.network_types)
    val_type = dutils.get_input_network_type(commons.val_types, message="validation set")
    rede = int(input("\nEnter net number.\n"))
    numEpochs = 25

    # Dataset root folder
    datasetPath = Path(dirs.dataset) / "{}_dataset_rede_{}_val_{}".format(net_type, rede, val_type)
    datasetName = datasetPath.stem

    modelFolder = Path(dirs.saved_models) / \
        "{}_{}_epochs".format(datasetName, numEpochs)
    historyFolder = Path(dirs.saved_models) / \
        "history_{}_{}_epochs".format(datasetName, numEpochs)
    filePath = Path(dirs.results) / \
        "log_evaluation_{}_{}_epochs.txt".format(datasetName, numEpochs)
    confMatPath = Path(dirs.results) / \
        "confusion_matrix_{}.pdf".format(datasetName)

    valLoss = []
    valAcc = []
    bestConfMat = None
    print()
    # Run function many times and save best results
    for i in range(numEvals):
        print("\nStarting run number {}/{}.\n".format(i+1, numEvals))
        modelPath = modelFolder / "model_run_{}.pt".format(i)
        historyPath = historyFolder / "history_run_{}.pickle".format(i)

        roundValLoss, roundValAcc, confMat = wrapper_train(numEpochs, modelPath, historyPath, datasetPath)
        valLoss.append(roundValLoss)

        classAcc = mutils.compute_class_acc(confMat)
        avgAcc = np.mean(classAcc)

        valAcc.append(roundValAcc)
        print("Debug\nAvg acc: {:.3f}".format(avgAcc))
        print("other acc: {:.3f}\n".format(roundValAcc))

        # Save best confusion matrix (the run with the lowest val loss so far)
        if np.argmin(valLoss) == i:
            bestConfMat = confMat

    printString = ""
    printString += "\nFinished training {} evaluation runs for dataset\n{}\n".format(numEvals, datasetPath)
    printString += "\nResulting statistics:\n\
    Val Loss:\n\
        Mean: {:.3f}\n\
        Std : {:.3f}\n\
    Val Avg Acc:\n\
        Mean: {:.5f}\n\
        Std {:.5f}\n".format(np.mean(valLoss), np.std(valLoss),
                             np.mean(valAcc), np.std(valAcc))
    print(printString)

    with open(filePath, mode='w') as f:
        f.write(printString)

    title = "Confusion Matrix "+str(datasetName)
    # Bug fix: plot the confusion matrix of the *best* run. The original
    # passed `confMat`, i.e. the last run's matrix, even though the best one
    # was tracked in `bestConfMat` above.
    plot_confusion_matrix(bestConfMat, title=title, normalize=True, show=False, save_path=confMatPath)
    # print("Conf matrix:")
    # print(confMat)
|
{"hexsha": "52469f8ecb67d529ba2fdbe487fcda4b777dab68", "size": 4168, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/get_train_stats.py", "max_stars_repo_name": "olavosamp/semiauto-video-annotation", "max_stars_repo_head_hexsha": "b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/get_train_stats.py", "max_issues_repo_name": "olavosamp/semiauto-video-annotation", "max_issues_repo_head_hexsha": "b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2019-07-15T21:49:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-09T14:35:03.000Z", "max_forks_repo_path": "models/get_train_stats.py", "max_forks_repo_name": "olavosamp/semiauto-video-annotation", "max_forks_repo_head_hexsha": "b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.8909090909, "max_line_length": 107, "alphanum_fraction": 0.6082053743, "include": true, "reason": "import numpy", "num_tokens": 970}
|
#define BOOST_TEST_MODULE Qt5Gui
#include "Qt5Gui.hpp"
#include <boost/test/unit_test.hpp>
#include "thread.hpp"
#include "stack.hpp"
#include "algorithm.hpp"
#include "load.hpp"
#include "reference.hpp"
#include "convert/string.hpp"
#include "convert/char.hpp"
#include "convert/callable.hpp"
#include "convert/numeric.hpp"
#include "Qt5Core/QString.hpp"
#include "Qt5Core/QChar.hpp"
#include "Qt5Core/QVariant.hpp"
#include "Qt5Core/QObject.hpp"
#include <QDir>
#include <QPoint>
#include <QGuiApplication>
// Boost.Test global fixture owning a QGuiApplication for the whole test run.
// Qt requires argc/argv to remain valid for the application's lifetime, so
// they are stored as members alongside the heap-allocated application.
struct QGuiApplicationFixture
{
    int argc;                // argument count handed to QGuiApplication
    char name[100];          // backing storage for argv[0] ("luacxx")
    char* argv[1];           // fake argv pointing at `name`
    QGuiApplication* app;    // owned; created in ctor, deleted in dtor

public:
    QGuiApplicationFixture() :
        argc(1)
    {
        // argv must be populated before QGuiApplication is constructed,
        // since Qt parses it in the constructor.
        strcpy(name, "luacxx");
        argv[0] = name;
        app = new QGuiApplication(argc, argv);
    }

    ~QGuiApplicationFixture()
    {
        delete app;
    }
};
BOOST_GLOBAL_FIXTURE(QGuiApplicationFixture);
// Regression test: creating Qt GUI objects from Lua (a QDrag parented to a
// QWindow, holding a QMimeData) and then forcing a full Lua garbage
// collection must not crash or double-free the Qt-owned objects.
BOOST_AUTO_TEST_CASE(QObject_destruction)
{
    auto env = lua::create();
    // Resolve luacxx modules from the local build tree.
    env["package"]["cpath"] = ".libs/libluacxx-?.so";
    lua::run_string(env, ""
        "require 'Qt5Gui.QWindow';"
        "require 'Qt5Gui.QDrag';"
        "require 'Qt5Core.QMimeData';"
        "require 'Qt5Core.QCoreApplication';"
        ""
        "win = QWindow.new();\n"
        "drag = QDrag.new(win);\n"
        "md = QMimeData.new();\n"
        "drag:setMimeData(md);\n"
    );
    // Full collection cycle: finalizers for the wrapped QObjects run here.
    lua_gc(env, LUA_GCCOLLECT, 0);
}
|
{"hexsha": "f1529f84b935e10ab0f74885bbab4d70f8ed3cf5", "size": 1380, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tests/Qt5Gui.cpp", "max_stars_repo_name": "dafrito/luacxx", "max_stars_repo_head_hexsha": "278bf8a7c6664536ea7f1dd1f59d35b6fb8d2dad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 128.0, "max_stars_repo_stars_event_min_datetime": "2015-01-07T19:47:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T19:42:14.000Z", "max_issues_repo_path": "src/tests/Qt5Gui.cpp", "max_issues_repo_name": "dafrito/luacxx", "max_issues_repo_head_hexsha": "278bf8a7c6664536ea7f1dd1f59d35b6fb8d2dad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tests/Qt5Gui.cpp", "max_forks_repo_name": "dafrito/luacxx", "max_forks_repo_head_hexsha": "278bf8a7c6664536ea7f1dd1f59d35b6fb8d2dad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24.0, "max_forks_repo_forks_event_min_datetime": "2015-01-07T19:47:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T17:42:37.000Z", "avg_line_length": 19.7142857143, "max_line_length": 53, "alphanum_fraction": 0.6528985507, "num_tokens": 385}
|
import math
from typing import Dict, List
import numpy as np
import pandas as pd
from data_manager.base_manager import DataManagerBase, DataParam
from proto.aiengine.v1 import aiengine_pb2
class TimeSeriesDataManager(DataManagerBase):
def __init__(self, param: DataParam, fields: Dict[str, aiengine_pb2.FieldData], action_rewards: Dict[str, str],
actions_order: Dict[str, int], external_reward_funcs: str, laws: List[str]):
super().__init__(param, fields, action_rewards, actions_order, external_reward_funcs, laws)
new_series = {}
sorted_field_names = sorted(fields)
for field_name in sorted_field_names:
new_series[field_name] = [fields[field_name].initializer]
self.massive_table_sparse = pd.DataFrame(new_series, index={self.param.epoch_time})
self.massive_table_training_filled = None
self.current_time: pd.Timestamp = None
self.is_training = False
def get_window_span(self):
return math.floor(self.param.interval_secs / self.param.granularity_secs)
def start_training(self):
if self.is_training:
raise Exception("unable to start a new training run before the previous has finished")
self.is_training = True
self.metrics.start("copy_training_table")
self.massive_table_training_filled = self._fill_table(self.massive_table_sparse)
self.metrics.end("copy_training_table")
def end_training(self):
self.is_training = False
self.massive_table_training_filled = None
def _resample_table(self, table_to_resample: pd.DataFrame) -> pd.DataFrame:
self.metrics.start("resample")
resampled_table = table_to_resample.resample(self.param.granularity_secs).mean()
self.metrics.end("resample")
return resampled_table
def _fill_table(self, input_table: pd.DataFrame) -> pd.DataFrame:
table_to_fill = input_table.copy()
self.metrics.start("ffill")
for col_name in table_to_fill:
fill_method = self.fields[col_name].fill_method
if fill_method == aiengine_pb2.FILL_FORWARD:
table_to_fill[col_name] = table_to_fill[
col_name
].ffill()
elif fill_method == aiengine_pb2.FILL_ZERO:
table_to_fill[col_name] = table_to_fill[
col_name
].fillna(0)
self.metrics.end("ffill")
return table_to_fill
def merge_training_row(self, new_row: pd.DataFrame):
if not self.is_training:
raise Exception("only valid to call merge_training_row during a training run")
index = new_row.index[0]
for column_name in list(new_row.keys()):
value = new_row[column_name].array[0]
self.massive_table_training_filled.loc[index][column_name] = value
def merge_data(self, new_data: pd.DataFrame):
if len(new_data) == 0:
return
new_data_resampled = self._resample_table(new_data)
# On initial data load, overwrite initializers if we have actual data for them
if len(self.massive_table_sparse) == 1 and self.massive_table_sparse.index[0] == new_data_resampled.index[0]:
initial_row = self.massive_table_sparse.iloc[0]
for key, val in new_data_resampled.iloc[0].iteritems():
initial_row[key] = val
self.metrics.start("concat")
concat_table = pd.concat([self.massive_table_sparse, new_data_resampled])
self.metrics.end("concat")
self.massive_table_sparse = self._resample_table(concat_table)
def add_interpretations(self, interpretations):
self.interpretations = interpretations
def get_interpretations_for_interval(self):
if self.interpretations is not None:
index = self.interpretations.index[int(self.current_time.timestamp())]
if index is not None and index.indicies is not None and len(index.indicies) > 0:
interval_interpretations = []
for i in index.indicies:
interval_interpretations.append(
self.interpretations.interpretations[i]
)
return interval_interpretations
return None
def get_shape(self):
return np.shape([0] * self.get_window_span() * len(self.fields))
# This method should only be called during training.
def get_current_window(self) -> pd.DataFrame:
    """Return the filled training rows covering the current interval.

    Only valid during training. Index lookups use "ffill" so the nearest
    previous timestamp is matched; exact timestamps are not required.

    Raises:
        Exception: if called outside a training run.
    """
    if not self.is_training:
        raise Exception("Start training before calling get_current_window()")
    # This will get the nearest previous index that matches the timestamp,
    # so we don't need to specify the timestamps exactly
    table = self.massive_table_training_filled
    start_index = table.index.get_loc(self.current_time, "ffill")
    end_index = table.index.get_loc(
        self.current_time + self.param.interval_secs, "ffill")
    if self.get_window_span() == 1:
        return table.iloc[start_index:start_index + 1]
    return table.iloc[start_index:end_index]
def get_window_at(self, time: pd.Timestamp):
    """Return the filled window of rows ending at `time`.

    Fills the sparse master table, then slices out the rows spanning one
    interval that ends at `time`. A single-row table short-circuits to that
    one row.
    """
    filled_table = self._fill_table(self.massive_table_sparse)
    # If we only have a single row, use it
    if filled_table.shape[0] == 1:
        start_index = filled_table.index.get_loc(time)
        end_index = start_index
    else:
        # Nearest-previous ("ffill") matches, so exact timestamps are not required.
        start_index = filled_table.index.get_loc(
            time - self.param.interval_secs, "ffill"
        )
        end_index = filled_table.index.get_loc(time, "ffill")
    if self.get_window_span() == 1:
        return filled_table.iloc[start_index:start_index + 1]
    return filled_table.iloc[start_index:end_index]
def reset(self):
    # Rewind the simulation clock to the configured epoch start.
    self.current_time = self.param.epoch_time
def advance(self) -> bool:
    """Step the clock forward by one granularity tick.

    Returns False (leaving the clock unchanged) once the next full
    interval would extend past the configured end time; True otherwise.
    """
    last_start = self.param.end_time - self.param.interval_secs
    if self.current_time >= last_start:
        return False
    self.current_time += self.param.granularity_secs
    return True
|
{"hexsha": "159074a9f55be420b0a971a1140ca55abf0fa6bc", "size": 6239, "ext": "py", "lang": "Python", "max_stars_repo_path": "ai/src/data_manager/time_series_manager.py", "max_stars_repo_name": "ScriptBox99/spiceai", "max_stars_repo_head_hexsha": "f8aa178fed5cc6d6d9397c123bdc869500c5135b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 713, "max_stars_repo_stars_event_min_datetime": "2021-09-07T19:57:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T02:31:02.000Z", "max_issues_repo_path": "ai/src/data_manager/time_series_manager.py", "max_issues_repo_name": "ScriptBox99/spiceai", "max_issues_repo_head_hexsha": "f8aa178fed5cc6d6d9397c123bdc869500c5135b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 133, "max_issues_repo_issues_event_min_datetime": "2021-09-07T17:34:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T17:34:31.000Z", "max_forks_repo_path": "ai/src/data_manager/time_series_manager.py", "max_forks_repo_name": "ScriptBox99/spiceai", "max_forks_repo_head_hexsha": "f8aa178fed5cc6d6d9397c123bdc869500c5135b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2021-09-07T23:46:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T21:11:04.000Z", "avg_line_length": 40.512987013, "max_line_length": 117, "alphanum_fraction": 0.6642090079, "include": true, "reason": "import numpy", "num_tokens": 1302}
|
#!/usr/local/bin/python3
from numpy import array, sum, savetxt, loadtxt, zeros, arange, vectorize
from datetime import datetime
from commonutils import construct_app_num, log_error
from casestatus import CaseStatus
from typing import Tuple, List
from functools import reduce
from os.path import basename
from itertools import tee
def save_data(start: int, end: int, data: array) -> None:
    """
    Save raw data to a CSV file for later use.

    The file is named after the current timestamp; each row pairs an
    application receipt number from [start, end) with its case status.
    """
    records = zeros(data.size, dtype=[('appNum', int), ('status', 'U12')])
    records['appNum'] = arange(start, end)
    records['status'] = data
    filename = datetime.today().strftime('%Y-%b-%d-%H-%M-%S.csv')
    savetxt(filename, records, header='AppReceiptNum, CaseStatus',
            delimiter=',', fmt='%d, %s')
def load_data(filename: str) -> Tuple[int, int, array]:
    """
    Reads raw data from the given CSV file and returns the range (start, end)
    of application receipt numbers and the case status array.
    """
    raw = loadtxt(filename, delimiter=',', skiprows=1,
                  dtype={'names': ('appNum', 'status'),
                         'formats': ('i', 'U12')})
    statuses = vectorize(CaseStatus.csv_to_status)(raw['status'])
    return (min(raw['appNum']), max(raw['appNum']), statuses)
def compare_data(filenames: List[str]) -> None:
    """
    Reads raw data from the given CSV files (at least 2), compares the data, and
    prints the list of applications that have changed status
    """
    if len(filenames) < 2:
        log_error("Specify at least 2 files for comparison.")
        return
    # Load every file into a structured array of (appNum, status) records.
    results = []
    for f in filenames:
        results.append(loadtxt(f, delimiter=',', skiprows=1,
                               dtype={'names': ('appNum', 'status'),
                                      'formats': ('i', 'U12')}))
    # All files must cover the same appNum range and length to be comparable.
    for (x, y) in pairwise(results):
        if not(are_comparable(x, y)):
            log_error('The files are not comparable.')
            return
    # Build the header: the app-number column, then one date column per file.
    # NOTE(review): reduce passes the accumulated header string back through
    # extract_date_from_filename on every step; that only works because
    # non-date strings are returned unchanged -- confirm this is intentional.
    header = '{:<11}'.format('App #')
    prepHead = lambda x, y: '{:<16} {:<17}'.format(extract_date_from_filename(x),
                                                   extract_date_from_filename(y))
    print(reduce(prepHead, filenames, header))
    print('-----------' + '------------------' * len(filenames))
    # One output line per application; printed only when some status changed.
    for (i, app) in enumerate(results[0]['appNum']):
        line = '{:9} : {:<13}'.format(construct_app_num(app), results[0][i]['status'])
        shouldPrint = False
        for (x, y) in pairwise(results):
            if x[i]['status'] != y[i]['status']:
                line += ' --> {:<13}'.format(y[i]['status'])
                shouldPrint = True
            else:
                # NOTE(review): stops extending the chain at the first pair with
                # no change, so a change occurring only between later files is
                # never reported -- confirm this truncation is intentional
                # (a `continue` would report all transitions).
                break
        if shouldPrint:
            print(line)
def print_stats(start: int, end: int, save: bool, data: array) -> None:
    """
    Prints the aggregate statistics from the data in the given numpy array.

    When `save` is True the raw data is first written to a timestamped CSV
    via save_data().
    """
    if save:
        save_data(start, end, data)
    print('***** Stats *****')
    print('Date: {0}'.format(datetime.today().strftime('%Y-%b-%d')))
    # (label, status) pairs, printed in the original fixed order.
    counts = (('Unprocessed', CaseStatus.RECEIVED),
              ('New Card', CaseStatus.NEW_CARD),
              ('Approved', CaseStatus.APPROVED),
              ('Mailed', CaseStatus.MAILED),
              ('Delivered', CaseStatus.DELIVERED),
              ('Picked By USPS', CaseStatus.USPS_PICKED),
              ('Unknown', CaseStatus.UNKNOWN))
    for label, status in counts:
        print('{0}: {1}'.format(label, sum(data == status)))
def are_comparable(res1: array, res2: array) -> bool:
    """
    Returns True if two given result arrays are comparable, False otherwise.
    Two arrays are comparable if their minimum and maximum match and they
    are of the same length.
    """
    nums1 = res1['appNum']
    nums2 = res2['appNum']
    return (min(nums1) == min(nums2)
            and max(nums1) == max(nums2)
            and len(nums1) == len(nums2))
def extract_date_from_filename(filename: str) -> str:
    """Turn a 'YYYY-Mon-DD-...' data filename into 'Mon DD, YYYY'.

    Returns the input unchanged when its basename does not contain at
    least three dash-separated parts.
    """
    pieces = basename(filename).split('-')
    if len(pieces) > 2:
        year, month, day = pieces[0], pieces[1], pieces[2]
        return '{} {}, {}'.format(month, day, year)
    return filename
def pairwise(iterable):
    """Yield consecutive overlapping pairs: s -> (s0,s1), (s1,s2), (s2,s3), ..."""
    first, second = tee(iterable)
    next(second, None)  # advance the second iterator by one element
    return zip(first, second)
|
{"hexsha": "cc9731d0fe3da6617b05c6d4a88794c855d88b33", "size": 4139, "ext": "py", "lang": "Python", "max_stars_repo_path": "datautils.py", "max_stars_repo_name": "rohgarg/uscis-scrape", "max_stars_repo_head_hexsha": "73b882566d0b6ed92a0a25d459c0e2bd637f266c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datautils.py", "max_issues_repo_name": "rohgarg/uscis-scrape", "max_issues_repo_head_hexsha": "73b882566d0b6ed92a0a25d459c0e2bd637f266c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datautils.py", "max_forks_repo_name": "rohgarg/uscis-scrape", "max_forks_repo_head_hexsha": "73b882566d0b6ed92a0a25d459c0e2bd637f266c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6272727273, "max_line_length": 82, "alphanum_fraction": 0.6185068857, "include": true, "reason": "from numpy", "num_tokens": 1095}
|
try:
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.stats import linregress
except ImportError:
    # Install the missing third-party packages, then retry the imports so
    # the names are actually bound. (The original installed the packages
    # but never re-imported them, so the rest of the script would still
    # raise NameError on cv2/np.)
    from pip._internal import main as install
    packages = ["numpy", "opencv-python", "matplotlib", "scipy"]
    for package in packages:
        install(["install", package])
    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.stats import linregress
# getting all the mouse events
def mouseEvents():
    """Print the names of all mouse-event constants exposed by cv2."""
    event_names = [name for name in dir(cv2) if "EVENT" in name]
    print(event_names)
    return
""""
Drawing a circle point when, on the point where the mouse is clicked.
"""
image = np.zeros((500, 500, 3), np.uint8)
def drawCicle(event, x, y, flags, param):
    """Mouse callback: paint a small randomly-colored dot at the click point.

    Fires on left-button down/double-click events (both the cv2 constants
    and their raw integer codes 1 and 7) and refreshes the window.
    (Name kept as-is -- it is registered by the caller.)
    """
    left_click_events = (cv2.EVENT_LBUTTONDOWN, 1, cv2.EVENT_LBUTTONDBLCLK, 7)
    if event in left_click_events:
        color = (np.random.randint(0, 256),
                 np.random.randint(0, 256),
                 np.random.randint(0, 256))
        cv2.circle(image, (x, y), 3, color, -1)
        cv2.imshow("Mouse Event", image)
    return
def performTask():
    """Show the canvas, register the click handler, and wait for 'q' to close."""
    cv2.imshow("Mouse Event", image)
    cv2.setMouseCallback("Mouse Event", drawCicle)
    pressed = cv2.waitKey(0)
    # Only the 'q' key tears the window down; any other key leaves it open.
    if pressed & 0xff == ord('q'):
        cv2.destroyAllWindows()

performTask()
|
{"hexsha": "bc1b98d38bd3c37bfc02ce248bd3180b93ebc5fd", "size": 1152, "ext": "py", "lang": "Python", "max_stars_repo_path": "beginner/Open-Computer-Vision-Chapter-1/mouse-event.py", "max_stars_repo_name": "CrispenGari/opencv-python", "max_stars_repo_head_hexsha": "cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-08T07:37:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T07:37:05.000Z", "max_issues_repo_path": "beginner/Open-Computer-Vision-Chapter-1/mouse-event.py", "max_issues_repo_name": "CrispenGari/opencv-python", "max_issues_repo_head_hexsha": "cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "beginner/Open-Computer-Vision-Chapter-1/mouse-event.py", "max_forks_repo_name": "CrispenGari/opencv-python", "max_forks_repo_head_hexsha": "cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0434782609, "max_line_length": 122, "alphanum_fraction": 0.6458333333, "include": true, "reason": "import numpy,from scipy", "num_tokens": 317}
|
function [u, ind, baseDataTYPE] = getScanGroups(vw, baseDT, confirm)
% Group the scans within a dataTYPE into subgroups with identical annotations.
% The number of subgroups equals the number of scans with unique
% annotations.
%
% [u, ind, baseDataTYPE] = getScanGroups(vw, baseDT)
%
% Purpose:
%   Return the list of unique scan annotatons (u) and the corresponding scan
%   numbers (ind). This is useful if you want to do calculations on all
%   scans with the same annotation (e.g., you might want to make averages
%   of these scans, etc.).
%
% Inputs:
%   vw      - mrVista view struct (defaults to the current view)
%   baseDT  - dataTYPE index to group (defaults to the view's current one)
%   confirm - when true, show a dialog listing the groupings and let the
%             user abort (all outputs come back empty on abort)
%
% Example
%  [u, ind, baseDataTYPE] = getScanGroups;
% var check
% Pull in the mrVista globals (in particular the dataTYPES array read below).
mrGlobals;
%----------------------------------------------------------------------
% variable check
if notDefined('vw'), vw = getCurView; end
if notDefined('baseDT'), baseDT = viewGet(vw, 'curdatatype'); end
if notDefined('confirm'), confirm = false; end
%----------------------------------------------------------------------
% set view to base dataTYPE
vw = viewSet(vw, 'currentDataTYPE', baseDT);
% get the dataTYPE name
baseDataTYPE = dtGet(dataTYPES(baseDT), 'name');
% count the scans
nScans = viewGet(vw, 'nScans');
% get the annotation for each scan
annotation = cell(1,nScans);
for scan = 1:nScans;
    annotation{scan} = dtGet(dataTYPES(baseDT), 'annotation', scan);
end
% get a list of the unique scan annotations
u = unique(annotation);
% count them
nGroups = length(u);
% get the scan numbers that correspond to each unique scan
% (ind{g} holds the scan indices whose annotation equals u{g})
ind = cell(1,nGroups);
for scan =1:nGroups;
    ind{scan} = find(strcmp(u{scan},annotation));
end
% confirm the groupings
% Build one human-readable line per group and ask the user to approve;
% on anything but 'Yes', return empty outputs to signal an abort.
if confirm
    for group = 1:nGroups
        q{group} = [u{group} sprintf('\n') ...
            sprintf('%s scans ', baseDataTYPE)...
            num2str(ind{group})...
            sprintf(' -> Group %d\n', group)];
    end
    theanswer = questdlg(q, mfilename);
    if ~isequal(theanswer, 'Yes')
        fprintf('[%s]: Aborting....\n', mfilename);
        u = []; ind = []; baseDataTYPE =[];
    end
end
return
|
{"author": "vistalab", "repo": "vistasoft", "sha": "7f0102c696c091c858233340cc7e1ab02f064d4c", "save_path": "github-repos/MATLAB/vistalab-vistasoft", "path": "github-repos/MATLAB/vistalab-vistasoft/vistasoft-7f0102c696c091c858233340cc7e1ab02f064d4c/mrBOLD/Utilities/getScanGroups.m"}
|
\section*{Acknowledgements}
We would like to thank Prof.~Idit Keidar for her expert advice about
the quorum systems. In fact, the idea of separating the KV quorum
system from the auth quorum system first appeared in her email
messages. Also, Dr.~Edward Bortnikov and Prof.~Juan A.~Garay helped us
move the project forward in many ways.
|
{"hexsha": "2bd2bf7d6c2b0645c598867bd6406e93062b02b2", "size": 337, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/tex/ack.tex", "max_stars_repo_name": "dmitris/bftkv", "max_stars_repo_head_hexsha": "8769a830c87436922a2568f967aed891baf0cd5b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2017-09-29T22:46:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T22:27:28.000Z", "max_issues_repo_path": "docs/tex/ack.tex", "max_issues_repo_name": "dmitris/bftkv", "max_issues_repo_head_hexsha": "8769a830c87436922a2568f967aed891baf0cd5b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/tex/ack.tex", "max_forks_repo_name": "dmitris/bftkv", "max_forks_repo_head_hexsha": "8769a830c87436922a2568f967aed891baf0cd5b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-27T17:17:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-08T23:56:28.000Z", "avg_line_length": 48.1428571429, "max_line_length": 70, "alphanum_fraction": 0.7952522255, "num_tokens": 89}
|
"""SeqNN regression metrics."""
import pdb
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.losses import LossFunctionWrapper
from tensorflow.python.keras.utils import metrics_utils
################################################################################
# Losses
################################################################################
# def MeanSquaredErrorSpecificity(y_true, y_pred, spec_weight=1):
# mse_term = tf.keras.losses.mean_squared_error(y_pred, y_true)
# yn_true = y_true - tf.math.reduce_mean(y_true, axis=-1, keepdims=True)
# yn_pred = y_pred - tf.math.reduce_mean(y_pred, axis=-1, keepdims=True)
# spec_term = tf.keras.losses.mean_squared_error(yn_pred, yn_true)
# return mse_term + spec_weight*spec_term
def mean_squared_error_udot(y_true, y_pred, udot_weight=1):
  """MSE plus a weighted negative dot product of the mean-centered vectors.

  The second term rewards target-profile specificity: it is minimized when
  the centered prediction aligns with the centered target across the last
  (target) axis.
  """
  centered_true = y_true - tf.math.reduce_mean(y_true, axis=-1, keepdims=True)
  centered_pred = y_pred - tf.math.reduce_mean(y_pred, axis=-1, keepdims=True)
  udot = -tf.reduce_mean(centered_true * centered_pred, axis=-1)
  mse = tf.keras.losses.mean_squared_error(y_true, y_pred)
  return mse + udot_weight * udot
class MeanSquaredErrorUDot(LossFunctionWrapper):
  """Keras loss wrapping mean_squared_error_udot with a fixed udot_weight."""

  def __init__(self, udot_weight=1, reduction=losses_utils.ReductionV2.AUTO, name='mse_udot'):
    self.udot_weight = udot_weight
    super(MeanSquaredErrorUDot, self).__init__(
        lambda yt, yp: mean_squared_error_udot(yt, yp, self.udot_weight),
        name=name, reduction=reduction)
def poisson_kl(y_true, y_pred, kl_weight=1, epsilon=1e-3):
  """Poisson loss plus a weighted KL divergence between normalized profiles.

  epsilon guards the normalization and KL against zero entries.
  """
  # poisson loss on the raw values
  poisson_term = tf.keras.losses.poisson(y_true, y_pred)
  # add epsilon to protect against all tiny values
  y_true = y_true + epsilon
  y_pred = y_pred + epsilon
  # normalize each profile to sum to one along the target axis
  yn_true = y_true / tf.math.reduce_sum(y_true, axis=-1, keepdims=True)
  yn_pred = y_pred / tf.math.reduce_sum(y_pred, axis=-1, keepdims=True)
  # KL between the normalized distributions
  kl_term = tf.keras.losses.kl_divergence(yn_true, yn_pred)
  # weighted combination
  return poisson_term + kl_weight * kl_term
class PoissonKL(LossFunctionWrapper):
  """Keras loss wrapping poisson_kl with a fixed kl_weight."""

  def __init__(self, kl_weight=1, reduction=losses_utils.ReductionV2.AUTO, name='poisson_kl'):
    self.kl_weight = kl_weight
    super(PoissonKL, self).__init__(
        lambda yt, yp: poisson_kl(yt, yp, self.kl_weight),
        name=name, reduction=reduction)
def poisson_multinomial(y_true, y_pred, total_weight=1, epsilon=1e-6, rescale=False):
  """Total-count Poisson term plus a positional multinomial term.

  The Poisson term scores the per-target totals (summed over the length
  axis); the multinomial term scores where along the sequence those counts
  fall. Both are averaged over sequence length. With rescale=True the sum
  is scaled by 2/(1 + total_weight) to keep a 1:1 term ratio.
  """
  seq_len = y_true.shape[1]
  # add epsilon to protect logs/ratios from zeros
  y_true = y_true + epsilon
  y_pred = y_pred + epsilon
  # totals across the length axis
  s_true = tf.math.reduce_sum(y_true, axis=-2, keepdims=True)  # B x 1 x T
  s_pred = tf.math.reduce_sum(y_pred, axis=-2, keepdims=True)
  # predicted positional distribution
  p_pred = y_pred / s_pred
  # total count poisson loss, averaged over positions
  poisson_term = tf.keras.losses.poisson(s_true, s_pred) / seq_len  # B x T
  # multinomial (cross-entropy style) positional loss
  multinomial_dot = -tf.math.multiply(y_true, tf.math.log(p_pred))  # B x L x T
  multinomial_term = tf.math.reduce_sum(multinomial_dot, axis=-2) / seq_len  # B x T
  loss_raw = multinomial_term + total_weight * poisson_term
  if rescale:
    return loss_raw * 2 / (1 + total_weight)
  return loss_raw
class PoissonMultinomial(LossFunctionWrapper):
  """Keras loss wrapping poisson_multinomial with a fixed total_weight."""

  def __init__(self, total_weight=1, reduction=losses_utils.ReductionV2.AUTO, name='poisson_multinomial'):
    self.total_weight = total_weight
    super(PoissonMultinomial, self).__init__(
        lambda yt, yp: poisson_multinomial(yt, yp, self.total_weight),
        name=name, reduction=reduction)
################################################################################
# Metrics
################################################################################
class SeqAUC(tf.keras.metrics.AUC):
  """AUC (ROC or PR) over sequence predictions.

  Flattens (batch, length, targets) inputs to (batch*length, targets) and
  accumulates a multi-label AUC, one label per target. With
  summarize=False, result() returns the per-target AUC vector instead of
  the averaged scalar.

  Fix: the weighted multi-label branch of result() called
  tf.math.div_no_nan, which is not a public TF2 symbol (AttributeError as
  soon as label_weights are set); it now calls tf.math.divide_no_nan,
  matching the call already used in interpolate_pr_auc.
  """

  def __init__(self, curve='ROC', name=None, summarize=True, **kwargs):
    # default the metric name from the curve type
    if name is None:
      if curve == 'ROC':
        name = 'auroc'
      elif curve == 'PR':
        name = 'auprc'
    super(SeqAUC, self).__init__(curve=curve, name=name, multi_label=True, **kwargs)
    self._summarize = summarize

  def update_state(self, y_true, y_pred, **kwargs):
    """Flatten sequence length before update."""
    # flatten batch and sequence length
    num_targets = y_pred.shape[-1]
    y_true = tf.reshape(y_true, (-1, num_targets))
    y_pred = tf.reshape(y_pred, (-1, num_targets))

    # update
    super(SeqAUC, self).update_state(y_true, y_pred, **kwargs)

  def interpolate_pr_auc(self):
    """Parent's interpolated PR-AUC, with the option to skip summarization."""
    # delta true positives / delta positives between adjacent thresholds
    dtp = self.true_positives[:self.num_thresholds - 1] - self.true_positives[1:]
    p = tf.math.add(self.true_positives, self.false_positives)
    dp = p[:self.num_thresholds - 1] - p[1:]
    prec_slope = tf.math.divide_no_nan(
        dtp, tf.maximum(dp, 0), name='prec_slope')
    intercept = self.true_positives[1:] - tf.multiply(prec_slope, p[1:])

    # ratio of adjacent positive counts, guarded against zeros
    safe_p_ratio = tf.where(
        tf.logical_and(p[:self.num_thresholds - 1] > 0, p[1:] > 0),
        tf.math.divide_no_nan(
            p[:self.num_thresholds - 1],
            tf.maximum(p[1:], 0),
            name='recall_relative_ratio'),
        tf.ones_like(p[1:]))

    pr_auc_increment = tf.math.divide_no_nan(
        prec_slope * (dtp + intercept * tf.math.log(safe_p_ratio)),
        tf.maximum(self.true_positives[1:] + self.false_negatives[1:], 0),
        name='pr_auc_increment')

    if self.multi_label:
      by_label_auc = tf.reduce_sum(
          pr_auc_increment, name=self.name + '_by_label', axis=0)
      if self._summarize:
        if self.label_weights is None:
          # Evenly weighted average of the label AUCs.
          return tf.reduce_mean(by_label_auc, name=self.name)
        else:
          # Weighted average of the label AUCs.
          return tf.math.divide_no_nan(
              tf.reduce_sum(
                  tf.multiply(by_label_auc, self.label_weights)),
              tf.reduce_sum(self.label_weights),
              name=self.name)
      else:
        return by_label_auc
    else:
      if self._summarize:
        return tf.reduce_sum(pr_auc_increment, name='interpolate_pr_auc')
      else:
        return pr_auc_increment

  def result(self):
    """Parent's result(), with the option to skip summarization.

    It's not clear why, but these metrics_utils == aren't working for tf.26 on.
    I'm hacking a solution to compare the values instead."""
    if (self.curve.value == metrics_utils.AUCCurve.PR.value and
        self.summation_method.value == metrics_utils.AUCSummationMethod.INTERPOLATION.value
       ):
      # This use case is different and is handled separately.
      return self.interpolate_pr_auc()

    # Set `x` and `y` values for the curves based on `curve` config.
    recall = tf.math.divide_no_nan(
        self.true_positives,
        tf.math.add(self.true_positives, self.false_negatives))
    if self.curve.value == metrics_utils.AUCCurve.ROC.value:
      fp_rate = tf.math.divide_no_nan(
          self.false_positives,
          tf.math.add(self.false_positives, self.true_negatives))
      x = fp_rate
      y = recall
    else:  # curve == 'PR'.
      precision = tf.math.divide_no_nan(
          self.true_positives,
          tf.math.add(self.true_positives, self.false_positives))
      x = recall
      y = precision

    # Find the rectangle heights based on `summation_method`.
    if self.summation_method.value == metrics_utils.AUCSummationMethod.INTERPOLATION.value:
      # Note: the case ('PR', 'interpolation') has been handled above.
      heights = (y[:self.num_thresholds - 1] + y[1:]) / 2.
    elif self.summation_method.value == metrics_utils.AUCSummationMethod.MINORING.value:
      heights = tf.minimum(y[:self.num_thresholds - 1], y[1:])
    else:  # self.summation_method = metrics_utils.AUCSummationMethod.MAJORING:
      heights = tf.maximum(y[:self.num_thresholds - 1], y[1:])

    # Sum up the areas of all the rectangles.
    if self.multi_label:
      riemann_terms = tf.multiply(x[:self.num_thresholds - 1] - x[1:],
                                  heights)
      by_label_auc = tf.reduce_sum(
          riemann_terms, name=self.name + '_by_label', axis=0)

      if self._summarize:
        if self.label_weights is None:
          # Unweighted average of the label AUCs.
          return tf.reduce_mean(by_label_auc, name=self.name)
        else:
          # Weighted average of the label AUCs.
          # (was tf.math.div_no_nan -- not a public TF2 symbol)
          return tf.math.divide_no_nan(
              tf.reduce_sum(
                  tf.multiply(by_label_auc, self.label_weights)),
              tf.reduce_sum(self.label_weights),
              name=self.name)
      else:
        return by_label_auc
    else:
      if self._summarize:
        return tf.reduce_sum(
            tf.multiply(x[:self.num_thresholds-1] - x[1:], heights),
            name=self.name)
      else:
        return tf.multiply(x[:self.num_thresholds-1] - x[1:], heights)
class PearsonR(tf.keras.metrics.Metric):
  """Streaming Pearson correlation, one value per target.

  Accumulates the sufficient statistics (counts, sums, sums of squares,
  and cross products) across batches; result() derives the correlation
  from them. With summarize=False the per-target vector is returned.
  """

  def __init__(self, num_targets, summarize=True, name='pearsonr', **kwargs):
    super(PearsonR, self).__init__(name=name, **kwargs)
    self._summarize = summarize
    self._shape = (num_targets,)
    # running sufficient statistics, one slot per target
    self._count = self.add_weight(name='count', shape=self._shape, initializer='zeros')
    self._product = self.add_weight(name='product', shape=self._shape, initializer='zeros')
    self._true_sum = self.add_weight(name='true_sum', shape=self._shape, initializer='zeros')
    self._true_sumsq = self.add_weight(name='true_sumsq', shape=self._shape, initializer='zeros')
    self._pred_sum = self.add_weight(name='pred_sum', shape=self._shape, initializer='zeros')
    self._pred_sumsq = self.add_weight(name='pred_sumsq', shape=self._shape, initializer='zeros')

  def update_state(self, y_true, y_pred, sample_weight=None):
    y_true = tf.cast(y_true, 'float32')
    y_pred = tf.cast(y_pred, 'float32')
    # collapse the batch axis (and the length axis, when present),
    # keeping the trailing target axis
    reduce_axes = 0 if len(y_true.shape) == 2 else [0, 1]

    self._product.assign_add(tf.reduce_sum(y_true * y_pred, axis=reduce_axes))
    self._true_sum.assign_add(tf.reduce_sum(y_true, axis=reduce_axes))
    self._true_sumsq.assign_add(tf.reduce_sum(tf.math.square(y_true), axis=reduce_axes))
    self._pred_sum.assign_add(tf.reduce_sum(y_pred, axis=reduce_axes))
    self._pred_sumsq.assign_add(tf.reduce_sum(tf.math.square(y_pred), axis=reduce_axes))
    self._count.assign_add(tf.reduce_sum(tf.ones_like(y_true), axis=reduce_axes))

  def result(self):
    true_mean = tf.divide(self._true_sum, self._count)
    pred_mean = tf.divide(self._pred_sum, self._count)

    # covariance via the expanded form:
    #   E[xy] - mean_x*sum_y - mean_y*sum_x + n*mean_x*mean_y
    covariance = (self._product
                  - tf.multiply(true_mean, self._pred_sum)
                  - tf.multiply(pred_mean, self._true_sum)
                  + tf.multiply(self._count, tf.multiply(true_mean, pred_mean)))

    true_var = self._true_sumsq - tf.multiply(self._count, tf.math.square(true_mean))
    pred_var = self._pred_sumsq - tf.multiply(self._count, tf.math.square(pred_mean))
    # near-zero prediction variance -> divide by inf -> correlation 0
    pred_var = tf.where(tf.greater(pred_var, 1e-12),
                        pred_var,
                        np.inf * tf.ones_like(pred_var))

    tp_var = tf.multiply(tf.math.sqrt(true_var), tf.math.sqrt(pred_var))
    correlation = tf.divide(covariance, tp_var)
    return tf.reduce_mean(correlation) if self._summarize else correlation

  def reset_state(self):
    # zero out every accumulator variable
    zeros = np.zeros(self._shape)
    K.batch_set_value([(v, zeros) for v in self.variables])
class R2(tf.keras.metrics.Metric):
  """Streaming R-squared (coefficient of determination), one value per target.

  Accumulates counts, sums, sums of squares, and cross products across
  batches; result() derives R^2 = 1 - RSS/TSS from them. With
  summarize=False the per-target vector is returned.
  """

  def __init__(self, num_targets, summarize=True, name='r2', **kwargs):
    super(R2, self).__init__(name=name, **kwargs)
    self._summarize = summarize
    self._shape = (num_targets,)
    # running sufficient statistics, one slot per target
    self._count = self.add_weight(name='count', shape=self._shape, initializer='zeros')
    self._true_sum = self.add_weight(name='true_sum', shape=self._shape, initializer='zeros')
    self._true_sumsq = self.add_weight(name='true_sumsq', shape=self._shape, initializer='zeros')
    self._product = self.add_weight(name='product', shape=self._shape, initializer='zeros')
    self._pred_sumsq = self.add_weight(name='pred_sumsq', shape=self._shape, initializer='zeros')

  def update_state(self, y_true, y_pred, sample_weight=None):
    y_true = tf.cast(y_true, 'float32')
    y_pred = tf.cast(y_pred, 'float32')
    # collapse the batch axis (and the length axis, when present),
    # keeping the trailing target axis
    reduce_axes = 0 if len(y_true.shape) == 2 else [0, 1]

    self._true_sum.assign_add(tf.reduce_sum(y_true, axis=reduce_axes))
    self._true_sumsq.assign_add(tf.reduce_sum(tf.math.square(y_true), axis=reduce_axes))
    self._product.assign_add(tf.reduce_sum(y_true * y_pred, axis=reduce_axes))
    self._pred_sumsq.assign_add(tf.reduce_sum(tf.math.square(y_pred), axis=reduce_axes))
    self._count.assign_add(tf.reduce_sum(tf.ones_like(y_true), axis=reduce_axes))

  def result(self):
    true_mean = tf.divide(self._true_sum, self._count)
    # total sum of squares around the true mean
    total = self._true_sumsq - tf.multiply(self._count, tf.math.square(true_mean))
    # residual sum of squares, expanded: sum(pred^2) - 2*sum(true*pred) + sum(true^2)
    resid = self._pred_sumsq + (-2 * self._product) + self._true_sumsq

    r2 = tf.ones_like(self._shape, dtype=tf.float32) - tf.divide(resid, total)
    return tf.reduce_mean(r2) if self._summarize else r2

  def reset_state(self):
    # zero out every accumulator variable
    zeros = np.zeros(self._shape)
    K.batch_set_value([(v, zeros) for v in self.variables])
|
{"hexsha": "2a86e0bd2ae44cb8fddf454b967b65a350b4f614", "size": 14011, "ext": "py", "lang": "Python", "max_stars_repo_path": "basenji/metrics.py", "max_stars_repo_name": "prasathlab/basenji", "max_stars_repo_head_hexsha": "d61389dc553aa610544503a3e937c1b53906fe35", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "basenji/metrics.py", "max_issues_repo_name": "prasathlab/basenji", "max_issues_repo_head_hexsha": "d61389dc553aa610544503a3e937c1b53906fe35", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basenji/metrics.py", "max_forks_repo_name": "prasathlab/basenji", "max_forks_repo_head_hexsha": "d61389dc553aa610544503a3e937c1b53906fe35", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4625668449, "max_line_length": 106, "alphanum_fraction": 0.6711155521, "include": true, "reason": "import numpy", "num_tokens": 3642}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.