text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import streamlit as st
import numpy as np
def display_population_widgets(p, selected_states, selected_counties, nat_data) -> int:
    """Render the sidebar population widgets and return the regional population.

    Computes the population of the selected state/county region from the
    national census frame, shows it, and optionally lets the user override it
    with a manual value. The parameters object ``p`` is mutated in place to
    persist the widget state across reruns.

    Args:
        p: parameters object holding ``override_population``,
            ``population_manual_override`` and ``population`` attributes
        selected_states: iterable of state names to include
        selected_counties: iterable of county names to include
        nat_data: national census DataFrame with ``state``, ``county`` and
            ``pop_est2019`` columns

    Returns:
        int: the calculated (or manually overridden) regional population
    """
    in_region = nat_data.state.isin(selected_states) & nat_data.county.isin(selected_counties)
    region = nat_data.loc[in_region]
    # NOTE(review): summing the *unique* estimates assumes no two selected
    # counties share an identical pop_est2019 value — confirm upstream data.
    population = int(np.sum(region.pop_est2019.unique()).item())
    st.subheader(f"""Calculated Regional Population: {population:,}""")

    use_override = st.checkbox(
        "Override Calculated Population",
        value=p.override_population,
    )
    p.override_population = use_override

    if use_override:
        # Seed the input with the calculated value the first time around,
        # otherwise with whatever the user previously entered.
        default_value = (
            population
            if p.population_manual_override is None
            else p.population_manual_override
        )
        manual_population = st.number_input(
            "Population",
            min_value=0,
            max_value=8000000000,
            step=100000,
            value=default_value,
        )
        p.population_manual_override = manual_population
        population = manual_population

    p.population = population
    return population
|
{"hexsha": "ea8c8e200cdb1d187678555604f992bb9515e5ed", "size": 1020, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/penn_chime/population.py", "max_stars_repo_name": "mmastand/chime", "max_stars_repo_head_hexsha": "6012a9b0e921d7d55ad4b8b7432740807cce721d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/penn_chime/population.py", "max_issues_repo_name": "mmastand/chime", "max_issues_repo_head_hexsha": "6012a9b0e921d7d55ad4b8b7432740807cce721d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/penn_chime/population.py", "max_forks_repo_name": "mmastand/chime", "max_forks_repo_head_hexsha": "6012a9b0e921d7d55ad4b8b7432740807cce721d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5, "max_line_length": 106, "alphanum_fraction": 0.7117647059, "include": true, "reason": "import numpy", "num_tokens": 204}
|
"""
Pymer4 Lmer Class
=================
Main class to wrap R's lme4 library
"""
from copy import copy
from rpy2.robjects.packages import importr
import rpy2.robjects as robjects
from rpy2.rinterface_lib import callbacks
from rpy2.robjects import numpy2ri
import rpy2.rinterface as rinterface
import warnings
import traceback
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ..utils import (
_sig_stars,
_perm_find,
_return_t,
_to_ranks_by_group,
con2R,
pandas2R,
)
from pandas.api.types import CategoricalDtype
# Import R libraries we need
base = importr("base")
stats = importr("stats")
# Enable automatic numpy <-> R array conversion for every rpy2 call below
numpy2ri.activate()
# Make a reference to the default R console writer from rpy2
# so Lmer._set_R_stdout can restore it after silencing R output
consolewrite_warning_backup = callbacks.consolewrite_warnerror
consolewrite_print_backup = callbacks.consolewrite_print
class Lmer(object):
"""
Model class to hold data outputted from fitting lmer in R and converting to Python object. This class stores as much information as it can about a merMod object computed using lmer and lmerTest in R. Most attributes will not be computed until the fit method is called.
Args:
formula (str): Complete lmer-style model formula
data (pandas.core.frame.DataFrame): input data
family (string): what distribution family (i.e.) link function to use for the generalized model; default is gaussian (linear model)
Attributes:
fitted (bool): whether model has been fit
formula (str): model formula
data (pd.DataFrame): model copy of input data
grps (dict): groups and number of observations per groups recognized by lmer
design_matrix (pd.DataFrame): model design matrix determined by lmer
AIC (float): model akaike information criterion
logLike (float): model Log-likelihood
family (string): model family
warnings (list): warnings output from R or Python
ranef (pd.DataFrame/list): cluster-level differences from population parameters, i.e. difference between coefs and fixefs; returns list if multiple cluster variables are used to specify random effects (e.g. subjects and items)
fixef (pd.DataFrame/list): cluster-level parameters; returns list if multiple cluster variables are used to specify random effects (e.g. subjects and items)
coefs (pandas.core.frame.DataFrame/list): model summary table of population parameters
ranef_var (pd.DataFrame): random effects variances
ranef_corr(pd.DataFrame): random effects correlations
residuals (numpy.ndarray): model residuals
fits (numpy.ndarray): model fits/predictions
model_obj (lmer model): rpy2 lmer model object
factors (dict): factors used to fit the model if any
"""
def __init__(self, formula, data, family="gaussian"):
self.family = family
implemented_fams = [
"gaussian",
"binomial",
"gamma",
"inverse_gaussian",
"poisson",
]
if self.family not in implemented_fams:
raise ValueError(
"Family must be one of: gaussian, binomial, gamma, inverse_gaussian or poisson!"
)
self.fitted = False
self.formula = formula.replace(" ", "")
self.data = copy(data)
self.grps = None
self.AIC = None
self.logLike = None
self.warnings = []
self.ranef_var = None
self.ranef_corr = None
self.ranef = None
self.fixef = None
self.design_matrix = None
self.residuals = None
self.coefs = None
self.model_obj = None
self.factors = None
self.contrast_codes = None
self.ranked_data = False
self.marginal_estimates = None
self.marginal_contrasts = None
self.sig_type = None
self.factors_prev_ = None
self.contrasts = None
def __repr__(self):
out = "{}(fitted = {}, formula = {}, family = {})".format(
self.__class__.__module__, self.fitted, self.formula, self.family
)
return out
def _make_factors(self, factor_dict, ordered=False):
    """
    Convert specific columns to R-style factors. Default scheme is dummy coding where reference is 1st level provided. Alternative is orthogonal polynomial contrasts. User can also specify custom contrasts.

    Args:
        factor_dict: (dict) dictionary with column names specified as keys and values as a list for dummy/treatment/polynomial contrast; or a dict with keys as factor levels and values as desired comparisons in human readable format
        ordered: (bool) whether to interpret factor_dict values as dummy-coded (1st list item is reference level) or as polynomial contrasts (linear contrast specified by order of list items); ignored if factor_dict values are not a list

    Returns:
        pandas.core.frame.DataFrame: copy of original data with factorized columns

    Examples:

        Dummy/treatment contrasts with 'A' as the reference level and other contrasts as 'B'-'A' and 'C'-'A'

        >>> _make_factors(factor_dict={'factor': ['A','B','C']})

        Same as above but a linear contrast (and automatically computed quadratic contrast) of A < B < C

        >>> _make_factors(factor_dict={'factor': ['A','B','C']}, ordered=True)

        Custom contrast of 'A' - mean('B', 'C')

        >>> _make_factors(factor_dict={'factor': {'A': 1, 'B': -0.5, 'C': -0.5}})
    """
    errormsg = "factors should be specified as a dictionary with values as one of:\n1) a list with factor levels in the desired order for dummy/treatment/polynomial contrasts\n2) a dict with keys as factor levels and values as desired comparisons in human readable format"
    # We create a copy of data because we need to convert dtypes to categories and then pass them to R. However, resetting categories on the *same* dataframe and passing to R repeatedly (e.g. multiple calls to .fit with different contrasats) does not work as R only uses the 1st category spec. So instead we create a copy and return that copy to get used by .fit
    out = {}
    df = self.data.copy()
    if not isinstance(factor_dict, dict):
        raise TypeError(errormsg)
    for factor, contrasts in factor_dict.items():
        # First convert to a string type because R needs string based categories
        df[factor] = df[factor].apply(str)
        # Treatment/poly contrasts
        if isinstance(contrasts, list):
            # Ensure that all factor levels are accounted for
            if not all([e in contrasts for e in df[factor].unique()]):
                raise ValueError(
                    "Not all factor levels are specified in the desired contrast"
                )
            # Define and apply a pandas categorical type in the same order as requested, which will get converted to the right factor levels in R
            cat = CategoricalDtype(contrasts)
            df[factor] = df[factor].astype(cat)
            if ordered:
                # Polynomial contrasts (R's contr.poly); numpy2ri converts the
                # returned R matrix to an ndarray
                con_codes = np.array(stats.contr_poly(len(contrasts)))
            else:
                # Treatment/dummy contrasts (R's contr.treatment)
                con_codes = np.array(stats.contr_treatment(len(contrasts)))
            out[factor] = con_codes
        # Custom contrasts (human readable)
        elif isinstance(contrasts, dict):
            factor_levels = list(contrasts.keys())
            cons = list(contrasts.values())
            # Ensure that all factor levels are accounted for
            if not all([e in factor_levels for e in df[factor].unique()]):
                raise ValueError(
                    "Not all factor levels are specified in the desired contrast"
                )
            # Define and apply categorical type in the same order as requested
            cat = CategoricalDtype(factor_levels)
            df[factor] = df[factor].astype(cat)
            # Compute desired contrasts in R format along with additional k - 1 contrasts not specified
            con_codes = con2R(cons)
            out[factor] = con_codes
        else:
            raise TypeError(errormsg)
    self.factors = factor_dict
    self.contrast_codes = out
    # ListVector maps column name -> contrast matrix for lmer's contrasts= arg
    return robjects.ListVector(out), df
def _refit_orthogonal(self):
    """
    Refit the model with every factor recoded as orthogonal polynomial contrasts so that `.anova()` yields valid type-3 SS. The previous factor and contrast specifications are stashed in `model.factors_prev_` and `model.contrast_codes_prev_`.
    """
    self.factors_prev_ = copy(self.factors)
    self.contrast_codes_prev_ = copy(self.contrast_codes)
    # Re-specify each factor as the sorted list of its unique (stringified)
    # levels; passing ordered=True below makes _make_factors build
    # polynomial contrasts from that ordering.
    new_factors = {
        factor: sorted(list(map(str, self.data[factor].unique())))
        for factor in self.factors.keys()
    }
    self.fit(
        factors=new_factors,
        ordered=True,
        summarize=False,
        permute=self._permute,
        conf_int=self._conf_int,
        REML=self._REML,
    )
def anova(self, force_orthogonal=False):
    """
    Return a type-3 ANOVA table from a fitted model. Like R, this method does not ensure that contrasts are orthogonal to ensure correct type-3 SS computation. However, the force_orthogonal flag can refit the regression model with orthogonal polynomial contrasts automatically guaranteeing valid SS type 3 inferences. Note that this will overwrite factors specified in the last call to `.fit()`

    Args:
        force_orthogonal (bool): whether factors in the model should be recoded using polynomial contrasts to ensure valid type-3 SS calculations. If set to True, previous factor specifications will be saved in `model.factors_prev_`; default False

    Returns:
        pd.DataFrame: Type 3 ANOVA results
    """
    if self.factors:
        # Model can only have factors if it's been fit
        if force_orthogonal:
            self._refit_orthogonal()
    elif not self.fitted:
        raise ValueError("Model must be fit before ANOVA table can be generated!")
    rstring = """
        function(model){
        df<- anova(model)
        df
        }
        """
    anova_func = robjects.r(rstring)
    self.anova_results = pd.DataFrame(anova_func(self.model_obj))
    n_cols = self.anova_results.shape[1]
    if n_cols == 6:
        # lmerTest supplied Satterthwaite dfs and p-values
        self.anova_results.columns = [
            "SS",
            "MS",
            "NumDF",
            "DenomDF",
            "F-stat",
            "P-val",
        ]
        self.anova_results["Sig"] = self.anova_results["P-val"].apply(
            lambda x: _sig_stars(x)
        )
    elif n_cols == 4:
        # lmerTest failed to compute p-values; fall back to the bare table
        warnings.warn(
            "MODELING FIT WARNING! Check model.warnings!! P-value computation did not occur because lmerTest choked. Possible issue(s): ranefx have too many parameters or too little variance..."
        )
        self.anova_results.columns = ["DF", "SS", "MS", "F-stat"]
    note = (
        "(NOTE: Model refit with orthogonal polynomial contrasts)"
        if force_orthogonal
        else "(NOTE: Using original model contrasts, orthogonality not guaranteed)"
    )
    print(
        "SS Type III Analysis of Variance Table with Satterthwaite approximated degrees of freedom:\n"
        + note
    )
    return self.anova_results
def _get_ngrps(self, unsum, base):
"""Get the groups information from the model as a dictionary
"""
# This works for 2 grouping factors
ns = unsum.rx2("ngrps")
names = base.names(self.model_obj.slots["flist"])
self.grps = dict(zip(names, ns))
def _set_R_stdout(self, verbose):
    """Adjust whether R prints to the console (often as a duplicate) based on the verbose flag of a method call. Reference to rpy2 interface here: https://bit.ly/2MsrufO"""
    if verbose:
        # use the default logging in R
        callbacks.consolewrite_warnerror = consolewrite_warning_backup
        return
    # Route R warnings/errors into a throwaway list buffer instead
    buf = []

    def _swallow(msg):
        buf.append(msg)

    callbacks.consolewrite_warnerror = _swallow
def fit(
    self,
    conf_int="Wald",
    n_boot=500,
    factors=None,
    permute=False,
    ordered=False,
    verbose=False,
    REML=True,
    rank=False,
    rank_group="",
    rank_exclude_cols=[],  # NOTE(review): mutable default — never mutated here, but a None sentinel would be safer
    no_warnings=False,
    control="",
    old_optimizer=False,
    **kwargs,
):
    """
    Main method for fitting model object. Will modify the model's data attribute to add columns for residuals and fits for convenience. Factors should be specified as a dictionary with values as a list or themselves a dictionary of *human readable* contrasts *not* R-style contrast codes as these will be auto-converted for you. See the factors docstring and examples below. After fitting, the .factors attribute will store a reference to the user-specified dictionary. The .contrast_codes model attributes will store the requested comparisons in converted R format. Note that Lmer estimate naming conventions differs a bit from R: Lmer.coefs = summary(model); Lmer.fixefs = coefs(model); Lmer.ranef = ranef(model)

    Args:
        conf_int (str): which method to compute confidence intervals; 'profile', 'Wald' (default), or 'boot' (parametric bootstrap)
        n_boot (int): number of bootstrap intervals if bootstrapped confidence intervals are requests; default 500
        factors (dict): dictionary with column names specified as keys and values as a list for dummy/treatment/polynomial contrast or a dict with keys as factor leves and values as desired comparisons in human readable format See examples below
        permute (int): if non-zero, computes parameter significance tests by permuting test stastics rather than parametrically. Permutation is done by shuffling observations within clusters to respect random effects structure of data.
        ordered (bool): whether factors should be treated as ordered polynomial contrasts; this will parameterize a model with K-1 orthogonal polynomial regressors beginning with a linear contrast based on the factor order provided; default is False
        summarize/summary (bool): whether to print a model summary after fitting; default is True
        verbose (bool): whether to print when and which model and confidence interval are being fitted
        REML (bool): whether to fit using restricted maximum likelihood estimation instead of maximum likelihood estimation; default True
        rank (bool): covert predictors in model formula to ranks by group prior to estimation. Model object will still contain original data not ranked data; default False
        rank_group (str): column name to group data on prior to rank conversion
        rank_exclude_cols (list/str): columns in model formula to not apply rank conversion to
        no_warnings (bool): turn off auto-printing warnings messages; warnings are always stored in the .warnings attribute; default False
        control (str): string containing options to be passed to (g)lmer control. See https://bit.ly/2OQONTH for options
        old_optimizer (bool): use the old bobyqa optimizer that was the default in lmer4 <= 1.1_20, i.e. prior to 02/04/2019. This is not compatible with the control setting as it's meant to be a quick shorthand (e.g. to reproduce previous model results). However, the same setting can be manually requested using the control option if preferred. (For optimizer change discussions see: https://bit.ly/2MrP9Nq and https://bit.ly/2Vx5jte )

    Returns:
        pd.DataFrame: R/statsmodels style summary

    Examples:
        The following examples demonstrate how to treat variables as categorical factors.

        Dummy-Coding: Treat Col1 as a factor which 3 levels: A, B, C. Use dummy-coding with A as the reference level. Model intercept will be mean of A, and parameters will be B-A, and C-A.

        >>> model.fit(factors = {"Col1": ['A','B','C']})

        Orthogonal Polynomials: Treat Col1 as a factor which 3 levels: A, B, C. Estimate a linear contrast of C > B > A. Model intercept will be grand-mean of all levels, and parameters will be linear contrast, and orthogonal polynomial contrast (auto-computed).

        >>> model.fit(factors = {"Col1": ['A','B','C']}, ordered=True)

        Custom-contrast: Treat Col1 as a factor which 3 levels: A, B, C. Compare A to the mean of B and C. Model intercept will be the grand-mean of all levels, and parameters will be the desired contrast, a well as an automatically determined orthogonal contrast.

        >>> model.fit(factors = {"Col1": {'A': 1, 'B': -.5, 'C': -.5}}))

        Here is an example specifying stricter deviance and paramter values stopping criteria.

        >>> model.fit(control="optCtrl = list(ftol_abs=1e-8, xtol_abs=1e-8)")

        Here is an example specifying a different optimizer in addition to stricter deviance and paramter values stopping criteria.

        >>> model.fit(control="optimizer='Nelder_Mead', optCtrl = list(FtolAbs=1e-8, XtolRel=1e-8)")

        Here is an example using the default optimization in previous versions of lme4 prior to the 2019 update.

        >>> model.fit(old_optimizer=True)
    """
    # --- Argument parsing and validation ---
    # Alllow summary or summarize for compatibility
    if "summary" in kwargs and "summarize" in kwargs:
        raise ValueError(
            "You specified both summary and summarize, please prefer summarize"
        )
    summarize = kwargs.pop("summarize", True)
    summarize = kwargs.pop("summary", summarize)
    # Save params for future calls (e.g. _refit_orthogonal)
    self._permute = permute
    self._conf_int = conf_int
    # REML only applies to linear models; glmer always uses ML
    self._REML = REML if self.family == "gaussian" else False
    self._set_R_stdout(verbose)

    if permute is True:
        raise TypeError(
            "permute should 'False' or the number of permutations to perform"
        )

    if old_optimizer:
        if control:
            raise ValueError(
                "Must specify EITHER control OR old_optimizer not both"
            )
        else:
            control = "optimizer='bobyqa'"

    # --- Prepare data: factor coding and optional rank conversion ---
    if factors:
        contrasts, dat = self._make_factors(factors, ordered)
    else:
        contrasts = rinterface.NULL
        dat = self.data
    if rank:
        if not rank_group:
            raise ValueError("rank_group must be provided if rank is True")
        dat = _to_ranks_by_group(
            self.data, rank_group, self.formula, rank_exclude_cols
        )
        if factors and (set(factors.keys()) != set(rank_exclude_cols)):
            w = "Factors and ranks requested, but factors are not excluded from rank conversion. Are you sure you wanted to do this?"
            warnings.warn(w)
            self.warnings.append(w)
    if conf_int == "boot":
        self.sig_type = "bootstrapped"
    else:
        if permute:
            self.sig_type = "permutation" + " (" + str(permute) + ")"
        else:
            self.sig_type = "parametric"

    # --- Fit the model in R via lmerTest (gaussian) or lme4 (GLMMs) ---
    data = pandas2R(dat)
    if self.family == "gaussian":
        _fam = "gaussian"
        if verbose:
            print(
                f"Fitting linear model using lmer with {conf_int} confidence intervals...\n"
            )
        lmer = importr("lmerTest")
        lmc = robjects.r(f"lmerControl({control})")
        self.model_obj = lmer.lmer(
            self.formula, data=data, REML=REML, control=lmc, contrasts=contrasts
        )
    else:
        if verbose:
            print(
                f"Fitting generalized linear model using glmer (family {self.family}) with {conf_int} confidence intervals...\n"
            )
        lmer = importr("lme4")
        # Translate our family names to R's spelling
        if self.family == "inverse_gaussian":
            _fam = "inverse.gaussian"
        elif self.family == "gamma":
            _fam = "Gamma"
        else:
            _fam = self.family
        lmc = robjects.r(f"glmerControl({control})")
        self.model_obj = lmer.glmer(
            self.formula, data=data, family=_fam, control=lmc, contrasts=contrasts,
        )

    # Store design matrix and get number of IVs for inference
    design_matrix = stats.model_matrix(self.model_obj)
    if design_matrix:
        self.design_matrix = pd.DataFrame(base.data_frame(design_matrix))
        num_IV = self.design_matrix.shape[1]
    else:
        num_IV = 0

    if permute and verbose:
        print("Using {} permutations to determine significance...".format(permute))

    # --- Extract scalar fit statistics and warnings from the R summary ---
    summary = base.summary(self.model_obj)
    unsum = base.unclass(summary)

    # Do scalars first cause they're easier
    # Get group names separately cause rpy2 > 2.9 is weird and doesnt return them above
    try:
        self._get_ngrps(unsum, base)
    except Exception as e:  # NOQA
        print(traceback.format_exc())
        raise Exception(
            "The rpy2, lme4, or lmerTest API appears to have changed again. Please file a bug report at https://github.com/ejolly/pymer4/issues with your R, Python, rpy2, lme4, and lmerTest versions and the OS you're running pymer4 on. Apologies."
        )

    self.AIC = unsum.rx2("AICtab")[0]
    self.logLike = unsum.rx2("logLik")[0]

    # First check for lme4 printed messages (e.g. convergence info is usually here instead of in warnings)
    fit_messages = unsum.rx2("optinfo").rx2("conv").rx2("lme4").rx2("messages")
    # Then check warnings for additional stuff
    fit_warnings = unsum.rx2("optinfo").rx2("warnings")

    # rx2 returns NULL (not iterable) when there are no messages/warnings
    try:
        fit_warnings = [fw for fw in fit_warnings]
    except TypeError:
        fit_warnings = []
    try:
        fit_messages = [fm for fm in fit_messages]
    except TypeError:
        fit_messages = []

    fit_messages_warnings = fit_warnings + fit_messages
    if fit_messages_warnings:
        self.warnings.extend(fit_messages_warnings)
        if not no_warnings:
            for warning in self.warnings:
                if isinstance(warning, list) | isinstance(warning, np.ndarray):
                    for w in warning:
                        print(w + " \n")
                else:
                    print(warning + " \n")
    else:
        self.warnings = []

    # --- Coefficients, and inference statistics ---
    if num_IV != 0:
        if self.family in ["gaussian", "gamma", "inverse_gaussian", "poisson"]:
            # Build an R closure that binds coefficients to confidence
            # intervals; 'sig'-named rows at the top of confint output are
            # random-effect terms and get dropped.
            rstring = (
                """
                function(model){
                out.coef <- data.frame(unclass(summary(model))$coefficients)
                out.ci <- data.frame(confint(model,method='"""
                + conf_int
                + """',nsim="""
                + str(n_boot)
                + """))
                n <- c(rownames(out.ci))
                idx <- max(grep('sig',n))
                out.ci <- out.ci[-seq(1:idx),]
                out <- cbind(out.coef,out.ci)
                list(out,rownames(out))
                }
                """
            )
            estimates_func = robjects.r(rstring)
            out_summary, out_rownames = estimates_func(self.model_obj)
            df = pd.DataFrame(out_summary)
            dfshape = df.shape[1]
            df.index = out_rownames

            # The number of columns R returned tells us which stats are present
            # gaussian
            if dfshape == 7:
                df.columns = [
                    "Estimate",
                    "SE",
                    "DF",
                    "T-stat",
                    "P-val",
                    "2.5_ci",
                    "97.5_ci",
                ]
                df = df[
                    ["Estimate", "2.5_ci", "97.5_ci", "SE", "DF", "T-stat", "P-val"]
                ]

            # gamma, inverse_gaussian
            elif dfshape == 6:
                if self.family in ["gamma", "inverse_gaussian"]:
                    df.columns = [
                        "Estimate",
                        "SE",
                        "T-stat",
                        "P-val",
                        "2.5_ci",
                        "97.5_ci",
                    ]
                    df = df[
                        ["Estimate", "2.5_ci", "97.5_ci", "SE", "T-stat", "P-val"]
                    ]
                else:
                    # poisson
                    df.columns = [
                        "Estimate",
                        "SE",
                        "Z-stat",
                        "P-val",
                        "2.5_ci",
                        "97.5_ci",
                    ]
                    df = df[
                        ["Estimate", "2.5_ci", "97.5_ci", "SE", "Z-stat", "P-val"]
                    ]

            # Incase lmerTest chokes it won't return p-values
            elif dfshape == 5 and self.family == "gaussian":
                if not permute:
                    warnings.warn(
                        "MODELING FIT WARNING! Check model.warnings!! P-value computation did not occur because lmerTest choked. Possible issue(s): ranefx have too many parameters or too little variance..."
                    )
                df.columns = ["Estimate", "SE", "T-stat", "2.5_ci", "97.5_ci"]
                df = df[["Estimate", "2.5_ci", "97.5_ci", "SE", "T-stat"]]

        elif self.family == "binomial":
            # Same as above but additionally derive odds ratios and
            # probabilities (plus their CIs) from the log-odds estimates
            rstring = (
                """
                function(model){
                out.coef <- data.frame(unclass(summary(model))$coefficients)
                out.ci <- data.frame(confint(model,method='"""
                + conf_int
                + """',nsim="""
                + str(n_boot)
                + """))
                n <- c(rownames(out.ci))
                idx <- max(grep('sig',n))
                out.ci <- out.ci[-seq(1:idx),]
                out <- cbind(out.coef,out.ci)
                odds <- exp(out.coef[1])
                colnames(odds) <- "OR"
                probs <- data.frame(sapply(out.coef[1],plogis))
                colnames(probs) <- "Prob"
                odds.ci <- exp(out.ci)
                colnames(odds.ci) <- c("OR_2.5_ci","OR_97.5_ci")
                probs.ci <- data.frame(sapply(out.ci,plogis))
                if(ncol(probs.ci) == 1){
                probs.ci = t(probs.ci)
                }
                colnames(probs.ci) <- c("Prob_2.5_ci","Prob_97.5_ci")
                out <- cbind(out,odds,odds.ci,probs,probs.ci)
                list(out,rownames(out))
                }
                """
            )
            estimates_func = robjects.r(rstring)
            out_summary, out_rownames = estimates_func(self.model_obj)
            df = pd.DataFrame(out_summary)
            df.index = out_rownames
            df.columns = [
                "Estimate",
                "SE",
                "Z-stat",
                "P-val",
                "2.5_ci",
                "97.5_ci",
                "OR",
                "OR_2.5_ci",
                "OR_97.5_ci",
                "Prob",
                "Prob_2.5_ci",
                "Prob_97.5_ci",
            ]
            df = df[
                [
                    "Estimate",
                    "2.5_ci",
                    "97.5_ci",
                    "SE",
                    "OR",
                    "OR_2.5_ci",
                    "OR_97.5_ci",
                    "Prob",
                    "Prob_2.5_ci",
                    "Prob_97.5_ci",
                    "Z-stat",
                    "P-val",
                ]
            ]

        # --- Optional permutation-based inference ---
        if permute:
            perm_dat = dat.copy()
            dv_var = self.formula.split("~")[0].strip()
            grp_vars = list(self.grps.keys())
            perms = []
            for i in range(permute):
                # Shuffle the DV within clusters to respect the random
                # effects structure, then refit and collect test statistics
                perm_dat[dv_var] = perm_dat.groupby(grp_vars)[dv_var].transform(
                    lambda x: x.sample(frac=1)
                )
                if self.family == "gaussian":
                    perm_obj = lmer.lmer(self.formula, data=perm_dat, REML=REML)
                else:
                    perm_obj = lmer.glmer(self.formula, data=perm_dat, family=_fam)
                perms.append(_return_t(perm_obj))
            perms = np.array(perms)
            pvals = []
            for c in range(df.shape[0]):
                if self.family in ["gaussian", "gamma", "inverse_gaussian"]:
                    pvals.append(_perm_find(perms[:, c], df["T-stat"][c]))
                else:
                    pvals.append(_perm_find(perms[:, c], df["Z-stat"][c]))
            df["P-val"] = pvals
            if "DF" in df.columns:
                df["DF"] = [permute] * df.shape[0]
                df = df.rename(columns={"DF": "Num_perm", "P-val": "Perm-P-val"})
            else:
                df["Num_perm"] = [permute] * df.shape[0]
                df = df.rename(columns={"P-val": "Perm-P-val"})

        # --- Significance stars and bootstrap-CI handling ---
        if "P-val" in df.columns:
            df = df.assign(Sig=df["P-val"].apply(lambda x: _sig_stars(x)))
        elif "Perm-P-val" in df.columns:
            df = df.assign(Sig=df["Perm-P-val"].apply(lambda x: _sig_stars(x)))

        # NOTE(review): permute defaults to False, so 'permute is None' is only
        # true when a caller explicitly passes None — this branch looks like it
        # intended 'not permute'; confirm before relying on it.
        if (conf_int == "boot") and (permute is None):
            # We're computing parametrically bootstrapped ci's so it doesn't make sense to use approximation for p-values. Instead remove those from the output and make significant inferences based on whether the bootstrapped ci's cross 0.
            df = df.drop(columns=["P-val", "Sig"])
            if "DF" in df.columns:
                df = df.drop(columns="DF")
            df["Sig"] = df.apply(
                lambda row: "*" if row["2.5_ci"] * row["97.5_ci"] > 0 else "",
                axis=1,
            )

        # Because all models except lmm have no DF column make sure Num_perm gets put in the right place
        if permute:
            if self.family != "gaussian":
                cols = list(df.columns)
                col_order = cols[:-4] + ["Num_perm"] + cols[-4:-2] + [cols[-1]]
                df = df[col_order]

        self.coefs = df
        # Make sure the design matrix column names match population coefficients
        self.design_matrix.columns = self.coefs.index[:]
    else:
        self.coefs = None
        if permute or conf_int == "boot":
            print(
                "**NOTE**: Non-parametric inference only applies to fixed effects and none were estimated\n"
            )

    self.fitted = True

    # --- Random effect variances and correlations ---
    varcor_NAs = ["NA", "N", robjects.NA_Character]  # NOQA
    df = pd.DataFrame(base.data_frame(unsum.rx2("varcor")))
    # Rows whose var2 is NA describe variances; the rest are correlations
    ran_vars = df.query("var2 in @varcor_NAs").drop("var2", axis=1)
    ran_vars.index = ran_vars["grp"]
    ran_vars.drop("grp", axis=1, inplace=True)
    ran_vars.columns = ["Name", "Var", "Std"]
    ran_vars.index.name = None
    ran_vars.replace("NA", "", inplace=True)
    ran_vars = ran_vars.applymap(
        lambda x: np.nan if x == robjects.NA_Character else x
    )
    ran_vars.replace(np.nan, "", inplace=True)

    ran_corrs = df.query("var2 not in @varcor_NAs").drop("vcov", axis=1)
    if ran_corrs.shape[0] != 0:
        ran_corrs.index = ran_corrs["grp"]
        ran_corrs.drop("grp", axis=1, inplace=True)
        ran_corrs.columns = ["IV1", "IV2", "Corr"]
        ran_corrs.index.name = None
        ran_corrs = ran_corrs.applymap(
            lambda x: np.nan if x == robjects.NA_Character else x
        )
        ran_corrs.replace(np.nan, "", inplace=True)
    else:
        ran_corrs = None

    self.ranef_var = ran_vars
    self.ranef_corr = ran_corrs

    # --- Cluster (e.g subject) level coefficients ---
    rstring = """
        function(model){
        out <- coef(model)
        out
        }
        """
    fixef_func = robjects.r(rstring)
    fixefs = fixef_func(self.model_obj)
    fixefs = [pd.DataFrame(f) for f in fixefs]
    # Sort column order to match population coefs
    # This also handles cases in which random slope terms exist in the model without corresponding fixed effects terms, which generates extra columns in this dataframe. By default put those columns *after* the fixed effect columns of interest (i.e. population coefs)
    if len(fixefs) > 1:
        if self.coefs is not None:
            f_corrected_order = []
            for f in fixefs:
                f_corrected_order.append(
                    pd.DataFrame(
                        f[
                            list(self.coefs.index)
                            + [
                                elem
                                for elem in f.columns
                                if elem not in self.coefs.index
                            ]
                        ]
                    )
                )
            self.fixef = f_corrected_order
        else:
            self.fixef = list(fixefs)
    else:
        self.fixef = fixefs[0]
        if self.coefs is not None:
            self.fixef = self.fixef[
                list(self.coefs.index)
                + [
                    elem
                    for elem in self.fixef.columns
                    if elem not in self.coefs.index
                ]
            ]

    # --- Cluster (e.g subject) level random deviations ---
    rstring = """
        function(model){
        uniquify <- function(df){
        colnames(df) <- make.unique(colnames(df))
        df
        }
        out <- lapply(ranef(model),uniquify)
        out
        }
        """
    ranef_func = robjects.r(rstring)
    ranefs = ranef_func(self.model_obj)
    if len(ranefs) > 1:
        self.ranef = [pd.DataFrame(e) for e in ranefs]
    else:
        self.ranef = pd.DataFrame(ranefs[0])

    # --- Model residuals ---
    rstring = """
        function(model){
        out <- resid(model)
        out
        }
        """
    resid_func = robjects.r(rstring)
    self.residuals = np.array(resid_func(self.model_obj))
    try:
        self.data["residuals"] = copy(self.residuals)
    except ValueError as e:  # NOQA
        print(
            "**NOTE**: Column for 'residuals' not created in model.data, but saved in model.resid only. This is because you have rows with NaNs in your data.\n"
        )

    # --- Model fits ---
    rstring = """
        function(model){
        out <- fitted(model)
        out
        }
        """
    fit_func = robjects.r(rstring)
    self.fits = fit_func(self.model_obj)
    try:
        self.data["fits"] = copy(self.fits)
    except ValueError as e:  # NOQA
        print(
            "**NOTE** Column for 'fits' not created in model.data, but saved in model.fits only. This is because you have rows with NaNs in your data.\n"
        )

    if summarize:
        return self.summary()
def simulate(self, num_datasets, use_rfx=True, verbose=False):
    """
    Simulate new responses based upon estimates from a fitted model. By default group/cluster means for simulated data will match those of the original data. Unlike predict, this is a non-deterministic operation because lmer will sample random-efects values for all groups/cluster and then sample data points from their respective conditional distributions.

    Args:
        num_datasets (int): number of simulated datasets to generate. Each simulation always generates a dataset that matches the size of the original data
        use_rfx (bool): wehther to match group/cluster means in simulated data
        verbose (bool): whether to print R messages to console

    Returns:
        np.ndarray: simulated data values
    """
    self._set_R_stdout(verbose)

    if isinstance(num_datasets, float):
        num_datasets = int(num_datasets)
    if not isinstance(num_datasets, int):
        raise ValueError("num_datasets must be an integer")

    # re.form=NULL conditions on the fitted random effects; NA ignores them
    re_form = "NULL" if use_rfx else "NA"
    rstring = f"""
        function(model){{
        out <- simulate(model,{num_datasets},allow.new.levels=TRUE,re.form={re_form})
        out
        }}
        """
    simulate_func = robjects.r(rstring)
    sims = simulate_func(self.model_obj)
    return pd.DataFrame(sims)
def predict(self, data, use_rfx=False, pred_type="response", verbose=False):
        """
        Make predictions given new data. Input must be a dataframe that contains the same columns as the model.matrix excluding the intercept (i.e. all the predictor variables used to fit the model). If using random effects to make predictions, input data must also contain a column for the group identifier that were used to fit the model random effects terms. Using random effects to make predictions only makes sense if predictions are being made about the same groups/clusters.

        Args:
            data (pandas.core.frame.DataFrame): input data to make predictions on
            use_rfx (bool): whether to condition on random effects when making predictions
            pred_type (str): whether the prediction should be on the 'response' scale (default); or on the 'link' scale of the predictors passed through the link function (e.g. log-odds scale in a logit model instead of probability values)
            verbose (bool): whether to print R messages to console

        Returns:
            np.ndarray: prediction values

        Raises:
            ValueError: if fixed effects are missing, required columns are absent,
                or pred_type is not 'response' or 'link'
        """
        self._set_R_stdout(verbose)
        # Validate pred_type up front: it is interpolated directly into R code
        # below, so an arbitrary string must not reach robjects.r()
        if pred_type not in ("response", "link"):
            raise ValueError("pred_type must be 'response' or 'link'")
        if self.design_matrix is None:
            raise ValueError(
                "No fixed effects were estimated so prediction is not possible!"
            )
        required_cols = self.design_matrix.columns[1:]
        if not all([col in data.columns for col in required_cols]):
            raise ValueError("Column names do not match all fixed effects model terms!")

        if use_rfx:
            # Conditioning on rfx additionally requires the grouping columns
            required_cols = set(list(required_cols) + list(self.grps.keys()))
            if not all([col in data.columns for col in required_cols]):
                raise ValueError(
                    "Column names are missing random effects model grouping terms!"
                )
            re_form = "NULL"
        else:
            re_form = "NA"

        rstring = (
            """
            function(model,new){
            out <- predict(model,new,allow.new.levels=TRUE,re.form="""
            + re_form
            + """,type='"""
            + pred_type
            + """')
            out
            }
            """
        )

        predict_func = robjects.r(rstring)
        preds = predict_func(self.model_obj, pandas2R(data))
        # Convert the R vector to numpy for consistency with self.residuals,
        # and to honor the documented np.ndarray return type
        return np.array(preds)
def summary(self):
        """
        Summarize the output of a fitted model.

        Returns:
            pd.DataFrame: R/statsmodels style summary

        Raises:
            RuntimeError: if the model has not been fitted yet
        """
        if not self.fitted:
            raise RuntimeError("Model must be fitted to generate summary!")

        # Header: formula, family/inference, data size, and fit statistics
        print(f"Formula: {self.formula}\n")
        print(f"Family: {self.family}\t Inference: {self.sig_type}\n")
        print(f"Number of observations: {self.data.shape[0]}\t Groups: {self.grps}\n")
        print(f"Log-likelihood: {self.logLike:.3f} \t AIC: {self.AIC:.3f}\n")

        # Random-effects variances and (optional) correlations
        print("Random effects:\n")
        print(f"{self.ranef_var.round(3)}\n")
        if self.ranef_corr is None:
            print("No random effect correlations specified\n")
        else:
            print(f"{self.ranef_corr.round(3)}\n")

        # Fixed effects, if any were estimated
        if self.coefs is None:
            print("No fixed effects estimated\n")
            return
        print("Fixed effects:\n")
        return self.coefs.round(3)
# TODO Provide option to to pass lmerTest.limit = N in order to get non Inf dof when number of observations > 3000. Apparently this is a new default in emmeans. This warning is only visible when verbose=True
def post_hoc(
        self,
        marginal_vars,
        grouping_vars=None,
        p_adjust="tukey",
        summarize=True,
        verbose=False,
    ):
        """
        Post-hoc pair-wise tests corrected for multiple comparisons (Tukey method) implemented using the emmeans package. This method provides both marginal means/trends along with marginal pairwise differences. More info can be found at: https://cran.r-project.org/web/packages/emmeans/emmeans.pdf

        Args:
            marginal_vars (str/list): what variable(s) to compute marginal means/trends for; unique combinations of factor levels of these variable(s) will determine family-wise error correction
            grouping_vars (str/list): what variable(s) to group on. Trends/means/comparisons of other variable(s), will be computed at each level of these variable(s)
            p_adjust (str): multiple comparisons adjustment method. One of: tukey, bonf, fdr, hochberg, hommel, holm, dunnet, mvt (monte-carlo multi-variate T, aka exact tukey/dunnet). Default tukey
            summarize (bool): output effects and contrasts or don't (always stored in model object as model.marginal_estimates and model.marginal_contrasts); default True
            verbose (bool): whether to print R messages to the console

        Returns:
            Multiple:

            - **marginal_estimates** (*pd.Dataframe*): unique factor level effects (e.g. means/coefs)

            - **marginal_contrasts** (*pd.DataFrame*): contrasts between factor levels

        Examples:

            Pairwise comparison of means of A at each level of B

            >>> model.post_hoc(marginal_vars='A',grouping_vars='B')

            Pairwise differences of slopes of C between levels of A at each level of B

            >>> model.post_hoc(marginal_vars='C',grouping_vars=['A','B'])

            Pairwise differences of each unique A,B cell

            >>> model.post_hoc(marginal_vars=['A','B'])
        """
        self._set_R_stdout(verbose)
        if not marginal_vars:
            raise ValueError("Must provide marginal_vars")
        if not self.fitted:
            raise RuntimeError("Model must be fitted to generate post-hoc comparisons")
        if not isinstance(marginal_vars, list):
            marginal_vars = [marginal_vars]
        if grouping_vars and not isinstance(grouping_vars, list):
            grouping_vars = [grouping_vars]
        # Conditional vars can only be factor types.
        # BUGFIX: only validate when grouping_vars were actually provided;
        # previously `for elem in grouping_vars` raised TypeError whenever
        # grouping_vars was left at its None default. `(self.factors or {})`
        # additionally guards against self.factors being None.
        if grouping_vars and not all(
            [elem in (self.factors or {}) for elem in grouping_vars]
        ):
            raise ValueError(
                "All grouping_vars must be existing categorical variables (i.e. factors)"
            )

        # Need to figure out if marginal_vars is continuous or not to determine emtrends or emmeans call
        cont, factor = [], []
        for var in marginal_vars:
            if not self.factors or var not in self.factors.keys():
                cont.append(var)
            else:
                factor.append(var)

        if cont:
            if factor:
                raise ValueError(
                    "With more than one marginal variable, all variables must be categorical factors. Mixing continuous and categorical variables is not supported. Try passing additional categorical factors to grouping_vars"
                )
            else:
                if len(cont) > 1:
                    raise ValueError(
                        "Marginal variables can only contain one continuous variable"
                    )
                elif len(cont) == 1:
                    if grouping_vars:
                        # Emtrends; there's a bug for trends where options don't get set by default so an empty list is passed to R, see: https://bit.ly/2VJ9QZM
                        cont = cont[0]
                        if len(grouping_vars) > 1:
                            # Pairwise trends of the first grouping var,
                            # conditioned on the remaining grouping vars
                            g1 = grouping_vars[0]
                            _conditional = "+".join(grouping_vars[1:])
                            rstring = (
                                """
                                function(model){
                                suppressMessages(library(emmeans))
                                out <- emtrends(model,pairwise ~ """
                                + g1
                                + """|"""
                                + _conditional
                                + """,var='"""
                                + cont
                                + """',adjust='"""
                                + p_adjust
                                + """',options=list(),lmer.df='satterthwaite',lmerTest.limit=9999)
                                out
                                }"""
                            )
                        else:
                            rstring = (
                                """
                                function(model){
                                suppressMessages(library(emmeans))
                                out <- emtrends(model,pairwise ~ """
                                + grouping_vars[0]
                                + """,var='"""
                                + cont
                                + """',adjust='"""
                                + p_adjust
                                + """',options=list(),lmer.df='satterthwaite',lmerTest.limit=9999)
                                out
                                }"""
                            )
                    else:
                        raise ValueError(
                            "grouping_vars are required with a continuous marginal_vars"
                        )
        else:
            if factor:
                _marginal = "+".join(factor)
                if grouping_vars:
                    # emmeans with pipe
                    _conditional = "+".join(grouping_vars)
                    rstring = (
                        """
                        function(model){
                        suppressMessages(library(emmeans))
                        out <- emmeans(model,pairwise ~ """
                        + _marginal
                        + """|"""
                        + _conditional
                        + """, adjust='"""
                        + p_adjust
                        + """',lmer.df='satterthwaite',lmerTest.limit=9999)
                        out
                        }"""
                    )
                else:
                    # emmeans without pipe
                    rstring = (
                        """
                        function(model){
                        suppressMessages(library(emmeans))
                        out <- emmeans(model,pairwise ~ """
                        + _marginal
                        + """,adjust='"""
                        + p_adjust
                        + """',lmer.df='satterthwaite',lmerTest.limit=9999)
                        out
                        }"""
                    )
            else:
                raise ValueError("marginal_vars are not in model!")

        func = robjects.r(rstring)
        res = func(self.model_obj)
        emmeans = importr("emmeans")

        # Marginal estimates
        self.marginal_estimates = pd.DataFrame(base.summary(res)[0])
        # Resort columns
        effect_names = list(self.marginal_estimates.columns[:-4])
        # this column name changes depending on whether we're doing post-hoc trends or means
        effname = effect_names[-1]
        sortme = effect_names[:-1] + ["Estimate", "2.5_ci", "97.5_ci", "SE", "DF"]
        # In emmeans (compared to lsmeans) the CI column names change too depending on how many factor variables are in the model
        if "asymp.LCL" in self.marginal_estimates.columns:
            self.marginal_estimates = self.marginal_estimates.rename(
                columns={
                    effname: "Estimate",
                    "df": "DF",
                    "asymp.LCL": "2.5_ci",
                    "asymp.UCL": "97.5_ci",
                }
            )[sortme]
        elif "lower.CL" in self.marginal_estimates.columns:
            self.marginal_estimates = self.marginal_estimates.rename(
                columns={
                    effname: "Estimate",
                    "df": "DF",
                    "lower.CL": "2.5_ci",
                    "upper.CL": "97.5_ci",
                }
            )[sortme]
        else:
            raise ValueError(
                f"Cannot figure out what emmeans is naming marginal CI columns. Expected 'lower.CL' or 'asymp.LCL', but columns are {self.marginal_estimates.columns}"
            )

        # Marginal Contrasts
        self.marginal_contrasts = pd.DataFrame(base.summary(res)[1])
        if "t.ratio" in self.marginal_contrasts.columns:
            rename_dict = {
                "t.ratio": "T-stat",
                "p.value": "P-val",
                "estimate": "Estimate",
                "df": "DF",
                "contrast": "Contrast",
            }
            sorted_names = [
                "Estimate",
                "2.5_ci",
                "97.5_ci",
                "SE",
                "DF",
                "T-stat",
                "P-val",
            ]
        elif "z.ratio" in self.marginal_contrasts.columns:
            rename_dict = {
                "z.ratio": "Z-stat",
                "p.value": "P-val",
                "estimate": "Estimate",
                "df": "DF",
                "contrast": "Contrast",
            }
            sorted_names = [
                "Estimate",
                "2.5_ci",
                "97.5_ci",
                "SE",
                "DF",
                "Z-stat",
                "P-val",
            ]
        else:
            raise ValueError(
                f"Cannot figure out what emmeans is naming contrast means columns. Expected 't.ratio' or 'z.ratio', but columns are: {self.marginal_contrasts.columns}"
            )
        self.marginal_contrasts = self.marginal_contrasts.rename(columns=rename_dict)

        # Need to make another call to emmeans to get confidence intervals on contrasts
        confs = pd.DataFrame(base.unclass(emmeans.confint_emmGrid(res))[1])
        confs = confs.iloc[:, -2:]
        # Deal with changing column names again
        if "asymp.LCL" in confs.columns:
            confs = confs.rename(
                columns={"asymp.LCL": "2.5_ci", "asymp.UCL": "97.5_ci"}
            )
        elif "lower.CL" in confs.columns:
            confs = confs.rename(columns={"lower.CL": "2.5_ci", "upper.CL": "97.5_ci"})
        else:
            # BUGFIX: report the columns of confs (the contrast CIs being
            # inspected), not self.marginal_estimates
            raise ValueError(
                f"Cannot figure out what emmeans is naming contrast CI columns. Expected 'lower.CL' or 'asymp.LCL', but columns are {confs.columns}"
            )
        self.marginal_contrasts = pd.concat([self.marginal_contrasts, confs], axis=1)
        # Resort columns
        effect_names = list(self.marginal_contrasts.columns[:-7])
        sortme = effect_names + sorted_names
        self.marginal_contrasts = self.marginal_contrasts[sortme]
        self.marginal_contrasts["Sig"] = self.marginal_contrasts["P-val"].apply(
            _sig_stars
        )

        if (
            p_adjust == "tukey"
            and self.marginal_contrasts.shape[0] >= self.marginal_estimates.shape[0]
        ):
            print(
                "P-values adjusted by tukey method for family of {} estimates".format(
                    self.marginal_contrasts["Contrast"].nunique()
                )
            )
        elif p_adjust != "tukey":
            print(
                "P-values adjusted by {} method for {} comparisons".format(
                    p_adjust, self.marginal_contrasts["Contrast"].nunique()
                )
            )
        if summarize:
            return self.marginal_estimates.round(3), self.marginal_contrasts.round(3)
def plot_summary(
        self,
        figsize=(12, 6),
        error_bars="ci",
        ranef=True,
        axlim=None,
        plot_intercept=True,
        ranef_alpha=0.5,
        coef_fmt="o",
        orient="v",
        ranef_idx=0,
    ):
        """
        Create a forestplot overlaying estimated coefficients with random effects (i.e. BLUPs). By default display the 95% confidence intervals computed during fitting.

        Args:
            figsize (tuple): matplotlib figure size; default (12, 6)
            error_bars (str): one of 'ci' or 'se' to change which error bars are plotted; default 'ci'
            ranef (bool): overlay BLUP estimates on figure; default True
            axlim (tuple): lower and upper limit of plot; default min and max of BLUPs
            plot_intercept (bool): plot the intercept estimate; default True
            ranef_alpha (float): opacity of random effect points; default .5
            coef_fmt (str): matplotlib marker style for population coefficients
            orient (str): plot orientation, 'v' (vertical) or 'h' (horizontal); default 'v'
            ranef_idx (int): if multiple random effects clusters were specified this value indicates which one should be plotted; uses 0-based indexing; default 0 (first)

        Returns:
            plt.axis: matplotlib axis handle

        Raises:
            RuntimeError: if the model has not been fitted
            ValueError: if orient or error_bars receive unsupported values
        """
        if not self.fitted:
            raise RuntimeError("Model must be fit before plotting!")
        if orient not in ["h", "v"]:
            raise ValueError("orientation must be 'h' or 'v'")
        # BUGFIX: validate error_bars up front. Previously any other value left
        # col_lb/col_ub unbound and triggered a confusing NameError below.
        if error_bars not in ["ci", "se"]:
            raise ValueError("error_bars must be 'ci' or 'se'")
        if isinstance(self.fixef, list):
            m_ranef = self.fixef[ranef_idx]
        else:
            m_ranef = self.fixef
        m_fixef = self.coefs

        if not plot_intercept:
            m_ranef = m_ranef.drop("(Intercept)", axis=1)
            m_fixef = m_fixef.drop("(Intercept)", axis=0)

        # Error-bar half-widths relative to the point estimate
        if error_bars == "ci":
            col_lb = (m_fixef["Estimate"] - m_fixef["2.5_ci"]).values
            col_ub = (m_fixef["97.5_ci"] - m_fixef["Estimate"]).values
        elif error_bars == "se":
            col_lb, col_ub = m_fixef["SE"], m_fixef["SE"]

        # For seaborn: long-format BLUPs
        m = pd.melt(m_ranef)

        f, ax = plt.subplots(1, 1, figsize=figsize)

        # alpha=0 hides the BLUP points without changing the layout
        if ranef:
            alpha_plot = ranef_alpha
        else:
            alpha_plot = 0

        if orient == "v":
            x_strip = "value"
            x_err = m_fixef["Estimate"]
            y_strip = "variable"
            y_err = range(m_fixef.shape[0])
            xerr = [col_lb, col_ub]
            yerr = None
            ax.vlines(
                x=0, ymin=-1, ymax=self.coefs.shape[0], linestyles="--", color="grey"
            )
            if not axlim:
                xlim = (m["value"].min() - 1, m["value"].max() + 1)
            else:
                xlim = axlim
            ylim = None
        else:
            y_strip = "value"
            y_err = m_fixef["Estimate"]
            x_strip = "variable"
            x_err = range(m_fixef.shape[0])
            yerr = [col_lb, col_ub]
            xerr = None
            ax.hlines(
                y=0, xmin=-1, xmax=self.coefs.shape[0], linestyles="--", color="grey"
            )
            if not axlim:
                ylim = (m["value"].min() - 1, m["value"].max() + 1)
            else:
                ylim = axlim
            xlim = None

        sns.stripplot(
            x=x_strip, y=y_strip, data=m, ax=ax, size=6, alpha=alpha_plot, color="grey"
        )

        ax.errorbar(
            x=x_err,
            y=y_err,
            xerr=xerr,
            yerr=yerr,
            fmt=coef_fmt,
            capsize=0,
            elinewidth=4,
            color="black",
            ms=12,
            zorder=9999999999,
        )

        ax.set(ylabel="", xlabel="Estimate", xlim=xlim, ylim=ylim)
        sns.despine(top=True, right=True, left=True)
        return ax
def plot(
        self,
        param,
        figsize=(8, 6),
        xlabel="",
        ylabel="",
        plot_fixef=True,
        plot_ci=True,
        grps=None,
        ax=None,
    ):
        """
        Plot random and group level parameters from a fitted model

        Args:
            param (str): model parameter (column name) to plot
            figsize (tup): matplotlib desired figsize
            xlabel (str): x-axis label
            ylabel (str): y-axis label
            plot_fixef (bool): plot population effect fit of param?; default True
            plot_ci (bool): plot computed ci's of population effect?; default True
            grps (list): plot specific group fits only; must correspond to index values in model.fixef; default None (all groups)
            ax (matplotlib.axes.Axes): axis handle for an existing plot; if provided will ensure that random parameter plots appear *behind* all other plot objects.

        Returns:
            plt.axis: matplotlib axis handle
        """
        # BUGFIX: default was the mutable `grps=[]`; use None to avoid the
        # shared-mutable-default pitfall. Falsy checks below treat them alike.
        if not self.fitted:
            raise RuntimeError("Model must be fit before plotting!")
        if self.factors:
            raise NotImplementedError(
                "Plotting can currently only handle models with continuous predictors!"
            )
        if isinstance(self.fixef, list) or isinstance(self.ranef, list):
            raise NotImplementedError(
                "Plotting can currently only handle models with 1 random effect grouping variable!"
            )
        if self.design_matrix is None:
            raise ValueError(
                "No fixed effects were estimated so prediction is not possible!"
            )
        if not ax:
            f, ax = plt.subplots(1, 1, figsize=figsize)

        # Get range of unique values for desired parameter
        x_vals = self.design_matrix[param].unique()
        # Sort order to handle bug in matplotlib plotting
        idx = np.argsort(x_vals)

        # Get desired parameter part of the prediction
        fixef_pred = (
            self.coefs.loc["(Intercept)", "Estimate"]
            + self.coefs.loc[param, "Estimate"] * x_vals
        )
        fixef_pred_upper = (
            self.coefs.loc["(Intercept)", "97.5_ci"]
            + self.coefs.loc[param, "97.5_ci"] * x_vals
        )
        fixef_pred_lower = (
            self.coefs.loc["(Intercept)", "2.5_ci"]
            + self.coefs.loc[param, "2.5_ci"] * x_vals
        )

        if grps:
            if all(isinstance(x, int) for x in grps):
                ran_dat = self.fixef.iloc[grps, :]
            elif all(isinstance(x, str) for x in grps):
                ran_dat = self.fixef.loc[grps, :]
            else:
                raise TypeError(
                    "grps must be integer list for integer-indexing (.iloc) of fixed effects, or label list for label-indexing (.loc) of fixed effects"
                )
        else:
            ran_dat = self.fixef

        # Now generate random effects predictions
        for _, row in ran_dat.iterrows():
            ranef_desired = row["(Intercept)"] + row[param] * x_vals
            # ranef_other = np.dot(other_vals_means, row.loc[other_vals])
            pred = ranef_desired  # + ranef_other
            ax.plot(x_vals[idx], pred[idx], "-", linewidth=2)

        if plot_fixef:
            ax.plot(
                x_vals[idx],
                fixef_pred[idx],
                "--",
                color="black",
                linewidth=3,
                zorder=9999999,
            )

        if plot_ci:
            ax.fill_between(
                x_vals[idx],
                fixef_pred_lower[idx],
                fixef_pred_upper[idx],
                facecolor="black",
                alpha=0.25,
                zorder=9999998,
            )

        ax.set(
            ylim=(self.data.fits.min(), self.data.fits.max()),
            xlim=(x_vals.min(), x_vals.max()),
            xlabel=param,
            ylabel=self.formula.split("~")[0].strip(),
        )
        if xlabel:
            ax.set_xlabel(xlabel)
        if ylabel:
            ax.set_ylabel(ylabel)
        return ax
|
{"hexsha": "989ede8ecf233a4aba2b8fc7cf6b624d002b46d8", "size": 61938, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymer4/models/Lmer.py", "max_stars_repo_name": "jcheong0428/pymer4", "max_stars_repo_head_hexsha": "7e98fa28f5fdc01e8f786e381179c6b36067ef90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pymer4/models/Lmer.py", "max_issues_repo_name": "jcheong0428/pymer4", "max_issues_repo_head_hexsha": "7e98fa28f5fdc01e8f786e381179c6b36067ef90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymer4/models/Lmer.py", "max_forks_repo_name": "jcheong0428/pymer4", "max_forks_repo_head_hexsha": "7e98fa28f5fdc01e8f786e381179c6b36067ef90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2208588957, "max_line_length": 718, "alphanum_fraction": 0.5389906035, "include": true, "reason": "import numpy", "num_tokens": 13221}
|
# Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from unittest.mock import Mock
import numpy as np
import pytest
import braket.ir.jaqcd as jaqcd
from braket.circuits import (
AsciiCircuitDiagram,
Circuit,
Gate,
Instruction,
Moments,
Observable,
QubitSet,
ResultType,
circuit,
)
# --- Shared pytest fixtures: canonical circuits/instructions reused below ---
@pytest.fixture
def cnot():
    """Circuit containing a single CNot with control qubit 0 and target qubit 1."""
    return Circuit().add_instruction(Instruction(Gate.CNot(), [0, 1]))
@pytest.fixture
def cnot_instr():
    """Bare CNot instruction on qubits [0, 1]."""
    return Instruction(Gate.CNot(), [0, 1])
@pytest.fixture
def h():
    """Circuit containing a single Hadamard on qubit 0."""
    return Circuit().add_instruction(Instruction(Gate.H(), 0))
@pytest.fixture
def h_instr():
    """Bare Hadamard instruction on qubit 0."""
    return Instruction(Gate.H(), 0)
@pytest.fixture
def prob():
    """Probability result type over qubits [0, 1]."""
    return ResultType.Probability([0, 1])
@pytest.fixture
def cnot_prob(cnot_instr, prob):
    """Circuit with a probability result type and a CNot instruction."""
    return Circuit().add_result_type(prob).add_instruction(cnot_instr)
@pytest.fixture
def bell_pair(prob):
    """Bell-pair preparation (H then CNot) plus a probability result type."""
    return (
        Circuit()
        .add_instruction(Instruction(Gate.H(), 0))
        .add_instruction(Instruction(Gate.CNot(), [0, 1]))
        .add_result_type(prob)
    )
# --- repr/str/equality behavior of Circuit ---
def test_repr_instructions(h):
    """repr of a circuit with no result types shows only its instructions."""
    expected = f"Circuit('instructions': {list(h.instructions)})"
    assert repr(h) == expected
def test_repr_result_types(cnot_prob):
    """repr of a circuit with result types includes them after instructions."""
    # NOTE(review): `circuit` shadows the `circuit` module imported from
    # braket.circuits at file top (harmless here, but confusing).
    circuit = cnot_prob
    # NOTE(review): this concatenation produces "...}result_types'..." with no
    # separator between the two pieces — verify against Circuit.__repr__.
    expected = (
        f"Circuit('instructions': {list(circuit.instructions)}"
        + f"result_types': {circuit.result_types})"
    )
    assert repr(circuit) == expected
def test_str(h):
    """str() delegates to the ASCII diagram builder."""
    expected = AsciiCircuitDiagram.build_diagram(h)
    assert str(h) == expected
def test_equality():
    """Circuits compare by value, not identity; non-circuits never compare equal."""
    circ_1 = Circuit().h(0).probability([0, 1])
    circ_2 = Circuit().h(0).probability([0, 1])
    other_circ = Circuit().h(1)
    non_circ = "non circuit"
    assert circ_1 == circ_2
    assert circ_1 is not circ_2
    assert circ_1 != other_circ
    assert circ_1 != non_circ
# --- add_result_type: targeting, mapping, dedup, and observable conflicts ---
# NOTE(review): the xfail(raises=...) tests below silently XPASS if no error is
# raised because strict=True is not set; consider pytest.raises(...) instead.
def test_add_result_type_default(prob):
    circ = Circuit().add_result_type(prob)
    assert list(circ.result_types) == [prob]
def test_add_result_type_with_mapping(prob):
    # target_mapping remaps qubits 0->10 and 1->11
    expected = [ResultType.Probability([10, 11])]
    circ = Circuit().add_result_type(prob, target_mapping={0: 10, 1: 11})
    assert list(circ.result_types) == expected
def test_add_result_type_with_target(prob):
    # target replaces the result type's qubits wholesale
    expected = [ResultType.Probability([10, 11])]
    circ = Circuit().add_result_type(prob, target=[10, 11])
    assert list(circ.result_types) == expected
def test_add_result_type_already_exists():
    # Adding an identical result type twice must not duplicate it
    expected = [ResultType.StateVector()]
    circ = Circuit(expected).add_result_type(expected[0])
    assert list(circ.result_types) == expected
@pytest.mark.xfail(raises=ValueError)
def test_add_result_type_observable_conflict_target():
    circ = Circuit().add_result_type(ResultType.Probability([0, 1]))
    circ.add_result_type(ResultType.Expectation(observable=Observable.Y(), target=0))
@pytest.mark.xfail(raises=ValueError)
def test_add_result_type_observable_conflict_all():
    circ = Circuit().add_result_type(ResultType.Probability())
    circ.add_result_type(ResultType.Expectation(observable=Observable.Y()))
@pytest.mark.xfail(raises=ValueError)
def test_add_result_type_observable_conflict_all_target_then_selected_target():
    circ = Circuit().add_result_type(ResultType.Probability())
    circ.add_result_type(ResultType.Expectation(observable=Observable.Y(), target=[0, 1]))
@pytest.mark.xfail(raises=ValueError)
def test_add_result_type_observable_conflict_different_selected_targets_then_all_target():
    circ = Circuit().add_result_type(ResultType.Expectation(observable=Observable.Z(), target=[0]))
    circ.add_result_type(ResultType.Expectation(observable=Observable.Y(), target=[1]))
    circ.add_result_type(ResultType.Expectation(observable=Observable.Y()))
@pytest.mark.xfail(raises=ValueError)
def test_add_result_type_observable_conflict_selected_target_then_all_target():
    circ = Circuit().add_result_type(
        ResultType.Expectation(observable=Observable.Y(), target=[0, 1])
    )
    circ.add_result_type(ResultType.Probability())
def test_add_result_type_observable_no_conflict_all_target():
    # Probability over all qubits is compatible with a targeted expectation
    expected = [
        ResultType.Probability(),
        ResultType.Expectation(observable=Observable.Z(), target=[0]),
    ]
    circ = Circuit(expected)
    assert circ.result_types == expected
def test_add_result_type_observable_no_conflict_target_all():
    expected = [
        ResultType.Expectation(observable=Observable.Z(), target=[0]),
        ResultType.Probability(),
    ]
    circ = Circuit(expected)
    assert circ.result_types == expected
def test_add_result_type_observable_no_conflict_all():
    # Same observable over all qubits: variance and expectation can coexist
    expected = [
        ResultType.Variance(observable=Observable.Y()),
        ResultType.Expectation(observable=Observable.Y()),
    ]
    circ = Circuit(expected)
    assert circ.result_types == expected
def test_add_result_type_observable_no_conflict_state_vector_obs_return_value():
    expected = [
        ResultType.StateVector(),
        ResultType.Expectation(observable=Observable.Y()),
    ]
    circ = Circuit(expected)
    assert circ.result_types == expected
@pytest.mark.xfail(raises=ValueError)
def test_add_result_type_same_observable_wrong_target_order_tensor_product():
    # Same tensor-product observable but with targets in a different order
    Circuit().add_result_type(
        ResultType.Expectation(observable=Observable.Y() @ Observable.X(), target=[0, 1])
    ).add_result_type(
        ResultType.Variance(observable=Observable.Y() @ Observable.X(), target=[1, 0])
    )
@pytest.mark.xfail(raises=ValueError)
def test_add_result_type_same_observable_wrong_target_order_hermitian():
    array = np.eye(4)
    Circuit().add_result_type(
        ResultType.Expectation(observable=Observable.Hermitian(matrix=array), target=[0, 1])
    ).add_result_type(
        ResultType.Variance(observable=Observable.Hermitian(matrix=array), target=[1, 0])
    )
@pytest.mark.xfail(raises=TypeError)
def test_add_result_type_with_target_and_mapping(prob):
    # target and target_mapping are mutually exclusive
    Circuit().add_result_type(prob, target=[10], target_mapping={0: 10})
# --- add_instruction / add_circuit: targeting and mapping semantics ---
def test_add_instruction_default(cnot_instr):
    circ = Circuit().add_instruction(cnot_instr)
    assert list(circ.instructions) == [cnot_instr]
def test_add_instruction_with_mapping(cnot_instr):
    expected = [Instruction(Gate.CNot(), [10, 11])]
    circ = Circuit().add_instruction(cnot_instr, target_mapping={0: 10, 1: 11})
    assert list(circ.instructions) == expected
def test_add_instruction_with_target(cnot_instr):
    expected = [Instruction(Gate.CNot(), [10, 11])]
    circ = Circuit().add_instruction(cnot_instr, target=[10, 11])
    assert list(circ.instructions) == expected
def test_add_multiple_single_qubit_instruction(h_instr):
    # A single-qubit instruction with a multi-qubit target fans out per qubit
    circ = Circuit().add_instruction(h_instr, target=[0, 1, 2, 3])
    expected = Circuit().h(0).h(1).h(2).h(3)
    assert circ == expected
@pytest.mark.xfail(raises=TypeError)
def test_add_instruction_with_target_and_mapping(h):
    # target and target_mapping are mutually exclusive
    Circuit().add_instruction(h, target=[10], target_mapping={0: 10})
def test_add_circuit_default(bell_pair):
    circ = Circuit().add_circuit(bell_pair)
    assert circ == bell_pair
def test_add_circuit_with_mapping(bell_pair):
    # Mapping applies to both instructions and result types
    circ = Circuit().add_circuit(bell_pair, target_mapping={0: 10, 1: 11})
    expected = (
        Circuit()
        .add_instruction(Instruction(Gate.H(), 10))
        .add_instruction(Instruction(Gate.CNot(), [10, 11]))
        .add_result_type(ResultType.Probability([10, 11]))
    )
    assert circ == expected
def test_add_circuit_with_target(bell_pair):
    circ = Circuit().add_circuit(bell_pair, target=[10, 11])
    expected = (
        Circuit()
        .add_instruction(Instruction(Gate.H(), 10))
        .add_instruction(Instruction(Gate.CNot(), [10, 11]))
        .add_result_type(ResultType.Probability([10, 11]))
    )
    assert circ == expected
def test_add_circuit_with_target_and_non_continuous_qubits():
    # Targets are matched to the source circuit's qubits in sorted order
    widget = Circuit().h(5).h(50).h(100)
    circ = Circuit().add_circuit(widget, target=[1, 3, 5])
    expected = (
        Circuit()
        .add_instruction(Instruction(Gate.H(), 1))
        .add_instruction(Instruction(Gate.H(), 3))
        .add_instruction(Instruction(Gate.H(), 5))
    )
    assert circ == expected
@pytest.mark.xfail(raises=TypeError)
def test_add_circuit_with_target_and_mapping(h):
    Circuit().add_circuit(h, target=[10], target_mapping={0: 10})
# --- Circuit.add() dispatch (instruction vs circuit), copy, and operators ---
def test_add_with_instruction_with_default(cnot_instr):
    # add() with an Instruction delegates to add_instruction()
    circ = Circuit().add(cnot_instr)
    assert circ == Circuit().add_instruction(cnot_instr)
def test_add_with_instruction_with_mapping(cnot_instr):
    target_mapping = {0: 10, 1: 11}
    circ = Circuit().add(cnot_instr, target_mapping=target_mapping)
    expected = Circuit().add_instruction(cnot_instr, target_mapping=target_mapping)
    assert circ == expected
def test_add_with_instruction_with_target(cnot_instr):
    target = [10, 11]
    circ = Circuit().add(cnot_instr, target=target)
    expected = Circuit().add_instruction(cnot_instr, target=target)
    assert circ == expected
def test_add_with_circuit_with_default(bell_pair):
    # add() with a Circuit delegates to add_circuit()
    circ = Circuit().add(bell_pair)
    assert circ == Circuit().add_circuit(bell_pair)
def test_add_with_circuit_with_mapping(bell_pair):
    target_mapping = {0: 10, 1: 11}
    circ = Circuit().add(bell_pair, target_mapping=target_mapping)
    expected = Circuit().add_circuit(bell_pair, target_mapping=target_mapping)
    assert circ == expected
def test_add_with_circuit_with_target(bell_pair):
    target = [10, 11]
    circ = Circuit().add(bell_pair, target=target)
    expected = Circuit().add_circuit(bell_pair, target=target)
    assert circ == expected
def test_circuit_copy(h, bell_pair, cnot_instr):
    # copy() yields an equal but distinct object
    original = Circuit().add(h).add(bell_pair).add(cnot_instr)
    copy = original.copy()
    assert copy is not original
    assert copy == original
def test_circuit_copy_with_modification(h, bell_pair, cnot_instr):
    # Mutating the copy must not affect the original
    original = Circuit().add(h).add(bell_pair)
    copy = original.copy().add(cnot_instr)
    assert copy != original
def test_iadd_operator(cnot_instr, h):
    # += accepts a circuit, an instruction, or an iterable of either
    circ = Circuit()
    circ += h
    circ += cnot_instr
    circ += [h, cnot_instr]
    assert circ == Circuit().add(h).add(cnot_instr).add(h).add(cnot_instr)
def test_add_operator(h, bell_pair):
    # + concatenates circuits left-to-right; order matters
    addition = h + bell_pair + h + h
    expected = Circuit().add(h).add(bell_pair).add(h).add(h)
    assert addition == expected
    assert addition != (h + h + bell_pair + h)
@pytest.mark.xfail(raises=TypeError)
def test_iadd_with_unknown_type(h):
    # NOTE(review): xfail without strict=True XPASSes silently if no error
    h += 100
# --- circuit.subroutine decorator: registration and accepted return types ---
def test_subroutine_register():
    # register a private method to avoid Sphinx docs picking this up
    @circuit.subroutine(register=True)
    def _foo(target):
        """this docstring will be added to the registered attribute"""
        return Instruction(Gate.H(), target)
    circ = Circuit()._foo(0)
    assert circ == Circuit(Instruction(Gate.H(), 0))
    assert Circuit._foo.__doc__ == _foo.__doc__
def test_subroutine_returns_circuit():
    # NOTE(review): foo ignores `target` and hard-codes qubit 0 — presumably
    # intentional since add() is called with 0; confirm.
    @circuit.subroutine()
    def foo(target):
        return Circuit().add(Instruction(Gate.H(), 0))
    circ = Circuit().add(foo, 0)
    assert circ == Circuit(Instruction(Gate.H(), 0))
def test_subroutine_returns_instruction():
    @circuit.subroutine()
    def foo(target):
        return Instruction(Gate.H(), 0)
    circ = Circuit().add(foo, 0)
    assert circ == Circuit(Instruction(Gate.H(), 0))
def test_subroutine_returns_iterable():
    # Generators of instructions are also accepted
    @circuit.subroutine()
    def foo(target):
        for qubit in range(1):
            yield Instruction(Gate.H(), qubit)
    circ = Circuit().add(foo, 0)
    assert circ == Circuit(Instruction(Gate.H(), 0))
def test_subroutine_nested():
    @circuit.subroutine()
    def h(target):
        for qubit in target:
            yield Instruction(Gate.H(), qubit)
    @circuit.subroutine()
    def h_nested(target):
        for qubit in target:
            yield h(target)
    circ = Circuit().add(h_nested, [0, 1])
    # h_nested yields h([0, 1]) once per qubit, so H lands on 0,1,0,1
    expected = Circuit([Instruction(Gate.H(), j) for i in range(2) for j in range(2)])
    assert circ == expected
# --- to_ir(): translation of circuits into jaqcd IR programs ---
def test_ir_empty_instructions_result_types():
    """An empty circuit serializes to an IR program with all-empty sections."""
    circ = Circuit()
    assert circ.to_ir() == jaqcd.Program(
        instructions=[], results=[], basis_rotation_instructions=[]
    )
def test_ir_non_empty_instructions_result_types():
    """Instructions and result types map 1:1 onto IR entries."""
    circ = Circuit().h(0).cnot(0, 1).probability([0, 1])
    expected = jaqcd.Program(
        instructions=[jaqcd.H(target=0), jaqcd.CNot(control=0, target=1)],
        results=[jaqcd.Probability(targets=[0, 1])],
        basis_rotation_instructions=[],
    )
    assert circ.to_ir() == expected
def test_ir_non_empty_instructions_result_types_basis_rotation_instructions():
    """Sampling a non-Z observable adds basis rotation instructions to the IR."""
    circ = Circuit().h(0).cnot(0, 1).sample(observable=Observable.X(), target=[0])
    expected = jaqcd.Program(
        instructions=[jaqcd.H(target=0), jaqcd.CNot(control=0, target=1)],
        results=[jaqcd.Sample(observable=["x"], targets=[0])],
        basis_rotation_instructions=[jaqcd.H(target=0)],
    )
    assert circ.to_ir() == expected
def test_basis_rotation_instructions_all():
circ = Circuit().h(0).cnot(0, 1).sample(observable=Observable.Y())
expected = [
Instruction(Gate.Z(), 0),
Instruction(Gate.S(), 0),
Instruction(Gate.H(), 0),
Instruction(Gate.Z(), 1),
Instruction(Gate.S(), 1),
Instruction(Gate.H(), 1),
]
assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_target():
circ = Circuit().h(0).cnot(0, 1).expectation(observable=Observable.X(), target=0)
expected = [Instruction(Gate.H(), 0)]
assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_tensor_product():
circ = (
Circuit()
.h(0)
.cnot(0, 1)
.expectation(observable=Observable.X() @ Observable.Y() @ Observable.Y(), target=[0, 1, 2])
)
expected = [
Instruction(Gate.H(), 0),
Instruction(Gate.Z(), 1),
Instruction(Gate.S(), 1),
Instruction(Gate.H(), 1),
Instruction(Gate.Z(), 2),
Instruction(Gate.S(), 2),
Instruction(Gate.H(), 2),
]
assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_tensor_product_shared_factors():
circ = (
Circuit()
.h(0)
.cnot(0, 1)
.expectation(observable=Observable.X() @ Observable.Y() @ Observable.Y(), target=[0, 1, 2])
.expectation(observable=Observable.X() @ Observable.Y(), target=[0, 1])
)
expected = [
Instruction(Gate.H(), 0),
Instruction(Gate.Z(), 1),
Instruction(Gate.S(), 1),
Instruction(Gate.H(), 1),
Instruction(Gate.Z(), 2),
Instruction(Gate.S(), 2),
Instruction(Gate.H(), 2),
]
assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_identity():
circ = (
Circuit()
.h(0)
.cnot(0, 1)
.cnot(1, 2)
.cnot(2, 3)
.cnot(3, 4)
.expectation(observable=Observable.X(), target=[0])
.expectation(observable=Observable.I(), target=[2])
.expectation(observable=Observable.I() @ Observable.Y(), target=[1, 3])
.expectation(observable=Observable.I(), target=[0])
.expectation(observable=Observable.X() @ Observable.I(), target=[1, 3])
.expectation(observable=Observable.Y(), target=[2])
)
expected = [
Instruction(Gate.H(), 0),
Instruction(Gate.H(), 1),
Instruction(Gate.Z(), 2),
Instruction(Gate.S(), 2),
Instruction(Gate.H(), 2),
Instruction(Gate.Z(), 3),
Instruction(Gate.S(), 3),
Instruction(Gate.H(), 3),
]
assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_different_targets():
    """Result types on disjoint targets each contribute their own rotation."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .expectation(observable=Observable.X(), target=0)
        .sample(observable=Observable.H(), target=1)
    )
    # Per these fixtures, the H observable diagonalizes with Ry(-pi/4).
    expected = [Instruction(Gate.H(), 0), Instruction(Gate.Ry(-np.pi / 4), 1)]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_same_targets():
    """Identical observables across result types share one rotation set."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .expectation(observable=Observable.H() @ Observable.X(), target=[0, 1])
        .sample(observable=Observable.H() @ Observable.X(), target=[0, 1])
        .variance(observable=Observable.H() @ Observable.X(), target=[0, 1])
    )
    expected = [Instruction(Gate.Ry(-np.pi / 4), 0), Instruction(Gate.H(), 1)]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_all_specified_same_targets():
    """A target-less (all-qubit) observable coexists with an explicitly targeted one."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .expectation(observable=Observable.H())
        .sample(observable=Observable.H(), target=[0])
    )
    # The target-less H applies to every qubit in the circuit.
    expected = [Instruction(Gate.Ry(-np.pi / 4), 0), Instruction(Gate.Ry(-np.pi / 4), 1)]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_specified_all_same_targets():
    """Same as the previous test with the declaration order reversed."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .sample(observable=Observable.H(), target=[0])
        .expectation(observable=Observable.H())
    )
    expected = [Instruction(Gate.Ry(-np.pi / 4), 0), Instruction(Gate.Ry(-np.pi / 4), 1)]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_same_targets_hermitian():
    """A Hermitian observable rotates via its diagonalizing unitary, emitted once."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .sample(observable=Observable.Hermitian(matrix=np.array([[1, 0], [0, -1]])), target=[1])
        .expectation(
            observable=Observable.Hermitian(matrix=np.array([[1, 0], [0, -1]])), target=[1]
        )
    )
    expected = [Instruction(Gate.Unitary(matrix=np.array([[0, 1], [1, 0]])), target=[1])]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_different_hermitian_targets():
    """Different Hermitian observables on different qubits rotate independently."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .sample(observable=Observable.Hermitian(matrix=np.array([[1, 0], [0, -1]])), target=[1])
        .expectation(observable=Observable.Hermitian(matrix=np.array([[0, 1], [1, 0]])), target=[0])
    )
    expected = [
        Instruction(
            Gate.Unitary(
                matrix=1.0 / np.sqrt(2.0) * np.array([[1.0, 1.0], [1.0, -1.0]], dtype=complex)
            ),
            target=[0],
        ),
        Instruction(Gate.Unitary(matrix=np.array([[0, 1], [1, 0]])), target=[1]),
    ]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_tensor_product_hermitian():
    """Hermitian factors inside tensor products rotate via per-qubit unitaries."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .cnot(1, 2)
        .sample(
            observable=Observable.Hermitian(matrix=np.array([[1, 0], [0, -1]])) @ Observable.H(),
            target=[0, 1],
        )
        .variance(
            observable=Observable.Hermitian(matrix=np.array([[1, 0], [0, -1]])) @ Observable.H(),
            target=[0, 1],
        )
        .expectation(observable=Observable.Hermitian(matrix=np.array([[0, 1], [1, 0]])), target=[2])
    )
    expected = [
        Instruction(Gate.Unitary(matrix=np.array([[0, 1], [1, 0]])), target=[0]),
        Instruction(Gate.Ry(-np.pi / 4), 1),
        Instruction(
            Gate.Unitary(
                matrix=1.0 / np.sqrt(2.0) * np.array([[1.0, 1.0], [1.0, -1.0]], dtype=complex)
            ),
            target=[2],
        ),
    ]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_tensor_product_hermitian_qubit_count_2():
    """A 2-qubit Hermitian factor yields a single 2-qubit rotation unitary."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .cnot(1, 2)
        .expectation(observable=Observable.I(), target=[1])
        .sample(
            observable=Observable.Hermitian(matrix=np.eye(4)) @ Observable.H(), target=[0, 1, 2]
        )
        .variance(observable=Observable.H(), target=[2])
        .variance(observable=Observable.Hermitian(matrix=np.eye(4)), target=[0, 1])
        .expectation(observable=Observable.I(), target=[0])
    )
    expected = [
        Instruction(Gate.Unitary(matrix=np.eye(4)), target=[0, 1]),
        Instruction(Gate.Ry(-np.pi / 4), 2),
    ]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_multiple_result_types_tensor_product_probability():
    """Probability result types never contribute basis-rotation instructions."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .cnot(1, 2)
        .probability([0, 1])
        .sample(observable=Observable.Z() @ Observable.Z() @ Observable.H(), target=[0, 1, 2])
        .variance(observable=Observable.H(), target=[2])
    )
    # Z factors are already diagonal; only the H factor on qubit 2 rotates.
    expected = [
        Instruction(Gate.Ry(-np.pi / 4), 2),
    ]
    assert circ.basis_rotation_instructions == expected
def test_basis_rotation_instructions_call_twice():
    """The property is stable: reading it twice returns the same instructions."""
    circ = (
        Circuit()
        .h(0)
        .cnot(0, 1)
        .expectation(observable=Observable.H() @ Observable.X(), target=[0, 1])
        .sample(observable=Observable.H() @ Observable.X(), target=[0, 1])
        .variance(observable=Observable.H() @ Observable.X(), target=[0, 1])
    )
    expected = [Instruction(Gate.Ry(-np.pi / 4), 0), Instruction(Gate.H(), 1)]
    assert circ.basis_rotation_instructions == expected
    assert circ.basis_rotation_instructions == expected
def test_depth_getter(h):
    """``depth`` delegates directly to the underlying moments structure."""
    assert h.depth is h._moments.depth
@pytest.mark.xfail(raises=AttributeError)
def test_depth_setter(h):
    """``depth`` is a read-only property."""
    h.depth = 1
def test_instructions_getter(h):
    """``instructions`` yields the moments' values, in order."""
    assert list(h.instructions) == list(h._moments.values())
@pytest.mark.xfail(raises=AttributeError)
def test_instructions_setter(h, h_instr):
    """``instructions`` is a read-only property."""
    h.instructions = iter([h_instr])
def test_moments_getter(h):
    """``moments`` returns the internal moments object itself (no copy)."""
    assert h.moments is h._moments
@pytest.mark.xfail(raises=AttributeError)
def test_moments_setter(h):
    """``moments`` is a read-only property."""
    h.moments = Moments()
def test_qubit_count_getter(h):
    """``qubit_count`` delegates directly to the moments structure."""
    assert h.qubit_count is h._moments.qubit_count
@pytest.mark.xfail(raises=AttributeError)
def test_qubit_count_setter(h):
    """``qubit_count`` is a read-only property."""
    h.qubit_count = 1
@pytest.mark.parametrize(
    "circuit,expected_qubit_count",
    [
        (Circuit().h(0).h(1).h(2), 3),
        (
            Circuit()
            .h(0)
            .expectation(observable=Observable.H() @ Observable.X(), target=[0, 1])
            .sample(observable=Observable.H() @ Observable.X(), target=[0, 1]),
            2,
        ),
        (
            Circuit().h(0).probability([1, 2]).state_vector(),
            1,
        ),
        (
            Circuit()
            .h(0)
            .variance(observable=Observable.H(), target=1)
            .state_vector()
            .amplitude(["01"]),
            2,
        ),
    ],
)
def test_qubit_count(circuit, expected_qubit_count):
    """Per these fixtures: gate and observable targets count toward
    ``qubit_count``, while probability/state_vector/amplitude targets do not."""
    assert circuit.qubit_count == expected_qubit_count
@pytest.mark.parametrize(
    "circuit,expected_qubits",
    [
        (Circuit().h(0).h(1).h(2), QubitSet([0, 1, 2])),
        (
            Circuit()
            .h(0)
            .expectation(observable=Observable.H() @ Observable.X(), target=[0, 1])
            .sample(observable=Observable.H() @ Observable.X(), target=[0, 1]),
            QubitSet([0, 1]),
        ),
        (
            Circuit().h(0).probability([1, 2]).state_vector(),
            QubitSet([0]),
        ),
        (
            Circuit()
            .h(0)
            .variance(observable=Observable.H(), target=1)
            .state_vector()
            .amplitude(["01"]),
            QubitSet([0, 1]),
        ),
    ],
)
def test_circuit_qubits(circuit, expected_qubits):
    """``qubits`` mirrors ``qubit_count``: gate/observable targets are
    included, probability-only targets are not (same fixtures as above)."""
    assert circuit.qubits == expected_qubits
def test_qubits_getter(h):
    """``qubits`` equals the moments' qubits but returns a defensive copy."""
    assert h.qubits == h._moments.qubits
    assert h.qubits is not h._moments.qubits
@pytest.mark.xfail(raises=AttributeError)
def test_qubits_setter(h):
    """``qubits`` is a read-only property."""
    h.qubits = QubitSet(1)
def test_diagram(h):
    """``diagram`` forwards the circuit to the supplied builder and returns its output."""
    expected = "foo bar diagram"
    mock_diagram = Mock()
    mock_diagram.build_diagram.return_value = expected
    assert h.diagram(mock_diagram) == expected
    mock_diagram.build_diagram.assert_called_with(h)
|
{"hexsha": "91dd8c96a61baf4a7147b305794958b28fa82b0a", "size": 24753, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/unit_tests/braket/circuits/test_circuit.py", "max_stars_repo_name": "Takuya-Miyazaki/amazon-braket-sdk-python", "max_stars_repo_head_hexsha": "e9c868b3360b1c78d9ecb5222796af1fd2670e29", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/unit_tests/braket/circuits/test_circuit.py", "max_issues_repo_name": "Takuya-Miyazaki/amazon-braket-sdk-python", "max_issues_repo_head_hexsha": "e9c868b3360b1c78d9ecb5222796af1fd2670e29", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unit_tests/braket/circuits/test_circuit.py", "max_forks_repo_name": "Takuya-Miyazaki/amazon-braket-sdk-python", "max_forks_repo_head_hexsha": "e9c868b3360b1c78d9ecb5222796af1fd2670e29", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5592592593, "max_line_length": 100, "alphanum_fraction": 0.6562436876, "include": true, "reason": "import numpy", "num_tokens": 6302}
|
from urllib.parse import MAX_CACHE_SIZE
from transformers.models.bert.modeling_bert import BertForTokenClassification
from transformers.modeling_outputs import SequenceClassifierOutput
from torch.nn import CrossEntropyLoss
from typing import Dict, List
from .abstract_model import (
SpanClassifier,
SpanClassifierOutput,
SpanClassifierDataTrainingArguments,
SequenceClassifierOutputPlus,
translate_into_orig_train_args,
)
import gin
class BertForSpanContextClassification(BertForTokenClassification):
    """BERT model that classifies a span through a [MASK] in its context.

    The span of interest is assumed to have been replaced by a single [MASK]
    token in ``span_context_input_ids``; the hidden state(s) at the mask
    position(s) are summed and fed to the classification head, yielding one
    label per sequence (not per token, despite the token-classification base
    class, which is reused only for its encoder + head layout).
    """

    # Wordpiece id of "[MASK]" in the standard BERT vocabulary.
    # NOTE(review): hard-coded; other tokenizers may use a different id —
    # keep in sync with ``tokenizer.mask_token_id``
    # (cf. ``SpanContextClassifier.mask_id``).
    MASK_TOKEN_ID = 103

    def forward(
        self,
        span_context_input_ids=None,
        span_context_attention_mask=None,
        labels=None,
    ):
        r"""Encode the masked context and classify the sequence.

        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the classification loss. Indices should be in
            ``[0, ..., config.num_labels - 1]``.
        """
        outputs = self.bert(
            span_context_input_ids,
            attention_mask=span_context_attention_mask,
        )
        # (batch, seq, 1) indicator that is 1 exactly at [MASK] positions.
        mask_token_mask = (span_context_input_ids == self.MASK_TOKEN_ID).type(torch.uint8)
        mask_token_mask = mask_token_mask.reshape(tuple(mask_token_mask.shape) + (1,))
        sequence_output = outputs[0]
        # Sum the hidden states over the masked position(s) -> (batch, hidden).
        feature_vecs = (sequence_output * mask_token_mask).sum(dim=1)
        logits = self.classifier(self.dropout(feature_vecs))
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        return SequenceClassifierOutputPlus(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            feature_vecs=feature_vecs,
        )
from transformers import TrainingArguments
from dataclasses import dataclass, field
from typing import Optional
from loguru import logger
import os
from datasets import ClassLabel, load_dataset, DatasetDict
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorForTokenClassification,
Trainer,
set_seed,
)
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
import numpy as np
import torch
@gin.configurable
@dataclass
class SpanContextClassificationModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
    # NOTE: dataclass field order is part of the positional-init interface —
    # do not reorder these fields.
    model_name_or_path: str = field(
        metadata={
            "help": "Path to pretrained model or model identifier from huggingface.co/models"
        }
    )
    config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained config name or path if not the same as model_name"
        },
    )
    tokenizer_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained tokenizer name or path if not the same as model_name"
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={
            "help": "Where do you want to store the pretrained models downloaded from huggingface.co"
        },
    )
    saved_param_path: Optional[str] = field(
        default=None,
        metadata={"help": "Fine-Tuned parameters. If there is, load this parameter."},
    )
@gin.configurable
@dataclass
class SpanContextClassificationDataTrainingArguments(
    SpanClassifierDataTrainingArguments
):
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    # NOTE: dataclass field order is part of the positional-init interface —
    # do not reorder these fields.
    task_name: Optional[str] = field(
        default="ner", metadata={"help": "The name of the task (ner, pos...)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The configuration name of the dataset to use (via the datasets library)."
        },
    )
    # overwrite_cache: bool = field(
    #     default=False,
    #     metadata={"help": "Overwrite the cached training and evaluation sets"},
    # )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    label_all_tokens: bool = field(
        default=False,
        metadata={
            "help": "Whether to put the label for one word on all tokens of generated by that word or just on the "
            "one (in which case the other tokens will have a padding index)."
        },
    )
    max_length: int = field(
        default=512,
        metadata={"help": "Max sequence length in training."},
    )
    def __post_init__(self):
        # Task names are matched case-insensitively downstream.
        self.task_name = self.task_name.lower()
from datasets import Dataset, DatasetDict, DatasetInfo, Sequence, Value
mask_token = "[MASK]"
from tqdm import tqdm
@gin.configurable
class SpanContextClassifier(SpanClassifier):
    """Span classifier that encodes a span by [MASK]-ing it out of its context.

    The span of each example is replaced by a single [MASK] token (see
    ``get_context_tokens``); a ``BertForSpanContextClassification`` predicts
    the span label from the hidden state at the mask position.  Training
    (when ``training_args.do_train`` is set) happens eagerly in ``__init__``.
    """

    def __init__(
        self,
        span_classification_datasets: DatasetDict,
        model_args: SpanContextClassificationModelArguments,
        data_args: SpanContextClassificationDataTrainingArguments,
        training_args: TrainingArguments,
    ) -> None:
        """Load config/tokenizer/model, build a HF ``Trainer``, optionally train.

        Args:
            span_classification_datasets (DatasetDict): "train" and
                "validation" splits with context tokens.
            model_args: checkpoint / config / tokenizer locations.
            data_args: preprocessing options (``max_length`` etc.).
            training_args: project-level training arguments, translated into
                native ``transformers.TrainingArguments`` below.
        """
        self.model_args = model_args
        self.data_args = data_args
        training_args = translate_into_orig_train_args(training_args)
        self.training_args = training_args
        logger.info("Start Loading BERT")
        # Refuse to clobber a non-empty output directory unless explicitly allowed.
        if (
            os.path.exists(training_args.output_dir)
            and os.listdir(training_args.output_dir)
            and training_args.do_train
            and not training_args.overwrite_output_dir
        ):
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        logger.warning(
            f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
            + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
        )
        logger.info("Training/evaluation parameters %s", training_args)
        # Set seed before initializing model.
        set_seed(training_args.seed)
        datasets = DatasetDict(
            {
                "train": span_classification_datasets["train"],
                "validation": span_classification_datasets["validation"],
            }
        )
        # Label names are read from whichever split will actually be used.
        split = "train" if training_args.do_train else "validation"
        label_list = datasets[split].features["label"].names
        self.label_list = label_list
        num_labels = len(label_list)
        # Load pretrained model and tokenizer.
        # Distributed training: the .from_pretrained methods guarantee that
        # only one local process can concurrently download model & vocab.
        config = AutoConfig.from_pretrained(
            model_args.config_name
            if model_args.config_name
            else model_args.model_name_or_path,
            num_labels=num_labels,
            finetuning_task=data_args.task_name,
            cache_dir=model_args.cache_dir,
        )
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name
            if model_args.tokenizer_name
            else model_args.model_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=True,
        )
        model = BertForSpanContextClassification.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
        if model_args.saved_param_path:
            # Resume from previously fine-tuned parameters when provided.
            model.load_state_dict(torch.load(model_args.saved_param_path))
        self.tokenizer = tokenizer
        self.model = model
        # NOTE(review): forward() matches the mask via a hard-coded id (103);
        # keep that constant in sync with this tokenizer-derived value.
        self.mask_id = self.tokenizer.vocab[mask_token]
        span_classification_datasets = DatasetDict(
            {
                "train": span_classification_datasets["train"],
                "validation": span_classification_datasets["validation"],
            }
        )
        # SpanClassifier.__init__ tokenizes the datasets via preprocess_function.
        super().__init__(span_classification_datasets, data_args)
        self.argss += [model_args, data_args, training_args]
        tokenized_datasets = self.span_classification_datasets

        def compute_metrics(p):
            """seqeval metrics, written token-classification style.

            NOTE(review): this model emits sequence-level logits of shape
            (batch, num_labels), while ``np.argmax(..., axis=2)`` and seqeval
            expect token-level (batch, seq_len, num_labels) arrays — confirm
            this callback is actually exercised before trusting its numbers.
            """
            predictions, labels = p
            predictions = np.argmax(predictions, axis=2)
            # Remove ignored index (special tokens).
            true_predictions = [
                [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
                for prediction, label in zip(predictions, labels)
            ]
            true_labels = [
                [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
                for prediction, label in zip(predictions, labels)
            ]
            return {
                "accuracy_score": accuracy_score(true_labels, true_predictions),
                "precision": precision_score(true_labels, true_predictions),
                "recall": recall_score(true_labels, true_predictions),
                "f1": f1_score(true_labels, true_predictions),
            }

        # Initialize our Trainer.
        from transformers import default_data_collator

        trainer = Trainer(
            model=model,
            args=training_args,
            train_dataset=tokenized_datasets["train"]
            if training_args.do_train
            else None,
            eval_dataset=tokenized_datasets["validation"]
            if training_args.do_eval
            else None,
            tokenizer=tokenizer,
            data_collator=default_data_collator,
            compute_metrics=compute_metrics,
        )
        self.trainer = trainer
        # Training.
        if training_args.do_train:
            trainer.train(
                model_path=model_args.model_name_or_path
                if os.path.isdir(model_args.model_name_or_path)
                else None
            )
            trainer.save_model()  # Saves the tokenizer too for easy upload

    def predict(self, tokens: List[str], start: int, end: int) -> SpanClassifierOutput:
        """Predict the label of ``tokens[start:end]`` from its masked context."""
        context_tokens = tokens[:start] + [mask_token] + tokens[end:]
        tokenized_context = self.tokenizer(
            context_tokens,
            # Texts are pre-split word lists, not raw strings.
            is_split_into_words=True,
            return_offsets_mapping=True,
        )
        kwargs = {
            "span_context_input_ids": torch.LongTensor(
                [tokenized_context["input_ids"]]
            ).to(self.model.device),
            "span_context_attention_mask": torch.LongTensor(
                [tokenized_context["attention_mask"]]
            ).to(self.model.device),
        }
        outputs = self.model(**kwargs)
        return SpanClassifierOutput(
            label=self.label_list[outputs.logits[0].argmax()],
            logits=outputs.logits[0].cpu().detach().numpy(),
        )

    def get_context_tokens(
        self, tokens: List[List[str]], start: List[int], end: List[int]
    ):
        """Replace each span with a single [MASK] token inside its sentence."""
        context_tokens = [
            tok[:s] + [mask_token] + tok[e:] for tok, s, e in zip(tokens, start, end)
        ]
        return context_tokens

    def batch_predict(
        self, tokens: List[List[str]], start: List[int], end: List[int]
    ) -> List[SpanClassifierOutput]:
        """Predict labels for many spans at once via ``Trainer.predict``."""
        assert len(tokens) == len(start)
        assert len(start) == len(end)
        context_tokens = self.get_context_tokens(tokens, start, end)
        # Pad only to the longest tokenized context, capped at BERT's 512.
        # NOTE(review): truncation is not enabled — contexts longer than 512
        # subwords would exceed the model's limit; confirm inputs are short.
        max_context_len = min(
            max(
                len(
                    self.tokenizer(
                        cont_tok,
                        is_split_into_words=True,
                    )["input_ids"]
                )
                for cont_tok in context_tokens
            ),
            512,
        )
        tokenized_contexts = self.tokenizer(
            context_tokens,
            padding="max_length",
            max_length=max_context_len,
            is_split_into_words=True,
        )
        dataset = Dataset.from_dict(
            {
                "span_context_input_ids": tokenized_contexts["input_ids"],
                "span_context_attention_mask": tokenized_contexts["attention_mask"],
            }
        )
        outputs = self.trainer.predict(dataset)
        logits = outputs.predictions
        ret_list = []
        if isinstance(logits, tuple):
            # The model also returns feature_vecs; keep only the logits.
            logits, feature_vecs = logits
        for logit in logits:
            ret_list.append(
                SpanClassifierOutput(
                    label=self.label_list[logit.argmax()], logits=logit
                )
            )
        return ret_list

    def preprocess_function(self, example: Dict) -> Dict:
        """Tokenize a batch of masked contexts into model input fields."""
        context_tokens = self.get_context_tokens(
            example["tokens"], example["start"], example["end"]
        )
        tokenized_inputs = self.tokenizer(
            context_tokens,
            padding="max_length",
            truncation=True,
            # Texts are pre-split word lists, not raw strings.
            is_split_into_words=True,
            max_length=self.data_args.max_length,
        )
        ret_dict = {
            "span_context_input_ids": tokenized_inputs["input_ids"],
            "span_context_attention_mask": tokenized_inputs["attention_mask"],
        }
        if "label" in example:
            ret_dict["label"] = example["label"]
        return ret_dict
|
{"hexsha": "ed7c4e74b2d1f4f6477ed6349fa2686ef5648c38", "size": 15835, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/context.py", "max_stars_repo_name": "fracivilization/entity_typing", "max_stars_repo_head_hexsha": "ca6c327c47639e09adb55e4d84ec4f9867544e28", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/context.py", "max_issues_repo_name": "fracivilization/entity_typing", "max_issues_repo_head_hexsha": "ca6c327c47639e09adb55e4d84ec4f9867544e28", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/context.py", "max_forks_repo_name": "fracivilization/entity_typing", "max_forks_repo_head_hexsha": "ca6c327c47639e09adb55e4d84ec4f9867544e28", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0706150342, "max_line_length": 117, "alphanum_fraction": 0.6095358383, "include": true, "reason": "import numpy", "num_tokens": 3210}
|
-- Peano naturals; the successor's argument is linear (multiplicity 1).
data Nat : Type where
     Z : Nat
     S : (1 k : Nat) -> Nat
-- Plain booleans.
data Bool : Type where
     False : Bool
     True : Bool
-- A witness type indexed by the Bool it proves.
data Thing : Bool -> Type where
     TF : Thing False
     TT : Thing True
-- Standard Maybe with explicit implicit-type binders.
data Maybe : Type -> Type where
     Nothing : {a : Type} -> Maybe a
     Just : {a : Type} -> a -> Maybe a
-- Pattern matching on a witness whose index has multiplicity 0 (erased).
ok : (0 b : Bool) -> Thing b -> Bool
ok False TF = True
ok True TT = False
-- Linear identity: x is used exactly once.
id : {a : Type} -> (1 x : a) -> a
id x = x
-- A linear argument may be passed on to another linear function.
test : (1 x : Nat) -> Nat
test x = id x
-- Pair with a linear first and an erased second component.
data Pair : Type -> Type -> Type where
     MkPair : (1 x : a) -> (0 y : b) -> Pair a b
-- Projecting the linear component consumes the pair.
fst : (1 p : Pair a b) -> a
fst (MkPair x y) = x
-- A linear function bound locally in a let and applied once.
wibble : (1 p : Pair a b) -> a
wibble {a=a} (MkPair x y)
    = let test : (1 y : a) -> a
          test y = y in
          test x
-- Linear addition: both arguments are consumed exactly once.
plus : (1 x : Nat) -> (1 y : Nat) -> Nat
plus Z y = y
plus (S k) y = S (plus k y)
-- Holes exercising linearity reporting for unsolved arguments.
holetest1 : (1 x : Nat) -> (1 y : Nat) -> Nat
holetest1 x y = plus ?this y
holetest2 : (1 x : Nat) -> (1 y : Nat) -> Nat
holetest2 x y = plus x ?that
|
{"hexsha": "5403711b7a88987d8c6764e9ea7a73bb719ccf6e", "size": 970, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "idris2/tests/idris2/linear003/Linear.idr", "max_stars_repo_name": "Qqwy/Idris2-Erlang", "max_stars_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idris2/tests/idris2/linear003/Linear.idr", "max_issues_repo_name": "Qqwy/Idris2-Erlang", "max_issues_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idris2/tests/idris2/linear003/Linear.idr", "max_forks_repo_name": "Qqwy/Idris2-Erlang", "max_forks_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.7959183673, "max_line_length": 48, "alphanum_fraction": 0.5020618557, "num_tokens": 369}
|
import pandas as pd
import numpy as np
from scipy import stats
import os
from tqdm import tqdm
import pickle as pkl
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.switch_backend('agg')
def stat(seq_length):
    """Print descriptive statistics of sequence lengths and save a figure.

    Side effects only: prints ``scipy.stats.describe`` output and writes a
    scatter plot + histogram to ``./seq_len_stats.jpg`` in the current
    working directory (matplotlib 'agg' backend, set at module import).
    """
    print('Seq len info :')
    seq_len = np.asarray(seq_length)
    idx = np.arange(0, len(seq_len), dtype=np.int32)
    print(stats.describe(seq_len))
    plt.figure(figsize=(16, 9))
    # Left panel: per-sample scatter of sequence lengths.
    plt.subplot(121)
    plt.plot(idx[:], seq_len[:], 'ro')
    plt.grid(True)
    plt.xlabel('index')
    plt.ylabel('seq_len')
    plt.title('Scatter Plot')
    # Right panel: length distribution.
    plt.subplot(122)
    plt.hist(seq_len, bins=10, label=['seq_len'])
    plt.grid(True)
    plt.xlabel('seq_len')
    plt.ylabel('freq')
    plt.title('Histogram')
    plt.savefig('./seq_len_stats.jpg', format='jpg')
def preprocess_data(data_path):
    """Load per-patient CSVs, report length stats, and split 80/20 train/test.

    Each file under ``data_path`` is one patient's time series; columns
    [3, 208) are physiological indices, columns [209, ...) medications, and
    the filename's leading character encodes the outcome label.

    Returns:
        (train_samples, test_samples, max_len, meta, (index_dim, medicine_dim))
    """
    samples, seq_len = [], []
    max_len, dead_len, live_len = 0, 0, 0
    meta = {}
    print('Reading raw files...')
    for file in tqdm(os.listdir(data_path)):
        # NOTE(review): '0'-prefixed files get label 0 here, yet their length
        # is accumulated into ``dead_len`` below, and divide_data's train loop
        # labels them 1 — the filename/label convention needs confirming.
        if file.startswith('0'):
            dead = 0
        else:
            dead = 1
        raw_sample = pd.read_csv(os.path.join(data_path, file), sep=',')
        raw_sample = raw_sample.fillna(0)
        # .to_numpy() replaces DataFrame.as_matrix(), which was deprecated in
        # pandas 0.23 and removed in pandas 1.0.
        medicine = raw_sample.iloc[:, 209:].to_numpy()
        index = raw_sample.iloc[:, 3:208].to_numpy()
        length = index.shape[0]
        if length > max_len:
            max_len = length
        sample = {'index': index,
                  'medicine': medicine,
                  'length': length,
                  'label': dead,
                  'name': file}
        samples.append(sample)
        seq_len.append(length)
        if dead == 0:
            dead_len += length
        else:
            live_len += length
    stat(seq_len)
    print('Dead length {}'.format(dead_len))
    print('Live length {}'.format(live_len))
    train_samples, test_samples = train_test_split(samples, test_size=0.2)
    del samples
    meta['train_total'] = len(train_samples)
    meta['test_total'] = len(test_samples)
    index_dim = train_samples[0]['index'].shape[1]
    medicine_dim = train_samples[0]['medicine'].shape[1]
    print('Train total {} Test total {}'.format(meta['train_total'], meta['test_total']))
    print('Index dim {} Medicine dim {}'.format(index_dim, medicine_dim))
    return train_samples, test_samples, max_len, meta, (index_dim, medicine_dim)
def divide_data(train_data, test_data):
    """Load pre-split train/test patient CSVs into sample dicts.

    Each CSV holds one patient's time series: columns [3, 208) are
    physiological indices, columns [209, ...) medication indicators; the
    filename's leading character encodes the outcome label.

    Returns:
        (train_samples, test_samples, max_len, meta, (index_dim, medicine_dim))
    """
    train_samples, test_samples = [], []
    meta = {}
    total = 0
    max_len = 0
    print('Reading raw files...')
    for file in tqdm(os.listdir(train_data)):
        total += 1
        # NOTE(review): this assigns '0'-prefixed files label 1, while the
        # test loop below and preprocess_data assign them label 0 — one of
        # the two conventions must be inverted; confirm before training.
        if file.startswith('0'):
            dead = 1
        else:
            dead = 0
        raw_sample = pd.read_csv(os.path.join(train_data, file), sep=',')
        raw_sample = raw_sample.fillna(0)
        # .to_numpy() replaces DataFrame.as_matrix(), removed in pandas 1.0.
        medicine = raw_sample.iloc[:, 209:].to_numpy()
        index = raw_sample.iloc[:, 3:208].to_numpy()
        length = index.shape[0]
        if length > max_len:
            max_len = length
        sample = {'index': index,
                  'medicine': medicine,
                  'length': length,
                  'label': dead,
                  'name': file}
        train_samples.append(sample)
    for file in tqdm(os.listdir(test_data)):
        total += 1
        if file.startswith('0'):
            dead = 0
        else:
            dead = 1
        raw_sample = pd.read_csv(os.path.join(test_data, file), sep=',')
        raw_sample = raw_sample.fillna(0)
        medicine = raw_sample.iloc[:, 209:].to_numpy()
        index = raw_sample.iloc[:, 3:208].to_numpy()
        length = index.shape[0]
        if length > max_len:
            max_len = length
        # 'name' is included for parity with the train samples above and with
        # preprocess_data (backward compatible: extra dict key only).
        sample = {'index': index,
                  'medicine': medicine,
                  'length': length,
                  'label': dead,
                  'name': file}
        test_samples.append(sample)
    index_dim = train_samples[0]['index'].shape[1]
    medicine_dim = train_samples[0]['medicine'].shape[1]
    meta['train_total'] = len(train_samples)
    meta['test_total'] = len(test_samples)
    return train_samples, test_samples, max_len, meta, (index_dim, medicine_dim)
def save(filename, obj, message=None):
    """Serialize ``obj`` into ``filename`` via pickle.

    When ``message`` is given, a "Saving <message>..." status line is
    printed before writing.
    """
    announce = message is not None
    if announce:
        print('Saving {}...'.format(message))
    with open(filename, 'wb') as sink:
        pkl.dump(obj, sink)
def run_prepare(config, flags):
    """Split the raw corpus into train/eval samples, pickle the artifacts,
    and return ``(max sequence length, (index_dim, medicine_dim))``."""
    train_set, eval_set, longest, meta_info, dims = divide_data(
        config.raw_dir + '/train', config.raw_dir + '/test'
    )
    save(flags.train_file, train_set, message='train file')
    del train_set  # free the large sample list before pickling the rest
    save(flags.eval_file, eval_set, message='eval file')
    save(flags.meta, meta_info, message='meta file')
    del eval_set
    return longest, dims
|
{"hexsha": "6c8aeb35f3926529db5abbc4de2d289fc0276ad1", "size": 5513, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_preprocess.py", "max_stars_repo_name": "shiningliang/DIMM", "max_stars_repo_head_hexsha": "adc9ff2bea0921cffe91989a1adc95184d81e6a5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-01T12:28:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-01T12:28:26.000Z", "max_issues_repo_path": "torch_preprocess.py", "max_issues_repo_name": "shiningliang/DIMM", "max_issues_repo_head_hexsha": "adc9ff2bea0921cffe91989a1adc95184d81e6a5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torch_preprocess.py", "max_forks_repo_name": "shiningliang/DIMM", "max_forks_repo_head_hexsha": "adc9ff2bea0921cffe91989a1adc95184d81e6a5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-07T16:11:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-07T16:11:00.000Z", "avg_line_length": 33.8220858896, "max_line_length": 91, "alphanum_fraction": 0.5900598585, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1310}
|
//---------------------------------------------------------------------------//
//!
//! \file Utility_InverseMomentumUnits.hpp
//! \author Alex Robinson
//! \brief The inverse momentum units
//!
//---------------------------------------------------------------------------//
// Umbrella header: it only aggregates the individual inverse momentum unit
// headers (atomic and me*c based) together with the base momentum units.
#ifndef UTILITY_INVERSE_MOMENTUM_UNITS_HPP
#define UTILITY_INVERSE_MOMENTUM_UNITS_HPP
// Boost Includes
#include <boost/units/systems/si/momentum.hpp>
#include <boost/units/conversion.hpp>
// FRENSIE Includes
#include "Utility_InverseAtomicMomentumUnit.hpp"
#include "Utility_InverseMeCMomentumUnit.hpp"
#include "Utility_MomentumUnits.hpp"
#endif // end UTILITY_INVERSE_MOMENTUM_UNITS_HPP
//---------------------------------------------------------------------------//
// end Utility_InverseMomentumUnits.hpp
//---------------------------------------------------------------------------//
|
{"hexsha": "3426f8b12378839e56c19771cb934deec07734de", "size": 873, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "packages/utility/core/src/Utility_InverseMomentumUnits.hpp", "max_stars_repo_name": "bam241/FRENSIE", "max_stars_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2019-11-14T19:58:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-04T17:44:09.000Z", "max_issues_repo_path": "packages/utility/core/src/Utility_InverseMomentumUnits.hpp", "max_issues_repo_name": "bam241/FRENSIE", "max_issues_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 43.0, "max_issues_repo_issues_event_min_datetime": "2020-03-03T19:59:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T03:36:08.000Z", "max_forks_repo_path": "packages/utility/core/src/Utility_InverseMomentumUnits.hpp", "max_forks_repo_name": "bam241/FRENSIE", "max_forks_repo_head_hexsha": "e1760cd792928699c84f2bdce70ff54228e88094", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6.0, "max_forks_repo_forks_event_min_datetime": "2020-02-12T17:37:07.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-08T18:59:51.000Z", "avg_line_length": 33.5769230769, "max_line_length": 79, "alphanum_fraction": 0.510882016, "num_tokens": 151}
|
Require Import Nat Arith.
(* Unary natural numbers and cons-lists defined from scratch. *)
Inductive Nat : Type := succ : Nat -> Nat | zero : Nat.
Inductive Lst : Type := cons : Nat -> Lst -> Lst | nil : Lst.
(* Tree, Pair and ZLst are declared but unused by the theorem below. *)
Inductive Tree : Type := node : Nat -> Tree -> Tree -> Tree | leaf : Tree.
Inductive Pair : Type := mkpair : Nat -> Nat -> Pair
with ZLst : Type := zcons : Pair -> ZLst -> ZLst | znil : ZLst.
(* List concatenation. *)
Fixpoint append (append_arg0 : Lst) (append_arg1 : Lst) : Lst
           := match append_arg0, append_arg1 with
              | nil, x => x
              | cons x y, z => cons x (append y z)
              end.
(* List length as a unary Nat. *)
Fixpoint len (len_arg0 : Lst) : Nat
           := match len_arg0 with
              | nil => zero
              | cons x y => succ (len y)
              end.
(* Left-rotate a list: each step moves the head element to the back. *)
Fixpoint rotate (rotate_arg0 : Nat) (rotate_arg1 : Lst) : Lst
           := match rotate_arg0, rotate_arg1 with
              | zero, x => x
              | succ n, nil => nil
              | succ n, cons y x => rotate n (append x (cons y nil))
              end.
(* Associativity of append. *)
Lemma append_assoc: forall l1 l2 l3,
    append l1 (append l2 l3) = append (append l1 l2) l3.
Proof.
  induction l1.
  - simpl. intros. rewrite IHl1. reflexivity.
  - reflexivity.
Qed.
(* nil is a right identity for append. *)
Lemma lem3: forall l, append l nil = l.
Proof.
  induction l.
  - simpl. rewrite IHl. reflexivity.
  - reflexivity.
Qed.
(* Rotating a list by its own length moves the whole prefix to the back:
   rotate (len x) (x ++ y) = y ++ x. *)
Theorem theorem0 : forall (x : Lst) (y : Lst), eq (rotate (len x) (append x y)) (append y x).
Proof.
  induction x.
  - intros. simpl. rewrite <- append_assoc. rewrite IHx. rewrite <- append_assoc. reflexivity.
  - intros. simpl. rewrite lem3. reflexivity.
Qed.
|
{"author": "qsctr", "repo": "coq-quantified-theorems", "sha": "d3456ea0a70121e8de87956b45349aa7b943e37d", "save_path": "github-repos/coq/qsctr-coq-quantified-theorems", "path": "github-repos/coq/qsctr-coq-quantified-theorems/coq-quantified-theorems-d3456ea0a70121e8de87956b45349aa7b943e37d/benchmarks/CLAM/goal21.v"}
|
# spectro.py
"""
Classes and definitions related to spectroscopic data.
"""
from __future__ import print_function
import numpy as np
from astropy import wcs
from astropy import units as u
class Line(object):
    """A single spectral line described by its rest wavelength, observed
    wavelength, and redshift.

    Any two of the three quantities determine the third.  The constructor
    stores whatever is provided and then derives the missing values where
    possible.  Explicitly supplied values always win over derived ones.

    Parameters
    ----------
    restwlen : float or Quantity, optional
        Rest wavelength of the line.
    obswlen : float or Quantity, optional
        Observed (redshifted) wavelength of the line.
    redshift : float, optional
        Redshift 'z' (not a velocity) linking rest and observed
        wavelengths.
    name : str, optional
        Identification string for the line.

    Notes
    -----
    It is recommended to use the "set" methods to modify the attributes.

    Examples
    --------
    >>> myline = Line(restwlen=0.5876*u.micron, redshift=1.)
    >>> myline.obswlen
    <Quantity 1.1752 micron>
    """

    def __init__(self, restwlen=None, obswlen=None, redshift=None, name=None):
        self.restwlen = restwlen
        self.obswlen = obswlen
        self.redshift = redshift
        self.name = name
        # Derive whichever quantities were left unset.  The explicit
        # arguments are passed through so user-supplied values are never
        # overwritten by derived ones.
        self.set_obswlen(obswlen)
        self.set_restwlen(restwlen)
        self.set_redshift(redshift)

    def set_obswlen(self, obswlen=None):
        """Set the observed wavelength, deriving it from the rest
        wavelength and redshift when no value is given.

        Parameters
        ----------
        obswlen : float or Quantity, optional
            Observed wavelength.  When omitted, it is computed from
            restwlen and redshift if both are set.
        """
        if obswlen is None and self.restwlen is not None \
                and self.redshift is not None:
            obswlen = (self.redshift + 1) * self.restwlen
        self.obswlen = obswlen

    def set_restwlen(self, restwlen=None):
        """Set the rest wavelength, deriving it from the observed
        wavelength and redshift when no value is given.

        Parameters
        ----------
        restwlen : float or Quantity, optional
            Rest wavelength.  When omitted, it is computed from obswlen
            and redshift if both are set.
        """
        if restwlen is None and self.obswlen is not None \
                and self.redshift is not None:
            restwlen = self.obswlen / (self.redshift + 1.)
        self.restwlen = restwlen

    def set_redshift(self, redshift=None):
        """Set the redshift, deriving it from the rest and observed
        wavelengths when no value is given.

        Parameters
        ----------
        redshift : float, optional
            Redshift.  When omitted, it is computed from restwlen and
            obswlen if both are set.
        """
        if redshift is None and self.restwlen is not None \
                and self.obswlen is not None:
            redshift = (self.obswlen - self.restwlen) / self.restwlen
        self.redshift = redshift

    def set_name(self, name):
        """Assign an identification string to the line.

        Parameters
        ----------
        name : str
            Name to assign to this line.
        """
        self.name = name

    def validate_wavelengths(self):
        """Check that restwlen, obswlen, and redshift agree.

        Raises
        ------
        AssertionError
            If the three attributes are not mutually coherent.
        """
        assert self.obswlen == (self.redshift + 1) * self.restwlen
class Spectrum(object):
    """
    Class representing a 1-D spectrum loaded from a FITS HDU.

    The pixel values, the WCS, and the wavelength units are obtained
    directly from the HDU.

    Parameters
    ----------
    hdu : HDU
        The FITS Header Data Unit (a FITS extension).  Those can be
        obtained from an AstroData object or with astropy.io.fits.
    wunit : Unit, optional
        The units for the wavelengths (astropy.units).  If it is not
        provided, the constructor parses it from the 'WAT1_001' keyword.

    Raises
    ------
    wcs.InvalidCoordinateError
        If the WCS in the header does not match the data dimensionality.
    """

    def __init__(self, hdu, wunit=None):
        self.counts = self.get_counts_array_from_hdu(hdu)
        self.pix = self.get_pixel_array_from_hdu(hdu)
        # Let wcs.InvalidCoordinateError propagate to the caller; the
        # spectrum is unusable without a valid 1-D WCS.
        self.wcs = self.get_wcs_from_hdu(hdu)
        self.wlen = self.apply_wcs_to_pixels()
        self.wunit = wunit
        if self.wunit is None:
            self.wunit = self.get_wunit(hdu)

    @classmethod
    def get_counts_array_from_hdu(cls, hdu):
        """Return the pixel data (counts) stored in the HDU."""
        return hdu.data

    @classmethod
    def get_pixel_array_from_hdu(cls, hdu):
        """Return a 0-based pixel index array matching the data length."""
        return np.arange(len(hdu.data))

    @classmethod
    def get_wcs_from_hdu(cls, hdu):
        """Build a WCS object from the HDU header.

        Raises
        ------
        wcs.InvalidCoordinateError
            If the WCS has a different number of axes than the pixel
            array (eg. extracted F2 data headers are not cleaned of the
            second axis that no longer exists in the pixel data).
        """
        import warnings
        # astropy emits noisy FITSFixedWarnings while parsing headers;
        # silence them only for the duration of the parse.
        warnings.simplefilter('ignore')
        wcs_from_hdu = wcs.WCS(hdu.header.tostring())
        warnings.resetwarnings()
        if wcs_from_hdu.wcs.naxis != hdu.header['NAXIS']:
            msg = 'WCS and pixel array have different dimensions.\n'
            msg += 'WCS has ' + str(wcs_from_hdu.wcs.naxis) + \
                   ' axes, the array has ' + str(hdu.header["NAXIS"]) + ' axes.'
            raise wcs.InvalidCoordinateError(msg)
        return wcs_from_hdu

    @classmethod
    def get_wunit(cls, hdu):
        """Parse the wavelength unit from the 'WAT1_001' header keyword."""
        unit_str = hdu.header['WAT1_001'].split()[2].split('=')[1]
        # Strip a plural 's' (e.g. 'angstroms') so astropy can parse it.
        if unit_str.endswith('s'):
            unit_str = unit_str[:-1]
        return u.Unit(unit_str)

    def apply_wcs_to_pixels(self):
        """Convert the pixel array to world (wavelength) coordinates."""
        # Reshape to an (n, 1) array of coordinate rows.  The previous
        # code passed zip(self.pix), which yields a list of 1-tuples on
        # Python 2 but a one-shot iterator on Python 3 that astropy
        # cannot consume as an array.
        return self.wcs.wcs_pix2world(np.reshape(self.pix, (-1, 1)), 0)
class LineList(object):
    """
    A collection of Line objects built from a named list in LINELIST_DICT.

    Parameters
    ----------
    name : str
        Name of the line list to retrieve from LINELIST_DICT.
    redshift : float, optional
        Redshift to apply to the lines.  Note that the redshift is only
        stored; to re-apply it after a change, run reapply_redshift().
        Default = 0.

    Attributes
    ----------
    name : str
        Name of the line list.
    redshift : float
        Redshift applied to the lines.
    lines : list of Line
        The Line instances making up the list.  This is the attribute to
        access once the LineList instance has been created.

    Raises
    ------
    KeyError
        Raised if the line list name is invalid.

    See Also
    --------
    The Line class.

    Examples
    --------
    >>> mylinelist = LineList('quasar')
    >>> mylinelist.lines[0].obswlen
    <Quantity 0.5876 micron>
    >>> mylinelist.redshift = 1.
    >>> mylinelist.reapply_redshift()
    >>> mylinelist.lines[0].obswlen
    <Quantity 1.1752 micron>
    """

    def __init__(self, name, redshift=0.):
        self.name = name
        self.redshift = redshift
        self.lines = self._get_lines_from_list()

    def _get_lines_from_list(self, name=None):
        """Build Line instances for the named list, applying the stored
        redshift.

        Parameters
        ----------
        name : str, optional
            Name of the line list to retrieve.  Defaults to the
            instance's "name" attribute.

        Returns
        -------
        list of Line instances

        Raises
        ------
        KeyError
            Raised if the line list name is invalid.

        See Also
        --------
        LINELIST_DICT for valid line lists.
        """
        name = self.name if name is None else name
        try:
            return [Line(restwlen=wlen, redshift=self.redshift, name=label)
                    for (label, wlen) in LINELIST_DICT[name]]
        except KeyError:
            print('ERROR: Line list name, "%s", invalid.' % (name))
            print('ERROR: Valid lists are:', LINELIST_DICT.keys())
            raise

    def append_linelist(self, name):
        """Append a line list to an existing list.

        This is a simple append: duplicates are not removed and the
        combined list is not sorted.  The "name" attribute becomes
        "oldname+newname".

        Parameters
        ----------
        name : str
            Name of the line list to append.

        Examples
        --------
        >>> mylinelist = LineList('quasar')
        >>> mylinelist.append_linelist('paschen')
        """
        self.lines.extend(self._get_lines_from_list(name))
        self.name = '%s+%s' % (self.name, name)

    def reapply_redshift(self):
        """Re-apply the stored redshift to every line.

        Useful after the redshift attribute has been changed.

        Examples
        --------
        >>> mylinelist = LineList('quasar', redshift=0.5)
        >>> mylinelist.redshift = 1.0
        >>> mylinelist.reapply_redshift()
        """
        for line in self.lines:
            line.set_redshift(self.redshift)
            line.set_obswlen()
# -------------------------------
# pylint: disable=E1101
# This disable is to ignore the u.micron errors (dynamic loading)
# Rest wavelengths for the named line lists used by LineList.
# Each key maps a list name to (line label, rest wavelength) tuples.
# NOTE(review): 'quasar' has two entries labelled 'HeI' (0.5876 and
# 1.083 micron) -- presumably two distinct He I lines; confirm.
LINELIST_DICT = {
    'quasar' : [('SiIV]', 1400.0 * u.angstrom),
                # ('SiIV]_1393', 1393.755 * u.angstrom),
                # ('SiIV]_1402', 1402.770 * u.angstrom),
                ('CIV', 1549.0 * u.angstrom),
                # ('CIV_1548', 1548.195 * u.angstrom),
                # ('CIV_1550', 1550.770 * u.angstrom),
                ('CIII]', 1909 * u.angstrom),
                ('FeII_2382', 2382.765 * u.angstrom),
                ('FeII_2600', 2600.173 * u.angstrom),
                ('MgII', 2798.0 * u.angstrom),
                # ('MgII_2796', 2796.352 * u.angstrom),
                # ('MgII_2803', 2803.531 * u.angstrom),
                ('[OIII]_4959', 4959.0 * u.angstrom),
                ('[OIII]_5007', 5007.0 * u.angstrom),
                ('HeI', 0.5876 * u.micron),
                ('HeI', 1.083 * u.micron),
                ('H_delta', 0.4101 * u.micron),
                ('H_gamma', 0.4340 * u.micron),
                ('H_beta', 0.4861 * u.micron),
                ('H_alpha', 0.6563 * u.micron),
                ('Pa_epsilon', 0.9546 * u.micron),
                ('Pa_delta', 1.005 * u.micron),
                ('Pa_gamma', 1.094 * u.micron),
                ('Pa_beta', 1.282 * u.micron),
                ('Pa_alpha', 1.875 * u.micron)
               ],
    'paschen' : [('Pa_epsilon', 0.9546 * u.micron),
                 ('Pa_delta', 1.005 * u.micron),
                 ('Pa_gamma', 1.094 * u.micron),
                 ('Pa_beta', 1.282 * u.micron),
                 ('Pa_alpha', 1.875 * u.micron)
                ],
    'lyman' : [('Ly_gamma', 972.537 * u.angstrom),
               ('Ly_beta', 1025.722 * u.angstrom),
               ('Ly_alpha', 1215.670 * u.angstrom)]
    }
# pylint: enable=E1101
|
{"hexsha": "1f53c2c7554239f07f10388d7dfd0c93b719eced", "size": 16657, "ext": "py", "lang": "Python", "max_stars_repo_path": "klpyastro/sciformats/spectro.py", "max_stars_repo_name": "KathleenLabrie/KLpyastro", "max_stars_repo_head_hexsha": "bf29ce13df3e41090fee6ca502167ddd27349aa8", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "klpyastro/sciformats/spectro.py", "max_issues_repo_name": "KathleenLabrie/KLpyastro", "max_issues_repo_head_hexsha": "bf29ce13df3e41090fee6ca502167ddd27349aa8", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "klpyastro/sciformats/spectro.py", "max_forks_repo_name": "KathleenLabrie/KLpyastro", "max_forks_repo_head_hexsha": "bf29ce13df3e41090fee6ca502167ddd27349aa8", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9246435845, "max_line_length": 80, "alphanum_fraction": 0.5920033619, "include": true, "reason": "import numpy,from astropy", "num_tokens": 4112}
|
# Third party imports
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from scipy.interpolate import interp1d
from scipy.integrate import odeint
# Local application imports
from modules.reaction_models import Model
from modules.gof import rSquared, MSE
def simpleLinearFit(time, k):
    """Zero-intercept linear model g(t) = k * t, used by curve_fit."""
    return time * k
def EER(conversion, time, yfit):
    """Explicit Euler eRror.

    Mean square deviation between the forward-difference rate of the
    experimental conversion and the modeled reaction rate `yfit`.  The
    last point reuses the final backward difference.
    """
    npoints = len(conversion)
    residuals = np.zeros(npoints)
    # bulk: forward differences against the modeled rate
    for i in range(npoints - 1):
        slope = (conversion[i + 1] - conversion[i]) / (time[i + 1] - time[i])
        residuals[i] = slope - yfit[i]
    # boundary: backward difference at the last sample
    slope = (conversion[-1] - conversion[-2]) / (time[-1] - time[-2])
    residuals[-1] = slope - yfit[-1]
    # mean of the squared residuals
    return np.sum(residuals ** 2.0) / npoints
def integralRateRegression(time, conversion, modelName):
    """Fit the integral rate form g(a) = k*t by least squares.

    Parameters
    ----------
    time, conversion : numpy arrays of the experimental data
    modelName : str, a name understood by Model

    Returns
    -------
    k : the Arrhenius rate constant
    yfit : the modeled (linear) integral reaction rate k*t
    """
    model = Model(modelName)
    # experimental integral rate values g(a)
    gvals = np.array([model.g(a) for a in conversion])
    # linear fit through the origin: g(a) = k*t
    optimal, _ = curve_fit(simpleLinearFit, time, gvals)
    k = optimal[0]
    return k, simpleLinearFit(time, k)
def conversionRegression(time, conversion, modelName):
    """Fit the conversion fraction a(t) = alpha(t, k) by non-linear
    least squares.

    Parameters
    ----------
    time, conversion : numpy arrays of the experimental data
    modelName : str, a name understood by Model

    Returns
    -------
    k : the Arrhenius rate constant
    yfit : the modeled conversion fraction, except for D2/D4 where the
        linear integral rate fit is returned instead (no closed-form
        alpha for those models).
    """
    model = Model(modelName)
    if modelName in ['D2', 'D4']:
        # fall back to the linear integral rate regression
        return integralRateRegression(time, conversion, modelName)
    # seed the non-linear fit with the integral rate estimate
    k_est, _ = integralRateRegression(time, conversion, modelName)
    optimal, _ = curve_fit(model.alpha, time, conversion, p0=k_est)
    k = optimal[0]
    yfit = np.array([model.alpha(t, k) for t in time])
    return k, yfit
def differentialRateRegression(time,conversion,modelName):
    """Fit the ODE da/dt = k*f(a) to the experimental conversion.

    Parameters
    ----------
    time, conversion : numpy arrays of the experimental data
    modelName : str, a name understood by Model

    Returns
    -------
    k : float
        The fitted Arrhenius rate constant.
    yfit : numpy array
        The modeled conversion fraction, except for D2/D4 where it is
        the modeled (linear) integral rate k*t.
    """
    # perform Non-Linear Regression
    # fit the experimental differential rate conversion (f)
    # calculate the Arrhenius rate constant (k)
    # k_est: estimation for the Arrhenius constant
    # ODE: da/dt = k f(a)
    # NOTE: RHS closes over `model`, which is assigned below *before*
    # curve_fit ever calls RHS, so the late binding is safe.
    def RHS(t, k):
        'Function that returns Ca computed from an ODE for a k'
        def ODE(a, t):
            return k * model.f(a)
        u0 = conversion[0]  # initial condition: first experimental point
        u_numerical = odeint(ODE, u0, t)
        return u_numerical[:,0]
    # pick up the model
    model = Model(modelName)
    # define data
    x = time
    y = conversion
    # take estimation from the integral rate regression
    k_est, yfit = integralRateRegression(x,y,modelName)
    # fit ODE
    popt, pcov = curve_fit(RHS, x, y, p0=k_est) # p0 : initial guess
    # popt: optimal values for the parameters so that the sum of the squared residuals of f(xdata, *popt) - ydata is minimized.
    k = popt[0] # Arrhenius rate constant from fitting
    if modelName not in ['D2','D4']:
        yfit = np.array([model.alpha(t, k) for t in time]) # modeled conversion fraction
    else:
        yfit = k*time # modeled integral rate
    return k, yfit
def comprehensiveRegressor(time,conversion,models):
    """Run the integral, conversion, and differential regressions for
    every model and tabulate the fitted rate constants and errors.

    Parameters
    ----------
    time : numpy array
    conversion : numpy array
    models : list of str
        Model names understood by Model.

    Returns
    -------
    pandas.DataFrame
        One row per model with the three fitted rate constants and the
        mean square error of each fit.

    Notes
    -----
    The regression helpers return ``(k, yfit)`` and take three
    arguments; the previous version called them with a spurious fourth
    argument (a TypeError at runtime) and stored the fitted curves in
    the MSE columns.  The MSE is now computed explicitly against the
    experimental curve each regression actually fits: g(conversion) for
    the integral fit (and for the D2/D4 fallbacks), the conversion
    fraction otherwise.
    """
    rate_constant_alpha = []
    rate_constant_integral = []
    rate_constant_differen = []
    mse_coef_alpha = []
    mse_coef_integral = []
    mse_constant_differen = []
    # loop over the models
    for modelName in models:
        model = Model(modelName)
        # experimental integral rate g(a): reference curve for the linear fits
        g_exp = np.array([model.g(a) for a in conversion])
        # D2/D4 regressions fall back to the linear integral rate fit
        integral_fallback = modelName in ['D2', 'D4']

        # integral rate regression
        k_integral, yfit = integralRateRegression(time, conversion, modelName)
        rate_constant_integral.append(k_integral)
        mse_coef_integral.append(mean_squared_error(g_exp, yfit))

        # conversion regression
        k_alpha, yfit = conversionRegression(time, conversion, modelName)
        rate_constant_alpha.append(abs(k_alpha))  # guard against negative roots
        mse_coef_alpha.append(
            mean_squared_error(g_exp if integral_fallback else conversion, yfit))

        # differential rate regression
        k_differen, yfit = differentialRateRegression(time, conversion, modelName)
        rate_constant_differen.append(abs(k_differen))  # guard against negative roots
        mse_constant_differen.append(
            mean_squared_error(g_exp if integral_fallback else conversion, yfit))

    # pass the data to a dictionary and then to a dataframe
    data = {'model': models,
            'rate_constant - alpha': rate_constant_alpha,
            'rate_constant - integral': rate_constant_integral,
            'rate_constant - differential': rate_constant_differen,
            'MSE - alpha': mse_coef_alpha,
            'MSE - integral': mse_coef_integral,
            'MSE - differential': mse_constant_differen}
    return pd.DataFrame(data)
def activation_enthalpy(k,T):
    """Fit the Arrhenius equation ln k = ln A - Ea/(R*T) by linear
    regression of ln(k) on 1/T.

    Parameters
    ----------
    k : numpy array
        Rate constants.
    T : numpy array
        Absolute temperatures (K).

    Returns
    -------
    dict
        'activation_enthalpy' : Ea in J mol-1 (minus the slope times R),
        'frequency_factor'    : pre-exponential factor A,
        'R2_score'            : coefficient of determination of the fit.
    """
    # gas constant
    R = 8.31446261815324  # J K-1 mol-1
    x = 1.0 / T
    y = np.log(k)
    # ordinary least squares for y = slope*x + intercept
    slope, intercept = np.polyfit(x, y, 1)
    y_pred = slope * x + intercept
    Ea = -slope * R           # activation enthalpy in J mol-1
    A = np.exp(intercept)     # frequency (pre-exponential) factor
    # R2 with the observed data as the reference.  The previous version
    # called r2_score(y_pred, y), i.e. with y_true/y_pred swapped; r2 is
    # not symmetric, so that returned a wrong score.
    ss_res = np.sum((y - y_pred) ** 2)
    ss_tot = np.sum((y - np.mean(y)) ** 2)
    R2 = 1.0 - ss_res / ss_tot
    return {'activation_enthalpy': Ea, 'frequency_factor': A, 'R2_score': R2}
def isocEnthalpy(time,temperature):
    """Isoconversional activation enthalpy from ln(t) versus 1/T.

    Parameters
    ----------
    time : numpy array
        Times to reach a fixed conversion at each temperature.
    temperature : numpy array
        Absolute temperatures (K).

    Returns
    -------
    [Ea, MSE], gA
        Ea  : activation enthalpy in kJ mol-1 (the slope times R; note
              the sign convention differs from activation_enthalpy).
        MSE : mean square error of the linear fit in ln(t).
        gA  : the intercept, the ln[g(a)A] factor.
    """
    # gas constant
    R = 8.31446261815324  # J K-1 mol-1
    x = 1.0 / temperature
    y = np.log(time)
    # ordinary least squares for y = slope*x + intercept
    slope, intercept = np.polyfit(x, y, 1)
    y_pred = slope * x + intercept
    Ea = slope * R * 1.0e-3  # in kJ mol-1
    gA = intercept
    mse = np.mean((y - y_pred) ** 2)
    # NOTE: the previous version also computed r2_score with its
    # arguments swapped and then discarded the result; dropped here.
    return [Ea, mse], gA
def interpolateTime(time, conversion, interConversion):
    """Invert the conversion-time curve.

    Returns the time(s) corresponding to the requested conversion
    value(s) using nearest-neighbour interpolation, extrapolating for
    values outside the experimental range.

    Parameters
    ----------
    time : numpy array
    conversion : numpy array
    interConversion : scalar or array of conversion levels to look up
    """
    inverse = interp1d(conversion, time, kind='nearest',
                       fill_value="extrapolate")
    return inverse(interConversion)
|
{"hexsha": "9df11db69847745d740a225a112d661d40a9956b", "size": 7855, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/regressors.py", "max_stars_repo_name": "vasilogi/solid-kinetics", "max_stars_repo_head_hexsha": "c8726fcdeb56e3aed3576dbc3f318e717d020fb2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/regressors.py", "max_issues_repo_name": "vasilogi/solid-kinetics", "max_issues_repo_head_hexsha": "c8726fcdeb56e3aed3576dbc3f318e717d020fb2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/regressors.py", "max_forks_repo_name": "vasilogi/solid-kinetics", "max_forks_repo_head_hexsha": "c8726fcdeb56e3aed3576dbc3f318e717d020fb2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9308943089, "max_line_length": 131, "alphanum_fraction": 0.6398472311, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1966}
|
# Multiples in Number Range
Repo: https://github.com/Andrewnetwork/MathematicalProgrammingProblems
## 0.) Definitions
A *range* of natural numbers, positive integers, can be defined by the notation $[x,y]$ where $x$ is the starting number and $y$ is the ending number. Example: $[0,10] = [0,1,2,3,4,5,6,7,8,9,10]$.
In the range $[x,y]$ there are $y-x = \lvert[x,y]\lvert$ numbers. Example: $\lvert[2,5]\lvert= 5 - 2 = 3.$ There are three numbers in the range $[2,5]$: $[2,3,4]$
A number $m$ is a *multiple* of some natural number $n$ if there is some integer $i$ such that $m = in$.
## 1.) How Many Multiples in a Range
Let $x,y,z \in \mathbb{N}.$
In the number range $[x,y]$ given $x<y$, how many multiples of $z$ are there?
### Solution
Haskell Code:
```Haskell
multInRange strt stop mult = floor((stop-strt)/mult)
```
Mathematics:
$$\large \lfloor \frac{\lvert[x,y]\lvert}{z} \rfloor$$
$\lvert[x,y]\lvert = y-x$ gives us the total number of natural numbers in this range. $\lfloor \frac{\lvert[x,y]\lvert}{z}\rfloor$ gives us how many times z partitions we can get in this range. I.e. multiples.
### Problem Examples
#### 1: Within the range of natural numbers $[1,10]$ how many multiples of $3$ are there?
Answer: $3$.
Given $x=0,y=10,z=3$
$\large \lfloor \frac{(y-x)}{z} \rfloor = 3$
In haskell: $[1,2..10] = [1,2,3,4,5,6,7,8,9,10]$
Multiples of 3: $[3,6,9]$.
Sum: $3 + 3*2 + 3*3 = 3*(1+2+3) = 3*6 = 18$
#### 2: Within the range of natural numbers $[1,20]$ how many multiples of $3$ are there?
3,6,9,12,15,18
$3 + 3*2 + 3*3 + 3*4 + 3*5 + 3*6 = 3*(1+2+3+4+5+6) = 3*21 = 63$
## 2.) Sum of Multiples in a Range
Let $x,y,z \in \mathbb{N}.$
In the range $[x,y]$ what is the sum of the multiples of z?
### Solution
Haskell Code:
```Haskell
multInRange strt stop mult = floor((stop-strt)/mult)
sumNat n = (n*(n+1))/2
sumMult strt stop mult = mult * (sumNat (fromInteger (multInRange strt stop mult)))
```
Mathematics:
$$\large\begin{align}
n = \lfloor \frac{(y-x)}{z} \rfloor\\
z*\frac{n*(n+1)}{2}
\end{align}$$
### Problem Examples
#### 1: In the range [0,100] what's the sum of the multiples of 5?
Multiples of $5$ in $[0,100] = [5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80,85,90,95,100]$
How many multiples of $5$ are there in $[0,100]$?
$$n = \lfloor \frac{(y-x)}{z} \rfloor = \lfloor \frac{100}{5} \rfloor = 20$$
Haskell Code:
```Haskell
multInRange 0 100 5
-- Result: 20
```
Sum of these multiples:
$$5 + (5+5) + (5+5+5) + \ldots + (5 + 5 + \ldots n) = 5*1 + 5*2 + 5*3 + \dots + 5*n = 5*\sum_{i=1}^{n}{i} = 5*\frac{n*(n+1)}{2} $$
As we see here, to compute the sum of multiples of $5$ in the range $[0,100]$, we compute the sum of integers up to how many multiples are in that range multiplied by $5$.
$$
\large\begin{align}
z*\frac{n*(n+1)}{2} = 5 * \frac{20*(20+1)}{2} = 1050
\end{align}$$
Haskell Code:
```Haskell
sumMult 0 100 5
-- Result: 1050.0
```
#### 2: In the range [0,100] what's the sum of the multiples of 5 and 6?
Multiples of $5$ and $6$ in $[0,100] = $
$$[5,6,10,12,15,18,20,24,25,30,35,36,40,42,45,48,50,54,55,60,65,66,70,72,75,78,80,84,85,90,95,96,100]$$
We can extend the results from the previous question to multiple multiples on a range.
We can find the sum of the multiples of $5$ and $6$ on the range $[0,100]$. Independently by:
Haskell Code:
```Haskell
a = (sumMult 0 100 5)
b = (sumMult 0 100 6)
```
But alas, $a + b$ is not our answer! We need to take into consideration the multiples $5$ and $6$ share in common so we do not include them twice in our calculation. Since $5$ and $6$ are coprime, their common multiples are exactly the multiples of $5*6 = 30$. Therefore we achieve the result by computing:
Haskell Code:
```Haskell
(sumMult 0 100 5) + (sumMult 0 100 6) - (sumMult 0 100 (5*6) )
-- Result: 1686.0
```
|
{"hexsha": "5f34401edf01079130bb6f23704c2d5af7e44596", "size": 5717, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "MathematicalProgrammingProblems/Notebooks/Multiples in Number Range.ipynb", "max_stars_repo_name": "Andrewnetwork/HaskellExercises", "max_stars_repo_head_hexsha": "3caeb255b99bd69a39e9d021cb987c3cec4b641a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MathematicalProgrammingProblems/Notebooks/Multiples in Number Range.ipynb", "max_issues_repo_name": "Andrewnetwork/HaskellExercises", "max_issues_repo_head_hexsha": "3caeb255b99bd69a39e9d021cb987c3cec4b641a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MathematicalProgrammingProblems/Notebooks/Multiples in Number Range.ipynb", "max_forks_repo_name": "Andrewnetwork/HaskellExercises", "max_forks_repo_head_hexsha": "3caeb255b99bd69a39e9d021cb987c3cec4b641a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-12-25T02:35:13.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-25T02:35:13.000Z", "avg_line_length": 32.1179775281, "max_line_length": 277, "alphanum_fraction": 0.5118068917, "converted": true, "num_tokens": 1455}
|
C
C $Id: gclrwk.f,v 1.10 2008-07-27 00:20:57 haley Exp $
C
C Copyright (C) 2000
C University Corporation for Atmospheric Research
C All Rights Reserved
C
C The use of this Software is governed by a License Agreement.
C
      SUBROUTINE GCLRWK(WKID,COFL)
C
C  Clear-workstation entry point: WKID is the workstation identifier
C  and COFL the clear-control flag (presumably the GKS GCLRWK binding;
C  confirm against the GKS specification).  All work is delegated to
C  the internal routine GZCLRWK.
C
      INTEGER WKID,COFL
C
      CALL GZCLRWK(WKID,COFL)
C
      RETURN
      END
|
{"hexsha": "34645210ca86d8ddf1ab835b90cbe85c8c25bf45", "size": 445, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ncarg2d/src/libncarg_gks/awi/gclrwk.f", "max_stars_repo_name": "tenomoto/ncl", "max_stars_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 210, "max_stars_repo_stars_event_min_datetime": "2016-11-24T09:05:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T19:15:32.000Z", "max_issues_repo_path": "ncarg2d/src/libncarg_gks/awi/gclrwk.f", "max_issues_repo_name": "tenomoto/ncl", "max_issues_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 156, "max_issues_repo_issues_event_min_datetime": "2017-09-22T09:56:48.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T07:02:21.000Z", "max_forks_repo_path": "ncarg2d/src/libncarg_gks/awi/gclrwk.f", "max_forks_repo_name": "tenomoto/ncl", "max_forks_repo_head_hexsha": "a87114a689a1566e9aa03d85bcf6dc7325b47633", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2016-12-14T00:15:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T09:13:00.000Z", "avg_line_length": 24.7222222222, "max_line_length": 71, "alphanum_fraction": 0.5325842697, "num_tokens": 128}
|
import numpy as np
import math
import ChaoticPSOAlgorithm as PSO
def AR1GARCH11HansenSkewedTOptimize(ret: np.ndarray) -> np.ndarray:
    """Fit an AR(1)-GARCH(1,1) model to a return series via chaotic PSO.

    The AR(1) mean equation ``r_t = m * r_{t-1} + c`` is estimated by
    ordinary least squares; the GARCH(1,1) variance parameters
    (omega, alpha, beta) are then obtained by minimising the Gaussian
    negative log-likelihood of the AR residuals with the chaotic
    particle-swarm optimiser.

    NOTE(review): despite the name, the likelihood implemented here is
    the Gaussian one, not Hansen's skewed-t (which would require two
    extra shape parameters) -- confirm against the intended model.

    Parameters
    ----------
    ret : np.ndarray
        1-D array of returns.

    Returns
    -------
    np.ndarray of shape (5, 1) holding [c, m, omega, alpha, beta].
    """
    log = np.log
    pi = math.pi
    # --- AR(1) mean equation by least squares: y = m*x + c ---
    x = ret[0:(len(ret) - 2)]
    y = ret[1:(len(ret) - 1)]
    A = np.vstack([x, np.ones(len(x))]).T
    m, c = np.linalg.lstsq(A, y, rcond=None)[0]

    def loglik(parameters: np.ndarray) -> float:
        """Gaussian negative log-likelihood of the GARCH(1,1) recursion.

        parameters = [omega, alpha, beta]; a large penalty is returned
        when the stationarity condition alpha + beta < 1 is violated.
        """
        residual = ret[1:(len(ret) - 1)] - (np.multiply(ret[0:(len(ret) - 2)], m) + c)
        sigmasquare = np.zeros(shape=(len(residual), 1))
        LL = np.zeros(shape=(len(residual), 1))
        # Initialise the recursion with the unconditional sample moments.
        sigmasquarezero = np.mean(np.square(residual))
        residualzero = np.sqrt(np.mean(np.square(residual)))
        if (parameters[1] + parameters[2]) >= 1:
            # Non-stationary region: penalise so the optimiser avoids it.
            return 99999999999.99
        for i in range(len(LL)):
            # sigma^2_t = omega + alpha*eps^2_{t-1} + beta*sigma^2_{t-1}
            newsigma = parameters[0] + parameters[1] * residualzero * residualzero + parameters[2] * sigmasquarezero
            sigmasquare[i] = newsigma
            r = residual[i]
            zt = r * r / newsigma
            # Gaussian NLL term: 0.5*(log(2*pi) + log(sigma^2) + eps^2/sigma^2).
            # BUGFIX: the last term was previously 0.5*log(zt), which is
            # unbounded below as sigma^2 grows and rewards degenerate fits.
            LL[i] = 0.5 * log(2 * pi) + 0.5 * log(newsigma) + 0.5 * zt
            sigmasquarezero = newsigma
            residualzero = residual[i]
        # Return a scalar, consistent with the float penalty branch above.
        return float(np.sum(LL))

    # --- PSO search configuration: bounds for [omega, alpha, beta] ---
    lowerbound = np.zeros((3, 1))
    lowerbound[0] = 0.001
    lowerbound[1] = 0.1
    lowerbound[2] = 0.60
    upperbound = np.zeros((3, 1))
    upperbound[0] = 0.49
    upperbound[1] = 0.99
    upperbound[2] = 0.99999
    tolerance = 0.000000001
    numofswarms = 100
    initialgusssize = 1000
    maximumiteration = 500
    # Initial guess: omega from a fraction of the residual variance,
    # alpha/beta at typical GARCH magnitudes.
    initialguess = np.zeros((3, 1))
    residual = ret[1:(len(ret) - 1)] - (np.multiply(ret[0:(len(ret) - 2)], m) + c)
    initialguess[0] = 0.1 * np.var(residual)
    initialguess[1] = 0.15
    initialguess[2] = 0.75
    # Assemble the result vector [c, m, omega, alpha, beta].
    optimizedparameters = np.zeros((5, 1))
    optimizedparameters[0] = c
    optimizedparameters[1] = m
    optimizedpara = PSO.chaoticPSOOptimize(loglik, lowerbound, upperbound, maximumiteration,
                                           initialgusssize, initialguess, numofswarms, tolerance)
    optimizedparameters[2] = optimizedpara[0]
    optimizedparameters[3] = optimizedpara[1]
    optimizedparameters[4] = optimizedpara[2]
    return optimizedparameters
def GetInSampleSigma(optpara: np.ndarray, ret: np.ndarray) -> np.ndarray:
    """Reconstruct the in-sample GARCH(1,1) conditional variances.

    ``optpara`` holds [c, m, omega, alpha, beta] as produced by
    AR1GARCH11HansenSkewedTOptimize; ``ret`` is the raw return series.
    Returns a column vector with one conditional variance per residual.
    """
    # AR(1) residuals: eps_t = r_t - (m * r_{t-1} + c).
    eps = ret[1:(len(ret) - 1)] - (ret[0:(len(ret) - 2)] * optpara[1] + optpara[0])
    n = len(eps)
    variances = np.zeros(shape=(n, 1))
    # Seed the recursion with the unconditional sample moments.
    prev_var = np.mean(np.square(eps))
    prev_eps = np.sqrt(prev_var)
    for t in range(n):
        # sigma^2_t = omega + alpha*eps^2_{t-1} + beta*sigma^2_{t-1}
        variances[t] = optpara[2] + optpara[3] * prev_eps * prev_eps + optpara[4] * prev_var
        prev_var = variances[t]
        prev_eps = eps[t]
    return variances
|
{"hexsha": "476e858ddd4db4129ecdef11bdd58be9d5954e30", "size": 2609, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonGlobalOptimizationLib/PythonGlobalOptimizationLib/Models/AR1GARCH11HansenSkewedT.py", "max_stars_repo_name": "zhenshaoaixixi0507/PythonGlobalOptimizationLib", "max_stars_repo_head_hexsha": "31884aed6a7015482edda5dd4eab33c70138b40c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "PythonGlobalOptimizationLib/PythonGlobalOptimizationLib/Models/AR1GARCH11HansenSkewedT.py", "max_issues_repo_name": "zhenshaoaixixi0507/PythonGlobalOptimizationLib", "max_issues_repo_head_hexsha": "31884aed6a7015482edda5dd4eab33c70138b40c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonGlobalOptimizationLib/PythonGlobalOptimizationLib/Models/AR1GARCH11HansenSkewedT.py", "max_forks_repo_name": "zhenshaoaixixi0507/PythonGlobalOptimizationLib", "max_forks_repo_head_hexsha": "31884aed6a7015482edda5dd4eab33c70138b40c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5303030303, "max_line_length": 138, "alphanum_fraction": 0.6584898429, "include": true, "reason": "import numpy", "num_tokens": 785}
|
from core.nlp.response_generator.product.base.base_response_generator import BaseResponseGenerator
import numpy as np
class OYSRepeatResponseGenerator(BaseResponseGenerator):
    """OYS (On Your Side) response generator used after a repeat turn.

    Picks one empathetic acknowledgement at random and stores it under
    the ``'regular'`` key of the response data.
    """

    def __call__(self):
        self.response_data['regular'] = self.__create_oys_after_repeat()
        return self.response_data

    def __create_oys_after_repeat(self):
        """Return one randomly chosen single-message response."""
        candidates = [
            ["so thats the thing stuck in your head now😣"],
            ["so it is the thing kinda bothering you now🤔"],
            ["so that is what comes to your mind now😥"],
            ["so that is what you are concerned about🧐"],
            ["so that's what your mind dwells on now😢"],
            ["so thats the thing stressing you now😞"],
        ]
        np.random.shuffle(candidates)
        return candidates[0]
|
{"hexsha": "d3c5bf027e49050d889eef9380996009f2ff34d5", "size": 858, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/nlp/response_generator/product/cct/oys_repeat_response_generator.py", "max_stars_repo_name": "hirokig/CBT", "max_stars_repo_head_hexsha": "ac92490d2379f9c331973ca4301c7b10d7774b32", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "core/nlp/response_generator/product/cct/oys_repeat_response_generator.py", "max_issues_repo_name": "hirokig/CBT", "max_issues_repo_head_hexsha": "ac92490d2379f9c331973ca4301c7b10d7774b32", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-02-08T20:25:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T00:33:49.000Z", "max_forks_repo_path": "core/nlp/response_generator/product/cct/oys_repeat_response_generator.py", "max_forks_repo_name": "hirokig/CBT", "max_forks_repo_head_hexsha": "ac92490d2379f9c331973ca4301c7b10d7774b32", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-06T09:35:37.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-06T09:35:37.000Z", "avg_line_length": 33.0, "max_line_length": 98, "alphanum_fraction": 0.6328671329, "include": true, "reason": "import numpy", "num_tokens": 189}
|
# Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import numpy as np
import tensorflow as tf
import torch
import fastestimator as fe
from fastestimator.test.unittest_util import is_equal
class TestTranspose(unittest.TestCase):
    """Checks fe.backend.transpose against all three tensor backends."""

    def test_np(self):
        source = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        expected = np.array([[0, 3, 6], [1, 4, 7], [2, 5, 8]])
        self.assertTrue(is_equal(fe.backend.transpose(source), expected))

    def test_tf(self):
        source = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        expected = tf.constant([[0, 3, 6], [1, 4, 7], [2, 5, 8]])
        self.assertTrue(is_equal(fe.backend.transpose(source), expected))

    def test_torch(self):
        source = torch.tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        expected = torch.tensor([[0, 3, 6], [1, 4, 7], [2, 5, 8]])
        self.assertTrue(is_equal(fe.backend.transpose(source), expected))
|
{"hexsha": "5b2c4d352e98108e9ba2ae77fdbffa1bfec29d27", "size": 1499, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/PR_test/unit_test/backend/test_transpose.py", "max_stars_repo_name": "DwijayDS/fastestimator", "max_stars_repo_head_hexsha": "9b288cb2bd870f971ec4cee09d0b3205e1316a94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2019-05-21T21:29:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T05:55:21.000Z", "max_issues_repo_path": "test/PR_test/unit_test/backend/test_transpose.py", "max_issues_repo_name": "vbvg2008/fastestimator", "max_issues_repo_head_hexsha": "6061a4fbbeb62a2194ef82ba8017f651710d0c65", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 93, "max_issues_repo_issues_event_min_datetime": "2019-05-23T18:36:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T17:15:55.000Z", "max_forks_repo_path": "test/PR_test/unit_test/backend/test_transpose.py", "max_forks_repo_name": "vbvg2008/fastestimator", "max_forks_repo_head_hexsha": "6061a4fbbeb62a2194ef82ba8017f651710d0c65", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 47, "max_forks_repo_forks_event_min_datetime": "2019-05-09T15:41:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T17:00:08.000Z", "avg_line_length": 37.475, "max_line_length": 85, "alphanum_fraction": 0.6144096064, "include": true, "reason": "import numpy", "num_tokens": 427}
|
#! /usr/bin/env python
# Written by Vasaant S/O Krishnan in 2015. Run without arguments for instrunctions."
#
# Computes the midpoint on the sky between two equatorial (RA, Dec)
# coordinates using pyephem.
# NOTE(review): Python 2 only (print statements, the deprecated
# `string` module functions).

import ephem
from numpy import *
import sys
import string

# Command-line arguments, with the script name dropped.
inp=sys.argv[0:]
del inp[0]

if len(inp)==0:
    # No arguments: print usage and exit.
    print" Script to determine the midpoint between two points"
    print" in the sky"
    print" Type 'midPt.py RA1 Dec1 RA2 Dec2'"
    print" All coordinates must be of the form:"
    print" hh:mm:ss(.ssssssss) or hh mm ss(.ssssssss)"
    print" (Don't mix!).\n"
    sys.exit()

##########################################################################
#
# This section from angsep.py
#
# Find and replace any ":" and "=" from inputs
newinp=[]
for x in inp:
    newinp.append(string.replace(x, ":", " "))
inp=newinp

# Find and delete alphanumeric entries like "RA" and "DEC":
# rebuild all arguments into a single space-separated line ...
newline=""
for x in inp:
    newline=newline+" "
    for y in x:
        newline=newline+y
inp=string.split(newline)
# ... then keep only the tokens that parse as numbers.
newinp=[]
for x in inp:
    try:
        newinp.append(float(x))
    except ValueError:
        pass
inp=newinp

if len(inp)==4:
    # NOTE(review): at this point inp holds floats (see the filter
    # above), so string.split() on them would raise TypeError; this
    # branch looks unreachable because colon-separated input has
    # already been split into 12 numeric tokens.  Verify.
    ra1 =string.split(inp[0], ":")
    dec1=string.split(inp[1], ":")
    ra2 =string.split(inp[2], ":")
    dec2=string.split(inp[3], ":")
elif len(inp)==12:
    ra1 =inp[0:3]
    dec1=inp[3:6]
    ra2 =inp[6:9]
    dec2=inp[9:12]
else:
    print" Too few or too many parameters."
    sys.exit()

# Re-assemble sexagesimal components as strings for pyephem.
# NOTE(review): int(dec1[0]) drops the sign for declinations between
# 0 and -1 degrees (int("-0") == 0), silently flipping the hemisphere.
RA_HH_1 = str(int(ra1[0]))
RA_MM_1 = str(int(ra1[1]))
RA_SS_1 = str(float(ra1[2]))
DEC_DD_1 = str(int(dec1[0]))
DEC_MM_1 = str(int(dec1[1]))
DEC_SS_1 = str(float(dec1[2]))
RA_HH_2 = str(int(ra2[0]))
RA_MM_2 = str(int(ra2[1]))
RA_SS_2 = str(float(ra2[2]))
DEC_DD_2 = str(int(dec2[0]))
DEC_MM_2 = str(int(dec2[1]))
DEC_SS_2 = str(float(dec2[2]))
#
##########################################################################

# This is now my own:
a = ephem.Equatorial(RA_HH_1+':'+RA_MM_1+':'+RA_SS_1,DEC_DD_1+':'+DEC_MM_1+':'+DEC_SS_1)
b = ephem.Equatorial(RA_HH_2+':'+RA_MM_2+':'+RA_SS_2,DEC_DD_2+':'+DEC_MM_2+':'+DEC_SS_2)

# Arithmetic mean of the two coordinates (ephem angles are radians).
# NOTE(review): no handling of RA wrap-around at 0h/24h, so a pair
# straddling 0h midpoints to the opposite side of the sky.  Verify.
midRA = b.ra - (b.ra - a.ra )/2.0
midDec = b.dec - (b.dec - a.dec)/2.0

print ephem.hours(midRA) , ephem.degrees(midDec)
|
{"hexsha": "826c04c62595219af79f76870d15e6ebb8edd73f", "size": 2075, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/midPt.py", "max_stars_repo_name": "vasaantk/bin", "max_stars_repo_head_hexsha": "a8c264482ad3e5f78308f53d8af0667b02d6968d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/midPt.py", "max_issues_repo_name": "vasaantk/bin", "max_issues_repo_head_hexsha": "a8c264482ad3e5f78308f53d8af0667b02d6968d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/midPt.py", "max_forks_repo_name": "vasaantk/bin", "max_forks_repo_head_hexsha": "a8c264482ad3e5f78308f53d8af0667b02d6968d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0555555556, "max_line_length": 88, "alphanum_fraction": 0.5913253012, "include": true, "reason": "from numpy", "num_tokens": 686}
|
from keras import backend as K
import numpy as np
import math
from keras.constraints import Constraint
from keras.constraints import MinMaxNorm
from keras.initializers import Constant
from keras.layers import (
MaxPooling2D,
AveragePooling2D,
GlobalMaxPooling2D,
GlobalAveragePooling2D,
Add,
Multiply,
)
from keras.layers.pooling import(
_Pooling2D,
_GlobalPooling2D,
)
from pooling import ow_pool
class PosUnitModule(Constraint):
    """Keras weight constraint: positive weights with unit absolute sum.

    Negative components are shifted away along ``axis`` and the result
    is rescaled so that the absolute values sum to one.  ``rate``
    controls how strictly the constraint is applied per update:
    ``rate=1.0`` enforces it exactly, while ``rate<1.0`` only nudges
    the weights toward the feasible region at each step.

    # Arguments
        rate: enforcement rate in (0, 1]; see above.
        axis: integer axis along which the weight norms are computed.
    """

    def __init__(self, rate=1, axis=0):
        self.axis = axis
        self.rate = rate

    def __call__(self, w):
        # Shift weights so the (rate-scaled) most negative part is removed.
        shift = K.minimum(w, K.epsilon()) * self.rate
        shifted = w - K.min(shift, axis=self.axis, keepdims=True)
        # Rescale towards unit absolute sum; epsilon guards division by 0.
        norm = K.sum(K.abs(shifted), axis=self.axis, keepdims=True)
        target = self.rate + (1 - self.rate) * norm
        return shifted * target / (K.maximum(K.epsilon(), norm))

    def get_config(self):
        return {'axis': self.axis, 'rate': self.rate}
|
{"hexsha": "77dbebb3aafc4f6afa2177074c87fe73dadfb433", "size": 2013, "ext": "py", "lang": "Python", "max_stars_repo_path": "pooling/ow_constraints.py", "max_stars_repo_name": "jiforcen/orderedweightedpooling", "max_stars_repo_head_hexsha": "8cf13f86fcfb132080b5dd56463701f597bf3b60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-15T21:17:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-15T21:17:57.000Z", "max_issues_repo_path": "pooling/ow_constraints.py", "max_issues_repo_name": "jiforcen/orderedweightedpooling", "max_issues_repo_head_hexsha": "8cf13f86fcfb132080b5dd56463701f597bf3b60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pooling/ow_constraints.py", "max_forks_repo_name": "jiforcen/orderedweightedpooling", "max_forks_repo_head_hexsha": "8cf13f86fcfb132080b5dd56463701f597bf3b60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-03T01:56:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T20:44:49.000Z", "avg_line_length": 35.9464285714, "max_line_length": 77, "alphanum_fraction": 0.6368604074, "include": true, "reason": "import numpy", "num_tokens": 478}
|
@testset "Array interface" begin
    # Test aliases: 1-D input yields a QuantizedVector, 2-D a QuantizedMatrix.
    @test QuantizedArray(rand(10), k=2, method=:sample) isa QuantizedVector
    @test QuantizedArray(rand(2, 10), k=2, method=:sample) isa QuantizedMatrix

    # Test outer constructor checks: >2-D input, non-positive k or m,
    # and m not dividing the row count must all be rejected.
    @test_throws AssertionError QuantizedArray(rand(2, 2, 2), k=2, m=1, method=:sample)
    @test_throws AssertionError QuantizedArray(rand(2, 10), k=-1, m=1, method=:sample)
    @test_throws AssertionError QuantizedArray(rand(2, 10), k=2, m=-1, method=:sample)
    @test_throws AssertionError QuantizedArray(rand(3, 10), k=2, m=2, method=:sample)
    # These quantization methods reject non-floating-point element types.
    for method in [:pq, :opq, :rvq]
        @test_throws AssertionError QuantizedArray(rand([1,2,3], 3, 10), k=2, m=2, method=method)
    end

    # Test quantize function: re-quantizing the same data with the same
    # quantizer reproduces the quantized array.
    T = Float32
    X = rand(T, 3, 10)
    qa = QuantizedArray(X, k=2, m=3, method=:sample)
    @test QuantizedArrays.quantizer(qa) === qa.quantizer
    @test quantize(QuantizedArrays.quantizer(qa), X) == qa

    # Test other interface parts
    @test size(qa) == qa.quantizer.dims
    @test eltype(qa) == T
    @test QuantizedArrays.nvars(rand(10)) == 1
    @test QuantizedArrays.nvars(rand(2, 10)) == 2

    # Test setindex!  (quantized arrays are read-only)
    @test_throws ErrorException qa[1] = one(T)
    @test_throws ErrorException qa[1,1] = one(T)

    # Test orthogonal reconstruction: getindex
    # vector
    truevector = [0, 1, 0, 2, 0]
    codes = [0x00, 0x01, 0x02]
    vectors = [0 1 2]
    data = [0x00 0x01 0x00 0x02 0x00]
    cb = CodeBook(codes, vectors)
    q = QuantizedArrays.OrthogonalQuantization()
    # Identity rotation so decoded values match the codebook directly.
    rot = diagm(0=>ones(eltype(truevector), length(truevector)))
    aq = ArrayQuantizer(q, size(truevector), [cb], length(codes), Distances.SqEuclidean(), rot)
    qa = QuantizedArray(aq, data)
    @test qa[:] == truevector
    @test all(qa .== truevector)

    # matrix
    truematrix = [0 1 0 2 0 3;
                  0 1 0 2 0 3]
    codes = [0x00, 0x01, 0x02, 0x03]
    vectors = [0 1 2 3;
               0 1 2 3]
    data = [0x00 0x01 0x00 0x02 0x00 0x03]
    cb = CodeBook(codes, vectors)
    q = QuantizedArrays.OrthogonalQuantization()
    rot = diagm(0=>ones(eltype(truematrix), size(truematrix, 1)))
    aq = ArrayQuantizer(q, size(truematrix), [cb], length(codes), Distances.SqEuclidean(), rot)
    qa = QuantizedArray(aq, data)
    @test qa[:] == truematrix[:]
    @test qa[:,:] == truematrix
    @test all(qa .== truematrix)

    # Test aditive reconstruction: getindex
    # (decoded value is the sum of the entries from the two codebooks)
    # vector
    truevector = [0, 1, 0, 2, 0] .+ [-1, -1, 1, 1, 2]
    codes = [0x00, 0x01, 0x02]
    vectors1 = [0 1 2]
    vectors2 = [-1 1 2]
    data = [0x00 0x01 0x00 0x02 0x00;
            0x00 0x00 0x01 0x01 0x02]
    cbs = [CodeBook(codes, vectors1), CodeBook(codes, vectors2)]
    q = QuantizedArrays.AdditiveQuantization()
    rot = diagm(0=>ones(eltype(truevector), length(truevector)))
    aq = ArrayQuantizer(q, size(truevector), cbs, length(codes), Distances.SqEuclidean(), rot)
    qa = QuantizedArray(aq, data)
    @test qa[:] == truevector
    @test all(qa .== truevector)

    # matrix
    truematrix = [0 1 0 2 0 3;
                  0 1 0 2 0 3] .+
                 [ 2  3  1  4  4  1;
                  -2 -3 -1 -4 -4 -1]
    codes = [0x00, 0x01, 0x02, 0x03]
    vectors1 = [0 1 2 3;
                0 1 2 3]
    vectors2 = [1 2 3 4
                -1 -2 -3 -4]
    data = [0x00 0x01 0x00 0x02 0x00 0x03;
            0x01 0x02 0x00 0x03 0x03 0x00]
    cbs = [CodeBook(codes, vectors1), CodeBook(codes, vectors2)]
    q = QuantizedArrays.AdditiveQuantization()
    rot = diagm(0=>ones(eltype(truematrix), size(truematrix, 1)))
    aq = ArrayQuantizer(q, size(truematrix), cbs, length(codes), Distances.SqEuclidean(), rot)
    qa = QuantizedArray(aq, data)
    @test qa[:] == truematrix[:]
    @test qa[:,:] == truematrix
    @test all(qa .== truematrix)

    # Setindex tests: still read-only after manual construction.
    @test_throws ErrorException qa[1] = 1
    @test_throws ErrorException qa[1, 1] = 1
end
|
{"hexsha": "c3406525b52fa4cf17a8000e458f05f4e17f8a00", "size": 3933, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/interface.jl", "max_stars_repo_name": "UnofficialJuliaMirror/QuantizedArrays.jl-a7db621c-8ce0-11e9-16a1-0f86dc86bd10", "max_stars_repo_head_hexsha": "efcd74e6480f1b54a5dbcd4213a77bc92073df86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-06-21T14:48:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-13T02:09:45.000Z", "max_issues_repo_path": "test/interface.jl", "max_issues_repo_name": "UnofficialJuliaMirror/QuantizedArrays.jl-a7db621c-8ce0-11e9-16a1-0f86dc86bd10", "max_issues_repo_head_hexsha": "efcd74e6480f1b54a5dbcd4213a77bc92073df86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-06-21T11:32:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-05T08:35:12.000Z", "max_forks_repo_path": "test/interface.jl", "max_forks_repo_name": "UnofficialJuliaMirror/QuantizedArrays.jl-a7db621c-8ce0-11e9-16a1-0f86dc86bd10", "max_forks_repo_head_hexsha": "efcd74e6480f1b54a5dbcd4213a77bc92073df86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-08-23T07:48:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:25:37.000Z", "avg_line_length": 36.7570093458, "max_line_length": 97, "alphanum_fraction": 0.6142893466, "num_tokens": 1448}
|
#
# This file has been taken and modified from:
# https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py
#
# COPYRIGHT
#
# All contributions by François Chollet:
# Copyright (c) 2015, François Chollet.
# All rights reserved.
#
# Each contributor holds copyright over their respective contributions.
# The project versioning (Git) records all such contribution source information.
#
# LICENSE
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Visualization of the filters of the CNN, via gradient ascent in input space.
This script can run on CPU in a few minutes (with the TensorFlow backend).
All convolutional layers are processed, but only the top filters of that
layer are shown. The best neural network is loaded, but the weight file must
be set manually in case of a retrain.
Results are saved in the subfolder "layers/".
"""
from utils import load_best_hyperspace
from neural_net import WEIGHTS_DIR, build_model
from scipy.misc import imsave
import numpy as np
from keras import backend as K
import time
import os
# Dimensions of the generated pictures for each filter.
img_width = 32
img_height = 32

# Trained weights to visualise; must be updated by hand after a retrain.
weight_file = "{}/f37d5.hdf5".format(WEIGHTS_DIR)

# Output directory for the stitched per-layer filter images.
LAYERS_DIR = "layers"

# Load model in test phase mode: no dropout, and use fixed BN
K.set_learning_phase(0)
model = build_model(load_best_hyperspace())
model.load_weights(weight_file)
print('Model loaded.')
model.summary()
def normalize(x):
    """Scale *x* by its root-mean-square (L2) norm; epsilon avoids /0."""
    rms = K.sqrt(K.mean(K.square(x)))
    return x / (rms + 1e-5)
def deprocess_image(x):
    """Utility function to convert a tensor into a valid image.

    Note: the augmented assignments below modify *x* in place; callers
    should not rely on the argument being left untouched.
    """
    # Normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    # Clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)
    # Convert to RGB array
    x *= 255
    # Channels-first backends need the channel axis moved last for saving.
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
# Placeholder for the input images
input_img = model.input

# Symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = {layer.name: layer for layer in model.layers[1:]}

# The type of layers plotted can be changed by changing the "add" keyword.
# For example, we could plot every convolutional layer by replacing "add" by
# "conv".
layers_to_plot = [l.name for l in model.layers if "add" in l.name.lower()]
# We take add because it is a strategic bottleneck from the residual
# connections.

for layer_name in layers_to_plot:
    kept_filters = []
    layer_obj = layer_dict[layer_name]
    # Channel axis position depends on the backend's data format.
    if K.image_data_format() == 'channels_first':
        nb_filters = layer_obj.output_shape[1]
    else:
        nb_filters = layer_obj.output_shape[-1]
    print("Processing layer '{}' with shape {}.".format(
        layer_name, layer_obj.output_shape))

    for filter_index in range(0, nb_filters):
        # We scan through all filters.
        print('Processing filter {}'.format(filter_index))
        start_time = time.time()

        # We build a loss function that maximizes the activation
        # of the `nth` filter of the current layer
        layer_output = layer_obj.output
        if K.image_data_format() == 'channels_first':
            loss = K.mean(layer_output[:, filter_index, :, :])
        else:
            loss = K.mean(layer_output[:, :, :, filter_index])

        # We compute the gradient of the input picture wrt this loss
        grads = K.gradients(loss, input_img)[0]
        # Normalization trick: we normalize the gradient
        grads = normalize(grads)
        # This function returns the loss and grads given the input picture
        iterate = K.function([input_img], [loss, grads])
        # Step size for gradient ascent
        step = 1.
        # We start from a gray image with some random noise
        if K.image_data_format() == 'channels_first':
            input_img_data = np.random.random((1, 3, img_width, img_height))
        else:
            input_img_data = np.random.random((1, img_width, img_height, 3))
        input_img_data = (input_img_data - 0.5) * 20 + 128

        # We run gradient ascent for 20 steps
        for i in range(20):
            loss_value, grads_value = iterate([input_img_data])
            input_img_data += grads_value * step
            print('Current loss value:', loss_value)
            if loss_value <= 0.:
                # Some filters get stuck to 0, we can skip them
                break

        # Decode the resulting input image
        if loss_value > 0:
            img = deprocess_image(input_img_data[0])
            kept_filters.append((img, loss_value))
        end_time = time.time()
        print('Filter {} processed in {}s'.format(
            filter_index, end_time - start_time))

    # We will stich only the best filters that fit on a perfect square grid
    # (excess is discarded). The file name will say how many filters were kept.
    # Some filters can be discarded due to a negative loss (diverged), too.
    n = int(float(len(kept_filters))**0.5)
    # The filters that have the highest loss are assumed to be better-looking.
    # We will only keep the top 64 filters.
    kept_filters.sort(key=lambda x: x[1], reverse=True)
    kept_filters = kept_filters[:n * n]

    # Build a black picture with enough space for our `n x n` filters
    # of size `img_width x img_height`, with a 5px margin in between
    margin = 5
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin
    stitched_filters = np.zeros((width, height, 3))

    # Fill the picture with our saved filters
    for i in range(n):
        for j in range(n):
            img, loss = kept_filters[i * n + j]
            stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
                             (img_height + margin) * j: (img_height + margin) * j + img_height, :] = img

    if not os.path.exists(LAYERS_DIR):
        os.makedirs(LAYERS_DIR)

    # Save the result to disk
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; this
    # script requires scipy<1.2 or a switch to imageio.imwrite.
    imsave(
        '{}/{}_best_filters_{}_({}x{})_out_of_{}.png'.format(
            LAYERS_DIR, layer_name, n**2, n, n, nb_filters
        ),
        stitched_filters
    )
|
{"hexsha": "c7d42d25e418dfa0b38ad691b46447e3a8b4ee4b", "size": 7269, "ext": "py", "lang": "Python", "max_stars_repo_path": "hyper_param/conv_filters_visualization.py", "max_stars_repo_name": "EnisBerk/hyperopt-keras-sample", "max_stars_repo_head_hexsha": "dc6892f023b83ee3b5b92f2a258676ad6bbc0a94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-05-07T08:24:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T23:55:42.000Z", "max_issues_repo_path": "hyper_param/conv_filters_visualization.py", "max_issues_repo_name": "EnisBerk/hyperopt-keras-sample", "max_issues_repo_head_hexsha": "dc6892f023b83ee3b5b92f2a258676ad6bbc0a94", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-05-16T19:15:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:16:27.000Z", "max_forks_repo_path": "hyper_param/conv_filters_visualization.py", "max_forks_repo_name": "EnisBerk/hyperopt-keras-sample", "max_forks_repo_head_hexsha": "dc6892f023b83ee3b5b92f2a258676ad6bbc0a94", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-05-28T19:47:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-17T05:57:51.000Z", "avg_line_length": 35.286407767, "max_line_length": 104, "alphanum_fraction": 0.6734076214, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1774}
|
module module_example
    ! Demonstrates default accessibility in a module scoping unit:
    ! module entities are PUBLIC unless declared otherwise, so both
    ! x (explicitly public) and y (implicitly public) can be accessed
    ! from outside this module by any program unit that USEs it.
    implicit none
    real, public :: x = 100.
    real :: y = 100.
end module module_example
|
{"hexsha": "19e0164c40ca786467868bdbea8665bc9dca08ed", "size": 174, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "basics/subroutine/scope/scopingunit_module.f90", "max_stars_repo_name": "ComplicatedPhenomenon/Fortran_Takeoff", "max_stars_repo_head_hexsha": "a13180050367e59a91973af96ab680c2b76097be", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "basics/subroutine/scope/scopingunit_module.f90", "max_issues_repo_name": "ComplicatedPhenomenon/Fortran_Takeoff", "max_issues_repo_head_hexsha": "a13180050367e59a91973af96ab680c2b76097be", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basics/subroutine/scope/scopingunit_module.f90", "max_forks_repo_name": "ComplicatedPhenomenon/Fortran_Takeoff", "max_forks_repo_head_hexsha": "a13180050367e59a91973af96ab680c2b76097be", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8571428571, "max_line_length": 58, "alphanum_fraction": 0.6896551724, "num_tokens": 47}
|
# -------------------------------------------------------------------
import cv2
import numpy as np
import time
from enum import Enum
# =============================================================================
# Ref. design
# https://github.com/Xilinx/Vitis-AI/blob/v1.1/mpsoc/vitis_ai_dnndk_samples/tf_yolov3_voc_py/tf_yolov3_voc.py
# From Vitis-AI Zoo
# 1. data channel order: BGR(0~255)
# 2. resize: 416 * 416(H * W)
# 3. mean_value: 0.0, 0.0, 0.0
# 4. scale: 1 / 255.0
# 5. reisze mode: biliner
# Data from yolov4_leaky_spp_m.prototxt
# and Xilinx yolov4-test.py
# Anchor priors as (w, h) pairs, normalised by the 416-pixel input size.
yolo_anchors = np.array([(12, 16), (19, 36), (40, 28), (36, 75), (76, 55), (72, 146), (142, 110), (192, 243),(459, 401)], np.float32) / 416
# Anchor indices used by each of the three output scales.
# NOTE(review): confirm the mask order against the DPU output tensor order.
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
# -------------------------------------------------------------------
# YOLOv4 data collected from notebook (dpu_test.ipynb)
#
# inputTensor[0]: name=data_fixed, dims=[1, 416, 416, 3], dtype=xint8
#
# outputTensor[0]: name=layer138-conv_fixed, dims=[1, 52, 52, 255], dtype=xint8
# outputTensor[1]: name=layer149-conv_fixed, dims=[1, 26, 26, 255], dtype=xint8
# outputTensor[2]: name=layer160-conv_fixed, dims=[1, 13, 13, 255], dtype=xint8
# -------------------------------------------------------------------
# Load .xmodel downloaded from Vitis-AI repository
#yolov4_model_path = "models/yolov4_leaky_spp_m/yolov4_leaky_spp_m.xmodel"
yolov4_model_path = "models/yolov4_leaky_spp_m_pruned_0_36/yolov4_leaky_spp_m_pruned_0_36.xmodel"
# =============================================================================
# -------------------------------------------------------------------
def resize_with_padding(image, size):
    """Resize *image* to *size* (w, h) without changing its aspect ratio.

    The image is scaled to fit inside the target box and centred on a
    grey (value 128) canvas so the output is exactly ``size``.
    """
    src_h, src_w, _ = image.shape
    dst_w, dst_h = size
    ratio = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)
    scaled = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    canvas = np.ones((dst_h, dst_w, 3), np.uint8) * 128
    top = (dst_h - new_h) // 2
    left = (dst_w - new_w) // 2
    canvas[top:top + new_h, left:left + new_w, :] = scaled
    return canvas
# -------------------------------------------------------------------
def preprocess_img(image, size, fixpos):
    """Prepare a frame for the quantised YOLOv4 DPU input tensor.

    Channel order is reversed (BGR -> RGB per the reference design),
    the frame is letterboxed to *size*, scaled by 1/255 and by the DPU
    fixed-point scale ``2**fixpos``, and a batch axis is prepended.
    """
    reordered = image[..., ::-1]
    padded = resize_with_padding(reordered, size)
    tensor = np.array(padded, dtype='float32', order='C')
    tensor *= (2 ** fixpos) / 255
    return np.expand_dims(tensor, 0)
# -------------------------------------------------------------------
def sigmoid(x):
    """Element-wise logistic function: 1 / (1 + e^-x)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
# -------------------------------------------------------------------
def draw_outputs(img, outputs, class_names):
    """Draw detection rectangles and '<class> <score>' labels onto img.

    Box coordinates in `outputs` are normalized [0, 1] and scaled by the
    image's width/height before drawing.
    """
    boxes, objectness, classes, nums = outputs
    height, width, _ = img.shape
    scale_wh = np.array([width, height])
    for n in range(nums):
        top_left = tuple((np.array(boxes[n][0:2]) * scale_wh).astype(np.int32))
        bottom_right = tuple((np.array(boxes[n][2:4]) * scale_wh).astype(np.int32))
        img = cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 2)
        label = '{} {:.4f}'.format(class_names[int(classes[n])], objectness[n])
        img = cv2.putText(img, label, top_left,
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
    return img
# -------------------------------------------------------------------
def draw_outputs_scale(img, scale, offset, outputs, class_names):
    """Like draw_outputs, but maps box coords via (coord - offset) * scale.

    Used when the image was letterboxed: `offset` removes the padding and
    `scale` maps back to the original resolution.
    """
    boxes, objectness, classes, nums = outputs
    for n in range(nums):
        top_left = tuple(
            ((np.array(boxes[n][0:2]) - offset) * scale).astype(np.int32))
        bottom_right = tuple(
            ((np.array(boxes[n][2:4]) - offset) * scale).astype(np.int32))
        img = cv2.rectangle(img, top_left, bottom_right, (255, 0, 0), 2)
        label = '{} {:.4f}'.format(class_names[int(classes[n])], objectness[n])
        img = cv2.putText(img, label, top_left,
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
    return img
# -------------------------------------------------------------------
def yolo_box(conv_output, grid_size, num_classes, anchors, layer_id):
    """Decode one raw YOLO detection head into boxes/objectness/class probs.

    Args:
        conv_output: raw head output indexed [batch, y, x, anchor, channel],
            where channel layout is (x, y, w, h, objectness, class scores...).
        grid_size: spatial size of this head's grid (e.g. 13, 26, 52).
        num_classes: unused; kept for call-site compatibility.
        anchors: anchor (w, h) pairs for this head, normalized to [0, 1].
        layer_id: unused; kept for call-site compatibility.

    Returns:
        (bbox, objectness, class_probs), where bbox holds (x1, y1, x2, y2)
        corner coordinates normalized to [0, 1].
    """
    box_xy = conv_output[:, :, :, :, 0:2]
    box_wh = conv_output[:, :, :, :, 2:4]
    objectness = conv_output[:, :, :, :, 4]
    objectness = np.expand_dims(objectness, axis=-1)
    class_probs = conv_output[:, :, :, :, 5:]
    box_xy = sigmoid(box_xy)
    objectness = sigmoid(objectness)
    class_probs = sigmoid(class_probs)
    # Offset each cell's xy prediction by its grid coordinate, then
    # normalize to [0, 1] across the whole image.
    grid = np.meshgrid(range(grid_size), range(grid_size))
    grid = np.expand_dims(np.stack(grid, axis=-1), axis=2)
    box_xy = (box_xy + grid) / grid_size
    box_wh = np.exp(box_wh) * anchors
    # Convert center/size form to corner coordinates.
    # (Removed two redundant self-assignments of box_x1y1/box_x2y2 that
    # were no-ops in the original.)
    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = np.concatenate((box_x1y1, box_x2y2), axis=-1)
    return bbox, objectness, class_probs
# -------------------------------------------------------------------
def yolo_non_max_suppression(outputs, score_thres, iou_thres):
    """Merge per-scale YOLO outputs and run greedy non-maximum suppression.

    Args:
        outputs: iterable of (bbox, objectness, class_probs) tuples, one per
            detection scale; the trailing axis of each array holds per-box
            data (4 coords, 1 objectness, num_classes probabilities).
        score_thres: minimum (objectness * class prob) score to keep a box.
        iou_thres: boxes overlapping a kept box above this IOU are dropped.

    Returns:
        (boxes, confidence, classes, nums) where `nums` is the number of
        surviving boxes and the three lists have that length.
    """
    # Flatten every scale into (num_boxes, k) arrays, then concatenate.
    b, c, t = [], [], []
    for o in outputs:
        b.append(np.reshape(o[0], (-1, o[0].shape[-1])))
        c.append(np.reshape(o[1], (-1, o[1].shape[-1])))
        t.append(np.reshape(o[2], (-1, o[2].shape[-1])))
    bbox = np.concatenate(b)
    objectness = np.concatenate(c)
    class_probs = np.concatenate(t)
    scores = objectness * class_probs
    # find the highest class score for each box
    max_scores = np.amax(scores, axis=-1)
    class_index = np.argmax(scores, axis=-1)
    # pick boxes that meet score threshold
    pick_index = np.where(max_scores > score_thres)
    maxScores = max_scores[pick_index]
    classIndex = class_index[pick_index]
    x1 = bbox[pick_index, 0][0]
    y1 = bbox[pick_index, 1][0]
    x2 = bbox[pick_index, 2][0]
    y2 = bbox[pick_index, 3][0]
    boxArea = (x2 - x1) * (y2 - y1)
    boxIndex = np.argsort(maxScores)
    # start non maximum suppression
    boxes, confidence, classes = [], [], []
    nums = 0
    while len(boxIndex) > 0:
        last = len(boxIndex) - 1
        # pick the box with the highest score
        k = boxIndex[last]
        boxes.append([x1[k], y1[k], x2[k], y2[k]])
        confidence.append(maxScores[k])
        classes.append(classIndex[k])
        nums = nums + 1
        # calculate IOU. The intersection extents are clamped at zero:
        # for disjoint boxes both (xx2-xx1) and (yy2-yy1) are negative and,
        # unclamped, would multiply into a bogus positive area that wrongly
        # suppresses non-overlapping boxes.
        xx1 = np.maximum(x1[k], x1[boxIndex[:last]])
        yy1 = np.maximum(y1[k], y1[boxIndex[:last]])
        xx2 = np.minimum(x2[k], x2[boxIndex[:last]])
        yy2 = np.minimum(y2[k], y2[boxIndex[:last]])
        inter_w = np.maximum(0.0, xx2 - xx1)
        inter_h = np.maximum(0.0, yy2 - yy1)
        inter_area = inter_w * inter_h
        union_area = boxArea[k] + boxArea[boxIndex[:last]] - inter_area
        iou = inter_area / union_area
        # delete the current box and the boxes with larger IOU
        boxIndex = np.delete(
            boxIndex, np.concatenate(([last], np.where(iou > iou_thres)[0])))
    return boxes, confidence, classes, nums
# -------------------------------------------------------------------
def fix2float(fix_point, value):
    """Scale integer DPU values by 2**fix_point, returning float32."""
    scale = np.exp2(fix_point, dtype=np.float32)
    return value.astype(np.float32) * scale
# -------------------------------------------------------------------
def float2fix(fix_point, value):
    """Inverse of fix2float: divide float values by 2**fix_point (float32)."""
    scale = np.exp2(fix_point, dtype=np.float32)
    return value.astype(np.float32) / scale
|
{"hexsha": "0d3b0fd64fe6418b3513e7b92893a88bcf61d71e", "size": 7317, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/dpu_yolov4.py", "max_stars_repo_name": "dramoz/kv260-atrover", "max_stars_repo_head_hexsha": "7b698b5b033dad5dd40c96e2aa61ec7f6a186e0c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/dpu_yolov4.py", "max_issues_repo_name": "dramoz/kv260-atrover", "max_issues_repo_head_hexsha": "7b698b5b033dad5dd40c96e2aa61ec7f6a186e0c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/dpu_yolov4.py", "max_forks_repo_name": "dramoz/kv260-atrover", "max_forks_repo_head_hexsha": "7b698b5b033dad5dd40c96e2aa61ec7f6a186e0c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9119170984, "max_line_length": 139, "alphanum_fraction": 0.5351920186, "include": true, "reason": "import numpy", "num_tokens": 2089}
|
[STATEMENT]
lemma lemma2_6_5_a': assumes t:"trans r" and "(M,N) \<in> mul_eq r" shows "(M -s ds r S, N -s ds r S) \<in> mul_eq r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (M -s r \<down>s S, N -s r \<down>s S) \<in> mul_eq r
[PROOF STEP]
using assms lemma2_6_5_a[OF t] ds_ds_subseteq_ds[OF t]
[PROOF STATE]
proof (prove)
using this:
trans r
(M, N) \<in> mul_eq r
\<lbrakk>r \<down>s ?S \<subseteq> ?S; (?M, ?N) \<in> mul_eq r\<rbrakk> \<Longrightarrow> (?M -s ?S, ?N -s ?S) \<in> mul_eq r
r \<down>s (r \<down>s ?S) \<subseteq> r \<down>s ?S
goal (1 subgoal):
1. (M -s r \<down>s S, N -s r \<down>s S) \<in> mul_eq r
[PROOF STEP]
by auto
|
{"llama_tokens": 316, "file": "Decreasing-Diagrams_Decreasing_Diagrams", "length": 2}
|
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/hhbbc/optimize.h"
#include <vector>
#include <string>
#include <utility>
#include <algorithm>
#include <cassert>
#include <bitset>
#include <boost/dynamic_bitset.hpp>
#include <folly/Optional.h>
#include <folly/gen/Base.h>
#include <folly/gen/String.h>
#include "hphp/util/trace.h"
#include "hphp/util/match.h"
#include "hphp/runtime/vm/hhbc.h"
#include "hphp/runtime/base/datatype.h"
#include "hphp/hhbbc/hhbbc.h"
#include "hphp/hhbbc/analyze.h"
#include "hphp/hhbbc/cfg.h"
#include "hphp/hhbbc/cfg-opts.h"
#include "hphp/hhbbc/dce.h"
#include "hphp/hhbbc/func-util.h"
#include "hphp/hhbbc/interp.h"
#include "hphp/hhbbc/interp-internal.h"
#include "hphp/hhbbc/interp-state.h"
#include "hphp/hhbbc/misc.h"
#include "hphp/hhbbc/options-util.h"
#include "hphp/hhbbc/representation.h"
#include "hphp/hhbbc/type-system.h"
#include "hphp/hhbbc/unit-util.h"
namespace HPHP { namespace HHBBC {
//////////////////////////////////////////////////////////////////////
// Bundle the per-function state that block visitor passes need.  The
// analysis results must belong to exactly this function.
VisitContext::VisitContext(const Index& index, const FuncAnalysis& ainfo,
                           CollectedInfo& collect, php::WideFunc& func)
    : index(index), ainfo(ainfo), collect(collect), func(func) {
  assertx(ainfo.ctx.func == func);
}
//////////////////////////////////////////////////////////////////////
namespace {
//////////////////////////////////////////////////////////////////////
/*
* For filtering assertions, some opcodes are considered to have no
* use for a stack input assertion.
*
* For now this is restricted to opcodes that do literally nothing.
*/
// True for the handful of opcodes that do literally nothing with their
// stack input, so a stack input assertion would be useless on them.
bool ignoresStackInput(Op op) {
  return op == Op::UGetCUNop ||
         op == Op::CGetCUNop ||
         op == Op::PopU;
}
/*
 * Build an AssertRAT-style bytecode (TyBC) asserting that `arg` (a local
 * id or a stack index) has type `t`.  Returns folly::none when no useful
 * assertion can be made.
 */
template<class TyBC, class ArgType>
folly::Optional<Bytecode> makeAssert(ArrayTypeTable::Builder& arrTable,
                                     ArgType arg,
                                     Type t) {
  using T = RepoAuthType::Tag;
  // A Bottom type means this program point is unreachable.
  if (t.subtypeOf(BBottom)) return folly::none;
  auto const repoTy = make_repo_type(arrTable, t);
  if (options.FilterAssertions &&
      (repoTy == RepoAuthType{T::Cell} || repoTy == RepoAuthType{T::InitCell})) {
    // Cell and InitCell don't add any useful information, so leave them
    // out entirely.
    return folly::none;
  }
  return Bytecode { TyBC { arg, repoTy } };
}
/*
 * Emit AssertRATL/AssertRATStk bytecodes (via `gen`) describing the known
 * types of locals and stack slots immediately before `bcode` executes.
 *
 * `mayReadLocalSet` restricts local assertions to locals the instruction
 * may actually read (when filtering is enabled).  `obviousStackOutputs`
 * has one flag per stack slot; slots whose pushing instruction made their
 * type obvious are skipped.  It is taken by const reference: the original
 * took it by value, copying the vector on every single bytecode.
 */
template<class Gen>
void insert_assertions_step(ArrayTypeTable::Builder& arrTable,
                            const php::Func& func,
                            const Bytecode& bcode,
                            const State& state,
                            std::bitset<kMaxTrackedLocals> mayReadLocalSet,
                            const std::vector<uint8_t>& obviousStackOutputs,
                            Gen gen) {
  if (state.unreachable) return;
  for (LocalId i = 0; i < state.locals.size(); ++i) {
    if (func.locals[i].killed) continue;
    if (options.FilterAssertions) {
      // Do not emit assertions for untracked locals.
      if (i >= mayReadLocalSet.size()) break;
      if (!mayReadLocalSet.test(i)) continue;
    }
    auto const realT = state.locals[i];
    auto const op = makeAssert<bc::AssertRATL>(arrTable, i, realT);
    if (op) gen(*op);
  }
  if (!options.InsertStackAssertions) return;
  assert(obviousStackOutputs.size() == state.stack.size());
  // Assert the type of the stack slot `idx` positions below the top,
  // unless the slot is "obvious", ignored, or no stricter than its flavor.
  auto const assert_stack = [&] (size_t idx) {
    assert(idx < state.stack.size());
    if (obviousStackOutputs[state.stack.size() - idx - 1]) return;
    if (ignoresStackInput(bcode.op)) return;
    auto const realT = state.stack[state.stack.size() - idx - 1].type;
    auto const flav  = stack_flav(realT);
    if (options.FilterAssertions && !realT.strictSubtypeOf(flav)) {
      return;
    }
    auto const op =
      makeAssert<bc::AssertRATStk>(
        arrTable,
        static_cast<uint32_t>(idx),
        realT
      );
    if (op) gen(*op);
  };
  for (auto i = size_t{0}; i < bcode.numPop(); ++i) assert_stack(i);
  // The base instructions are special in that they may read from the
  // stack without necessarily popping it. We want type assertions on
  // the stack slots they'll read.
  switch (bcode.op) {
    case Op::BaseC:       assert_stack(bcode.BaseC.arg1);       break;
    case Op::BaseGC:      assert_stack(bcode.BaseGC.arg1);      break;
    case Op::BaseSC:
      assert_stack(bcode.BaseSC.arg1);
      assert_stack(bcode.BaseSC.arg2);
      break;
    case Op::Dim: {
      switch (bcode.Dim.mkey.mcode) {
        case MEC: case MPC:
          assert_stack(bcode.Dim.mkey.idx);
          break;
        case MW:  case MEL: case MPL: case MEI:
        case MET: case MPT: case MQT:
          break;
      }
      // Explicit break (the original fell through into default's break).
      break;
    }
    default:              break;
  }
}
/*
* When filter assertions is on, we use this to avoid putting stack
* assertions on some "obvious" instructions.
*
* These are instructions that push an output type that is always the
* same, i.e. where an AssertT is never going to add information.
* (E.g. "Int 3" obviously pushes an Int, and the JIT has no trouble
* figuring that out, so there's no reason to assert about it.)
*
* TODO(#3676101): It turns out many hhbc opcodes have known output
* types---there are some super polymorphic ones, but many always do
* bools or objects, etc. We might consider making stack flavors have
* subtypes and adding this to the opcode table.
*/
// Returns true when `op` pushes a value whose type is already obvious
// from the opcode itself, so an AssertRATStk on it would be redundant.
bool hasObviousStackOutput(const Bytecode& op, const Interp& interp) {
  switch (op.op) {
  // Literal pushes and operations with a fixed output type.
  case Op::Null:
  case Op::NullUninit:
  case Op::True:
  case Op::False:
  case Op::Int:
  case Op::Double:
  case Op::String:
  case Op::Array:
  case Op::Dict:
  case Op::Vec:
  case Op::Keyset:
  case Op::NewDArray:
  case Op::NewDictArray:
  case Op::NewVArray:
  case Op::NewStructDArray:
  case Op::NewStructDict:
  case Op::NewVec:
  case Op::NewKeysetArray:
  case Op::AddNewElemC:
  case Op::NewCol:
  case Op::NewPair:
  case Op::ClassName:
  case Op::File:
  case Op::Dir:
  case Op::Concat:
  case Op::ConcatN:
  case Op::Not:
  case Op::Xor:
  case Op::Same:
  case Op::NSame:
  case Op::Eq:
  case Op::Neq:
  case Op::Lt:
  case Op::Gt:
  case Op::Lte:
  case Op::Gte:
  case Op::Cmp:
  case Op::Shl:
  case Op::Shr:
  case Op::CastBool:
  case Op::CastInt:
  case Op::CastDouble:
  case Op::CastString:
  case Op::CastDict:
  case Op::CastVec:
  case Op::CastKeyset:
  case Op::CastVArray:
  case Op::CastDArray:
  case Op::DblAsBits:
  case Op::InstanceOfD:
  case Op::IsLateBoundCls:
  case Op::IsTypeStructC:
  case Op::CombineAndResolveTypeStruct:
  case Op::RecordReifiedGeneric:
  case Op::InstanceOf:
  case Op::Print:
  case Op::Exit:
  case Op::AKExists:
  case Op::IssetL:
  case Op::IsUnsetL:
  case Op::IssetG:
  case Op::IssetS:
  case Op::IsTypeC:
  case Op::IsTypeL:
  case Op::OODeclExists:
    return true;

  case Op::This:
  case Op::BareThis:
    // Only obvious if the inferred type adds nothing beyond the known
    // $this class (modulo nullability for BareThis).
    if (auto tt = thisType(interp.index, interp.ctx)) {
      auto t = interp.state.stack.back().type;
      if (is_opt(t)) t = unopt(std::move(t));
      return !t.strictSubtypeOf(*tt);
    }
    return true;

  case Op::CGetL:
  case Op::CGetQuietL:
  case Op::CUGetL:
  case Op::CGetL2:
  case Op::PushL:
    return true;

  // The output of SetL is obvious if you know what its input is
  // (which we'll assert if we know).
  case Op::SetL:
    return true;

  // The output of SetM isn't quite as obvious as SetL, but the jit
  // can work it out from the input just as well as hhbbc (if not better).
  case Op::SetM:
    return true;

  default:
    return false;
  }
}
/*
 * Rewrite block `bid`, interleaving AssertRAT bytecodes that describe the
 * inferred types of locals and stack slots before each instruction.  If
 * interpretation proves a point unreachable, the rest of the block is
 * replaced by a runtime fatal.
 */
void insert_assertions(VisitContext& visit, BlockId bid, State state) {
  BytecodeVec newBCs;
  auto& func = visit.func;
  auto const& cblk = func.blocks()[bid];
  newBCs.reserve(cblk->hhbcs.size());

  auto const& index = visit.index;
  auto const& ainfo = visit.ainfo;
  auto& arrTable = *index.array_table_builder();
  auto const ctx = AnalysisContext { ainfo.ctx.unit, func, ainfo.ctx.cls };

  // One flag per stack slot: set when the pushing instruction made the
  // slot's type obvious (see hasObviousStackOutput).
  std::vector<uint8_t> obviousStackOutputs(state.stack.size(), false);

  auto fallthrough = cblk->fallthrough;
  auto interp = Interp { index, ctx, visit.collect, bid, cblk.get(), state };

  for (auto& op : cblk->hhbcs) {
    FTRACE(2, "  == {}\n", show(func, op));

    auto gen = [&] (const Bytecode& newb) {
      newBCs.push_back(newb);
      newBCs.back().srcLoc = op.srcLoc;
      FTRACE(2, "   + {}\n", show(func, newBCs.back()));
    };

    if (state.unreachable) {
      // The rest of the block can never execute; end it with a fatal
      // unless the last emitted op already terminates the trace.
      fallthrough = NoBlockId;
      if (!(instrFlags(op.op) & TF)) {
        gen(bc::BreakTraceHint {});
        gen(bc::String { s_unreachable.get() });
        gen(bc::Fatal { FatalOp::Runtime });
      }
      break;
    }

    // Assertions describe the state *before* `op` executes.
    auto const preState = state;
    auto const flags = step(interp, op);

    insert_assertions_step(
      arrTable,
      *func,
      op,
      preState,
      flags.mayReadLocalSet,
      obviousStackOutputs,
      gen
    );

    if (op.op == Op::CGetL2) {
      // CGetL2 inserts its output *below* the current top of stack.
      obviousStackOutputs.emplace(obviousStackOutputs.end() - 1,
                                  hasObviousStackOutput(op, interp));
    } else {
      for (int i = 0; i < op.numPop(); i++) {
        obviousStackOutputs.pop_back();
      }
      for (auto i = op.numPush(); i--; ) {
        obviousStackOutputs.emplace_back(hasObviousStackOutput(op, interp));
      }
    }

    gen(op);
  }

  if (cblk->fallthrough != fallthrough || cblk->hhbcs != newBCs) {
    // Only trigger the copy-on-write mutation when something changed.
    auto const blk = func.blocks()[bid].mutate();
    blk->fallthrough = fallthrough;
    blk->hhbcs = std::move(newBCs);
  }
}
//////////////////////////////////////////////////////////////////////
/*
 * If every output of `op` is a statically-known constant, replace the
 * instruction with PopC's for its inputs followed by constant-pushing
 * bytecodes (emitted through `gen`), updating `state` to match.  Returns
 * false (emitting nothing) when any output is not a known constant.
 */
template<class Gen>
bool propagate_constants(const Bytecode& op, State& state, Gen gen) {
  auto const numPop = op.numPop();
  auto const numPush = op.numPush();
  auto const stkSize = state.stack.size();
  // Small fixed cache; outputs beyond numCells are re-derived from the
  // state in the push loop below.
  constexpr auto numCells = 4;
  TypedValue constVals[numCells];
  // All outputs of the instruction must have constant types for this
  // to be allowed.
  for (auto i = size_t{0}; i < numPush; ++i) {
    auto const& ty = state.stack[stkSize - i - 1].type;
    if (i < numCells) {
      auto const v = tv(ty);
      if (!v) return false;
      constVals[i] = *v;
    } else if (!is_scalar(ty)) {
      return false;
    }
  }
  // Pop the inputs, and push the constants.
  for (auto i = size_t{0}; i < numPop; ++i) {
    DEBUG_ONLY auto flavor = op.popFlavor(i);
    assertx(flavor != Flavor::U);
    // Even for CU we only support C's.
    gen(bc::PopC {});
  }
  // NOTE(review): constVals[0] corresponds to the stack *top* but is
  // emitted first here; for an op pushing more than one value this looks
  // reversed -- confirm whether any multi-push op reaches this path.
  for (auto i = size_t{0}; i < numPush; ++i) {
    auto const v = i < numCells ?
      constVals[i] : *tv(state.stack[stkSize - i - 1].type);
    gen(gen_constant(v));
    state.stack[stkSize - i - 1].type = from_cell(v);
  }
  return true;
}
bool propagate_constants(const Bytecode& bc, State& state,
BytecodeVec& out) {
return propagate_constants(bc, state, [&] (const Bytecode& bc) {
out.push_back(bc);
});
}
//////////////////////////////////////////////////////////////////////
// Create a new fatal error block. Update the given FuncAnalysis if
// it is non-null - specifically, assign the new block an rpoId.
// Create a new fatal error block. Update the given FuncAnalysis if
// it is non-null - specifically, assign the new block an rpoId.
BlockId make_fatal_block(php::WideFunc& func, const php::Block* srcBlk,
                         FuncAnalysis* ainfo) {
  FTRACE(1, "    ++ new block {}\n", func.blocks().size());
  auto const newBid = make_block(func, srcBlk);
  auto const srcLoc = srcBlk->hhbcs.back().srcLoc;
  auto const blk = func.blocks()[newBid].mutate();
  blk->hhbcs = {
    bc_with_loc(srcLoc, bc::String { s_unreachable.get() }),
    bc_with_loc(srcLoc, bc::Fatal { FatalOp::Runtime })
  };
  blk->fallthrough = NoBlockId;
  blk->throwExit = NoBlockId;
  blk->exnNodeId = NoExnNodeId;
  if (ainfo) {
    // Keep the analysis' per-block tables in sync with the new block.
    assertx(ainfo->bdata.size() == newBid);
    assertx(newBid + 1 == func.blocks().size());
    ainfo->rpoBlocks.push_back(newBid);
    auto const rpoId =
      safe_cast<uint32_t>(ainfo->rpoBlocks.size() - 1);
    ainfo->bdata.push_back(FuncAnalysis::BlockData { rpoId, State {} });
  }
  return newBid;
}
//////////////////////////////////////////////////////////////////////
/*
 * Drive `fun` over every reachable block of the function in reverse post
 * order.  `what` labels trace output and assertion-failure details.
 */
template<class Fun>
void visit_blocks(const char* what, VisitContext& visit, Fun&& fun) {
  BlockId curBlk = NoBlockId;
  SCOPE_ASSERT_DETAIL(what) {
    if (curBlk == NoBlockId) return std::string{"\nNo block processed\n"};
    auto const& state = visit.ainfo.bdata[curBlk].stateIn;
    auto const debug = state_string(*visit.func, state, visit.collect);
    return folly::sformat("block #{}\nin-{}", curBlk, debug);
  };

  FTRACE(1, "|---- {}\n", what);
  for (auto const bid : visit.ainfo.rpoBlocks) {
    curBlk = bid;
    FTRACE(2, "block #{}\n", bid);
    auto const& state = visit.ainfo.bdata[bid].stateIn;
    if (!state.initialized) {
      // Blocks the analysis never reached are skipped entirely.
      FTRACE(2, "   unreachable\n");
      continue;
    }
    // TODO(#3732260): We should probably do an extra interp pass here
    // in debug builds to check that no transformation to the bytecode
    // was made that changes the block output state.
    fun(visit, bid, state);
  }
  assert(check(*visit.func));
}
//////////////////////////////////////////////////////////////////////
// Given a block ending in IterInit/LIterInit, return the iterator id
// that the init op initializes.  Asserts on any other terminal op.
IterId iterFromInit(const php::WideFunc& func, BlockId initBlock) {
  auto const& initOp = func.blocks()[initBlock]->hhbcs.back();
  switch (initOp.op) {
    case Op::IterInit:  return initOp.IterInit.ita.iterId;
    case Op::LIterInit: return initOp.LIterInit.ita.iterId;
    default:            always_assert(false);
  }
}
/*
* Attempt to convert normal iterators into liters. In order for an iterator to
* be converted to a liter, the following needs to be true:
*
* - The iterator is initialized with the value in a local at exactly one block.
*
* - That same local is not modified on all possible paths from the
* initialization to every usage of that iterator.
*
* The first condition is actually more restrictive than necessary, but
* enforcing that the iterator is initialized at exactly one place simplifies
* the bookkeeping and is always true with how we currently emit bytecode.
*/
struct OptimizeIterState {
  // Visitor pass: interpret each block, pruning `eligible` for iterator
  // init blocks whose loops can't be converted to liters, and recording
  // the fixups to apply for loops that stay eligible.
  void operator()(VisitContext& visit, BlockId bid, State state) {
    auto& func = visit.func;
    auto const& ainfo = visit.ainfo;
    auto const blk = func.blocks()[bid].get();
    auto const ctx = AnalysisContext { ainfo.ctx.unit, func, ainfo.ctx.cls };
    auto interp = Interp { visit.index, ctx, visit.collect, bid, blk, state };
    for (uint32_t opIdx = 0; opIdx < blk->hhbcs.size(); ++opIdx) {
      // If we've already determined that nothing is eligible, we can just stop.
      if (!eligible.any()) break;

      auto const& op = blk->hhbcs[opIdx];
      FTRACE(2, "  == {}\n", show(func, op));

      if (state.unreachable) break;

      // At every op, we check the known state of all live iterators and mark it
      // as ineligible as necessary.
      for (IterId it = 0; it < state.iters.size(); ++it) {
        match<void>(
          state.iters[it],
          []  (DeadIter) {},
          [&] (const LiveIter& ti) {
            FTRACE(4, "     iter {: <2}  :: {}\n",
                   it, show(*func, state.iters[it]));
            // The init block is unknown. This can only happen if there's more
            // than one block where this iterator was initialized. This makes
            // tracking the iteration loop ambiguous, and can't happen with how
            // we currently emit bytecode, so just pessimize everything.
            if (ti.initBlock == NoBlockId) {
              FTRACE(2, "   - pessimize all\n");
              eligible.clear();
              return;
            }
            // Otherwise, if the iterator doesn't have an equivalent local,
            // either it was never initialized with a local to begin with, or
            // that local got changed within the loop. Either way, this
            // iteration loop isn't eligible.
            if (eligible[ti.initBlock] && ti.baseLocal == NoLocalId) {
              FTRACE(2, "   - blk:{} ineligible\n", ti.initBlock);
              eligible[ti.initBlock] = false;
            } else if (ti.baseUpdated) {
              FTRACE(2, "   - blk:{} updated\n", ti.initBlock);
              updated[ti.initBlock] = true;
            }
          }
        );
      }

      // Record a fixup for an init op: the base is whatever local the value
      // on top of the stack came from (NoLocalId makes the block ineligible).
      auto const fixupForInit = [&] {
        auto const base = topStkLocal(state);
        if (base == NoLocalId && eligible[bid]) {
          FTRACE(2, "   - blk:{} ineligible\n", bid);
          eligible[bid] = false;
        }
        fixups.emplace_back(Fixup{bid, opIdx, bid, base});
        FTRACE(2, "   + fixup ({})\n", fixups.back().show(*func));
      };

      // Record a fixup for a next/free op, keyed on the iterator's init
      // block as tracked in the interp state.
      auto const fixupFromState = [&] (IterId it) {
        match<void>(
          state.iters[it],
          []  (DeadIter) {},
          [&] (const LiveIter& ti) {
            if (ti.initBlock != NoBlockId) {
              assertx(iterFromInit(func, ti.initBlock) == it);
              fixups.emplace_back(
                Fixup{bid, opIdx, ti.initBlock, ti.baseLocal}
              );
              FTRACE(2, "   + fixup ({})\n", fixups.back().show(*func));
            }
          }
        );
      };

      // Record a fixup for this iteration op. This iteration loop may not be
      // ultimately eligible, but we'll check that before actually doing the
      // transformation.
      switch (op.op) {
        case Op::IterInit:
          assertx(opIdx == blk->hhbcs.size() - 1);
          fixupForInit();
          break;
        case Op::IterNext:
          fixupFromState(op.IterNext.ita.iterId);
          break;
        case Op::IterFree:
          fixupFromState(op.IterFree.iter1);
          break;
        default:
          break;
      }

      step(interp, op);
    }
  }

  // We identify iteration loops by the block of the initialization op (which we
  // enforce is exactly one block). A fixup describes a transformation to an
  // iteration instruction which must be applied only if its associated loop is
  // eligible.
  struct Fixup {
    BlockId block;  // Block of the op
    uint32_t op;    // Index into the block of the op
    BlockId init;   // Block of the loop's initializer
    LocalId base;   // Invariant base of the iterator

    std::string show(const php::Func& f) const {
      return folly::sformat(
        "blk:{},{},blk:{},{}",
        block, op, init,
        base != NoLocalId ? local_string(f, base) : "-"
      );
    }
  };
  std::vector<Fixup> fixups;
  // All of the associated iterator operations within an iterator loop can be
  // optimized to liter if the iterator's initialization block is eligible.
  boost::dynamic_bitset<> eligible;
  // For eligible blocks, the "updated" flag tracks whether there was *any*
  // change to the base initialized in that block (including "safe" changes).
  boost::dynamic_bitset<> updated;
};
/*
 * Rewrite eligible normal-iterator loops into local-iterator (liter)
 * loops, using the eligibility/fixup information gathered by
 * OptimizeIterState over all blocks.
 */
void optimize_iterators(VisitContext& visit) {
  // Quick exit. If there's no iterators, or if no associated local survives
  // to the end of the iterator, there's nothing to do.
  auto& func = visit.func;
  auto const& ainfo = visit.ainfo;
  if (!func->numIters || !ainfo.hasInvariantIterBase) return;

  OptimizeIterState state;
  // All blocks starts out eligible. We'll remove initialization blocks as go.
  // Similarly, the iterator bases for all blocks start out not being updated.
  state.eligible.resize(func.blocks().size(), true);
  state.updated.resize(func.blocks().size(), false);

  // Visit all the blocks and build up the fixup state.
  visit_blocks("optimize_iterators", visit, state);
  if (!state.eligible.any()) return;

  FTRACE(2, "Rewrites:\n");
  for (auto const& fixup : state.fixups) {
    auto const& cblk = func.blocks()[fixup.block];
    auto const& op = cblk->hhbcs[fixup.op];

    if (!state.eligible[fixup.init]) {
      // This iteration loop isn't eligible, so don't apply the fixup
      FTRACE(2, "   * ({}): {}\n", fixup.show(*func), show(func, op));
      continue;
    }

    // BaseConst is only safe when the base local was never touched at all
    // within the loop.
    auto const flags = state.updated[fixup.init]
      ? IterArgs::Flags::None
      : IterArgs::Flags::BaseConst;

    BytecodeVec newOps;
    assertx(fixup.base != NoLocalId);

    // Rewrite the iteration op to its liter equivalent:
    switch (op.op) {
      case Op::IterInit: {
        auto args = op.IterInit.ita;
        auto const target = op.IterInit.target2;
        args.flags = flags;
        // LIterInit reads the base from a local, so drop the stacked value.
        newOps = {
          bc_with_loc(op.srcLoc, bc::PopC {}),
          bc_with_loc(op.srcLoc, bc::LIterInit{args, fixup.base, target})
        };
        break;
      }
      case Op::IterNext: {
        auto args = op.IterNext.ita;
        auto const target = op.IterNext.target2;
        args.flags = flags;
        newOps = {
          bc_with_loc(op.srcLoc, bc::LIterNext{args, fixup.base, target}),
        };
        break;
      }
      case Op::IterFree:
        newOps = {
          bc_with_loc(
            op.srcLoc,
            bc::LIterFree { op.IterFree.iter1, fixup.base }
          )
        };
        break;
      default:
        always_assert(false);
    }

    FTRACE(
      2, "   ({}): {} ==> {}\n",
      fixup.show(*func), show(func, op),
      [&] {
        using namespace folly::gen;
        return from(newOps)
          | map([&] (const Bytecode& bc) { return show(func, bc); })
          | unsplit<std::string>(",");
      }()
    );

    auto const blk = func.blocks()[fixup.block].mutate();
    blk->hhbcs.erase(blk->hhbcs.begin() + fixup.op);
    blk->hhbcs.insert(blk->hhbcs.begin() + fixup.op,
                      newOps.begin(), newOps.end());
  }

  FTRACE(10, "{}", show(*func));
}
//////////////////////////////////////////////////////////////////////
/*
* Use the information in the index to resolve a type-constraint to its
* underlying type, if possible.
*/
// Resolve an unresolved object type-constraint against the index, marking
// it resolved (and possibly no-mock) in place.  Leaves unresolvable or
// already-resolved constraints untouched.
void fixTypeConstraint(const Index& index, TypeConstraint& tc) {
  if (!tc.isCheckable() || !tc.isObject() || tc.isResolved()) return;

  // Interfaces with non-object implementations need their special
  // runtime checks; don't resolve through them here.
  if (interface_supports_non_objects(tc.typeName())) return;
  auto const resolved = index.resolve_type_name(tc.typeName());

  assertx(!RuntimeOption::EvalHackArrDVArrs ||
          (resolved.type != AnnotType::VArray &&
           resolved.type != AnnotType::DArray));

  if (resolved.type == AnnotType::Object) {
    auto const resolvedValue = match<folly::Optional<res::Class>>(
      resolved.value,
      [&] (boost::blank) { return folly::none; },
      [&] (const res::Class& c) { return folly::make_optional(c); },
      [&] (const res::Record&) { always_assert(false); return folly::none; }
    );
    if (!resolvedValue || !resolvedValue->resolved()) return;
    // Can't resolve if it resolves to a magic interface. If we mark it as
    // resolved, we'll think its an object and not do the special magic
    // interface checks at runtime.
    if (interface_supports_non_objects(resolvedValue->name())) return;
    if (!resolvedValue->couldHaveMockedDerivedClass()) tc.setNoMockObjects();
  }

  tc.resolveType(resolved.type, tc.isNullable() || resolved.nullable);
  FTRACE(1, "Retype tc {} -> {}\n", tc.typeName(), tc.displayName());
}
//////////////////////////////////////////////////////////////////////
/*
 * Run the whole per-function optimization pipeline: iterator rewrites,
 * DCE (local and global) until fixpoint, removal of trivially-empty
 * 86*init methods, type assertions, and type-constraint resolution.
 */
void do_optimize(const Index& index, FuncAnalysis&& ainfo,
                 php::WideFunc& func) {
  FTRACE(2, "{:-^70} {}\n", "Optimize Func", func->name);

  bool again;
  folly::Optional<CollectedInfo> collect;
  folly::Optional<VisitContext> visit;
  collect.emplace(index, ainfo.ctx, nullptr, CollectionOpts{}, &ainfo);
  visit.emplace(index, ainfo, *collect, func);

  update_bytecode(func, std::move(ainfo.blockUpdates), &ainfo);
  optimize_iterators(*visit);

  do {
    again = false;
    FTRACE(10, "{}", show(*func));
    /*
     * Note: it's useful to do dead block removal before DCE, so it can remove
     * code relating to the branch to the dead block.
     */
    remove_unreachable_blocks(ainfo, func);

    if (options.LocalDCE) {
      visit_blocks("local DCE", *visit, local_dce);
    }
    if (options.GlobalDCE) {
      split_critical_edges(index, ainfo, func);
      if (global_dce(index, ainfo, func)) again = true;
      if (control_flow_opts(ainfo, func)) again = true;
      assert(check(*func));
      /*
       * Global DCE can change types of locals across blocks.  See
       * dce.cpp for an explanation.
       *
       * We need to perform a final type analysis before we do
       * anything else.
       */
      auto const ctx = AnalysisContext { ainfo.ctx.unit, func, ainfo.ctx.cls };
      ainfo = analyze_func(index, ctx, CollectionOpts{});
      update_bytecode(func, std::move(ainfo.blockUpdates), &ainfo);
      collect.emplace(index, ainfo.ctx, nullptr, CollectionOpts{}, &ainfo);
      visit.emplace(index, ainfo, *collect, func);
    }

    // If we merged blocks, there could be new optimization opportunities
  } while (again);

  // An 86[psl]init that reduced to "Null; RetC" is a no-op; erase the whole
  // method from its class.
  if (func->name == s_86pinit.get() ||
      func->name == s_86sinit.get() ||
      func->name == s_86linit.get()) {
    auto const& blk = *func.blocks()[func->mainEntry];
    if (blk.hhbcs.size() == 2 &&
        blk.hhbcs[0].op == Op::Null &&
        blk.hhbcs[1].op == Op::RetC) {
      FTRACE(2, "Erasing {}::{}\n", func->cls->name, func->name);
      func->cls->methods.erase(
        std::find_if(func->cls->methods.begin(),
                     func->cls->methods.end(),
                     [&](const std::unique_ptr<php::Func>& f) {
                       return f.get() == func;
                     }));
      func.release();
      return;
    }
  }

  if (options.InsertAssertions) {
    visit_blocks("insert assertions", *visit, insert_assertions);
  }

  // NOTE: We shouldn't duplicate blocks that are shared between two Funcs
  // in this loop. We shrink BytecodeVec at the time we parse the function,
  // so we only shrink when we've already mutated (and COWed) the bytecode.
  for (auto bid : func.blockRange()) {
    auto const& block = func.blocks()[bid];
    assertx(block->hhbcs.size());
    if (block->hhbcs.capacity() == block->hhbcs.size()) continue;
    func.blocks()[bid].mutate()->hhbcs.shrink_to_fit();
  }

  for (auto& p : func->params) fixTypeConstraint(index, p.typeConstraint);
  fixTypeConstraint(index, func->retTypeConstraint);
}
//////////////////////////////////////////////////////////////////////
}
//////////////////////////////////////////////////////////////////////
/*
 * Produce the bytecode that pushes a statically-known constant value.
 * Counted (non-persistent) variants assert staticness and then fall
 * through to their persistent twin's case.
 */
Bytecode gen_constant(const TypedValue& cell) {
  switch (cell.m_type) {
    case KindOfUninit:
      return bc::NullUninit {};
    case KindOfNull:
      return bc::Null {};
    case KindOfBoolean:
      if (cell.m_data.num) {
        return bc::True {};
      } else {
        return bc::False {};
      }
    case KindOfInt64:
      return bc::Int { cell.m_data.num };
    case KindOfDouble:
      return bc::Double { cell.m_data.dbl };
    case KindOfString:
      assert(cell.m_data.pstr->isStatic());
      // deliberate fallthrough
    case KindOfPersistentString:
      return bc::String { cell.m_data.pstr };
    case KindOfVec:
      assert(cell.m_data.parr->isStatic());
      // deliberate fallthrough
    case KindOfPersistentVec:
      assert(cell.m_data.parr->isVecType());
      return bc::Vec { cell.m_data.parr };
    case KindOfDict:
      assert(cell.m_data.parr->isStatic());
      // deliberate fallthrough
    case KindOfPersistentDict:
      assert(cell.m_data.parr->isDictType());
      return bc::Dict { cell.m_data.parr };
    case KindOfKeyset:
      assert(cell.m_data.parr->isStatic());
      // deliberate fallthrough
    case KindOfPersistentKeyset:
      assert(cell.m_data.parr->isKeysetType());
      return bc::Keyset { cell.m_data.parr };
    case KindOfDArray:
    case KindOfVArray:
      assert(cell.m_data.parr->isStatic());
      // deliberate fallthrough
    case KindOfPersistentDArray:
    case KindOfPersistentVArray:
      assert(cell.m_data.parr->isPHPArrayType());
      return bc::Array { cell.m_data.parr };

    // Reference-like and non-value types can never be constant-folded.
    case KindOfResource:
    case KindOfObject:
    case KindOfRFunc:
    case KindOfFunc:
    case KindOfClass:
    case KindOfLazyClass:  // TODO (T68822846)
    case KindOfClsMeth:
    case KindOfRClsMeth:
    case KindOfRecord:
      always_assert(0 && "invalid constant in propagate_constants");
  }
  not_reached();
}
/*
 * Public entry point: optimize one function using its analysis results,
 * bumping the relevant trace modules for functions selected for tracing.
 */
void optimize_func(const Index& index, FuncAnalysis&& ainfo,
                   php::WideFunc& func) {
  auto const traceLevel = trace_bump_for(ainfo.ctx.cls, func);

  SCOPE_ASSERT_DETAIL("optimize_func") {
    return "Optimizing:" + show(ainfo.ctx);
  };

  Trace::Bump hhbbcBump{Trace::hhbbc, traceLevel};
  Trace::Bump cfgBump{Trace::hhbbc_cfg, traceLevel};
  Trace::Bump dceBump{Trace::hhbbc_dce, traceLevel};
  Trace::Bump indexBump{Trace::hhbbc_index, traceLevel};
  do_optimize(index, std::move(ainfo), func);
}
/*
 * Splice the per-block bytecode replacements produced by analysis into
 * the function, repairing fallthrough/taken edges (dead targets are
 * redirected to a shared fatal block) and keeping `ainfo` in sync.
 */
void update_bytecode(php::WideFunc& func, BlockUpdates&& blockUpdates,
                     FuncAnalysis* ainfo) {
  for (auto& ent : blockUpdates) {
    auto blk = func.blocks()[ent.first].mutate();
    auto const srcLoc = blk->hhbcs.front().srcLoc;
    if (!ent.second.unchangedBcs) {
      // Whole-block replacement (or clearing to a Nop placeholder).
      if (ent.second.replacedBcs.size()) {
        blk->hhbcs = std::move(ent.second.replacedBcs);
      } else {
        blk->hhbcs = { bc_with_loc(blk->hhbcs.front().srcLoc, bc::Nop {}) };
      }
    } else {
      // Keep the first `unchangedBcs` ops, then append the replacements.
      blk->hhbcs.erase(blk->hhbcs.begin() + ent.second.unchangedBcs,
                       blk->hhbcs.end());
      blk->hhbcs.reserve(blk->hhbcs.size() + ent.second.replacedBcs.size());
      for (auto& bc : ent.second.replacedBcs) {
        blk->hhbcs.push_back(std::move(bc));
      }
    }
    if (blk->hhbcs.empty()) {
      blk->hhbcs.push_back(bc_with_loc(srcLoc, bc::Nop {}));
    }
    // Lazily create at most one fatal block per updated block to serve as
    // the target for edges that now point nowhere.
    auto fatal_block = NoBlockId;
    auto fatal = [&] {
      if (fatal_block == NoBlockId) {
        fatal_block = make_fatal_block(func, blk, ainfo);
      }
      return fatal_block;
    };
    blk->fallthrough = ent.second.fallthrough;
    auto hasCf = false;
    forEachTakenEdge(blk->hhbcs.back(),
                     [&] (BlockId& bid) {
                       hasCf = true;
                       if (bid == NoBlockId) bid = fatal();
                     });
    if (blk->fallthrough == NoBlockId &&
        !(instrFlags(blk->hhbcs.back().op) & TF)) {
      // The block no longer ends the trace but has no successor: either
      // route the fallthrough to a fatal block, or append an inline fatal.
      if (hasCf) {
        blk->fallthrough = fatal();
      } else {
        blk->hhbcs.push_back(bc::BreakTraceHint {});
        blk->hhbcs.push_back(bc::String { s_unreachable.get() });
        blk->hhbcs.push_back(bc::Fatal { FatalOp::Runtime });
      }
    }
  }
  blockUpdates.clear();
}
//////////////////////////////////////////////////////////////////////
void optimize_class_prop_type_hints(const Index& index, Context ctx) {
assertx(!ctx.func);
auto const bump = trace_bump_for(ctx.cls, nullptr);
Trace::Bump bumper{Trace::hhbbc, bump};
for (auto& prop : ctx.cls->properties) {
fixTypeConstraint(
index,
const_cast<TypeConstraint&>(prop.typeConstraint)
);
}
}
//////////////////////////////////////////////////////////////////////
}}
|
{"hexsha": "283389da2ece22c280c3b766db61a4aed60042b1", "size": 31622, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "hphp/hhbbc/optimize.cpp", "max_stars_repo_name": "alexsn/hhvm", "max_stars_repo_head_hexsha": "6061999778c513d2433c3282902ab1befc4f60b2", "max_stars_repo_licenses": ["PHP-3.01", "Zend-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hphp/hhbbc/optimize.cpp", "max_issues_repo_name": "alexsn/hhvm", "max_issues_repo_head_hexsha": "6061999778c513d2433c3282902ab1befc4f60b2", "max_issues_repo_licenses": ["PHP-3.01", "Zend-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hphp/hhbbc/optimize.cpp", "max_forks_repo_name": "alexsn/hhvm", "max_forks_repo_head_hexsha": "6061999778c513d2433c3282902ab1befc4f60b2", "max_forks_repo_licenses": ["PHP-3.01", "Zend-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-01-29T08:44:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-29T08:44:22.000Z", "avg_line_length": 32.7689119171, "max_line_length": 80, "alphanum_fraction": 0.5904749858, "num_tokens": 8233}
|
import numpy as np
class Loop(object):
    """Training / evaluation loops for multi-agent reinforcement learning.

    The environment object follows an ML-Agents-style interface: ``obs`` is
    keyed by brain name and each entry exposes ``.agents``,
    ``.vector_observations``, ``.rewards`` and ``.local_done``
    (assumption inferred from usage -- TODO confirm against the actual
    environment wrapper).  All methods are stateless ``@staticmethod``s
    that drive ``env``, the per-brain ``models`` and a shared replay
    buffer ``data``.
    """

    @staticmethod
    def train(env, brain_names, models, data, begin_episode, save_frequency, reset_config, max_step, max_episode, sampler_manager, resampling_interval, policy_mode):
        """Run the off-policy multi-agent training loop.

        Args:
            env: environment with ``reset(config=..., train_mode=...)`` and
                ``step(vector_action=...)`` returning per-brain observations.
            brain_names: list of brain (agent-group) identifiers.
            models: one RL model per brain, aligned with ``brain_names``.
            data: shared replay buffer with ``add``/``sample``/``batch_size``.
            begin_episode, max_episode: episode index range to run.
            save_frequency: checkpoint every this many episodes.
            reset_config: env reset parameters; updated in place from the
                sampler every ``resampling_interval`` episodes.
            max_step: hard cap on steps per episode.
            sampler_manager, resampling_interval: env-parameter sampler.
            policy_mode: must be ``'off-policy'`` (only supported mode).
        """
        assert policy_mode == 'off-policy', "multi-agents algorithms now support off-policy only."
        brains_num = len(brain_names)
        batch_size = data.batch_size
        # Per-brain slots, indexed in lock-step with brain_names.
        agents_num = [0] * brains_num
        state = [0] * brains_num
        action = [0] * brains_num
        new_action = [0] * brains_num
        next_action = [0] * brains_num
        reward = [0] * brains_num
        next_state = [0] * brains_num
        dones = [0] * brains_num
        dones_flag = [0] * brains_num   # per-agent "already finished" accumulators
        rewards = [0] * brains_num      # per-agent cumulative episode returns
        for episode in range(begin_episode, max_episode):
            # Periodically re-sample environment parameters.
            if episode % resampling_interval == 0:
                reset_config.update(sampler_manager.sample_all())
            obs = env.reset(config=reset_config, train_mode=True)
            for i, brain_name in enumerate(brain_names):
                agents_num[i] = len(obs[brain_name].agents)
                dones_flag[i] = np.zeros(agents_num[i])
                rewards[i] = np.zeros(agents_num[i])
            step = 0
            last_done_step = -1
            while True:
                step += 1
                # 1) Every brain picks an action from its current observation.
                for i, brain_name in enumerate(brain_names):
                    state[i] = obs[brain_name].vector_observations
                    action[i] = models[i].choose_action(s=state[i])
                actions = {f'{brain_name}': action[i] for i, brain_name in enumerate(brain_names)}
                obs = env.step(vector_action=actions)
                # 2) Collect the transition for every brain.
                for i, brain_name in enumerate(brain_names):
                    reward[i] = np.array(obs[brain_name].rewards)[:, np.newaxis]
                    next_state[i] = obs[brain_name].vector_observations
                    dones[i] = np.array(obs[brain_name].local_done)[:, np.newaxis]
                    # Only agents that have not terminated yet keep
                    # accumulating return.
                    unfinished_index = np.where(dones_flag[i] == False)[0]
                    dones_flag[i] += obs[brain_name].local_done
                    rewards[i][unfinished_index] += np.array(obs[brain_name].rewards)[unfinished_index]
                # 3) Regroup the per-brain lists into per-agent tuples and
                #    push the joint transition into the replay buffer.
                s = [np.array(e) for e in zip(*state)]
                a = [np.array(e) for e in zip(*action)]
                r = [np.array(e) for e in zip(*reward)]
                s_ = [np.array(e) for e in zip(*next_state)]
                done = [np.array(e) for e in zip(*dones)]
                data.add(s, a, r, s_, done)
                # 4) Train on a sampled mini-batch.
                s, a, r, s_, done = data.sample()
                for i, brain_name in enumerate(brain_names):
                    next_action[i] = models[i].get_target_action(s=s_[:, i])
                    # NOTE(review): new_action is computed but never read
                    # afterwards -- confirm whether choose_inference_action
                    # is needed here for its side effects.
                    new_action[i] = models[i].choose_inference_action(s=s[:, i])
                a_ = np.array([np.array(e) for e in zip(*next_action)])
                if policy_mode == 'off-policy':
                    for i in range(brains_num):
                        # ap/al appear to be the flattened target actions of
                        # the brains before/after brain i (empty when none)
                        # -- TODO confirm this split against the model API.
                        models[i].learn(
                            episode=episode,
                            ap=np.array([np.array(e) for e in zip(*next_action[:i])]).reshape(batch_size, -1) if i != 0 else np.zeros((batch_size, 0)),
                            al=np.array([np.array(e) for e in zip(*next_action[-(brains_num - i - 1):])]).reshape(batch_size, -1) if brains_num - i != 1 else np.zeros((batch_size, 0)),
                            ss=s.reshape(batch_size, -1),
                            ss_=s_.reshape(batch_size, -1),
                            aa=a.reshape(batch_size, -1),
                            aa_=a_.reshape(batch_size, -1),
                            s=s[:, i],
                            r=r[:, i]
                        )
                # Episode ends when every agent of every brain has finished,
                # or when the step cap is hit.
                if all([all(dones_flag[i]) for i in range(brains_num)]):
                    if last_done_step == -1:
                        last_done_step = step
                    if policy_mode == 'off-policy':
                        break
                if step >= max_step:
                    break
            # if train_mode == 'perEpisode':
            #     for i in range(brains_num):
            #         models[i].learn(episode)
            for i in range(brains_num):
                models[i].writer_summary(
                    episode,
                    total_reward=rewards[i].mean(),
                    step=last_done_step
                )
            print('-' * 40)
            # NOTE(review): the '|' placement prints the label
            # 'last_done_step' after the step value -- looks transposed;
            # confirm intended format.
            print(f'episode {episode:3d} | step {step:4d} last_done_step | {last_done_step:4d}')
            if episode % save_frequency == 0:
                for i in range(brains_num):
                    models[i].save_checkpoint(episode)

    @staticmethod
    def no_op(env, brain_names, models, data, brains, steps, choose=False, **kwargs):
        """Pre-fill the replay buffer with at least ``batch_size`` steps.

        By default plays zero actions; with ``choose=True`` the models pick
        the actions instead.  Transitions are stored into ``data``.
        """
        assert isinstance(steps, int), 'multi-agent no_op.steps must have type of int'
        if steps < data.batch_size:
            steps = data.batch_size
        brains_num = len(brain_names)
        agents_num = [0] * brains_num
        state = [0] * brains_num
        action = [0] * brains_num
        reward = [0] * brains_num
        next_state = [0] * brains_num
        dones = [0] * brains_num
        obs = env.reset(train_mode=False)
        for i, brain_name in enumerate(brain_names):
            agents_num[i] = len(obs[brain_name].agents)
            # Zero actions shaped to each brain's action space.
            # NOTE(review): the continuous branch uses dtype=np.int32 as
            # well -- confirm a float dtype isn't expected there.
            if brains[brain_name].vector_action_space_type == 'continuous':
                action[i] = np.zeros((agents_num[i], brains[brain_name].vector_action_space_size[0]), dtype=np.int32)
            else:
                action[i] = np.zeros((agents_num[i], len(brains[brain_name].vector_action_space_size)), dtype=np.int32)
        # NOTE(review): this value of `a` is recomputed inside the loop
        # below before being used -- this assignment looks dead.
        a = [np.array(e) for e in zip(*action)]
        for step in range(steps):
            print(f'no op step {step}')
            for i, brain_name in enumerate(brain_names):
                state[i] = obs[brain_name].vector_observations
                if choose:
                    action[i] = models[i].choose_action(s=state[i])
            actions = {f'{brain_name}': action[i] for i, brain_name in enumerate(brain_names)}
            obs = env.step(vector_action=actions)
            for i, brain_name in enumerate(brain_names):
                reward[i] = np.array(obs[brain_name].rewards)[:, np.newaxis]
                next_state[i] = obs[brain_name].vector_observations
                dones[i] = np.array(obs[brain_name].local_done)[:, np.newaxis]
            # Regroup per-brain lists into per-agent tuples and store.
            s = [np.array(e) for e in zip(*state)]
            a = [np.array(e) for e in zip(*action)]
            r = [np.array(e) for e in zip(*reward)]
            s_ = [np.array(e) for e in zip(*next_state)]
            done = [np.array(e) for e in zip(*dones)]
            data.add(s, a, r, s_, done)

    @staticmethod
    def inference(env, brain_names, models, reset_config, sampler_manager, resampling_interval):
        """
        inference mode. algorithm model will not be train, only used to show agents' behavior
        """
        brains_num = len(brain_names)
        state = [0] * brains_num
        action = [0] * brains_num
        # Runs forever; callers are expected to interrupt the process.
        while True:
            if np.random.uniform() < 0.2:  # the environment has probability below 0.2 to change its parameters while running in the inference mode.
                reset_config.update(sampler_manager.sample_all())
            obs = env.reset(config=reset_config, train_mode=False)
            # NOTE(review): this inner loop never breaks, so env.reset above
            # is only reached once -- confirm whether a done-check is missing.
            while True:
                for i, brain_name in enumerate(brain_names):
                    state[i] = obs[brain_name].vector_observations
                    action[i] = models[i].choose_inference_action(s=state[i])
                actions = {f'{brain_name}': action[i] for i, brain_name in enumerate(brain_names)}
                obs = env.step(vector_action=actions)
|
{"hexsha": "72bf3a8f32353177685959502be1e3cd16f9a16b", "size": 7801, "ext": "py", "lang": "Python", "max_stars_repo_path": "ma_loop.py", "max_stars_repo_name": "StepNeverStop/RL-TF1", "max_stars_repo_head_hexsha": "c9e75819504a8db4c587e2aa3e4c9c8845fd9f08", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-21T03:22:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T08:40:07.000Z", "max_issues_repo_path": "ma_loop.py", "max_issues_repo_name": "StepNeverStop/RL-TF1", "max_issues_repo_head_hexsha": "c9e75819504a8db4c587e2aa3e4c9c8845fd9f08", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2020-01-28T23:08:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:06:11.000Z", "max_forks_repo_path": "ma_loop.py", "max_forks_repo_name": "StepNeverStop/RL-TF1", "max_forks_repo_head_hexsha": "c9e75819504a8db4c587e2aa3e4c9c8845fd9f08", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-05T01:07:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-21T06:39:57.000Z", "avg_line_length": 49.3734177215, "max_line_length": 184, "alphanum_fraction": 0.5403153442, "include": true, "reason": "import numpy", "num_tokens": 1787}
|
# Optimization for Medical Image Segmentation with 2D U-Net on Intel® Xeon Scalable Platform
#### Agenda
1. Background information introduction
2. Intel's optimization technologies on Intel® Xeon Scalable Processors
3. Let's do coding!
### 1. Background Information Introduction
#### 1.1 Brain MRI scan
Magnetic resonance imaging (MRI) of the brain is a safe and painless test that uses a magnetic field and radio waves to produce detailed images of the brain and the brain stem. An MRI differs from a CAT scan (also called a CT scan or a computed axial tomography scan) because it does not use radiation.
An MRI scanner consists of a large doughnut-shaped magnet that often has a tunnel in the center. Patients are placed on a table that slides into the tunnel. Some centers have open MRI machines that have larger openings and are helpful for patients with claustrophobia. MRI machines are located in hospitals and radiology centers.
During the exam, radio waves manipulate the magnetic position of the atoms of the body, which are picked up by a powerful antenna and sent to a computer. The computer performs millions of calculations, resulting in clear, cross-sectional black and white images of the body. These images can be converted into three-dimensional (3-D) pictures of the scanned area. This helps pinpoint problems in the brain and the brain stem when the scan focuses on those areas.
**Reference:** https://kidshealth.org/en/parents/mri-brain.html
<table><tr><td></td><td></td></tr></table>
**Reference:** https://github.com/IntelAI/unet
### 1.2 U-Net for brain images segmentation
U-Net implementation in TensorFlow for FLAIR abnormality segmentation in brain MRI based on a deep learning segmentation algorithm used in [Association of genomic subtypes of lower-grade gliomas with shape features automatically extracted by a deep learning algorithm](https://doi.org/10.1016/j.compbiomed.2019.05.002).
```latex
@article{buda2019association,
title={Association of genomic subtypes of lower-grade gliomas with shape features automatically extracted by a deep learning algorithm},
author={Buda, Mateusz and Saha, Ashirbani and Mazurowski, Maciej A},
journal={Computers in Biology and Medicine},
volume={109},
year={2019},
publisher={Elsevier},
doi={10.1016/j.compbiomed.2019.05.002}
}
```
Topology structured as the following:
**Reference:** https://github.com/mateuszbuda/brain-segmentation-pytorch
### 2. Intel's optimization technologies on Intel® Xeon Scalable Processors
In order to take full advantage of Intel® architecture and to extract maximum performance, the TensorFlow framework has been optimized with Intel® Math Kernel Library for Deep Neural Networks (Intel® oneDNN) primitives, a popular performance library for deep learning applications.
For more information about the optimizations as well as performance data, see the blog post:[TensorFlow Optimizations on Modern Intel® Architecture](https://software.intel.com/en-us/articles/tensorflow-optimizations-on-modern-intel-architecture).
Installation guide of Intel Optimization for TensorFlow can be found at [Intel® Optimization for TensorFlow Installation Guide](https://software.intel.com/en-us/articles/intel-optimization-for-tensorflow-installation-guide).
#### 2.1 Optimization with TensorFlow switches
**intra_op_parallelism_threads**
- Number of threads in each threadpool for an operation (like matrix multiplication or reduction).
- Recommend: #physical cores, found in Linux with ‘lscpu’ command.
**inter_op_parallelism_threads**
- Number of thread pools for independent operations.
- Recommend: #cpu sockets, found in Linux with ‘lscpu’ command.
Note, need to test with the model & platform to find the best parameters.
#### 2.2 Optimization with Intel® oneDNN switches
Intel oneDNN utilizes OpenMP to leverage Intel architecture.
Following environment variables for vectorization and multi-threading.
**KMP_AFFINITY**
- Restricts execution of certain threads to a subset of the physical processing units in a multiprocessor computer.
- Recommend: ```export KMP_AFFINITY=granularity=fine,compact,1,0```
**KMP_BLOCKTIME**
- Set the time (milliseconds), that a thread wait for, after completing the execution of a parallel region, before sleeping.
- Recommend: ```export KMP_BLOCKTIME=0 (or 1)```
**OMP_NUM_THREADS**
- Set maximum number of threads to use for OpenMP parallel regions
- Recommend: ```export OMP_NUM_THREADS=num physical cores```
Note, recommend users tuning these values for their specific neural network model and platform.
#### 2.3 Optimization with miscellaneous configurations/tools
**Numactl**
- Running on a NUMA-enabled machine brings with it special considerations. NUMA or non-uniform memory access is a memory layout design used in data center machines meant to take advantage of locality of memory in multi-socket machines with multiple memory controllers and blocks. In most cases, inference runs best when confining both the execution and memory usage to a single NUMA node.
- Recommend: ```numactl --cpunodebind=N --membind=N python <pytorch_script>```
**Batch size**
- Can increase usage and efficiency of hardware resources.
- Optional according to your requirements.
A more detailed introduction of maximizing performance with Intel Optimization for TensorFlow can be found [here](https://software.intel.com/en-us/articles/maximize-tensorflow-performance-on-cpu-considerations-and-recommendations-for-inference).
https://software.intel.com/en-us/articles/maximize-tensorflow-performance-on-cpu-considerations-and-recommendations-for-inference
### 3. Let's do coding!
#### 3.0 Dataset
We use [brain tumor segmentation (BraTS) subset](https://drive.google.com/file/d/1A2IU8Sgea1h3fYLpYtFb2v7NYdMjvEhU/view?usp=sharing) of the [Medical Segmentation Decathlon](http://medicaldecathlon.com/) dataset. The dataset has the [Creative Commons Attribution-ShareAlike 4.0 International license](https://creativecommons.org/licenses/by-sa/4.0/).
Please follow instructions [here](https://github.com/IntelAI/unet/blob/master/2D/00_Prepare-Data.ipynb) to prepare the dataset.
#### 3.1 Import required packages
```python
%matplotlib inline
import os
import numpy as np
import tensorflow as tf
import keras as K
import h5py
import math
import pandas as pd
import time
import matplotlib.pyplot as plt
import sys; sys.argv=['']; del sys
from argparser import args
from data import load_data
from model import unet
```
#### 3.2 Check TensorFlow version, and do sanity check
```python
print ("We are using TensorFlow version", tf.__version__,\
"with Intel(R) oneDNN", "enabled" if tf.pywrap_tensorflow.IsMklEnabled() else "disabled",)
```
#### 3.3 Define the DICE coefficient and loss function
The Sørensen–Dice coefficient is a statistic used for comparing the similarity of two samples. Given two sets, X and Y, it is defined as
\begin{equation}
Dice = \frac{2|X\cap Y|}{|X|+|Y|}
\end{equation}
```python
def calc_dice(target, prediction, smooth=0.01):
"""
Sorensen Dice coefficient
"""
prediction = np.round(prediction)
numerator = 2.0 * np.sum(target * prediction) + smooth
denominator = np.sum(target) + np.sum(prediction) + smooth
coef = numerator / denominator
return coef
def calc_soft_dice(target, prediction, smooth=0.01):
"""
Sorensen (Soft) Dice coefficient - Don't round predictions
"""
numerator = 2.0 * np.sum(target * prediction) + smooth
denominator = np.sum(target) + np.sum(prediction) + smooth
coef = numerator / denominator
return coef
```
#### 3.4 Load images
```python
data_path = os.path.join("../../data/decathlon/144x144/")
data_filename = "Task01_BrainTumour.h5"
hdf5_filename = os.path.join(data_path, data_filename)
imgs_train, msks_train, imgs_validation, msks_validation, imgs_testing, msks_testing = load_data(hdf5_filename)
imgs_warmup=imgs_testing[:500]
imgs_infere=imgs_testing[500:2500]
print("Number of imgs_warmup: {}".format(imgs_warmup.shape[0]))
print("Number of imgs_infere: {}".format(imgs_infere.shape[0]))
```
#### 3.5 Load model
```python
unet_model = unet()
model = unet_model.load_model(os.path.join("./output/unet_model_for_decathlon.hdf5"))
```
#### 3.6 Define function to inference on input images and plot results out
```python
def plot_results(model, imgs_validation, msks_validation, idx):
    img = imgs_validation[idx:idx+1]
    msk = msks_validation[idx:idx+1]
pred_mask = model.predict(img, verbose=1, steps=None)
plt.figure(figsize=(15, 15))
plt.subplot(1, 3, 1)
plt.imshow(img[0, :, :, 0], cmap="bone", origin="lower")
plt.axis("off")
plt.title("MRI Input", fontsize=20)
plt.subplot(1, 3, 2)
plt.imshow(msk[0, :, :, 0], origin="lower")
plt.axis("off")
plt.title("Ground truth", fontsize=20)
plt.subplot(1, 3, 3)
plt.imshow(pred_mask[0, :, :, 0], origin="lower")
plt.axis("off")
plt.title("Prediction\nDice = {:.4f}".format(calc_dice(pred_mask, msk)), fontsize=20)
plt.tight_layout()
```
#### 3.7 Run inference and plot
```python
indicies_validation = [40, 63, 43, 55, 99]
for idx in indicies_validation:
plot_results(model, imgs_validation, msks_validation, idx)
```
#### 3.8 Demo of 2D U-Net inference optimization
See demo in console.
*Performance results:*
- Latency
- Time spent for processing 1 image.
- Unit: millisecond per frame (ms/f)
- Throughput
- Number of processed images in 1 second.
- Unit: frames per second (f/s)
*Runs:*
- Single stream
- Batch size 1
- Without numactl
- Default configuration
- Configuration with optimization
- With numactl
- Default configuration
- Configuration with optimization
- Batch size 128
- With numactl
- Default configuration
- Configuration with optimization
- Multiple streams
- Batch size 128
- Configuration with optimization
- 2 streams
- 4 streams
- 8 streams
##### 3.8.1 Configuration with optimization & numactl
```python
def plot_perf(x_labels, latencys, throughputs, xlabel, batch_size):
x = np.arange(len(x_labels))
plt.figure(figsize=(16,6))
plt.subplot(121)
ax1 = plt.gca()
ax1.bar(x, latencys, width=0.3)
ax1.set_title('Latency (batch size: {}, the less the better)'.format(batch_size))
ax1.set_xlabel(xlabel)
ax1.set_ylabel('Latency (ms)')
ax1.set_xticks(x)
ax1.set_xticklabels(x_labels)
for rect, label in zip(ax1.patches, latencys):
height = rect.get_height()
ax1.text(rect.get_x() + rect.get_width() / 2, height, label, ha='center', va='bottom')
plt.subplot(122)
ax2 = plt.gca()
ax2.bar(x, throughputs, color='tab:green', width=0.3)
ax2.set_title('Throughput (batch size: {}, the larger the better)'.format(batch_size))
ax2.set_xlabel(xlabel)
ax2.set_ylabel('Through (fps)')
ax2.set_xticks(x)
ax2.set_xticklabels(x_labels)
for rect, label in zip(ax2.patches, throughputs):
height = rect.get_height()
ax2.text(rect.get_x() + rect.get_width() / 2, height, label, ha='center', va='bottom')
plt.show()
```
```python
# load result performance data
rsts = []
with open('04_rst_opt_numa.csv', 'r') as f:
for line in f.readlines():
rst = line.strip().split(',')
rsts.append(rst)
```
```python
# batch size: 1
x_labels = ['default conf', 'conf with optimization', 'conf with opt + numactl']
latencys_conf_1 = [float(rsts[1][2]), float(rsts[2][2]), float(rsts[3][2])]
throughputs_conf_1 = [float(rsts[1][3]), float(rsts[2][3]), float(rsts[3][3])]
plot_perf(x_labels, latencys_conf_1, throughputs_conf_1, 'Configuration', 1)
```
```python
# batch size: 128
x_labels = ['default conf', 'conf with optimization', 'conf with opt + numactl']
latencys_conf_128 = [float(rsts[4][2]), float(rsts[5][2]), float(rsts[6][2])]
throughputs_conf_128 = [float(rsts[4][3]), float(rsts[5][3]), float(rsts[6][3])]
plot_perf(x_labels, latencys_conf_128, throughputs_conf_128, 'Configuration', 128)
```
##### 3.8.2 number of streams
```python
# load result performance data
rsts = []
with open('04_rst_instances.csv', 'r') as f:
for line in f.readlines():
rst = line.strip().split(',')
rsts.append(rst)
```
```python
# batch size: 1
x_labels = [1, 2, 4, 8]
latencys_stream_1 = [float(rsts[1][2]), float(rsts[3][2]), float(rsts[4][2]), float(rsts[5][2])]
throughputs_stream_1 = [float(rsts[1][3]), float(rsts[3][3]), float(rsts[4][3]), float(rsts[5][3])]
plot_perf(x_labels, latencys_stream_1, throughputs_stream_1, 'Number of streams', 1)
```
```python
# batch size: 128
x_labels = [1, 2, 4, 8]
latencys_stream_128 = [float(rsts[2][2]), float(rsts[6][2]), float(rsts[7][2]), float(rsts[8][2])]
throughputs_stream_128 = [float(rsts[2][3]), float(rsts[6][3]), float(rsts[7][3]), float(rsts[8][3])]
plot_perf(x_labels, latencys_stream_128, throughputs_stream_128, 'Number of streams', 128)
```
### 4. Summary
```python
def plot_summary(x_labels, y_labels, xlabel, ylabel, title, color):
x = np.arange(len(x_labels))
plt.figure(figsize=(16,6))
ax = plt.gca()
ax.bar(x, y_labels, color=color, width=0.3)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticks(x)
ax.set_xticklabels(x_labels)
for rect, label in zip(ax.patches, y_labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2, height, label, ha='center', va='bottom')
plt.show()
```
#### 4.0 Scenarios:
1. You need to get the inference result as soon as possible. (Online processing)
2. You need to process a large bunch of data in a certain period. (Offline processing)
#### 4.1 Online processing
From **latency** perspective point of view.
\* Run 1 stream processing 1 image/frame on 1 socket with configuration with optimization
```python
x_labels = ['default conf,\nbs=1, stream=1', 'conf with optimization,\nbs=1, stream=1', 'conf with opt + numactl,\nbs=1, stream=1', 'conf with opt + numactl,\nbs=1, stream=2', 'conf with opt + numactl,\nbs=1, stream=4']
x = np.arange(len(x_labels))
latencys_summary = [latencys_conf_1[0], latencys_conf_1[1], latencys_conf_1[2], latencys_stream_1[1], latencys_stream_1[2]]
plot_summary(x_labels, latencys_summary, 'Configuration', 'Latency (ms)', 'Latency with different configurations (the less the better)', 'tab:blue')
print('Number of sockets: 2')
```
#### 4.2 Offline processing
From **throughput** perspective point of view.
\* Run multiple streams processing a batch of images/frames on all sockets with configuration with optimization
```python
x_labels = ['conf with opt + numactl,\nbs=1, stream=1', 'conf with opt + numactl,\nbs=128, stream=1', 'conf with opt + numactl,\nbs=128, stream=4', 'conf with opt + numactl,\nbs=128, stream=8']
x = np.arange(len(x_labels))
throughputs_summary = [throughputs_conf_1[2], throughputs_conf_128[2], throughputs_stream_128[1], throughputs_stream_128[2]]
plot_summary(x_labels, throughputs_summary, 'Configuration', 'Throughput (fps)', 'Throughput with different configurations (the larger the better)', 'tab:green')
print('Number of sockets: 2')
```
**The optimization methods in this course are not the best.**
**All optimization methods should be adjusted based on test result. It depends on the detailed model, dataset, HardWare & user case.**
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: EPL-2.0
|
{"hexsha": "ee5777379bb40b914a9485fe1db40af1e8cf0c6e", "size": 23013, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "2D/04_Inference.ipynb", "max_stars_repo_name": "jingxu10/medical-decathlon", "max_stars_repo_head_hexsha": "711dba6acb1bfb8bac88b4936980bd21b45995bd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2D/04_Inference.ipynb", "max_issues_repo_name": "jingxu10/medical-decathlon", "max_issues_repo_head_hexsha": "711dba6acb1bfb8bac88b4936980bd21b45995bd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2D/04_Inference.ipynb", "max_forks_repo_name": "jingxu10/medical-decathlon", "max_forks_repo_head_hexsha": "711dba6acb1bfb8bac88b4936980bd21b45995bd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7619808307, "max_line_length": 470, "alphanum_fraction": 0.6043975144, "converted": true, "num_tokens": 4217}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# @file base.py
# @authors Tiantian Guo
# Kyeong Soo (Joseph) Kim <Kyeongsoo.Kim@xjtlu.edu.cn>
# @date 2019-04-22
#
# @brief Simulate DASH video streaming.
#
# @remarks It is part of Tiantian's master thesis project and
# modified by Kyeong Soo (Joseph) Kim for EEE415 Labs.
### import modules
import sys
sys.path.insert(0, '.') # for modules in the current directory
import matplotlib.pyplot as plt
import numpy as np
from itertools import *
from bw_predictor import bw_predictor # to be implemented by user
def simulate_dash(br, bw, bl, sd, nps, nfs, ws):
    """Simulate DASH video streaming with predictive quality adaptation.

    For each segment past the warm-up period, exhaustively evaluates every
    quality pattern over the current + ``nfs`` future segments using
    LSTM-predicted bandwidths, picks the pattern maximizing a QoE
    objective, and requests its first quality level.

    Args:
        br: array of shape (n_segments, n_quality_levels) -- per-segment
            bitrate of each quality level.
        bw: 1-D array of measured channel bandwidths, one per segment.
        bl: bandwidth levels.  NOTE(review): unused inside this function --
            confirm whether it can be dropped from the interface.
        sd: duration of one segment in seconds.
        nps: number of past segments fed to the bandwidth predictor.
        nfs: number of future segments to look ahead.
        ws: (w1, w2, w3) QoE weights for quality switching, starvation,
            and buffer growth respectively.

    Returns:
        (QoE, Q, T): overall QoE score, chosen quality levels (1-based)
        and end-of-segment buffer levels, both restricted to the
        adaptation period.
    """
    # set parameters
    nq = br.shape[1]  # number of quality levels
    ns = len(br)  # number of segments
    phi = np.array(list(product(range(1, nq+1), repeat=nfs+1)))  # quality patterns over current and future segments
    w1, w2, w3 = ws  # weights of QoE

    # create a bw_predictor object for bandwidth prediction
    bp = bw_predictor(
        model_fname='lstm_model.h5',
        scaler_fname='lstm_scaler.joblib',
        n_past_segments=nps,
        n_future_segments=nfs)
    t = np.zeros(nfs+2)   # hypothetical buffer levels for a candidate pattern
    ts = np.zeros(nfs+1)  # hypothetical starvation times for a candidate pattern
    qoe = np.zeros(len(phi))
    Q = np.zeros(ns, dtype=int)  # to be used as index
    T = np.zeros(ns+1)  # actual buffer level after each segment
    Ts = np.zeros(ns)   # actual starvation time per segment

    # buffer update during the intial period w/o adaptation:
    # lowest quality (Q=1) and an initial buffer of 5 s.
    Q[:nps] = 1
    T[0] = 5
    for i in range(nps):
        T[i+1] = max(T[i]-br[i][Q[i]-1]*sd/bw[i]+sd, 0)
        Ts[i] = max(br[i][Q[i]-1]*sd/bw[i]-T[i], 0)

    # main simulation loop for adaptation
    idxs = np.arange(nps, ns-nfs)
    for i in idxs:
        # Predicted bandwidths from the last nps measurements.
        # NOTE(review): pbws is indexed up to pbws[nfs] below, i.e. nfs+1
        # values are consumed -- confirm bw_predictor.predict returns that
        # many.
        pbws = bp.predict(bw[i-nps:i].reshape((1, nps)))  # prediced bandwdiths
        for j in range(len(phi)):  # optimization over quality patterns
            q = phi[j][:]
            e = np.mean(q)                  # average quality of the pattern
            v = abs(q[1:]-q[:-1]).mean()    # average quality switch magnitude
            # buffer updating for the current and future segments
            # N.B.: be careful about the indexes.
            t[0] = T[i]
            for k in range(nfs+1):
                t[k+1] = max(t[k]-br[i+k][q[k]-1]*sd/pbws[k]+sd, 0)
                ts[k] = max(br[i+k][q[k]-1]*sd/pbws[k]-t[k], 0)
            tst = np.sum(ts)           # total predicted starvation time
            ttt = sd*(nfs+1)+tst       # total playback + starvation time
            ps = tst/ttt               # predicted starvation ratio
            delta = (t[nfs+1]-t[0])/(nfs+1)  # average buffer growth per segment
            qoe[j] = e-w1*v-w2*ps+w3*delta
        qindex = np.argmax(qoe, axis=0)
        Q[i] = phi[qindex][0]  # request the first quality of the best pattern
        # update buffer level for the current segment
        T[i+1] = max(T[i]-br[i][Q[i]-1]*sd/bw[i]+sd, 0)
        Ts[i] = max(br[i][Q[i]-1]*sd/bw[i]-T[i], 0)

    # limit performance metrics to the adaptation period
    Q = Q[idxs]
    Ts = Ts[idxs]
    T = T[idxs+1]
    E = Q.mean()  # average requested media quality
    V = abs(Q[1:]-Q[:-1]).mean()  # quality switching frequency
    TSS = Ts.sum()
    PSS = TSS/(ns*sd + TSS)  # ratio of starvation event in time domain
    QoE = E - w1*V - w2*PSS
    return QoE, Q, T
if __name__ == "__main__":
    import argparse

    # --- command-line arguments -------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-B",
        "--dash_bitrates",
        help=
        "name of a DASH bitrates file name; default is 'bigbuckbunny.npy' (for 'big buck bunny')",
        default='bigbuckbunny.npy',
        type=str)
    parser.add_argument(
        "-C",
        "--channel_bandwidths",
        help=
        "name of a channel bandwidths file name; default is 'bandwidths.npy'",
        default='bandwidths.npy',
        type=str)
    parser.add_argument(
        "-L",
        "--bandwidth_levels",
        help="comma-separated numbers for bandwidth levels to generate; default is '20,60,100,500,1000'",
        default='20,60,100,500,1000',
        type=str)
    parser.add_argument(
        "-D",
        "--segment_duration",
        help=
        "duration of each segment in a DASH video stream; default is 2 [s]",
        default=2,
        type=float)
    parser.add_argument(
        "-P",
        "--n_past_segments",
        help=
        "number of past segments used for bandwidth prediction; default is 5",
        default=5,
        type=int)
    parser.add_argument(
        "-F",
        "--n_future_segments",
        help=
        "number of future segments to predict bandwidths for; default is 1",
        default=1,
        type=int)
    parser.add_argument(
        "-W",
        "--qoe_weights",
        help="comma-separated numbers for QoE weights (i.e., w1, w2, w3(->lambda)); default is '0.3333,2,0.9'",
        default='0.3333,2,0.9',
        type=str)
    args = parser.parse_args()
    dash_bitrates = args.dash_bitrates
    channel_bandwidths = args.channel_bandwidths
    # sorted bandwidth levels from a string into an array
    # NOTE(review): bl is passed to simulate_dash() but not used there -- verify
    bl = np.sort(list(map(int, args.bandwidth_levels.split(','))))
    sd = args.segment_duration
    nps = args.n_past_segments
    nfs = args.n_future_segments
    qw = list(map(float, args.qoe_weights.split(','))) # w3 -> lambda; lambda is a Python keyword
    # --- read data --------------------------------------------------------
    br = np.load(dash_bitrates) # DASH bitrates
    bw = np.load(channel_bandwidths) # channel bandwidths
    # --- simulate DASH video streaming ------------------------------------
    QoE, Q, T = simulate_dash(br, bw, bl, sd, nps, nfs, qw)
    # print QoE
    print("QoE: {0:.4e}".format(QoE))
    # --- plot the figures -------------------------------------------------
    ns = len(br) # number of segments
    x = np.arange(ns)
    bit = np.empty(ns)
    Tplot = np.empty(ns) # T for plotting
    # NaN outside the adaptation period so matplotlib leaves gaps there
    bit[:] = np.nan # ignore values outside the adaptation period
    Tplot[:] = np.nan # "
    for i in range(nps, ns-nfs):
        # Q/T are indexed relative to the adaptation window start (nps)
        bit[i] = br[i][Q[i-nps]-1]
        Tplot[i] = T[i-nps]
    plt.close('all')
    fig, axs = plt.subplots(2, 1)
    # 1st subplot: bandwidth vs. selected video bitrate
    axs[0].plot(x, bw, color='blue', label='Bandwidth')
    axs[0].plot(x, bit, color='red', label='Video Bitrate')
    axs[0].set_xlabel('Segment Index')
    axs[0].set_ylabel('Bitrate [kbps]')  # fixed: was '[kbs]', inconsistent with 2nd subplot
    axs[0].legend()
    # 2nd subplot: bandwidth vs. buffer reservation (twin y-axes)
    r_ax = axs[1].twinx()
    p1 = axs[1].plot(x, bw, color='blue', label='Bandwidth')
    p2 = r_ax.plot(x, Tplot, color='red', label='Buffer Reservation')
    axs[1].set_xlabel('Segment Index')
    axs[1].set_ylabel('Bitrate [kbps]')
    r_ax.set_ylabel('Buffer Reservation [kb]')
    ps = p1+p2
    labels = [p.get_label() for p in ps] # combine legends of both axes
    axs[1].legend(ps, labels, loc='upper left')
    plt.tight_layout()
    plt.show()
|
{"hexsha": "e4d7328fdbd74e8a7b8da688c908d725fb3b1656", "size": 6499, "ext": "py", "lang": "Python", "max_stars_repo_path": "base.py", "max_stars_repo_name": "kyeongsoo/dash-simulation", "max_stars_repo_head_hexsha": "ceccfee61d7102146e83b0a2d60d87693c871198", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "base.py", "max_issues_repo_name": "kyeongsoo/dash-simulation", "max_issues_repo_head_hexsha": "ceccfee61d7102146e83b0a2d60d87693c871198", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "base.py", "max_forks_repo_name": "kyeongsoo/dash-simulation", "max_forks_repo_head_hexsha": "ceccfee61d7102146e83b0a2d60d87693c871198", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-06T14:02:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-06T14:02:35.000Z", "avg_line_length": 33.3282051282, "max_line_length": 119, "alphanum_fraction": 0.5685490075, "include": true, "reason": "import numpy", "num_tokens": 1927}
|
[STATEMENT]
lemma red_lcl_add_aux:
"extTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e, (hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e', (hp s', l0 ++ lcl s')\<rangle>"
and reds_lcl_add_aux:
"extTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>es, (hp s, l0 ++ lcl s)\<rangle> [-ta\<rightarrow>] \<langle>es', (hp s', l0 ++ lcl s')\<rangle>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>) &&& (extTA,P,t \<turnstile> \<langle>es,s\<rangle> [-ta\<rightarrow>] \<langle>es',s'\<rangle> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>es,(hp s, l0 ++ lcl s)\<rangle> [-ta\<rightarrow>] \<langle>es',(hp s', l0 ++ lcl s')\<rangle>)
[PROOF STEP]
proof (induct arbitrary: l0 and l0 rule:red_reds.inducts)
[PROOF STATE]
proof (state)
goal (98 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 98 subgoals...
[PROOF STEP]
case (BlockRed e h x V vo ta e' h' x' T)
[PROOF STATE]
proof (state)
this:
extTA,P,t \<turnstile> \<langle>e,(h, x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', x')\<rangle>
extTA,P,t \<turnstile> \<langle>e,(hp (h, x(V := vo)), ?l0.18 ++ lcl (h, x(V := vo)))\<rangle> -ta\<rightarrow> \<langle>e',(hp (h', x'), ?l0.18 ++ lcl (h', x'))\<rangle>
goal (98 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 98 subgoals...
[PROOF STEP]
note IH = \<open>\<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp (h, x(V := vo)), l0 ++ lcl (h, x(V := vo)))\<rangle> -ta\<rightarrow> \<langle>e',(hp (h', x'), l0 ++ lcl (h', x'))\<rangle>\<close>[simplified]
[PROOF STATE]
proof (state)
this:
extTA,P,t \<turnstile> \<langle>e,(h, ?l0.0 ++ x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', ?l0.0 ++ x')\<rangle>
goal (98 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 98 subgoals...
[PROOF STEP]
have lrew: "\<And>x x'. x(V := vo) ++ x'(V := vo) = (x ++ x')(V := vo)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x x'. x(V := vo) ++ x'(V := vo) = (x ++ x')(V := vo)
[PROOF STEP]
by(simp add:fun_eq_iff map_add_def)
[PROOF STATE]
proof (state)
this:
?x18(V := vo) ++ ?x'18(V := vo) = (?x18 ++ ?x'18)(V := vo)
goal (98 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 98 subgoals...
[PROOF STEP]
have lrew1: "\<And>X X' X'' vo. (X(V := vo) ++ X')(V := (X ++ X'') V) = X ++ X'(V := X'' V)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>X X' X'' vo. (X(V := vo) ++ X')(V := (X ++ X'') V) = X ++ X'(V := X'' V)
[PROOF STEP]
by(simp add: fun_eq_iff map_add_def)
[PROOF STATE]
proof (state)
this:
(?X18(V := ?vo18) ++ ?X'18)(V := (?X18 ++ ?X''18) V) = ?X18 ++ ?X'18(V := ?X''18 V)
goal (98 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 98 subgoals...
[PROOF STEP]
have lrew2: "\<And>X X'. (X(V := None) ++ X') V = X' V"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>X X'. (X(V := None) ++ X') V = X' V
[PROOF STEP]
by(simp add: map_add_def)
[PROOF STATE]
proof (state)
this:
(?X19(V := None) ++ ?X'19) V = ?X'19 V
goal (98 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 98 subgoals...
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
proof(cases vo)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. vo = None \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
2. \<And>a. vo = \<lfloor>a\<rfloor> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
case None
[PROOF STATE]
proof (state)
this:
vo = None
goal (2 subgoals):
1. vo = None \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
2. \<And>a. vo = \<lfloor>a\<rfloor> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
from IH[of "l0(V := vo)"]
[PROOF STATE]
proof (chain)
picking this:
extTA,P,t \<turnstile> \<langle>e,(h, l0(V := vo) ++ x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', l0(V := vo) ++ x')\<rangle>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
extTA,P,t \<turnstile> \<langle>e,(h, l0(V := vo) ++ x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', l0(V := vo) ++ x')\<rangle>
goal (1 subgoal):
1. extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
apply(simp del: fun_upd_apply add: lrew)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. extTA,P,t \<turnstile> \<langle>e,(h, (l0 ++ x)(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', l0(V := vo) ++ x')\<rangle> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(h, l0 ++ x)\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(h', l0 ++ x'(V := x V))\<rangle>
[PROOF STEP]
apply(drule red_reds.BlockRed)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. extTA,P,t \<turnstile> \<langle>{V:?T1=vo; e},(h, l0 ++ x)\<rangle> -ta\<rightarrow> \<langle>{V:?T1=(l0(V := vo) ++ x') V; e'},(h', (l0(V := vo) ++ x')(V := (l0 ++ x) V))\<rangle> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(h, l0 ++ x)\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(h', l0 ++ x'(V := x V))\<rangle>
[PROOF STEP]
by(simp only: lrew1 None lrew2)
[PROOF STATE]
proof (state)
this:
extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
goal (1 subgoal):
1. \<And>a. vo = \<lfloor>a\<rfloor> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a. vo = \<lfloor>a\<rfloor> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
case (Some v)
[PROOF STATE]
proof (state)
this:
vo = \<lfloor>v\<rfloor>
goal (1 subgoal):
1. \<And>a. vo = \<lfloor>a\<rfloor> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
with \<open>extTA,P,t \<turnstile> \<langle>e,(h, x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', x')\<rangle>\<close>
[PROOF STATE]
proof (chain)
picking this:
extTA,P,t \<turnstile> \<langle>e,(h, x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', x')\<rangle>
vo = \<lfloor>v\<rfloor>
[PROOF STEP]
have "x' V \<noteq> None"
[PROOF STATE]
proof (prove)
using this:
extTA,P,t \<turnstile> \<langle>e,(h, x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', x')\<rangle>
vo = \<lfloor>v\<rfloor>
goal (1 subgoal):
1. x' V \<noteq> None
[PROOF STEP]
by -(drule red_lcl_incr, auto split: if_split_asm)
[PROOF STATE]
proof (state)
this:
x' V \<noteq> None
goal (1 subgoal):
1. \<And>a. vo = \<lfloor>a\<rfloor> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
with IH[of "l0(V := vo)"]
[PROOF STATE]
proof (chain)
picking this:
extTA,P,t \<turnstile> \<langle>e,(h, l0(V := vo) ++ x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', l0(V := vo) ++ x')\<rangle>
x' V \<noteq> None
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
extTA,P,t \<turnstile> \<langle>e,(h, l0(V := vo) ++ x(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', l0(V := vo) ++ x')\<rangle>
x' V \<noteq> None
goal (1 subgoal):
1. extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
[PROOF STEP]
apply(clarsimp simp del: fun_upd_apply simp add: lrew)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,(h, (l0 ++ x)(V := vo))\<rangle> -ta\<rightarrow> \<langle>e',(h', l0(V := vo) ++ x')\<rangle>; x' V = \<lfloor>y\<rfloor>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(h, l0 ++ x)\<rangle> -ta\<rightarrow> \<langle>{V:T=\<lfloor>y\<rfloor>; e'},(h', l0 ++ x'(V := x V))\<rangle>
[PROOF STEP]
apply(drule red_reds.BlockRed)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>y. \<lbrakk>x' V = \<lfloor>y\<rfloor>; extTA,P,t \<turnstile> \<langle>{V:?T3 y=vo; e},(h, l0 ++ x)\<rangle> -ta\<rightarrow> \<langle>{V:?T3 y=(l0(V := vo) ++ x') V; e'},(h', (l0(V := vo) ++ x')(V := (l0 ++ x) V))\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(h, l0 ++ x)\<rangle> -ta\<rightarrow> \<langle>{V:T=\<lfloor>y\<rfloor>; e'},(h', l0 ++ x'(V := x V))\<rangle>
[PROOF STEP]
by(simp add: lrew1 Some del: fun_upd_apply)
[PROOF STATE]
proof (state)
this:
extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
extTA,P,t \<turnstile> \<langle>{V:T=vo; e},(hp (h, x), l0 ++ lcl (h, x))\<rangle> -ta\<rightarrow> \<langle>{V:T=x' V; e'},(hp (h', x'(V := x V)), l0 ++ lcl (h', x'(V := x V)))\<rangle>
goal (97 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 97 subgoals...
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (97 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 97 subgoals...
[PROOF STEP]
case RedTryFail
[PROOF STATE]
proof (state)
this:
typeof_addr (hp s_) a_ = \<lfloor>Class_type D_\<rfloor>
\<not> P \<turnstile> D_ \<preceq>\<^sup>* C_
goal (97 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 97 subgoals...
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
typeof_addr (hp s_) a_ = \<lfloor>Class_type D_\<rfloor>
\<not> P \<turnstile> D_ \<preceq>\<^sup>* C_
goal (1 subgoal):
1. extTA,P,t \<turnstile> \<langle>try Throw a_ catch(C_ V_) e2_,(hp s_, l0 ++ lcl s_)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw a_,(hp s_, l0 ++ lcl s_)\<rangle>
[PROOF STEP]
by(auto intro: red_reds.RedTryFail)
[PROOF STATE]
proof (state)
this:
extTA,P,t \<turnstile> \<langle>try Throw a_ catch(C_ V_) e2_,(hp s_, l0 ++ lcl s_)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw a_,(hp s_, l0 ++ lcl s_)\<rangle>
goal (96 subgoals):
1. \<And>h' a h C l l0. (h', a) \<in> allocate h (Class_type C) \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Class_type C)\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
2. \<And>h C l l0. allocate h (Class_type C) = {} \<Longrightarrow> extTA,P,t \<turnstile> \<langle>new C,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
3. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>e\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>newA T\<lfloor>e'\<rceil>,(hp s', l0 ++ lcl s')\<rangle>
4. \<And>i h' a h T l l0. \<lbrakk>0 \<le>s i; (h', a) \<in> allocate h (Array_type T (nat (sint i)))\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>NewHeapElem a (Array_type T (nat (sint i)))\<rbrace>\<rightarrow> \<langle>addr a,(hp (h', l), l0 ++ lcl (h', l))\<rangle>
5. \<And>i T s l0. i <s 0 \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt NegativeArraySize),(hp s, l0 ++ lcl s)\<rangle>
6. \<And>i h T l l0. \<lbrakk>0 \<le>s i; allocate h (Array_type T (nat (sint i))) = {}\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>newA T\<lfloor>Val (Intg i)\<rceil>,(hp (h, l), l0 ++ lcl (h, l))\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt OutOfMemory),(hp (h, l), l0 ++ lcl (h, l))\<rangle>
7. \<And>e s ta e' s' C l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast C e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>Cast C e',(hp s', l0 ++ lcl s')\<rangle>
8. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Val v,(hp s, l0 ++ lcl s)\<rangle>
9. \<And>s v U T l0. \<lbrakk>typeof\<^bsub>hp s\<^esub> v = \<lfloor>U\<rfloor>; \<not> P \<turnstile> U \<le> T\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>Cast T (Val v),(hp s, l0 ++ lcl s)\<rangle> -\<lbrace>\<rbrace>\<rightarrow> \<langle>Throw (addr_of_sys_xcpt ClassCast),(hp s, l0 ++ lcl s)\<rangle>
10. \<And>e s ta e' s' T l0. \<lbrakk>extTA,P,t \<turnstile> \<langle>e,s\<rangle> -ta\<rightarrow> \<langle>e',s'\<rangle>; \<And>l0. extTA,P,t \<turnstile> \<langle>e,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e',(hp s', l0 ++ lcl s')\<rangle>\<rbrakk> \<Longrightarrow> extTA,P,t \<turnstile> \<langle>e instanceof T,(hp s, l0 ++ lcl s)\<rangle> -ta\<rightarrow> \<langle>e' instanceof T,(hp s', l0 ++ lcl s')\<rangle>
A total of 96 subgoals...
[PROOF STEP]
qed(fastforce intro:red_reds.intros simp del: fun_upd_apply)+
|
{"llama_tokens": 19954, "file": "JinjaThreads_J_SmallStep", "length": 33}
|
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.load_buffer import load_data_from_npy_chaining,load_data_from_npy_chaining_mult
from rlkit.samplers.data_collector import MdpPathCollector, \
CustomMDPPathCollector
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic
from rlkit.torch.sac.cql import CQLTrainer
from rlkit.torch.conv_networks import CNN, ConcatCNN, ConcatBottleneckCNN, VQVAEEncoderConcatCNN, \
ConcatBottleneckVQVAECNN, VQVAEEncoderCNN
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from rlkit.util.video import VideoSaveFunction
from rlkit.launchers.launcher_util import setup_logger
import argparse, os
import roboverse
import numpy as np
import os
from os.path import expanduser
# Fallback .npy replay-buffer paths (developer-machine defaults); normally
# overridden on the command line via --prior-buffer / --task-buffer.
DEFAULT_BUFFER = ('/media/avi/data/Work/github/avisingh599/minibullet'
                  '/data/oct6_Widow250DrawerGraspNeutral-v0_20K_save_all'
                  '_noise_0.1_2020-10-06T19-37-26_100.npy')
DEFAULT_PRIOR_BUFFER = ('/media/avi/data/Work/github/avisingh599/minibullet'
                        '/data/oct6_Widow250DrawerGraspNeutral-v0_20K_save_all'
                        '_noise_0.1_2020-10-06T19-37-26_100.npy')
DEFAULT_TASK_BUFFER = ('/media/avi/data/Work/github/avisingh599/minibullet'
                       '/data/oct6_Widow250DrawerGraspNeutral-v0_20K_save_all'
                       '_noise_0.1_2020-10-06T19-37-26_100.npy')
# Preferred log root; used only when it exists on this machine (see __main__).
CUSTOM_LOG_DIR = '/home/asap7772/workflow_output'
def process_buffer(args):
    """Build the list of replay-buffer specs for the chosen ``args.buffer`` id.

    Each entry is a ``(path, metadata)`` tuple where ``metadata`` carries the
    sampling probability ``p`` (from ``args.prob``) and an optional
    ``alter_type`` tag (``'zero'`` marks prior data whose rewards are altered
    downstream).

    Args:
        args: parsed CLI namespace; reads ``args.buffer`` (int id, 0-2),
            ``args.prob`` (float) and ``args.azure`` (bool data-root switch).

    Returns:
        list of two ``(path, dict)`` tuples: prior buffer first, task second.

    Raises:
        ValueError: if ``args.buffer`` is not a supported id.
    """
    path = '/home/asap7772/cog_data/'
    buffers = []
    home = os.path.expanduser("~")
    # Prior data lives under the home dir on Azure, on an NFS mount otherwise.
    p_data_path = os.path.join(home, 'prior_data/') if args.azure else '/nfs/kun1/users/asap7772/prior_data/'

    # Named helper instead of a lambda assigned to a name (PEP 8 E731).
    # `path` is resolved at call time, so reassigning it below redirects
    # subsequent appends.
    def ba(fname, p=args.prob, y=None):
        buffers.append((os.path.join(path, fname), dict(p=p, alter_type=y)))

    if args.buffer == 0:
        path = p_data_path
        ba('closed_drawer_prior.npy', y='zero')
        ba('drawer_task.npy')
    elif args.buffer == 1:
        path = p_data_path
        ba('blocked_drawer_1_prior.npy', y='zero')
        ba('drawer_task.npy')
    elif args.buffer == 2:
        path = p_data_path
        ba('blocked_drawer_2_prior.npy', y='zero')
        ba('drawer_task.npy')
    else:
        # Raise instead of `assert False`: asserts are stripped under -O.
        raise ValueError("Invalid Buffer: {}".format(args.buffer))
    return buffers
def experiment(variant):
    """Construct networks from ``variant``, load offline data, and run CQL.

    Builds a roboverse eval environment, Q-networks (optionally with a VQVAE
    encoder or an information bottleneck), a Tanh-Gaussian policy on top of a
    CNN observation processor, loads the chained prior+task replay buffer,
    and launches batch (offline) RL training.  Blocks until training ends.
    """
    eval_env = roboverse.make(variant['env'], transpose_image=True)
    if variant['num_sample'] != 0:
        eval_env.num_obj_sample=variant['num_sample']
    # Offline RL: the exploration env is never really stepped, reuse eval env.
    expl_env = eval_env
    action_dim = eval_env.action_space.low.size
    print(action_dim)

    cnn_params = variant['cnn_params']
    cnn_params.update(
        dropout = variant['dropout'],
        dropout_prob = variant['dropout_prob'],
    )
    # Mutually-independent architecture switches; later updates win if
    # several flags are set at once.
    if variant['bigger_net']:
        print('bigger_net')
        cnn_params.update(
            hidden_sizes=[1024, 512, 512, 512, 256],
        )
    if variant['deeper_net']:
        print('deeper conv net')
        cnn_params.update(
            kernel_sizes=[3, 3, 3, 3, 3],
            n_channels=[32, 32, 32, 32, 32],
            strides=[1, 1, 1, 1, 1],
            paddings=[1, 1, 1, 1, 1],
            pool_sizes=[2, 2, 1, 1, 1],
            pool_strides=[2, 2, 1, 1, 1],
            pool_paddings=[0, 0, 0, 0, 0]
        )
    if variant['smaller_net']:
        print('smaller conv net')
        cnn_params.update(
            kernel_sizes=[3],
            n_channels=[32],
            strides=[1],
            paddings=[1],
            pool_sizes=[2],
            pool_strides=[2],
            pool_paddings=[0],
            hidden_sizes=[16,],
        )

    # Q-function input: 48x48x3 image, with the action appended as extra
    # fully-connected input; scalar Q-value output.
    cnn_params.update(
        input_width=48,
        input_height=48,
        input_channels=3,
        output_size=1,
        added_fc_input_size=action_dim,
        normalize_conv_activation=variant['normalize_conv_activation']
    )
    if variant['resnet_enc']:
        qf1 = VQVAEEncoderConcatCNN(**cnn_params)
        qf2 = VQVAEEncoderConcatCNN(**cnn_params)
        target_qf1 = VQVAEEncoderConcatCNN(**cnn_params)
        target_qf2 = VQVAEEncoderConcatCNN(**cnn_params)
    else:
        if variant['bottleneck']:
            qf1 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
            qf2 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
            # Encoder sharing is not supported for the bottleneck Q-nets.
            if variant['share_encoder']:
                raise NotImplementedError
            target_qf1 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
            target_qf2 = ConcatBottleneckCNN(action_dim, bottleneck_dim=variant['bottleneck_dim'],deterministic=variant['deterministic_bottleneck'])
        else:
            qf1 = ConcatCNN(**cnn_params)
            qf2 = ConcatCNN(**cnn_params)
            target_qf1 = ConcatCNN(**cnn_params)
            target_qf2 = ConcatCNN(**cnn_params)

    # Start the target networks from the same weights as the online networks.
    target_qf1.load_state_dict(qf1.state_dict())
    target_qf2.load_state_dict(qf2.state_dict())

    # Policy-side encoder: 256-dim feature head, no action input.  Note this
    # mutates the same cnn_params dict used for the Q-nets above.
    if variant['resnet_policy']:
        cnn_params.update(
            output_size=256,
            added_fc_input_size=0,
            hidden_sizes=[1024, 512],
            spectral_norm_fc=False,
            spectral_norm_conv=False,
            normalize_conv_activation=False,
        )
        policy_obs_processor = VQVAEEncoderCNN(**cnn_params)
    else:
        cnn_params.update(
            output_size=256,
            added_fc_input_size=0,
            hidden_sizes=[1024, 512],
            spectral_norm_fc=False,
            spectral_norm_conv=False,
            normalize_conv_activation=False,
        )
        policy_obs_processor = CNN(**cnn_params)
    policy = TanhGaussianPolicy(
        obs_dim=cnn_params['output_size'],
        action_dim=action_dim,
        hidden_sizes=[256, 256, 256],
        obs_processor=policy_obs_processor,
        shared_encoder=variant['share_encoder'],
    )

    # Evaluation uses the deterministic (mean) action of the policy.
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = MdpPathCollector(
        eval_env,
        eval_policy,
    )
    expl_path_collector = CustomMDPPathCollector(
        eval_env,
    )

    observation_key = 'image'
    replay_buffer = load_data_from_npy_chaining(variant, expl_env, observation_key, duplicate=variant['duplicate'], num_traj=variant['num_traj'])

    # Translate 0/1 rewards to +4/+10 rewards.
    if variant['use_positive_rew']:
        if set(np.unique(replay_buffer._rewards)).issubset({0, 1}):
            replay_buffer._rewards = replay_buffer._rewards * 6.0
            replay_buffer._rewards = replay_buffer._rewards + 4.0
        # Guard against double application / unexpected reward values.
        assert set(np.unique(replay_buffer._rewards)).issubset(
            set(6.0 * np.array([0, 1]) + 4.0))

    trainer = CQLTrainer(
        # environment
        env=eval_env,
        # networks
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        # l1/l2 regularization
        regularization=variant['regularization'],
        regularization_type=variant['regularization_type'],
        regularization_const=variant['regularization_const'],
        # bottleneck
        bottleneck=variant['bottleneck'],
        bottleneck_const=variant['bottleneck_const'],
        bottleneck_lagrange=variant['bottleneck_lagrange'],
        only_bottleneck = variant['only_bottleneck'],
        # dr3
        dr3=variant['dr3'],
        dr3_feat=variant['dr3_feat'],
        dr3_weight=variant['dr3_weight'],
        # logging (wandb is disabled in debug runs)
        log_dir = variant['log_dir'],
        wand_b=not variant['debug'],
        variant_dict=variant,
        validation=variant['val'],
        validation_buffer=None,
        # diagnostic
        no_td=variant['no_td'],
        no_data_qval=variant['no_data_qval'],
        # others
        **variant['trainer_kwargs']
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        eval_both=False,
        batch_rl=True,  # offline: train purely from the loaded buffer
        **variant['algorithm_kwargs']
    )
    video_func = VideoSaveFunction(variant)
    algorithm.post_epoch_funcs.append(video_func)

    algorithm.to(ptu.device)
    algorithm.train()
def enable_gpus(gpu_str):
    """Restrict CUDA to the comma-separated GPU ids in ``gpu_str``.

    An empty string leaves ``CUDA_VISIBLE_DEVICES`` untouched.  The original
    used ``gpu_str is not ""`` — an identity comparison with a literal, which
    is implementation-dependent and raises SyntaxWarning on CPython 3.8+; an
    equality check expresses the intent correctly.
    """
    if gpu_str != "":
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # Default experiment configuration; CLI flags below overwrite fields.
    # ------------------------------------------------------------------
    variant = dict(
        algorithm="CQL",
        version="normal",
        algorithm_kwargs=dict(
            num_epochs=3000,
            num_eval_steps_per_epoch=300,
            num_trains_per_train_loop=1000,
            num_expl_steps_per_train_loop=1000,
            min_num_steps_before_training=1000,
            max_path_length=30,
            batch_size=256,
        ),
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            policy_lr=1E-4,
            qf_lr=3E-4,
            reward_scale=1,
            use_automatic_entropy_tuning=True,
            # Target nets/ policy vs Q-function update
            policy_eval_start=10000,
            num_qs=2,
            # min Q
            temp=1.0,
            min_q_version=3,
            min_q_weight=5.0,
            # lagrange
            with_lagrange=False,  # Defaults to False
            lagrange_thresh=5.0,
            # extra params
            num_random=1,
            max_q_backup=False,
            deterministic_backup=False,
        ),
        cnn_params=dict(
            kernel_sizes=[3, 3, 3],
            n_channels=[16, 16, 16],
            strides=[1, 1, 1],
            hidden_sizes=[1024, 512, 256],
            paddings=[1, 1, 1],
            pool_type='max2d',
            pool_sizes=[2, 2, 1],  # the one at the end means no pool
            pool_strides=[2, 2, 1],
            pool_paddings=[0, 0, 0],
            image_augmentation=True,
            image_augmentation_padding=4,
            spectral_norm_conv=False,
            spectral_norm_fc=False,
        ),
        dump_video_kwargs=dict(
            imsize=48,
            save_video_period=1,
        ),
    )

    parser = argparse.ArgumentParser()
    # environment
    parser.add_argument("--env", type=str, required=True)
    parser.add_argument("--eval_every_n", default=1, type=int)
    parser.add_argument("--max-path-length", type=int, required=True)
    parser.add_argument("--num-eval-per-epoch", type=int, default=5)
    parser.add_argument('--eval_num', default=0, type=int)
    # replay buffer
    parser.add_argument("--use-positive-rew", action="store_true", default=False)
    parser.add_argument("--prior-buffer", type=str, default=DEFAULT_PRIOR_BUFFER)
    parser.add_argument("--task-buffer", type=str, default=DEFAULT_TASK_BUFFER)
    parser.add_argument("--duplicate", action="store_true", default=False)
    parser.add_argument("--val", action="store_true", default=False)
    # BUGFIX: `args.buffer` and `args.azure` are read below (process_buffer,
    # variant['bufferidx']) but were never registered, which crashed with
    # AttributeError at startup.
    parser.add_argument("--buffer", type=int, default=0,
                        help="Which prior/task buffer pairing to load (0-2)")
    parser.add_argument("--azure", action="store_true", default=False,
                        help="Load prior data from the Azure home-dir layout")
    # reduce dataset size either with proportion of data or absolute number of trajectories
    parser.add_argument("--prob", default=1, type=float)
    parser.add_argument('--num_traj', default=0, type=int)
    # cql hyperparams
    parser.add_argument("--min-q-version", default=3, type=int,
                        help=("min_q_version = 3 (CQL(H)), "
                              "version = 2 (CQL(rho))"))
    parser.add_argument("--min-q-weight", default=1.0, type=float,
                        help="Value of alpha in CQL")
    parser.add_argument("--use-lagrange", action="store_true", default=False)
    parser.add_argument("--lagrange-thresh", default=5.0, type=float,
                        help="Value of tau, used with --use-lagrange")
    parser.add_argument("--max-q-backup", action="store_true", default=False,
                        help="For max_{a'} backups, set this to true")
    parser.add_argument("--no-deterministic-backup", action="store_true",
                        default=False,
                        help="By default, deterministic backup is used")
    parser.add_argument("--policy-eval-start", default=10000,
                        type=int)
    # architecture design choices
    parser.add_argument("--bigger_net", action="store_true", default=False)
    parser.add_argument('--smaller_net', action='store_true')
    parser.add_argument("--deeper_net", action="store_true", default=False)
    parser.add_argument("--resnet_enc", action="store_true", default=False)
    parser.add_argument("--resnet_policy", action="store_true", default=False)
    parser.add_argument("--share_encoder", action="store_true", default=False)
    parser.add_argument('--only_one', action='store_true')
    # BUGFIX: read below via args.spectral_norm_conv/fc but never registered.
    parser.add_argument("--spectral_norm_conv", action="store_true", default=False)
    parser.add_argument("--spectral_norm_fc", action="store_true", default=False)
    # dr3
    parser.add_argument("--dr3", action="store_true", default=False)
    parser.add_argument("--dr3_feat", action="store_true", default=False)
    parser.add_argument("--dr3_weight", default=0.001, type=float)
    # l1/l2 regularization
    parser.add_argument('--regularization', action='store_true')
    parser.add_argument('--regularization_type', type=str, default='l1')
    parser.add_argument('--regularization_const', type=float, default=1)
    # dropout
    parser.add_argument('--dropout', action='store_true')
    parser.add_argument('--dropout_prob', type=float, default=0.0)
    # bottleneck
    parser.add_argument("--deterministic_bottleneck", action="store_true", default=False)
    parser.add_argument("--only_bottleneck", action="store_true", default=False)
    parser.add_argument("--bottleneck", action='store_true')
    parser.add_argument('--bottleneck_const', type=float, default=0.5)
    parser.add_argument('--bottleneck_dim', type=int, default=16)
    parser.add_argument('--bottleneck_lagrange', action='store_true')
    # diagnostics
    parser.add_argument('--normalize_conv_activation', action='store_true')
    parser.add_argument('--no_td', action='store_true')
    parser.add_argument('--no_data_qval', action='store_true')
    parser.add_argument('--clip_grad_val', type=float, default=10)
    # other hyperparams
    parser.add_argument("--gpu", default='0', type=str)
    parser.add_argument("--discount", default=0.99, type=float)
    parser.add_argument("--policy-lr", default=1e-4, type=float)
    parser.add_argument("--seed", default=10, type=int)
    parser.add_argument("--name", default='test', type=str)

    args = parser.parse_args()
    enable_gpus(args.gpu)

    # ------------------------------------------------------------------
    # Mirror the CLI flags into the variant dict (variant is logged with
    # the experiment, so it records the full configuration).
    # ------------------------------------------------------------------
    variant['no_td'] = args.no_td
    variant['no_data_qval'] = args.no_data_qval
    variant['clip_grad_val'] = args.clip_grad_val
    variant['smaller_net'] = args.smaller_net
    variant['regularization'] = args.regularization
    variant['regularization_type'] = args.regularization_type
    variant['regularization_const'] = args.regularization_const
    variant['dropout'] = args.dropout
    variant['dropout_prob'] = args.dropout_prob
    variant['trainer_kwargs']['discount'] = args.discount
    variant['bigger_net'] = args.bigger_net
    variant['deeper_net'] = args.deeper_net
    variant['resnet_enc'] = args.resnet_enc
    variant['resnet_policy'] = args.resnet_policy
    variant['share_encoder'] = args.share_encoder
    variant['normalize_conv_activation'] = args.normalize_conv_activation
    variant['spectral_norm_conv'] = args.spectral_norm_conv
    variant['spectral_norm_fc'] = args.spectral_norm_fc
    variant['env'] = args.env
    variant['val'] = args.val
    variant['algorithm_kwargs']['max_path_length'] = args.max_path_length
    variant['algorithm_kwargs']['num_eval_steps_per_epoch'] = \
        args.num_eval_per_epoch*args.max_path_length
    variant['algorithm_kwargs']['eval_every_n_epochs'] = args.eval_every_n
    variant['prior_buffer'] = args.prior_buffer
    variant['task_buffer'] = args.task_buffer
    variant['bottleneck'] = args.bottleneck
    variant['bottleneck_const'] = args.bottleneck_const
    variant['dr3'] = args.dr3
    variant['dr3_feat'] = args.dr3_feat
    variant['dr3_weight'] = args.dr3_weight
    variant['bottleneck_lagrange'] = args.bottleneck_lagrange
    variant['bottleneck_dim'] = args.bottleneck_dim
    variant['deterministic_bottleneck'] = args.deterministic_bottleneck
    variant['only_bottleneck'] = args.only_bottleneck
    variant['num_traj'] = args.num_traj
    variant['num_sample'] = args.eval_num
    variant['debug'] = False
    # (a second, duplicate `discount` assignment was removed here)
    variant['trainer_kwargs']['max_q_backup'] = args.max_q_backup
    variant['trainer_kwargs']['deterministic_backup'] = \
        not args.no_deterministic_backup
    variant['trainer_kwargs']['min_q_weight'] = args.min_q_weight
    variant['trainer_kwargs']['policy_lr'] = args.policy_lr
    variant['trainer_kwargs']['min_q_version'] = args.min_q_version
    variant['trainer_kwargs']['policy_eval_start'] = args.policy_eval_start
    variant['trainer_kwargs']['lagrange_thresh'] = args.lagrange_thresh
    variant['trainer_kwargs']['with_lagrange'] = args.use_lagrange
    variant['duplicate'] = args.duplicate
    # Translate 0/1 rewards to +4/+10 rewards.
    variant['use_positive_rew'] = args.use_positive_rew
    variant['seed'] = args.seed

    buffers = process_buffer(args)
    variant['buffer'] = buffers
    variant['bufferidx'] = args.buffer
    # Resolved buffer specs override the raw CLI paths stored above.
    variant['prior_buffer'] = buffers[0]
    variant['task_buffer'] = buffers[1]

    ptu.set_gpu_mode(True)

    # Prefer the custom log root when it exists on this machine.
    if os.path.isdir(CUSTOM_LOG_DIR):
        base_log_dir = CUSTOM_LOG_DIR
    else:
        base_log_dir = None
    variant['base_log_dir'] = base_log_dir
    log_dir = setup_logger(args.name, variant=variant, base_log_dir=base_log_dir,
                           snapshot_mode='gap_and_last', snapshot_gap=10,)
    variant['log_dir'] = log_dir
    experiment(variant)
|
{"hexsha": "38d98342676663bd3e80c762998c82a0c49b0a91", "size": 17889, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/cql_workflow.py", "max_stars_repo_name": "Asap7772/OfflineRlWorkflow", "max_stars_repo_head_hexsha": "d9589bcd752616ddd5a798120227e2bcdb1d8e77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/cql_workflow.py", "max_issues_repo_name": "Asap7772/OfflineRlWorkflow", "max_issues_repo_head_hexsha": "d9589bcd752616ddd5a798120227e2bcdb1d8e77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/cql_workflow.py", "max_forks_repo_name": "Asap7772/OfflineRlWorkflow", "max_forks_repo_head_hexsha": "d9589bcd752616ddd5a798120227e2bcdb1d8e77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-09T16:31:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T16:31:54.000Z", "avg_line_length": 38.8047722343, "max_line_length": 148, "alphanum_fraction": 0.654089105, "include": true, "reason": "import numpy", "num_tokens": 4432}
|
#include <boost/random/uniform_int_distribution.hpp>
#include <boost/random/random_device.hpp>
#include <limits>
#include "random.hpp"
#include "fast_hash.hpp"
namespace prologcoin { namespace common {
// True when deterministic "testing" randomness is enabled via set_for_testing().
bool random::for_testing_ = false;
// Hash-chain state backing the deterministic testing generator below.
static fast_hash testing_rnd_;
// Entropy source used in normal (non-testing) operation.
static boost::random::random_device random_;

// Switch the whole module between the real entropy source and the
// reproducible hash-chain generator (used by tests).
void random::set_for_testing(bool for_testing)
{
    for_testing_ = for_testing;
}
// Deterministic pseudo-random generator used when random::set_for_testing(true)
// is in effect. Each call feeds the current value of the file-static
// fast_hash state back into the hasher and returns the new 32-bit value,
// so the sequence is reproducible across runs.
//
// Provides the members boost::random::uniform_int_distribution expects of
// a generator: result_type, min(), max() and operator().
class for_testing_random {
public:
    typedef uint32_t result_type;

    // Advance the shared testing hash state and return the next value.
    uint32_t operator () () {
	testing_rnd_ << static_cast<int>(static_cast<uint32_t>(testing_rnd_));
	return static_cast<uint32_t>(testing_rnd_);
    }

    // Output covers the full 32-bit range. Fix: previously these were
    // non-static member functions returning a (meaningless) const value;
    // static matches the standard generator requirements and is still
    // callable through an instance.
    static uint32_t min() {
	return std::numeric_limits<uint32_t>::min();
    }
    static uint32_t max() {
	return std::numeric_limits<uint32_t>::max();
    }
    // Fix: removed the unused private 'static fast_hash random_;' member,
    // which was never defined and shadowed the file-static random_.
};

static for_testing_random for_testing_random_;
std::string random::next(size_t entropy_bits)
{
std::string chars("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"1234567890");
boost::random::uniform_int_distribution<> index_dist(0, chars.size() - 1);
size_t n = (entropy_bits * 1000 + 5953) / 5954;
std::string s(n, ' ');
for (size_t i = 0; i < n; ++i) {
if (for_testing_) {
s[i] = chars[index_dist(for_testing_random_)];
} else {
s[i] = chars[index_dist(random_)];
}
}
return s;
}
// Return a pseudo-random integer in [0, max).
//
// NOTE(review): `% max` introduces modulo bias whenever max does not evenly
// divide 2^32 (negligible for small max), and max <= 0 yields a division by
// zero / implementation-defined modulus — callers are expected to pass
// max > 0. Confirm before relying on edge cases.
int random::next_int(int max)
{
    if (for_testing_) {
        // Deterministic path for reproducible tests.
        return static_cast<int>(for_testing_random_() % max);
    } else {
        return static_cast<int>(static_cast<unsigned int>(random_()) % max);
    }
}
// Return a pseudo-random integer in [0, max).
//
// Fix: the previous implementation reduced a single 32-bit draw (cast
// through `unsigned int`), so for max > 2^32 values at or above 2^32 were
// unreachable. Two 32-bit draws are now combined into a full 64-bit value
// before the reduction.
//
// NOTE: in testing mode this consumes two generator draws per call, so
// reproducible sequences differ from the old implementation. Modulo bias
// remains (negligible for max << 2^64); max == 0 is undefined — callers
// must pass max > 0.
uint64_t random::next_int(uint64_t max)
{
    uint64_t hi, lo;
    if (for_testing_) {
	hi = for_testing_random_();
	lo = for_testing_random_();
    } else {
	hi = static_cast<uint32_t>(random_());
	lo = static_cast<uint32_t>(random_());
    }
    return ((hi << 32) | lo) % max;
}
// Fill bytes[0..n) with pseudo-random octets, one next_int(256) draw each.
void random::next_bytes(uint8_t *bytes, size_t n)
{
    uint8_t *p = bytes;
    uint8_t * const end = bytes + n;
    while (p != end) {
	*p++ = next_int(256);
    }
}
}}
|
{"hexsha": "66f80429889196b088fe4f8b3bf2c8ddece87635", "size": 1948, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/common/random.cpp", "max_stars_repo_name": "datavetaren/prologcoin", "max_stars_repo_head_hexsha": "8583db7d99a8007f634210aefdfb92bf45596fd3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38.0, "max_stars_repo_stars_event_min_datetime": "2017-06-14T07:13:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T15:41:25.000Z", "max_issues_repo_path": "src/common/random.cpp", "max_issues_repo_name": "datavetaren/prologcoin", "max_issues_repo_head_hexsha": "8583db7d99a8007f634210aefdfb92bf45596fd3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2017-07-01T11:13:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-27T05:40:30.000Z", "max_forks_repo_path": "src/common/random.cpp", "max_forks_repo_name": "datavetaren/prologcoin", "max_forks_repo_head_hexsha": "8583db7d99a8007f634210aefdfb92bf45596fd3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-07-05T20:38:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-02T04:30:46.000Z", "avg_line_length": 22.3908045977, "max_line_length": 78, "alphanum_fraction": 0.6848049281, "num_tokens": 505}
|
'''
Created on 4 Sep 2017
@author: ywz
'''
import numpy
class Krylov:
    """Krylov-subspace solvers; currently provides conjugate gradient (CG)."""

    def __init__(self):
        pass

    def cg(self, Ax, b, cg_iters=10, verbose=False, eps=1e-10):
        """Solve A x = b for symmetric positive-definite A by conjugate gradient.

        Parameters:
            Ax (callable): computes the matrix-vector product A.dot(v).
            b (array-like): right-hand side. Fix: converted to a float array
                up front — integer inputs previously crashed on the in-place
                float updates (x += v * p).
            cg_iters (int): maximum number of iterations.
            verbose (bool): if True, print residual/solution norms per step.
            eps (float): stop once the squared residual norm drops below this.

        Returns:
            numpy.ndarray: approximate solution x.
        """
        b = numpy.asarray(b, dtype=float)
        x = numpy.zeros_like(b)
        r = b.copy()  # residual b - A x, with initial x = 0
        p = b.copy()  # initial search direction
        r_dot_r = r.dot(r)

        for _ in range(cg_iters):
            z = Ax(p)
            v = r_dot_r / p.dot(z)  # step length along p
            x += v * p
            r -= v * z
            new_r_dot_r = r.dot(r)
            beta = new_r_dot_r / r_dot_r
            p = r + beta * p  # next direction, conjugate to the previous ones
            r_dot_r = new_r_dot_r
            if r_dot_r < eps:
                break
            if verbose:
                print("residual norm: {:5f}, solution norm: {:5f}".format(r_dot_r, numpy.linalg.norm(x)))
        return x
if __name__ == "__main__":
    # Smoke test: build a random SPD system, solve it exactly and with CG,
    # and print both solutions for visual comparison.
    from numpy.linalg import inv

    dim = 5
    mat = numpy.random.rand(dim, dim)
    mat = mat.T.dot(mat) + 0.01 * numpy.eye(dim)  # make it SPD
    rhs = numpy.random.rand(dim)
    exact = inv(mat).dot(rhs)
    solver = Krylov()
    approx = solver.cg(lambda v: mat.dot(v), rhs, verbose=True)
    print(exact)
    print(approx)
|
{"hexsha": "cf8fd77c686bd4f71430cd820b725f230506993b", "size": 1147, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter03/krylov.py", "max_stars_repo_name": "jvstinian/Python-Reinforcement-Learning-Projects", "max_stars_repo_head_hexsha": "6c97c68351fc4af426cb5c3583d75aebfabac8aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 114, "max_stars_repo_stars_event_min_datetime": "2018-10-20T15:32:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T14:16:25.000Z", "max_issues_repo_path": "Chapter03/krylov.py", "max_issues_repo_name": "jvstinian/Python-Reinforcement-Learning-Projects", "max_issues_repo_head_hexsha": "6c97c68351fc4af426cb5c3583d75aebfabac8aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-10-18T12:39:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T03:28:19.000Z", "max_forks_repo_path": "Chapter03/krylov.py", "max_forks_repo_name": "jvstinian/Python-Reinforcement-Learning-Projects", "max_forks_repo_head_hexsha": "6c97c68351fc4af426cb5c3583d75aebfabac8aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 72, "max_forks_repo_forks_event_min_datetime": "2018-10-12T13:02:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T13:03:26.000Z", "avg_line_length": 20.1228070175, "max_line_length": 101, "alphanum_fraction": 0.4472537053, "include": true, "reason": "import numpy,from numpy", "num_tokens": 327}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
General functions and plotting functions
"""
__author__ = "Josephine Yates"
__email__ = "josephine.yates@yahoo.fr"
import argparse
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind
import scipy.stats as st
import scipy.sparse as sp
from scipy.stats import fisher_exact
import networkx as nx
from community import community_louvain
from scipy.stats import kruskal
import seaborn as sns
import collections as collec
import os
import xml.etree.ElementTree as ET
import operator
import pandas
import csv
from scipy.stats import mannwhitneyu, chisquare
from sklearn.metrics.pairwise import pairwise_distances
from docx import Document
from docx.shared import Inches
import ast
import logging
import scipy
import PicSureHpdsLib
import PicSureClient
## General methods
def removekey(d, key):
    """Return a shallow copy of dictionary *d* with *key* removed.

    Parameters: d (dict): source dictionary (left unmodified)
                key: the key to drop; raises KeyError if absent
    Returns: dict: copy of d without key
    """
    copied = dict(d)
    copied.pop(key)
    return copied
def get_CI(a):
    """Return the 95% Student-t confidence interval for the mean of *a*.

    Parameters: a (list): list or array of observations
    Returns: (tuple) lower and upper bounds of the 95% confidence interval
    """
    dof = len(a) - 1
    center = np.mean(a)
    spread = scipy.stats.sem(a, nan_policy='omit')
    return scipy.stats.t.interval(0.95, dof, loc=center, scale=spread)
def get_data_df(column_head, resource):
    """Download PIC-SURE data as a pandas DataFrame indexed by UDN ID.

    Parameters: column_head (str): substring selecting the columns to fetch
                    (e.g. column_head="this string" selects columns whose
                    header contains "this string").
                resource: PIC-SURE API resource object
    Returns: pandas.core.frame.DataFrame indexed by UDN ID
    """
    matching = resource.dictionary().find(column_head)
    query = resource.query()
    # Fetch the matching columns plus the UDN ID column used as the index.
    query.select().add(matching.keys())
    query.select().add('\\000_UDN ID\\')
    frame = query.getResultsDataFrame()
    frame.set_index("\\000_UDN ID\\", inplace=True)
    # Reset the shared query's selection for subsequent callers.
    query.select().clear()
    return frame
def create_table(ind_clusters,a_or_p,clusters_un,avg_HPO_clusters,CI_HPO_clusters,gender_distrib,OR_diag,IC_OR,avg_onset,CI_onset,avg_UDN_eval,CI_UDN_eval,docname):
    """Creates a Word document with an automatically rendered table of cluster characteristics

    Parameters: ind_clusters (list): cluster indices; its length is the number of clusters
                a_or_p: str, "A" or "P", suffix used in the cluster column headers
                clusters_un: dictionary with cluster as key and list of UDN IDs as value
                avg_HPO_clusters: dictionary with cluster as key and avg HPO per patient as value
                CI_HPO_clusters: dictionary with cluster as key and tuple of 95% CI for avg HPO as value
                gender_distrib: dictionary with cluster as key and count of female/male as value
                OR_diag: dictionary with cluster as key and OR as value
                IC_OR: dictionary with cluster as key and dict {"low","up"} of 95% CI for OR as value
                avg_onset: dictionary with cluster as key and avg onset as value
                CI_onset: dictionary with cluster as key and tuple of 95% CI for onset as value
                avg_UDN_eval: dictionary with cluster as key and avg UDN eval as value
                CI_UDN_eval: dictionary with cluster as key and tuple of 95% CI for avg UDN eval as value
                docname: str, name of document to save (".docx" is appended)
    Returns: None
            Saves a Word document with the table of cluster characteristics
    """
    document = Document()
    document.add_heading('Tables'+docname, 0)
    # One label column plus one column per cluster.
    table = document.add_table(rows=1, cols=len(ind_clusters)+1)
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = 'Clusters'
    for i in range(1,len(ind_clusters)+1):
        hdr_cells[i].text = "Cluster C"+str(i)+a_or_p
    # Row: patient count per cluster.
    row_cells = table.add_row().cells
    row_cells[0].text = "# of patients per cluster"
    for i in range(1,len(ind_clusters)+1):
        row_cells[i].text = str(len(clusters_un[ind_clusters[i-1]]))
    # Row: female:male ratio, rendered as "<x>:10".
    row_cells = table.add_row().cells
    row_cells[0].text = "Female:male ratio"
    for i in range(1,len(ind_clusters)+1):
        row_cells[i].text = str(int(np.round_(gender_distrib[ind_clusters[i-1]]["Female"]*10/gender_distrib[ind_clusters[i-1]]["Male"])))+":10"
    # Rows below all render as "value (95% CI: low - high)", one decimal.
    row_cells = table.add_row().cells
    row_cells[0].text = "Avg # of HPO terms per patient"
    for i in range(1,len(ind_clusters)+1):
        row_cells[i].text = str(np.round_(avg_HPO_clusters[ind_clusters[i-1]],decimals=1))+" (95% CI: "+str(np.round_(CI_HPO_clusters[ind_clusters[i-1]][0],decimals=1))+" - "+str(np.round_(CI_HPO_clusters[ind_clusters[i-1]][1],decimals=1))+")"
    row_cells = table.add_row().cells
    row_cells[0].text = "Odds ratio diagnosed"
    for i in range(1,len(ind_clusters)+1):
        # Note: the OR confidence interval is a dict ("low"/"up"), unlike the
        # tuple-valued CIs used by the other rows.
        row_cells[i].text = str(np.round_(OR_diag[ind_clusters[i-1]],decimals=1))+" (95% CI: "+str(np.round_(IC_OR[ind_clusters[i-1]]["low"],decimals=1))+" - "+str(np.round_(IC_OR[ind_clusters[i-1]]["up"],decimals=1))+")"
    row_cells = table.add_row().cells
    row_cells[0].text = "Average age at onset in y"
    for i in range(1,len(ind_clusters)+1):
        row_cells[i].text = str(np.round_(avg_onset[ind_clusters[i-1]],decimals=1))+" (95% CI: "+str(np.round_(CI_onset[ind_clusters[i-1]][0],decimals=1))+" - "+str(np.round_(CI_onset[ind_clusters[i-1]][1],decimals=1))+")"
    row_cells = table.add_row().cells
    row_cells[0].text = "Average age at UDN evaluation in y"
    for i in range(1,len(ind_clusters)+1):
        row_cells[i].text = str(np.round_(avg_UDN_eval[ind_clusters[i-1]],decimals=1))+" (95% CI: "+str(np.round_(CI_UDN_eval[ind_clusters[i-1]][0],decimals=1))+" - "+str(np.round_(CI_UDN_eval[ind_clusters[i-1]][1],decimals=1))+")"
    document.add_page_break()
    document.save(docname+'.docx')
def create_stat_table(kr_HPO_ad,kr_HPO_ped,kr_UDN_ad,kr_UDN_ped,kr_onset_ad,kr_onset_ped,docname):
    """Creates a Word document saving Kruskal-Wallis results for the clusters

    Each kr_* argument is a (statistic, p-value) tuple as returned by
    scipy.stats.kruskal.

    Parameters: kr_HPO_ad (tuple): Kruskal-Wallis results of HPO for adult clusters
                kr_HPO_ped (tuple): Kruskal-Wallis results of HPO for pediatric clusters
                kr_UDN_ad (tuple): Kruskal-Wallis results of UDN eval for adult clusters
                kr_UDN_ped (tuple): Kruskal-Wallis results of UDN eval for pediatric clusters
                kr_onset_ad (tuple): Kruskal-Wallis results of onset age for adult clusters
                kr_onset_ped (tuple): Kruskal-Wallis results of onset age for pediatric clusters
                docname (str): name of doc to save (".docx" is appended)
    Returns: None
            Saves the Word document under docname
    """
    document = Document()
    document.add_heading('Tables stats '+docname, 0)
    # NOTE(review): the table has 3 columns but only header cells 0 and 1 are
    # filled; cell 2 of the header row is left blank while data rows use
    # cells 1 (adult) and 2 (pediatric) — confirm this layout is intended.
    table = document.add_table(rows=1, cols=3)
    hdr_cells = table.rows[0].cells
    hdr_cells[0].text = 'Variable'
    hdr_cells[1].text = 'Kruskal-Wallis H index and p-value'
    row_cells = table.add_row().cells
    row_cells[1].text = "Adult"
    row_cells[2].text = "Pediatric"
    # Each statistic row: H rounded to one decimal, p in scientific notation.
    row_cells = table.add_row().cells
    row_cells[0].text = "Avg # of HPO terms per patient"
    row_cells[1].text = "H = "+str(np.round_(kr_HPO_ad[0],decimals=1))+ \
                    " , p = "+str(np.format_float_scientific(kr_HPO_ad[1],precision=2))
    row_cells[2].text = "H = "+str(np.round_(kr_HPO_ped[0],decimals=1))+ \
                    " , p = "+str(np.format_float_scientific(kr_HPO_ped[1],precision=2))
    row_cells = table.add_row().cells
    row_cells[0].text = "Average age at onset in y"
    row_cells[1].text = "H = "+str(np.round_(kr_onset_ad[0],decimals=1))+ \
                    " , p = "+str(np.format_float_scientific(kr_onset_ad[1],precision=2))
    row_cells[2].text = "H = "+str(np.round_(kr_onset_ped[0],decimals=1))+ \
                    " , p = "+str(np.format_float_scientific(kr_onset_ped[1],precision=2))
    row_cells = table.add_row().cells
    row_cells[0].text = "Average age at UDN evaluation in y"
    row_cells[1].text = "H = "+str(np.round_(kr_UDN_ad[0],decimals=1))+ \
                    " , p = "+str(np.format_float_scientific(kr_UDN_ad[1],precision=2))
    row_cells[2].text = "H = "+str(np.round_(kr_UDN_ped[0],decimals=1))+ \
                    " , p = "+str(np.format_float_scientific(kr_UDN_ped[1],precision=2))
    document.add_page_break()
    document.save(docname+'.docx')
## Plotting functions
def show_distrib_HPO(HPO_list,name):
    """Plots the distribution of count of HPO terms per patient (log-log)

    Parameters : HPO_list (list): list of counts for each patient of HPO terms
                 name (str): string, title of the figure
    Returns : None
             Shows and saves a matplotlib plot of the distribution of HPO counts
    """
    distrib=collec.Counter(HPO_list)
    X=[key for key in distrib.keys()]
    Y=[distrib[key] for key in distrib.keys()]
    plt.figure(figsize=(20,15))
    plt.plot(X,Y,"o")
    plt.xlabel("Number of HPO terms",fontsize=40)
    plt.ylabel("Count of patients",fontsize=40)
    plt.title(name,fontsize=50)
    plt.xticks(fontsize=40)
    plt.yticks(fontsize=40)
    plt.yscale("log")
    plt.xscale("log")
    # Fix: plt.axes() created a brand-new empty axes on top of the plotted
    # one (and is deprecated usage); operate on the current axes instead.
    plt.gca().set_ylim(None,200)
    # Fix: savefig must run before show() — with non-interactive backends
    # show() leaves the figure cleared, so the saved file used to be blank.
    plt.savefig("HPO_terms_log")
    plt.show()
def show_age_distrib(demographics):
    """Show the age distribution in the network

    Parameters: demographics (pandas.core.DataFrame): columns containing age at symptom onset
    Returns: None
            Shows the age distribution as a plot
    """
    # Fix: X was previously built from the NaN-filled series while Y was
    # counted on the raw series, so the 0-years bucket silently excluded
    # patients with a missing age. Count once on the filled series.
    counts = collec.Counter(demographics["\\00_Demographics\\Age at symptom onset in years\\"].fillna(0))
    X = list(counts)
    Y = [counts[age] for age in X]
    plt.figure(figsize=(20,20))
    plt.plot(X,Y)
    plt.title("Age at symptom onset (in y) distribution in UDN")
    plt.xlabel("Age at symptom onset (in y)")
    plt.ylabel("Count of patients")
    plt.show()
def plot_distribution_genomic_data(genomic_data,namefile,var_or_gene):
    """Show the distribution of candidate genes or variants per patient in the UDN database

    Parameters: genomic_data (dict): UDN ID as key, list of dicts describing the
                    considered genomic data as value
                namefile (str): file name to save the figure under
                var_or_gene (str): "variants" if variants are considered, "genes" else
    Returns: None
            Prints the total number of candidate genes/variants and shows the
            distribution as a scatter plot
    """
    # Tally how many patients have each candidate count.
    per_patient = collec.Counter(len(records) for records in genomic_data.values())
    counts = list(per_patient)
    frequencies = [per_patient[c] for c in counts]
    total = np.sum([c * f for c, f in zip(counts, frequencies)])
    print("Number of total candidate ",var_or_gene," : ",total)
    plt.figure(figsize=(10,5))
    plt.plot(counts,frequencies,"o")
    plt.xticks(np.arange(0,18))
    plt.title("Distribution of number of candidate "+var_or_gene+" per patient")
    plt.xlabel("Number of candidate "+var_or_gene)
    plt.ylabel("Count of patients")
    plt.savefig(namefile,bbox_inches="tight",dpi=300)
    plt.show()
def heatmap_phen(clusters_un,phen_ranked,ind_groups,ad_or_ped,nb_phen,figsize,vmin,vmax,figname):
    """Displays heatmap of phenotype enrichment analysis for each cluster with analyzed composition

    Parameters: clusters_un (dict): dictionary with cluster number as key and list of patients in the
                                    cluster as value
                phen_ranked (dict): dictionary with cluster number as key, two arrays as value,
                                    one with list of phenotypes ranked according to composition,
                                    second with composition of each phenotype
                ind_groups (list): indices to take into consideration
                ad_or_ped (str): "adult" or "pediatric", changes the display
                nb_phen (int): number of best phen to display
                figsize (int): size of the figure displayed
                vmin (int): minimum value for the heatmap (here percentage)
                vmax (int): max value for the heatmap (here percentage)
                figname (str): name under which you save the heatmap
    Returns: None
            Shows and saves the heatmap of phenotype enrichment for each cluster
    Raises: ValueError if ad_or_ped is neither "adult" nor "pediatric"
    """
    if ad_or_ped=="adult":
        cluster_list=["Cluster C"+str(cluster+1)+"A, N="+ \
                        str(len(clusters_un[cluster])) for cluster in ind_groups]
    elif ad_or_ped=="pediatric":
        cluster_list=["Cluster C"+str(cluster+1)+"P, N="+ \
                        str(len(clusters_un[cluster])) for cluster in ind_groups]
    else:
        # Fix: previously an unrecognized value fell through with
        # cluster_list undefined, causing a confusing NameError below.
        raise ValueError("ad_or_ped must be 'adult' or 'pediatric', got {!r}".format(ad_or_ped))
    # Collect the union of the top nb_phen phenotypes of every cluster,
    # skipping phenotypes already contributed by an earlier cluster.
    # NOTE(review): assumes each cluster has at least nb_phen phenotypes not
    # yet collected; otherwise the while loop runs out of entries — confirm.
    list_phen_max=[]
    for cluster in ind_groups:
        i,j=0,0
        while j<nb_phen:
            if not(phen_ranked[cluster][0][i]) in list_phen_max:
                list_phen_max.append(phen_ranked[cluster][0][i])
                j+=1
            i+=1
    # One heatmap row per phenotype, one column per cluster (percentages).
    heatmap_mat=[[] for i in range(len(list_phen_max))]
    for i,phen in enumerate(list_phen_max):
        for cluster in ind_groups:
            if phen in phen_ranked[cluster][0]:
                indphen=np.where(phen_ranked[cluster][0]==phen)[0][0]
                heatmap_mat[i].append(phen_ranked[cluster][1][indphen]*100)
            else:
                heatmap_mat[i].append(0)
    sns.set()
    fig,ax=plt.subplots(figsize=(figsize,figsize))
    sns.heatmap(heatmap_mat,cbar=True,cmap="YlGnBu",xticklabels=cluster_list,
                    yticklabels=list_phen_max,ax=ax,vmin=vmin,vmax=vmax)
    plt.ylabel("Phenotypes")
    plt.savefig(figname+".svg",bbox_inches="tight",dpi=350)
    plt.show()
def heatmap_real(count_dg_real,clusters_un,nb_outliers,figsize,vmin,vmax,figname):
    """Show the heatmap of clusters according to disease group composition

    Parameters: count_dg_real (dict): dictionary with cluster number as key, dictionary as value,
                        with disease group as key and count of disease group
                clusters_un (dict): dictionary with cluster number as key, list of patients as value
                nb_outliers (int): nb of outliers in network (adult or pediatric)
                figsize (int): size of the figure
                vmin (int): minimum value for the heatmap (here percentage)
                vmax (int): max value for the heatmap (here percentage)
                figname (str): name of figure to save (".png" is appended)
    Returns: None
            Shows and saves the heatmap of disease-group composition per cluster
    """
    # Fixed mapping from Orphanet-style disease-group identifier to its
    # heatmap column index.
    type_disease={'chromosomal_anomalies': 0, 'rare_abdominal_surgical_diseases': 1, 'rare_allergic_diseases': 2,
                'rare_bone_diseases': 3, 'rare_cardiac_diseases': 4, 'rare_cardiac_malformations': 5,
                'rare_circulatory_system_diseases': 6, 'rare_developmental_anomalies_during_embryogenesis': 7,
                'rare_endocrine_diseases': 8, 'rare_eye_diseases': 9, 'rare_gastroenterological_diseases': 10,
                'rare_genetic_diseases': 11, 'rare_gynaecological_and_obstetric_diseases': 12, 'rare_haematological_diseases': 13,
                'rare_hepatic_diseases': 14, 'rare_immunological_diseases': 15, 'rare_inborn_errors_of_metabolism': 16,
                'rare_infectious_diseases': 17, 'rare_infertility': 18, 'rare_intoxications': 19, 'rare_neoplastic_diseases': 20,
                'rare_neurological_diseases': 21, 'rare_odontological_diseases': 22, 'rare_otorhinolaryngological_diseases': 23,
                'rare_renal_diseases': 24, 'rare_respiratory_diseases': 25, 'rare_rheumatological_diseases_of_childhood': 26,
                'rare_skin_diseases': 27, 'rare_sucking_swallowing_disorders': 28, 'rare_surgical_maxillo_facial_diseases': 29,
                'rare_surgical_thoracic_diseases': 30, 'rare_systemic_and_rheumatological_diseases': 31,
                'rare_teratologic_disorders': 32, 'rare_urogenital_diseases': 33}
    # One heatmap row per cluster, one column per disease group.
    heatmap_mat=[[0 for i in range(len(type_disease))] for j in range(len(count_dg_real))]
    cluster_list=np.sort([cluster for cluster in count_dg_real])
    # Human-readable column labels: underscores replaced by spaces.
    yl=["" for i in range(len(type_disease))]
    for dis in type_disease:
        listname=dis.split(".")[0].split("_")
        name=""
        for string in listname:
            name+=string+" "
        yl[type_disease[dis]]=name
    # Fill in the counts; rows are ordered by sorted cluster id.
    for cluster in count_dg_real:
        for dg in count_dg_real[cluster]:
            index=cluster_list.tolist().index(cluster)
            heatmap_mat[index][type_disease[dg]]=count_dg_real[cluster][dg]
    # NOTE(review): row labels below are built in count_dg_real's iteration
    # order while rows were filled in sorted-id order, and the last label is
    # overwritten as the outlier group — assumes iteration order matches the
    # sorted order and that the highest cluster id is the outlier bucket;
    # confirm against callers.
    cluster_list=["Cluster C"+str(cluster+1)+", N="+str(len(clusters_un[cluster])) for cluster in count_dg_real]
    cluster_list[len(cluster_list)-1]="Outliers, N={}".format(nb_outliers)
    sns.set()
    fig,ax=plt.subplots(figsize=(figsize*1.3,figsize//1.5))
    ax=sns.heatmap(heatmap_mat,cbar=True,xticklabels=yl,yticklabels=cluster_list,vmin=vmin,vmax=vmax)
    ax.set_xticks(np.arange(len(type_disease))+0.5)
    ax.set_yticks(np.arange(len(count_dg_real))+0.5)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
         rotation_mode="anchor", va="top")
    plt.setp(ax.get_yticklabels(), rotation=0, ha="right",
         rotation_mode="default", va="center_baseline")
    plt.xlabel("Disease subgroup")
    plt.savefig(figname+".png",bbox_inches="tight",dpi=500)
    plt.show()
|
{"hexsha": "0986ae6ddb19f80a83ef3ade08712e4dbc754e57", "size": 17841, "ext": "py", "lang": "Python", "max_stars_repo_path": "UDN_utils.py", "max_stars_repo_name": "hms-dbmi/UDN-gateway-clusters", "max_stars_repo_head_hexsha": "de6d251762d47c98d4720db1d749cfc8fff30b75", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-17T05:52:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T05:52:01.000Z", "max_issues_repo_path": "UDN_utils.py", "max_issues_repo_name": "hms-dbmi/UDN-gateway-clusters", "max_issues_repo_head_hexsha": "de6d251762d47c98d4720db1d749cfc8fff30b75", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "UDN_utils.py", "max_forks_repo_name": "hms-dbmi/UDN-gateway-clusters", "max_forks_repo_head_hexsha": "de6d251762d47c98d4720db1d749cfc8fff30b75", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.0145772595, "max_line_length": 244, "alphanum_fraction": 0.6625749678, "include": true, "reason": "import numpy,import scipy,from scipy,import networkx", "num_tokens": 4666}
|
import matplotlib.pyplot as plt
import numpy as np
def get_dimensions(matrix):
    """
    Return the dimensions of a matrix.

    Args:
        matrix (2D array): A 2D array representing a matrix

    Returns:
        tuple: (number of rows, number of columns)
    """
    row_count = len(matrix)
    col_count = len(matrix[0])
    return row_count, col_count
def generate_random_matrix(rows, cols):
    """
    Generate a pseudorandom symmetric communication matrix
    (a 2D array) with values in the range 1-9.

    Args:
        rows (integer): The number of rows in the matrix.
        cols (integer): The number of columns in the matrix.

    Return:
        matrix: A 2D array representation of the symmetrized matrix
    """
    # Fix: the old code re-seeded NumPy with np.random.seed(np.random.randint(1, 10))
    # on every call, which collapsed the generator to only nine possible
    # streams (and therefore nine possible matrices). Seeding, if wanted,
    # is left to the caller.
    # The communication value for each node pair lies in 1..9.
    matrix = np.random.randint(1, 10, size=(rows, cols))
    # Return the symmetrized (M[i][j] == M[j][i]) matrix.
    return correct_matrix(matrix)
def correct_matrix(matrix):
    """
    Symmetrize the pseudorandom matrix in place: enforce
    M[i][j] == M[j][i] by copying the upper triangle onto the
    lower triangle.

    Args:
        matrix (2D array): rows-by-cols matrix representation

    Return:
        matrix (2D array): the same matrix, now symmetric
    """
    total_rows = len(matrix)
    total_cols = len(matrix[0])
    for r in range(total_rows):
        # Only visit entries strictly below the diagonal (c < r).
        for c in range(min(r, total_cols)):
            # Mirror the upper-triangle value when the pair disagrees.
            if matrix[r][c] != matrix[c][r]:
                matrix[r][c] = matrix[c][r]
    return matrix
def generate_plot(matrix):
    """
    Plot every communication value of the matrix (black dots) and
    overlay the randomly selected centroids in distinct colors.

    Args:
        matrix (2D array): A 2D array representation of a matrix
    """
    plt.title('Communication')
    plt.xlabel('Index of Row')
    plt.ylabel('Communication Value')
    # Raw communication values, one scatter call per matrix row.
    for row in matrix:
        plt.scatter(range(len(row)), row, color = 'k')
    # Up to 8 centroid colors; more centroids than colors raises KeyError,
    # exactly as in the original palette handling.
    palette = {0: 'red', 1: 'blue', 2: 'green', 3: 'aquamarine', 4:'yellow', 5:'orange', 6:'brown', 7:'pink'}
    centroids = select_centroids(matrix)
    for idx, (row_index, value) in enumerate(centroids):
        plt.scatter(row_index, value, color = palette[idx])
    print("centroids = {}".format(centroids))
    plt.show()
def get_random_point(matrix):
    """Pick a pseudorandom (row_index, value) pair from the matrix.

    Draws a row index first, then a column index, and returns the row
    index together with the value stored at that position.
    """
    row = np.random.randint(0, len(matrix))
    col = np.random.randint(0, len(matrix[0]))
    return (row, matrix[row][col])
def select_centroids(matrix):
    """Randomly choose k distinct (row_index, value) centroid points.

    k is half the number of entries in the matrix.

    Args:
        matrix (2D array): A 2D array representation of a matrix

    Returns:
        centroids (list of tuples): k distinct points sampled from the matrix
    """
    k = int((get_dimensions(matrix)[0] * get_dimensions(matrix)[1]) / 2)
    print("k = {}".format(k))
    centroids = []
    # Keep drawing until k distinct points have been collected.
    # NOTE(review): presumably the matrix holds at least k distinct
    # (row, value) pairs; otherwise this loop never terminates — confirm.
    while len(centroids) < k:
        candidate = get_random_point(matrix)
        if candidate not in centroids:
            centroids.append(candidate)
    return centroids
def cluster_matrix(matrix):
    """
    Clusters the communication values inside each node of the matrix.

    Args:
        matrix (2D array): A 2D array representation of a matrix
    """
    # Each cluster can have a maximum of 2 nodes
    # Therefore, number of clusters is (# nodes) / 2
    # TODO: not implemented yet — currently a no-op placeholder.
    pass
def main():
    """Build a 4x4 pseudorandom matrix, print it, and display its plot."""
    # Create a random matrix, display to user in console and display the plot
    matrix = np.array(generate_random_matrix(4, 4))
    print("matrix = \n{}".format(matrix))
    generate_plot(matrix)


# Run only when executed as a script; the unguarded module-level call ran
# (and popped up a plot window) as a side effect of merely importing this file.
if __name__ == "__main__":
    main()
|
{"hexsha": "d7d03f4ed30afd59067e301dec5b7f7282287451", "size": 4492, "ext": "py", "lang": "Python", "max_stars_repo_path": "Communication_Cluster/Ignore/communication_cluster (first try did not work).py", "max_stars_repo_name": "AqeelMohamed/KL_WeightedGraph", "max_stars_repo_head_hexsha": "8592d9ed6220fb3226b4e9c8f1cc705e97a5afd7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Communication_Cluster/Ignore/communication_cluster (first try did not work).py", "max_issues_repo_name": "AqeelMohamed/KL_WeightedGraph", "max_issues_repo_head_hexsha": "8592d9ed6220fb3226b4e9c8f1cc705e97a5afd7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Communication_Cluster/Ignore/communication_cluster (first try did not work).py", "max_forks_repo_name": "AqeelMohamed/KL_WeightedGraph", "max_forks_repo_head_hexsha": "8592d9ed6220fb3226b4e9c8f1cc705e97a5afd7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5526315789, "max_line_length": 114, "alphanum_fraction": 0.6093054319, "include": true, "reason": "import numpy", "num_tokens": 1044}
|
!
! (c) 2019 Guide Star Engineering, LLC
! This Software was developed for the US Nuclear Regulatory Commission (US NRC)
! under contract "Multi-Dimensional Physics Implementation into Fuel Analysis under
! Steady-state and Transients (FAST)", contract # NRC-HQ-60-17-C-0007
!
! NEMO - Numerical Engine (for) Multiphysics Operators
! Copyright (c) 2007, Stefano Toninel
! Gian Marco Bianchi University of Bologna
! David P. Schmidt University of Massachusetts - Amherst
! Salvatore Filippone University of Rome Tor Vergata
! All rights reserved.
!
! Redistribution and use in source and binary forms, with or without modification,
! are permitted provided that the following conditions are met:
!
! 1. Redistributions of source code must retain the above copyright notice,
! this list of conditions and the following disclaimer.
! 2. Redistributions in binary form must reproduce the above copyright notice,
! this list of conditions and the following disclaimer in the documentation
! and/or other materials provided with the distribution.
! 3. Neither the name of the NEMO project nor the names of its contributors
! may be used to endorse or promote products derived from this software
! without specific prior written permission.
!
! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
! ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
! WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
! ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
! (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
! LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
! ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
! (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
! SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
!
!---------------------------------------------------------------------------------
!
! $Id: smooth_mesh.f90 3323 2008-08-28 15:44:18Z sfilippo $
!
! Description: sweeps through the mesh to find the optimum location of all the vertices
!
SUBMODULE (tools_mesh_optimize) smooth_mesh_implementation
    USE class_iterating, ONLY: iterating
    IMPLICIT NONE

CONTAINS

    MODULE PROCEDURE smooth_mesh
        ! Optimizes vertex positions with the Opt-MS library: boundary
        ! (constrained) vertices are smoothed first, then interior
        ! (unconstrained) ones; after each sweep, vertices shared between
        ! processors are averaged via PSBLAS overlap updates, and finally
        ! all mesh metrics are recomputed.
        USE class_bc
        USE class_psblas
        USE class_cell
        USE class_connectivity
        USE class_keytable
        USE class_least_squares, ONLY : free_least_squares, set_least_squares
        USE class_mesh
        USE class_vector
        USE class_vertex
        USE tools_mesh_basics, ONLY : geom_cell, geom_diff, geom_face
        USE tools_mesh_check
        USE tools_mesh_optimize, nemo_protect_name => smooth_mesh

        IMPLICIT NONE

        ! Local variables
        ! Variables for list of unconstrained vertices
        ! note that unconstrained(:) is a list of integers, sized to ncells
        INTEGER, ALLOCATABLE :: unconstrained(:) ! list of vertices that move
        ! (and connected only to tets)
        ! constrained lists a mobile, but constrained vertex
        INTEGER, ALLOCATABLE :: constrained(:)
        ! shared flags a vertex that is shared by another processor, sized to ncells
        INTEGER :: n_shared, tot_n_shared ! number of shared vertices
        INTEGER, ALLOCATABLE :: shared(:) !list of shared (overlap) vertices
        LOGICAL, ALLOCATABLE :: shared_flag(:) !flags if a vertex is shared
        LOGICAL, ALLOCATABLE :: all_tets(:) !flags if a vertex is used only by tets
        ! NOTE(review): the original comment here repeated "used only by tets",
        ! presumably a copy-paste slip; usage below suggests 'mixed' flags
        ! vertices touching mixed cell types — confirm against mobile_verts.
        LOGICAL, ALLOCATABLE :: mixed(:)
        REAL(psb_dpk_),ALLOCATABLE :: vpos(:,:) ! list of vertex positions
        INTEGER :: n_unconstrained ! number of unconstrained vertices
        INTEGER :: n_constrained ! number of constrained vertices
        TYPE(connectivity) :: c2v ! given all vertices, finds connected cells
        TYPE(connectivity) :: f2v ! given all vertices, finds connected faces
        TYPE(connectivity) :: b2v ! given all verts, finds the boundaries to which they belong
        INTEGER,POINTER :: ib2v(:)=>null() ! given a vert, the bndry to which it belongs
        INTEGER :: iv,i ! vertex number, cell id, and loop index
        INTEGER :: nverts !number of local vertices in mesh
        INTEGER :: info
        INTEGER :: tangled ! number of tangled cells in the initial mesh

        ! parameters for calling Opt-MS library
        INTEGER :: dims
        INTEGER :: technique, functionID

        TYPE(vector) :: new_pos ! the new vertex position, after smoothing
        INTEGER,ALLOCATABLE :: idloc(:),idglob(:)

        ! for error handling
        INTEGER :: err_act,icontxt,mypnum
        CHARACTER(len=32), PARAMETER :: WHERE = 'smooth_mesh'

        ! Sets error handling for PSBLAS-2 routines
        info = 0
        CALL psb_erractionsave(err_act)

        icontxt = icontxt_()
        mypnum  = mypnum_()

        IF (msh%ncd == 2) RETURN ! can't yet do 2d meshes

        nverts = psb_cd_get_local_rows(msh%desc_v) !local and shared vertices

        ! allocate storage for shared vertex positions
        CALL psb_geall(vpos,msh%desc_v,info,3)
        IF (info/=0) THEN
            WRITE(*,100)
            CALL abort_psblas
        ENDIF

        ! note shared (overlap) vertices
        CALL psb_get_overlap(shared,msh%desc_v,info)
        IF (info/=0) THEN
            WRITE(*,200)
            CALL abort_psblas
        ENDIF

        ALLOCATE(shared_flag(nverts), all_tets(nverts), mixed(nverts),stat=info)
        IF (info/=0) THEN
            WRITE(*,600)
            CALL abort_psblas
        ENDIF

        shared_flag = .FALSE.
        IF (ALLOCATED(shared)) THEN
            n_shared=SIZE(shared)
            DO i = 1, n_shared
                shared_flag( shared(i) ) = .TRUE.
            END DO
        ELSE
            n_shared=0 ! if there are no shared vertices, shared will still have size 1
        ENDIF

        ! total shared-vertex count across all processors (global sum)
        tot_n_shared = n_shared
        CALL psb_sum(icontxt,tot_n_shared)
        IF (info/=0) THEN
            WRITE(*,200)
            CALL abort_psblas
        ENDIF

        !initialize OptMS parameters
        dims = 3
        technique  = -1 ! -1 is default, 4 is OPTMS_COMBINED: see SMuserDefs.h
        functionID = 27 ! Minimize Max SMRS Volume Ratio
        !(SMRS Vol. Ratio is Knupp's metric to the -3/2 power)

        ! pass parameters and initialize OptMS data structures for 3D smoothing
        info = initoptms(dims,technique,functionID)
        IF (info /= 0) THEN
            WRITE(*,100)
            CALL abort_psblas
        ENDIF

        !initialize OptMS parameters for 2D smoothing
        dims = 2
        technique  = 3 ! -1 is default, 4 is OPTMS_COMBINED: see SMuserDefs.h
        functionID = 7 ! was 7 ! Minimize Max Jacobian

        ! pass parameters and initialize OptMS data structures
        info = initoptms2d(dims,technique,functionID)

        ! prepare the c2v connectivity, required for calculating unconstrained vertices
        CALL msh%v2c%get_dual_conn(c2v)

        ! prepare the f2v connectivity, required for calculating surf. vtx movement
        CALL msh%v2f%get_dual_conn(f2v)

        ! prepare the b2v connectivity, required for calculating surf. vtx movement
        CALL msh%v2b%get_dual_conn(b2v)

        ALLOCATE(unconstrained(nverts),constrained(nverts),stat=info)
        IF(info /= 0) THEN
            WRITE(*,600)
            CALL abort_psblas
        END IF

        ! decide which vertices we will smooth and which are special boundary vertices
        CALL mobile_verts(msh,bc,c2v,shared_flag,unconstrained,n_unconstrained,constrained, &
            & n_constrained, all_tets, mixed)

        CALL check_right_handed(msh,shared,shared_flag,tangled,all_tets)

        IF (tangled > 0) THEN
            WRITE(6,*)
            WRITE(6,'(a,i4,a,i3,a)')"Warning: ",tangled, &
                & " inverted cells detected on processor ",mypnum_(),"."
            WRITE(6,'(a)')"Attempting to untangle mesh."
        ENDIF

        ! ============= repeatedly do surface sweeps ======================
        CALL surface_iter%reset()

        surface_iteration: DO
            CALL surface_iter%increment()
            ! CHAR(27)//CHAR(77) is ESC-M (terminal "reverse index"), so each
            ! iteration count overwrites the previous one on the same line.
            IF (mypnum_() == 0) WRITE(6,'(a,i4,2a)')"    - Iteration:  ", &
                & surface_iter%current_iteration(), CHAR(27),CHAR(77)

            ! Loop over interior vertices
            DO i=1,n_constrained

                iv = constrained(i)  ! get index of unconstrained vertex out of the list

                CALL b2v%get_ith_conn(ib2v,iv)

                IF ( SIZE(ib2v) == 1 ) THEN
                    ! a normal sliding vertex can only belong to one boundary,
                    ! so assume ib2v has only 1 elem.
                    IF (.NOT. mixed(iv) ) &
                        & CALL smooth_surf_vtx(iv,ib2v(1),msh,f2v,shared_flag, tangled, all_tets)
                ENDIF
                ! TBD:  we could do some sort of averaging with points adjoining 2 sliding BC's

            END DO

            !copy all shared vertices to a dense vector and average among overlapping processors
            IF (tot_n_shared > 0) THEN ! there are shared vertices

                DO i = 1,n_shared
                    iv = shared(i)

                    vpos(iv,1) = msh%verts(iv)%x_()
                    vpos(iv,2) = msh%verts(iv)%y_()
                    vpos(iv,3) = msh%verts(iv)%z_()
                END DO ! end of loop over shared vertices

                CALL psb_ovrl(vpos,msh%desc_v,info,update=psb_avg_)
                IF(info /= 0) THEN
                    WRITE(*,700)
                    CALL abort_psblas
                END IF

                !now copy the averaged positions from vpos back to the vertices
                DO i = 1,n_shared
                    iv = shared(i)

                    new_pos = vector_(vpos(iv,1),vpos(iv,2),vpos(iv,3))
                    msh%verts(iv) = new_pos
                END DO

                CALL update_vertex_halo(msh%verts,msh%desc_v)

            ENDIF ! end of if-test for allocation of shared vertex storage

            tangled = 0  ! assume that after one surface sweep, things are untangled

            IF ( surface_iter%stop_iterating() ) EXIT surface_iteration

        END DO surface_iteration ! end of surface sweep loops

        ! Now that the surface is hopefully in good shape, do interior smoothing
        IF (mypnum_() == 0) WRITE(6,*)

        ! Free memory, because these will be recalculated later.  For now, since
        ! they are immediately out of date, they are worse than useless
        DEALLOCATE(msh%face_cntr, msh%af, msh%area,msh%cell_cntr,msh%vol,msh%df, &
            & msh%dist,msh%interp)
        CALL free_least_squares(msh%lsr)

        ! set all interior points (except at region boundaries) to the avg. position
        CALL laplacian_smooth (msh%desc_v, msh%v2v, n_unconstrained, unconstrained, msh%verts, mixed)

        CALL interior_iter%reset()

        IF (mypnum_() == 0) WRITE(6,'(a,i4,2a)')"    - Iteration:  ", &
            & interior_iter%current_iteration(),CHAR(27),CHAR(77)

        interior_iteration: DO

            ! Loop over interior vertices
            DO i=1,n_unconstrained
                iv = unconstrained(i)  ! get index of unconstrained vertex out of the list

                ! Optimize the location of the interior vertex
                IF (.NOT. mixed(iv) ) &
                    & CALL smooth_interior_vtx(iv,msh,c2v,shared_flag, all_tets)

            END DO  ! end of loop over unconstrained vertices

            !copy all shared vertices to a dense vector and average among overlapping processors
            IF (tot_n_shared > 0) THEN ! there are shared vertices

                DO i = 1,n_shared
                    iv = shared(i)

                    vpos(iv,1) = msh%verts(iv)%x_()
                    vpos(iv,2) = msh%verts(iv)%y_()
                    vpos(iv,3) = msh%verts(iv)%z_()
                END DO

                CALL psb_ovrl(vpos,msh%desc_v,info,update=psb_avg_)
                IF(info /= 0) THEN
                    WRITE(*,700)
                    CALL abort_psblas
                END IF

                !now copy the averaged positions from vpos back to the vertices
                DO i = 1,n_shared
                    iv = shared(i)

                    new_pos = vector_(vpos(iv,1),vpos(iv,2),vpos(iv,3))
                    msh%verts(iv) = new_pos
                END DO

                CALL update_vertex_halo(msh%verts,msh%desc_v)

            ENDIF ! end of if-test for allocation of shared vertex storage

            CALL check_right_handed(msh,shared,shared_flag,tangled,all_tets)

            !share largest value of tangled among processors
            CALL psb_amx(icontxt,tangled)
            CALL psb_check_error(info,TRIM(WHERE),'psb_amx',icontxt)

            IF (tangled > 0) THEN
                WRITE(6,*)
                WRITE(6,'(a,i4,a,i3,a)')"Warning: ",tangled, &
                    & " inverted cells detected on processor ",mypnum_(),"."
                WRITE(6,'(a)')"Attempting to untangle mesh."
                WRITE(6,*)
            ENDIF

            CALL interior_iter%increment()

            IF (mypnum_() == 0) WRITE(6,'(a,i4,2a)')"    - Iteration:  ", &
                & interior_iter%current_iteration(),CHAR(27),CHAR(77)

            IF ( interior_iter%stop_iterating() ) EXIT interior_iteration

        END DO interior_iteration

        IF (mypnum_() == 0) THEN
            WRITE(6,*)
            WRITE(6,*)
        ENDIF !End of if-test for processor zero

        !now recalculate all mesh geometry

        ! Computes face-related metrics members MSH%FACE_CNTR, MSH%AF, MSH%AREA
        CALL geom_face(msh%verts,msh%v2f,msh%ncd, &
            & msh%face_cntr,msh%af,msh%area)

        ! Computes cell-related metrics members MSH%CELL_CNTR, MSH%VOL
        CALL geom_cell(msh%verts,msh%faces,msh%cells,msh%v2f,msh%v2c, &
            & msh%f2c,msh%ncd,msh%cell_cntr,msh%vol)

        ! Computes face-related metrics members MSH%DF, MSH%DIST, MSH%INTERP
        CALL geom_diff(msh%faces,msh%f2b,msh%face_cntr,msh%af,msh%cell_cntr, &
            & msh%df,msh%dist,msh%interp)

        ! Computes metrics for cell-centered least squares regression
        CALL set_least_squares(msh%lsr,msh%ncd,msh%desc_c,msh%c2c,msh%f2b, &
            & msh%faces,msh%cell_cntr,msh%face_cntr)

        ! free allocated memory
        CALL psb_gefree(vpos,msh%desc_v,info)

        IF ( ALLOCATED(shared) ) THEN
            DEALLOCATE (shared)
        ENDIF

        ! NOTE(review): 'mixed' is not explicitly deallocated here; as a local
        ! ALLOCATABLE it is freed automatically on return, so this is harmless.
        DEALLOCATE (shared_flag, all_tets)

        info=freeoptms()
        info=freeoptms2d()

        CALL free_conn(c2v)
        CALL free_conn(f2v)
        CALL free_conn(b2v)

        DEALLOCATE(unconstrained, constrained)

        IF ( ALLOCATED(idloc)  ) DEALLOCATE(idloc)
        IF ( ALLOCATED(idglob) ) DEALLOCATE(idglob)

        IF(info /= 0) THEN
            WRITE(*,600)
            CALL abort_psblas
        END IF

        ! NOTE(review): this info check duplicates the one immediately above;
        ! redundant but harmless.
        IF(info /= 0) THEN
            WRITE(*,600)
            CALL abort_psblas
        END IF

100     FORMAT(' ERROR! Failure to allocate dense matrix in SMOOTH_MESH')
200     FORMAT(' ERROR! Failed to get overlap points in SMOOTH_MESH')
600     FORMAT(' ERROR! Memory allocation failure in SMOOTH_MESH')
700     FORMAT(' ERROR! Failure to average shared vertex positions in SMOOTH_MESH')

    END PROCEDURE smooth_mesh

END SUBMODULE smooth_mesh_implementation
|
{"hexsha": "d4cbfc1f9d85c9ea8b82e430066385cef7201ca3", "size": 15849, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/FV/src/mesh_optimize/smooth_mesh.f90", "max_stars_repo_name": "sourceryinstitute/MORFEUS-Source", "max_stars_repo_head_hexsha": "38659b1097e89e3dea8f5e7e83acf98bfe6623bd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2019-09-19T03:00:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T02:07:07.000Z", "max_issues_repo_path": "src/FV/src/mesh_optimize/smooth_mesh.f90", "max_issues_repo_name": "sourceryinstitute/MORFEUS-Source", "max_issues_repo_head_hexsha": "38659b1097e89e3dea8f5e7e83acf98bfe6623bd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2019-09-10T04:59:39.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-28T22:02:45.000Z", "max_forks_repo_path": "src/FV/src/mesh_optimize/smooth_mesh.f90", "max_forks_repo_name": "sourceryinstitute/MORFEUS-Source", "max_forks_repo_head_hexsha": "38659b1097e89e3dea8f5e7e83acf98bfe6623bd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-28T03:13:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-06T04:13:20.000Z", "avg_line_length": 38.0985576923, "max_line_length": 101, "alphanum_fraction": 0.6059057354, "num_tokens": 3943}
|
[STATEMENT]
lemma (in PolynRg) polyn_expr_tOp_c:"\<lbrakk>pol_coeff S c; pol_coeff S d\<rbrakk> \<Longrightarrow>
\<exists>e. pol_coeff S e \<and> (fst e = fst c + fst d) \<and>
(snd e) (fst e) = (snd c (fst c)) \<cdot>\<^sub>r\<^bsub>S\<^esub> (snd d) (fst d) \<and>
polyn_expr R X (fst e) e =
(polyn_expr R X (fst c) c) \<cdot>\<^sub>r (polyn_expr R X (fst d) d)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>pol_coeff S c; pol_coeff S d\<rbrakk> \<Longrightarrow> \<exists>e. pol_coeff S e \<and> fst e = fst c + fst d \<and> snd e (fst e) = snd c (fst c) \<cdot>\<^sub>r\<^bsub>S\<^esub> snd d (fst d) \<and> polyn_expr R X (fst e) e = polyn_expr R X (fst c) c \<cdot>\<^sub>r polyn_expr R X (fst d) d
[PROOF STEP]
by (cases c, cases d) (simp add: polyn_expr_tOpTr)
|
{"llama_tokens": 377, "file": "Group-Ring-Module_Algebra5", "length": 1}
|
#
# relu paddle model generator
#
import numpy as np
from save_model import saveModel
import sys
def relu(name: str, x):
    """Build a static PaddlePaddle graph applying ReLU to ``x``, execute it
    once on CPU, export the model via ``saveModel``, and return the output.

    Args:
        name: model name used when saving.
        x: input array fed to the graph (float32).
    """
    import paddle as pdpd
    pdpd.enable_static()

    input_node = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
    relu_out = pdpd.nn.functional.relu(input_node)

    place = pdpd.static.cpu_places(1)[0]
    executor = pdpd.static.Executor(place)
    # startup program will call initializer to initialize the parameters.
    executor.run(pdpd.static.default_startup_program())

    results = executor.run(feed={'x': x}, fetch_list=[relu_out])

    saveModel(name, executor, feedkeys=['x'], fetchlist=[relu_out],
              inputs=[x], outputs=[results[0]], target_dir=sys.argv[1])

    return results[0]
def main():
    """Generate the reference input and export the relu test model."""
    sample = np.array([-2, 0, 1]).astype('float32')
    relu("relu", sample)


if __name__ == "__main__":
    main()
|
{"hexsha": "6952bd27cd8ff6ee11ed4655141f3c0c1a036658", "size": 836, "ext": "py", "lang": "Python", "max_stars_repo_path": "ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py", "max_stars_repo_name": "monroid/openvino", "max_stars_repo_head_hexsha": "8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2406, "max_stars_repo_stars_event_min_datetime": "2020-04-22T15:47:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:27:37.000Z", "max_issues_repo_path": "ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py", "max_issues_repo_name": "thomas-yanxin/openvino", "max_issues_repo_head_hexsha": "031e998a15ec738c64cc2379d7f30fb73087c272", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4948, "max_issues_repo_issues_event_min_datetime": "2020-04-22T15:12:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:45:42.000Z", "max_forks_repo_path": "ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py", "max_forks_repo_name": "thomas-yanxin/openvino", "max_forks_repo_head_hexsha": "031e998a15ec738c64cc2379d7f30fb73087c272", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 991, "max_forks_repo_forks_event_min_datetime": "2020-04-23T18:21:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:40:57.000Z", "avg_line_length": 21.4358974359, "max_line_length": 73, "alphanum_fraction": 0.6363636364, "include": true, "reason": "import numpy", "num_tokens": 221}
|
# -*- coding: utf-8 -*-
"""
@author: Soufiane Mourragui
2020/06/17
READ PDXE DRUG RESPONSE DATA
"""
import os
import pandas as pd
import numpy as np
from functools import reduce
def read_PDXE_response(PDXE_drug_response_df, PDXE_drug_name, X_target):
    """Align PDXE expression data with drug-response data for one drug.

    Selects the rows of ``PDXE_drug_response_df`` whose 'Treatment' equals
    ``PDXE_drug_name``, indexes them by 'Model', and restricts both that
    response table and the expression matrix ``X_target`` to the model
    names present in both.

    Args:
        PDXE_drug_response_df (pd.DataFrame): response table with at least
            'Treatment' and 'Model' columns.
        PDXE_drug_name (str): treatment name to select.
        X_target (pd.DataFrame): expression data with genes in columns,
            indexed by model name — or by length-2 tuples whose first
            element is the model name.

    Returns:
        tuple[pd.DataFrame, pd.DataFrame]: (expression, response) frames,
        both restricted to the common models (sorted by model name).
    """
    y_target = PDXE_drug_response_df[PDXE_drug_response_df['Treatment'] == PDXE_drug_name]
    y_target = y_target.set_index('Model')

    # Some expression matrices are indexed by (model, ...) pairs; collapse
    # those to the bare model name. Guard against an empty index (the old
    # np.unique(...)[0] raised IndexError there) and work on a copy so the
    # caller's DataFrame is no longer mutated in place.
    index_lengths = np.unique([len(x) for x in X_target.index])
    if index_lengths.size and index_lengths[0] == 2:
        # NOTE: mirrors the original check — triggers on the *shortest*
        # index-entry length being 2, not on all entries having length 2.
        X_target = X_target.copy()
        X_target.index = [e[0] for e in X_target.index]

    common_samples = np.intersect1d(list(y_target.index), X_target.index)
    y_target = y_target.loc[common_samples]
    X_target_response = X_target.loc[common_samples]

    return X_target_response, y_target
|
{"hexsha": "b5c8e0a869c2d03046d9416903c61fd67b1fcaf6", "size": 805, "ext": "py", "lang": "Python", "max_stars_repo_path": "read_data/read_PDXE_response.py", "max_stars_repo_name": "fuhrmanj/TRANSACT_manuscript", "max_stars_repo_head_hexsha": "71ca2ec42bdd5d547d4b965aa7f84838bfd5b812", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-10-26T13:18:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T10:03:09.000Z", "max_issues_repo_path": "read_data/read_PDXE_response.py", "max_issues_repo_name": "fuhrmanj/TRANSACT_manuscript", "max_issues_repo_head_hexsha": "71ca2ec42bdd5d547d4b965aa7f84838bfd5b812", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-21T09:55:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T13:48:47.000Z", "max_forks_repo_path": "read_data/read_PDXE_response.py", "max_forks_repo_name": "fuhrmanj/TRANSACT_manuscript", "max_forks_repo_head_hexsha": "71ca2ec42bdd5d547d4b965aa7f84838bfd5b812", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-04T15:40:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-26T09:37:33.000Z", "avg_line_length": 26.8333333333, "max_line_length": 90, "alphanum_fraction": 0.7304347826, "include": true, "reason": "import numpy", "num_tokens": 224}
|
(***********************************************************************************
* Copyright (c) 2016-2019 The University of Sheffield, UK
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* SPDX-License-Identifier: BSD-2-Clause
***********************************************************************************)
(* This file is automatically generated, please do not modify! *)
section\<open>Testing Document\_getElementById\<close>
text\<open>This theory contains the test cases for Document\_getElementById.\<close>
theory Document_getElementById
imports
"Core_DOM_BaseTest"
begin
(* Fixture heap for the tests below: an HTML document whose <head> holds the
   testharness boilerplate and whose <body> contains the divs the tests look
   up — ids "log", "" (empty), "test1", three elements sharing id "test5",
   and the nested "outer"/"middle"/"inner" divs. *)
definition Document_getElementById_heap :: heap\<^sub>f\<^sub>i\<^sub>n\<^sub>a\<^sub>l where
"Document_getElementById_heap = create_heap [(cast (document_ptr.Ref 1), cast (create_document_obj html (Some (cast (element_ptr.Ref 1))) [])),
(cast (element_ptr.Ref 1), cast (create_element_obj ''html'' [cast (element_ptr.Ref 2), cast (element_ptr.Ref 9)] fmempty None)),
(cast (element_ptr.Ref 2), cast (create_element_obj ''head'' [cast (element_ptr.Ref 3), cast (element_ptr.Ref 4), cast (element_ptr.Ref 5), cast (element_ptr.Ref 6), cast (element_ptr.Ref 7), cast (element_ptr.Ref 8)] fmempty None)),
(cast (element_ptr.Ref 3), cast (create_element_obj ''meta'' [] (fmap_of_list [(''charset'', ''utf-8'')]) None)),
(cast (element_ptr.Ref 4), cast (create_element_obj ''title'' [cast (character_data_ptr.Ref 1)] fmempty None)),
(cast (character_data_ptr.Ref 1), cast (create_character_data_obj ''Document.getElementById'')),
(cast (element_ptr.Ref 5), cast (create_element_obj ''link'' [] (fmap_of_list [(''rel'', ''author''), (''title'', ''Tetsuharu OHZEKI''), (''href'', ''mailto:saneyuki.snyk@gmail.com'')]) None)),
(cast (element_ptr.Ref 6), cast (create_element_obj ''link'' [] (fmap_of_list [(''rel'', ''help''), (''href'', ''https://dom.spec.whatwg.org/#dom-document-getelementbyid'')]) None)),
(cast (element_ptr.Ref 7), cast (create_element_obj ''script'' [] (fmap_of_list [(''src'', ''/resources/testharness.js'')]) None)),
(cast (element_ptr.Ref 8), cast (create_element_obj ''script'' [] (fmap_of_list [(''src'', ''/resources/testharnessreport.js'')]) None)),
(cast (element_ptr.Ref 9), cast (create_element_obj ''body'' [cast (element_ptr.Ref 10), cast (element_ptr.Ref 11), cast (element_ptr.Ref 12), cast (element_ptr.Ref 13), cast (element_ptr.Ref 16), cast (element_ptr.Ref 19)] fmempty None)),
(cast (element_ptr.Ref 10), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''log'')]) None)),
(cast (element_ptr.Ref 11), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', '''')]) None)),
(cast (element_ptr.Ref 12), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''test1'')]) None)),
(cast (element_ptr.Ref 13), cast (create_element_obj ''div'' [cast (element_ptr.Ref 14), cast (element_ptr.Ref 15)] (fmap_of_list [(''id'', ''test5''), (''data-name'', ''1st'')]) None)),
(cast (element_ptr.Ref 14), cast (create_element_obj ''p'' [cast (character_data_ptr.Ref 2)] (fmap_of_list [(''id'', ''test5''), (''data-name'', ''2nd'')]) None)),
(cast (character_data_ptr.Ref 2), cast (create_character_data_obj ''P'')),
(cast (element_ptr.Ref 15), cast (create_element_obj ''input'' [] (fmap_of_list [(''id'', ''test5''), (''type'', ''submit''), (''value'', ''Submit''), (''data-name'', ''3rd'')]) None)),
(cast (element_ptr.Ref 16), cast (create_element_obj ''div'' [cast (element_ptr.Ref 17)] (fmap_of_list [(''id'', ''outer'')]) None)),
(cast (element_ptr.Ref 17), cast (create_element_obj ''div'' [cast (element_ptr.Ref 18)] (fmap_of_list [(''id'', ''middle'')]) None)),
(cast (element_ptr.Ref 18), cast (create_element_obj ''div'' [] (fmap_of_list [(''id'', ''inner'')]) None)),
(cast (element_ptr.Ref 19), cast (create_element_obj ''script'' [cast (character_data_ptr.Ref 3)] fmempty None)),
(cast (character_data_ptr.Ref 3), cast (create_character_data_obj ''%3C%3Cscript%3E%3E''))]"
(* The document pointer every test operates on: Ref 1 of the fixture heap. *)
definition Document_getElementById_document :: "(unit, unit, unit, unit, unit, unit) object_ptr option" where "Document_getElementById_document = Some (cast (document_ptr.Ref 1))"
text \<open>"Document.getElementById with a script-inserted element"\<close>

(* Executable test (discharged by evaluation): an appended element with a
   fresh id is found; after removal the lookup returns None. *)
lemma "test (do {
  gBody \<leftarrow> Document_getElementById_document . body;
  TEST_ID \<leftarrow> return ''test2'';
  test \<leftarrow> Document_getElementById_document . createElement(''div'');
  test . setAttribute(''id'', TEST_ID);
  gBody . appendChild(test);
  result \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_not_equals(result, None, ''should not be null.'');
  tmp0 \<leftarrow> result . tagName;
  assert_equals(tmp0, ''div'', ''should have appended element's tag name'');
  gBody . removeChild(test);
  removed \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(removed, None, ''should not get removed element.'')
}) Document_getElementById_heap"
  by eval
text \<open>"update `id` attribute via setAttribute/removeAttribute"\<close>

(* Executable test: changing an element's id via setAttribute moves the
   lookup to the new id; removeAttribute unregisters it entirely. *)
lemma "test (do {
  gBody \<leftarrow> Document_getElementById_document . body;
  TEST_ID \<leftarrow> return ''test3'';
  test \<leftarrow> Document_getElementById_document . createElement(''div'');
  test . setAttribute(''id'', TEST_ID);
  gBody . appendChild(test);
  UPDATED_ID \<leftarrow> return ''test3-updated'';
  test . setAttribute(''id'', UPDATED_ID);
  e \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
  assert_equals(e, test, ''should get the element with id.'');
  old \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(old, None, ''shouldn't get the element by the old id.'');
  test . removeAttribute(''id'');
  e2 \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
  assert_equals(e2, None, ''should return null when the passed id is none in document.'')
}) Document_getElementById_heap"
  by eval
text \<open>"Ensure that the id attribute only affects elements present in a document"\<close>

(* Executable test: an id on a detached element is not found; it becomes
   findable once the element is appended to the document. *)
lemma "test (do {
  TEST_ID \<leftarrow> return ''test4-should-not-exist'';
  e \<leftarrow> Document_getElementById_document . createElement(''div'');
  e . setAttribute(''id'', TEST_ID);
  tmp0 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(tmp0, None, ''should be null'');
  tmp1 \<leftarrow> Document_getElementById_document . body;
  tmp1 . appendChild(e);
  tmp2 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(tmp2, e, ''should be the appended element'')
}) Document_getElementById_heap"
  by eval
text \<open>"in tree order, within the context object's tree"\<close>

(* Executable test: with several elements sharing id "test5", lookup returns
   the first in tree order; removing it exposes the next one. *)
lemma "test (do {
  gBody \<leftarrow> Document_getElementById_document . body;
  TEST_ID \<leftarrow> return ''test5'';
  target \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_not_equals(target, None, ''should not be null'');
  tmp0 \<leftarrow> target . getAttribute(''data-name'');
  assert_equals(tmp0, ''1st'', ''should return the 1st'');
  element4 \<leftarrow> Document_getElementById_document . createElement(''div'');
  element4 . setAttribute(''id'', TEST_ID);
  element4 . setAttribute(''data-name'', ''4th'');
  gBody . appendChild(element4);
  target2 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_not_equals(target2, None, ''should not be null'');
  tmp1 \<leftarrow> target2 . getAttribute(''data-name'');
  assert_equals(tmp1, ''1st'', ''should be the 1st'');
  tmp2 \<leftarrow> target2 . parentNode;
  tmp2 . removeChild(target2);
  target3 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_not_equals(target3, None, ''should not be null'');
  tmp3 \<leftarrow> target3 . getAttribute(''data-name'');
  assert_equals(tmp3, ''4th'', ''should be the 4th'')
}) Document_getElementById_heap"
  by eval
text \<open>"Modern browsers optimize this method with using internal id cache. This test checks that their optimization should effect only append to `Document`, not append to `Node`."\<close>

(* Executable test: appending an id-bearing element to a *detached* node
   must not register the id with the document. *)
lemma "test (do {
  TEST_ID \<leftarrow> return ''test6'';
  s \<leftarrow> Document_getElementById_document . createElement(''div'');
  s . setAttribute(''id'', TEST_ID);
  tmp0 \<leftarrow> Document_getElementById_document . createElement(''div'');
  tmp0 . appendChild(s);
  tmp1 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(tmp1, None, ''should be null'')
}) Document_getElementById_heap"
  by eval
text \<open>"changing attribute's value via `Attr` gotten from `Element.attribute`."\<close>

(* Executable test: updating the id attribute's value re-keys the element —
   the old id returns None, the new id returns the element. *)
lemma "test (do {
  gBody \<leftarrow> Document_getElementById_document . body;
  TEST_ID \<leftarrow> return ''test7'';
  element \<leftarrow> Document_getElementById_document . createElement(''div'');
  element . setAttribute(''id'', TEST_ID);
  gBody . appendChild(element);
  target \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(target, element, ''should return the element before changing the value'');
  element . setAttribute(''id'', (TEST_ID @ ''-updated''));
  target2 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(target2, None, ''should return null after updated id via Attr.value'');
  target3 \<leftarrow> Document_getElementById_document . getElementById((TEST_ID @ ''-updated''));
  assert_equals(target3, element, ''should be equal to the updated element.'')
}) Document_getElementById_heap"
  by eval
text \<open>"update `id` attribute via element.id"\<close>

(* Executable test: after renaming the id, lookup follows the new id; setting
   the id to the empty string makes the element unfindable. *)
lemma "test (do {
  gBody \<leftarrow> Document_getElementById_document . body;
  TEST_ID \<leftarrow> return ''test12'';
  test \<leftarrow> Document_getElementById_document . createElement(''div'');
  test . setAttribute(''id'', TEST_ID);
  gBody . appendChild(test);
  UPDATED_ID \<leftarrow> return (TEST_ID @ ''-updated'');
  test . setAttribute(''id'', UPDATED_ID);
  e \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
  assert_equals(e, test, ''should get the element with id.'');
  old \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
  assert_equals(old, None, ''shouldn't get the element by the old id.'');
  test . setAttribute(''id'', '''');
  e2 \<leftarrow> Document_getElementById_document . getElementById(UPDATED_ID);
  assert_equals(e2, None, ''should return null when the passed id is none in document.'')
}) Document_getElementById_heap"
  by eval
text \<open>"where insertion order and tree order don't match"\<close>
(* test13: four elements share the same id; getElementById must always return
   the first one in *tree order*, which here differs from insertion order
   because of the insertBefore calls.  Removing the current first element
   promotes the next one in tree order. *)
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test13'';
container \<leftarrow> Document_getElementById_document . createElement(''div'');
container . setAttribute(''id'', (TEST_ID @ ''-fixture''));
gBody . appendChild(container);
element1 \<leftarrow> Document_getElementById_document . createElement(''div'');
element1 . setAttribute(''id'', TEST_ID);
element2 \<leftarrow> Document_getElementById_document . createElement(''div'');
element2 . setAttribute(''id'', TEST_ID);
element3 \<leftarrow> Document_getElementById_document . createElement(''div'');
element3 . setAttribute(''id'', TEST_ID);
element4 \<leftarrow> Document_getElementById_document . createElement(''div'');
element4 . setAttribute(''id'', TEST_ID);
container . appendChild(element2);
container . appendChild(element4);
container . insertBefore(element3, element4);
container . insertBefore(element1, element2);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element1, ''should return 1st element'');
container . removeChild(element1);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element2, ''should return 2nd element'');
container . removeChild(element2);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element3, ''should return 3rd element'');
container . removeChild(element3);
test \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(test, element4, ''should return 4th element'');
container . removeChild(element4)
}) Document_getElementById_heap"
by eval
text \<open>"Inserting an id by inserting its parent node"\<close>
(* test14: an id'd element inside a detached subtree is invisible to
   getElementById until its parent is appended to the document. *)
lemma "test (do {
gBody \<leftarrow> Document_getElementById_document . body;
TEST_ID \<leftarrow> return ''test14'';
a \<leftarrow> Document_getElementById_document . createElement(''a'');
b \<leftarrow> Document_getElementById_document . createElement(''b'');
a . appendChild(b);
b . setAttribute(''id'', TEST_ID);
tmp0 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(tmp0, None);
gBody . appendChild(a);
tmp1 \<leftarrow> Document_getElementById_document . getElementById(TEST_ID);
assert_equals(tmp1, b)
}) Document_getElementById_heap"
by eval
text \<open>"Document.getElementById must not return nodes not present in document"\<close>
(* test15: after detaching the ''middle'' subtree, an id'd element appended to
   the (now detached) ''inner'' node must not be found in the document. *)
lemma "test (do {
TEST_ID \<leftarrow> return ''test15'';
outer \<leftarrow> Document_getElementById_document . getElementById(''outer'');
middle \<leftarrow> Document_getElementById_document . getElementById(''middle'');
inner \<leftarrow> Document_getElementById_document . getElementById(''inner'');
tmp0 \<leftarrow> Document_getElementById_document . getElementById(''middle'');
outer . removeChild(tmp0);
new_el \<leftarrow> Document_getElementById_document . createElement(''h1'');
new_el . setAttribute(''id'', ''heading'');
inner . appendChild(new_el);
tmp1 \<leftarrow> Document_getElementById_document . getElementById(''heading'');
assert_equals(tmp1, None)
}) Document_getElementById_heap"
by eval
end
|
{"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/Core_DOM/common/tests/Document_getElementById.thy"}
|
"""
Miscellaneous facial features detection implementation
"""
import cv2
import numpy as np
from enum import Enum
class Eyes(Enum):
    """Which eye a facial-feature computation refers to."""
    LEFT = 1
    RIGHT = 2
class FacialFeatures:
    """Static helpers for facial-feature extraction from face-mesh landmarks.

    Every method takes the detected landmark coordinates (``image_points`` /
    ``marks``, indexable by face-mesh landmark index) and/or a BGR camera
    frame.  The class is used purely as a namespace; it is never instantiated.
    """

    # Face-mesh landmark indices of the two eye contours:
    # element 0 = left eye, element 1 = right eye.
    eye_key_indicies = [
        [
            # Left eye
            # eye lower contour
            33,
            7,
            163,
            144,
            145,
            153,
            154,
            155,
            133,
            # eye upper contour (excluding corners)
            246,
            161,
            160,
            159,
            158,
            157,
            173
        ],
        [
            # Right eye
            # eye lower contour
            263,
            249,
            390,
            373,
            374,
            380,
            381,
            382,
            362,
            # eye upper contour (excluding corners)
            466,
            388,
            387,
            386,
            385,
            384,
            398
        ]
    ]

    # custom img resize function
    @classmethod
    def resize_img(cls, img, scale_percent):
        """Return *img* scaled to *scale_percent* percent of its original size."""
        width = int(img.shape[1] * scale_percent / 100.0)
        height = int(img.shape[0] * scale_percent / 100.0)
        return cv2.resize(img, (width, height), interpolation = cv2.INTER_AREA)

    # calculate eye apsect ratio to detect blinking
    # and/ or control closing/ opening of eye
    @classmethod
    def eye_aspect_ratio(cls, image_points, side):
        """Compute a modified eye-aspect ratio (EAR) for one eye.

        p1..p6 follow the classic 6-point EAR formula, but each of the four
        vertical points is the midpoint of two adjacent face-mesh contour
        landmarks.  The result is additionally scaled by an eyebrow-based
        factor (see the final expression below).
        """
        p1, p2, p3, p4, p5, p6 = 0, 0, 0, 0, 0, 0
        tip_of_eyebrow = 0
        # get the contour points at img pixel first
        # following the eye aspect ratio formula with little modifications
        # to match the facemesh model
        if side == Eyes.LEFT:
            eye_key_left = FacialFeatures.eye_key_indicies[0]
            # upper-contour midpoints
            p2 = np.true_divide(
                np.sum([image_points[eye_key_left[10]], image_points[eye_key_left[11]]], axis=0),
                2)
            p3 = np.true_divide(
                np.sum([image_points[eye_key_left[13]], image_points[eye_key_left[14]]], axis=0),
                2)
            # lower-contour midpoints
            p6 = np.true_divide(
                np.sum([image_points[eye_key_left[2]], image_points[eye_key_left[3]]], axis=0),
                2)
            p5 = np.true_divide(
                np.sum([image_points[eye_key_left[5]], image_points[eye_key_left[6]]], axis=0),
                2)
            # horizontal eye corners
            p1 = image_points[eye_key_left[0]]
            p4 = image_points[eye_key_left[7]]
            tip_of_eyebrow = image_points[105]
        elif side == Eyes.RIGHT:
            eye_key_right = FacialFeatures.eye_key_indicies[1]
            # NOTE: p2/p3 and p5/p6 (and p1/p4) are mirrored relative to the
            # left-eye branch because the right-eye contour is listed in the
            # opposite direction.
            p3 = np.true_divide(
                np.sum([image_points[eye_key_right[10]], image_points[eye_key_right[11]]], axis=0),
                2)
            p2 = np.true_divide(
                np.sum([image_points[eye_key_right[13]], image_points[eye_key_right[14]]], axis=0),
                2)
            p5 = np.true_divide(
                np.sum([image_points[eye_key_right[2]], image_points[eye_key_right[3]]], axis=0),
                2)
            p6 = np.true_divide(
                np.sum([image_points[eye_key_right[5]], image_points[eye_key_right[6]]], axis=0),
                2)
            p1 = image_points[eye_key_right[7]]
            p4 = image_points[eye_key_right[0]]
            tip_of_eyebrow = image_points[334]
        # https://downloads.hindawi.com/journals/cmmm/2020/1038906.pdf
        # Fig (3)
        ear = np.linalg.norm(p2-p6) + np.linalg.norm(p3-p5)
        ear /= (2 * np.linalg.norm(p1-p4) + 1e-6)
        # Scale by the eyebrow-to-landmark-2 distance over the
        # landmark-6-to-landmark-2 distance.  NOTE(review): landmarks 2 and 6
        # presumably lie on the nose/face midline — confirm against the mesh.
        ear = ear * (np.linalg.norm(tip_of_eyebrow-image_points[2]) / np.linalg.norm(image_points[6]-image_points[2]))
        return ear

    # calculate mouth aspect ratio to detect mouth movement
    # to control opening/ closing of mouth in avatar
    # https://miro.medium.com/max/1508/0*0rVqugQAUafxXYXE.jpg
    @classmethod
    def mouth_aspect_ratio(cls, image_points):
        """Compute the mouth aspect ratio (MAR) from 8 mouth landmarks."""
        p1 = image_points[78]
        p2 = image_points[81]
        p3 = image_points[13]
        p4 = image_points[311]
        p5 = image_points[308]
        p6 = image_points[402]
        p7 = image_points[14]
        p8 = image_points[178]
        # three vertical distances over twice the horizontal mouth width
        mar = np.linalg.norm(p2-p8) + np.linalg.norm(p3-p7) + np.linalg.norm(p4-p6)
        mar /= (2 * np.linalg.norm(p1-p5) + 1e-6)
        return mar

    @classmethod
    def mouth_distance(cls, image_points):
        """Return the distance between the two mouth corners (landmarks 78/308)."""
        p1 = image_points[78]
        p5 = image_points[308]
        return np.linalg.norm(p1-p5)

    @classmethod
    def detect_iris(cls, img, marks, side):
        """
        return:
           x: the x coordinate of the iris in the input image.
           y: the y coordinate of the iris in the input image.
           x_rate: how much the iris is toward the left. 0 means totally left and 1 is totally right.
           y_rate: how much the iris is toward the top. 0 means totally top and 1 is totally bottom.
        """
        # change the value here to suit your camera/ eye
        left_eye_threshold = 34
        right_eye_threshold = 45
        # mask is white everywhere except the eye polygon (filled black below)
        mask = np.full(img.shape[:2], 255, np.uint8)
        region = None
        if side == Eyes.LEFT:
            region = np.zeros((len(FacialFeatures.eye_key_indicies[0]), 2), np.int32)
            # get the pixel of the eyes region
            for i in range(region.shape[0]):
                region[i, 0] = marks[FacialFeatures.eye_key_indicies[0][i]][0]
                region[i, 1] = marks[FacialFeatures.eye_key_indicies[0][i]][1]
        elif side == Eyes.RIGHT:
            region = np.zeros((len(FacialFeatures.eye_key_indicies[1]), 2), np.int32)
            for i in range(region.shape[0]):
                region[i, 0] = marks[FacialFeatures.eye_key_indicies[1][i]][0]
                region[i, 1] = marks[FacialFeatures.eye_key_indicies[1][i]][1]
        # any failure (e.g. no contour found, empty crop) falls through to the
        # neutral default return at the bottom
        try:
            cv2.fillPoly(mask, [region], (0, 0, 0))
            # NOTE(review): bitwise_not with dst=img.copy() and this mask keeps
            # only the eye region usable — confirm intent vs. bitwise_and.
            eye = cv2.bitwise_not(img, img.copy(), mask=mask)
            # Cropping on the eye
            margin = 4
            min_x = np.min(region[:, 0]) - margin
            max_x = np.max(region[:, 0]) + margin
            min_y = np.min(region[:, 1]) - margin
            max_y = np.max(region[:, 1]) + margin
            eye = eye[min_y:max_y, min_x:max_x]
            # filtering
            eye_gray = cv2.cvtColor(eye, cv2.COLOR_BGR2GRAY)
            eye_gray = cv2.GaussianBlur(eye_gray, (5, 5), 0)
            # cv2.imshow("left eye gray" if side == Eyes.LEFT else "right eye gray",
            #     FacialFeatures.resize_img(eye_gray, 300))
            # follow tutorial for eye-motion tracking
            # https://youtu.be/kbdbZFT9NQI
            # threshold the image to show the pupil roi
            _, threshold = cv2.threshold(eye_gray,
                left_eye_threshold if side == Eyes.LEFT else right_eye_threshold,
                255, cv2.THRESH_BINARY_INV)
            # cv2.imshow("left eye threshold" if side == Eyes.LEFT else "right eye threshold", threshold)
            # search for contours and get the largest one
            contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            contours = sorted(contours, key = lambda x: cv2.contourArea(x), reverse = True)
            cnt = contours[0]
            cv2.drawContours(eye, [cnt], 0, (0, 255, 0), 1)
            # get the contour box, and get its x and y position
            (x, y, w, h) = cv2.boundingRect(cnt)
            x_center, y_center = x + int(w / 2), y + int(h / 2)
            # drawing
            # cv2.rectangle(eye, (x, y), (x+w, y+h), (255, 0, 0), 1)
            # cv2.line(eye, (x_center, 0), (x_center, eye.shape[0]), (0, 255, 0), 1)
            # cv2.line(eye, (0, y_center), (eye.shape[1], y_center), (0, 255, 0), 1)
            # print("%d, %d, %d, %d" % (min_x + margin, max_x - margin, min_y + margin, max_y - margin))
            # print("right eye: %d, %d, %.2f, %.2f" % (x_right, y_right, x_ratio_right, y_ratio_right))
            # calculate the ratio
            # ratios are the iris centre position relative to the un-padded
            # eye box, clamped to [0, 1]
            x_ratio = np.clip(x_center / (max_x - min_x - margin * 2), 0, 1)
            y_ratio = np.clip(y_center / (max_y - min_y - margin * 2), 0, 1)
            # cv2.imshow("left eye" if side == Eyes.LEFT else "right eye",
            #     FacialFeatures.resize_img(eye, 300))
            return x_center + (min_x - margin), y_center + (min_y - margin), x_ratio, y_ratio
        except Exception:
            # detection failed: report origin coordinates and a centred iris
            return 0, 0, 0.5, 0.5
|
{"hexsha": "bde232be1efca2f22ed22f1f11299e6947b52db1", "size": 8483, "ext": "py", "lang": "Python", "max_stars_repo_path": "vtuber/facial_features.py", "max_stars_repo_name": "goodspark/VTuber-Python-Unity-bak", "max_stars_repo_head_hexsha": "9ef5ebd34eb831d0355076364025d4dbd203f854", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-24T10:46:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-24T10:46:06.000Z", "max_issues_repo_path": "vtuber/facial_features.py", "max_issues_repo_name": "goodspark/VTuber-Python-Unity-bak", "max_issues_repo_head_hexsha": "9ef5ebd34eb831d0355076364025d4dbd203f854", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vtuber/facial_features.py", "max_forks_repo_name": "goodspark/VTuber-Python-Unity-bak", "max_forks_repo_head_hexsha": "9ef5ebd34eb831d0355076364025d4dbd203f854", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6244897959, "max_line_length": 118, "alphanum_fraction": 0.5419073441, "include": true, "reason": "import numpy", "num_tokens": 2310}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
def TestData(data):
    """Print basic diagnostics for *data*: its type, its value, and its strides.

    :param data: object to inspect; expected to expose a numpy-style
        ``strides`` attribute (e.g. ``numpy.ndarray``).
    """
    print(type(data))
    print(data)
    # ``ndarray.strides`` is an attribute, not a callable: the original
    # ``data.strides(32, 7)`` raised TypeError for every numpy array.
    print(data.strides)
# In[2]:
def AutoSB(inputs_train, outputs_train):
    """Grid-search three surrogate models on the training data.

    Fits an SVR, a RandomForestRegressor and a BayesianRidge regressor with
    :class:`~sklearn.model_selection.GridSearchCV`, printing each search's
    best parameters and best cross-validation score.

    :param inputs_train: training inputs (2-D array-like)
    :param outputs_train: training targets
    :return: tuple ``(gridSVM, gridRFR, gridBR)`` of fitted GridSearchCV objects
    """
    # Only the names actually used are imported; the original block imported
    # numpy twice plus ~8 unused modules (MLPRegressor, joblib, scipy.io, ...).
    from sklearn.svm import SVR
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.linear_model import BayesianRidge
    from sklearn.model_selection import GridSearchCV

    # support-vector regression search
    model_SVM = SVR()
    param_grid_SVM = {'kernel': ['linear', 'rbf'],
                      'C': [0.01, 0.1, 1, 10, 100],
                      'gamma': [1, 10, 100, 1000, 10000, 'auto']}
    gridSVM = GridSearchCV(model_SVM, param_grid_SVM, refit=True, verbose=0)
    gridSVM.fit(inputs_train, outputs_train)
    print(gridSVM.best_params_)
    print(gridSVM.best_score_)

    # random-forest search
    model_RFR = RandomForestRegressor()
    param_grid_RFR = {'n_estimators': [200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000],
                      'max_features': ['auto', 'sqrt', 'log2'],
                      'max_depth': [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, None],
                      'min_samples_leaf': [1, 2, 4],
                      'min_samples_split': [2, 5, 10]}
    gridRFR = GridSearchCV(model_RFR, param_grid_RFR, refit=True, verbose=0)
    gridRFR.fit(inputs_train, outputs_train)
    print(gridRFR.best_params_)
    print(gridRFR.best_score_)

    # Bayesian ridge search
    model_BR = BayesianRidge(n_iter=500)
    param_grid_BR = {'alpha_1': [1e-4, 1e-5, 1e-6, 1e-7],
                     'alpha_2': [1e-4, 1e-5, 1e-6, 1e-7],
                     'lambda_1': [1e-4, 1e-5, 1e-6, 1e-7],
                     'lambda_2': [1e-4, 1e-5, 1e-6, 1e-7]}
    gridBR = GridSearchCV(model_BR, param_grid_BR, refit=True, verbose=0)
    gridBR.fit(inputs_train, outputs_train)
    print(gridBR.best_params_)
    print(gridBR.best_score_)

    return gridSVM, gridRFR, gridBR
# In[3]:
def modelEvaluation(moldelList, Experiments, Expout):
    """Evaluate fitted regressors on held-out experiments.

    :param moldelList: iterable of fitted estimators exposing ``predict``
    :param Experiments: evaluation inputs
    :param Expout: evaluation targets
    :return: tuple ``(LScore, MSElist)`` — one R^2 score and one per-output
        MSE array per model, in input order
    """
    # Trimmed to the two names actually used; the original imported numpy
    # twice plus many unused modules.
    from sklearn.metrics import r2_score
    from sklearn.metrics import mean_squared_error

    LScore = []
    MSElist = []
    for model in moldelList:
        # hoist the prediction: the original called model.predict twice
        predictions = model.predict(Experiments)
        MSE = mean_squared_error(Expout, predictions, multioutput='raw_values')
        # NOTE(review): r2_score conventionally takes y_true first; the
        # original passes predictions first.  Kept as-is to preserve the
        # reported values — confirm intent (R^2 is not symmetric).
        Score = r2_score(predictions, Expout)
        LScore.append(Score)
        MSElist.append(MSE)
    return LScore, MSElist
# In[ ]:
def AdaptativeSampling(model, Experimentos, Salidas, NumExpe):
    """Propose new sample points via Voronoi vertices of the worst regions.

    Scores the model with leave-one-out cross-validation, builds a Voronoi
    tessellation of the existing experiments, averages each vertex's
    neighbouring region scores, and returns the ``NumExpe`` vertices with the
    lowest (worst) mean score.

    :param model: unfitted/fitted sklearn-compatible estimator
    :param Experimentos: existing experiment inputs, shape ``(Ne, Dim)``
    :param Salidas: corresponding outputs
    :param NumExpe: number of new sample points to propose
    :return: ``(New_points, VoroniScores, VoroniScoresExp)``
    """
    # Bug fix: cross_val_score was used but never imported (NameError).
    from sklearn.model_selection import cross_val_score
    from scipy.spatial import Voronoi
    import numpy as np

    Ne, Dim = Experimentos.shape
    # one fold per experiment == leave-one-out
    scores = cross_val_score(model, Experimentos, Salidas, cv=Ne,
                             scoring='neg_mean_gamma_deviance')
    vor = Voronoi(Experimentos, qhull_options="QJ")

    # mean score of every Voronoi vertex over the regions that contain it
    VoroniScoresExp = []
    VoroniScores = []
    for i, vertex in enumerate(vor.vertices):
        vetexscore = []
        # NOTE(review): vor.regions can be longer than scores; this assumes
        # region index j maps to fold j — confirm against Voronoi docs.
        for j, case in enumerate(vor.regions):
            if i in case:
                vetexscore.append(scores[j])
        VoroniScores.append(np.mean(np.array(vetexscore)))
        VoroniScoresExp.append(vetexscore)

    point_selection_order = np.argsort(np.array(VoroniScores))
    # Bug fix: the original computed point_selection_order but then returned
    # the first NumExpe vertices in arbitrary (construction) order; select the
    # NumExpe lowest-scoring vertices instead.
    New_points = vor.vertices[point_selection_order[:NumExpe], :]
    return New_points, VoroniScores, VoroniScoresExp
|
{"hexsha": "f608403e8a86fb3de3bf55dcea5644d786165b16", "size": 4376, "ext": "py", "lang": "Python", "max_stars_repo_path": "Magnetic circuit/AutoML_SM_V1.py", "max_stars_repo_name": "Duchanoy/ASAMS", "max_stars_repo_head_hexsha": "829e9c8c32a4d26b3acdc25de95804aa956e44f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Magnetic circuit/AutoML_SM_V1.py", "max_issues_repo_name": "Duchanoy/ASAMS", "max_issues_repo_head_hexsha": "829e9c8c32a4d26b3acdc25de95804aa956e44f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Magnetic circuit/AutoML_SM_V1.py", "max_forks_repo_name": "Duchanoy/ASAMS", "max_forks_repo_head_hexsha": "829e9c8c32a4d26b3acdc25de95804aa956e44f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1515151515, "max_line_length": 97, "alphanum_fraction": 0.678702011, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1179}
|
# Phase 4: data analysis (Eurovision voting data)
# NOTE(review): `dv2` is defined elsewhere in the project and this result is
# discarded — presumably an exploratory leftover; confirm.
tabela3[-dv2,] %>% group_by(Drzava)
# Who received the most on average: tabela5$Stevilo_nastopov and tabela
# (keep only countries whose first appearance + appearance count reaches 1993)
pod9 <- tabela5[-which(tabela5$Stevilo_nastopov + tabela5$Prvi_nastop < 1993),]
# To whom Slovenia gave the maximum points, and who actually won
df1 <- tabela4 %>% group_by(Leto) %>% filter(Tocke == max(Tocke))
# drop years without a usable winner and restrict to the post-1992 era
pod_zmagovalci2 <- pod_zmagovalci[-which(grepl(1994, pod_zmagovalci$LETO)),]
pod_zmagovalci2 <- pod_zmagovalci2[-which(grepl(2000, pod_zmagovalci2$LETO)),]
pod_zmagovalci2 <- subset(pod_zmagovalci2, LETO > 1992)
df1 <- df1 %>% merge(pod_zmagovalci2, by.x="Leto", by.y="LETO") %>%
  select(-c(NASTOPAJOCI, NASLOV)) %>%
  rename("Zmagovalna" = DRZAVA, "Zmag_tocke" = TOCKE)
# Number of times the top-points vote went to the eventual winner
sum(df1$Drzava == df1$Zmagovalna)
sort(df1[which(df1$Drzava != df1$Zmagovalna),]$Drzava)
# Yugoslavia era (1961-1992), with years lacking winner data removed
df2 <- tabela3 %>% group_by(Leto) %>% filter(Tocke == max(Tocke))
pod_zmagovalci3 <- subset(pod_zmagovalci, LETO < 1993 & LETO > 1960)
pod_zmagovalci3 <- pod_zmagovalci3[-which(grepl(1977, pod_zmagovalci3$LETO)),]
pod_zmagovalci3 <- pod_zmagovalci3[-which(grepl(1978, pod_zmagovalci3$LETO)),]
pod_zmagovalci3 <- pod_zmagovalci3[-which(grepl(1979, pod_zmagovalci3$LETO)),]
pod_zmagovalci3 <- pod_zmagovalci3[-which(grepl(1980, pod_zmagovalci3$LETO)),]
pod_zmagovalci3 <- pod_zmagovalci3[-which(grepl(1985, pod_zmagovalci3$LETO)),]
df2 <- df2 %>% merge(pod_zmagovalci3, by.x="Leto", by.y="LETO") %>%
  select(-c(NASTOPAJOCI, NASLOV)) %>%
  rename("Zmagovalna" = DRZAVA, "Zmag_tocke" = TOCKE)
# Number of times the top-points vote went to the eventual winner
sum(df2$Drzava == df2$Zmagovalna)
# Can we predict to whom Slovenia gives its maximum points?
summary(lm(Vsota ~ Drzava, data = tabela9))
summary(lm(Zmag_tocke ~ Tocke, data=df1))
# exclude countries with too few appearances / discontinued entries
summary(model6 <- lm(Tocke ~ Drzava, data=subset(tabela4, Drzava != "Slovenia" &
                                                   Drzava != "Morocco" &
                                                   Drzava != "Luxembourg" &
                                                   Drzava != "Yugoslavia" &
                                                   Drzava != "Czech Republic" &
                                                   Drzava != "Slovakia" &
                                                   Drzava != "Monaco" &
                                                   Drzava != "Australia" &
                                                   Drzava != "Andorra" &
                                                   Drzava != "Serbia & Montenegro")))
# Whom we voted for in the years we did not vote for the eventual winner
df1$Drzava[which(df1$Drzava != df1$Zmagovalna)]
# Language, final qualification, and performer type (female/male/group) of the songs
graf2
summary(lm(UVRSTITEV ~ TOCKE, data = tabela1))
summary(lm(UVRSTITEV ~ as.numeric(TOCKE), data = df3))
# Slovenia: placement, final qualification, language, line-up
summary(model1 <- lm(Finale ~ Jezik, data=df3))
summary(model2 <- lm(UVRSTITEV ~ Jezik, data=df3))
summary(model3 <- lm(Finale ~ Zasedba, data=df3))
summary(model4 <- lm(UVRSTITEV ~ Zasedba, data=df3))
summary(model5 <- lm(UVRSTITEV ~ Jezik + Zasedba, data=df3))
{"hexsha": "9fa7f30221bbe0c3ae32aca0ad225d1f2f8e7455", "size": 3078, "ext": "r", "lang": "R", "max_stars_repo_path": "analiza/analiza.r", "max_stars_repo_name": "brinapirc/APPR-2020-21", "max_stars_repo_head_hexsha": "19b58cc14ec8fce5c76cdeea956937adc2b957d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analiza/analiza.r", "max_issues_repo_name": "brinapirc/APPR-2020-21", "max_issues_repo_head_hexsha": "19b58cc14ec8fce5c76cdeea956937adc2b957d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-09-03T09:59:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-07T15:42:55.000Z", "max_forks_repo_path": "analiza/analiza.r", "max_forks_repo_name": "brinapirc/APPR-2020-21", "max_forks_repo_head_hexsha": "19b58cc14ec8fce5c76cdeea956937adc2b957d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.5365853659, "max_line_length": 85, "alphanum_fraction": 0.6078622482, "num_tokens": 1105}
|
"""This creates Pandas dataframes containing predictions.
"""
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
from typing import Callable, List, Iterable
import logging
import sys
import itertools as it
from pathlib import Path
import numpy as np
import pandas as pd
from zensols.persist import persisted
from zensols.deeplearn.vectorize import CategoryEncodableFeatureVectorizer
from zensols.deeplearn.batch import Batch, BatchStash, DataPoint
from . import ModelResultError, ModelResult, EpochResult
logger = logging.getLogger(__name__)
@dataclass
class PredictionsDataFrameFactory(object):
    """Create a Pandas data frame containing results from a result as output from a
    ``ModelExecutor``.  The data frame contains the feature IDs, labels,
    predictions mapped back to their original value from the feature data item.

    Currently only classification models are supported.
    """
    source: Path = field()
    """The source file from where the results were unpickled."""

    result: ModelResult = field()
    """The epoch containing the results."""

    stash: BatchStash = field()
    """The batch stash used to generate the results from the
    :class:`~zensols.deeplearn.model.ModelExecutor`.  This is used to get the
    vectorizer to reverse map the labels.

    """
    column_names: List[str] = field(default=None)
    """The list of string column names for each data item the list returned from
    ``data_point_transform`` to be added to the results for each
    label/prediction

    """
    data_point_transform: Callable[[DataPoint], tuple] = field(default=None)
    """A function that returns a tuple, each with an element respective of
    ``column_names`` to be added to the results for each label/prediction; if
    ``None`` (the default), ``str`` used (see the `Iris Jupyter Notebook
    <https://github.com/plandes/deeplearn/blob/master/notebook/iris.ipynb>`_
    example)

    """
    batch_limit: int = sys.maxsize
    """The max number of batches of results to output."""

    def __post_init__(self):
        # fill in the defaults documented on the fields; note the default
        # column_names value is a tuple, not a list, despite the annotation
        if self.column_names is None:
            self.column_names = ('data',)
        if self.data_point_transform is None:
            self.data_point_transform = lambda dp: (str(dp),)

    @property
    def name(self) -> str:
        """The name of the results taken from :class:`.ModelResult`."""
        return self.result.name

    @property
    def epoch_result(self) -> EpochResult:
        """The epoch containing the results (first test-split epoch result)."""
        return self.result.test.results[0]

    def _batch_data_frame(self) -> Iterable[pd.DataFrame]:
        """Return a data frame for each batch, with labels/predictions mapped
        back to their original string values.

        """
        transform = self.data_point_transform
        # (batch id, predictions, labels) per batch from the epoch result
        batches = zip(self.epoch_result.batch_ids,
                      self.epoch_result.batch_predictions,
                      self.epoch_result.batch_labels)
        i: int
        preds: List[np.ndarray]
        labs: List[np.ndarray]
        for i, preds, labs in it.islice(batches, self.batch_limit):
            batch: Batch = self.stash[i]
            vec: CategoryEncodableFeatureVectorizer = \
                batch.get_label_feature_vectorizer()
            # reverse mapping is only defined for categorical label vectorizers
            if not isinstance(vec, CategoryEncodableFeatureVectorizer):
                raise ModelResultError(
                    f'expecting a category feature vectorizer but got: {vec}')
            # map the numeric labels/predictions back to the original values
            inv_trans = vec.label_encoder.inverse_transform
            preds = inv_trans(preds)
            labs = inv_trans(labs)
            rows = []
            for dp, lab, pred in zip(batch.get_data_points(), labs, preds):
                # sanity check: the data point's own label must match the
                # label recorded in the epoch result
                assert dp.label == lab
                row = [dp.id, lab, pred, lab == pred]
                row.extend(transform(dp))
                rows.append(row)
            cols = 'id label pred correct'.split() + list(self.column_names)
            yield pd.DataFrame(rows, columns=cols)

    @property
    @persisted('_dataframe')
    def dataframe(self) -> pd.DataFrame:
        """Return the dataframe of results.  The first columns are generated from
        ``data_point_transform``, and the remaining columns are:

        - id: the ID of the feature (not batch) data item
        - label: the label given by the feature data item
        - pred: the prediction
        - correct: whether or not the prediction was correct

        """
        return pd.concat(self._batch_data_frame(), ignore_index=True)
|
{"hexsha": "d7d5081507b6a74359c0ed8bc548257d3945e5c9", "size": 4416, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/zensols/deeplearn/result/pred.py", "max_stars_repo_name": "plandes/deeplearn", "max_stars_repo_head_hexsha": "925f02200c62a7dc798e474ed94a86e009fd1ebf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-30T17:19:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-04T03:48:59.000Z", "max_issues_repo_path": "src/python/zensols/deeplearn/result/pred.py", "max_issues_repo_name": "plandes/deeplearn", "max_issues_repo_head_hexsha": "925f02200c62a7dc798e474ed94a86e009fd1ebf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/zensols/deeplearn/result/pred.py", "max_forks_repo_name": "plandes/deeplearn", "max_forks_repo_head_hexsha": "925f02200c62a7dc798e474ed94a86e009fd1ebf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1967213115, "max_line_length": 83, "alphanum_fraction": 0.6607789855, "include": true, "reason": "import numpy", "num_tokens": 961}
|
#ifndef ILLUMINATE_TEST_WORKER_GROUP_HPP
#define ILLUMINATE_TEST_WORKER_GROUP_HPP
// Includes {{{
#include <boost/optional.hpp>
#include <boost/thread.hpp>
#include <iostream>
#include <vector>
#include "future.hpp"
#include "test_worker.hpp"
// }}}
namespace Illuminate {
//! A group of test workers
/*!
This class creates a group of worker threads which immediately start executing the tests in the global test tree and storing their results in \c futures.
\sa TestWorker
*/
class TestWorkerGroup { // {{{
protected:
    //! Queue of test tasks still to be executed by the workers.
    TestQueue queue;
    //! A mutex that must be locked before accessing the test queue.
    boost::shared_ptr<boost::mutex> queue_mutex;
public:
    //! Futures providing the test results, one per queued test.
    TestFutures futures;
    //! The thread group holding the worker threads.
    boost::thread_group workers;
    //! Constructor.
    TestWorkerGroup(
        //! number of workers (threads) to create
        unsigned int number_of_workers,
        //! the fetcher to use for obtaining test results (defaults to Test::run)
        TestResultFetcher fetchResult=Test::run,
        //! the ids of the tests to run; if none, then all tests are run
        boost::optional<std::vector<unsigned int> const&> maybe_test_ids=boost::none
    );
    //! Destructor.  NOTE(review): presumably joins the workers — confirm in the implementation.
    ~TestWorkerGroup();
}; // }}}
}
#endif
|
{"hexsha": "8039c152985e55b8f910d86b5a69d170f99ff2a7", "size": 1323, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "includes/illuminate/test_worker_group.hpp", "max_stars_repo_name": "gcross/Illuminate", "max_stars_repo_head_hexsha": "862f665ccd4b67411bc332f534e1655585750823", "max_stars_repo_licenses": ["0BSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "includes/illuminate/test_worker_group.hpp", "max_issues_repo_name": "gcross/Illuminate", "max_issues_repo_head_hexsha": "862f665ccd4b67411bc332f534e1655585750823", "max_issues_repo_licenses": ["0BSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "includes/illuminate/test_worker_group.hpp", "max_forks_repo_name": "gcross/Illuminate", "max_forks_repo_head_hexsha": "862f665ccd4b67411bc332f534e1655585750823", "max_forks_repo_licenses": ["0BSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9411764706, "max_line_length": 153, "alphanum_fraction": 0.6953892668, "num_tokens": 299}
|
import logging
import os
import gensim.downloader as api
from gensim import matutils
from nltk.tokenize import word_tokenize
import numpy as np
from quasimodo.parameters_reader import ParametersReader
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
from quasimodo.assertion_output.tsv_output_submodule import get_version
SLICE_SIZE = 10000  # Change this in case of Memory problems (rows per dot-product slice)
TOPK = 100  # number of nearest predicate-object neighbours kept per phrase
parameters_reader = ParametersReader()
# Output directory for the final TSV: configured "out-dir", else ./out/ next to this module.
OUT_DIR = parameters_reader.get_parameter("out-dir") or os.path.dirname(__file__) + "/out/"
def save_tsv_triples(triples):
    """Write the TSV-formatted fact rows to the versioned quasimodo output file.

    The file gets a tab-separated header line followed by one row per triple
    (rows are pre-formatted strings).
    """
    header_columns = ["subject", "predicate", "object", "modality", "is_negative",
                      "score", "sentences source", "typicality", "saliency"]
    destination = OUT_DIR + "quasimodo" + str(get_version()) + ".tsv"
    with open(destination, "w") as output_file:
        output_file.write("\t".join(header_columns) + "\n")
        output_file.write("\n".join(triples))
def get_raw_predicate(predicate):
    """Map synthetic predicates back to their underlying verb.

    ``has_body_part`` becomes ``have``; any other predicate containing
    ``has_`` becomes ``be``; everything else is returned unchanged.
    """
    if predicate == "has_body_part":
        return "have"
    if "has_" in predicate:
        return "be"
    return predicate
class SaliencyAndTypicalityComputationSubmodule(SubmoduleInterface):
def __init__(self, module_reference):
super().__init__()
self._module_reference = module_reference
self._name = "Saliency and typicality"
self.total_per_subject = dict()
self.total_per_po = dict()
self.idx2keys = []
self.idx2total = []
self.vectors = None
self.probabilities_po = None
self.closest_indexes = []
def get_all_triples_as_tsv(self, input_interface):
triples = []
for generated_fact in input_interface.get_generated_facts():
row_tsv = generated_fact.get_tsv()
subj = generated_fact.get_subject().get()
pred = get_raw_predicate(generated_fact.get_predicate().get())
obj = generated_fact.get_object().get()
score = generated_fact.get_score().scores[0][0]
po = pred + " " + obj
if self.max_tau.get(subj, 0) != 0:
tau = score / self.total_per_subject[subj] / self.max_tau.get(subj, 0)
else:
tau = score / self.total_per_subject[subj]
if self.max_sigma.get(po, 0) != 0:
sigma = score / self.total_per_po[po] / self.max_sigma.get(po, 0)
else:
sigma = score / self.total_per_po[po]
triples.append(row_tsv + "\t" + str(tau) + "\t" + str(sigma))
return triples
def set_max_tau_and_sigma(self, input_interface):
self.max_tau = dict()
self.max_sigma = dict()
for generated_fact in input_interface.get_generated_facts():
subj = generated_fact.get_subject().get()
pred = get_raw_predicate(generated_fact.get_predicate().get())
obj = generated_fact.get_object().get()
score = generated_fact.get_score().scores[0][0]
po = pred + " " + obj
self.max_tau[subj] = max(score / self.total_per_subject[subj],
self.max_tau.get(subj, 0))
self.max_sigma[po] = max(score / self.total_per_po[po],
self.max_sigma.get(po, 0))
def process(self, input_interface):
logging.info("Start TSV output submodule")
self.initialize_statistics(input_interface)
self.initialize_idx_correspondences()
self.initialize_po_vectors()
self.set_closest_indexes()
self.compute_probabilities()
self.match_probabilities()
self.set_max_tau_and_sigma(input_interface)
self.save_final_results(input_interface)
return input_interface
def save_final_results(self, input_interface):
triples = self.get_all_triples_as_tsv(input_interface)
save_tsv_triples(triples)
def match_probabilities(self):
for i, key in enumerate(self.idx2keys):
self.total_per_po[key] = self.probabilities_po[i]
def set_closest_indexes(self):
self.closest_indexes = []
distances_temp = None
for i in range(self.vectors.shape[0]):
if i % SLICE_SIZE == 0:
distances_temp = np.dot(self.vectors[i:i + SLICE_SIZE], self.vectors.T)
idx_closest = matutils.argsort(distances_temp[i % SLICE_SIZE],
topn=TOPK,
reverse=True)
self.closest_indexes.append([(j, (1 - distances_temp[i % SLICE_SIZE][j])) for j in idx_closest])
return self.closest_indexes
def compute_probabilities(self):
    """Set probabilities_po[i] to the weighted average of idx2total over the
    neighbours listed in closest_indexes[i] (weights are the stored second
    components)."""
    row_count = self.vectors.shape[0]
    self.probabilities_po = np.zeros(row_count)
    for row in range(row_count):
        total_weight = 0.0
        weighted_total = 0.0
        for neighbour_idx, weight in self.closest_indexes[row]:
            total_weight += weight
            weighted_total += weight * self.idx2total[neighbour_idx]
        self.probabilities_po[row] = weighted_total / total_weight
def initialize_po_vectors(self):
    """Build a unit-normalized bag-of-words embedding for each predicate-object key.

    Each row of self.vectors is the mean word2vec embedding of the tokens of
    the corresponding key in self.idx2keys (out-of-vocabulary tokens are
    skipped).  Rows are then L2-normalized so dot products act as cosine
    similarities in set_closest_indexes.
    """
    model = api.load("word2vec-google-news-300")
    self.vectors = np.zeros((len(self.idx2keys), model.vector_size))
    for i, sentence in enumerate(self.idx2keys):
        sentence = sentence.lower().replace("_", " ")
        sentence = word_tokenize(sentence)
        counter = 0
        for word in sentence:
            if word in model.vocab:
                self.vectors[i] += model.get_vector(word)
                counter += 1
        if counter != 0:
            self.vectors[i] = self.vectors[i] / counter
    norms = np.sqrt((self.vectors * self.vectors).sum(axis=1)).reshape(self.vectors.shape[0], 1)
    # Bug fix: a key whose tokens are all out-of-vocabulary keeps a zero
    # vector, and dividing by its zero norm turned the whole row into NaN,
    # which then poisoned every dot product downstream.  Leave such rows zero.
    norms[norms == 0] = 1
    self.vectors /= norms
def initialize_idx_correspondences(self):
    """Fix an ordering over the predicate-object keys and cache their totals in
    the same order (idx2keys[i] maps to idx2total[i])."""
    self.idx2keys = []
    self.idx2total = []
    for key in self.total_per_po:
        self.idx2keys.append(key)
        self.idx2total.append(self.total_per_po[key])
def initialize_statistics(self, input_interface):
    """Accumulate total scores per subject and per predicate-object pair over
    all generated facts."""
    self.total_per_subject = {}
    self.total_per_po = {}
    for fact in input_interface.get_generated_facts():
        subject = fact.get_subject().get()
        predicate = get_raw_predicate(fact.get_predicate().get())
        obj = fact.get_object().get()
        score = fact.get_score().scores[0][0]
        self.total_per_subject[subject] = self.total_per_subject.get(subject, 0) + score
        pair = predicate + " " + obj
        self.total_per_po[pair] = self.total_per_po.get(pair, 0) + score
|
{"hexsha": "33511614b0b38ed75ef9ccfe5da157f9571fac0e", "size": 6615, "ext": "py", "lang": "Python", "max_stars_repo_path": "quasimodo/assertion_output/saliency_and_typicality_computation_submodule.py", "max_stars_repo_name": "Aunsiels/CSK", "max_stars_repo_head_hexsha": "c88609bc76d865b4987aaf30ddf1247a2031b1a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-11-28T13:26:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T09:53:10.000Z", "max_issues_repo_path": "quasimodo/assertion_output/saliency_and_typicality_computation_submodule.py", "max_issues_repo_name": "Aunsiels/CSK", "max_issues_repo_head_hexsha": "c88609bc76d865b4987aaf30ddf1247a2031b1a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-26T20:31:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-15T08:52:47.000Z", "max_forks_repo_path": "quasimodo/assertion_output/saliency_and_typicality_computation_submodule.py", "max_forks_repo_name": "Aunsiels/CSK", "max_forks_repo_head_hexsha": "c88609bc76d865b4987aaf30ddf1247a2031b1a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-08-14T23:23:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-24T14:02:35.000Z", "avg_line_length": 41.0869565217, "max_line_length": 116, "alphanum_fraction": 0.6234315949, "include": true, "reason": "import numpy", "num_tokens": 1485}
|
/-
Copyright (c) 2020 Aaron Anderson, Jalex Stark. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson, Jalex Stark
-/
import linear_algebra.matrix.charpoly.coeff
import linear_algebra.matrix.to_lin
import ring_theory.power_basis
/-!
# The minimal polynomial divides the characteristic polynomial of a matrix.
-/
noncomputable theory
universes u v
open polynomial matrix
variables {R : Type u} [comm_ring R]
variables {n : Type v} [decidable_eq n] [fintype n]
open finset
variable {M : matrix n n R}
namespace matrix
-- Every square matrix is integral over the base ring: its characteristic
-- polynomial is monic and the matrix is a root of it (`aeval_self_charpoly`).
theorem is_integral : is_integral R M := ⟨M.charpoly, ⟨charpoly_monic M, aeval_self_charpoly M⟩⟩
-- Over a field, the minimal polynomial divides any annihilating polynomial,
-- in particular the characteristic polynomial.
theorem minpoly_dvd_charpoly {K : Type*} [field K] (M : matrix n n K) :
(minpoly K M) ∣ M.charpoly :=
minpoly.dvd _ _ (aeval_self_charpoly M)
end matrix
section power_basis
open algebra
/-- The characteristic polynomial of the map `λ x, a * x` is the minimal polynomial of `a`.
In combination with `det_eq_sign_charpoly_coeff` or `trace_eq_neg_charpoly_coeff`
and a bit of rewriting, this will allow us to conclude the
field norm resp. trace of `x` is the product resp. sum of `x`'s conjugates.
-/
lemma charpoly_left_mul_matrix {K S : Type*} [field K] [comm_ring S] [algebra K S]
(h : power_basis K S) :
(left_mul_matrix h.basis h.gen).charpoly = minpoly K h.gen :=
begin
-- `minpoly.unique`: a monic annihilating polynomial of minimal degree
-- is the minimal polynomial; discharge its three obligations in turn.
apply minpoly.unique,
{ apply matrix.charpoly_monic },
{ apply (injective_iff_map_eq_zero (left_mul_matrix _)).mp (left_mul_matrix_injective h.basis),
rw [← polynomial.aeval_alg_hom_apply, aeval_self_charpoly] },
{ intros q q_monic root_q,
rw [matrix.charpoly_degree_eq_dim, fintype.card_fin, degree_eq_nat_degree q_monic.ne_zero],
apply with_bot.some_le_some.mpr,
exact h.dim_le_nat_degree_of_root q_monic.ne_zero root_q }
end
end power_basis
|
{"author": "nick-kuhn", "repo": "leantools", "sha": "567a98c031fffe3f270b7b8dea48389bc70d7abb", "save_path": "github-repos/lean/nick-kuhn-leantools", "path": "github-repos/lean/nick-kuhn-leantools/leantools-567a98c031fffe3f270b7b8dea48389bc70d7abb/src/linear_algebra/matrix/charpoly/minpoly.lean"}
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parallel.data_parallel import DataParallel
from util.metrics import batch_pix_accuracy, batch_intersection_union
from . import resnet
up_kwargs = {'mode': 'bilinear', 'align_corners': True}
__all__ = ['BaseNet', 'MultiEvalModule']
class BaseNet(nn.Module):
    """Shared base class for segmentation networks.

    Stores dataset/evaluation settings (class count, crop/base sizes,
    normalization mean/std — the default lists are only read, never mutated)
    and optionally builds a dilated ResNet backbone from the local `resnet`
    module.  Several constructor arguments (c, depth, genotype, dropout_prob,
    double_down_channel) are accepted for subclass use and not stored here.
    """

    def __init__(self, nclass, aux, c=None, depth=None, genotype=None, dropout_prob=0,
                 double_down_channel=True, backbone=None, dilated=True, norm_layer=None,
                 base_size=520, crop_size=480, mean=[.485, .456, .406], std=[.229, .224, .225],
                 pb_root='/train_tiny_data/imgseg/pretrained/'):
        super(BaseNet, self).__init__()
        self.nclass = nclass
        self.aux = aux
        self.mean = mean
        self.std = std
        self.base_size = base_size
        self.crop_size = crop_size
        # Bug fix: base_forward() dispatches on self.backbone, but the
        # argument was never stored, so every such call raised AttributeError.
        self.backbone = backbone
        if backbone == 'resnet50':
            self.pretrained = resnet.resnet50(pretrained=True, dilated=dilated,
                                              norm_layer=norm_layer, root=pb_root)
        elif backbone == 'resnet101':
            self.pretrained = resnet.resnet101(pretrained=True, dilated=dilated,
                                               norm_layer=norm_layer, root=pb_root)
        elif backbone == 'resnet152':
            self.pretrained = resnet.resnet152(pretrained=True, dilated=dilated,
                                               norm_layer=norm_layer, root=pb_root)
        else:
            print('This model does not need pretrained backbone')
        # bilinear upsample options
        self._up_kwargs = up_kwargs

    def base_forward(self, x):
        """Run the backbone and return its intermediate feature maps.

        Wideresnet backbones return (None, None, c3, x); ResNet backbones
        return the four stage outputs (c1, c2, c3, c4).
        """
        if self.backbone.startswith('wideresnet'):
            x = self.pretrained.mod1(x)
            x = self.pretrained.pool2(x)
            x = self.pretrained.mod2(x)
            x = self.pretrained.pool3(x)
            x = self.pretrained.mod3(x)
            x = self.pretrained.mod4(x)
            x = self.pretrained.mod5(x)
            c3 = x.clone()
            x = self.pretrained.mod6(x)
            x = self.pretrained.mod7(x)
            x = self.pretrained.bn_out(x)
            return None, None, c3, x
        else:
            x = self.pretrained.conv1(x)
            x = self.pretrained.bn1(x)
            x = self.pretrained.relu(x)
            x = self.pretrained.maxpool(x)
            c1 = self.pretrained.layer1(x)
            c2 = self.pretrained.layer2(c1)
            c3 = self.pretrained.layer3(c2)
            c4 = self.pretrained.layer4(c3)
            return c1, c2, c3, c4

    def evaluate(self, x, target=None):
        """Forward `x`; with a `target`, also compute accuracy/IoU counts.

        Returns the raw prediction when target is None, otherwise the tuple
        (correct, labeled, inter, union) from the util.metrics helpers.
        """
        pred = self.forward(x)
        if isinstance(pred, (tuple, list)):
            pred = pred[0]
        if target is None:
            return pred
        correct, labeled = batch_pix_accuracy(pred.data, target.data)
        inter, union = batch_intersection_union(pred.data, target.data, self.nclass)
        return correct, labeled, inter, union
class MultiEvalModule(DataParallel):
    """Multi-size Segmentation Eavluator"""

    def __init__(self, module, nclass, device_ids=None, flip=True,
                 scales=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75]):
        # `module` is the wrapped segmentation network; its base/crop sizes
        # drive the multi-scale schedule below.  `scales` defaults are only
        # read, never mutated.
        super(MultiEvalModule, self).__init__(module, device_ids)
        self.nclass = nclass
        self.base_size = module.base_size
        self.crop_size = module.crop_size
        self.scales = scales
        self.flip = flip
        print('MultiEvalModule: base_size {}, crop_size {}'. \
            format(self.base_size, self.crop_size))

    def forward(self, image):
        """Mult-size Evaluation"""
        # only single image is supported for evaluation
        batch, _, h, w = image.size()
        assert(batch == 1)
        # Sliding-window stride is 2/3 of the crop size, so windows overlap.
        stride_rate = 2.0/3.0
        crop_size = self.crop_size
        stride = int(crop_size * stride_rate)
        with torch.cuda.device_of(image):
            # Accumulator for the class scores summed over all scales.
            scores = image.new().resize_(batch,self.nclass,h,w).zero_().cuda()
        for scale in self.scales:
            # Resize so the LONG side equals base_size * scale, keeping aspect.
            long_size = int(math.ceil(self.base_size * scale))
            if h > w:
                height = long_size
                width = int(1.0 * w * long_size / h + 0.5)
                short_size = width
            else:
                width = long_size
                height = int(1.0 * h * long_size / w + 0.5)
                short_size = height
            """
            short_size = int(math.ceil(self.base_size * scale))
            if h > w:
                width = short_size
                height = int(1.0 * h * short_size / w)
                long_size = height
            else:
                height = short_size
                width = int(1.0 * w * short_size / h)
                long_size = width
            """
            # resize image to current size
            cur_img = resize_image(image, height, width, **self.module._up_kwargs)
            if long_size <= crop_size:
                # Whole image fits in one crop: pad, run once, crop back.
                pad_img = pad_image(cur_img, self.module.mean,
                                    self.module.std, crop_size)
                outputs = module_inference(self.module, pad_img, self.flip)
                outputs = crop_image(outputs, 0, height, 0, width)
            else:
                if short_size < crop_size:
                    # pad if needed
                    pad_img = pad_image(cur_img, self.module.mean,
                                        self.module.std, crop_size)
                else:
                    pad_img = cur_img
                _,_,ph,pw = pad_img.size()
                assert(ph >= height and pw >= width)
                # grid forward and normalize
                h_grids = int(math.ceil(1.0 * (ph-crop_size)/stride)) + 1
                w_grids = int(math.ceil(1.0 * (pw-crop_size)/stride)) + 1
                with torch.cuda.device_of(image):
                    # Per-pixel score sum and overlap count for averaging.
                    outputs = image.new().resize_(batch,self.nclass,ph,pw).zero_().cuda()
                    count_norm = image.new().resize_(batch,1,ph,pw).zero_().cuda()
                # grid evaluation
                for idh in range(h_grids):
                    for idw in range(w_grids):
                        h0 = idh * stride
                        w0 = idw * stride
                        h1 = min(h0 + crop_size, ph)
                        w1 = min(w0 + crop_size, pw)
                        crop_img = crop_image(pad_img, h0, h1, w0, w1)
                        # pad if needed
                        pad_crop_img = pad_image(crop_img, self.module.mean,
                                                 self.module.std, crop_size)
                        output = module_inference(self.module, pad_crop_img, self.flip)
                        outputs[:,:,h0:h1,w0:w1] += crop_image(output,
                            0, h1-h0, 0, w1-w0)
                        count_norm[:,:,h0:h1,w0:w1] += 1
                # Every pixel must be covered by at least one window.
                assert((count_norm==0).sum()==0)
                outputs = outputs / count_norm
                outputs = outputs[:,:,:height,:width]
            # Resample this scale's result back to the input resolution and sum.
            score = resize_image(outputs, h, w, **self.module._up_kwargs)
            scores += score
        return scores
def module_inference(module, image, flip=True):
    """Evaluate one image; optionally add the prediction for the horizontally
    flipped view (flipped back).  Returns the exponential of the summed output."""
    output = module.evaluate(image)
    if flip:
        mirrored = flip_image(image)
        output = output + flip_image(module.evaluate(mirrored))
    return output.exp()
def resize_image(img, h, w, **up_kwargs):
    """Resample a NCHW tensor to spatial size (h, w) using the given
    interpolation options (e.g. mode / align_corners)."""
    return F.interpolate(img, size=(h, w), **up_kwargs)
def pad_image(img, mean, std, crop_size):
    """Pad a 3-channel NCHW tensor on the bottom/right up to crop_size.

    The fill value per channel is -mean/std, i.e. the value that a zero pixel
    takes after normalization.
    """
    b, c, h, w = img.size()
    assert(c == 3)
    pad_h = max(crop_size - h, 0)
    pad_w = max(crop_size - w, 0)
    fill = -np.array(mean) / np.array(std)
    padded = img.new().resize_(b, c, h + pad_h, w + pad_w)
    for channel in range(c):
        # note that pytorch pad params is in reversed orders
        padded[:, channel, :, :] = F.pad(img[:, channel, :, :],
                                         (0, pad_w, 0, pad_h),
                                         value=fill[channel])
    assert(padded.size(2) >= crop_size and padded.size(3) >= crop_size)
    return padded
def crop_image(img, h0, h1, w0, w1):
    """Return the [h0:h1, w0:w1] spatial window of a NCHW tensor (a view, no copy)."""
    return img[..., h0:h1, w0:w1]
def flip_image(img):
    """Mirror a NCHW tensor along its width (last) dimension."""
    assert(img.dim() == 4)
    with torch.cuda.device_of(img):
        reversed_idx = torch.arange(img.size(3) - 1, -1, -1).type_as(img).long()
        return img.index_select(3, reversed_idx)
|
{"hexsha": "bd70c5134f3b142e3d6429ef854651ecc5ef605b", "size": 8503, "ext": "py", "lang": "Python", "max_stars_repo_path": "NasUnet/models/base.py", "max_stars_repo_name": "mlvc-lab/Segmentation-NAS", "max_stars_repo_head_hexsha": "a9387a1546dacfa2dc6ee1f70366542a1552e541", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-03-26T11:05:08.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-22T08:37:20.000Z", "max_issues_repo_path": "NasUnet/models/base.py", "max_issues_repo_name": "mlvc-lab/Segmentation-NAS", "max_issues_repo_head_hexsha": "a9387a1546dacfa2dc6ee1f70366542a1552e541", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NasUnet/models/base.py", "max_forks_repo_name": "mlvc-lab/Segmentation-NAS", "max_forks_repo_head_hexsha": "a9387a1546dacfa2dc6ee1f70366542a1552e541", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-03-26T11:05:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T11:29:00.000Z", "avg_line_length": 41.681372549, "max_line_length": 95, "alphanum_fraction": 0.5286369517, "include": true, "reason": "import numpy", "num_tokens": 1994}
|
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule ONNXRuntime_jll
using Base
using Base: UUID
import JLLWrappers
# The JLLWrappers macros generate the standard JLL package API for the
# ONNXRuntime binary artifact identified by the UUID below.
JLLWrappers.@generate_main_file_header("ONNXRuntime")
JLLWrappers.@generate_main_file("ONNXRuntime", UUID("09e6dd1b-8208-5c7e-a336-6e9061773d0b"))
end  # module ONNXRuntime_jll
|
{"hexsha": "c446a9ba8ccdd32e41396aa3f7e7c88eb786ac65", "size": 322, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ONNXRuntime_jll.jl", "max_stars_repo_name": "JuliaBinaryWrappers/ONNXRuntime_jll.jl", "max_stars_repo_head_hexsha": "38d8b1d39aed84bcf3cee02f064b6aa58c5f0ceb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-06T02:13:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T02:13:22.000Z", "max_issues_repo_path": "src/ONNXRuntime_jll.jl", "max_issues_repo_name": "JuliaBinaryWrappers/ONNXRuntime_jll.jl", "max_issues_repo_head_hexsha": "38d8b1d39aed84bcf3cee02f064b6aa58c5f0ceb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ONNXRuntime_jll.jl", "max_forks_repo_name": "JuliaBinaryWrappers/ONNXRuntime_jll.jl", "max_forks_repo_head_hexsha": "38d8b1d39aed84bcf3cee02f064b6aa58c5f0ceb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2, "max_line_length": 92, "alphanum_fraction": 0.8167701863, "num_tokens": 105}
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory Interrupt_C
imports CSpace_All Finalise_C
begin
context kernel_m begin
(* C refinement: invokeIRQHandler (AckIRQ irq) is refined by the C function
   invokeIRQHandler_AckIRQ; both just unmask the interrupt. *)
lemma invokeIRQHandler_AckIRQ_ccorres:
"ccorres dc xfdc
invs' (UNIV \<inter> {s. irq_' s = ucast irq}) []
(invokeIRQHandler (AckIRQ irq)) (Call invokeIRQHandler_AckIRQ_'proc)"
apply (cinit lift: irq_')
apply (ctac add: maskInterrupt_ccorres)
apply (simp add: from_bool_def false_def)
done
(* C refinement: getIRQSlot irq corresponds to pointer arithmetic on the
   interrupt-state IRQ node array (intStateIRQNode_Ptr + irq). *)
lemma getIRQSlot_ccorres:
"ccorres ((=) \<circ> Ptr) irqSlot_'
\<top> UNIV hs
(getIRQSlot irq)
(\<acute>irqSlot :== CTypesDefs.ptr_add intStateIRQNode_Ptr (uint irq))"
apply (rule ccorres_from_vcg[where P=\<top> and P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: getIRQSlot_def liftM_def getInterruptState_def
locateSlot_conv)
apply (simp add: simpler_gets_def bind_def return_def)
apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def
cinterrupt_relation_def size_of_def
sint_ucast_eq_uint is_down of_int_uint_ucast
cte_level_bits_def mult.commute mult.left_commute ucast_nat_def)
done
(* Rewrites a pointer-add assertion guard on the IRQ node array into the
   equivalent array-bounds form, so array-assertion lemmas apply. *)
lemma ptr_add_assertion_irq_guard:
"ccorres dc xfdc P Q hs a
(Guard F
\<lbrace>uint irq = 0 \<or> array_assertion intStateIRQNode_Ptr (nat (uint irq)) (hrs_htd \<acute>t_hrs)\<rbrace>
c;;m)
\<Longrightarrow> ccorres dc xfdc P Q hs a
(Guard F
\<lbrace>ptr_add_assertion intStateIRQNode_Ptr
(sint (ucast (irq :: 16 word)::32 signed word)) False
(hrs_htd \<acute>t_hrs)\<rbrace> c ;; m)"
by (simp add: ptr_add_assertion_def sint_ucast_eq_uint is_down)
(* Under invs', each IRQ's slot in the IRQ node is a valid CTE. *)
lemma cte_at_irq_node':
"invs' s \<Longrightarrow>
cte_at' (irq_node' s + 2 ^ cte_level_bits * ucast (irq :: 10 word)) s"
by (clarsimp simp: invs'_def valid_state'_def valid_irq_node'_def
cte_level_bits_def real_cte_at')
(* C refinement: invokeIRQHandler (SetIRQHandler irq cp slot) — delete the old
   handler cap in the IRQ slot, then insert the new notification cap. *)
lemma invokeIRQHandler_SetIRQHandler_ccorres:
"ccorres dc xfdc
(invs' and sch_act_simple
and irq_handler_inv_valid' (SetIRQHandler irq cp slot))
(UNIV \<inter> {s. irq_' s = ucast irq} \<inter> {s. slot_' s = Ptr slot}
\<inter> {s. ccap_relation cp (cap_' s)}) []
(invokeIRQHandler (SetIRQHandler irq cp slot))
(Call invokeIRQHandler_SetIRQHandler_'proc)"
proof -
have valid_objs_invs'_strg: "\<And>s. invs' s \<longrightarrow> valid_objs' s"
by (clarsimp)
show ?thesis
apply (cinit lift: irq_' slot_' cap_')
apply (rule ccorres_Guard_intStateIRQNode_array_Ptr)
apply (rule ccorres_move_array_assertion_irq)
apply (simp add: ucast_up_ucast is_up of_int_uint_ucast[symmetric])
apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified])
apply (rule ccorres_symb_exec_r)
apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"])
apply (rule ccorres_call)
apply (rule cteInsert_ccorres[simplified dc_def])
apply simp
apply simp
apply simp
apply (simp add: pred_conj_def)
apply (strengthen ntfn_badge_derived_enough_strg[unfolded o_def]
invs_mdb_strengthen' valid_objs_invs'_strg)
apply (wp cteDeleteOne_other_cap[unfolded o_def])[1]
apply vcg
apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def
gs_set_assn_Delete_cstate_relation[unfolded o_def])
apply (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv)
apply wp
apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def
ghost_assertion_data_set_def)
apply (clarsimp simp: cte_at_irq_node' ucast_nat_def)
apply (clarsimp simp: invs_pspace_aligned' cte_wp_at_ctes_of badge_derived'_def
Collect_const_mem unat_gt_0 valid_cap_simps' ARM_HYP.maxIRQ_def)
apply (drule word_le_nat_alt[THEN iffD1])
apply (clarsimp simp:uint_0_iff unat_gt_0 uint_up_ucast is_up unat_def[symmetric])
apply (drule valid_globals_ex_cte_cap_irq[where irq=irq])
apply (auto simp add:Word.uint_up_ucast is_up unat_def[symmetric])
done
qed
(* C refinement: invokeIRQHandler (ClearIRQHandler irq) — delete whatever cap
   sits in the IRQ's slot, leaving the handler cleared. *)
lemma invokeIRQHandler_ClearIRQHandler_ccorres:
"ccorres dc xfdc
(invs' and (\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s) and K(irq \<le> 0xFF))
(UNIV \<inter> {s. irq_' s = ucast irq}) []
(invokeIRQHandler (ClearIRQHandler irq))
(Call invokeIRQHandler_ClearIRQHandler_'proc)"
apply (cinit lift: irq_')
apply (rule ccorres_Guard_intStateIRQNode_array_Ptr)
apply (rule ccorres_move_array_assertion_irq)
apply (simp add: ucast_up_ucast is_up of_int_uint_ucast[symmetric])
apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified])
apply (rule ccorres_symb_exec_r)
apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def])
apply vcg
apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def
gs_set_assn_Delete_cstate_relation[unfolded o_def])
apply (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv)
apply wp
apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def
ghost_assertion_data_set_def)
apply (clarsimp simp: cte_at_irq_node' ucast_nat_def)
apply (simp add: of_int_uint_ucast[symmetric])
apply (drule word_le_nat_alt[THEN iffD1])
apply (auto simp add:Word.uint_up_ucast is_up unat_def[symmetric])
apply (case_tac "of_int (uint irq) \<noteq> 0 \<longrightarrow> 0 < unat irq")
by (auto simp: Collect_const_mem unat_eq_0)
(* A capability case-split reduced to an `isNotificationCap` test; used to
   simplify the decode proof below. *)
lemma ntfn_case_can_send:
"(case cap of NotificationCap x1 x2 x3 x4 \<Rightarrow> f x3
| _ \<Rightarrow> v) = (if isNotificationCap cap then f (capNtfnCanSend cap)
else v)"
by (cases cap, simp_all add: isCap_simps)
(* A list of length at least 2 can be written as a cons cell. *)
lemma list_length_geq_helper[simp]:
"\<lbrakk>\<not> length args < 2\<rbrakk>
\<Longrightarrow> \<exists>y ys. args = y # ys"
by (frule length_ineq_not_Nil(3), simp, metis list.exhaust)
(* C refinement of decodeIRQHandlerInvocation: Ack / SetHandler / Clear
   labels succeed (after validating the extra notification cap for
   SetHandler); all other labels raise a syscall error. *)
lemma decodeIRQHandlerInvocation_ccorres:
notes if_cong[cong] gen_invocation_type_eq[simp]
shows
"interpret_excaps extraCaps' = excaps_map extraCaps \<Longrightarrow>
ccorres (intr_and_se_rel \<currency> dc) (liftxf errstate id (K ()) ret__unsigned_long_')
(invs' and (\<lambda>s. ksCurThread s = thread)
and ct_active' and sch_act_simple
and (excaps_in_mem extraCaps o ctes_of)
and (\<lambda>s. \<exists>slot. cte_wp_at' (\<lambda>cte. cteCap cte = IRQHandlerCap irq) slot s)
and (\<lambda>s. \<forall>v \<in> set extraCaps.
ex_cte_cap_wp_to' isCNodeCap (snd v) s))
(UNIV
\<inter> {s. invLabel_' s = label}
\<inter> {s. irq_' s = ucast irq}
\<inter> {s. excaps_' s = extraCaps'}) []
(decodeIRQHandlerInvocation label irq extraCaps
>>= invocationCatch thread isBlocking isCall InvokeIRQHandler)
(Call decodeIRQHandlerInvocation_'proc)"
apply (cinit' lift: invLabel_' irq_' excaps_'
simp: decodeIRQHandlerInvocation_def invocation_eq_use_types)
(* Case 1: AckIRQ — set thread state and invoke. *)
apply (rule ccorres_Cond_rhs)
apply (simp add: returnOk_bind ccorres_invocationCatch_Inr)
apply (rule ccorres_rhs_assoc)+
apply (simp add: performInvocation_def bindE_assoc, simp add: liftE_bindE)
apply (ctac(no_vcg) add: setThreadState_ccorres)
apply (ctac(no_vcg) add: invokeIRQHandler_AckIRQ_ccorres)
apply (simp add: liftE_alternative returnOk_liftE[symmetric])
apply (rule ccorres_alternative2)
apply (rule ccorres_return_CE, simp+)[1]
apply (wp sts_invs_minor')+
(* Case 2: SetIRQHandler — validate the extra notification cap first. *)
apply (rule ccorres_Cond_rhs)
apply (rule ccorres_rhs_assoc)+
apply csymbr
apply csymbr
apply (simp add: list_case_If2 split_def del: Collect_const)
apply (rule ccorres_if_bind)
apply (rule ccorres_if_lhs[rotated])
apply (rule ccorres_cond_false_seq)
apply (simp add: Let_def split_def ntfn_case_can_send
del: Collect_const)
apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0])
apply (rule ccorres_move_c_guard_cte)
apply (ctac(no_vcg))
apply (rule ccorres_assert)
apply (rule_tac P="\<lambda>s. ksCurThread s = thread"
in ccorres_cross_over_guard)
apply (csymbr | rule ccorres_Guard_Seq)+
apply (simp add: if_1_0_0 cap_get_tag_isCap del: Collect_const)
apply (rule ccorres_Cond_rhs_Seq)
apply (simp add: hd_conv_nth del: Collect_const)
apply (rule ccorres_cond_true_seq)
apply (rule ccorres_from_vcg_split_throws[where P=\<top> and P'=UNIV])
apply vcg
apply (rule conseqPre, vcg)
apply (clarsimp simp: throwError_bind invocationCatch_def)
apply (simp add: throwError_def return_def)
apply (simp add: syscall_error_rel_def syscall_error_to_H_cases)
apply (simp add: exception_defs)
apply (rule ccorres_rhs_assoc)+
apply csymbr+
apply (subgoal_tac "(capNtfnCanSend_CL (cap_notification_cap_lift ntfnCap) = 0)
= (\<not> capNtfnCanSend rv)")
apply (simp add: if_1_0_0 from_bool_0 hd_conv_nth del: Collect_const)
apply (rule ccorres_Cond_rhs_Seq)
apply (rule ccorres_split_throws)
apply (rule ccorres_from_vcg_throws[where P=\<top> and P'=UNIV])
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: throwError_bind invocationCatch_def)
apply (simp add: throwError_def return_def)
apply (simp add: syscall_error_rel_def syscall_error_to_H_cases)
apply (simp add: exception_defs)
apply vcg
apply (simp add: hd_conv_nth liftE_bindE returnOk_bind
invocationCatch_def performInvocation_def
bind_assoc bind_bindE_assoc excaps_map_def
del: Collect_const)
apply (ctac(no_vcg) add: setThreadState_ccorres)
apply (ctac(no_vcg) add: invokeIRQHandler_SetIRQHandler_ccorres)
apply (simp add: liftE_alternative returnOk_liftE[symmetric])
apply (rule ccorres_alternative2)
apply (rule ccorres_return_CE, simp+)[1]
apply (wp sts_invs_minor' hoare_vcg_ex_lift
| simp)+
apply (clarsimp simp: cap_get_tag_isCap[symmetric]
dest!: cap_get_tag_to_H)
apply (simp add: to_bool_def)
apply simp
apply (simp add: getSlotCap_def)
apply (wp getCTE_wp)
apply (clarsimp simp: Collect_const_mem neq_Nil_conv
dest!: interpret_excaps_eq)
apply (simp add: rf_sr_ksCurThread if_1_0_0 mask_def[where n=4]
"StrictC'_thread_state_defs" cap_get_tag_isCap excaps_map_def
word_sless_def word_sle_def)
apply (simp add: invocationCatch_def throwError_bind
interpret_excaps_test_null Collect_True
excaps_map_def
del: Collect_const
cong: StateSpace.state.fold_congs globals.fold_congs)
apply (rule syscall_error_throwError_ccorres_n)
apply (simp add: syscall_error_to_H_cases)
(* Case 3: ClearIRQHandler. *)
apply (rule ccorres_Cond_rhs)
apply (simp add: invocationCatch_def performInvocation_def
returnOk_bind liftE_bindE bind_assoc
bind_bindE_assoc bind_assoc)
apply (rule ccorres_rhs_assoc)+
apply (ctac(no_vcg) add: setThreadState_ccorres)
apply (ctac(no_vcg) add: invokeIRQHandler_ClearIRQHandler_ccorres)
apply (simp add: liftE_alternative returnOk_liftE[symmetric])
apply (rule ccorres_alternative2)
apply (rule ccorres_return_CE, simp+)[1]
apply (wp sts_invs_minor')+
(* Remaining labels: throw IllegalOperation. *)
apply (rule ccorres_equals_throwError)
apply (fastforce simp: invocationCatch_def throwError_bind
split: gen_invocation_labels.split)
apply (simp add: ccorres_cond_iffs cong: StateSpace.state.fold_congs globals.fold_congs)
apply (rule syscall_error_throwError_ccorres_n)
apply (simp add: syscall_error_to_H_cases)
apply simp
apply (clarsimp simp: Collect_const_mem tcb_at_invs')
apply (clarsimp simp: invs_queues invs_valid_objs'
ct_in_state'_def
ccap_rights_relation_def
mask_def[where n=4]
"StrictC'_thread_state_defs")
apply (subst pred_tcb'_weakenE, assumption, fastforce)+
apply (clarsimp simp: rf_sr_ksCurThread word_sle_def word_sless_def
sysargs_rel_n_def word_less_nat_alt)
apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def
excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth
slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def
dest!: interpret_excaps_eq split: bool.splits)
apply (intro conjI impI allI)
apply (clarsimp simp: cte_wp_at_ctes_of neq_Nil_conv sysargs_rel_def n_msgRegisters_def
excaps_map_def excaps_in_mem_def word_less_nat_alt hd_conv_nth
slotcap_in_mem_def valid_tcb_state'_def from_bool_def toBool_def
dest!: interpret_excaps_eq split: bool.splits)+
apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')[4]
apply (drule ctes_of_valid')
apply fastforce
apply (clarsimp simp add:valid_cap_simps' ARM_HYP.maxIRQ_def)
apply (erule order.trans,simp)
apply (auto dest: st_tcb_at_idle_thread' ctes_of_valid')
done
declare mask_of_mask[simp]
(* Transfers the irq \<le> maxIRQ bound from the 32-bit cast to the 16-bit cast. *)
lemma ucast_maxIRQ_le_eq:
"UCAST(10 \<rightarrow> 32) irq \<le> SCAST(32 signed \<rightarrow> 32) Kernel_C.maxIRQ \<Longrightarrow>
UCAST(10 \<rightarrow> 16) irq \<le> SCAST(32 signed \<rightarrow> 16) Kernel_C.maxIRQ"
apply (subst ucast_le_ucast[where 'a=16 and 'b=32, symmetric])
apply simp
by (clarsimp simp: ucast_up_ucast is_up Kernel_C.maxIRQ_def)
(* The C-level bound implies the abstract maxIRQ bound on the 10-bit irq. *)
lemma ucast_maxIRQ_le_eq':
"UCAST(10 \<rightarrow> 32) irq \<le> SCAST(32 signed \<rightarrow> 32) Kernel_C.maxIRQ \<Longrightarrow> irq \<le> maxIRQ"
apply (clarsimp simp: Kernel_C.maxIRQ_def maxIRQ_def)
by word_bitwise
(* C refinement of the IssueIRQHandler body (setIRQState then cteInsert),
   stated on the expanded monadic form so it can be reused by both the
   generic and the architecture-specific invoke lemmas below. *)
lemma invokeIRQControl_expanded_ccorres:
"ccorres (\<lambda>_ r. r = scast EXCEPTION_NONE) (ret__unsigned_long_')
(invs' and cte_at' parent and (\<lambda>_. (ucast irq) \<le> (scast Kernel_C.maxIRQ :: machine_word)))
(UNIV \<inter> {s. irq_' s = ucast irq}
\<inter> {s. controlSlot_' s = cte_Ptr parent}
\<inter> {s. handlerSlot_' s = cte_Ptr slot}) hs
(do x <- setIRQState irqstate.IRQSignal irq;
cteInsert (capability.IRQHandlerCap irq) parent slot
od)
(Call invokeIRQControl_'proc)"
apply (cinit' lift: irq_' controlSlot_' handlerSlot_')
apply (ctac add: setIRQState_ccorres)
apply csymbr
apply (rule ccorres_add_return2)
apply (ctac (no_vcg) add: cteInsert_ccorres)
apply (rule_tac P=\<top> and P'=UNIV in ccorres_from_vcg_throws)
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: return_def)
apply wp+
apply clarsimp
apply (vcg exspec=setIRQState_modifies)
apply (clarsimp simp: is_simple_cap'_def isCap_simps valid_cap_simps' capAligned_def
word_bits_def)
apply (rule conjI)
apply (fastforce simp: word_bits_def intro!: ucast_maxIRQ_le_eq ucast_maxIRQ_le_eq')
apply (simp add: invs_mdb' invs_valid_objs' invs_pspace_aligned')
apply (rule conjI)
apply (clarsimp simp: maxIRQ_def Kernel_C.maxIRQ_def)
apply unat_arith
apply (clarsimp simp: Collect_const_mem ccap_relation_def cap_irq_handler_cap_lift
cap_to_H_def c_valid_cap_def cl_valid_cap_def
word_bw_assocs mask_twice maxIRQ_def Kernel_C.maxIRQ_def ucast_ucast_a
is_up ucast_ucast_b is_down)
apply (subst less_mask_eq)
apply (rule le_m1_iff_lt[THEN iffD1,THEN iffD1])
apply simp
apply (erule order.trans, simp)
apply (rule word_eqI)
apply (simp add: nth_ucast word_size)
done
(* C refinement of performIRQControl (IssueIRQHandler ...); follows directly
   from the expanded lemma above via liftE. *)
lemma invokeIRQControl_ccorres:
"ccorres (K (K \<bottom>) \<currency> dc) (liftxf errstate id (K ()) ret__unsigned_long_')
(invs' and cte_at' parent and (\<lambda>_. (ucast irq) \<le> (scast Kernel_C.maxIRQ :: machine_word)))
(UNIV \<inter> {s. irq_' s = ucast irq}
\<inter> {s. controlSlot_' s = cte_Ptr parent}
\<inter> {s. handlerSlot_' s = cte_Ptr slot}) hs
(performIRQControl (Invocations_H.IssueIRQHandler irq slot parent))
(Call invokeIRQControl_'proc)"
unfolding performIRQControl_def
apply simp
apply (rule ccorres_liftE)
apply (rule ccorres_rel_imp)
apply (rule ccorres_guard_imp)
apply (rule invokeIRQControl_expanded_ccorres)
apply assumption
apply simp+
done
(* C refinement of the ARM_HYP architecture-specific IssueIRQHandler, which
   additionally configures the interrupt trigger before the generic body. *)
lemma Arch_invokeIRQControl_ccorres:
"ccorres ((K (K \<bottom>)) \<currency> dc) (liftxf errstate id (K ()) ret__unsigned_long_')
(invs' and cte_at' parent and (\<lambda>_. (ucast irq) \<le> (scast Kernel_C.maxIRQ :: machine_word)))
(UNIV \<inter> {s. irq_' s = ucast irq}
\<inter> {s. handlerSlot_' s = cte_Ptr slot}
\<inter> {s. controlSlot_' s = cte_Ptr parent}
\<inter> {s. trigger_' s = from_bool trigger}) hs
(performIRQControl (ArchIRQControl (IssueIRQHandler irq slot parent trigger)))
(Call Arch_invokeIRQControl_'proc)"
unfolding ARM_HYP_H.performIRQControl_def IRQ_def
apply (cinit lift: irq_' handlerSlot_' controlSlot_' trigger_'
simp: ARM_HYP_H.performIRQControl_def IRQ_def)
apply (rule ccorres_liftE)
apply (ctac (no_vcg) add: setIRQTrigger_ccorres)
apply (rule ccorres_add_return2)
apply (ctac (no_vcg) add: invokeIRQControl_expanded_ccorres)
apply (rule_tac P=\<top> and P'=UNIV in ccorres_from_vcg_throws)
apply (rule allI, rule conseqPre, vcg)
apply (clarsimp simp: return_def)
apply (wpsimp simp: guard_is_UNIV_def)+
done
(* Upcasting 16 \<rightarrow> 32 bits preserves unat. *)
lemma unat_ucast_16_32:
"unat (ucast (x::(16 word))::32 signed word) = unat x"
apply (subst unat_ucast)
apply (rule Divides.mod_less, simp)
apply (rule less_le_trans[OF unat_lt2p])
apply simp
done
(* The abstract and C maxIRQ constants agree. *)
lemma Platform_maxIRQ:
"ARM_HYP.maxIRQ = scast Kernel_C.maxIRQ"
by (simp add: ARM_HYP.maxIRQ_def Kernel_C.maxIRQ_def)
(* Pushes a liftME through invocationCatch, composing the wrapper functions. *)
lemma liftME_invocationCatch:
"liftME f m >>= invocationCatch thread isBlocking isCall f'
= m >>= invocationCatch thread isBlocking isCall (f' \<circ> f)"
apply (simp add: liftME_def bindE_def bind_assoc)
apply (rule bind_cong [OF refl])
apply (simp add: lift_def throwError_bind invocationCatch_def
returnOk_bind
split: sum.split)
done
(* Casting the C maxIRQ through 10 bits and back is the identity. *)
lemma maxIRQ_ucast_scast [simp]:
"ucast (scast Kernel_C.maxIRQ :: 10 word) = scast Kernel_C.maxIRQ"
by (clarsimp simp: Kernel_C.maxIRQ_def)
(* Collapse the label case-splits when the label is known not to match. *)
lemma decodeIRQ_arch_helper: "x \<noteq> IRQIssueIRQHandler \<Longrightarrow>
(case x of IRQIssueIRQHandler \<Rightarrow> f | _ \<Rightarrow> g) = g"
by (clarsimp split: gen_invocation_labels.splits)
lemma decodeIRQ_arch_helper': "x \<noteq> ArchInvocationLabel ARMIRQIssueIRQHandler \<Longrightarrow>
(case x of ArchInvocationLabel ARMIRQIssueIRQHandler \<Rightarrow> f | _ \<Rightarrow> g) = g"
by (clarsimp split: invocation_label.splits arch_invocation_label.splits)
(* Correspondence between the Haskell Arch.decodeIRQControlInvocation and the
   C function Arch_decodeIRQControlInvocation.  The proof mirrors the generic
   decodeIRQControlInvocation_ccorres proof below: unpack the syscall
   arguments, range-check the IRQ, check it is inactive, look up and check the
   destination slot, then hand over to Arch_invokeIRQControl.
   NOTE(review): dropped a leftover debugging directive
   "using [[goals_limit=2]]" after the performInvocation simp step; it only
   limited goal *display* and had no logical effect. *)
lemma Arch_decodeIRQControlInvocation_ccorres:
  notes if_cong[cong]
  shows
  "interpret_excaps extraCaps' = excaps_map extraCaps \<Longrightarrow>
   ccorres (intr_and_se_rel \<currency> dc) (liftxf errstate id (K ()) ret__unsigned_long_')
      (invs' and (\<lambda>s. ksCurThread s = thread)
             and sch_act_simple and ct_active'
             and (excaps_in_mem extraCaps o ctes_of)
             and cte_wp_at' (\<lambda>cte. cteCap cte = IRQControlCap) slot
             and (\<lambda>s. \<forall>v \<in> set extraCaps. s \<turnstile>' fst v)
             and sysargs_rel args buffer)
      (UNIV
            \<inter> {s. invLabel_' s = label} \<inter> {s. srcSlot_' s = cte_Ptr slot}
            \<inter> {s. unat (length___unsigned_long_' s) = length args}
            \<inter> {s. excaps_' s = extraCaps'}
            \<inter> {s. buffer_' s = option_to_ptr buffer}) []
      (Arch.decodeIRQControlInvocation label args slot (map fst extraCaps)
           >>= invocationCatch thread isBlocking isCall (InvokeIRQControl o ArchIRQControl))
      (Call Arch_decodeIRQControlInvocation_'proc)"
  apply (cinit' lift: invLabel_' srcSlot_' length___unsigned_long_' excaps_' buffer_')
   apply (simp add: ARM_HYP_H.decodeIRQControlInvocation_def invocation_eq_use_types
               del: Collect_const
              cong: StateSpace.state.fold_congs globals.fold_congs)
   (* case split on the invocation label *)
   apply (rule ccorres_Cond_rhs)
    apply (simp add: list_case_If2
                del: Collect_const cong: call_ignore_cong)
    apply (rule ccorres_rhs_assoc)+
    apply csymbr+
    apply (rule ccorres_Cond_rhs_Seq)
     apply (simp add: word_less_nat_alt throwError_bind invocationCatch_def)
     apply (rule ccorres_cond_true_seq)
     apply (rule syscall_error_throwError_ccorres_n)
     apply (simp add: syscall_error_to_H_cases)
    apply csymbr
    apply (rule ccorres_Cond_rhs_Seq)
     apply (simp add: interpret_excaps_test_null excaps_map_def
                      throwError_bind invocationCatch_def)
     apply (rule syscall_error_throwError_ccorres_n)
     apply (simp add: syscall_error_to_H_cases)
    apply (simp add: interpret_excaps_test_null excaps_map_def
                     word_less_nat_alt Let_def
                del: Collect_const cong: call_ignore_cong)
    (* unpack the four syscall arguments *)
    apply (rule ccorres_add_return)
    apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer])
      apply csymbr
      apply (rule ccorres_add_return)
      apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer])
        apply (rule ccorres_add_return)
        apply csymbr
        apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer])
          apply (rule ccorres_add_return)
          apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=3 and buffer=buffer])
            apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0])
            apply (rule ccorres_move_c_guard_cte)
            apply ctac
              apply (rule ccorres_assert2)
              apply (simp add: rangeCheck_def unlessE_def ARM_HYP.minIRQ_def
                               ucast_nat_def word_le_nat_alt[symmetric]
                               linorder_not_le[symmetric] Platform_maxIRQ
                               length_ineq_not_Nil hd_conv_nth cast_simps
                               maxIRQ_ucast_scast
                          del: Collect_const cong: call_ignore_cong)
              apply (simp add: split_def invocationCatch_use_injection_handler
                               injection_handler_bindE bindE_assoc
                          del: Collect_const)
              apply (ctac add: ccorres_injection_handler_csum1[OF Arch_checkIRQ_ccorres])
                 apply (simp add: injection_liftE)
                 apply (simp add: liftE_bindE bind_assoc del: Collect_const)
                 apply (ctac add: isIRQActive_ccorres)
                   apply (simp add: from_bool_0 del: Collect_const)
                   apply (rule ccorres_Cond_rhs_Seq)
                    apply (simp add: throwError_bind invocationCatch_def whenE_def
                                     injection_handler_throwError)
                    apply (rule syscall_error_throwError_ccorres_n)
                    apply (simp add: syscall_error_to_H_cases)
                   apply (simp add: split_def invocationCatch_use_injection_handler
                                    injection_handler_bindE bindE_assoc whenE_def
                                    injection_handler_returnOk
                               del: Collect_const)
                   apply (ctac add: ccorres_injection_handler_csum1
                                      [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def])
                      apply (simp add: Collect_False split_def del: Collect_const)
                      apply csymbr
                      apply (ctac add: ccorres_injection_handler_csum1
                                         [OF ensureEmptySlot_ccorres])
                         apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr
                                          performInvocation_def bindE_assoc)
                         apply (ctac add: setThreadState_ccorres)
                           apply (ctac(no_vcg) add: Arch_invokeIRQControl_ccorres)
                             apply (rule ccorres_alternative2)
                             apply (rule ccorres_return_CE, simp+)[1]
                            apply (rule ccorres_return_C_errorE, simp+)[1]
                           apply (wp sts_invs_minor')+
                         apply (simp add: Collect_const_mem)
                         apply (vcg exspec=setThreadState_modifies)
                        apply simp
                        apply (rule ccorres_split_throws)
                         apply (rule ccorres_return_C_errorE, simp+)[1]
                        apply vcg
                       apply simp
                       apply (wp injection_wp_E [OF refl])
                      apply (simp add: Collect_const_mem all_ex_eq_helper)
                      apply (vcg exspec=ensureEmptySlot_modifies)
                     apply simp
                     apply (rule ccorres_split_throws)
                      apply (rule ccorres_return_C_errorE, simp+)[1]
                     apply vcg
                    apply simp
                    apply (wp injection_wp_E[OF refl] hoare_drop_imps)
                   apply (simp add: Collect_const_mem all_ex_eq_helper)
                   apply (vcg exspec=lookupTargetSlot_modifies)
                  apply simp
                  apply (wp hoare_drop_imps isIRQActive_inv)
                 apply (simp add: Collect_const_mem all_ex_eq_helper)
                 apply (vcg exspec=isIRQActive_modifies)
                apply simp
                apply (rule ccorres_split_throws)
                 apply (rule ccorres_return_C_errorE, simp+)[1]
                apply vcg
               apply simp
               apply (wp injection_wp_E[OF refl] checkIRQ_ret_good)
              apply (simp add: Collect_const_mem all_ex_eq_helper)
              apply (vcg exspec=Arch_checkIRQ_modifies)
             apply (wp hoare_vcg_const_imp_lift |wp (once) hoare_drop_imps)+
            apply (simp add: Collect_const_mem all_ex_eq_helper)
            apply vcg
           apply wp
          apply (simp add: Collect_const_mem all_ex_eq_helper)
          apply (vcg exspec=getSyscallArg_modifies)
         apply simp
         apply wp
        apply (simp add: Collect_const_mem all_ex_eq_helper)
        apply (vcg exspec=getSyscallArg_modifies)
       apply simp
       apply wp
      apply (simp add: Collect_const_mem all_ex_eq_helper)
      apply (vcg exspec=getSyscallArg_modifies)
     apply simp
     apply wp
    apply (simp add: Collect_const_mem all_ex_eq_helper)
    apply (vcg exspec=getSyscallArg_modifies)
   (* any other label: throw IllegalOperation *)
   apply (clarsimp simp: decodeIRQ_arch_helper')
   apply (simp add: throwError_bind invocationCatch_def)
   apply (rule syscall_error_throwError_ccorres_n)
   apply (simp add: syscall_error_to_H_cases)
  (* discharge the remaining abstract/concrete side conditions *)
  apply (clarsimp simp: interpret_excaps_test_null excaps_map_def
                        Collect_const_mem word_sless_def word_sle_def
                        ThreadState_Restart_def unat_of_nat mask_def)
  apply (rule conjI)
   apply (simp add: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def unat_ucast)
   apply (cut_tac unat_lt2p[where x="args ! 3"])
   apply clarsimp
  apply (clarsimp simp: sysargs_rel_to_n word_less_nat_alt unat_ucast)
  apply (auto,
         auto simp: ct_in_state'_def neq_Nil_conv word_bits_def
                    excaps_in_mem_def slotcap_in_mem_def
                    cte_wp_at_ctes_of numeral_eqs[symmetric]
                    valid_tcb_state'_def
             elim!: pred_tcb'_weakenE
             dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1]
  apply (clarsimp simp: neq_Nil_conv numeral_eqs[symmetric]
                        word_sle_def word_sless_def)
  apply (drule interpret_excaps_eq[rule_format, where n=0], simp)
  apply (clarsimp simp: mask_def[where n=4] "StrictC'_thread_state_defs"
                        rf_sr_ksCurThread ccap_rights_relation_def
                        rightsFromWord_wordFromRights)
  apply (simp cong: conj_cong)
  apply (clarsimp simp: Kernel_C.maxIRQ_def word_le_nat_alt
                        ucast_nat_def ucast_ucast_mask mask_eq_ucast_eq unat_ucast_mask
                        less_mask_eq[unfolded word_less_nat_alt])
  apply (cases "args ! Suc 0 = 0"; clarsimp simp: true_def false_def)
  done
(* Correspondence between the Haskell decodeIRQControlInvocation and the C
   function decodeIRQControlInvocation.  The IRQIssueIRQHandler branch is
   proved directly (argument unpacking, IRQ range check, activity check,
   target slot lookup/emptiness check, then invokeIRQControl); all other
   labels are delegated to Arch_decodeIRQControlInvocation_ccorres via
   liftME_invocationCatch. *)
lemma decodeIRQControlInvocation_ccorres:
  notes if_cong[cong]
  shows
  "interpret_excaps extraCaps' = excaps_map extraCaps \<Longrightarrow>
   ccorres (intr_and_se_rel \<currency> dc) (liftxf errstate id (K ()) ret__unsigned_long_')
      (invs' and (\<lambda>s. ksCurThread s = thread)
             and sch_act_simple and ct_active'
             and (excaps_in_mem extraCaps o ctes_of)
             and cte_wp_at' (\<lambda>cte. cteCap cte = IRQControlCap) slot
             and (\<lambda>s. \<forall>v \<in> set extraCaps. s \<turnstile>' fst v)
             and sysargs_rel args buffer)
      (UNIV
            \<inter> {s. invLabel_' s = label} \<inter> {s. srcSlot_' s = cte_Ptr slot}
            \<inter> {s. unat (length___unsigned_long_' s) = length args}
            \<inter> {s. excaps_' s = extraCaps'}
            \<inter> {s. buffer_' s = option_to_ptr buffer}) []
      (decodeIRQControlInvocation label args slot (map fst extraCaps)
           >>= invocationCatch thread isBlocking isCall InvokeIRQControl)
      (Call decodeIRQControlInvocation_'proc)"
  supply gen_invocation_type_eq[simp]
  apply (cinit' lift: invLabel_' srcSlot_' length___unsigned_long_' excaps_' buffer_')
   apply (simp add: decodeIRQControlInvocation_def invocation_eq_use_types
               del: Collect_const
              cong: StateSpace.state.fold_congs globals.fold_congs)
   (* case split on the invocation label: IRQIssueIRQHandler vs. the rest *)
   apply (rule ccorres_Cond_rhs)
    apply (simp add: list_case_If2
                del: Collect_const cong: call_ignore_cong)
    apply (rule ccorres_rhs_assoc)+
    apply csymbr+
    apply (rule ccorres_Cond_rhs_Seq)
     apply (simp add: word_less_nat_alt if_1_0_0
                      throwError_bind invocationCatch_def)
     apply (rule ccorres_cond_true_seq)
     apply (rule syscall_error_throwError_ccorres_n)
     apply (simp add: syscall_error_to_H_cases)
    apply csymbr
    apply (rule ccorres_Cond_rhs_Seq)
     apply (simp add: interpret_excaps_test_null if_1_0_0 excaps_map_def
                      throwError_bind invocationCatch_def)
     apply (rule syscall_error_throwError_ccorres_n)
     apply (simp add: syscall_error_to_H_cases)
    apply (simp add: interpret_excaps_test_null if_1_0_0 excaps_map_def
                     word_less_nat_alt Let_def
                del: Collect_const cong: call_ignore_cong)
    (* unpack the three syscall arguments *)
    apply (rule ccorres_add_return)
    apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=0 and buffer=buffer])
      apply csymbr
      apply (rule ccorres_add_return)
      apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=1 and buffer=buffer])
        apply (rule ccorres_add_return)
        apply (ctac add: getSyscallArg_ccorres_foo[where args=args and n=2 and buffer=buffer])
          apply (rule getSlotCap_ccorres_fudge_n[where vals=extraCaps and n=0])
          apply (rule ccorres_move_c_guard_cte)
          apply ctac
            apply (rule ccorres_assert2)
            apply (simp add: rangeCheck_def unlessE_def ARM_HYP.minIRQ_def
                             ucast_nat_def word_le_nat_alt[symmetric]
                             linorder_not_le[symmetric] Platform_maxIRQ
                             length_ineq_not_Nil hd_conv_nth cast_simps
                             maxIRQ_ucast_scast
                        del: Collect_const cong: call_ignore_cong)
            apply (simp add: split_def invocationCatch_use_injection_handler injection_handler_bindE
                             bindE_assoc
                        del: Collect_const)
            apply (ctac add: ccorres_injection_handler_csum1[OF Arch_checkIRQ_ccorres])
               apply (simp add: injection_liftE)
               apply (simp add: liftE_bindE bind_assoc del: Collect_const)
               apply (ctac add: isIRQActive_ccorres)
                 apply (simp add: from_bool_0 del: Collect_const)
                 apply (rule ccorres_Cond_rhs_Seq)
                  apply (simp add: throwError_bind invocationCatch_def whenE_def
                                   injection_handler_throwError)
                  apply (rule syscall_error_throwError_ccorres_n)
                  apply (simp add: syscall_error_to_H_cases)
                 apply (simp add: split_def invocationCatch_use_injection_handler
                                  injection_handler_bindE bindE_assoc whenE_def
                                  injection_handler_returnOk
                             del: Collect_const)
                 apply (ctac add: ccorres_injection_handler_csum1
                                    [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def])
                    apply (simp add: Collect_False split_def del: Collect_const)
                    apply csymbr
                    apply (ctac add: ccorres_injection_handler_csum1
                                       [OF ensureEmptySlot_ccorres])
                       apply (simp add: injection_handler_returnOk ccorres_invocationCatch_Inr
                                        performInvocation_def bindE_assoc)
                       apply (ctac add: setThreadState_ccorres)
                         apply (ctac(no_vcg) add: invokeIRQControl_ccorres)
                           apply (rule ccorres_alternative2)
                           apply (rule ccorres_return_CE, simp+)[1]
                          apply (rule ccorres_return_C_errorE, simp+)[1]
                         apply (wp sts_invs_minor')+
                       apply (simp add: Collect_const_mem)
                       apply (vcg exspec=setThreadState_modifies)
                      apply simp
                      apply (rule ccorres_split_throws)
                       apply (rule ccorres_return_C_errorE, simp+)[1]
                      apply vcg
                     apply simp
                     apply (wp injection_wp_E [OF refl])
                    apply (simp add: Collect_const_mem all_ex_eq_helper)
                    apply (vcg exspec=ensureEmptySlot_modifies)
                   apply simp
                   apply (rule ccorres_split_throws)
                    apply (rule ccorres_return_C_errorE, simp+)[1]
                   apply vcg
                  apply simp
                  apply (wp injection_wp_E[OF refl] hoare_drop_imps)
                 apply (simp add: Collect_const_mem all_ex_eq_helper)
                 apply (vcg exspec=lookupTargetSlot_modifies)
                apply simp
                apply (wp hoare_drop_imps isIRQActive_inv)
               apply (simp add: Collect_const_mem all_ex_eq_helper)
               apply (vcg exspec=isIRQActive_modifies)
              apply simp
              apply (rule ccorres_split_throws)
               apply (rule ccorres_return_C_errorE, simp+)[1]
              apply vcg
             apply simp
             apply (wp injection_wp_E[OF refl] checkIRQ_ret_good)
            apply (simp add: Collect_const_mem all_ex_eq_helper)
            apply (vcg exspec=Arch_checkIRQ_modifies)
           apply (wp hoare_vcg_const_imp_lift |wp (once) hoare_drop_imps)+
          apply (simp add: Collect_const_mem all_ex_eq_helper)
          apply vcg
         apply wp
        apply (simp add: Collect_const_mem all_ex_eq_helper)
        apply (vcg exspec=getSyscallArg_modifies)
       apply simp
       apply wp
      apply (simp add: Collect_const_mem all_ex_eq_helper)
      apply (vcg exspec=getSyscallArg_modifies)
     apply simp
     apply wp
    apply (simp add: Collect_const_mem all_ex_eq_helper)
    apply (vcg exspec=getSyscallArg_modifies)
   (* not a generic IRQ-control label: hand over to the arch decoder *)
   apply (clarsimp simp: decodeIRQ_arch_helper)
   apply (simp add: liftME_invocationCatch)
   apply (rule ccorres_add_returnOk)
   apply (ctac add: Arch_decodeIRQControlInvocation_ccorres)
      apply (rule ccorres_return_CE, simp+)[1]
     apply (rule ccorres_return_C_errorE, simp+)[1]
    apply wp
   apply (vcg exspec=Arch_decodeIRQControlInvocation_modifies)
  apply (simp add: syscall_error_to_H_cases)
  apply (clarsimp simp: interpret_excaps_test_null excaps_map_def
                        Collect_const_mem word_sless_def word_sle_def
                        ThreadState_Restart_def unat_of_nat mask_def)
  apply (rule conjI)
   apply (simp add: Kernel_C.maxIRQ_def word_le_nat_alt ucast_nat_def
                    unat_ucast)
   apply (cut_tac unat_lt2p[where x="args ! 2"])
   apply clarsimp
  apply (clarsimp simp: sysargs_rel_to_n word_less_nat_alt unat_ucast)
  apply (auto,
         auto simp: ct_in_state'_def neq_Nil_conv word_bits_def
                    excaps_in_mem_def slotcap_in_mem_def
                    cte_wp_at_ctes_of numeral_eqs[symmetric]
                    valid_tcb_state'_def
             elim!: pred_tcb'_weakenE
             dest!: st_tcb_at_idle_thread' interpret_excaps_eq)[1]
  apply (clarsimp simp: neq_Nil_conv numeral_eqs[symmetric]
                        word_sle_def word_sless_def)
  apply (drule interpret_excaps_eq[rule_format, where n=0], simp)
  apply (clarsimp simp: mask_def[where n=4] "StrictC'_thread_state_defs"
                        rf_sr_ksCurThread ccap_rights_relation_def
                        rightsFromWord_wordFromRights)
  apply (simp cong: conj_cong)
  apply (clarsimp simp: Kernel_C.maxIRQ_def word_le_nat_alt
                        ucast_nat_def ucast_ucast_mask mask_eq_ucast_eq unat_ucast_mask
                        less_mask_eq[unfolded word_less_nat_alt])
  done
end
end
|
{"author": "NICTA", "repo": "l4v", "sha": "3c3514fe99082f7b6a6fb8445b8dfc592ff7f02b", "save_path": "github-repos/isabelle/NICTA-l4v", "path": "github-repos/isabelle/NICTA-l4v/l4v-3c3514fe99082f7b6a6fb8445b8dfc592ff7f02b/proof/crefine/ARM_HYP/Interrupt_C.thy"}
|
'''
ModelNet dataset. Support ModelNet40, ModelNet10, XYZ and normal channels. Up to 10000 points.
'''
import os
import os.path
import json
import numpy as np
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
def pc_normalize(pc):
    """Center a point cloud at the origin and scale it into the unit sphere.

    :param pc: (N, 3) numpy array of xyz coordinates.
    :return: normalized (N, 3) array with zero centroid and a maximum
        point radius of 1.
    """
    # (Removed an unused ``l = pc.shape[0]`` local from the original.)
    centroid = np.mean(pc, axis=0)
    pc = pc - centroid
    # Largest distance from the centroid; scaling by it puts all points
    # inside (or on) the unit sphere.
    m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
    pc = pc / m
    return pc
class ModelNetDataset():
    """ModelNet point-cloud classification dataset (ModelNet40 / ModelNet10).

    Serves per-shape point clouds loaded from resampled ``.txt`` files, either
    one item at a time (``__getitem__``) or as shuffled fixed-size batches
    (``next_batch``).  Loaded shapes are kept in a bounded in-memory cache.
    """

    def __init__(self, root, batch_size = 32, npoints = 1024, split='train', normalize=True, normal_channel=False, modelnet10=False, cache_size=15000, shuffle=None):
        """
        :param root: dataset root containing shape folders and split lists.
        :param batch_size: number of examples per batch [default: 32]
        :param npoints: number of points kept per shape [default: 1024]
        :param split: 'train' or 'test'
        :param normalize: center each cloud and scale it into the unit sphere
        :param normal_channel: keep the surface-normal channels (6 total)
        :param modelnet10: use the 10-class subset instead of ModelNet40
        :param cache_size: max number of loaded shapes cached in memory
        :param shuffle: force shuffling on/off; by default only 'train' shuffles
        """
        self.root = root
        self.batch_size = batch_size
        self.npoints = npoints
        self.normalize = normalize
        if modelnet10:
            self.catfile = os.path.join(self.root, 'modelnet10_shape_names.txt')
        else:
            self.catfile = os.path.join(self.root, 'shape_names.txt')
        # Category name -> integer label, in file order.
        self.cat = self._read_lines(self.catfile)
        self.classes = dict(zip(self.cat, range(len(self.cat))))
        self.normal_channel = normal_channel

        shape_ids = {}
        if modelnet10:
            shape_ids['train'] = self._read_lines(os.path.join(self.root, 'modelnet10_train.txt'))
            shape_ids['test'] = self._read_lines(os.path.join(self.root, 'modelnet10_test.txt'))
        else:
            shape_ids['train'] = self._read_lines(os.path.join(self.root, 'modelnet40_train.txt'))
            shape_ids['test'] = self._read_lines(os.path.join(self.root, 'modelnet40_test.txt'))
        assert(split=='train' or split=='test')
        # Ids look like "airplane_0001"; drop the trailing index to recover the class name.
        shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]]
        # list of (shape_name, shape_txt_file_path) tuple
        self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i])+'.txt') for i in range(len(shape_ids[split]))]

        self.cache_size = cache_size # how many data points to cache in memory
        self.cache = {} # from index to (point_set, cls) tuple

        if shuffle is None:
            # Shuffle the training split by default, keep test order stable.
            if split == 'train': self.shuffle = True
            else: self.shuffle = False
        else:
            self.shuffle = shuffle

        self.reset()

    @staticmethod
    def _read_lines(path):
        """Return the stripped lines of a text file, closing the handle.

        (The original built these lists with bare ``open()`` calls that were
        never closed; this helper uses a context manager instead.)
        """
        with open(path) as f:
            return [line.rstrip() for line in f]

    def _augment_batch_data(self, batch_data):
        """Apply random rotation, perturbation, scale, shift, jitter and
        point shuffling to a (B, N, C) batch for training augmentation."""
        if self.normal_channel:
            rotated_data = provider.rotate_point_cloud_with_normal(batch_data)
            rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data)
        else:
            rotated_data = provider.rotate_point_cloud(batch_data)
            rotated_data = provider.rotate_perturbation_point_cloud(rotated_data)

        # Geometric jitter applies to xyz only; normals (if any) keep the rotation.
        jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3])
        jittered_data = provider.shift_point_cloud(jittered_data)
        jittered_data = provider.jitter_point_cloud(jittered_data)
        rotated_data[:,:,0:3] = jittered_data
        return provider.shuffle_points(rotated_data)

    def _get_item(self, index):
        """Load (or fetch from cache) one (point_set, cls) pair."""
        if index in self.cache:
            point_set, cls = self.cache[index]
        else:
            fn = self.datapath[index]
            cls = self.classes[self.datapath[index][0]]
            cls = np.array([cls]).astype(np.int32)
            point_set = np.loadtxt(fn[1],delimiter=',').astype(np.float32)
            # Take the first npoints
            point_set = point_set[0:self.npoints,:]
            if self.normalize:
                point_set[:,0:3] = pc_normalize(point_set[:,0:3])
            if not self.normal_channel:
                point_set = point_set[:,0:3]
            if len(self.cache) < self.cache_size:
                self.cache[index] = (point_set, cls)
        return point_set, cls

    def __getitem__(self, index):
        return self._get_item(index)

    def __len__(self):
        return len(self.datapath)

    def num_channel(self):
        """Number of per-point channels: 6 with normals, else 3 (xyz)."""
        if self.normal_channel:
            return 6
        else:
            return 3

    def reset(self):
        """Re-derive the (possibly shuffled) epoch index order and rewind."""
        self.idxs = np.arange(0, len(self.datapath))
        if self.shuffle:
            np.random.shuffle(self.idxs)
        # Ceiling division: the last batch may be smaller than batch_size.
        self.num_batches = (len(self.datapath)+self.batch_size-1) // self.batch_size
        self.batch_idx = 0

    def has_next_batch(self):
        return self.batch_idx < self.num_batches

    def next_batch(self, augment=False):
        ''' returned dimension may be smaller than self.batch_size '''
        start_idx = self.batch_idx * self.batch_size
        end_idx = min((self.batch_idx+1) * self.batch_size, len(self.datapath))
        bsize = end_idx - start_idx
        batch_data = np.zeros((bsize, self.npoints, self.num_channel()))
        batch_label = np.zeros((bsize), dtype=np.int32)
        for i in range(bsize):
            ps,cls = self._get_item(self.idxs[i+start_idx])
            batch_data[i] = ps
            batch_label[i] = cls
        self.batch_idx += 1
        if augment: batch_data = self._augment_batch_data(batch_data)
        return batch_data, batch_label
if __name__ == '__main__':
    # Smoke test: requires the resampled ModelNet40 dataset on disk at the
    # path below.  Loads the test split, times ten single-item fetches, then
    # pulls one augmented batch.
    d = ModelNetDataset(root = '../data/modelnet40_normal_resampled', split='test')
    print(d.shuffle)
    print(len(d))
    import time
    tic = time.time()
    for i in range(10):
        ps, cls = d[i]
    print(time.time() - tic)
    # ps/cls refer to the last fetched item from the loop above.
    print(ps.shape, type(ps), cls)

    print(d.has_next_batch())
    # next_batch(True) applies the random training augmentations.
    ps_batch, cls_batch = d.next_batch(True)
    print(ps_batch.shape)
    print(cls_batch.shape)
|
{"hexsha": "78f326e080724ef2735fc52a57639bd42bf65b55", "size": 5582, "ext": "py", "lang": "Python", "max_stars_repo_path": "modelnet_dataset.py", "max_stars_repo_name": "lukovkin/pointnet2", "max_stars_repo_head_hexsha": "04271d7bc4b9a6ab18144be4feb262eba3df2a9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2429, "max_stars_repo_stars_event_min_datetime": "2017-08-31T03:43:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T02:32:36.000Z", "max_issues_repo_path": "modelnet_dataset.py", "max_issues_repo_name": "zazgf/Tensorflow1.15-pointNet", "max_issues_repo_head_hexsha": "a21425c0066c83215d91e1e810418161f2500125", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 217, "max_issues_repo_issues_event_min_datetime": "2017-09-08T10:28:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T13:28:55.000Z", "max_forks_repo_path": "modelnet_dataset.py", "max_forks_repo_name": "zazgf/Tensorflow1.15-pointNet", "max_forks_repo_head_hexsha": "a21425c0066c83215d91e1e810418161f2500125", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 922, "max_forks_repo_forks_event_min_datetime": "2017-08-31T18:02:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T01:31:58.000Z", "avg_line_length": 38.4965517241, "max_line_length": 165, "alphanum_fraction": 0.6255822286, "include": true, "reason": "import numpy", "num_tokens": 1343}
|
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy as np
import os
# Force GCC so the extra_compile_args below are understood.
os.environ['CC'] = 'gcc'
os.environ['CXX'] = 'g++'

# Location of the external 'graph' C++ library headers (~/software/graph/include).
graph_software_dir = '{}/software/'.format(os.path.expanduser('~'))

def _graph_extension(name, pyx_source, cpp_source):
    """Build one Cython C++ extension linked against the graph library headers.

    Factored out of the original copy-pasted Extension literals so both
    extensions are guaranteed to share identical build settings.
    """
    return Extension(
        name=name,
        include_dirs=[np.get_include(), '{}/graph/include'.format(graph_software_dir)],
        sources=[pyx_source, cpp_source],
        extra_compile_args=['-O4', '-std=c++11'],
        language='c++'
    )

extensions = [
    _graph_extension('multicut', 'multicut.pyx', 'cpp-multicut.cpp'),
    _graph_extension('lifted_multicut', 'lifted_multicut.pyx', 'cpp-lifted-multicut.cpp'),
]

setup(
    name='algorithms',
    ext_modules = cythonize(extensions)
)
|
{"hexsha": "67d448846e02367666433581a972016431446b2d", "size": 864, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithms/setup.py", "max_stars_repo_name": "aplbrain/biologicalgraphs", "max_stars_repo_head_hexsha": "7ef86c8893bcabcb469cf184079456a923f1bab1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-07-29T21:26:01.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-26T23:13:07.000Z", "max_issues_repo_path": "algorithms/setup.py", "max_issues_repo_name": "Rhoana/bio_constrained_graphs", "max_issues_repo_head_hexsha": "6f9adb4d9a4fe915e14fc72a335eaa23c37338c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithms/setup.py", "max_forks_repo_name": "Rhoana/bio_constrained_graphs", "max_forks_repo_head_hexsha": "6f9adb4d9a4fe915e14fc72a335eaa23c37338c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-30T08:21:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-09T19:28:48.000Z", "avg_line_length": 27.0, "max_line_length": 87, "alphanum_fraction": 0.6273148148, "include": true, "reason": "import numpy", "num_tokens": 222}
|
/-
Copyright (c) 2020 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.mv_polynomial.default
import Mathlib.data.fintype.card
import Mathlib.PostPort
universes u_1 u_3 u_2
namespace Mathlib
/-!
# Homogeneous polynomials
A multivariate polynomial `φ` is homogeneous of degree `n`
if all monomials occurring in `φ` have degree `n`.
## Main definitions/lemmas
* `is_homogeneous φ n`: a predicate that asserts that `φ` is homogeneous of degree `n`.
* `homogeneous_component n`: the additive morphism that projects polynomials onto
their summand that is homogeneous of degree `n`.
* `sum_homogeneous_component`: every polynomial is the sum of its homogeneous components
-/
namespace mv_polynomial
/-
TODO
* create definition for `∑ i in d.support, d i`
* define graded rings, and show that mv_polynomial is an example
-/
/-- A multivariate polynomial `φ` is homogeneous of degree `n`
if all monomials occurring in `φ` have degree `n`. -/
-- Defining predicate: every monomial with a nonzero coefficient has exponent sum `n`.
def is_homogeneous {σ : Type u_1} {R : Type u_3} [comm_semiring R] (φ : mv_polynomial σ R) (n : ℕ) :=
  ∀ {d : σ →₀ ℕ}, coeff d φ ≠ 0 → (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n

-- A single monomial of degree `n` is homogeneous of degree `n`.
theorem is_homogeneous_monomial {σ : Type u_1} {R : Type u_3} [comm_semiring R] (d : σ →₀ ℕ) (r : R) (n : ℕ) (hn : (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n) : is_homogeneous (monomial d r) n := sorry

-- Constants are homogeneous of degree 0.
theorem is_homogeneous_C (σ : Type u_1) {R : Type u_3} [comm_semiring R] (r : R) : is_homogeneous (coe_fn C r) 0 := sorry

-- The zero polynomial is (vacuously) homogeneous of every degree.
theorem is_homogeneous_zero (σ : Type u_1) (R : Type u_3) [comm_semiring R] (n : ℕ) : is_homogeneous 0 n :=
  fun (d : σ →₀ ℕ) (hd : coeff d 0 ≠ 0) => false.elim (hd (coeff_zero d))

-- `1` is the constant polynomial, hence homogeneous of degree 0.
theorem is_homogeneous_one (σ : Type u_1) (R : Type u_3) [comm_semiring R] : is_homogeneous 1 0 :=
  is_homogeneous_C σ 1

-- A single variable is homogeneous of degree 1.
theorem is_homogeneous_X {σ : Type u_1} (R : Type u_3) [comm_semiring R] (i : σ) : is_homogeneous (X i) 1 := sorry
namespace is_homogeneous

-- The coefficient is forced to zero wherever the monomial degree differs from `n`.
theorem coeff_eq_zero {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {n : ℕ} (hφ : is_homogeneous φ n) (d : σ →₀ ℕ) (hd : (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) ≠ n) : coeff d φ = 0 :=
  eq.mp (Eq._oldrec (Eq.refl (¬coeff d φ ≠ 0)) (propext not_not)) (mt hφ hd)

-- A nonzero polynomial is homogeneous of at most one degree.
theorem inj_right {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {m : ℕ} {n : ℕ} (hm : is_homogeneous φ m) (hn : is_homogeneous φ n) (hφ : φ ≠ 0) : m = n := sorry

-- Homogeneity of a fixed degree is preserved by addition.
theorem add {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {ψ : mv_polynomial σ R} {n : ℕ} (hφ : is_homogeneous φ n) (hψ : is_homogeneous ψ n) : is_homogeneous (φ + ψ) n := sorry

-- ... and by finite sums of same-degree polynomials.
theorem sum {σ : Type u_1} {R : Type u_3} [comm_semiring R] {ι : Type u_2} (s : finset ι) (φ : ι → mv_polynomial σ R) (n : ℕ) (h : ∀ (i : ι), i ∈ s → is_homogeneous (φ i) n) : is_homogeneous (finset.sum s fun (i : ι) => φ i) n := sorry

-- Degrees add under multiplication.
theorem mul {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {ψ : mv_polynomial σ R} {m : ℕ} {n : ℕ} (hφ : is_homogeneous φ m) (hψ : is_homogeneous ψ n) : is_homogeneous (φ * ψ) (m + n) := sorry

-- Degrees sum under finite products.
theorem prod {σ : Type u_1} {R : Type u_3} [comm_semiring R] {ι : Type u_2} (s : finset ι) (φ : ι → mv_polynomial σ R) (n : ι → ℕ) (h : ∀ (i : ι), i ∈ s → is_homogeneous (φ i) (n i)) : is_homogeneous (finset.prod s fun (i : ι) => φ i) (finset.sum s fun (i : ι) => n i) := sorry

-- A nonzero homogeneous polynomial of degree `n` has total degree exactly `n`.
theorem total_degree {σ : Type u_1} {R : Type u_3} [comm_semiring R] {φ : mv_polynomial σ R} {n : ℕ} (hφ : is_homogeneous φ n) (h : φ ≠ 0) : total_degree φ = n := sorry

end is_homogeneous
/-- `homogeneous_component n φ` is the part of `φ` that is homogeneous of degree `n`.
See `sum_homogeneous_component` for the statement that `φ` is equal to the sum
of all its homogeneous components. -/
-- Implemented as a linear projection: restrict the coefficient function to the
-- monomials whose exponent sum is `n`, then include back into the polynomial ring.
def homogeneous_component {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) : linear_map R (mv_polynomial σ R) (mv_polynomial σ R) :=
  linear_map.comp
    (submodule.subtype
      (finsupp.supported R R (set_of fun (d : σ →₀ ℕ) => (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n)))
    (finsupp.restrict_dom R R (set_of fun (d : σ →₀ ℕ) => (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n))

-- Coefficients survive the projection exactly when the monomial degree is `n`.
theorem coeff_homogeneous_component {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) (d : σ →₀ ℕ) : coeff d (coe_fn (homogeneous_component n) φ) =
  ite ((finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n) (coeff d φ) 0 := sorry

-- The component as an explicit sum over the degree-`n` monomials of `φ`.
theorem homogeneous_component_apply {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) : coe_fn (homogeneous_component n) φ =
  finset.sum
    (finset.filter (fun (d : σ →₀ ℕ) => (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) = n)
      (finsupp.support φ))
    fun (d : σ →₀ ℕ) => monomial d (coeff d φ) := sorry

-- The degree-`n` component is itself homogeneous of degree `n`.
theorem homogeneous_component_is_homogeneous {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) : is_homogeneous (coe_fn (homogeneous_component n) φ) n := sorry

-- The degree-0 component is the constant term.
theorem homogeneous_component_zero {σ : Type u_1} {R : Type u_3} [comm_semiring R] (φ : mv_polynomial σ R) : coe_fn (homogeneous_component 0) φ = coe_fn C (coeff 0 φ) := sorry

-- Vanishing criterion: no monomial of `φ` has degree `n`.
theorem homogeneous_component_eq_zero' {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) (h : ∀ (d : σ →₀ ℕ), d ∈ finsupp.support φ → (finset.sum (finsupp.support d) fun (i : σ) => coe_fn d i) ≠ n) : coe_fn (homogeneous_component n) φ = 0 := sorry

-- Components above the total degree vanish.
theorem homogeneous_component_eq_zero {σ : Type u_1} {R : Type u_3} [comm_semiring R] (n : ℕ) (φ : mv_polynomial σ R) (h : total_degree φ < n) : coe_fn (homogeneous_component n) φ = 0 := sorry

-- Every polynomial is the sum of its homogeneous components.
theorem sum_homogeneous_component {σ : Type u_1} {R : Type u_3} [comm_semiring R] (φ : mv_polynomial σ R) : (finset.sum (finset.range (total_degree φ + 1)) fun (i : ℕ) => coe_fn (homogeneous_component i) φ) = φ := sorry
|
{"author": "AurelienSaue", "repo": "Mathlib4_auto", "sha": "590df64109b08190abe22358fabc3eae000943f2", "save_path": "github-repos/lean/AurelienSaue-Mathlib4_auto", "path": "github-repos/lean/AurelienSaue-Mathlib4_auto/Mathlib4_auto-590df64109b08190abe22358fabc3eae000943f2/Mathlib/ring_theory/polynomial/homogeneous.lean"}
|
# Copyright (c) 2016 by Mike Jarvis and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import galsim
import piff
import numpy as np
import os
import fitsio
from piff_test_helper import timer
# Reference PSF profiles (unit half-light radius) used as the fiducial shapes
# that the tests below dilate/shear/shift before fitting.
fiducial_kolmogorov = galsim.Kolmogorov(half_light_radius=1.0)
fiducial_gaussian = galsim.Gaussian(half_light_radius=1.0)
fiducial_moffat = galsim.Moffat(half_light_radius=1.0, beta=3.0)
def make_data(gsobject, scale, g1, g2, u0, v0, flux, noise=0., pix_scale=1., fpu=0., fpv=0.,
              nside=32, nom_u0=0., nom_v0=0., rng=None, include_pixel=True):
    """Make a Star instance filled with a drawn rendering of the given profile.

    (The drawn profile is whatever `gsobject` is -- Gaussian, Kolmogorov,
    Moffat, ... -- dilated, sheared, shifted and flux-scaled as requested;
    the original docstring incorrectly said "a Kolmogorov profile".)

    :param gsobject    The fiducial gsobject profile to use.
    :param scale:      The scale to apply to the gsobject.
    :param g1, g2:     The shear to apply to the gsobject.
    :param u0, v0:     The sub-pixel offset to apply.
    :param flux:       The flux of the star
    :param noise:      RMS Gaussian noise to be added to each pixel [default: 0]
    :param pix_scale:  pixel size in "wcs" units [default: 1.]
    :param fpu,fpv:    position of this cutout in some larger focal plane [default: 0,0]
    :param nside:      The size of the array [default: 32]
    :param nom_u0, nom_v0:  The nominal u0,v0 in the StarData [default: 0,0]
    :param rng:        If adding noise, the galsim deviate to use for the random numbers
                       [default: None]
    :param include_pixel:  Include integration over pixel. [default: True]

    :returns: a piff.Star holding the rendered image and a constant-weight map.
    """
    k = gsobject.withFlux(flux).dilate(scale).shear(g1=g1, g2=g2).shift(u0, v0)
    if noise == 0.:
        # Tiny but nonzero variance so the weight map (1/var) stays finite.
        var = 1.e-6
    else:
        var = noise**2
    weight = galsim.Image(nside, nside, dtype=float, init_value=1./var, scale=pix_scale)
    star = piff.Star.makeTarget(x=nside/2+nom_u0/pix_scale, y=nside/2+nom_v0/pix_scale,
                                u=fpu, v=fpv, scale=pix_scale, stamp_size=nside, weight=weight)
    star.image.setOrigin(0,0)
    # 'auto' convolves by the pixel response; 'no_pixel' draws the bare profile.
    method = 'auto' if include_pixel else 'no_pixel'
    k.drawImage(star.image, method=method,
                offset=galsim.PositionD(nom_u0/pix_scale, nom_v0/pix_scale), use_true_center=False)
    if noise != 0:
        gn = galsim.GaussianNoise(sigma=noise, rng=rng)
        star.image.addNoise(gn)
    return star
@timer
def test_simple():
    """Initial simple test of Gaussian, Kolmogorov, and Moffat PSFs.

    For each fiducial profile: draw a noiseless sheared/shifted PSF (without
    pixel convolution) and check that GSObjectModel recovers scale, shear, and
    centroid with both the HSM-based fast fit and the full least-squares fit;
    also check building the model via the config parser, FITS round-tripping,
    and a final slow fit with pixel convolution included.
    """
    # Here is the true PSF
    scale = 1.3
    g1 = 0.23
    g2 = -0.17
    du = 0.1
    dv = 0.4
    for fiducial in [fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat]:
        print()
        print("fiducial = ", fiducial)
        print()
        psf = fiducial.dilate(scale).shear(g1=g1, g2=g2).shift(du, dv)
        # Draw the PSF onto an image.  Let's go ahead and give it a non-trivial WCS.
        wcs = galsim.JacobianWCS(0.26, 0.05, -0.08, -0.29)
        image = galsim.Image(64, 64, wcs=wcs)
        # This is only going to come out right if we (unphysically) don't convolve by the pixel.
        psf.drawImage(image, method='no_pixel')
        # Make a StarData instance for this image
        stardata = piff.StarData(image, image.true_center)
        fiducial_star = piff.Star(stardata, None)
        # First try fastfit.
        print('Fast fit')
        model = piff.GSObjectModel(fiducial, fastfit=True, include_pixel=False)
        fit = model.fit(model.initialize(fiducial_star)).fit
        print('True scale = ', scale, ', model scale = ', fit.params[0])
        print('True g1 = ', g1, ', model g1 = ', fit.params[1])
        print('True g2 = ', g2, ', model g2 = ', fit.params[2])
        print('True du = ', du, ', model du = ', fit.center[0])
        print('True dv = ', dv, ', model dv = ', fit.center[1])
        # This test is fairly accurate, since we didn't add any noise and didn't convolve by
        # the pixel, so the image is very accurately a sheared GSObject.
        np.testing.assert_allclose(fit.params[0], scale, rtol=1e-4)
        np.testing.assert_allclose(fit.params[1], g1, rtol=0, atol=1e-7)
        np.testing.assert_allclose(fit.params[2], g2, rtol=0, atol=1e-7)
        np.testing.assert_allclose(fit.center[0], du, rtol=0, atol=1e-7)
        np.testing.assert_allclose(fit.center[1], dv, rtol=0, atol=1e-7)
        # Now try fastfit=False.
        print('Slow fit')
        model = piff.GSObjectModel(fiducial, fastfit=False, include_pixel=False)
        fit = model.fit(model.initialize(fiducial_star)).fit
        print('True scale = ', scale, ', model scale = ', fit.params[0])
        print('True g1 = ', g1, ', model g1 = ', fit.params[1])
        print('True g2 = ', g2, ', model g2 = ', fit.params[2])
        print('True du = ', du, ', model du = ', fit.center[0])
        print('True dv = ', dv, ', model dv = ', fit.center[1])
        np.testing.assert_allclose(fit.params[0], scale, rtol=1e-6)
        np.testing.assert_allclose(fit.params[1], g1, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.params[2], g2, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.center[0], du, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.center[1], dv, rtol=0, atol=1e-6)
        # Now test running it via the config parser
        config = {
            'model' : {
                'type' : 'GSObjectModel',
                'gsobj': repr(fiducial),
                'include_pixel': False
            }
        }
        # Verbose logging when run as a script; quiet under pytest.
        if __name__ == '__main__':
            logger = piff.config.setup_logger(verbose=3)
        else:
            logger = piff.config.setup_logger(verbose=1)
        model = piff.Model.process(config['model'], logger)
        fit = model.fit(model.initialize(fiducial_star)).fit
        # Same tests.
        np.testing.assert_allclose(fit.params[0], scale, rtol=1e-6)
        np.testing.assert_allclose(fit.params[1], g1, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.params[2], g2, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.center[0], du, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.center[1], dv, rtol=0, atol=1e-6)
        # Also need to test ability to serialize
        outfile = os.path.join('output', 'gsobject_test.fits')
        with fitsio.FITS(outfile, 'rw', clobber=True) as f:
            model.write(f, 'psf_model')
        with fitsio.FITS(outfile, 'r') as f:
            roundtrip_model = piff.GSObjectModel.read(f, 'psf_model')
        assert model.__dict__ == roundtrip_model.__dict__
        # Finally, we should also test with pixel convolution included.  This really only makes
        # sense for fastfit=False, since HSM FindAdaptiveMom doesn't account for the pixel shape
        # in its measurements.
        # Draw the PSF onto an image.  Let's go ahead and give it a non-trivial WCS.
        wcs = galsim.JacobianWCS(0.26, 0.05, -0.08, -0.29)
        image = galsim.Image(64,64, wcs=wcs)
        psf.drawImage(image, method='auto')
        # Make a StarData instance for this image
        stardata = piff.StarData(image, image.true_center)
        fiducial_star = piff.Star(stardata, None)
        print('Slow fit, pixel convolution included.')
        model = piff.GSObjectModel(fiducial, fastfit=False, include_pixel=True)
        star = model.initialize(fiducial_star)
        star = model.fit(star, fastfit=True)  # Get better results with one round of fastfit.
        fit = model.fit(star).fit
        print('True scale = ', scale, ', model scale = ', fit.params[0])
        print('True g1 = ', g1, ', model g1 = ', fit.params[1])
        print('True g2 = ', g2, ', model g2 = ', fit.params[2])
        print('True du = ', du, ', model du = ', fit.center[0])
        print('True dv = ', dv, ', model dv = ', fit.center[1])
        # Accuracy goals are a bit looser here since it's harder to fit with the pixel involved.
        np.testing.assert_allclose(fit.params[0], scale, rtol=1e-6)
        np.testing.assert_allclose(fit.params[1], g1, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.params[2], g2, rtol=0, atol=1e-6)
        np.testing.assert_allclose(fit.center[0], du, rtol=0, atol=1e-5)
        np.testing.assert_allclose(fit.center[1], dv, rtol=0, atol=1e-5)
@timer
def test_center():
    """Fit with centroid free and PSF center constrained to an initially mis-registered PSF.

    The star is drawn offset by (u0, v0); with centered=True the offset should
    end up in star.fit.center while the drawn model's own HSM centroid stays
    at (0, 0).
    """
    influx = 150.
    scale = 2.0
    u0, v0 = 0.6, -0.4
    g1, g2 = 0.1, 0.2
    for fiducial in [fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat]:
        print()
        print("fiducial = ", fiducial)
        print()
        s = make_data(fiducial, scale, g1, g2, u0, v0, influx, pix_scale=0.5, include_pixel=False)
        mod = piff.GSObjectModel(fiducial, include_pixel=False)
        star = mod.initialize(s)
        print('Flux, ctr after reflux:',star.fit.flux,star.fit.center)
        # Alternate model fitting and flux/center refitting; a few iterations
        # suffice to converge onto the true flux and (u0, v0) offset.
        for i in range(3):
            star = mod.fit(star)
            star = mod.reflux(star)
            print('Flux, ctr, chisq after fit {:d}:'.format(i),
                  star.fit.flux, star.fit.center, star.fit.chisq)
            np.testing.assert_almost_equal(star.fit.flux/influx, 1.0, decimal=8)
        np.testing.assert_allclose(star.fit.center[0], u0)
        np.testing.assert_allclose(star.fit.center[1], v0)
        # Residual image when done should be dominated by structure off the edge of the fitted
        # region.
        mask = star.weight.array > 0
        # The drawn model should reproduce the data within the fitted region.
        star2 = mod.draw(star)
        print('max image abs diff = ',np.max(np.abs(star2.image.array-s.image.array)))
        print('max image abs value = ',np.max(np.abs(s.image.array)))
        peak = np.max(np.abs(s.image.array[mask]))
        np.testing.assert_almost_equal(star2.image.array[mask]/peak, s.image.array[mask]/peak,
                                       decimal=8)
        # Measured centroid of PSF model should be close to 0,0
        star3 = mod.draw(star.withFlux(influx, (0,0)))
        flux, cenx, ceny, sigma, e1, e2, flag = star3.hsm
        # NOTE(review): this prints the true g1,g2 rather than the measured e1,e2.
        print('HSM measurements: ',flux, cenx, ceny, sigma, g1, g2, flag)
        np.testing.assert_allclose(cenx, 0, atol=1.e-4)
        np.testing.assert_allclose(ceny, 0, atol=1.e-4)
        np.testing.assert_allclose(e1, g1, rtol=1.e-4)
        np.testing.assert_allclose(e2, g2, rtol=1.e-4)
        # test copy_image
        star_copy = mod.draw(star, copy_image=True)
        star_nocopy = mod.draw(star, copy_image=False)
        # Mutating the input star's image must show through the no-copy draw
        # but leave the copied draw untouched.
        star.image.array[0,0] = 132435
        assert star_nocopy.image.array[0,0] == star.image.array[0,0]
        assert star_copy.image.array[0,0] != star.image.array[0,0]
        assert star_copy.image.array[1,1] == star.image.array[1,1]
@timer
def test_uncentered():
    """Fit with centroid shift included in the PSF model.  (I.e. centered=False)

    With centered=False the (u0, v0) offset is absorbed into the model params,
    so the fitted flux should equal the input flux, star.fit.center should stay
    at (0, 0), and the drawn model's HSM centroid should land at (u0, v0).
    """
    influx = 150.
    scale = 2.0
    u0, v0 = 0.6, -0.4
    g1, g2 = 0.1, 0.2
    for fiducial in [fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat]:
        print()
        print("fiducial = ", fiducial)
        print()
        s = make_data(fiducial, scale, g1, g2, u0, v0, influx, pix_scale=0.5, include_pixel=False)
        mod = piff.GSObjectModel(fiducial, include_pixel=False, centered=False)
        star = mod.initialize(s)
        print('Flux, ctr after reflux:',star.fit.flux,star.fit.center)
        # Alternate model fit and flux refit for a few iterations to converge.
        for i in range(3):
            star = mod.fit(star)
            star = mod.reflux(star)
            print('Flux, ctr, chisq after fit {:d}:'.format(i),
                  star.fit.flux, star.fit.center, star.fit.chisq)
            np.testing.assert_allclose(star.fit.flux, influx)
        np.testing.assert_allclose(star.fit.center[0], 0)
        np.testing.assert_allclose(star.fit.center[1], 0)
        # Residual image when done should be dominated by structure off the edge of the fitted
        # region.
        mask = star.weight.array > 0
        # The drawn model should reproduce the data within the fitted region.
        star2 = mod.draw(star)
        print('max image abs diff = ',np.max(np.abs(star2.image.array-s.image.array)))
        print('max image abs value = ',np.max(np.abs(s.image.array)))
        peak = np.max(np.abs(s.image.array[mask]))
        np.testing.assert_almost_equal(star2.image.array[mask]/peak, s.image.array[mask]/peak,
                                       decimal=8)
        # Measured centroid of PSF model should be close to u0, v0
        star3 = mod.draw(star.withFlux(influx, (0,0)))
        flux, cenx, ceny, sigma, e1, e2, flag = star3.hsm
        # NOTE(review): this prints the true g1,g2 rather than the measured e1,e2.
        print('HSM measurements: ',flux, cenx, ceny, sigma, g1, g2, flag)
        np.testing.assert_allclose(cenx, u0, rtol=1.e-4)
        np.testing.assert_allclose(ceny, v0, rtol=1.e-4)
        np.testing.assert_allclose(e1, g1, rtol=1.e-4)
        np.testing.assert_allclose(e2, g2, rtol=1.e-4)
@timer
def test_interp():
    """First test of use with interpolator.  Make a bunch of noisy
    versions of the same PSF, interpolate them with constant interp
    to get an average PSF
    """
    influx = 150.
    # Run the full problem (all profiles, more iterations, denser grid) when
    # invoked as a script; keep it cheap under pytest.
    if __name__ == '__main__':
        fiducial_list = [fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat]
        niter = 3
        npos = 10
    else:
        fiducial_list = [fiducial_moffat]
        niter = 1  # Not actually any need for iterating in this case.
        npos = 4
    for fiducial in fiducial_list:
        print()
        print("fiducial = ", fiducial)
        print()
        mod = piff.GSObjectModel(fiducial, include_pixel=False)
        g1 = g2 = u0 = v0 = 0.0
        # Interpolator will be simple mean
        interp = piff.Polynomial(order=0)
        # Draw stars on a 2d grid of "focal plane" with 0<=u,v<=1
        positions = np.linspace(0.,1.,npos)
        stars = []
        rng = galsim.BaseDeviate(1234)
        for u in positions:
            for v in positions:
                s = make_data(fiducial, 1.0, g1, g2, u0, v0, influx,
                              noise=0.1, pix_scale=0.5, fpu=u, fpv=v, rng=rng, include_pixel=False)
                s = mod.initialize(s)
                stars.append(s)
        # Also store away a noiseless copy of the PSF, origin of focal plane
        s0 = make_data(fiducial, 1.0, g1, g2, u0, v0, influx, pix_scale=0.5, include_pixel=False)
        s0 = mod.initialize(s0)
        # Polynomial doesn't need this, but it should work nonetheless.
        interp.initialize(stars)
        # Iterate solution using interpolator
        for iteration in range(niter):
            # Refit PSFs star by star:
            for i,s in enumerate(stars):
                stars[i] = mod.fit(s)
            # Run the interpolator
            interp.solve(stars)
            # Install interpolator solution into each
            # star, recalculate flux, report chisq
            chisq = 0.
            dof = 0
            for i,s in enumerate(stars):
                s = interp.interpolate(s)
                s = mod.reflux(s)
                chisq += s.fit.chisq
                dof += s.fit.dof
                stars[i] = s
            print('iteration',iteration,'chisq=',chisq, 'dof=',dof)
        # Now use the interpolator to produce a noiseless rendering
        s1 = interp.interpolate(s0)
        s1 = mod.reflux(s1)
        print('Flux, ctr, chisq after interpolation: ',s1.fit.flux, s1.fit.center, s1.fit.chisq)
        np.testing.assert_almost_equal(s1.fit.flux/influx, 1.0, decimal=3)
        s1 = mod.draw(s1)
        print('max image abs diff = ',np.max(np.abs(s1.image.array-s0.image.array)))
        print('max image abs value = ',np.max(np.abs(s0.image.array)))
        peak = np.max(np.abs(s0.image.array))
        np.testing.assert_almost_equal(s1.image.array/peak, s0.image.array/peak, decimal=3)
@timer
def test_missing():
    """Next: fit mean PSF to multiple images, with missing pixels.

    Each star has 10% of its pixels masked (weight set to 0, image poisoned
    with -999); the fit should still converge to the noiseless PSF.
    """
    # Run all profiles when invoked as a script; keep it cheap under pytest.
    if __name__ == '__main__':
        fiducial_list = [fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat]
    else:
        fiducial_list = [fiducial_moffat]
    for fiducial in fiducial_list:
        print()
        print("fiducial = ", fiducial)
        print()
        mod = piff.GSObjectModel(fiducial, include_pixel=False)
        g1 = g2 = u0 = v0 = 0.0
        # Draw stars on a 2d grid of "focal plane" with 0<=u,v<=1
        positions = np.linspace(0.,1.,4)
        influx = 150.
        stars = []
        np_rng = np.random.RandomState(1234)
        rng = galsim.BaseDeviate(1234)
        for u in positions:
            for v in positions:
                # Draw stars in focal plane positions around a unit ring
                s = make_data(fiducial, 1.0, g1, g2, u0, v0, influx,
                              noise=0.1, pix_scale=0.5, fpu=u, fpv=v, rng=rng, include_pixel=False)
                s = mod.initialize(s)
                # Kill 10% of each star's pixels
                bad = np_rng.rand(*s.image.array.shape) < 0.1
                s.weight.array[bad] = 0.
                s.image.array[bad] = -999.
                s = mod.reflux(s, fit_center=False) # Start with a sensible flux
                stars.append(s)
        # Also store away a noiseless copy of the PSF, origin of focal plane
        s0 = make_data(fiducial, 1.0, g1, g2, u0, v0, influx, pix_scale=0.5, include_pixel=False)
        s0 = mod.initialize(s0)
        interp = piff.Polynomial(order=0)
        interp.initialize(stars)
        oldchisq = 0.
        # Iterate solution using interpolator
        for iteration in range(40):
            # Refit PSFs star by star:
            for i,s in enumerate(stars):
                stars[i] = mod.fit(s)
            # Run the interpolator
            interp.solve(stars)
            # Install interpolator solution into each
            # star, recalculate flux, report chisq
            chisq = 0.
            dof = 0
            for i,s in enumerate(stars):
                s = interp.interpolate(s)
                s = mod.reflux(s)
                chisq += s.fit.chisq
                dof += s.fit.dof
                stars[i] = s
                ###print('   chisq=',s.fit.chisq, 'dof=',s.fit.dof)
            print('iteration',iteration,'chisq=',chisq, 'dof=',dof)
            # Stop once chi^2 has decreased, but only by a small amount.
            if oldchisq>0 and chisq<oldchisq and oldchisq-chisq < dof/10.:
                break
            else:
                oldchisq = chisq
        # Now use the interpolator to produce a noiseless rendering
        s1 = interp.interpolate(s0)
        s1 = mod.reflux(s1)
        print('Flux, ctr after interpolation: ',s1.fit.flux, s1.fit.center, s1.fit.chisq)
        # Less than 2 dp of accuracy here!
        np.testing.assert_almost_equal(s1.fit.flux/influx, 1.0, decimal=3)
        s1 = mod.draw(s1)
        print('max image abs diff = ',np.max(np.abs(s1.image.array-s0.image.array)))
        print('max image abs value = ',np.max(np.abs(s0.image.array)))
        peak = np.max(np.abs(s0.image.array))
        np.testing.assert_almost_equal(s1.image.array/peak, s0.image.array/peak, decimal=3)
@timer
def test_gradient():
    """Next: fit spatially-varying PSF to multiple images.

    Scale, shear, and offset all vary linearly across the focal plane, so a
    first-order Polynomial interpolator should recover them.
    """
    # Run all profiles when invoked as a script; keep it cheap under pytest.
    if __name__ == '__main__':
        fiducial_list = [fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat]
    else:
        fiducial_list = [fiducial_moffat]
    for fiducial in fiducial_list:
        print()
        print("fiducial = ", fiducial)
        print()
        mod = piff.GSObjectModel(fiducial, include_pixel=False)
        # Interpolator will be linear
        interp = piff.Polynomial(order=1)
        # Draw stars on a 2d grid of "focal plane" with 0<=u,v<=1
        positions = np.linspace(0.,1.,4)
        influx = 150.
        stars = []
        rng = galsim.BaseDeviate(1234)
        for u in positions:
            # Put gradient in pixel size
            for v in positions:
                # Draw stars in focal plane positions around a unit ring
                # spatially-varying fwhm, g1, g2.
                s = make_data(fiducial, 1.0+u*0.1+0.1*v, 0.1*u, 0.1*v, 0.5*u, 0.5*v, influx,
                              noise=0.1, pix_scale=0.5, fpu=u, fpv=v, rng=rng,
                              include_pixel=False)
                s = mod.initialize(s)
                stars.append(s)
        # import matplotlib.pyplot as plt
        # fig, axes = plt.subplots(4, 4)
        # for star, ax in zip(stars, axes.ravel()):
        #     ax.imshow(star.data.image.array)
        # plt.show()
        # Also store away a noiseless copy of the PSF, origin of focal plane
        s0 = make_data(fiducial, 1.0, 0., 0., 0., 0., influx, pix_scale=0.5, include_pixel=False)
        s0 = mod.initialize(s0)
        # Polynomial doesn't need this, but it should work nonetheless.
        interp.initialize(stars)
        oldchisq = 0.
        # Iterate solution using interpolator
        for iteration in range(40):
            # Refit PSFs star by star:
            for i,s in enumerate(stars):
                stars[i] = mod.fit(s)
            # Run the interpolator
            interp.solve(stars)
            # Install interpolator solution into each
            # star, recalculate flux, report chisq
            chisq = 0.
            dof = 0
            for i,s in enumerate(stars):
                s = interp.interpolate(s)
                s = mod.reflux(s)
                chisq += s.fit.chisq
                dof += s.fit.dof
                stars[i] = s
                ###print('   chisq=',s.fit.chisq, 'dof=',s.fit.dof)
            print('iteration',iteration,'chisq=',chisq, 'dof=',dof)
            # Stop once successive chi^2 values agree to within dof/10.
            if oldchisq>0 and np.abs(oldchisq-chisq) < dof/10.:
                break
            else:
                oldchisq = chisq
        for i, s in enumerate(stars):
            print(i, s.fit.center)
        # Now use the interpolator to produce a noiseless rendering
        s1 = interp.interpolate(s0)
        s1 = mod.reflux(s1)
        print('Flux, ctr, chisq after interpolation: ',s1.fit.flux, s1.fit.center, s1.fit.chisq)
        np.testing.assert_almost_equal(s1.fit.flux/influx, 1.0, decimal=2)
        s1 = mod.draw(s1)
        print('max image abs diff = ',np.max(np.abs(s1.image.array-s0.image.array)))
        print('max image abs value = ',np.max(np.abs(s0.image.array)))
        peak = np.max(np.abs(s0.image.array))
        np.testing.assert_almost_equal(s1.image.array/peak, s0.image.array/peak, decimal=2)
@timer
def test_gradient_center():
    """Next: fit spatially-varying PSF, with spatially-varying centers to multiple images.

    Like test_gradient, but the centroid offset also varies linearly across
    the focal plane, so the interpolated center parameters are exercised too.
    """
    # Run all profiles when invoked as a script; keep it cheap under pytest.
    if __name__ == '__main__':
        fiducial_list = [fiducial_gaussian, fiducial_kolmogorov, fiducial_moffat]
    else:
        fiducial_list = [fiducial_moffat]
    for fiducial in fiducial_list:
        print()
        print("fiducial = ", fiducial)
        print()
        mod = piff.GSObjectModel(fiducial, include_pixel=False)
        # Interpolator will be linear
        interp = piff.Polynomial(order=1)
        # Draw stars on a 2d grid of "focal plane" with 0<=u,v<=1
        positions = np.linspace(0.,1.,4)
        influx = 150.
        stars = []
        rng = galsim.BaseDeviate(1234)
        for u in positions:
            # Put gradient in pixel size
            for v in positions:
                # Draw stars in focal plane positions around a unit ring
                # spatially-varying fwhm, g1, g2.
                s = make_data(fiducial, 1.0+u*0.1+0.1*v, 0.1*u, 0.1*v, 0.5*u, 0.5*v,
                              influx, noise=0.1, pix_scale=0.5, fpu=u, fpv=v, rng=rng,
                              include_pixel=False)
                s = mod.initialize(s)
                stars.append(s)
        # import matplotlib.pyplot as plt
        # fig, axes = plt.subplots(4, 4)
        # for star, ax in zip(stars, axes.ravel()):
        #     ax.imshow(star.data.image.array)
        # plt.show()
        # Also store away a noiseless copy of the PSF, origin of focal plane
        s0 = make_data(fiducial, 1.0, 0., 0., 0., 0., influx, pix_scale=0.5, include_pixel=False)
        s0 = mod.initialize(s0)
        # Polynomial doesn't need this, but it should work nonetheless.
        interp.initialize(stars)
        oldchisq = 0.
        # Iterate solution using interpolator
        for iteration in range(40):
            # Refit PSFs star by star:
            for i,s in enumerate(stars):
                stars[i] = mod.fit(s)
            # Run the interpolator
            interp.solve(stars)
            # Install interpolator solution into each
            # star, recalculate flux, report chisq
            chisq = 0.
            dof = 0
            for i,s in enumerate(stars):
                s = interp.interpolate(s)
                s = mod.reflux(s)
                chisq += s.fit.chisq
                dof += s.fit.dof
                stars[i] = s
                ###print('   chisq=',s.fit.chisq, 'dof=',s.fit.dof)
            print('iteration',iteration,'chisq=',chisq, 'dof=',dof)
            # Stop once successive chi^2 values agree to within dof/10.
            if oldchisq>0 and np.abs(oldchisq-chisq) < dof/10.:
                break
            else:
                oldchisq = chisq
        for i, s in enumerate(stars):
            print(i, s.fit.center, s.fit.params[0:2])
        # Now use the interpolator to produce a noiseless rendering
        s1 = interp.interpolate(s0)
        s1 = mod.reflux(s1)
        print('Flux, ctr, chisq after interpolation: ',s1.fit.flux, s1.fit.center, s1.fit.chisq)
        # Less than 2 dp of accuracy here!
        np.testing.assert_almost_equal(s1.fit.flux/influx, 1.0, decimal=2)
        s1 = mod.draw(s1)
        print('max image abs diff = ',np.max(np.abs(s1.image.array-s0.image.array)))
        print('max image abs value = ',np.max(np.abs(s0.image.array)))
        peak = np.max(np.abs(s0.image.array))
        np.testing.assert_almost_equal(s1.image.array/peak, s0.image.array/peak, decimal=2)
@timer
def test_direct():
    """ Simple test for directly instantiated Gaussian, Kolmogorov, and Moffat without going through
    GSObjectModel explicitly.

    Exercises the piff.Gaussian/Kolmogorov/Moffat convenience classes (with
    both fastfit=True and fastfit=False) plus FITS round-tripping.
    """
    # Here is the true PSF
    scale = 1.3
    g1 = 0.23
    g2 = -0.17
    du = 0.1
    dv = 0.4
    gsobjs = [galsim.Gaussian(sigma=1.0),
              galsim.Kolmogorov(half_light_radius=1.0),
              galsim.Moffat(half_light_radius=1.0, beta=3.0),
              galsim.Moffat(half_light_radius=1.0, beta=2.5, trunc=3.0)]
    models = [piff.Gaussian(fastfit=True, include_pixel=False),
              piff.Kolmogorov(fastfit=True, include_pixel=False),
              piff.Moffat(fastfit=True, beta=3.0, include_pixel=False),
              piff.Moffat(fastfit=True, beta=2.5, trunc=3.0, include_pixel=False)]
    for gsobj, model in zip(gsobjs, models):
        print()
        print("gsobj = ", gsobj)
        print()
        psf = gsobj.dilate(scale).shear(g1=g1, g2=g2).shift(du, dv)
        # Draw the PSF onto an image.  Let's go ahead and give it a non-trivial WCS.
        wcs = galsim.JacobianWCS(0.26, 0.05, -0.08, -0.29)
        image = galsim.Image(64,64, wcs=wcs)
        # This is only going to come out right if we (unphysically) don't convolve by the pixel.
        psf.drawImage(image, method='no_pixel')
        # Make a StarData instance for this image
        stardata = piff.StarData(image, image.true_center)
        star = piff.Star(stardata, None)
        star = model.initialize(star)
        # First try fastfit.
        print('Fast fit')
        fit = model.fit(star).fit
        print('True scale = ', scale, ', model scale = ', fit.params[0])
        print('True g1 = ', g1, ', model g1 = ', fit.params[1])
        print('True g2 = ', g2, ', model g2 = ', fit.params[2])
        print('True du = ', du, ', model du = ', fit.center[0])
        print('True dv = ', dv, ', model dv = ', fit.center[1])
        # This test is fairly accurate, since we didn't add any noise and didn't convolve by
        # the pixel, so the image is very accurately a sheared GSObject.
        # These tests are more strict above.  The truncated Moffat included here but not there
        # doesn't work quite as well.
        np.testing.assert_allclose(fit.params[0], scale, rtol=1e-4)
        np.testing.assert_allclose(fit.params[1], g1, rtol=0, atol=1e-5)
        np.testing.assert_allclose(fit.params[2], g2, rtol=0, atol=1e-5)
        np.testing.assert_allclose(fit.center[0], du, rtol=0, atol=1e-5)
        np.testing.assert_allclose(fit.center[1], dv, rtol=0, atol=1e-5)
        # Also need to test ability to serialize
        outfile = os.path.join('output', 'gsobject_direct_test.fits')
        with fitsio.FITS(outfile, 'rw', clobber=True) as f:
            model.write(f, 'psf_model')
        with fitsio.FITS(outfile, 'r') as f:
            roundtrip_model = piff.GSObjectModel.read(f, 'psf_model')
        assert model.__dict__ == roundtrip_model.__dict__
    # repeat with fastfit=False
    models = [piff.Gaussian(fastfit=False, include_pixel=False),
              piff.Kolmogorov(fastfit=False, include_pixel=False),
              piff.Moffat(fastfit=False, beta=3.0, include_pixel=False),
              piff.Moffat(fastfit=False, beta=2.5, trunc=3.0, include_pixel=False)]
    for gsobj, model in zip(gsobjs, models):
        print()
        print("gsobj = ", gsobj)
        print()
        psf = gsobj.dilate(scale).shear(g1=g1, g2=g2).shift(du, dv)
        # Draw the PSF onto an image.  Let's go ahead and give it a non-trivial WCS.
        wcs = galsim.JacobianWCS(0.26, 0.05, -0.08, -0.29)
        image = galsim.Image(64,64, wcs=wcs)
        # This is only going to come out right if we (unphysically) don't convolve by the pixel.
        psf.drawImage(image, method='no_pixel')
        # Make a StarData instance for this image
        stardata = piff.StarData(image, image.true_center)
        star = piff.Star(stardata, None)
        star = model.initialize(star)
        print('Slow fit')
        fit = model.fit(star).fit
        print('True scale = ', scale, ', model scale = ', fit.params[0])
        print('True g1 = ', g1, ', model g1 = ', fit.params[1])
        print('True g2 = ', g2, ', model g2 = ', fit.params[2])
        print('True du = ', du, ', model du = ', fit.center[0])
        print('True dv = ', dv, ', model dv = ', fit.center[1])
        # This test is fairly accurate, since we didn't add any noise and didn't convolve by
        # the pixel, so the image is very accurately a sheared GSObject.
        np.testing.assert_allclose(fit.params[0], scale, rtol=1e-5)
        np.testing.assert_allclose(fit.params[1], g1, rtol=0, atol=1e-5)
        np.testing.assert_allclose(fit.params[2], g2, rtol=0, atol=1e-5)
        np.testing.assert_allclose(fit.center[0], du, rtol=0, atol=1e-5)
        np.testing.assert_allclose(fit.center[1], dv, rtol=0, atol=1e-5)
        # Also need to test ability to serialize
        outfile = os.path.join('output', 'gsobject_direct_test.fits')
        with fitsio.FITS(outfile, 'rw', clobber=True) as f:
            model.write(f, 'psf_model')
        with fitsio.FITS(outfile, 'r') as f:
            roundtrip_model = piff.GSObjectModel.read(f, 'psf_model')
        assert model.__dict__ == roundtrip_model.__dict__
@timer
def test_var():
    """Check that the variance estimate in params_var is sane.

    Compares Piff's analytic variance estimate against an empirical variance
    computed from many noise realizations.  The empirical values are cached in
    input/test_<name>_var.npz and regenerated (1000 fits) if the file is absent.
    """
    # Here is the true PSF
    scale = 1.3
    g1 = 0.23
    g2 = -0.17
    du = 0.1
    dv = 0.4
    flux = 500
    wcs = galsim.JacobianWCS(0.26, 0.05, -0.08, -0.29)
    noise = 0.2
    gsobjs = [galsim.Gaussian(sigma=1.0),
              galsim.Kolmogorov(half_light_radius=1.0),
              galsim.Moffat(half_light_radius=1.0, beta=3.0),
              galsim.Moffat(half_light_radius=1.0, beta=2.5, trunc=3.0)]
    # Mix of centered = True/False,
    #        fastfit = True/False,
    #        include_pixel = True/False
    models = [piff.Gaussian(fastfit=False, include_pixel=False, centered=False),
              piff.Kolmogorov(fastfit=True, include_pixel=True, centered=False),
              piff.Moffat(fastfit=False, beta=4.8, include_pixel=True, centered=True),
              piff.Moffat(fastfit=True, beta=2.5, trunc=3.0, include_pixel=False, centered=True)]
    names = ['Gaussian',
             'Kolmogorov',
             'Moffat3',
             'Moffat2.5']
    for gsobj, model, name in zip(gsobjs, models, names):
        print()
        print("gsobj = ", gsobj)
        print()
        psf = gsobj.dilate(scale).shear(g1=g1, g2=g2).shift(du, dv).withFlux(flux)
        image = psf.drawImage(nx=64, ny=64, wcs=wcs, method='no_pixel')
        weight = image.copy()
        weight.fill(1/noise**2)
        # Save this one without noise.
        image1 = image.copy()
        # NOTE(review): image1 is not used below at this point (the fit uses
        # the noiseless `image`); this noisy copy looks like a leftover -- confirm.
        image1.addNoise(galsim.GaussianNoise(sigma=noise))
        # Make a StarData instance for this image
        stardata = piff.StarData(image, image.true_center, weight)
        star = piff.Star(stardata, None)
        star = model.initialize(star)
        fit = model.fit(star).fit
        file_name = 'input/test_%s_var.npz'%name
        print(file_name)
        # Regenerate the empirical-variance cache if it doesn't exist yet.
        if not os.path.isfile(file_name):
            num_runs = 1000
            all_params = []
            for i in range(num_runs):
                image1 = image.copy()
                image1.addNoise(galsim.GaussianNoise(sigma=noise))
                sd = piff.StarData(image1, image1.true_center, weight)
                s = piff.Star(sd, None)
                try:
                    s = model.initialize(s)
                    s = model.fit(s)
                except RuntimeError as e:  # Occasionally hsm fails.
                    print('Caught ',e)
                    continue
                print(s.fit.params)
                all_params.append(s.fit.params)
            var = np.var(all_params, axis=0)
            np.savez(file_name, var=var)
        var = np.load(file_name)['var']
        print('params = ',fit.params)
        print('empirical var = ',var)
        print('piff estimate = ',fit.params_var)
        print('ratio = ',fit.params_var/var)
        print('max ratio = ',np.max(fit.params_var/var))
        print('min ratio = ',np.min(fit.params_var/var))
        print('mean ratio = ',np.mean(fit.params_var/var))
        # Note: The fastfit=False estimates are better -- typically better than 10%
        #       The fastfit=True estimates are much rougher.  Especially size.  Need rtol=0.3.
        np.testing.assert_allclose(fit.params_var, var, rtol=0.3)
@timer
def test_fail():
    """Check that fits on hopelessly noisy or mismatched data raise RuntimeError.

    Uses a very noisy truncated-Moffat image so that the HSM-based fast fit,
    the least-squares fit (with a tight max_nfev budget), and the reference
    computation all fail, and verifies initialize/fit report the failure
    instead of returning junk.
    """
    # (fix: was the only test here missing the @timer decorator.)
    # Some vv noisy images that result in errors in the fit to check the error reporting.
    scale = 1.3
    g1 = 0.33
    g2 = -0.27
    flux = 15
    noise = 2.
    seed = 1234
    psf = galsim.Moffat(half_light_radius=1.0, beta=2.5, trunc=3.0)
    psf = psf.dilate(scale).shear(g1=g1, g2=g2).withFlux(flux)
    image = psf.drawImage(nx=64, ny=64, scale=0.3)
    weight = image.copy()
    weight.fill(1/noise**2)
    noisy_image = image.copy()
    rng = galsim.BaseDeviate(seed)
    noisy_image.addNoise(galsim.GaussianNoise(sigma=noise, rng=rng))
    star1 = piff.Star(piff.StarData(image, image.true_center, weight), None)
    star2 = piff.Star(piff.StarData(noisy_image, image.true_center, weight), None)
    # The fast (HSM) fit should fail outright on the noisy star.
    model1 = piff.Moffat(fastfit=True, beta=2.5)
    with np.testing.assert_raises(RuntimeError):
        model1.initialize(star2)
    with np.testing.assert_raises(RuntimeError):
        model1.fit(star2)
    # A fit that starts from a good solution but is handed the noisy data
    # should also fail.
    star3 = model1.initialize(star1)
    star3 = model1.fit(star3)
    star3 = piff.Star(star2.data, star3.fit)
    with np.testing.assert_raises(RuntimeError):
        model1.fit(star3)
    # This is contrived to hit the fit failure for the reference.
    # I'm not sure what realistic use case would actually hit it, but at least it's
    # theoretically possible to fail there.
    model2 = piff.GSObjectModel(galsim.InterpolatedImage(noisy_image), fastfit=True)
    with np.testing.assert_raises(RuntimeError):
        model2.initialize(star1)
    # A least-squares fit limited to 10 function evaluations cannot converge.
    model3 = piff.Moffat(fastfit=False, beta=2.5, scipy_kwargs={'max_nfev':10})
    with np.testing.assert_raises(RuntimeError):
        model3.initialize(star2)
    with np.testing.assert_raises(RuntimeError):
        model3.fit(star2).fit
    star3 = model3.initialize(star1)
    star3 = model3.fit(star3)
    star3 = piff.Star(star2.data, star3.fit)
    with np.testing.assert_raises(RuntimeError):
        model3.fit(star3)
if __name__ == '__main__':
    test_simple()
    test_center()
    test_uncentered()   # fix: was defined above but never invoked here
    test_interp()
    test_missing()
    test_gradient()
    test_gradient_center()
    test_direct()
    test_var()
    test_fail()
|
{"hexsha": "be91a471ec040be4aa29269ec1851406503ef756", "size": 37107, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_gsobject_model.py", "max_stars_repo_name": "rgraz/PIFFZTF", "max_stars_repo_head_hexsha": "5f47a7fbdb9040d871f40a5ce7de08740fdfbdb1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_gsobject_model.py", "max_issues_repo_name": "rgraz/PIFFZTF", "max_issues_repo_head_hexsha": "5f47a7fbdb9040d871f40a5ce7de08740fdfbdb1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_gsobject_model.py", "max_forks_repo_name": "rgraz/PIFFZTF", "max_forks_repo_head_hexsha": "5f47a7fbdb9040d871f40a5ce7de08740fdfbdb1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7871621622, "max_line_length": 100, "alphanum_fraction": 0.5984585119, "include": true, "reason": "import numpy", "num_tokens": 10583}
|
# coding: utf-8
# In[1]:
from pyaugur.augurlib import AugurOpt, AugurInfer
import numpy as np
import scipy as sp
import scipy.stats as sps
augur_lda = '''(K : Int, D : Int, N : Vec Int, alpha : Vec Real, beta : Vec Real) => {
param theta[d] ~ Dirichlet(alpha)
for d <- 0 until D ;
param phi[k] ~ Dirichlet(beta)
for k <- 0 until K ;
param z[d, n] ~ Categorical(theta[d])
for d <- 0 until D, n <- 0 until N[d] ;
data w[d, n] ~ Categorical(phi[z[d, n]])
for d <- 0 until D, n <- 0 until N[d] ;
}
'''
def run_lda(K, D, N, alpha, beta, train_w, sched, burnin=0, num_samples=100):
    """Compile the Augur LDA model and draw posterior samples.

    :param K: number of topics
    :param D: number of documents
    :param N: per-document word counts (length-D int vector)
    :param alpha: Dirichlet prior on per-document topic weights theta
    :param beta: Dirichlet prior on per-topic word distributions phi
    :param train_w: D x N[d] array of observed word ids
    :param sched: Augur inference schedule string
    :param burnin: number of burn-in sweeps to discard
    :param num_samples: number of posterior samples to keep
    :returns: dict of sample traces keyed by parameter name
    """
    with AugurInfer('config.yml', augur_lda) as infer_obj:
        # Compile the model for CPU with caching disabled.
        augur_opt = AugurOpt(cached=False, target='cpu', paramScale=None)
        infer_obj.set_compile_opt(augur_opt)
        infer_obj.set_user_sched(sched)
        infer_obj.compile(K, D, N, alpha, beta)(train_w)
        # Run the sampler.
        samples = infer_obj.samplen(burnIn=burnin, numSamples=num_samples)
        # Show the last sample of each latent variable.
        # (fix: print() function form is valid under both Python 2 and 3;
        # the old print statements were Python-2-only.)
        print(samples['theta'][num_samples-1])
        print(samples['phi'][num_samples-1])
        print(samples['z'][num_samples-1])
        return samples
def mk_synthetic_dataset_d4_k4_v8():
    """Build a tiny synthetic LDA corpus: D=4 documents, K=4 topics, V=8 words.

    Each intended topic places probability 0.5 on two word ids, and each
    document mixes two topics, so the true phi and z are easy to recover.

    :returns: tuple (K, V, D, N, alpha, beta, w) where N is the per-document
        word-count vector, alpha/beta are symmetric Dirichlet prior vectors,
        and w is the D x 10 int32 array of observed word ids.
    """
    D = 4; K = 4; V = 8
    N = np.array([10, 10, 10, 10], dtype=np.int32)
    alpha = np.full(K, 0.1)
    beta = np.full(V, 0.01)
    # (fix: print() function form replaces Python-2-only print statements.)
    print('alpha', alpha)
    print('beta', beta)
    # Intended phi
    # K = 0 [0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    # K = 1 [0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0]
    # K = 2 [0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0]
    # K = 3 [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5]
    # z1 = [0, 1, 0, 1, 0, 1, 0, 1]
    w1 = np.array([0, 2, 1, 3, 0, 2, 1, 3, 0, 2], dtype=np.int32)
    # z2 = [2, 3, 2, 3, 2, 3, 2, 3]
    w2 = np.array([4, 6, 5, 7, 4, 6, 5, 7, 5, 7], dtype=np.int32)
    # z3 = [1, 2, 1, 2, 1, 2, 1, 2]
    w3 = np.array([2, 4, 3, 5, 2, 4, 3, 5, 2, 4], dtype=np.int32)
    # z4 = [0, 3, 0, 3, 0, 3, 0, 3]
    w4 = np.array([0, 6, 1, 7, 0, 6, 1, 7, 1, 7], dtype=np.int32)
    w = np.array([w1, w2, w3, w4])
    return K, V, D, N, alpha, beta, w
# Build the synthetic corpus and run the sampler with a fully conjugate
# schedule: conjugate Gibbs for theta and phi, discrete Gibbs for the topic
# labels z.
K, V, D, N, alpha, beta, w = mk_synthetic_dataset_d4_k4_v8()
sched1 = 'ConjGibbs [theta] (*) ConjGibbs [phi] (*) DiscGibbs [z]'
num_samples=5
samples = run_lda(K, D, N, alpha, beta, w, sched1, num_samples=num_samples)
# In[ ]:
|
{"hexsha": "23315204d01d810b504252698acdae932f477a9d", "size": 2393, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/lda.py", "max_stars_repo_name": "rjnw/augurv2", "max_stars_repo_head_hexsha": "0430482297e81288d58a16d43a98ea9d0196d640", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2017-03-06T19:51:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-18T15:07:41.000Z", "max_issues_repo_path": "examples/lda.py", "max_issues_repo_name": "rjnw/augurv2", "max_issues_repo_head_hexsha": "0430482297e81288d58a16d43a98ea9d0196d640", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-09-20T19:18:13.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-20T19:38:00.000Z", "max_forks_repo_path": "examples/lda.py", "max_forks_repo_name": "rjnw/augurv2", "max_forks_repo_head_hexsha": "0430482297e81288d58a16d43a98ea9d0196d640", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-10-10T21:55:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-18T20:17:36.000Z", "avg_line_length": 28.8313253012, "max_line_length": 86, "alphanum_fraction": 0.5394901797, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1052}
|
import numpy as np
import os
import datetime
import pygrib as pg
import argparse
import Myhelpers.defaults as defaults
import scipy.sparse as sparse
from solarconversionfunctions import SolarPVConversion, newHayDavies, testSlopeFunction, fullTrackingSlopeFunction
from configobj import ConfigObj
from validate import Validator
from itertools import izip as zip
from Myhelpers import write_datetime_string, parse_datetime_string
def fix_solar(L):
    '''
    Turn an integrated (accumulated) radiation field into hourly averages.

    Differencing consecutive accumulations gives per-hour energy; dividing
    by 3600 converts joule = watthr to watthr/hr.
    In a market context, the energy is assigned proactively to the following
    hour, while the integrated field reports the accumulation over the last
    hour, hence the appended 'measurement' should be at the end of the time
    series.
    '''
    accumulated = np.asarray(L)
    hourly = np.concatenate((np.diff(accumulated, axis=0), accumulated[:1]), axis=0)
    return hourly / 3600
# Argument parser
parser = argparse.ArgumentParser(description='Wind conversion options')
parser.add_argument('-r', '--rootdir', help='Input directory for forecast files', default=defaults.solarforecastdatadir, metavar="forecast root")
parser.add_argument('-o', '--outdir', help='Output directory for forecast files', default=defaults.solarforecastoutdir, metavar="forecast outroot")
parser.add_argument('-f', '--first', help='First year to extract', default=defaults.startyear, type=int, metavar="first year")
parser.add_argument('-l', '--last', help='Last year to extract', default=defaults.endyear, type=int, metavar="last year")
parser.add_argument('-fm', help='First month to extract', default=defaults.startmonth, type=int, metavar="first month")
parser.add_argument('-lm', help='Last month to extract', default=defaults.endmonth, type=int, metavar="last month")
parser.add_argument('-sp', '--solarpaneltypefile', metavar="solarpaneltypefile", type=str, help='File containing characteristics of the solar panel to use', default=defaults.solarpanelcfg)
parser.add_argument('-la', '--latfile', help='Latitude file', default=defaults.latitudefile, type=str, metavar="latfile")
parser.add_argument('-lo', '--lonfile', help='Longitude file', default=defaults.longitudefile, type=str, metavar="lonfile")
args = parser.parse_args()
lats = np.load(args.latfile)
lons = np.load(args.lonfile)
# We have a forecast for each hour
forecastdelta = datetime.timedelta(hours=1)
# Temperature, downward solar and Albedo name
tmpname = 'ctr_P167_LSFC'
dsrname = 'ctr_P169_LSFC'
albname = 'ctr_P243_LSFC'
# PV slope function:
# Panel angled at 30 deg from horizontal
slopefunction = testSlopeFunction
# Initialize panel configurations
configspec = ConfigObj(
"PVconfigLayout.cfg",
list_values=True,
file_error=True,
_inspec=True)
panelconfig = ConfigObj(args.solarpaneltypefile, list_values=False, configspec=configspec)
panelconfig.validate(Validator())
# Set up filename
filename = "PVpower_{0}.npz".format(panelconfig['name'])
filename.replace(" ", "_")
# Load matrix to project to nodal space
solartransfer = np.load(defaults.solarprojectionmatrix)
solartransfer = sparse.csr_matrix((solartransfer['data'], solartransfer['indices'], solartransfer['indptr']), shape=solartransfer['shape'])
# load nodeorder file (used for saving)
nodeorder = np.load(defaults.nodeorder)
# Select only the forecasts specified for conversion.
forecastls = sorted(os.listdir(args.rootdir))
startdate = '{0:04d}{1:02d}0100'.format(args.first, args.fm)
stopdate = '{0:04d}{1:02d}0100'.format(args.last+int(args.lm == 12), args.lm % 12 + 1)
try:
startidx = forecastls.index(startdate)
except ValueError:
print('Start month not found - check forecast directory')
raise ValueError
try:
stopidx = forecastls.index(stopdate)
forecastls = forecastls[startidx:stopidx]
except ValueError:
print 'Stopmonth+1 not found - assuming we need to use all directories'
forecastls = forecastls[startidx:]
# MAIN LOOP
# For each forecast:
# - Extract fields of downward radiation, albedo and temperature
# - Calculate upward radiation
# - Convert to capacity factors
# - Project to nodal domain
# - Save nodal forecast time series
for fdir in forecastls:
print fdir
date = parse_datetime_string(fdir)
# Load files
dsfile = pg.open(args.rootdir + fdir + '/' + dsrname)
albfile = pg.open(args.rootdir + fdir + '/' + albname)
tmpfile = pg.open(args.rootdir + fdir + '/' + tmpname)
dsdata_int = np.array([ds['values'] for ds in dsfile])
albdata = np.array([alb['values'] for alb in albfile])
tmpdata = np.array([tmp['values'] for tmp in tmpfile])
dsdata = fix_solar(dsdata_int)
usdata = dsdata * albdata
dates = [date + forecastdelta*i for i in range(len(dsdata))]
# solar conversion
convdata = np.zeros_like(dsdata)
for influx, outflux, tmp2m, utcTime, outidx in zip(dsdata, usdata, tmpdata, dates, range(len(convdata))):
influx_tilted = newHayDavies(influx, outflux, lats, lons, utcTime, slopefunction)
out = SolarPVConversion((influx_tilted, tmp2m), panelconfig)
out /= (panelconfig['rated_production']/panelconfig['area'])
convdata[outidx] = np.nan_to_num(out)
# Projection to nodal domain
shape = convdata.shape
outdata = solartransfer.dot(np.reshape(convdata, (shape[0], shape[1]*shape[2])).T).T
# Save .npy file
try:
np.savez_compressed(args.outdir + '/' + fdir + '/' + filename, data=outdata, dates=dates)
except IOError:
os.mkdir(args.outdir + '/' + fdir + '/')
np.savez_compressed(args.outdir + '/' + fdir + '/' + filename, data=outdata, dates=dates)
|
{"hexsha": "d8cc87875f210029145d9417eb690b1fa16354d0", "size": 5620, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/Convert_Forecasts/convert_solar_forecast.py", "max_stars_repo_name": "DTU-ELMA/European_Dataset", "max_stars_repo_head_hexsha": "8fb79c61274c95277edc8ee7f60e724e5cc1e0f4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-04-04T16:00:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T09:44:47.000Z", "max_issues_repo_path": "Scripts/Convert_Forecasts/convert_solar_forecast.py", "max_issues_repo_name": "DTU-ELMA/European_Dataset", "max_issues_repo_head_hexsha": "8fb79c61274c95277edc8ee7f60e724e5cc1e0f4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-08-17T11:25:23.000Z", "max_issues_repo_issues_event_max_datetime": "2015-11-24T23:38:48.000Z", "max_forks_repo_path": "Scripts/Convert_Forecasts/convert_solar_forecast.py", "max_forks_repo_name": "DTU-ELMA/European_Dataset", "max_forks_repo_head_hexsha": "8fb79c61274c95277edc8ee7f60e724e5cc1e0f4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-07-05T01:51:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-29T13:50:47.000Z", "avg_line_length": 40.4316546763, "max_line_length": 188, "alphanum_fraction": 0.73113879, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1423}
|
import Base.convert
export MPSQuantumRegister, convert, two_qubit_gate_to_mpo, mpo_to_two_qubit_gate
export print_info, compress!, enaglemment_entropy, execute!
"Implementation type MPS quantum register"
struct MPSQuantumRegister{T} <: QuantumRegister
    # Number of qubits (sites) in the register.
    N::Integer
    # One rank-3 tensor per site, indexed as (physical, up bond, down bond).
    state::Array{Array{T, 3}, 1}
    # Normalised singular values of each of the N-1 virtual bonds;
    # `nothing` until the corresponding bond has been compressed.
    s_values::Array{Union{Nothing, Array{<:AbstractFloat, 1}}, 1}
    function MPSQuantumRegister{T}(N::Integer, conf::String) where T <: Number
        # One configuration character per qubit.
        @assert length(conf) == N
        new(N, [init_mps_tensor(c, T) for c in conf], fill(nothing, N-1))
    end
end

# Convenience constructor defaulting the element type to ComplexF64.
MPSQuantumRegister(N::Integer, conf::String) = MPSQuantumRegister{ComplexF64}(N, conf)
"""
    convert(::Type{FullStateQuantumRegister}, x::MPSQuantumRegister)

Convert an MPS quantum register to a full state quantum register
"""
function convert(::Type{FullStateQuantumRegister{T}}, x::MPSQuantumRegister{U}) where T <: Number where U <: Number
    # Contract the site tensors from the last site down to the first,
    # absorbing each site's physical index into the growing state.
    state = ones(U, 1, 1)
    for i in x.N:-1:1
        @tensor state[a, b, c] := state[a, j] * x.state[i][b, c, j]
        # Merge the new physical index into the accumulated index.
        dims = size(state)
        state = reshape(state, (dims[1] * dims[2], dims[3]))
    end
    # Drop the remaining dummy bond dimension to get a plain state vector.
    dims = size(state)
    state = reshape(state, (dims[1] * dims[2]))
    return FullStateQuantumRegister{T}(x.N, state)
end
"""
    init_mps_tensor(qubit_value::Char, ::Type{T})

Initialise a single MPS site tensor from a configuration character.
'0' yields the |0> basis state, any other character the |1> state.
The returned tensor has shape (2, 1, 1): (physical index, up, down).
"""
function init_mps_tensor(qubit_value::Char, ::Type{T}) where T <: Number
    amplitudes = qubit_value == '0' ? [1.0, 0.0] : [0.0, 1.0]
    return reshape(convert(Array{T}, amplitudes), (2, 1, 1)) # idx, up, down
end
# Render the register as a string by first expanding it into a full
# state-vector register of the same element type.
function to_str(qreg::MPSQuantumRegister{T}) where T <: Number
    return to_str(convert(FullStateQuantumRegister{T}, qreg))
end
"""
    apply_1qubit!(qreg::MPSQuantumRegister, gate, i)

Apply the 1 qubit gate to qubit i
"""
function apply_1qubit!(qreg::MPSQuantumRegister, gate, i)
    # Contract the gate with the physical index of site i in place;
    # the virtual bond indices (b, c) are untouched.
    @tensor qreg.state[i][a, b, c] := qreg.state[i][m, b, c] * gate[a, m]
end
"""
    two_qubit_gate_to_mpo(gate::Array{T, 2}, threshold::AbstractFloat=1e-15) where T <: Number

Split a 4x4 two-qubit gate into a two-site MPO via an SVD, keeping only
singular values above `threshold`.
"""
function two_qubit_gate_to_mpo(gate::Array{T, 2}, threshold::AbstractFloat=1e-15) where T <: Number
    gate = reshape(gate, (2, 2, 2, 2)) # r2, r1, l2, l1
    gate = permutedims(gate, (2, 4, 1, 3)) # r1, l1, r2, l2
    gate = reshape(gate, (4, 4)) # (r1, l1), (r2, l2)
    F = svd(gate)
    mpo = Array{Array{T, 4}, 1}()
    # Bond dimension after truncation.
    chi = sum(F.S .> threshold)
    # The singular values are absorbed into the first site tensor.
    push!(mpo, reshape(F.U[:,1:chi] * Diagonal(F.S[1:chi]), (2, 2, 1, chi))) # r1, l1, Up, Down
    push!(mpo, permutedims(reshape(F.Vt[1:chi,:], (chi, 2, 2, 1)), (2, 3, 1, 4))) # r2, l2, Up, Down
    mpo
end
"""
    mpo_to_two_qubit_gate(mpo::Array{Array{T, 4}, 1}) where T <: Number

Contract a two-site MPO back into a 4x4 two-qubit gate (the inverse of
`two_qubit_gate_to_mpo`).
"""
function mpo_to_two_qubit_gate(mpo::Array{Array{T, 4}, 1}) where T <: Number
    # Drop the dim-1 outer bond of each site, keeping the shared bond.
    a = reshape_tensor(mpo[1], (1, 2, (3, 4))) # r1, l1, down
    b = reshape_tensor(mpo[2], (1, 2, (3, 4))) # r2, l2, up
    # Contract the shared virtual bond, then restore matrix index order.
    @tensor c[a, b, c, d] := a[a, b, x] * b[c, d, x] # r1, l1, r2, l2
    reshape(permutedims(c, (3, 1, 4, 2)), (4, 4))
end
"""
    apply_2qubit!(qreg::MPSQuantumRegister, gate, i, j)

Apply the 2 qubit gate to qubits i and j. The case i > j is handled by
swapping the gate's qubit order; non-adjacent qubits are handled by
threading identity "filler" MPO tensors through the intermediate sites.
"""
function apply_2qubit!(qreg::MPSQuantumRegister, gate, i, j)
    # @assert abs(i -j) == 1
    if j < i
        # Reorder so the first gate qubit acts on the lower site index.
        apply_2qubit!(qreg, swap_2qubits(gate), j, i)
    else
        # reshape gate to mpo
        gate_mpo = two_qubit_gate_to_mpo(gate)
        # remove upward facing dim 1 index to simplify contraction
        gate_mpo_1 = reshape_tensor(gate_mpo[1], [[1, 3], 2, 4])
        # remove downward facing dim 1 index to simplify contraction
        gate_mpo_2 = reshape_tensor(gate_mpo[2], [[1, 4], 2, 3])
        # tensor gate mpo with state mpos
        @tensor state_1[qubit_i, up1, down1, down2] := qreg.state[i][qubit_i_c, up1, down1] * gate_mpo_1[qubit_i, qubit_i_c, down2]
        @tensor state_2[qubit_j, up1, up2, down1] := qreg.state[j][qubit_j_c, up1, down1] * gate_mpo_2[qubit_j, qubit_j_c, up2]
        # reshape: fold the gate's bond index into the state's virtual bonds
        qreg.state[i] = reshape_tensor(state_1, [1, 2, [3, 4]])
        qreg.state[j] = reshape_tensor(state_2, [1, [2, 3], 4])
        if abs(i -j) > 1
            # Non-adjacent qubits: carry the gate's bond through the sites
            # between i and j with identity filler tensors.
            chi = size(gate_mpo_1)[3]
            filler_mpo = tensor_identity(chi, chi, 2)
            for k in i+1:j-1
                @tensor tmp[qubit_k, up1, up2, down1, down2] :=
                    qreg.state[k][qubit_k_c, up1, down1] *
                    filler_mpo[up2, down2, qubit_k_c, qubit_k]
                qreg.state[k] = reshape_tensor(tmp, [1, [2, 3], [4, 5]])
            end
        end
    end
    return
end
"""
    print_info(qreg::MPSQuantumRegister)

Print the dimension of virtual bond dimensions of the given MPS
"""
function print_info(qreg::MPSQuantumRegister)
    for (site, tensor) in enumerate(qreg.state)
        virtual_dims = size(tensor)[2:3]
        println("$(site) has virtual dims $(virtual_dims)")
    end
end
"""
    compress!(qreg::MPSQuantumRegister)

Compress the provided MPS quantum register
"""
function compress!(qreg::MPSQuantumRegister, threshold::AbstractFloat=1e-15)
    # Sweep left-to-right and then right-to-left over all virtual bonds.
    for bond in 1:qreg.N-1
        compress_bond!(qreg, bond, threshold)
    end
    for bond in reverse(1:qreg.N-1)
        compress_bond!(qreg, bond, threshold)
    end
end
"""
    compress_bond!(qreg::MPSQuantumRegister, index::Integer, threshold::AbstractFloat=1e-15)

Truncate the virtual bond between sites `index` and `index+1` via an SVD,
discarding singular values below `threshold` and recording the normalised
singular values in `qreg.s_values[index]`.
"""
function compress_bond!(qreg::MPSQuantumRegister, index::Integer, threshold::AbstractFloat=1e-15)
    A = qreg.state[index]
    A_dim = size(A)
    B = qreg.state[index+1]
    B_dim = size(B)
    # Contract virtual bond connecting A and B
    @tensor C[idx1, up, idx2, down] := A[idx1, up, c] * B[idx2, c, down]
    # Reshape to a matrix and decompose
    dims = size(C)
    C = reshape(C, (dims[1]*dims[2], dims[3]*dims[4]))
    F = svd(C)
    # Take cutoff and reshape matrices back
    s = F.S
    chi = sum(s .> threshold)
    s = s[1:chi]
    # Store the renormalised Schmidt spectrum for this bond.
    qreg.s_values[index] = s./sqrt(sum(s.^2))
    # Split sqrt of the singular values onto both neighbouring tensors.
    A = F.U[:,1:chi] * Diagonal(sqrt.(s))
    qreg.state[index] = reshape(A, (2, A_dim[2], chi))
    B = Diagonal(sqrt.(s)) * F.Vt[1:chi,:]
    qreg.state[index+1] = permutedims(reshape(B, (chi, 2, B_dim[3])), (2, 1, 3))
end
# Von Neumann entropy of a normalised Schmidt spectrum `s`:
# -sum(p * log(p)) with p = s.^2.
function entropy(s::Array{<:AbstractFloat, 1})
    probabilities = s .^ 2
    return -sum(probabilities .* log.(probabilities))
end
"""
    enaglemment_entropy(qreg::MPSQuantumRegister)

Return the entanglement entropy across each of the N-1 virtual bonds, or
`nothing` if no singular values have been recorded yet (i.e. the register
has not been compressed). The misspelt name is kept because it is part of
the exported API.
"""
function enaglemment_entropy(qreg::MPSQuantumRegister)
    # `s_values` is only populated by `compress_bond!`; use the idiomatic
    # identity check `isnothing` instead of `== nothing` for the sentinel.
    if isnothing(qreg.s_values[1])
        return nothing
    end
    return [entropy(qreg.s_values[x]) for x in 1:qreg.N-1]
end
"""
    execute!(qc::QuantumCircuit, state::MPSQuantumRegister)

Specialised execute for MPS Quantum Register. Applies each operation of
the circuit in order; if `compress_freq > 0` the register is compressed
after every `compress_freq` operations to keep bond dimensions in check.
"""
function execute!(qc::Gates.QuantumCircuit, state::MPSQuantumRegister,
                  compress_freq::Integer=0)
    for (i, op) in enumerate(qc.ops)
        # Dispatch on the number of qubits the operation acts on.
        if op.n == 1
            apply_1qubit!(state, op.gate, op.qubits[1])
        elseif op.n == 2
            apply_2qubit!(state, op.gate, op.qubits[1], op.qubits[2])
        end
        # Periodic compression to bound the growth of the virtual bonds.
        if compress_freq > 0 && i % compress_freq == 0
            compress!(state)
        end
    end
end
|
{"hexsha": "61c4d49d37fde72c31f168889dfa9f1b66630e6c", "size": 6911, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/mps_quantum_register.jl", "max_stars_repo_name": "nmoran/OhMyQSIM.jl", "max_stars_repo_head_hexsha": "b99670373b75db0cb975eef1bead3b850ac6f3bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-02-14T13:44:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T07:43:43.000Z", "max_issues_repo_path": "src/mps_quantum_register.jl", "max_issues_repo_name": "nmoran/OhMyQSIM.jl", "max_issues_repo_head_hexsha": "b99670373b75db0cb975eef1bead3b850ac6f3bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-01-23T14:07:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-28T14:24:27.000Z", "max_forks_repo_path": "src/mps_quantum_register.jl", "max_forks_repo_name": "nmoran/OhMyQSIM.jl", "max_forks_repo_head_hexsha": "b99670373b75db0cb975eef1bead3b850ac6f3bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5990566038, "max_line_length": 132, "alphanum_fraction": 0.6156851396, "num_tokens": 2372}
|
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Pipeline script exported by TPOT; the placeholders below must be filled in
# with the real data path and column separator before running.
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
# Predictor columns are everything except the outcome column.
features = tpot_data.drop('target', axis=1)
# random_state=None: the split (and hence results) differ between runs.
training_features, testing_features, training_classes, testing_classes = \
    train_test_split(features, tpot_data['target'], random_state=None)

# The exported pipeline is a single random-forest classifier.
exported_pipeline = RandomForestClassifier(bootstrap=False, max_features=0.4, min_samples_leaf=1, min_samples_split=9)
exported_pipeline.fit(training_features, training_classes)
results = exported_pipeline.predict(testing_features)
|
{"hexsha": "81f3e89c68579961ae5a613a09bc10301232359f", "size": 743, "ext": "py", "lang": "Python", "max_stars_repo_path": "janus/tpot/tutorials/tpot_titanic_pipeline.py", "max_stars_repo_name": "josepablocam/janus-public", "max_stars_repo_head_hexsha": "4713092b27d02386bdb408213d8edc0dc5859eec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-10T17:00:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T23:23:19.000Z", "max_issues_repo_path": "janus/tpot/tutorials/tpot_titanic_pipeline.py", "max_issues_repo_name": "josepablocam/janus-public", "max_issues_repo_head_hexsha": "4713092b27d02386bdb408213d8edc0dc5859eec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "janus/tpot/tutorials/tpot_titanic_pipeline.py", "max_forks_repo_name": "josepablocam/janus-public", "max_forks_repo_head_hexsha": "4713092b27d02386bdb408213d8edc0dc5859eec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4375, "max_line_length": 118, "alphanum_fraction": 0.8142664872, "include": true, "reason": "import numpy", "num_tokens": 162}
|
import abc
import numpy as np
from tfutils.pyutils import inheritdocstring
class WeightInitializer(abc.ABC):
    """Abstract base class for weight-matrix initializers.

    Args:
        input_dim: Positive integer.
        output_dim: Positive integer.
    """

    def __init__(self, input_dim, output_dim):
        # Dimensions of the weight matrix produced by `initialize`.
        self.input_dim = input_dim
        self.output_dim = output_dim

    @abc.abstractmethod
    def initialize(self):
        """Returns a np.array with shape `(self.input_dim, self.output_dim)`
        as the initialized weight."""
@inheritdocstring
class XavierInitializer(WeightInitializer):
    """Xavier (Glorot) weight initializer.

    Samples weights with scale `sqrt(2 / (input_dim + output_dim))` from
    either a uniform or a normal distribution.

    Args:
        distribution: String. Either "normal" or "uniform".
    """

    def __init__(self, distribution, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.distribution = distribution

    def initialize(self):
        # Xavier scale derived from fan-in + fan-out.
        scale = np.sqrt(2 / (self.input_dim + self.output_dim))
        size = (self.input_dim, self.output_dim)
        if self.distribution == 'uniform':
            return np.random.uniform(low=-scale, high=scale, size=size)
        elif self.distribution == 'normal':
            return np.random.normal(loc=0.0, scale=scale, size=size)
        else:
            # Fail with a diagnostic instead of a bare ValueError().
            raise ValueError(
                'Unknown distribution {!r}; expected "normal" or '
                '"uniform".'.format(self.distribution))
def simulate_output_comp_stds(kernal_initializer,
                              batch_size,
                              input_dim,
                              output_dim):
    """Simulates the standard deviations of each component of the output
    batch

        output_batch = np.matmul(input_batch, weight)

    where the `weight` is initialized by the `kernal_initializer` and the
    input batch is drawn uniformly from [0, 1).

    Args:
        kernal_initializer: Instance inheriting `WeightInitializer`.
            (Parameter name keeps the original spelling for compatibility.)
        batch_size: Positive integer.
        input_dim: Positive integer.
        output_dim: Positive integer.

    Returns:
        List of floats with length `output_dim` (one standard deviation per
        output component).
    """
    input_batch = np.random.random(size=(batch_size, input_dim))
    weight = kernal_initializer.initialize()
    output_batch = np.matmul(input_batch, weight)
    # Standard deviation of each output component across the batch axis.
    return [np.std(output_batch[:, comp]) for comp in range(output_dim)]
def main(args):
    """Build the requested initializer and run the simulations.

    Prints, for each simulation, the mean (and spread) of the per-component
    standard deviations of the simulated output batch.
    """
    # Only the 'xavier' initializer is currently supported.
    if args.kernel_initializer == 'xavier':
        kernel_initializer = XavierInitializer(
            input_dim=args.input_dim,
            output_dim=args.output_dim,
            distribution=args.distribution)
    else:
        raise ValueError()
    for _ in range(args.simulate_times):
        output_comp_stds = simulate_output_comp_stds(
            kernel_initializer, args.batch_size,
            args.input_dim, args.output_dim)
        # Report mean and (in parentheses) std of the component-wise stds.
        print('Mean value of standard derivatives of output components:',
              '{0:.3f} ({1:.3f})'.format(np.mean(output_comp_stds),
                                         np.std(output_comp_stds)))
if __name__ == '__main__':
    # Command-line entry point: configure the simulation from CLI flags.
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('--simulate_times', type=int, default=1,
                        help='Times of simulation')
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--input_dim', type=int)
    parser.add_argument('--output_dim', type=int)
    parser.add_argument('--kernel_initializer', type=str, default='xavier')
    parser.add_argument('--distribution', type=str, default='uniform')
    args = parser.parse_args()

    main(args)
|
{"hexsha": "3e3d4cbc8f26340fb7cbab6d658d113d48fd102e", "size": 3518, "ext": "py", "lang": "Python", "max_stars_repo_path": "related_topics/initializer_aspects.py", "max_stars_repo_name": "shuiruge/generative_models", "max_stars_repo_head_hexsha": "a1765a5ff9aeee8c0325f0c5f40b3537bb82accf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-11-23T06:46:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-20T14:42:56.000Z", "max_issues_repo_path": "related_topics/initializer_aspects.py", "max_issues_repo_name": "shuiruge/generative_models", "max_issues_repo_head_hexsha": "a1765a5ff9aeee8c0325f0c5f40b3537bb82accf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "related_topics/initializer_aspects.py", "max_forks_repo_name": "shuiruge/generative_models", "max_forks_repo_head_hexsha": "a1765a5ff9aeee8c0325f0c5f40b3537bb82accf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4107142857, "max_line_length": 78, "alphanum_fraction": 0.6401364412, "include": true, "reason": "import numpy", "num_tokens": 724}
|
package java.lang;

/**
 * Signature stub for {@code java.lang.IndexOutOfBoundsException}, a
 * {@code RuntimeException} subclass with the standard two constructors.
 */
public class IndexOutOfBoundsException extends RuntimeException {
    /** Constructs the exception with no detail message. */
    public IndexOutOfBoundsException() { }
    /** Constructs the exception with the given detail message. */
    public IndexOutOfBoundsException(String s) { super(s); }
}
|
{"hexsha": "8d08cd612201d2aa7db5741e0072a81f5de1582d", "size": 192, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "sig-src/java/lang/IndexOutOfBoundsException.jl", "max_stars_repo_name": "HarvardPL/cryptoerase", "max_stars_repo_head_hexsha": "4f0a8282858782894f76abee4e2d21ee7c2e6438", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-12-21T21:04:14.000Z", "max_stars_repo_stars_event_max_datetime": "2015-12-21T21:04:14.000Z", "max_issues_repo_path": "sig-src/java/lang/IndexOutOfBoundsException.jl", "max_issues_repo_name": "HarvardPL/cryptoerase", "max_issues_repo_head_hexsha": "4f0a8282858782894f76abee4e2d21ee7c2e6438", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sig-src/java/lang/IndexOutOfBoundsException.jl", "max_forks_repo_name": "HarvardPL/cryptoerase", "max_forks_repo_head_hexsha": "4f0a8282858782894f76abee4e2d21ee7c2e6438", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4285714286, "max_line_length": 65, "alphanum_fraction": 0.7760416667, "num_tokens": 31}
|
# H0: None == BGP
# Compare the 'time' distributions of the two combinations with a
# Kruskal-Wallis rank sum test (non-parametric one-way ANOVA).
## Bloom
data <- read.csv('../../results_old/filter_types/data_all.csv', sep = ';')
# Keep only the two combinations under comparison (None vs BGP).
data <- data[which(data$combination=='combination_0' | data$combination=='combination_4'),]
print(data)
# BUGFIX: kruskal.test's formula is `response ~ group`; 'time' is the
# measured response and 'combination' the grouping factor, so the formula
# was previously reversed.
kruskal.test(time ~ combination, data = data)
#t.test(data$combination_0, data$combination_4,paired=TRUE)
#kruskal.test(combination_1 ~ combination_5, data = data)
# p: 0.5265
# => Accept H0, equal means
#mean(data$combination_0) # 12500.69
#mean(data$combination_4) # 16917.35
# None == BGP !
## GCS
#t.test(data$combination_1, data$combination_5,paired=TRUE)
# p: 0.6731
# => Accept H0, equal means
#mean(data$combination_1) # 13250.39
#mean(data$combination_5) # 16145.51
# None == BGP !
|
{"hexsha": "c2fd4c85bb9dbb8d115768d5733cb722fabe66e9", "size": 737, "ext": "r", "lang": "R", "max_stars_repo_path": "analysis/filter_types_old/none_vs_bgp.r", "max_stars_repo_name": "comunica/Experiments-AMF", "max_stars_repo_head_hexsha": "e133e5994d470f84ab923ca6ef8afa114ed21739", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/filter_types_old/none_vs_bgp.r", "max_issues_repo_name": "comunica/Experiments-AMF", "max_issues_repo_head_hexsha": "e133e5994d470f84ab923ca6ef8afa114ed21739", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-03-04T17:48:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-22T11:23:38.000Z", "max_forks_repo_path": "analysis/filter_types_old/none_vs_bgp.r", "max_forks_repo_name": "comunica/Experiments-AMF", "max_forks_repo_head_hexsha": "e133e5994d470f84ab923ca6ef8afa114ed21739", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2962962963, "max_line_length": 91, "alphanum_fraction": 0.7069199457, "num_tokens": 246}
|
from __future__ import print_function
import argparse
import os.path
import csv
import sys
import scipy.sparse
import numpy as np
import table_utils
import shelve
import pickle
dir_path = os.path.dirname(os.path.realpath(__file__))
def parse_args():
    """Parse the command line and validate the given paths.

    Returns:
        Tuple ``(input_folder, features_file, output_file, label)`` where the
        folder/file entries are absolute paths. Exits the process with an
        error message when a required path is missing or invalid.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--features-file', nargs='?', dest='features')
    parser.add_argument('-i', '--input-folder', nargs='?', dest='input_folder')
    parser.add_argument('-o', '--output-table', nargs='?', dest='output_data')
    parser.add_argument('-l', '--label', nargs='?', dest='label', default='B')
    args = parser.parse_args()

    # The features file is mandatory and must already exist.
    if args.features is None:
        sys.exit("No features file selected!")
    features_file = os.path.abspath(args.features)
    if not os.path.exists(features_file):
        sys.exit("Feature file " + args.features + " does not exists!")

    # The input folder must be an existing directory.
    if args.input_folder is None or not os.path.isdir(args.input_folder):
        sys.exit("Input folder not valid!")
    input_folder = os.path.abspath(args.input_folder)

    # Default the output name from the features file; otherwise make sure
    # the requested output directory exists.
    if args.output_data is None:
        output_file = os.path.basename(features_file) + "_table"
    else:
        out_dir = os.path.dirname(args.output_data)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        output_file = args.output_data

    return input_folder, features_file, output_file, args.label
def main():
    """Build the feature table and persist it to a shelve database."""
    input_folder, features_file, output_file, label = parse_args()
    features = table_utils.read_file(features_file)
    table = table_utils.make_table_dict(input_folder, label)
    data_output = shelve.open(output_file, protocol=pickle.HIGHEST_PROTOCOL)
    try:
        data_output[table_utils.TABLE] = table
        data_output[table_utils.FEATURE_NAMES] = features
    finally:
        # BUGFIX: the shelf was never closed, which can leave the backing
        # database unsynchronised or incomplete on disk.
        data_output.close()
if __name__ == "__main__":
    # Script entry point.
    main()
|
{"hexsha": "b3f701e7b61d887c80056818d1831e2f565c0237", "size": 2082, "ext": "py", "lang": "Python", "max_stars_repo_path": "feature-table/build_table.py", "max_stars_repo_name": "fziliott/android-malware-analysis", "max_stars_repo_head_hexsha": "7ca784d0a05d5375cec6de21f8d9446ef9c5cf90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-12T09:27:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T09:27:54.000Z", "max_issues_repo_path": "feature-table/build_table.py", "max_issues_repo_name": "fziliott/android-malware-analysis", "max_issues_repo_head_hexsha": "7ca784d0a05d5375cec6de21f8d9446ef9c5cf90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "feature-table/build_table.py", "max_forks_repo_name": "fziliott/android-malware-analysis", "max_forks_repo_head_hexsha": "7ca784d0a05d5375cec6de21f8d9446ef9c5cf90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0746268657, "max_line_length": 79, "alphanum_fraction": 0.6873198847, "include": true, "reason": "import numpy,import scipy", "num_tokens": 468}
|
import numpy as np

# Design matrix: one row per training example; the first column is the
# constant bias term.
features=[[1,2,3],
          [1,4,9],
          [1,5,0]]
X = np.array(features)
# Target values, one per training example.
prices = [5,
          13,
          5]
y=np.array(prices)
# Parameter vector, initialised to zeros (one entry per feature).
theta = [0]*len(features[0])
theta = np.transpose(np.array(theta))
# Gradient-descent hyperparameters.
LEARNING_RATE=0.01
NO_TRAINING_EXAMPLES = len(features)
# Convergence tolerance on the change in cost between iterations.
EPSILON=0.000000001
def predict(X, theta):
    """Return the linear-model prediction theta^T X for feature vector X."""
    return np.transpose(theta) @ X
def cost(X, y, theta):
    """Mean-squared-error cost J(theta) = (1/2m) * ||X theta - y||^2."""
    residual = X @ theta - y
    return (1/(2*NO_TRAINING_EXAMPLES)) * np.transpose(residual) @ residual
def gradient_descent(X, y, theta):
    """Run batch gradient descent until the cost change drops below EPSILON.

    Args:
        X: (m, n) design matrix.
        y: Length-m target vector.
        theta: Length-n initial parameter vector.

    Returns:
        The fitted parameter vector.
    """
    old_cost = cost(X, y, theta)
    while True:
        # Accumulate the gradient over all training examples.
        # (Renamed from `sum`, which shadowed the builtin of that name.)
        grad_total = 0
        for i in range(NO_TRAINING_EXAMPLES):
            grad_total += (predict(X[i], theta) - y[i]) * X[i]
        delta = grad_total / NO_TRAINING_EXAMPLES
        theta = theta - LEARNING_RATE * np.transpose(delta)
        new_cost = cost(X, y, theta)
        # test for convergence
        if abs(old_cost - new_cost) < EPSILON:
            break
        else:
            old_cost = new_cost
    return theta
print(gradient_descent(X,y,theta))
|
{"hexsha": "c2a7fc9fec838d594f5053d530df183fb9a94e5d", "size": 1044, "ext": "py", "lang": "Python", "max_stars_repo_path": "VectorizedLinearRegression.py", "max_stars_repo_name": "islamzedd/MLAlgorithms", "max_stars_repo_head_hexsha": "41ecce7cf24b4f72446f6e27be7d83f7ed1e2825", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "VectorizedLinearRegression.py", "max_issues_repo_name": "islamzedd/MLAlgorithms", "max_issues_repo_head_hexsha": "41ecce7cf24b4f72446f6e27be7d83f7ed1e2825", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VectorizedLinearRegression.py", "max_forks_repo_name": "islamzedd/MLAlgorithms", "max_forks_repo_head_hexsha": "41ecce7cf24b4f72446f6e27be7d83f7ed1e2825", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.7272727273, "max_line_length": 82, "alphanum_fraction": 0.5785440613, "include": true, "reason": "import numpy", "num_tokens": 293}
|
#include <iostream>
#include <Eigen/Core>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include<opencv2/core/eigen.hpp>
#include <chrono>
#include <sophus/se3.hpp>
#include <g2o/core/base_vertex.h>
#include <g2o/core/base_unary_edge.h>
#include <g2o/core/sparse_optimizer.h>
#include <g2o/core/block_solver.h>
#include <g2o/core/solver.h>
#include <g2o/core/optimization_algorithm_gauss_newton.h>
#include <g2o/solvers/dense/linear_solver_dense.h>
using namespace std;
using namespace cv;
using namespace Eigen;
// Camera intrinsic matrix (fx, 0, cx; 0, fy, cy; 0, 0, 1).
Mat K = (Mat_<double>(3,3) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1);

// Aligned std::vector typedefs required for fixed-size Eigen types.
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
typedef vector<Eigen::Vector3d, Eigen::aligned_allocator<Eigen::Vector3d>> VecVector3d;

// Find matched feature points between the two images.
void find_feature_matches(
  const Mat &img_1, const Mat &img_2,
  std::vector<KeyPoint> &keypoints_1,
  std::vector<KeyPoint> &keypoints_2,
  std::vector<DMatch> &matches);

// Refine the camera pose with a hand-written Gauss-Newton solver.
void bundleAdjustmentGaussNewton(const VecVector3d &points_3d, const VecVector2d &points_2d,
                                 const Mat &K, Sophus::SE3d &pose);

// Refine the camera pose using g2o graph optimisation.
void bundleAdjustmentG2O(const VecVector3d &points_3d, const VecVector2d &points_2d,
                         const Mat &K, Sophus::SE3d &pose);

// Convert pixel coordinates to normalised camera coordinates.
Point2d pixel2cam(const Point2d &p);
// Solve 3D-2D PnP from two RGB-D frames, then refine the pose with a
// hand-written Gauss-Newton solver and with g2o.
int main(int argc, char **argv){
    // Read the two colour images
    Mat img_1 = imread("../1.png", CV_LOAD_IMAGE_COLOR);
    Mat img_2 = imread("../2.png", CV_LOAD_IMAGE_COLOR);
    assert(img_1.data && img_2.data);
    cout << "读取图像 完成!" << endl;
    // Match feature points between the two images
    cout << "开始特征点匹配 ......" << endl;
    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    find_feature_matches(img_1, img_2, keypoints_1, keypoints_2, matches);
    cout << "特征点匹配 完成! 一共找到了" << matches.size() << "组匹配点" << endl << endl;
//    for (DMatch m:matches) {
//        cout << keypoints_1[m.queryIdx].pt.x << " " << keypoints_1[m.queryIdx].pt.y << endl;
//    }
    // Read the depth maps and build the 3D points of frame 1
    Mat img_depth_1 = imread("../1_depth.png", CV_LOAD_IMAGE_UNCHANGED);
    Mat img_depth_2 = imread("../2_depth.png", CV_LOAD_IMAGE_UNCHANGED);
    vector<Point3d> points_3d;
    vector<Point2d> points_2d;
    for (DMatch m:matches){
        // ushort d = img_depth_1.ptr<unsigned short>(int(keypoints_1[m.queryIdx].pt.y))[int(keypoints_1[m.queryIdx].pt.x)];
        ushort d = img_depth_1.at<unsigned short>((int)keypoints_1[m.queryIdx].pt.y, (int)keypoints_1[m.queryIdx].pt.x);
        if (d==0) continue;  // skip matches with missing depth
        float dd = d / 5000.0;  // depth scale -- presumably dataset-specific; confirm against the data source
        Point2d p1 = pixel2cam(keypoints_1[m.queryIdx].pt);
        points_3d.push_back(Point3f(p1.x * dd, p1.y * dd, dd));
        points_2d.push_back(keypoints_2[m.trainIdx].pt);
    }
    cout << "Total number of valid 3d-2d pairs: " << points_3d.size() << endl;
    // Initial pose from OpenCV's solvePnP
    cout << "开始PnP求解 ......" << endl;
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    Mat R, r, t;
    solvePnP(points_3d, points_2d, K, Mat(), r, t, false);
    cv::Rodrigues(r, R);  // rotation vector -> rotation matrix
    cout << "R = " << endl << R << endl;
    cout << "t = " << t.t() << endl;
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "solve pnp in opencv cost time: " << time_used.count() << " seconds." << endl;
    cout << endl;
    // Hand-written Gauss-Newton refinement
    cout << "开始手写G-N优化 ......" << endl;
    VecVector3d points_3d_eig;
    VecVector2d points_2d_eig;
    // Copy the OpenCV point lists into Eigen containers
    for(size_t i=0; i<points_3d.size(); i++){
        Point3d p_3d = points_3d[i];
        Point2d p_2d = points_2d[i];
        points_3d_eig.push_back(Vector3d(p_3d.x, p_3d.y, p_3d.z));
        points_2d_eig.push_back(Vector2d(p_2d.x, p_2d.y));
    }
    Matrix3d R_eig;
    Vector3d t_eig;
    cv2eigen(R, R_eig);
    cv2eigen(t, t_eig);
    // Seed both refinements with the solvePnP result
    Sophus::SE3d pose_gn(R_eig, t_eig);
    t1 = chrono::steady_clock::now();
    bundleAdjustmentGaussNewton(points_3d_eig, points_2d_eig, K, pose_gn);
    t2 = chrono::steady_clock::now();
    time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "pose by g-n: " << endl << pose_gn.matrix() << endl;
    cout << "solve pnp by gauss newton cost time: " << time_used.count() << " seconds." << endl << endl;
    // g2o refinement
    cout << "开始G2O优化 ......" << endl;
    Sophus::SE3d pose_g2o(R_eig, t_eig);
    bundleAdjustmentG2O(points_3d_eig, points_2d_eig, K, pose_g2o);
    // BUGFIX: previously printed pose_gn here, so the g2o result was never
    // shown; print the g2o-refined pose instead.
    cout << "pose by g2o: " << endl << pose_g2o.matrix() << endl;
    return 0;
}
void bundleAdjustmentGaussNewton(const VecVector3d &points_3d, const VecVector2d &points_2d,
const Mat &K, Sophus::SE3d &pose){
typedef Eigen::Matrix<double, 6, 1> Vector6d;
const int iterations = 10;
double cost = 0, lastCost = 0;
double fx = K.at<double>(0, 0);
double fy = K.at<double>(1, 1);
double cx = K.at<double>(0, 2);
double cy = K.at<double>(1, 2);
for (int iter=0; iter<iterations; iter++){
Matrix<double, 6, 6> H = Matrix<double, 6, 6>::Zero();
Vector6d b = Vector6d::Zero();
cost = 0;
for (int i=0; i<points_3d.size(); i++){
Vector3d pc = pose * points_3d[i];
double inv_z = 1.0 / pc[2];
double inv_z2 = inv_z * inv_z;
Vector2d proj (fx*pc[0]*inv_z + cx,
fy*pc[1]*inv_z + cy);
Vector2d e = points_2d[i] - proj;
cost += e.squaredNorm();
Matrix<double, 2, 6> J;
J << -fx * inv_z,
0,
fx * pc[0] * inv_z2,
fx * pc[0] * pc[1] * inv_z2,
-fx - fx * pc[0] * pc[0] * inv_z2,
fx * pc[1] * inv_z,
0,
-fy * inv_z,
fy * pc[1] * inv_z2,
fy + fy * pc[1] * pc[1] * inv_z2,
-fy * pc[0] * pc[1] * inv_z2,
-fy * pc[0] * inv_z;
H += J.transpose() * J;
b += - J.transpose() * e;
}
Vector6d dx;
dx = H.ldlt().solve(b);
if (isnan(dx[0])){
cout << "result is nan!" << endl;
}
if (iter > 0 && cost > lastCost){
cout << "the cost is larger than last cost!";
break;
}
pose *= Sophus::SE3d::exp(dx);
lastCost = cost;
cout << "iteration " << iter << " cost=" << cost << endl;
if (dx.norm() < 1e-6){
cout << "have converge." << endl;
break;
}
}
}
void find_feature_matches(
const Mat &img_1, const Mat &img_2,
std::vector<KeyPoint> &keypoints_1,
std::vector<KeyPoint> &keypoints_2,
std::vector<DMatch> &matches){
Mat descriptors_1, descriptors_2;
Ptr<FeatureDetector> detector = ORB::create();
Ptr<DescriptorExtractor> descriptor = ORB::create();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
// --第一步:检测Oriented Fast角点位置
detector->detect(img_1, keypoints_1);
detector->detect(img_2, keypoints_2);
cout << "--第一步完成:检测Oriented Fast角点位置" << endl;
// --第二步:根据角点位置计算BRIEF描述子
descriptor->compute(img_1, keypoints_1, descriptors_1);
descriptor->compute(img_2, keypoints_2, descriptors_2);
cout << "--第二步完成:根据角点位置计算BRIEF描述子" << endl;
// -- 第三步:对两幅图像中的BRIEF描述子进行匹配,使用Hamming距离
vector<DMatch> match;
matcher->match(descriptors_1, descriptors_2, match);
cout << "--第三步完成:对两幅图像中的BRIEF描述子进行匹配,使用Hamming距离" << endl;
// --第四步:匹配点对 筛选
auto min_max = minmax_element(match.begin(), match.end(),
[] (const DMatch &m1, const DMatch &m2) {return m1.distance < m2.distance;});
double min_dist = min_max.first->distance;
double max_dist = min_max.second->distance;
for (int i=0; i<descriptors_1.rows; i++){
if(match[i].distance <= max(2*min_dist, 30.0)){
matches.push_back(match[i]);
}
}
cout << "--第四步完成:匹配点对 筛选" << endl;
}
// Map a pixel coordinate to the normalized camera plane using the global intrinsics K.
Point2d pixel2cam(const Point2d &p) {
    const double fx = K.at<double>(0, 0);
    const double fy = K.at<double>(1, 1);
    const double cx = K.at<double>(0, 2);
    const double cy = K.at<double>(1, 2);
    return Point2d((p.x - cx) / fx, (p.y - cy) / fy);
}
/// g2o vertex for a 6-DoF camera pose, with the estimate stored as Sophus::SE3d.
class VertexPose: public g2o::BaseVertex<6, Sophus::SE3d>{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
    /// Reset the estimate to the identity transform.
    virtual void setToOriginImpl() override {
        _estimate = Sophus::SE3d();
    }
    /// Apply a left-multiplicative se(3) increment to the current estimate.
    virtual void oplusImpl(const double *update) override {
        Eigen::Matrix<double, 6, 1> update_eigen;
        update_eigen << update[0], update[1], update[2], update[3], update[4], update[5];
        _estimate = Sophus::SE3d::exp(update_eigen) * _estimate;
    }
    /// Serialization is not implemented; report failure.
    /// BUGFIX: the original bodies had no return statement in a bool function (UB).
    virtual bool read(istream &in) override { return false; }
    virtual bool write(ostream &out) const override { return false; }
};
/// Unary edge: reprojection error of a fixed 3D point observed at a 2D pixel,
/// connected to a single VertexPose.
class EdgeProjection: public g2o::BaseUnaryEdge<2, Vector2d, VertexPose>{
public:
    EIGEN_MAKE_ALIGNED_OPERATOR_NEW;
    /// pos: 3D point (camera-1 frame); K: camera intrinsic matrix.
    EdgeProjection(const Vector3d &pos, const Matrix3d &K): _pos3d(pos), _K(K){}
    /// error = measured pixel - projection of the transformed point.
    virtual void computeError() override{
        const VertexPose *v = static_cast<VertexPose *>(_vertices[0]);
        Sophus::SE3d T = v->estimate();
        Vector3d pos_pixel = _K * (T * _pos3d);
        pos_pixel = pos_pixel / pos_pixel[2];   // perspective division
        _error = _measurement - pos_pixel.head<2>();
    }
    /// Analytic 2x6 Jacobian of the error w.r.t. a left perturbation of the pose.
    virtual void linearizeOplus() override {
        const VertexPose *v = static_cast<VertexPose *>(_vertices[0]);
        Sophus::SE3d T = v->estimate();
        Vector3d pos_cam = T * _pos3d;
        double fx = _K(0, 0);
        double fy = _K(1, 1);
        double X = pos_cam[0];
        double Y = pos_cam[1];
        double Z = pos_cam[2];
        double Z2 = Z * Z;   // use Z2 consistently (original mixed Z2 and Z*Z)
        _jacobianOplusXi
            << -fx / Z, 0, fx * X / Z2, fx * X * Y / Z2, -fx - fx * X * X / Z2, fx * Y / Z,
            0, -fy / Z, fy * Y / Z2, fy + fy * Y * Y / Z2, -fy * X * Y / Z2, -fy * X / Z;
    }
    /// Serialization is not implemented; report failure.
    /// BUGFIX: the original bodies had no return statement in a bool function (UB).
    virtual bool read(istream &in) override { return false; }
    virtual bool write(ostream &out) const override { return false; }
private:
    Eigen::Vector3d _pos3d;
    Eigen::Matrix3d _K;
};
// Pose-only bundle adjustment with g2o: one VertexPose plus one EdgeProjection
// per 3D-2D correspondence.  On return, `pose` holds the optimized estimate.
void bundleAdjustmentG2O(const VecVector3d &points_3d, const VecVector2d &points_2d,
                            const Mat &K, Sophus::SE3d &pose){
    // 6-dim pose block / 3-dim landmark block (landmarks are fixed here).
    typedef g2o::BlockSolver<g2o::BlockSolverTraits<6, 3>> BlockSolverType;
    typedef g2o::LinearSolverDense<BlockSolverType::PoseMatrixType> LinearSolverType;
    auto solver = new g2o::OptimizationAlgorithmGaussNewton(
            g2o::make_unique<BlockSolverType>(g2o::make_unique<LinearSolverType>()));
    g2o::SparseOptimizer optimizer;
    optimizer.setAlgorithm(solver);
    optimizer.setVerbose(true);
    VertexPose *vertex_pose = new VertexPose();
    vertex_pose->setId(0);
    // BUGFIX: start from the caller-supplied pose (the PnP solution) instead of
    // discarding it and optimizing from the identity.
    vertex_pose->setEstimate(pose);
    optimizer.addVertex(vertex_pose);
    Matrix3d K_eigen;
    cv2eigen(K, K_eigen);
    // One projection edge per correspondence, all sharing the single pose vertex.
    for(size_t i=0; i < points_3d.size(); i++){
        auto p2d = points_2d[i];
        auto p3d = points_3d[i];
        EdgeProjection *edge = new EdgeProjection(p3d, K_eigen);
        edge->setId(i);
        edge->setVertex(0, vertex_pose);
        edge->setMeasurement(p2d);
        edge->setInformation(Matrix2d::Identity());
        optimizer.addEdge(edge);
    }
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    optimizer.initializeOptimization();
    optimizer.optimize(10);
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    chrono::duration<double> time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "optimization costs time: " << time_used.count() << " seconds." << endl;
    // Write the optimized pose back to the caller.
    pose = vertex_pose->estimate();
}
|
{"hexsha": "8468cf3b35643303b40a0a0b2682da16aba31628", "size": 11108, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "my_implementation_1/ch7/pose_estimation_3d2d/pose_estimation_3d2d.cpp", "max_stars_repo_name": "Mingrui-Yu/slambook2", "max_stars_repo_head_hexsha": "d31273192bd9fb5ac618f147105082022c87a005", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-11-09T14:18:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-09T14:18:15.000Z", "max_issues_repo_path": "my_implementation_1/ch7/pose_estimation_3d2d/pose_estimation_3d2d.cpp", "max_issues_repo_name": "Mingrui-Yu/slambook2", "max_issues_repo_head_hexsha": "d31273192bd9fb5ac618f147105082022c87a005", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "my_implementation_1/ch7/pose_estimation_3d2d/pose_estimation_3d2d.cpp", "max_forks_repo_name": "Mingrui-Yu/slambook2", "max_forks_repo_head_hexsha": "d31273192bd9fb5ac618f147105082022c87a005", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1029810298, "max_line_length": 120, "alphanum_fraction": 0.6380986676, "num_tokens": 3742}
|
## Future-proofing for Python3+
from __future__ import print_function
## Import dolfin and numpy and time ##
import dolfin as df
import numpy as np
from petsc4py import PETSc
from helper import assign_dof
## Optionally import dolfin_adjoint ##
try:
import dolfin_adjoint as dfa
dolfin_adjoint_found = True
except ImportError:
dolfin_adjoint_found = False
class BlockProblem(object):
    """Describe a block-structured variational problem for pFibs.

    Wraps a bilinear form ``a``, linear form ``L`` and solution function ``u``,
    and records how the function space is partitioned into named *fields* and
    (optionally nested) *splits* for PETSc fieldsplit preconditioning.
    """
    def __init__(self, *args, **kwargs):
        """Positional args: (a, L, u).  Keyword args: aP, bcs, annotate,
        adjoint, ident_zeros, log_level (0 = no timers)."""
        ## Check if correct number of positional arguments ##
        if len(args) != 3:
            raise RuntimeError("Solver takes only three positional augments: a,L,x")
        ## Store Problem Info ##
        self.a = args[0]
        self.L = args[1]
        self.u = args[2]
        self.aP = kwargs.get("aP",None)
        self.bcs = kwargs.get("bcs",[])
        self.annotate = kwargs.get("annotate",False)
        self.adjoint = kwargs.get("adjoint",False)
        self.ident_zeros = kwargs.get("ident_zeros",False)
        self.log_level = kwargs.get("log_level",0)
        ## Extract the Function Space ##
        self.V = self.u.function_space()
        self.dofs = np.array(self.u.function_space().dofmap().dofs())
        # Global offset of this rank's dofs; used to localize indices in assign_dof.
        self.goffset = np.min(self.dofs)
        ## Initialize field split information ##
        self.block_field = {} # Dictionary of all the fields: name -> [id, subspace index(es), solver params]
        self.field_size = {} # Dictionary of field sizes: field id -> dof count
        self.block_split = {} # Dictionary of all the splits: name -> [field/split names, solver params]
        self.num_fields = 0 # Total number of fields
        self.finalize_field = False # Flag to indicate all fields added
        self.split_0 = "" # Name of the outer most split if using nested/recursion
        ## Test if dolfin adjoint is required but not installed ##
        if self.adjoint == True and dolfin_adjoint_found == False:
            raise RuntimeError("Dolfin-adjoint is not installed")
    ## Add a field to the block problem ##
    def field(self, *args, **kwargs):
        """Register a named field.

        Positional args: (field_name, field_indx) where field_indx is a
        subspace index, or a list of indices / [i, j] pairs for nested
        subspaces.  Optional keyword 'solver' supplies per-field solver
        parameters.  Must be called before any split() call.
        """
        if self.log_level >= 1:
            timer = df.Timer("pFibs: Add block problem field")
        ## Check if splits already defined ##
        if self.finalize_field:
            raise RuntimeError("Cannot add anymore fields after split has been called")
        ## Required input ##
        field_name = args[0]
        field_indx = args[1]
        ## Optional solver parameters ##
        solver_params = kwargs.get("solver",{})
        ## Check types ##
        if not isinstance(field_name, str):
            raise TypeError("Field name must be of type str")
        if not isinstance(solver_params, dict):
            raise TypeError("Solver parameters must be of type dict")
        ## Add to dictionary ##
        self.block_field.update({field_name:[self.num_fields,field_indx,solver_params]})
        self.num_fields += 1
        if self.log_level >= 1:
            timer.stop()
    ## Extract dofs ##
    def extract_dofs(self,key):
        """Return (dofs, ndof): the dof indices and dof count of field *key*,
        gathering over all subspaces when the field spans several."""
        if self.log_level >= 3:
            timer = df.Timer("pFibs: Setup fields - Iterate through block fields - Extract dofs")
        ## If multiple subspaces belong to this field ##
        if isinstance(self.block_field[key][1],list):
            dofs = np.array([])
            for space in self.block_field[key][1]:
                ## Case 1: subspace of FunctionSpace ##
                if isinstance(space,int):
                    dofs = np.append(dofs,self.V.sub(space).dofmap().dofs())
                ## Case 2: subspace of subspace of FunctionSpace
                elif isinstance(space,list):
                    if len(space) != 2:
                        raise ValueError("Argument length of vector function subspace can only be 2")
                    dofs = np.append(dofs,self.V.sub(space[0]).sub(space[1]).dofmap().dofs())
                else:
                    raise TypeError("Input length must either be an int or a list of ints")
        ## Otherwise only one subspace belonging to this field ##
        else:
            dofs = np.array(self.V.sub(self.block_field[key][1]).dofmap().dofs())
        ## Get size of array ##
        ndof = dofs.size
        if self.log_level >= 3:
            timer.stop()
        return (dofs, ndof)
    ## Set up the fields ##
    def setup_fields(self):
        """Build the PETSc Section/DMShell describing the field layout.

        If no fields were registered, one field per subspace is created by
        default.  After this call the field set is frozen (finalize_field).
        """
        if self.log_level >= 1:
            timer = df.Timer("pFibs: Setup fields")
        ## Default if empty ##
        if not self.block_field:
            for i in range(self.V.num_sub_spaces()):
                self.block_field.update({i:[i,i,{}]})
            self.num_fields = len(self.block_field)
            # A space with no subspaces still counts as a single field.
            if self.num_fields == 0:
                self.num_fields += 1
        ## Create PetscSection ##
        self.section = PETSc.Section().create()
        self.section.setNumFields(self.num_fields)
        self.section.setChart(0,len(self.V.dofmap().dofs()))
        if self.log_level >= 2:
            timer_iterBlockFields = df.Timer("pFibs: Setup fields - Iterate through block fields")
        ## Iterate through all the block fields ##
        for key in self.block_field:
            self.section.setFieldName(self.block_field[key][0],str(key))
            ## Extract dofs ##
            (dofs, ndof) = self.extract_dofs(key)
            ## Record dof count for each field ##
            self.field_size.update({self.block_field[key][0]:ndof})
            if self.log_level >= 3:
                timer_assignDof = df.Timer("pFibs: Setup fields - Iterate through block fields - assign dof")
            # Register each dof of this field in the section (indices shifted by goffset).
            assign_dof(self.section, dofs, self.goffset, self.block_field[key][0])
            if self.log_level >= 3:
                timer_assignDof.stop()
        if self.log_level >= 2:
            timer_iterBlockFields.stop()
        ## Create DM and assign PetscSection ##
        self.section.setUp()
        self.dm = PETSc.DMShell().create()
        # NOTE(review): setDefaultSection is deprecated in newer petsc4py (setSection) — confirm target version.
        self.dm.setDefaultSection(self.section)
        self.dm.setUp()
        ## Prevent any further modification to block_field ##
        self.finalize_field = True
        if self.log_level >= 1:
            timer.stop()
    ## Add a split to the block problem ##
    def split(self, *args, **kwargs):
        """Group previously defined fields/splits into a named split.

        Positional args: (split_name, split_fields) where split_fields lists
        at least two existing field or split names.  Optional keyword
        'solver' supplies split-level solver parameters.  The most recently
        added split becomes the outermost split (split_0).
        """
        if self.log_level >= 1:
            timer = df.Timer("pFibs: Add block problem split")
        ## Setup fields ##
        if not self.finalize_field:
            self.setup_fields()
        ## Required input ##
        split_name = args[0]
        split_fields = args[1]
        ## Optional solver parameters ##
        solver_params = kwargs.get("solver",{})
        ## Check types ##
        if not isinstance(split_name, str):
            raise TypeError("Field name must be of type str")
        if not isinstance(solver_params, dict):
            raise TypeError("Solver parameters must be of type dict")
        if not isinstance(split_fields, list):
            raise TypeError("Split fields must be of type list")
        elif len(split_fields) < 2:
            raise ValueError("Number of fields in split fields must be 2 or greater")
        ## Check whether split fields exist ##
        for i in split_fields:
            if not isinstance(i,str):
                raise TypeError("Field/split must be of type str")
            if not i in self.block_field and not i in self.block_split:
                raise ValueError("Field/split '%s' not defined" %(i))
        ## Add to dictionary ##
        self.block_split.update({split_name:[split_fields,solver_params]})
        ## Update/override as the first split ##
        self._first_split(split_name)
        if self.log_level >= 1:
            timer.stop()
    ## Define the first split ##
    def _first_split(self, split_name):
        """Record *split_name* as the outermost split (it must already exist)."""
        if not split_name in self.block_split:
            raise ValueError("First split '%s' not defined" %(split_name))
        else:
            self.split_0 = split_name
|
{"hexsha": "83594c84ffaf5eb00ffeab0656c59c85c82757fc", "size": 8030, "ext": "py", "lang": "Python", "max_stars_repo_path": "pfibs/block_problem.py", "max_stars_repo_name": "iprotasov/pfibs", "max_stars_repo_head_hexsha": "589724369b248971ba76da3f764f4b760b666761", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-02-08T19:37:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-16T01:46:15.000Z", "max_issues_repo_path": "pfibs/block_problem.py", "max_issues_repo_name": "iprotasov/pfibs", "max_issues_repo_head_hexsha": "589724369b248971ba76da3f764f4b760b666761", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-04-09T17:14:38.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-12T20:57:37.000Z", "max_forks_repo_path": "pfibs/block_problem.py", "max_forks_repo_name": "iprotasov/pfibs", "max_forks_repo_head_hexsha": "589724369b248971ba76da3f764f4b760b666761", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-04-09T17:19:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-02T19:54:10.000Z", "avg_line_length": 36.0089686099, "max_line_length": 109, "alphanum_fraction": 0.5740971357, "include": true, "reason": "import numpy", "num_tokens": 1740}
|
###############################################################################
# Author: Wasi Ahmad
# Project: Adversarial Multi-task Learning for Text Classification
# Date Created: 10/18/2017
#
# File Description: This script provides general purpose utility functions that
# are required at different steps in the experiments.
###############################################################################
import re, os, pickle, string, math, time, util, torch, glob, inspect
import numpy as np
from nltk import wordpunct_tokenize, word_tokenize
from torch import optim
from torch.autograd import Variable
import matplotlib as mpl
import torch.nn.functional as F
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from collections import OrderedDict
from allennlp.data.token_indexers.elmo_indexer import ELMoCharacterMapper
def load_word_embeddings(directory, file, dictionary):
    """Read a text embedding file (word followed by space-separated floats per
    line) and return {word: numpy vector} for words present in *dictionary*."""
    embeddings_index = {}
    path = os.path.join(directory, file)
    with open(path) as handle:
        for line in handle:
            token, rest = line.split(' ', 1)
            if token in dictionary:
                embeddings_index[token] = np.array([float(v) for v in rest.split()])
    return embeddings_index
def save_word_embeddings(directory, file, embeddings_index):
    """Write {word: vector} pairs as one 'word v1 v2 ...' line each."""
    lines = [word + ' ' + ' '.join(str(x) for x in vec) + '\n'
             for word, vec in embeddings_index.items()]
    with open(os.path.join(directory, file), 'w') as handle:
        handle.writelines(lines)
def save_checkpoint(state, filename='./checkpoint.pth.tar'):
    """Serialize *state* to *filename*, replacing any previous checkpoint file."""
    # Drop a stale checkpoint first so the write starts from a clean slate.
    already_exists = os.path.isfile(filename)
    if already_exists:
        os.remove(filename)
    torch.save(state, filename)
def load_model_states_from_checkpoint(model, filename, tag, from_gpu=True):
    """Load model states from a previously saved checkpoint."""
    assert os.path.exists(filename)
    # When the checkpoint was written on GPU but we load on CPU, remap storages.
    map_loc = None if from_gpu else (lambda storage, loc: storage)
    checkpoint = torch.load(filename, map_location=map_loc)
    model.load_state_dict(checkpoint[tag])
def get_optimizer(s):
    """Parse an optimizer spec into (optimizer class, parameter dict).

    Accepted forms:
        - "sgd,lr=0.01"
        - "adagrad,lr=0.1,lr_decay=0.05"
        - "adam"            (no parameters)
    """
    if "," in s:
        method = s[:s.find(',')]
        optim_params = {}
        for chunk in s[s.find(',') + 1:].split(','):
            pair = chunk.split('=')
            assert len(pair) == 2
            # Values must look like plain decimal numbers (no scientific notation).
            assert re.match("^[+-]?(\d+(\.\d*)?|\.\d+)$", pair[1]) is not None
            optim_params[pair[0]] = float(pair[1])
    else:
        method = s
        optim_params = {}
    dispatch = {
        'adadelta': optim.Adadelta,
        'adagrad': optim.Adagrad,
        'adam': optim.Adam,
        'rmsprop': optim.RMSprop,
        'sgd': optim.SGD,
    }
    if method not in dispatch:
        raise Exception('Unknown optimization method: "%s"' % method)
    optim_fn = dispatch[method]
    if method == 'sgd':
        assert 'lr' in optim_params
    # Reject parameters the chosen optimizer's constructor does not accept.
    expected_args = list(inspect.signature(optim_fn.__init__).parameters.keys())
    assert expected_args[:2] == ['self', 'params']
    if not all(k in expected_args[2:] for k in optim_params.keys()):
        raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
            str(expected_args[2:]), str(optim_params.keys())))
    return optim_fn, optim_params
def softmax(input, axis=1):
    """Apply softmax along *axis* of an n-dimensional tensor.

    The target axis is transposed to the last position, the tensor is
    flattened to 2-D, softmax is applied row-wise, and the result is
    reshaped/transposed back.
    """
    input_size = input.size()
    trans_input = input.transpose(axis, len(input_size) - 1)
    trans_size = trans_input.size()
    input_2d = trans_input.contiguous().view(-1, trans_size[-1])
    # BUGFIX: pass dim explicitly.  F.softmax without `dim` is deprecated; for
    # the always-2D input here, dim=1 is exactly the legacy implicit choice.
    soft_max_2d = F.softmax(input_2d, dim=1)
    soft_max_nd = soft_max_2d.view(*trans_size)
    return soft_max_nd.transpose(axis, len(input_size) - 1)
def load_model_states_without_dataparallel(model, filename, tag):
    """Load states saved from a DataParallel-wrapped model into a plain model
    by stripping the leading 'module.' from every parameter key."""
    assert os.path.exists(filename)
    checkpoint = torch.load(filename)
    stripped = OrderedDict(
        (key[7:], value) for key, value in checkpoint[tag].items())  # drop 'module.'
    model.load_state_dict(stripped)
def save_object(obj, filename):
    """Pickle *obj* and write it to *filename*."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink)
def load_object(filename):
    """Unpickle and return the object stored in *filename*."""
    with open(filename, 'rb') as source:
        return pickle.load(source)
def tokenize_and_normalize(s):
    """Lower-case *s*, tokenize with NLTK wordpunct, and drop tokens that are
    made of punctuation only."""
    punct_only = '[' + string.punctuation + ']+'
    return [tok for tok in wordpunct_tokenize(s.lower())
            if not re.fullmatch(punct_only, tok)]
def tokenize(s, tokenize):
    """Split *s* into tokens: NLTK word_tokenize when *tokenize* is truthy,
    otherwise a plain whitespace split."""
    return word_tokenize(s) if tokenize else s.split()
def initialize_out_of_vocab_words(dimension, choice='zero'):
    """Return an OOV embedding of size *dimension*: standard-normal samples for
    'random', a zero vector for 'zero' (any other choice returns None)."""
    if choice == 'random':
        return np.random.normal(size=dimension)
    if choice == 'zero':
        return np.zeros(shape=dimension)
def sentence_to_tensor(sentence, max_sent_length, dictionary):
    """Encode *sentence* as a zero-initialized LongTensor of word indices,
    padded to *max_sent_length*; out-of-vocabulary words stay 0."""
    encoded = torch.LongTensor(max_sent_length).zero_()
    for position, word in enumerate(sentence):
        if word in dictionary.word2idx:
            encoded[position] = dictionary.word2idx[word]
    return encoded
def batch_to_tensors(batch, dictionary, iseval=False):
    """Convert a list of sentence-pair examples into padded tensors.

    Each item in *batch* must expose ``sentence1``/``sentence2`` (token lists)
    and ``label``.  Returns (sentences1, lengths1, sentences2, lengths2,
    labels) with both sentence tensors zero-padded to the batch maximum.
    """
    max_sent_length1, max_sent_length2 = 0, 0
    for item in batch:
        if max_sent_length1 < len(item.sentence1):
            max_sent_length1 = len(item.sentence1)
        if max_sent_length2 < len(item.sentence2):
            max_sent_length2 = len(item.sentence2)
    all_sentences1 = torch.LongTensor(len(batch), max_sent_length1)
    # BUGFIX: np.int was removed in NumPy 1.24; np.int64 preserves the intent.
    sent_len1 = np.zeros(len(batch), dtype=np.int64)
    all_sentences2 = torch.LongTensor(len(batch), max_sent_length2)
    sent_len2 = np.zeros(len(batch), dtype=np.int64)
    labels = torch.LongTensor(len(batch))
    for i in range(len(batch)):
        sent_len1[i], sent_len2[i] = len(batch[i].sentence1), len(batch[i].sentence2)
        all_sentences1[i] = sentence_to_tensor(batch[i].sentence1, max_sent_length1, dictionary)
        all_sentences2[i] = sentence_to_tensor(batch[i].sentence2, max_sent_length2, dictionary)
        labels[i] = batch[i].label
    # NOTE(review): Variable(..., volatile=...) is the pre-0.4 PyTorch API; on
    # modern torch wrap evaluation in torch.no_grad() instead.  Left unchanged
    # to preserve behavior for the torch version this project targets.
    return Variable(all_sentences1, volatile=iseval), sent_len1, Variable(all_sentences2, volatile=iseval), \
           sent_len2, Variable(labels, volatile=iseval)
def convert_sent_to_tensor(sentence, max_length, pad_token):
    """Map each of *max_length* positions to ELMo character ids, padding short
    sentences with *pad_token*."""
    padded = (sentence[i] if i < len(sentence) else pad_token
              for i in range(max_length))
    return [ELMoCharacterMapper.convert_word_to_char_ids(word) for word in padded]
def batch_to_elmo_input(batch, dictionary, iseval=False):
    """Convert a list of sentence-pair examples into ELMo character-id tensors.

    Like batch_to_tensors, but each sentence becomes a character-id matrix of
    width max_length + 2 (room for ELMo's BOS/EOS positions).
    """
    max_sent_length1, max_sent_length2 = 0, 0
    for item in batch:
        if max_sent_length1 < len(item.sentence1):
            max_sent_length1 = len(item.sentence1)
        if max_sent_length2 < len(item.sentence2):
            max_sent_length2 = len(item.sentence2)
    all_sentences1, all_sentences2 = [], []
    # BUGFIX: np.int was removed in NumPy 1.24; np.int64 preserves the intent.
    sent_len1 = np.zeros(len(batch), dtype=np.int64)
    sent_len2 = np.zeros(len(batch), dtype=np.int64)
    labels = torch.LongTensor(len(batch))
    for i in range(len(batch)):
        sent_len1[i], sent_len2[i] = len(batch[i].sentence1), len(batch[i].sentence2)
        all_sentences1.append(convert_sent_to_tensor(batch[i].sentence1, max_sent_length1 + 2, dictionary.pad_token))
        all_sentences2.append(convert_sent_to_tensor(batch[i].sentence2, max_sent_length2 + 2, dictionary.pad_token))
        labels[i] = batch[i].label
    all_sentences1 = torch.from_numpy(np.asarray(all_sentences1, dtype=np.int64))
    all_sentences2 = torch.from_numpy(np.asarray(all_sentences2, dtype=np.int64))
    # NOTE(review): Variable(..., volatile=...) is the pre-0.4 PyTorch API; kept
    # unchanged to preserve behavior for the torch version this project targets.
    return Variable(all_sentences1, volatile=iseval), sent_len1, Variable(all_sentences2, volatile=iseval), \
           sent_len2, Variable(labels, volatile=iseval)
def batchify(data, bsz):
    """Shuffle *data* in place, then partition it into consecutive batches of
    size *bsz* (the last batch may be smaller)."""
    np.random.shuffle(data)
    return [data[start:start + bsz] for start in range(0, len(data), bsz)]
def save_plot(points, filepath, filetag, epoch):
    """Plot *points*, save it as <filetag>epoch_<epoch>.png under *filepath*,
    and remove older plot files sharing the same tag prefix."""
    path_prefix = os.path.join(filepath, filetag)
    path = path_prefix + 'epoch_{}.png'.format(epoch)
    fig, ax = plt.subplots()
    # Put y-axis ticks at regular 0.2 intervals.
    ax.yaxis.set_major_locator(ticker.MultipleLocator(base=0.2))
    ax.plot(points)
    fig.savefig(path)
    plt.close(fig)  # release the figure's memory
    for stale in glob.glob(path_prefix + '*'):
        if stale != path:
            os.remove(stale)
def convert_to_minutes(s):
    """Format a duration of *s* seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (minutes, seconds)
def show_progress(since, percent):
    """Return 'elapsed (- remaining)' given a start timestamp *since* and the
    completed fraction *percent* of the work."""
    elapsed = time.time() - since
    estimated_total = elapsed / percent
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (convert_to_minutes(elapsed), convert_to_minutes(remaining))
|
{"hexsha": "81f7399fdad820892d563edbfa55b46c61563598", "size": 9581, "ext": "py", "lang": "Python", "max_stars_repo_path": "mtl_sent2vec/shared_private/helper.py", "max_stars_repo_name": "wasiahmad/community_question_answering", "max_stars_repo_head_hexsha": "73d13bc1cdf2ea66d13209c007dcc2767cf2155c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-10-04T04:37:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-19T15:59:51.000Z", "max_issues_repo_path": "mtl_sent2vec/shared_private/helper.py", "max_issues_repo_name": "wasiahmad/community_question_answering", "max_issues_repo_head_hexsha": "73d13bc1cdf2ea66d13209c007dcc2767cf2155c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-02T09:49:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-02T13:44:56.000Z", "max_forks_repo_path": "mtl_sent2vec/shared_private/helper.py", "max_forks_repo_name": "wasiahmad/community_question_answering", "max_forks_repo_head_hexsha": "73d13bc1cdf2ea66d13209c007dcc2767cf2155c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-10-08T06:52:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-18T03:49:14.000Z", "avg_line_length": 35.2242647059, "max_line_length": 117, "alphanum_fraction": 0.6550464461, "include": true, "reason": "import numpy", "num_tokens": 2291}
|
'''
Authors:
1.the-vishal : Vishal
2.Vikas92155 : Vikas
MIT License
Copyright (c) 2020 the-vishal
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import flask
from flask import request, jsonify
from flask_cors import CORS
import json
from urllib.parse import unquote
#approach1 - using Cosine Similarity
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
#approach3 - using WordNet
from nltk.corpus import stopwords,wordnet
from itertools import product
import numpy
#approach2 - using word2Vec
from gensim.models import word2vec
import gensim
# Flask application setup: debug mode enabled, CORS allowed for all routes.
app = flask.Flask(__name__)
app.config["DEBUG"] = True  # auto-reload + verbose errors; disable for production
cors = CORS(app)
#HOME
@app.route('/', methods=['GET'])
def home():
    """Landing page: short HTML description and disclaimer for the service."""
    return "<h1><u>NLP based Context Analyzer</u></h1><p>This is a NLP based Context Analyzing tool. <br /> Developer(s) never take responsibility of any cases or results missed. <br /> It is just a tool proposed for helping searchers out there.</p>"
#ANALYZE POST REQ API
@app.route('/analyze', methods=['POST'])
def analyze_context():
    """POST /analyze: find article sentences similar to the query text.

    Expects a JSON body with 'article', 'test_data', and optionally 'algo'
    (default 'wordnet'; CheckSimilarity also handles 'dcs' and 'w2v').
    Responds with JSON listing the matching sentences and similarity scores.
    """
    # Values arrive percent-encoded from the client; unquote restores plain text.
    article = unquote(request.json.get('article', ""))
    testData = unquote(request.json.get('test_data', ""))
    algo = request.json.get('algo', 'wordnet')
    if not (article and testData):
        # NOTE(review): this error response is sent with HTTP 200 — consider 400.
        return jsonify("Error: missing values")
    #word2vec
    context = CheckSimilarity(article=article, testdata=testData, algo=algo).relatedContext
    response = {
        "match":len(context)>0,
        "found":len(context),
        "yourSearch":testData,
        "resp":context
    }
    return jsonify(response)
#SIMILARITY CHECK
class CheckSimilarity(object):
    """Score article sentences against a test query.

    Supported algorithms (``algo``):
      * ``'dcs'``     -- cosine similarity over CountVectorizer vectors
      * ``'w2v'``     -- word2vec phrase similarity (loads the SLIM GoogleNews model)
      * anything else -- WordNet Wu-Palmer similarity (the default)
    """
    def __init__(self, article, testdata, algo):
        self.article = article
        self.testdata = testdata
        self.lmtzr = WordNetLemmatizer()
        self.method = algo
        # Stop words are padded with spaces so replacement removes whole tokens,
        # not substrings inside longer words.
        self.stopWords = stopwords.words('english')+ list('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
        self.stopWords = [' '+i+' ' for i in self.stopWords]
        if self.method =='w2v':
            self.wordmodelfile="../GoogleNews-vectors-negative300-SLIM.bin.gz"
            self.wordmodel= gensim.models.KeyedVectors.load_word2vec_format(self.wordmodelfile, binary=True)
    @staticmethod
    def tokenize_sentences(text):
        """Split *text* into sentences on newlines and periods, dropping blanks."""
        sentences = []
        lines = text.split('\n')
        for i in lines:
            sentences.extend(i.split('.'))
        sentences = [i for i in sentences if i.strip()]
        return sentences
    def text_clean(self, text):
        """Lower-case *text* and strip stop words and punctuation tokens."""
        text = ' '+text.lower()+' '
        # BUGFIX: the original appended these tokens to self.stopWords on every
        # call, growing the instance list without bound; use a local list instead.
        removable = self.stopWords + ['"', "/", "\/"]
        for sw in removable:
            text = text.replace(sw, " ")
        return text.strip()
    @staticmethod
    def cossim_vectors(v1, v2):
        """Cosine similarity between two 1-D vectors."""
        v1 = v1.reshape(1, -1)
        v2 = v2.reshape(1, -1)
        return cosine_similarity(v1, v2)[0][0]
    def w2v(self, s1, s2):
        """Word2vec phrase similarity between two cleaned sentences.

        Returns 1.0 for identical strings, 0.0 when the sentences share no
        words, and the model's n_similarity otherwise (0.0 on model failure).
        """
        if s1 == s2:
            return 1.0
        s1words = s1.split()
        s2words = s2.split()
        s1wordsset = set(s1words)
        s2wordsset = set(s2words)
        # NOTE(review): .vocab is the gensim<4 API; gensim 4 uses key_to_index.
        vocab = self.wordmodel.vocab
        if len(s1wordsset & s2wordsset) == 0:
            return 0.0
        # Drop out-of-vocabulary words before querying the model.
        for word in s1wordsset.copy():
            if (word not in vocab):
                s1words.remove(word)
        for word in s2wordsset.copy():
            if (word not in vocab):
                s2words.remove(word)
        # BUGFIX: the original computed the similarity but never returned it
        # (its retrain fallback also referenced an undefined variable).
        try:
            return self.wordmodel.n_similarity(s1words, s2words)
        except Exception as ex:
            print(ex)
            return 0.0
    def getWordnetSimilarity(self, lemm_sent1, lemm_sent2):
        """Mean, over words of *lemm_sent1*, of the best Wu-Palmer similarity
        against any word of *lemm_sent2* (0.0 when nothing is comparable)."""
        final = []
        for w1 in lemm_sent1:
            best_per_partner = []
            for w2 in lemm_sent2:
                syns1 = wordnet.synsets(w1)
                syns2 = wordnet.synsets(w2)
                # Best similarity over every synset pairing of (w1, w2).
                # BUGFIX: the original accumulated scores across all words
                # instead of resetting per pair, inflating every maximum.
                pair_scores = []
                for comb_from_set1, comb_from_set2 in product(syns1, syns2):
                    combination_similarity = wordnet.wup_similarity(comb_from_set1, comb_from_set2)
                    if combination_similarity:
                        pair_scores.append(combination_similarity)
                if pair_scores:
                    best_per_partner.append(max(pair_scores))
            if best_per_partner:
                final.append(max(best_per_partner))
        if not final:
            # BUGFIX: numpy.mean([]) is NaN; report zero similarity instead.
            return 0.0
        return round(numpy.mean(final), 2)
    def make_sentence_variation(self, tokenize_data):
        """Clean each sentence and return its list of lemmatized tokens."""
        cleaned = list(map(self.text_clean, tokenize_data))
        return [list(map(self.lmtzr.lemmatize, sent.split(' '))) for sent in cleaned]
    @property
    def relatedContext(self):
        """Article sentences whose similarity to the first test sentence
        exceeds 0.9 (at most 6 entries), as {sentence, similarityIndex} dicts."""
        sentences = self.tokenize_sentences(self.article)
        test_sentences = self.tokenize_sentences(self.testdata)
        lemm_sentences = self.make_sentence_variation(sentences)
        lemm_test_sentences = self.make_sentence_variation(test_sentences)
        if self.method == 'dcs':
            vectorizer = CountVectorizer().fit_transform([' '.join(i) for i in lemm_test_sentences + lemm_sentences])
            vectors = vectorizer.toarray()
            test_vector = vectors[0]
            # BUGFIX: skip all test-sentence rows.  The original always skipped
            # exactly one row, misaligning indices (and reading past the end of
            # `sentences` for the last row).
            article_vectors = vectors[len(lemm_test_sentences):]
        elif self.method == 'w2v':
            article_vectors = [' '.join(i) for i in lemm_sentences]
            test_vector = ' '.join(lemm_test_sentences[0])
        else:
            article_vectors = lemm_sentences
            test_vector = lemm_test_sentences[0]
        related_context = []
        for index, vector in enumerate(article_vectors):
            if len(vector) > 2:
                if self.method == 'dcs':
                    sim = self.cossim_vectors(test_vector, vector)
                elif self.method == 'w2v':
                    sim = self.w2v(test_vector, vector)
                else:
                    sim = self.getWordnetSimilarity(test_vector, vector)
                if sim > 0.9:
                    related_context.append({"sentence": sentences[index].strip(), "similarityIndex": sim})
                # Stop after the first few matches to keep responses fast.
                if len(related_context) > 5:
                    break
        return related_context
#APP RUNSERVER
# Start the web application object defined earlier in this module
# (blocks until the server is stopped).
app.run()
|
{"hexsha": "d19ddcb26ad841c639c029f2ae5d36d064a1a28f", "size": 8077, "ext": "py", "lang": "Python", "max_stars_repo_path": "backend/main.py", "max_stars_repo_name": "iamxhunt3r/Context-Analyzer", "max_stars_repo_head_hexsha": "1b4f2b63ee7a101f021c7322eadcda0604d0b952", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-04-30T03:26:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T06:49:50.000Z", "max_issues_repo_path": "backend/main.py", "max_issues_repo_name": "iamxhunt3r/Context-Analyzer", "max_issues_repo_head_hexsha": "1b4f2b63ee7a101f021c7322eadcda0604d0b952", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-03-02T18:28:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-04T16:28:35.000Z", "max_forks_repo_path": "backend/main.py", "max_forks_repo_name": "iamxhunt3r/Context-Analyzer", "max_forks_repo_head_hexsha": "1b4f2b63ee7a101f021c7322eadcda0604d0b952", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-03-02T18:14:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-24T21:40:43.000Z", "avg_line_length": 33.7949790795, "max_line_length": 250, "alphanum_fraction": 0.6238702489, "include": true, "reason": "import numpy", "num_tokens": 1848}
|
"""
Mathematical functions for line shapes in S(q,E) modelling.
"""
import numpy as np
###############################################################################
###############################################################################
###############################################################################
def lorentzian(x, x0, width, **kwargs):
    """
    Cauchy / Lorentzian probability density, normalized to 1.

    Parameters
    ----------
    x : float, ndarray
        running variable, in this context mostly energy
    x0 : float
        location (mean) of the distribution
    width : float
        scale parameter; the formula uses it as the half width at half
        maximum (elsewhere in this module it is labelled FWHM -- note)

    Returns
    -------
    : float, ndarray
        values of the probability density function
    """
    shifted = x - x0
    return width / np.pi / (shifted**2 + width**2)
#------------------------------------------------------------------------------
def lorentzian_cdf(x, x0, width, **kwargs):
    """
    Cumulative distribution function of a Lorentzian / Cauchy distribution.

    Parameters
    ----------
    x : float, ndarray
        running variable, in this context mostly energy
    x0 : float
        location (mean) of the distribution
    width : float
        scale parameter of the distribution

    Returns
    -------
    : float, ndarray
        integrated value of the probability density function
    """
    scaled = (x - x0) / width
    return 1.0 / np.pi * np.arctan(scaled) + 0.5
#------------------------------------------------------------------------------
def gaussian(x, x0, sigma):
    """
    Gaussian / normal probability density, normalized to 1.

    Parameters
    ----------
    x : float, ndarray
        running variable, in this context mostly energy
    x0 : float
        mean value of the distribution
    sigma : float
        standard deviation of the distribution (sign is ignored)

    Returns
    -------
    : float, ndarray
        values of the probability density function
    """
    z = (x - x0) / sigma
    return np.exp(-0.5 * np.power(z, 2)) / np.sqrt(2 * np.pi) / np.abs(sigma)
#------------------------------------------------------------------------------
def fqe_I(e, e_c, A, q, kappa):
    """
    Analytical dynamical correlation function of an isotropic ferromagnet
    in first order in epsilon = 6 - d dimensions, after Iro [1].
    Valid for T > Tc.

    Parameters
    ----------
    e : float, ndarray
        running variable, energy transfer [meV]
    e_c : float
        analogon to the linewidth in a Lorentzian spectral shape function;
        must be given in the same unit as the energy variable 'e'
    A : float
        proportionality factor for the linewidth gamma = A * q**2.5
        [A] = meV angstroem**(5/2) | A(nickel) = 350 meV angstroem**(5/2)
    q : float
        momentum transfer [1/angstroem]
    kappa : float
        inverse (mag.) correlation length [angstroem]

    Returns
    -------
    fqe : float
        spectral shape function F(q, energy)

    References
    ----------
    [1] H. Iro, J. Magn. Magn. Mater. 73, 175 (1988)
    """
    x = q / kappa
    w = capital_w(q, e, A)
    # Complex susceptibility-like denominator; only the real part enters.
    denominator = -1j * (e / e_c) + 1.0 / (capital_z(x) * capital_pi1(x, w))
    return 1 / np.pi / e_c * np.real(1.0 / denominator)
def capital_z(x):
    """
    Z value entering fqe_I.

    Parameters
    ----------
    x : float
        q (momentum transfer) / kappa (inverse mag. correlation length)

    Returns
    -------
    Z : float
    """
    # Empirical constants of the epsilon-expansion parametrization.
    a = 0.46
    b = 3.16
    k = 0.51
    base = 1 + b / x**2
    damping = 1 - k * np.arctan(a * (1 + 1 / x**2) / base**2)
    return 1.0 / damping * base**(-3. / 4)
def capital_pi1(x, w):
    """
    PI_1 value entering fqe_I; related to the self energy of the
    dynamic susceptibility.

    Parameters
    ----------
    x : float
        q (momentum transfer) / kappa (inverse mag. correlation length)
    w : float
        scaled frequency (see capital_w)

    Returns
    -------
    PI_1 : complex
    """
    a = 0.46
    b = 3.16
    real_term = (1 + b / x**2)**(2 - 3.0 / 4)
    return (real_term - 1j * a * w)**(3.0 / 5)
def capital_w(q, energy, A):
    """
    W value entering PI_1 in fqe_I: energy scaled by the asymptotic
    Z factor and the linewidth gamma = A * q**2.5.

    Parameters
    ----------
    q : float
        momentum transfer [1/angstroem]
    energy : float
        energy transfer [meV]
    A : float
        proportionality factor for the linewidth gamma = A * q**2.5
        [A] = meV angstroem**(5/2) | A(nickel) = 350 meV angstroem**(5/2)

    Returns
    -------
    W : float
    """
    # capital_z(inf) is the x -> infinity limit of the Z factor.
    return energy * capital_z(np.inf) / A / q**2.5
#------------------------------------------------------------------------------
def fqe_c(e, e_c, A, q):
    """
    Spectral shape function by Folk and Iro [1], valid at T = Tc.
    Based on the renormalization-group approach by Dohm [2] for isotropic
    ferromagnets.

    Parameters
    ----------
    e : float, ndarray
        running variable, energy transfer [meV]
    e_c : float
        analogon to the linewidth in a Lorentzian spectral shape function;
        must be given in the same unit as the energy variable 'e'
    A : float
        proportionality factor for the linewidth gamma = A * q**2.5
        [A] = meV angstroem**(5/2) | A(nickel) = 350 meV angstroem**(5/2)
    q : float
        momentum transfer [1/angstroem]

    Returns
    -------
    fqe : float
        spectral shape function F(q, energy)

    References
    ----------
    [1] R. Folk and H. Iro, Phys. Rev. B 32, 1880 (1985)
    [2] V. Dohm, Solid State Commun. 20, 657 (1976)
    """
    a = 0.46
    alpha = 0.78
    reduced = e / e_c
    shape = np.real(1.0 / (1j * reduced + alpha * (1 + 1j * (a / alpha) * reduced)**-0.6))
    return shape / (np.pi * A * q**2.5)
|
{"hexsha": "879fb3eb1a8b330368f439335755ada85e18f7ef", "size": 5886, "ext": "py", "lang": "Python", "max_stars_repo_path": "modelmiezelb/utils/lineshape_functions.py", "max_stars_repo_name": "LukasBeddrich/modelmiezelb", "max_stars_repo_head_hexsha": "b5be0014c391c9aff26360d175d479df99628dcb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modelmiezelb/utils/lineshape_functions.py", "max_issues_repo_name": "LukasBeddrich/modelmiezelb", "max_issues_repo_head_hexsha": "b5be0014c391c9aff26360d175d479df99628dcb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-08-05T11:28:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-05T11:28:05.000Z", "max_forks_repo_path": "modelmiezelb/utils/lineshape_functions.py", "max_forks_repo_name": "LukasBeddrich/modelmiezelb", "max_forks_repo_head_hexsha": "b5be0014c391c9aff26360d175d479df99628dcb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-30T12:24:15.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-30T12:24:15.000Z", "avg_line_length": 28.572815534, "max_line_length": 127, "alphanum_fraction": 0.4898063201, "include": true, "reason": "import numpy", "num_tokens": 1533}
|
import numpy as np
from scipy.fftpack import fft, ifft
from sympy import sqrt
from devito import TimeFunction, Function, Inc, Dimension, Eq
def wavefield(model, space_order, save=False, nt=None, fw=True):
    """Allocate the Devito TimeFunction(s) for a wave simulation.

    Forward propagation uses the name "u", adjoint/backward uses "v".
    TTI models get a pair of coupled wavefields; otherwise a single
    TimeFunction is returned. When `save` is set, `nt` time steps are
    stored.
    """
    name = "u" if fw else "v"
    nsave = nt if save else None
    common = dict(grid=model.grid, time_order=2,
                  space_order=space_order, save=nsave)
    if model.is_tti:
        return (TimeFunction(name="%s1" % name, **common),
                TimeFunction(name="%s2" % name, **common))
    return TimeFunction(name=name, **common)
def weighted_norm(u, weight=None):
    """
    Space-time norm of a wavefield, split into a norm in time first and
    then in space to avoid breaking loops.

    Parameters
    ----------
    u : TimeFunction or tuple of TimeFunction
        Wavefield(s); a tuple is treated as two coupled components.
    weight : optional
        Spatial weighting; when given, the time-norm is divided by weight**2.

    Returns
    -------
    (Function, list of Eq)
        The single-element accumulator Function holding the norm and the
        Devito equations that compute it.
    """
    # isinstance is the idiomatic type test (also accepts tuple subclasses).
    if isinstance(u, tuple):
        expr = u[0].grid.time_dim.spacing * (u[0]**2 + u[1]**2)
        grid = u[0].grid
    else:
        expr = u.grid.time_dim.spacing * u**2
        grid = u.grid
    # Norm in time
    norm_vy2_t = Function(name="nvy2t", grid=grid)
    n_v = [Eq(norm_vy2_t, norm_vy2_t + expr)]
    # Then norm in space, accumulated into a single-element Function.
    i = Dimension(name="i", )
    norm_vy2 = Function(name="nvy2", shape=(1,), dimensions=(i, ), grid=grid)
    if weight is None:
        n_v += [Eq(norm_vy2[0], norm_vy2[0] + norm_vy2_t)]
    else:
        n_v += [Eq(norm_vy2[0], norm_vy2[0] + norm_vy2_t / weight**2)]
    return norm_vy2, n_v
# Weighting
def weight_fun(weight_fun_pars, model, src_coords):
    """Dispatch to a spatial weighting function.

    `weight_fun_pars` is (kind, delta); returns None when it is None or the
    kind is not recognized.
    """
    if weight_fun_pars is None:
        return None
    kind = weight_fun_pars[0]
    if kind == "srcfocus":
        return weight_srcfocus(model, src_coords,
                               delta=np.float32(weight_fun_pars[1]))
    if kind == "depth":
        return weight_depth(model, src_coords,
                            delta=np.float32(weight_fun_pars[1]))
def weight_srcfocus(model, src_coords, delta=np.float32(0.01)):
    """
    w(x) = sqrt((||x-xsrc||^2 + delta^2) / delta^2)

    Symbolic distance-to-source weight expressed in grid indices.
    """
    ix, iz = model.grid.dimensions
    # Source position in grid indices, including the absorbing-layer offset.
    src_idx = (np.float32(model.nbl) + src_coords[0, 0] / model.spacing[0],
               np.float32(model.nbl) + src_coords[0, 1] / model.spacing[1])
    h = np.sqrt(model.spacing[0] * model.spacing[1])
    scaled_delta = delta / h
    dist2 = (ix - src_idx[0])**2 + (iz - src_idx[1])**2
    return sqrt(dist2 + scaled_delta**np.float32(2)) / scaled_delta
def weight_depth(model, src_coords, delta=np.float32(0.01)):
    """
    w(x) = sqrt((||z-zsrc||^2 + delta^2) / delta^2)

    Depth-only variant of the source-focusing weight (ignores the
    horizontal distance).
    """
    _, iz = model.grid.dimensions
    src_idx = (np.float32(model.nbl) + src_coords[0, 0] / model.spacing[0],
               np.float32(model.nbl) + src_coords[0, 1] / model.spacing[1])
    h = np.sqrt(model.spacing[0] * model.spacing[1])
    scaled_delta = delta / h
    return sqrt((iz - src_idx[1])**2 + scaled_delta**np.float32(2)) / scaled_delta
# Data filtering
def applyfilt(dat, Filter=None):
    """Apply a frequency-domain filter along axis 0; identity when Filter is None.

    The data is zero-padded to the filter length, multiplied by the filter
    spectrum, transformed back, and truncated to the original length.
    """
    if Filter is None:
        return dat
    pad = max(dat.shape[0], Filter.size)
    spectrum = fft(dat, n=pad, axis=0)
    filtered = ifft(spectrum * Filter.reshape(-1, 1), axis=0)
    return np.real(filtered[:dat.shape[0], :])
def applyfilt_transp(dat, Filter=None):
    """Adjoint of applyfilt: multiply by the conjugate filter spectrum.

    Identity when Filter is None.
    """
    if Filter is None:
        return dat
    pad = max(dat.shape[0], Filter.size)
    spectrum = fft(dat, n=pad, axis=0)
    filtered = ifft(spectrum * np.conj(Filter).reshape(-1, 1), axis=0)
    return np.real(filtered[:dat.shape[0], :])
# Alpha for wri
def compute_optalpha(v1, v2, v3, comp_alpha=True):
    """Optimal step length alpha for WRI.

    Returns 1 when alpha computation is disabled, 0 when v3 does not lie
    strictly below |v2| or the formula degenerates (inf/nan), otherwise
    sign(v2) * (|v2| - v3) / (2 * v1).
    """
    if not comp_alpha:
        return np.float32(1)
    # Negated original condition; also routes NaN inputs to the 0 branch.
    if not v3 < np.abs(v2):
        return np.float32(0)
    alpha = np.sign(v2) * (np.abs(v2) - v3) / (np.float32(2) * v1)
    if np.isinf(alpha) or np.isnan(alpha):
        return np.float32(0)
    return alpha
|
{"hexsha": "1fa774e63f34c4a6b8595ae3af299e4a6ba027a0", "size": 3730, "ext": "py", "lang": "Python", "max_stars_repo_path": "louboutin2020SEGtwri/src/TWRI_py/twri_utils.py", "max_stars_repo_name": "pilotlynd/Software.SEG2020", "max_stars_repo_head_hexsha": "fce1213331fcc516d199cea3ffbfecbc1953aef4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-04-25T15:00:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T22:29:18.000Z", "max_issues_repo_path": "louboutin2020SEGtwri/src/TWRI_py/twri_utils.py", "max_issues_repo_name": "slimgroup/Software.SEG2020", "max_issues_repo_head_hexsha": "e7dd34d803a4936b369b7d49f7a992445121c533", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "louboutin2020SEGtwri/src/TWRI_py/twri_utils.py", "max_forks_repo_name": "slimgroup/Software.SEG2020", "max_forks_repo_head_hexsha": "e7dd34d803a4936b369b7d49f7a992445121c533", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-04-27T09:14:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-06T10:30:24.000Z", "avg_line_length": 33.3035714286, "max_line_length": 87, "alphanum_fraction": 0.6026809651, "include": true, "reason": "import numpy,from scipy,from sympy", "num_tokens": 1106}
|
from __future__ import division # for proper float division
import os
import sys
import math
import time
import types
import functools
import random
import numpy as np
from ddapp import ik
from ddapp import ikconstraints
from ddapp import ikconstraintencoder
import drc as lcmdrc
import json
from ddapp.utime import getUtime
from ddapp import lcmUtils
import ipab
class PlannerPublisher(object):
  """Publishes IK / trajectory planning requests over LCM and collects results.

  Bridges the ddapp IK planner to an external (EXOTica) planner: constraints
  are serialized to JSON, published on 'IK_REQUEST' / 'PLANNER_REQUEST', and
  the responses are decoded back into poses/plans.
  """
  def __init__(self, ikPlanner, affordanceMan):
    # ikPlanner: ddapp IK planner; affordanceMan: source of collision affordances.
    self.ikPlanner = ikPlanner
    self.affordanceManager = affordanceMan
    # Extra named poses registered via processAddPose, merged into every request.
    self.poses={}
  def setupMessage(self, constraints, endPoseName="", nominalPoseName="", seedPoseName="", additionalTimeSamples=None):
    """Build an exotica_planner_request_t from constraints and pose names.

    Constraints, poses, joint names, affordances and planner options are all
    JSON-encoded into the message fields.
    """
    poses = ikconstraintencoder.getPlanPoses(constraints, self.ikPlanner)
    poses.update(self.poses)
    msg = ipab.exotica_planner_request_t()
    msg.utime = getUtime()
    msg.poses = json.dumps(poses)
    msg.constraints = ikconstraintencoder.encodeConstraints(constraints)
    msg.seed_pose = seedPoseName
    msg.nominal_pose = nominalPoseName
    msg.end_pose = endPoseName
    msg.joint_names = json.dumps(list(self.ikPlanner.jointController.jointNames))
    msg.affordances = self.processAffordances()
    # NOTE(review): `ikplanner` is imported at the bottom of this module
    # (circular-import workaround) -- it must be importable by publish time.
    opt=ikplanner.getIkOptions()._properties
    if additionalTimeSamples:
      opt.update({'timeSamples':additionalTimeSamples})
    msg.options = json.dumps(opt)
    return msg
  def processIK(self, constraints, endPoseName="", nominalPoseName="", seedPoseName="", additionalTimeSamples=None):
    """Publish an IK request and block for the response.

    Returns (endPose, info): the joint positions of the last plan state
    (zeros where the response is shorter than the joint vector) and the
    planner info code, or info = -1 when no states came back.
    """
    listener = self.ikPlanner.getManipIKListener()
    msg = self.setupMessage(constraints, endPoseName, nominalPoseName, seedPoseName, additionalTimeSamples)
    lcmUtils.publish('IK_REQUEST', msg)
    # Blocks up to 12 s for the external planner's reply.
    ikplan = listener.waitForResponse(timeout=12000)
    listener.finish()
    endPose = [0] * self.ikPlanner.jointController.numberOfJoints
    if ikplan.num_states>0:
      # Copy the tail of the joint vector from the last returned state.
      endPose[len(endPose)-len(ikplan.plan[ikplan.num_states-1].joint_position):] = ikplan.plan[ikplan.num_states-1].joint_position
      info=ikplan.plan_info[ikplan.num_states-1]
    else:
      info = -1
    self.ikPlanner.ikServer.infoFunc(info)
    return endPose, info
  def processTraj(self, constraints, endPoseName="", nominalPoseName="", seedPoseName="", additionalTimeSamples=None):
    """Publish a trajectory planning request and block for the plan.

    Normalizes finite constraint tspans to [0, 1] in place before sending
    (flagged HACK below), then rescales the returned plan's timestamps.
    Returns (plan, info).
    """
    # Temporary fix / HACK / TODO (should be done in exotica_json)
    largestTspan = [0, 0]
    for constraintIndex, _ in enumerate(constraints):
      # Get tspan extend to normalise time-span
      if np.isfinite(constraints[constraintIndex].tspan[0]) and np.isfinite(constraints[constraintIndex].tspan[1]):
        largestTspan[0] = constraints[constraintIndex].tspan[0] if (constraints[constraintIndex].tspan[0] < largestTspan[0]) else largestTspan[0]
        largestTspan[1] = constraints[constraintIndex].tspan[1] if (constraints[constraintIndex].tspan[1] > largestTspan[1]) else largestTspan[1]
    # Temporary fix / HACK/ TODO to normalise time spans
    for constraintIndex, _ in enumerate(constraints):
      if np.isfinite(constraints[constraintIndex].tspan[0]) and np.isfinite(constraints[constraintIndex].tspan[1]):
        if largestTspan[1] != 0:
          constraints[constraintIndex].tspan[0] = constraints[constraintIndex].tspan[0] / largestTspan[1]
          constraints[constraintIndex].tspan[1] = constraints[constraintIndex].tspan[1] / largestTspan[1]
    listener = self.ikPlanner.getManipPlanListener()
    msg = self.setupMessage(constraints, endPoseName, nominalPoseName, seedPoseName, additionalTimeSamples)
    lcmUtils.publish('PLANNER_REQUEST', msg)
    # Blocks up to 20 s for the external planner's reply.
    lastManipPlan = listener.waitForResponse(timeout=20000)
    listener.finish()
    if lastManipPlan:
      # HACK: need to multiply by original trajectory length again (the one in the plan is planning and not real time), otherwise jumps to real hardware
      for state in lastManipPlan.plan:
        state.utime = state.utime * 50
    self.ikPlanner.ikServer.infoFunc(lastManipPlan.plan_info[0])
    return lastManipPlan, lastManipPlan.plan_info[0]
  def processAddPose(self, pose, poseName):
    """Register a named pose to be included in subsequent requests."""
    self.poses[poseName]=list(pose);
  def processAffordances(self):
    """Serialize the manager's collision affordances to a JSON array string.

    The string is built by hand (not via json.dumps); each entry carries the
    classname, uuid, pose, and class-specific geometry fields.
    """
    affs = self.affordanceManager.getCollisionAffordances()
    s='['
    first=True
    for aff in affs:
      des=aff.getDescription()
      classname=des['classname'];
      if first:
        s+='{'
      else:
        s+='\n,{'
      first=False
      s+='"classname":"'+classname+'"'
      s+=',"uuid":"'+des['uuid']+'"'
      s+=',"pose": {"position":{"__ndarray__":'+repr(des['pose'][0].tolist())+'},"quaternion":{"__ndarray__":'+repr(des['pose'][1].tolist())+'}}'
      # Geometry fields depend on the affordance class.
      if classname=='MeshAffordanceItem':
        s+=',"filename":"'+aff.getMeshManager().getFilesystemFilename(des['Filename'])+'"'
      if classname=='SphereAffordanceItem':
        s+=',"radius":'+repr(des['Radius'])
      if classname=='CylinderAffordanceItem' or classname=='CapsuleAffordanceItem':
        s+=',"radius":'+repr(des['Radius'])
        s+=',"length":'+repr(des['Length'])
      if classname=='BoxAffordanceItem':
        s+=',"dimensions":'+repr(des['Dimensions'])
      if classname=='CapsuleRingAffordanceItem':
        s+=',"radius":'+repr(des['Radius'])
        s+=',"tube_radius":'+repr(des['Tube Radius'])
        s+=',"segments":'+repr(des['Segments'])
      s+='}'
    s=s+']'
    return s
import ikplanner
|
{"hexsha": "98d5e35f4347a14452341b65ada4bb1d765d8a23", "size": 5360, "ext": "py", "lang": "Python", "max_stars_repo_path": "externals/director/src/python/ddapp/plannerPublisher.py", "max_stars_repo_name": "ericmanzi/double_pendulum_lqr", "max_stars_repo_head_hexsha": "76bba3091295abb7d412c4a3156258918f280c96", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "externals/director/src/python/ddapp/plannerPublisher.py", "max_issues_repo_name": "ericmanzi/double_pendulum_lqr", "max_issues_repo_head_hexsha": "76bba3091295abb7d412c4a3156258918f280c96", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "externals/director/src/python/ddapp/plannerPublisher.py", "max_forks_repo_name": "ericmanzi/double_pendulum_lqr", "max_forks_repo_head_hexsha": "76bba3091295abb7d412c4a3156258918f280c96", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5503875969, "max_line_length": 152, "alphanum_fraction": 0.6994402985, "include": true, "reason": "import numpy", "num_tokens": 1379}
|
# Simulation of an induction machine (ASM) connected to a voltage source
# through a transformer, with a voltage-dip disturbance (Modia models).
module MYASM1
using Modia
setLogMerge(false)
# Bring in the mechanical and electrical-phasor component libraries.
include("../src/ModelMechanic.jl")
include("../src/ModelEPSphasor.jl")
##################################
# Output drops from 1.0 to Vmin during [Tstart, Tstart+dT] (voltage dip).
VarVolts = Model( # Voltage dip
    Tstart = 0.0,
    dT = 0.15,
    Vmin = 0.0,
    y = output,
    equations = :[
        y = if after(Tstart) && !after(Tstart+dT); Vmin else; 1.0 end;
        ]
)
# Grid -> transformer -> induction machine -> inertia, with two meters and
# a voltage dip applied to the source amplitude.
ASMtest = Model(
    LOAD = parameter | Map(value=0.0, fixed=false),
    wReference= parameter | 1.0 | Var(info = "ref. Frame"),
    Omegarated=100.0*pi,
    GridVolts = EPSphasor.VoltageSource,
    GC = EPSphasor.GroundVec,
    asmUnit = EPSphasor.InductionMachine,
    transformer = EPSphasor.Transformer | Map( xt=0.10, rt=0.1/40.0, Strafo=119e6),
    meterGRID = EPSphasor.Meter,
    meterGEN = EPSphasor.Meter,
    varvolts = VarVolts | Map(Vmin=0.7),
    FIXED = Mechanic.Fixed,
    J = Mechanic.Inertia | Map(
        J=3.0,
        w=Var(init=0.4),
        phi=Var(init=0.0),
        ),
    # External load torque applied at the inertia's second flange.
    equations = :[
        J.flange_b.tau = LOAD
        ],
    connect = :[
        (GridVolts.V, varvolts.y)
        (GridVolts.p, meterGRID.n)
        (meterGRID.p, transformer.pin1)
        (transformer.pin2, meterGEN.n)
        (meterGEN.p, asmUnit.p)
        (asmUnit.Flange_a, J.flange_a)
        (asmUnit.Flange_b, FIXED.flange)
        (GridVolts.n, GC.p)
        ]
)
# Build the executable model (all logging switched off).
instModel = @instantiateModel(ASMtest,
    unitless=true,
    log=false,
    logTiming=false,
    logDetails=false,
    logCode=false,
    logStateSelection=false,
#    saveCodeOnFile="CODE.txt",
    )
# include("../src/Utils.jl")
# TemplateInit(instModel)
end # module MYGEN2
|
{"hexsha": "0fca8321291a150e8c638ad7b2ffd06e614f9290", "size": 1648, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/myASM1.jl", "max_stars_repo_name": "johhell/EPSphasor", "max_stars_repo_head_hexsha": "ef2ae21c976657f09e51347d88b02d9bfdfee3a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/myASM1.jl", "max_issues_repo_name": "johhell/EPSphasor", "max_issues_repo_head_hexsha": "ef2ae21c976657f09e51347d88b02d9bfdfee3a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/myASM1.jl", "max_forks_repo_name": "johhell/EPSphasor", "max_forks_repo_head_hexsha": "ef2ae21c976657f09e51347d88b02d9bfdfee3a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.0975609756, "max_line_length": 83, "alphanum_fraction": 0.5734223301, "num_tokens": 516}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 20 17:48:08 2020
@author: suelto
Modified from Jason Brownlee blog:
https://machinelearningmastery.com/grid-search-hyperparameters-deep-learning-models-python-keras/
This code seeks to implement a random search for hyperparameter estimation.
"""
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import patsy
from patsy import dmatrix
import Solubility as S
import wild_edit_2
from SWIMS_time_lag import lag_shift
import seaborn as sns
# multivariate multi-step encoder-decoder lstm
from math import sqrt
from numpy import split
from numpy import array
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.layers import Dropout
# split a univariate dataset into train/test sets
def split_dataset(data, n_input):
    """Split a series into train/test windows of length n_input.

    The last 2*n_input rows form the test set; everything before is train.
    Both parts are reshaped into (num_windows, n_input, num_features).
    """
    cut = n_input * 2
    train, test = data[0:-cut], data[-cut:]
    train = array(split(train, len(train) / n_input))
    test = array(split(test, len(test) / n_input))
    return train, test
# convert history into inputs and outputs
def to_supervised(train, n_input, n_out=100):
    """Flatten windowed data and build overlapping (X, y) training pairs.

    Each sample X is a window of n_input consecutive timesteps over all
    features; the target y is the last feature over the same window.
    (`n_out` is accepted for interface compatibility but unused.)
    """
    # flatten (windows, steps, features) -> (windows*steps, features)
    data = train.reshape((train.shape[0] * train.shape[1], train.shape[2]))
    X, y = [], []
    for start in range(len(data)):
        end = start + n_input
        # keep only windows that fit before the final timestep
        if end <= len(data) - 1:
            X.append(data[start:end, 0:])
            y.append(data[start:end, -1])
    return array(X), array(y)
"""
New code below
"""
verbose = 1
n_input = 100
# define the grid search parameters
batch_size = [20, 60, 80]
epochs = [5, 10, 20]
dropout = [0.1,0.4,0.6,0.8]
param_grid = dict(batch_size=batch_size, epochs=epochs,n_drop=dropout)
pth = '../ZIPP2_EN602/EN602_Loose/science/UMS/MS Data/'
target = 64
#idx = [62,5,36,28,57,3,64]
idx = [36,28,57,64]
# Use scikit-learn to grid search the batch size and epochs
import numpy
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from keras.wrappers.scikit_learn import KerasRegressor
# Function to create model, required for KerasClassifier
###### Create model function is above #####
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load dataset
#%%
###------------- CALIBRATE July 12 ----------------------- #####
massspec_12_1 = pd.read_csv(pth+'MSData_7_12_2017 21_08.dfData',sep='\t',parse_dates=[0], header=0, low_memory=False,encoding='latin1')
hdrs = massspec_12_1.columns.values
#Shift to compensate for lage in UMS flow-thru system#
massspec_12_1 = lag_shift(massspec_12_1,hdrs[17:-1],41)
massspec_12_1 = massspec_12_1.dropna(axis=0, how='all')
ms = massspec_12_1.iloc[5858:9895, :]
ms = ms.reset_index(drop=True)
y12 = ms[hdrs[target]]
X = ms[hdrs[idx]].interpolate(method='linear', order=1, limit_direction='both')
Xf = X.values
Xf_mean = Xf.mean(axis=0)
Xf_std = Xf.std(axis=0)
x = (Xf-Xf_mean)/Xf_std
#model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
# prepare data
#shorten dataset to split into pieces
x = x[0:len(x)-np.mod(len(x),n_input)]
x = array(split(x, len(x)/n_input))
#%%
train_x, train_y = to_supervised(x, n_input)
#train_x = x; train_y = x[:,:,3]
# define parameters
#verbose, epochs, batch_size = 1, 15, 50
n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
# reshape output into [samples, timesteps, features]
train_y = train_y.reshape((train_y.shape[0], train_y.shape[1], 1))
# train the model
def build_model(n_timesteps=n_timesteps, n_features=n_features, n_outputs=n_outputs, n_drop=0.5):
    """Build an encoder-decoder LSTM for sequence regression.

    Architecture: LSTM encoder -> RepeatVector -> dropout -> LSTM decoder
    -> per-timestep Dense head (one output per timestep). Defaults are
    bound to the module-level shapes at definition time.
    """
    # define model
    model = Sequential()
    model.add(LSTM(50, activation='tanh', input_shape=(n_timesteps, n_features)))
    # Repeat the encoding for each output step; dropout for regularization.
    model.add(RepeatVector(n_outputs)); model.add(Dropout(n_drop))
    model.add(LSTM(50, activation='tanh', return_sequences=True))
    model.add(TimeDistributed(Dense(20, activation='relu')))
    model.add(TimeDistributed(Dense(1)))
    model.compile(loss='mse', optimizer='adam')
    # fit network
    return model
# create model
#model = KerasClassifier(build_fn=build_model, verbose=1)
# Wrap the Keras builder so scikit-learn can clone/fit it per grid cell.
model = KerasRegressor(build_fn=build_model, verbose=0)
#model= build_model(n_timesteps, n_features, n_outputs)
# 5-fold cross-validated exhaustive grid search, parallelized over all cores.
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=5)
grid_result = grid.fit(train_x, train_y)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
|
{"hexsha": "840e00bff3c573743db783c81507f5c990a9395c", "size": 5193, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/gridsearchtest.py", "max_stars_repo_name": "bloose/bias_correction_by_ML", "max_stars_repo_head_hexsha": "c0ae25f2bd656ab180adac0e228a3d4ffdec84cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-02T15:33:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-20T11:12:02.000Z", "max_issues_repo_path": "notebooks/gridsearchtest.py", "max_issues_repo_name": "bloose/bias_correction_by_ML", "max_issues_repo_head_hexsha": "c0ae25f2bd656ab180adac0e228a3d4ffdec84cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/gridsearchtest.py", "max_forks_repo_name": "bloose/bias_correction_by_ML", "max_forks_repo_head_hexsha": "c0ae25f2bd656ab180adac0e228a3d4ffdec84cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.222826087, "max_line_length": 135, "alphanum_fraction": 0.7392643944, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1474}
|
import os
import tensorflow as tf
import numpy as np
import random
import math
from matplotlib import pyplot as plt
from preprocessing import get_data
from tensorflow.keras import Sequential
from tensorflow.math import exp, sqrt, square
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten, Reshape, MaxPooling2D, Dropout, Conv2D, GlobalAveragePooling2D, UpSampling2D
from tensorflow.keras.layers.experimental.preprocessing import RandomFlip, RandomRotation
class Referee(tf.keras.Model):
    """6-class image classifier on top of a frozen ResNet50V2 backbone.

    Images (384x512x3) are passed through an ImageNet-pretrained ResNet50V2
    with frozen weights, then through a small dense head.
    """
    def __init__(self) -> None:
        super(Referee, self).__init__()
        self.num_classes = 6
        self.learning_rate = 0.00001
        self.decay_rate = 0.00000025
        self.dropout_rate = 0.01
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        self.batch_size = 100
        self.hidden_size = 500
        self.conv_size = 128
        # Pretrained backbone used as a fixed feature extractor.
        self.ResNet50 = ResNet50V2(include_top=False,
                                   weights="imagenet",
                                   input_shape=(384, 512, 3))
        self.ResNet50.trainable = False
        # Classification head on top of the flattened backbone features.
        # NOTE(review): the final layer uses relu while the loss is computed
        # with from_logits=True -- confirm relu (rather than linear/softmax)
        # is intended here.
        self.referee = Sequential([
            Flatten(),
            Dense(self.hidden_size, activation="relu"),
            Dropout(self.dropout_rate),
            Dense(self.hidden_size, activation="relu"),
            Dropout(self.dropout_rate),
            Dense(6, activation="relu")
        ])

        # self.referee = Sequential([
        #     Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same'),
        #     MaxPooling2D(pool_size=(2, 2)),
        #     Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'),
        #     MaxPooling2D(pool_size=(2, 2)),
        #     Conv2D(self.conv_size, kernel_size=(3, 3), activation='relu'),
        #     MaxPooling2D(pool_size=(2, 2)),
        #     Flatten(),
        #     Dense(self.hidden_size, activation='relu'),
        #     Dense(self.hidden_size, activation='relu'),
        #     Dense(6, activation='softmax')])

    def call(self, inputs):
        """Forward pass: frozen backbone features -> dense head -> class scores."""
        # stop_gradient reinforces that the backbone is not trained.
        resnet_output = tf.stop_gradient(self.ResNet50(inputs))
        referee_output = self.referee(resnet_output)
        # this returns probability of input being one of the classes
        return referee_output

    def loss(self, probs, labels):
        """Summed sparse categorical cross-entropy over the batch."""
        loss = tf.keras.losses.sparse_categorical_crossentropy(labels, probs, from_logits=True)
        return tf.reduce_sum(loss)

    def accuracy(self, logits, labels):
        """
        :param logits: a matrix of size (num_inputs, self.num_classes); during training, this will be (batch_size, self.num_classes)
        containing the result of multiple convolution and feed forward layers
        :param labels: matrix of size (num_labels, self.num_classes) containing the answers, during training, this will be (batch_size, self.num_classes)
        :return: the accuracy of the model as a Tensor

        labels should be one-hot vector
        """
        print("logits", logits)
        # print("labels Shape", labels.shape)
        # Compare argmax of predictions against argmax of one-hot labels.
        labelsOneHot = tf.one_hot(labels, self.num_classes)
        correct_predictions = tf.equal(tf.argmax(logits, 1), tf.argmax(labelsOneHot, 1))
        #print(correct_predictions) # correct predictions should be a one-dimensional list of labels
        # print("dimension1: ", tf.argmax(logits, 1))
        # print("dimension1: ", tf.argmax(labelsOneHot, 1))
        # print("dimension2", tf.argmax(logits, 0))
        # print("dimension2", tf.argmax(labelsOneHot, 0))
        return tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
def train(model, train_inputs, train_labels):
    """Run one epoch of minibatch gradient descent over the training set.

    :param model: model exposing call/loss/accuracy, batch_size and optimizer
    :param train_inputs: array-like of training images
    :param train_labels: array-like of integer labels
    """
    for epoch in range(1):
        print(f"EPOCH: {epoch}")
        for start in range(0, len(train_inputs), model.batch_size):
            print(f"batch: {start // model.batch_size}")
            end = start + model.batch_size
            # NOTE(review): inputs are cast to int32 here — confirm the model
            # expects integer pixel values rather than floats.
            inputs = tf.convert_to_tensor(train_inputs[start:end], dtype=tf.int32)
            labels = tf.convert_to_tensor(train_labels[start:end], dtype=tf.int32)
            with tf.GradientTape() as tape:
                logits = model.call(inputs)
                loss = model.loss(logits, labels)
                accuracy = model.accuracy(logits, labels)
                print(f"ACCURACY: {accuracy}")
            grads = tape.gradient(loss, model.trainable_variables)
            model.optimizer.apply_gradients(zip(grads, model.trainable_variables))
def test(model, test_inputs, test_labels):
    """Evaluate the model on the test set.

    :param model: trained model exposing call/accuracy and batch_size
    :param test_inputs: array-like of test images
    :param test_labels: array-like of integer labels
    :return: mean per-batch accuracy (0 if the test set is empty)
    """
    accuracy = 0
    n = 0
    num_examples = model.batch_size
    for i in range(0, len(test_inputs), num_examples):
        images = tf.convert_to_tensor(test_inputs[i:i + num_examples], dtype=tf.int32)
        labels = tf.convert_to_tensor(test_labels[i:i + num_examples], dtype=tf.int32)
        predictions = model.call(images)
        # Fixed: the loss was previously computed here but never used; dropped.
        test_acc = model.accuracy(predictions, labels)
        n += 1
        accuracy += test_acc
    if n == 0:
        # Guard against an empty test set (previously raised ZeroDivisionError).
        return 0
    return accuracy / n
def main():
    """Load the dataset, train the Referee model, evaluate it, and save it."""
    train_imgs, train_lbls, test_imgs, test_lbls = get_data("../data")
    referee = Referee()
    print("Starting training...")
    train(referee, train_imgs, train_lbls)
    print("Starting testing...")
    final_acc = test(referee, test_imgs, test_lbls)
    print(f"FINAL TESTING ACCURACY: {final_acc}")
    # Persist the trained model for later reuse.
    tf.saved_model.save(referee, "../models")
# Run the training/testing pipeline only when executed directly as a script.
if __name__ == '__main__':
    main()
|
{"hexsha": "5c4ec4aaa5389988ca44ed7143efbe4f9f522b8f", "size": 5959, "ext": "py", "lang": "Python", "max_stars_repo_path": "deep_learning/code/model.py", "max_stars_repo_name": "Naveen-and-Taishi/recycleAtBrown", "max_stars_repo_head_hexsha": "397c1fdd70ac9d2dd977ae5de9ff36808d6af63d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "deep_learning/code/model.py", "max_issues_repo_name": "Naveen-and-Taishi/recycleAtBrown", "max_issues_repo_head_hexsha": "397c1fdd70ac9d2dd977ae5de9ff36808d6af63d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deep_learning/code/model.py", "max_forks_repo_name": "Naveen-and-Taishi/recycleAtBrown", "max_forks_repo_head_hexsha": "397c1fdd70ac9d2dd977ae5de9ff36808d6af63d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5642857143, "max_line_length": 153, "alphanum_fraction": 0.651955026, "include": true, "reason": "import numpy", "num_tokens": 1355}
|
import cv2
import numpy as np
import transformations as tf
import math as m
import time
import argparse
import threading
from apscheduler.schedulers.background import BackgroundScheduler
from dronekit import connect, VehicleMode, LocationGlobalRelative, Command, LocationGlobal
from pymavlink import mavutil
from dt_apriltags import Detector
#######################################
# Parameters for FCU and MAVLink
#######################################
# Default configurations for connection to the FCU
# Fallback values used when the matching command-line flag is not supplied.
connection_string_default = '/dev/serial0'
connection_baudrate_default = 921600
landing_target_msg_hz_default = 20
# Most recent rangefinder reading, in meters (updated in the main loop).
range_data = 0
# Timestamp (UNIX Epoch time or time since system boot)
current_time = 0
# DroneKit Vehicle handle; populated by vehicle_connect().
vehicle = None
# Tri-state flag shared with the scheduler job: None = no detection yet,
# True/False = last frame did/did not contain the landing tag.
is_landing_tag_detected = None
#######################################
# Parsing the user's inputs
#######################################
parser = argparse.ArgumentParser(description='ArduPilot AprilTag Landing')
parser.add_argument('--camera_resolution', type=float,
                    help="Update the resoultion of captured image for landing pad detection. Higher resolution will slow down processing speed")
parser.add_argument('--connect',
                    help="Vehicle connection target string. If not specified, a default string will be used.")
parser.add_argument('--baudrate', type=float,
                    help="Vehicle connection baudrate. If not specified, a default value will be used.")
parser.add_argument('--landing_target_msg_hz', type=float,
                    help="Update frequency for LANDING_TARGET message. If not specified, a default value will be used.")
args = parser.parse_args()
# Each of these is None when the corresponding flag was omitted; the
# blocks below substitute the defaults in that case.
camera_resolution = args.camera_resolution
connection_string = args.connect
connection_baudrate = args.baudrate
landing_target_msg_hz = args.landing_target_msg_hz
######################################
# Camera Resolution Setup
######################################
# Resolve the capture resolution: only 720 selects 1280x720; anything else
# (including an omitted flag) falls back to 640x480.
if not camera_resolution:
    camera_size=[640,480]
    print("INFO: Using Camera Resultion as", camera_size[1])
else:
    if camera_resolution == 720:
        camera_size=[1280,720]
    else:
        camera_size=[640,480]
    print("INFO: Using Camera Resultion as", camera_size[1])
# Load the camera intrinsics calibrated for the chosen resolution.
if camera_size[1] == 720:
    camera_matrix = np.loadtxt('cameraMatrix_720.txt', delimiter=',')
elif camera_size[1] == 480:
    camera_matrix = np.loadtxt('cameraMatrix_480.txt', delimiter=',')
else:
    print('Error Loading Camera Matrix')
    exit()
# Substitute defaults for any connection parameters not given on the CLI.
if not connection_string:
    connection_string = connection_string_default
    print("INFO: Using default connection_string", connection_string)
else:
    print("INFO: Using connection_string", connection_string)
if not connection_baudrate:
    connection_baudrate = connection_baudrate_default
    print("INFO: Using default connection_baudrate", connection_baudrate)
else:
    print("INFO: Using connection_baudrate", connection_baudrate)
if not landing_target_msg_hz:
    landing_target_msg_hz = landing_target_msg_hz_default
    print("INFO: Using default landing_target_msg_hz", landing_target_msg_hz)
else:
    print("INFO: Using landing_target_msg_hz", landing_target_msg_hz)
#######################################
# AprilTag detector setup
#######################################
at_detector = Detector(searchpath=['apriltags'],
                       families='tag36h11',
                       nthreads=1,
                       quad_decimate=1.0,
                       quad_sigma=0.0,
                       refine_edges=1,
                       decode_sharpening=0.25,
                       debug=0)
def send_land_message_v1():
    """Scheduler job: send one LANDING_TARGET MAVLink message if a tag is visible.

    Reads the tag pose from the shared globals written by the main detection
    loop; does nothing when no landing tag has been detected.
    """
    global current_time, H_camera_tag, is_landing_tag_detected
    if is_landing_tag_detected != True:
        return
    # Tag position relative to the camera, from the homogeneous transform.
    tag_x = H_camera_tag[0][3]
    tag_y = H_camera_tag[1][3]
    tag_z = H_camera_tag[2][3]
    # Angular offsets of the target in the camera frame plus straight-line range.
    angle_x = m.atan(tag_x / tag_z)
    angle_y = m.atan(tag_y / tag_z)
    target_range = np.sqrt(tag_x * tag_x + tag_y * tag_y + tag_z * tag_z)
    msg = vehicle.message_factory.landing_target_encode(
        current_time,                        # time target data was processed
        0,                                   # target num, not used
        mavutil.mavlink.MAV_FRAME_BODY_NED,  # frame, not used
        angle_x,                             # X-axis angular offset, in radians
        angle_y,                             # Y-axis angular offset, in radians
        target_range,                        # distance, in meters
        0,                                   # target x-axis size, in radians
        0,                                   # target y-axis size, in radians
    )
    vehicle.send_mavlink(msg)
    vehicle.flush()
def vehicle_connect():
    """Attempt one connection to the flight controller.

    Sets the module-level `vehicle` on success.

    :return: True when `vehicle` is connected, False so the caller retries.
    :raises SystemExit: on Ctrl-C during the connection attempt.
    """
    global vehicle
    try:
        vehicle = connect(connection_string, wait_ready = True, baud = connection_baudrate, timeout=300)
    except KeyboardInterrupt:
        print("INFO: Exiting")
        # Fixed: the original called sys.exit() without ever importing sys,
        # which raised NameError instead of exiting cleanly.
        raise SystemExit
    except Exception:
        # Narrowed from a bare except; still best-effort so the caller's
        # retry loop keeps polling until the FCU responds.
        print('Connection error! Retrying...')
    return vehicle is not None
#######################################
# Camera Setup
#######################################
camera_params = [camera_matrix[0][0], camera_matrix[1][1], camera_matrix[0][2], camera_matrix[1][2]]
vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, camera_size[0])
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_size[1])
print("INFO: Connecting to vehicle.")
while (not vehicle_connect()):
pass
print("INFO: Vehicle connected.")
sched = BackgroundScheduler()
sched.add_job(send_land_message_v1, 'interval', seconds = 1/landing_target_msg_hz_default)
sched.start()
try:
while True:
range_data = vehicle.rangefinder.distance
print(range_data)
if range_data < 0.5:
tag_landing_id = 8
tag_landing_size = 0.0495 # tag's border size, measured in meter
else:
tag_landing_id = 5
tag_landing_size = 0.161 # tag's border size, measured in meter
ret, frame = vid.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
current_time = int(round(time.time() * 1000000))
tags = at_detector.detect(gray, estimate_tag_pose=True, camera_params=camera_params, tag_size=tag_landing_size)
if tags != []:
for tag in tags:
# Check for the tag that we want to land on
if tag.tag_id == tag_landing_id:
is_landing_tag_detected = True
H_camera_tag = tf.euler_matrix(0, 0, 0, 'sxyz')
H_camera_tag[0][3] = tag.pose_t[0]
H_camera_tag[1][3] = tag.pose_t[1]
H_camera_tag[2][3] = tag.pose_t[2]
print("INFO: Detected landing tag", str(tag.tag_id), " relative to camera at x:", H_camera_tag[0][3], ", y:", H_camera_tag[1][3], ", z:", H_camera_tag[2][3])
else:
print("INFO: No tag detected")
is_landing_tag_detected = False
except KeyboardInterrupt:
sched.shutdown()
vid.release()
vehicle.close()
print("INFO: KeyboardInterrupt has been caught. Cleaning up...")
|
{"hexsha": "25cca6124d479bfd570221187843af982411d111", "size": 7361, "ext": "py", "lang": "Python", "max_stars_repo_path": "Non_T265_Land.py", "max_stars_repo_name": "cloudtenno/autonomous_landing", "max_stars_repo_head_hexsha": "be91679a40f3ee74c9691925fd2b42f723ebf920", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Non_T265_Land.py", "max_issues_repo_name": "cloudtenno/autonomous_landing", "max_issues_repo_head_hexsha": "be91679a40f3ee74c9691925fd2b42f723ebf920", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Non_T265_Land.py", "max_forks_repo_name": "cloudtenno/autonomous_landing", "max_forks_repo_head_hexsha": "be91679a40f3ee74c9691925fd2b42f723ebf920", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6118721461, "max_line_length": 178, "alphanum_fraction": 0.6005977449, "include": true, "reason": "import numpy", "num_tokens": 1584}
|
import piexif
import piexif.helper
import PIL
from PIL import Image,ImageChops
from PIL.ExifTags import TAGS
import os
from DateTime import DateTime
import datetime
import pandas as pd
import numpy
import math
import csv
import sys
#sys.path.insert(0,'C:\F_archive\easystore\Python_Programs')
sys.path.insert(0,'F:\Python_Programs')
import my_modules as my_modules
from my_modules import get_directory_name as get_directory_name
from my_modules import get_file_name as get_file_name
from my_modules import save_file_name as save_file_name
from my_modules import get_exif as get_exif
from my_modules import get_gps_deg as get_gps_deg
from my_modules import get_r_of_phi as get_r_of_phi
from my_modules import calculate_distance as calculate_distance
from my_modules import date_to_nth_day as date_to_nth_day
from my_modules import calculate_sun_angle as calculate_sun_angle
from my_modules import date_to_nth_day as date_to_nth_day
# Camera/lens parameters; defaults are for the Mavic Pro, overridden below.
crop_factor = 26.0
FOV = 78.8 # Mavic Pro camera field of view, 78.8 deg
DRONE = 'Phantom'
if DRONE == 'Phantom':
    FOV = 84.8 # Phantom 4 camera field of view, 84.8 deg
    crop_factor = 24.0
EarthMeanRadius = 6371.01 # In km
AstronomicalUnit = 149597890 # In km
def find_closest_waypoint(GPS,latitude,longitude,altitude):
    """Return (index, offset) of the mission waypoint nearest to GPS.

    Waypoints at the stepover altitude are skipped.  When no waypoint lies
    within MIN_DISTANCE_THRESHOLD, the index is returned negated to signal
    an error to the caller; `offset` is the [d0, d1] pair from
    calculate_distance for the winning waypoint.
    """
    MIN_DISTANCE_THRESHOLD = 200.0
    STEPOVER_ALTITUDE = 200.0
    ALTITUDE_TOLERANCE = 5.0 # Arbitrarily set at 5 feet
    min_distance = 2.0*MIN_DISTANCE_THRESHOLD
    closest_distance = [min_distance,min_distance]
    closest_waypoint = -1 # Negative return implies error
    for idx in range(1,len(latitude)):
        # Skip stepover waypoints, identified purely by their altitude.
        if abs(float(altitude[idx])-float(STEPOVER_ALTITUDE)) <= float(ALTITUDE_TOLERANCE):
            continue
        candidate = [float(latitude[idx]),float(longitude[idx]),float(altitude[idx])]
        offset = calculate_distance(candidate,GPS)
        separation = math.sqrt(offset[0]*offset[0] + offset[1]*offset[1])
        if separation < min_distance:
            min_distance = separation
            closest_waypoint = idx
            closest_distance = offset
    if min_distance > MIN_DISTANCE_THRESHOLD:
        # Nothing was close enough: flag the result by negating the index.
        closest_waypoint = -abs(closest_waypoint)
    return (closest_waypoint,closest_distance)
def log_images():
    """Build a per-image flight log (CSV) for one survey sortie.

    Interactively asks for the image directory, the mission waypoint file and
    an output directory, then for every image: reads EXIF GPS/time, computes
    elapsed flight time and path length, matches the image to its nearest
    waypoint, estimates the drone-shadow position in the frame from the sun
    angle, and finally writes all rows to a LogData_sortie_N.csv file.
    """
    # Get the directory containing image files
    print ('\n Browse for directory containg image files from mission: ')
    ImgDir = get_directory_name('Select Directory with image files')
    # Get the directory to hold reformatted and renamed files
    print ('\n Browse for directory to hold reformatted image files')
    ImgDir_reformatted = get_directory_name('Select directory to hold reformatted filles')
    # Get the waypoint path file
    print ('\n Browse for mission waypoint file: ')
    wpf = get_file_name('Select Waypoint file')
    waypoint_file = wpf.name
    print (' ' + waypoint_file)
    # Get the directory to hold mission meta data
    print ('\n Browse for directory to write mission meta data:')
    CSVDir = get_directory_name('Select directory to hold CSV file')
    #print (CSVDir)
    # Accumulate output rows; first two rows are a provenance line and a header.
    all_lines = []
    this_line = [ImgDir,waypoint_file,'\n']
    all_lines.append(this_line)
    this_line = ['File','DateTime','Flight Time','Path Length','Weigh Point','Camera GPS', 'Error Distance', 'Error W', 'Error H','Shadow W', 'Shadow H','\n']
    all_lines.append(this_line)
    # Read the waypoint file (CSV columns: lat, lon, alt, heading).
    longitude = []
    latitude = []
    altitude = []
    heading = []
    k = 0
    with open(waypoint_file,'r') as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            latitude.append(line[0])
            longitude.append(line[1])
            altitude.append(line[2])
            heading.append(line[3])
##            print ('longitude = ',longitude[k], ' latitude = ',latitude[k],
##                    'altitude = ', altitude[k],'heading = ', heading[k])
            k += 1
    k = 0
    csvfile.close()
    # Derive the sortie number from the last path component ("sortie_N").
    base_file_name = ImgDir.split('/')
    print ('length of base file name',len(base_file_name))
    try:
        sortie = base_file_name[len(base_file_name)-1]
        sortie_number = sortie.split('_')
        sortie_number = int(sortie_number[1])
        print ( 'sortie = ',sortie,'sortie_number', sortie_number)
    except:
        sortie_number = 0
        print ( 'Error asigning sortie number sortie = ',sortie,'sortie_number', sortie_number)
    print(base_file_name)
    # NOTE(review): each of the next three assignments starts again from
    # ImgDir, so only the last replace ("Images/") takes effect — confirm.
    base_file = ImgDir.replace(base_file_name[0],"")
    base_file = ImgDir.replace("/","")
    base_file = ImgDir.replace("Images/","")
    base_file = base_file.replace(sortie,"")
    #base_file = base_file_name[2] + '_' + base_file_name[4] +'_' + base_file_name[5]
    base_file = base_file + 'sortie_' + str(sortie_number)
    print ('base_file',base_file)
    CSVfile = CSVDir + '/' + 'base_file' + '.csv'
    print ('CSVfile',CSVfile)
    #F:\Adams_Farm\DT07042019\Images\PS500mils\sortie_1
    # Main per-image loop; k counts images and indexes into the waypoint lists.
    for filenames in os.listdir(ImgDir):
        if k >= len(longitude)-1:
            break
        fn = ImgDir + '/' + filenames
        im = PIL.Image.open(fn)
        #Image_list.append(im)
        exf = get_exif(fn) # exf is the image metadata
        GPS = get_gps_deg(exf) # GPS coordinate of the camera
        dt = exf['DateTimeDigitized'] # UTC date and time
        t = DateTime(dt)
        print ('********** ', filenames, ' **********')
        if (k == 0): # initialization on the first image
            start_time = t
            old_time = start_time
            start_gps = GPS
            old_gps = start_gps
            seconds = 0
            path_length = 0
            altitude_bias = GPS[2] - float(altitude[1]) # possible positioning error
        if (k == 1):
            altitude_bias = GPS[2] - float(altitude[2]) # possible positioning error
        # Elapsed time since the previous image, in seconds.
        delta_time = 3600.0*(DateTime.hour(t) - DateTime.hour(old_time))
        delta_time += 60.0*(DateTime.minute(t) - DateTime.minute(old_time))
        delta_time += DateTime.second(t) - DateTime.second(old_time)
        seconds += delta_time # total flight time in seconds
#        print 'Time: ', t, 'Delta Time: ' , delta_time , ' Total Flite Time (seconds): ', seconds
        old_time = t
        # Distance flown since the previous image; accumulate the path length.
        distance_s = calculate_distance(GPS,old_gps)
        dist = math.sqrt(distance_s[0]*distance_s[0] + distance_s[1]*distance_s[1])
        path_length += dist
        old_gps = GPS
        # Match this image to its nearest mission waypoint.
        (closest_waypoint,error_dist) = find_closest_waypoint(GPS,latitude,longitude,altitude)
        #print ("error_dist \n",error_dist)
        error_distance = math.sqrt(error_dist[0]*error_dist[0] + error_dist[1]*error_dist[1])
        #starting_day_of_current_year = datetime().date().replace(month=1, day=1)
        # Project the drone's shadow offset on the ground from the sun angle.
        solar_angle = calculate_sun_angle(GPS,t)
        height = (GPS[2] - altitude_bias)
        ds_0 = 0.
        ds_1 = 0.
        if ((math.tan(solar_angle[0]) != 0) & (abs(solar_angle[0]) != math.pi/2)):
            ds_0 = height/math.tan(solar_angle[0])
#            print 'GPS[2] = ',GPS[2],' solar_angle[0} ',solar_angle[0],' solar_angle[1} ',solar_angle[1]
            ds_1 = -ds_0*math.sin(solar_angle[1])
            ds_0 = -ds_0*math.cos(solar_angle[1])
        delta_shadow = [ds_0,ds_1]
##        print('****delta shadow *****',delta_shadow)
        # Ground-sample scale (pixels per foot) from height, FOV and crop factor.
        pixel_size = 0
        if height != 0:
            pixel_size = abs(4000./(2.0*height*math.tan((math.pi/180.)*FOV/2.0)))
            pixel_size = (35./crop_factor)*pixel_size
#        print 'pixels per foot',pixel_size
        # Shadow position in pixels, relative to the image center (2000, 1500).
        H = 1500 - int(pixel_size*ds_1)
        W = 2000 - int(pixel_size*ds_0)
        # Progress report for this image.
        print("Time: {0:s},Delta Time: {1:5.2f}, Total Flite Time (seconds): {2:5.2f}"
              .format(datetime.datetime.now().time(),delta_time,seconds))
        print("Camera GPS {0:10.6f} {1:10.6f} {2:10.6f} Distance: {3:5.2f} Path Length:{4:5.2f}"
              .format( GPS[0],GPS[1],GPS[2],dist,path_length))
        lat = float(latitude[closest_waypoint])
        long = float(longitude[closest_waypoint])
        alt = float(altitude[closest_waypoint])
        ht = alt - float(altitude_bias)
        print("Way Point {0:3d}: {1:10.6f} {2:10.6f} error distance = {3:4.2f} feet height = {4:4.2f} feet"
              .format(closest_waypoint,lat,long,error_distance,alt))
        print ("pixels per foot {0:5.3f}".format(pixel_size))
        print ("height {0:5.3f} delta shadow {1:8.3f} {2:8.3f}(feet) shadow location (pixels) {3:5d} {4:5d}"
               .format(height,delta_shadow[0],delta_shadow[1],-W,H))
        # Waypoint-error expressed in pixels, then append the CSV row.
        error_W = int(error_dist[1]*pixel_size)
        error_H = int(error_dist[0]*pixel_size)
        this_line = [filenames,t,seconds, path_length,closest_waypoint,GPS,error_distance,error_W,error_H,W,H,'\n']
        all_lines.append(this_line)
        k += 1
####        Reformat and copy File
##        exif_dict = piexif.load(im.info["exif"])
##        hdg = float(heading[closest_waypoint])
##        if hdg > 0:
##            hdg = int(hdg+0.5)
##        else:
##            hdg = int(hdg-0.5)
##
##        sfn = base_file + '_' + filenames
##        sfn = ImgDir_reformatted + "/sortie_" + str(sortie_number) + "_" + filenames
##
##        ps = 12.0/pixel_size # pixel size in inches
##        ps = int(1000*ps)
##
##        a_b = int(1000*altitude_bias)
##
##        comment_string = "Sortie?" + str(sortie_number) + "_Heading?" + str(hdg)
##        comment_string = comment_string + "_altitude_bias?(" + str(a_b) + ",1000)"
##        comment_string = comment_string + "_WindSpeed?TBD" + "_WindDirection?TBD"
##        comment_string = comment_string + "_UVindex?TBD" + "_PixelSize?(" + str(ps) + ",1000)"
####        print('comment_string: ',comment_string)
##        user_comment = piexif.helper.UserComment.dump(comment_string,encoding='ascii')
##        exif_dict["Exif"][piexif.ExifIFD.UserComment] = user_comment
##
##        exif_bytes = piexif.dump(exif_dict)
##
####        save as output_file
##        sfn_jpg = sfn
##        print(sfn_jpg)
##        if (abs(hdg) > 10.0):
##            print("heading {0:5.2f}".format(hdg))
##            im.rotated = im.rotate(-hdg,expand=True)
##            im.rotated.save(sfn_jpg,exif=exif_bytes)
##            im.rotated.close()
##        im.close()
    this_line = ['altitude_bias',altitude_bias]
    all_lines.append(this_line)
    ## Write flight log file (CSV)
    CSVfile = CSVDir + "/" + "LogData_" + "sortie_" + str(sortie_number) + '.csv'
    print ("altitude bias = {0:6.2f} feet".format(altitude_bias))
    with open(CSVfile,'w',newline ='\n') as f_csv:
        writer = csv.writer(f_csv)
        for this_line in all_lines:
            writer.writerow(this_line)
    print (' Done at last !!! \n')
    return
def main():
    """Run the image-logging workflow."""
    log_images()
    return


# Fixed: the original called main() unconditionally (so it also ran on
# import) and then guarded a second call with `__name__ == "main"`, which
# can never be true ("__main__" is the correct value).
if __name__ == "__main__":
    # execute only if run as a script
    main()
|
{"hexsha": "3dbf9f3bafcdb5f1e9fa0b10c4540749ba846d9e", "size": 11472, "ext": "py", "lang": "Python", "max_stars_repo_path": "reprocess_and save_images/logger_only.py", "max_stars_repo_name": "jcjumley/FlightPathBuilder", "max_stars_repo_head_hexsha": "487a358d2b213ccc4aa8db47b5b4ad0481d6c48a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reprocess_and save_images/logger_only.py", "max_issues_repo_name": "jcjumley/FlightPathBuilder", "max_issues_repo_head_hexsha": "487a358d2b213ccc4aa8db47b5b4ad0481d6c48a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reprocess_and save_images/logger_only.py", "max_forks_repo_name": "jcjumley/FlightPathBuilder", "max_forks_repo_head_hexsha": "487a358d2b213ccc4aa8db47b5b4ad0481d6c48a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5652173913, "max_line_length": 159, "alphanum_fraction": 0.6125348675, "include": true, "reason": "import numpy", "num_tokens": 3075}
|
// Copyright (c) 2007-2013 Hartmut Kaiser
// Copyright (c) 2011 Bryce Lelbach
// Copyright (c) 2008-2009 Chirag Dekate, Anshul Tandon
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(HPX_THREAD_HELPERS_NOV_15_2008_0504PM)
#define HPX_THREAD_HELPERS_NOV_15_2008_0504PM
#include <hpx/hpx_fwd.hpp>
#include <hpx/util/backtrace.hpp>
#include <hpx/util/date_time_chrono.hpp>
#include <hpx/util/move.hpp>
#include <hpx/exception.hpp>
#include <hpx/runtime/threads/thread_executor.hpp>
#include <boost/exception_ptr.hpp>
///////////////////////////////////////////////////////////////////////////////
namespace hpx { namespace threads
{
struct thread_init_data;
///////////////////////////////////////////////////////////////////////////
/// \brief Set the thread state of the \a thread referenced by the
/// thread_id \a id.
///
/// \param id [in] The thread id of the thread the state should
/// be modified for.
/// \param state [in] The new state to be set for the thread
/// referenced by the \a id parameter.
/// \param state_ex [in] The new extended state to be set for the
/// thread referenced by the \a id parameter.
/// \param priority
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \note If the thread referenced by the parameter \a id
/// is in \a thread_state#active state this function
/// schedules a new thread which will set the state of
/// the thread as soon as its not active anymore. The
/// function returns \a thread_state#active in this case.
///
/// \returns This function returns the previous state of the
/// thread referenced by the \a id parameter. It will
/// return one of the values as defined by the
/// \a thread_state enumeration. If the
/// thread is not known to the thread-manager the
/// return value will be \a thread_state#unknown.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT thread_state set_thread_state(thread_id_type const& id,
thread_state_enum state = pending,
thread_state_ex_enum stateex = wait_signaled,
thread_priority priority = thread_priority_normal,
hpx::error_code& ec = throws);
///////////////////////////////////////////////////////////////////////
/// \brief Set the thread state of the \a thread referenced by the
/// thread_id \a id.
///
/// Set a timer to set the state of the given \a thread to the given
/// new value after it expired (at the given time)
///
/// \param id [in] The thread id of the thread the state should
/// be modified for.
/// \param at_time
/// \param state [in] The new state to be set for the thread
/// referenced by the \a id parameter.
/// \param state_ex [in] The new extended state to be set for the
/// thread referenced by the \a id parameter.
/// \param priority
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT thread_id_type set_thread_state(thread_id_type const& id,
util::steady_time_point const& abs_time,
thread_state_enum state = pending,
thread_state_ex_enum stateex = wait_timeout,
thread_priority priority = thread_priority_normal,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// \brief Set the thread state of the \a thread referenced by the
/// thread_id \a id.
///
/// Set a timer to set the state of the given \a thread to the given
/// new value after it expired (after the given duration)
///
/// \param id [in] The thread id of the thread the state should
/// be modified for.
/// \param after_duration
/// \param state [in] The new state to be set for the thread
/// referenced by the \a id parameter.
/// \param state_ex [in] The new extended state to be set for the
/// thread referenced by the \a id parameter.
/// \param priority
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
inline thread_id_type set_thread_state(thread_id_type const& id,
    util::steady_duration const& rel_time,
    thread_state_enum state = pending,
    thread_state_ex_enum stateex = wait_timeout,
    thread_priority priority = thread_priority_normal,
    error_code& ec = throws)
{
    // Convert the relative timeout into an absolute deadline and delegate
    // to the steady_time_point overload of set_thread_state.
    util::steady_time_point const abs_time = rel_time.from_now();
    return set_thread_state(id, abs_time, state, stateex, priority, ec);
}
///////////////////////////////////////////////////////////////////////////
/// The function get_thread_description is part of the thread related API
/// allows to query the description of one of the threads known to the
/// thread-manager.
///
/// \param id [in] The thread id of the thread being queried.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function returns the description of the
/// thread referenced by the \a id parameter. If the
/// thread is not known to the thread-manager the return
/// value will be the string "<unknown>".
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT char const* get_thread_description(thread_id_type const& id,
error_code& ec = throws);
HPX_API_EXPORT char const* set_thread_description(thread_id_type const& id,
char const* desc = 0, error_code& ec = throws);
HPX_API_EXPORT char const* get_thread_lco_description(thread_id_type const& id,
error_code& ec = throws);
HPX_API_EXPORT char const* set_thread_lco_description(thread_id_type const& id,
char const* desc = 0, error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// The function get_thread_backtrace is part of the thread related API
/// allows to query the currently stored thread back trace (which is
/// captured during thread suspension).
///
/// \param id [in] The thread id of the thread being queried.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function returns the currently captured stack
/// back trace of the thread referenced by the \a id
/// parameter. If the thread is not known to the
/// thread-manager the return value will be the zero.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
#ifdef HPX_THREAD_MAINTAIN_FULLBACKTRACE_ON_SUSPENSION
HPX_API_EXPORT char const* get_thread_backtrace(
thread_id_type const& id, error_code& ec = throws);
HPX_API_EXPORT char const* set_thread_backtrace(
thread_id_type const& id, char const* bt = 0, error_code& ec = throws);
#else
HPX_API_EXPORT util::backtrace const* get_thread_backtrace(
thread_id_type const& id, error_code& ec = throws);
HPX_API_EXPORT util::backtrace const* set_thread_backtrace(
thread_id_type const& id, util::backtrace const* bt = 0, error_code& ec = throws);
#endif
///////////////////////////////////////////////////////////////////////////
/// The function get_thread_state is part of the thread related API. It
/// queries the state of one of the threads known to the thread-manager.
///
/// \param id [in] The thread id of the thread the state should
/// be modified for.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function returns the thread state of the
/// thread referenced by the \a id parameter. If the
/// thread is not known to the thread-manager the return
/// value will be \a terminated.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT thread_state get_thread_state(thread_id_type const& id,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// The function get_thread_phase is part of the thread related API.
/// It queries the phase of one of the threads known to the thread-manager.
///
/// \param id [in] The thread id of the thread the phase should
/// be modified for.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function returns the thread phase of the
/// thread referenced by the \a id parameter. If the
/// thread is not known to the thread-manager the return
/// value will be ~0.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT std::size_t get_thread_phase(thread_id_type const& id,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
// Return the number of the NUMA node the current thread is running on
HPX_API_EXPORT std::size_t get_numa_node_number();
///////////////////////////////////////////////////////////////////////////
/// Returns whether the given thread can be interrupted at this point.
///
/// \param id [in] The thread id of the thread which should be
/// queried.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function returns \a true if the given thread
/// can be interrupted at this point in time. It will
/// return \a false otherwise.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT bool get_thread_interruption_enabled(thread_id_type const& id,
error_code& ec = throws);
/// Set whether the given thread can be interrupted at this point.
///
/// \param id [in] The thread id of the thread which should
/// receive the new value.
/// \param enable [in] This value will determine the new interruption
/// enabled status for the given thread.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function returns the previous value of
/// whether the given thread could have been interrupted.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT bool set_thread_interruption_enabled(thread_id_type const& id,
bool enable, error_code& ec = throws);
/// Returns whether the given thread has been flagged for interruption.
///
/// \param id [in] The thread id of the thread which should be
/// queried.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function returns \a true if the given thread
/// was flagged for interruption. It will return
/// \a false otherwise.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT bool get_thread_interruption_requested(thread_id_type const& id,
error_code& ec = throws);
/// Flag the given thread for interruption.
///
/// \param id [in] The thread id of the thread which should be
/// interrupted.
/// \param flag [in] The flag encodes whether the thread should be
/// interrupted (if it is \a true), or 'uninterrupted'
/// (if it is \a false).
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT void interrupt_thread(thread_id_type const& id, bool flag,
error_code& ec = throws);
    /// \brief Convenience overload: always flags the given thread for
    ///        interruption (equivalent to calling the three-argument
    ///        overload with \a flag set to \a true).
    inline void interrupt_thread(thread_id_type const& id, error_code& ec = throws)
    {
        interrupt_thread(id, true, ec);
    }
///////////////////////////////////////////////////////////////////////////
/// Interrupt the current thread at this point if it was canceled. This
/// will throw a thread_interrupted exception, which will cancel the thread.
///
/// \param id [in] The thread id of the thread which should be
/// interrupted.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT void interruption_point(thread_id_type const& id,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// Return priority of the given thread
///
/// \param id [in] The thread id of the thread whose priority
/// is queried.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT threads::thread_priority get_thread_priority(
thread_id_type const& id, error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// Return stack size of the given thread
///
/// \param id [in] The thread id of the thread whose priority
/// is queried.
/// \param ec [in,out] this represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT std::ptrdiff_t get_stack_size(
thread_id_type const& id, error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
HPX_API_EXPORT void run_thread_exit_callbacks(thread_id_type const& id,
error_code& ec = throws);
HPX_API_EXPORT bool add_thread_exit_callback(thread_id_type const& id,
HPX_STD_FUNCTION<void()> const& f, error_code& ec = throws);
HPX_API_EXPORT void free_thread_exit_callbacks(thread_id_type const& id,
error_code& ec = throws);
#if defined(HPX_THREAD_MAINTAIN_LOCAL_STORAGE)
///////////////////////////////////////////////////////////////////////////
HPX_API_EXPORT std::size_t get_thread_data(thread_id_type const& id,
error_code& ec = throws);
HPX_API_EXPORT std::size_t set_thread_data(thread_id_type const& id,
std::size_t data, error_code& ec = throws);
#endif
/// Returns a non-null pointer to the executor which was used to create
/// the given thread.
///
/// \throws If <code>&ec != &throws</code>, never throws, but will set \a ec
/// to an appropriate value when an error occurs. Otherwise, this
/// function will throw an \a hpx#exception with an error code of
/// \a hpx#yield_aborted if it is signaled with \a wait_aborted.
/// If called outside of a HPX-thread, this function will throw
/// an \a hpx#exception with an error code of \a hpx::null_thread_id.
/// If this function is called while the thread-manager is not
/// running, it will throw an \a hpx#exception with an error code of
/// \a hpx#invalid_status.
///
HPX_API_EXPORT threads::executor get_executor(
thread_id_type const& id, error_code& ec = throws);
}}
namespace hpx { namespace this_thread
{
///////////////////////////////////////////////////////////////////////////
/// The function \a suspend will return control to the thread manager
/// (suspends the current thread). It sets the new state of this thread
/// to the thread state passed as the parameter.
///
/// \note Must be called from within a HPX-thread.
///
/// \throws If <code>&ec != &throws</code>, never throws, but will set \a ec
/// to an appropriate value when an error occurs. Otherwise, this
/// function will throw an \a hpx#exception with an error code of
/// \a hpx#yield_aborted if it is signaled with \a wait_aborted.
/// If called outside of a HPX-thread, this function will throw
/// an \a hpx#exception with an error code of \a hpx::null_thread_id.
/// If this function is called while the thread-manager is not
/// running, it will throw an \a hpx#exception with an error code of
/// \a hpx#invalid_status.
///
HPX_API_EXPORT threads::thread_state_ex_enum suspend(
threads::thread_state_enum state = threads::pending,
char const* description = "this_thread::suspend",
error_code& ec = throws);
/// The function \a suspend will return control to the thread manager
/// (suspends the current thread). It sets the new state of this thread
    /// to \a suspended and schedules a wakeup for this thread at the given
/// time.
///
/// \note Must be called from within a HPX-thread.
///
/// \throws If <code>&ec != &throws</code>, never throws, but will set \a ec
/// to an appropriate value when an error occurs. Otherwise, this
/// function will throw an \a hpx#exception with an error code of
/// \a hpx#yield_aborted if it is signaled with \a wait_aborted.
/// If called outside of a HPX-thread, this function will throw
/// an \a hpx#exception with an error code of \a hpx::null_thread_id.
/// If this function is called while the thread-manager is not
/// running, it will throw an \a hpx#exception with an error code of
/// \a hpx#invalid_status.
///
HPX_API_EXPORT threads::thread_state_ex_enum suspend(
util::steady_time_point const& abs_time,
char const* description = "this_thread::suspend",
error_code& ec = throws);
/// The function \a suspend will return control to the thread manager
/// (suspends the current thread). It sets the new state of this thread
    /// to \a suspended and schedules a wakeup for this thread after the given
/// duration.
///
/// \note Must be called from within a HPX-thread.
///
/// \throws If <code>&ec != &throws</code>, never throws, but will set \a ec
/// to an appropriate value when an error occurs. Otherwise, this
/// function will throw an \a hpx#exception with an error code of
/// \a hpx#yield_aborted if it is signaled with \a wait_aborted.
/// If called outside of a HPX-thread, this function will throw
/// an \a hpx#exception with an error code of \a hpx::null_thread_id.
/// If this function is called while the thread-manager is not
/// running, it will throw an \a hpx#exception with an error code of
/// \a hpx#invalid_status.
///
    inline threads::thread_state_ex_enum suspend(
        util::steady_duration const& rel_time,
        char const* description = "this_thread::suspend",
        error_code& ec = throws)
    {
        // Convert the relative duration into an absolute time point and
        // delegate to the absolute-time overload declared above.
        return suspend(rel_time.from_now(), description, ec);
    }
/// The function \a suspend will return control to the thread manager
/// (suspends the current thread). It sets the new state of this thread
    /// to \a suspended and schedules a wakeup for this thread after the given
/// time (specified in milliseconds).
///
/// \note Must be called from within a HPX-thread.
///
/// \throws If <code>&ec != &throws</code>, never throws, but will set \a ec
/// to an appropriate value when an error occurs. Otherwise, this
/// function will throw an \a hpx#exception with an error code of
/// \a hpx#yield_aborted if it is signaled with \a wait_aborted.
/// If called outside of a HPX-thread, this function will throw
/// an \a hpx#exception with an error code of \a hpx::null_thread_id.
/// If this function is called while the thread-manager is not
/// running, it will throw an \a hpx#exception with an error code of
/// \a hpx#invalid_status.
///
    inline threads::thread_state_ex_enum suspend(
        boost::uint64_t ms, char const* description = "this_thread::suspend",
        error_code& ec = throws)
    {
        // Interpret the raw integer as a count of milliseconds and forward
        // to the duration-based overload.
        return suspend(boost::chrono::milliseconds(ms), description, ec);
    }
}}
///////////////////////////////////////////////////////////////////////////////
// FIXME: the API functions below belong in the namespace hpx::threads
namespace hpx { namespace applier
{
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new \a thread using the given function as the work to
/// be executed.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes one argument (a
/// \a threads#thread_state_ex_enum) and returns a
/// \a threads#thread_state_enum.
    /// \param description [in] An optional string describing the newly created
/// thread. This is useful for debugging and logging
/// purposes as this string will be inserted in the logs.
/// \param initial_state [in] The thread state the newly created thread
/// should have. If this is not given it defaults to
/// \a threads#pending, which means that the new thread
/// will be scheduled to run as soon as it is created.
/// \param run_now [in] If this is set to `true` the thread object will
/// be actually immediately created. Otherwise the
/// thread-manager creates a work-item description, which
/// will result in creating a thread object later (if
/// no work is available any more). The default is to
/// immediately create the thread object.
/// \param priority [in] This is the priority the newly created HPX-thread
/// should be executed with. The default is \a
/// threads#thread_priority_normal. This parameter is not
/// guaranteed to be taken into account as it depends on
/// the used scheduling policy whether priorities are
/// supported in the first place.
/// \param os_thread [in] The number of the shepherd thread the newly
/// created HPX-thread should run on. If this is given it
/// will be no more than a hint in any case, mainly
/// because even if the HPX-thread gets scheduled on the
/// queue of the requested shepherd thread, it still can
/// be stolen by another shepherd thread. If this is not
/// given, the system will select a shepherd thread.
/// \param ec [in,out] This represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \returns This function will return the internal id of the newly created
/// HPX-thread or threads#invalid_thread_id (if run_now is set to
/// `false`).
///
/// \note The value returned by the thread function will be interpreted by
/// the thread manager as the new thread state the executed HPX-thread
/// needs to be switched to. Normally, HPX-threads will either return
/// \a threads#terminated (if the thread should be destroyed) or
/// \a threads#suspended (if the thread needs to be suspended because
/// it is waiting for an external event to happen). The external
/// event will set the state of the thread back to pending, which
/// will re-schedule the HPX-thread.
///
/// \throws invalid_status if the runtime system has not been started yet.
///
///
/// \note As long as \a ec is not pre-initialized to
/// \a hpx#throws this function doesn't
/// throw but returns the result code using the
/// parameter \a ec. Otherwise it throws an instance
/// of hpx#exception.
HPX_API_EXPORT threads::thread_id_type register_thread_plain(
threads::thread_function_type && func,
char const* description = 0,
threads::thread_state_enum initial_state = threads::pending,
bool run_now = true,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
threads::thread_stacksize stacksize = threads::thread_stacksize_default,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new \a thread using the given function as the work to
/// be executed.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes one argument (a
/// \a threads#thread_state_ex_enum). The thread will be
/// terminated after the function returns.
///
/// \note All other arguments are equivalent to those of the function
/// \a threads#register_thread_plain
///
HPX_API_EXPORT threads::thread_id_type register_thread(
util::unique_function_nonser<void(threads::thread_state_ex_enum)> && func,
char const* description = 0,
threads::thread_state_enum initial_state = threads::pending,
bool run_now = true,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
threads::thread_stacksize stacksize = threads::thread_stacksize_default,
error_code& ec = throws);
/// \brief Create a new \a thread using the given function as the work to
/// be executed. The work item can't be suspended when
/// executing.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes one argument (a
/// \a threads#thread_state_ex_enum). The thread will be
/// terminated after the function returns.
///
/// \note All other arguments are equivalent to those of the function
/// \a threads#register_thread_plain
///
HPX_API_EXPORT threads::thread_id_type register_non_suspendable_thread(
util::unique_function_nonser<void(threads::thread_state_ex_enum)> && func,
char const* description = 0,
threads::thread_state_enum initial_state = threads::pending,
bool run_now = true,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new \a thread using the given function as the work to
/// be executed.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes no arguments. The
/// thread will be terminated after the function returns.
///
/// \note All other arguments are equivalent to those of the function
/// \a threads#register_thread_plain
///
HPX_API_EXPORT threads::thread_id_type register_thread_nullary(
util::unique_function_nonser<void()> && func,
char const* description = 0,
threads::thread_state_enum initial_state = threads::pending,
bool run_now = true,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
threads::thread_stacksize stacksize = threads::thread_stacksize_default,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new \a thread using the given data.
///
/// \note This function is completely equivalent to the first overload
/// of threads#register_thread_plain above, except that part of the
/// parameters are passed as members of the threads#thread_init_data
/// object.
///
HPX_API_EXPORT threads::thread_id_type register_thread_plain(
threads::thread_init_data& data,
threads::thread_state_enum initial_state = threads::pending,
bool run_now = true, error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new work item using the given function as the
/// work to be executed. This work item will be used to create a
/// \a threads#thread instance whenever the shepherd thread runs out
/// of work only. The created work descriptions will be queued
/// separately, causing them to be converted into actual thread
/// objects on a first-come-first-served basis.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes one argument (a
/// \a threads#thread_state_ex_enum) and returns a
/// \a threads#thread_state_enum.
    /// \param description [in] An optional string describing the newly created
/// thread. This is useful for debugging and logging
/// purposes as this string will be inserted in the logs.
/// \param initial_state [in] The thread state the newly created thread
/// should have. If this is not given it defaults to
/// \a threads#pending, which means that the new thread
/// will be scheduled to run as soon as it is created.
/// \param priority [in] This is the priority the newly created HPX-thread
/// should be executed with. The default is \a
/// threads#thread_priority_normal. This parameter is not
/// guaranteed to be taken into account as it depends on
/// the used scheduling policy whether priorities are
/// supported in the first place.
/// \param os_thread [in] The number of the shepherd thread the newly
/// created HPX-thread should run on. If this is given it
/// will be no more than a hint in any case, mainly
/// because even if the HPX-thread gets scheduled on the
/// queue of the requested shepherd thread, it still can
/// be stolen by another shepherd thread. If this is not
/// given, the system will select a shepherd thread.
/// \param ec [in,out] This represents the error status on exit,
/// if this is pre-initialized to \a hpx#throws
/// the function will throw on error instead.
///
/// \note The value returned by the thread function will be interpreted by
/// the thread manager as the new thread state the executed HPX-thread
/// needs to be switched to. Normally, HPX-threads will either return
/// \a threads#terminated (if the thread should be destroyed) or
/// \a threads#suspended (if the thread needs to be suspended because
/// it is waiting for an external event to happen). The external
/// event will set the state of the thread back to pending, which
/// will re-schedule the HPX-thread.
///
/// \throws invalid_status if the runtime system has not been started yet.
///
HPX_API_EXPORT void register_work_plain(
threads::thread_function_type && func,
char const* description = 0, naming::address_type lva = 0,
threads::thread_state_enum initial_state = threads::pending,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
threads::thread_stacksize stacksize = threads::thread_stacksize_default,
error_code& ec = throws);
#if !defined(DOXYGEN)
HPX_API_EXPORT void register_work_plain(
threads::thread_function_type && func,
naming::id_type const& target, char const* description = 0,
naming::address_type lva = 0,
threads::thread_state_enum initial_state = threads::pending,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
threads::thread_stacksize stacksize = threads::thread_stacksize_default,
error_code& ec = throws);
#endif
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new work item using the given function as the
/// work to be executed.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes one argument (a
/// \a threads#thread_state_ex_enum). The thread will be
/// terminated after the function returns.
///
/// \note All other arguments are equivalent to those of the function
/// \a threads#register_work_plain
///
HPX_API_EXPORT void register_work(
util::unique_function_nonser<void(threads::thread_state_ex_enum)> && func,
char const* description = 0,
threads::thread_state_enum initial_state = threads::pending,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
threads::thread_stacksize stacksize = threads::thread_stacksize_default,
error_code& ec = throws);
/// \brief Create a new work item using the given function as the
/// work to be executed. The work item can't be suspended when
/// executing.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes one argument (a
/// \a threads#thread_state_ex_enum). The thread will be
/// terminated after the function returns.
///
/// \note All other arguments are equivalent to those of the function
/// \a threads#register_work_plain
///
HPX_API_EXPORT void register_non_suspendable_work(
util::unique_function_nonser<void(threads::thread_state_ex_enum)> && func,
char const* description = 0,
threads::thread_state_enum initial_state = threads::pending,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new work item using the given function as the
/// work to be executed.
///
/// \param func [in] The function to be executed as the thread-function.
/// This function has to expose the minimal low level
/// HPX-thread interface, i.e. it takes no arguments. The
/// thread will be terminated after the function returns.
///
/// \note All other arguments are equivalent to those of the function
/// \a threads#register_work_plain
///
HPX_API_EXPORT void register_work_nullary(
util::unique_function_nonser<void()> && func,
char const* description = 0,
threads::thread_state_enum initial_state = threads::pending,
threads::thread_priority priority = threads::thread_priority_normal,
std::size_t os_thread = std::size_t(-1),
threads::thread_stacksize stacksize = threads::thread_stacksize_default,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// \brief Create a new work item using the given function as the
/// work to be executed.
///
/// \note This function is completely equivalent to the first overload
/// of threads#register_work_plain above, except that part of the
/// parameters are passed as members of the threads#thread_init_data
/// object.
///
HPX_API_EXPORT void register_work_plain(
threads::thread_init_data& data,
threads::thread_state_enum initial_state = threads::pending,
error_code& ec = throws);
///////////////////////////////////////////////////////////////////////////
/// The \a create function initiates the creation of a new
/// component instance using the runtime_support as given by targetgid.
/// This function is non-blocking as it returns a \a lcos#future. The
/// caller of this create is responsible to call
/// \a lcos#future#get to obtain the result.
///
/// \param targetgid
/// \param type
/// \param count
///
/// \returns The function returns a \a lcos#future instance
    ///             returning the global id of the newly created
/// component when used to call get.
///
/// \note For synchronous operation use the function
/// \a threads#create_sync.
HPX_API_EXPORT lcos::future<naming::id_type>
create(naming::id_type const& targetgid,
components::component_type type, std::size_t count = 1);
///////////////////////////////////////////////////////////////////////////
/// The \a create_sync function creates a new component instance using the
/// \a runtime_support as given by targetgid. This function is blocking
/// for the component to be created and until the global id of the new
/// component has been returned.
///
/// \param targetgid
/// \param type
/// \param count
///
/// \returns The function returns the global id of the newly created
/// component.
///
/// \note For asynchronous operation use the function
/// \a threads#create.
HPX_API_EXPORT naming::id_type create_sync(naming::id_type const& targetgid,
components::component_type type, std::size_t count = 1);
}}
///////////////////////////////////////////////////////////////////////////////
namespace hpx { namespace threads
{
    // Import all thread creation functions into this name space (we will
    // deprecate the functions in namespace applier above at some point).
    // NOTE(review): the register_non_suspendable_thread/work overloads
    // declared in namespace applier are not re-exported here -- confirm
    // whether that omission is intentional.
    using applier::register_thread_plain;
    using applier::register_thread;
    using applier::register_thread_nullary;
    using applier::register_work_plain;
    using applier::register_work;
    using applier::register_work_nullary;
}}
#endif
|
{"hexsha": "fd60b673a317bc8baa20de8aab6b33a1005f25e7", "size": 46232, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "hpx/runtime/threads/thread_helpers.hpp", "max_stars_repo_name": "Titzi90/hpx", "max_stars_repo_head_hexsha": "150fb0de1cfe40c26a722918097199147957b45c", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hpx/runtime/threads/thread_helpers.hpp", "max_issues_repo_name": "Titzi90/hpx", "max_issues_repo_head_hexsha": "150fb0de1cfe40c26a722918097199147957b45c", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hpx/runtime/threads/thread_helpers.hpp", "max_forks_repo_name": "Titzi90/hpx", "max_forks_repo_head_hexsha": "150fb0de1cfe40c26a722918097199147957b45c", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.0183486239, "max_line_length": 90, "alphanum_fraction": 0.5693459076, "num_tokens": 9719}
|
import numpy as np
from cv2 import VideoCapture,cvtColor,COLOR_BGR2GRAY,imread
class VideoSource:
    """Abstract base for frame providers.

    Subclasses are expected to override ``getFrame``, ``getFPS``,
    ``start`` and ``stop``; the base implementation supplies no frames
    and reports a zero size.
    """

    def __init__(self, name):
        # Human-readable identifier and (width, height) in pixels;
        # subclasses update ``size`` once the device/file is opened.
        self.name, self.size = name, (0, 0)

    def getFrame(self):
        """Return the next frame; the base class has none, so an empty list."""
        return []

    def getSize(self):
        """Return the (width, height) reported by this source."""
        return self.size

    def getName(self):
        """Return the identifier given at construction."""
        return self.name

    def getFPS(self):
        """Return the nominal frame rate; 0 for the base class."""
        return 0

    def start(self):
        """Acquire resources; no-op in the base class."""

    def stop(self):
        """Release resources; no-op in the base class."""
class CV2WebCamSource(VideoSource):
    """Video source backed by an OpenCV ``VideoCapture`` webcam device."""
    def __init__(self,name,camNumber):
        # Embed the device index in the source name so multiple cameras
        # remain distinguishable.
        VideoSource.__init__(self,name+"-CV2Cam"+str(camNumber))
        self.camNumber = camNumber
    def getFrame(self):
        # NOTE(review): the success flag ``ret`` is discarded, so a failed
        # read returns None -- confirm callers tolerate a None frame.
        ret, rawFrame = self.cap.read()
        return rawFrame
    def getFPS(self):
        # Hard-coded nominal rate; the device's actual FPS is not queried.
        return 28
    def start(self):
        # Open the device and cache its reported frame size
        # (capture property 3 = frame width, 4 = frame height).
        self.cap = VideoCapture(self.camNumber)
        self.size = (int(self.cap.get(3)),int(self.cap.get(4)))
    def stop(self):
        self.cap.release()
class StillImageSource(VideoSource):
    """Video source that serves a single image loaded from disk.

    ``getFrame`` returns the same decoded image on every call; ``getFPS``
    is 0 since the content never changes.
    """
    def __init__(self,name,fileName):
        VideoSource.__init__(self,name+"-StillImageSource-"+fileName)
        self.fileName = fileName
        self.image = None  # populated by start(); None until then
    def getFrame(self):
        # May be None if start() has not run or the file failed to decode.
        return self.image
    def getFPS(self):
        return 0
    def start(self):
        # cv2.imread returns None (without raising) when the file is
        # missing or cannot be decoded.
        self.image = imread(self.fileName)
        if self.image is not None:
            # Bug fix: previously self.size was never set, so getSize()
            # stayed (0, 0) after loading. Keep the same (width, height)
            # convention as CV2WebCamSource; ndarray shape is
            # (rows, cols[, channels]) -> (height, width).
            self.size = (self.image.shape[1], self.image.shape[0])
|
{"hexsha": "f143b8aa3bb3ce7420afe6a957bc7465173cd315", "size": 1469, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/VideoSources.py", "max_stars_repo_name": "wgaylord/HAMVideo", "max_stars_repo_head_hexsha": "b6d3494d67d52d8f3002271195505ab1ff40bdbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/VideoSources.py", "max_issues_repo_name": "wgaylord/HAMVideo", "max_issues_repo_head_hexsha": "b6d3494d67d52d8f3002271195505ab1ff40bdbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/VideoSources.py", "max_forks_repo_name": "wgaylord/HAMVideo", "max_forks_repo_head_hexsha": "b6d3494d67d52d8f3002271195505ab1ff40bdbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.9253731343, "max_line_length": 69, "alphanum_fraction": 0.5391422737, "include": true, "reason": "import numpy", "num_tokens": 320}
|
import sys
import os
import matplotlib as mpl
sys.path.append(os.path.abspath('./plot/'))
from option import *
import matplotlib.pyplot as plot
import matplotlib.ticker as ticker
import numpy as np
import csv
def autolabel(rects):
    """Annotate each bar in *rects* with its height, centered just above it.

    Relies on the module-level axes object ``ax`` being defined before use.
    """
    for bar in rects:
        h = bar.get_height()
        center_x = bar.get_x() + bar.get_width() / 2.
        ax.text(center_x, h + 0.01, '%0.1f' % float(h),
                ha='center', va='bottom')
# Number of encoding schemes compared (bar groups on the x-axis).
GROUP_NUM = 6
GROUP_NAMES = ["Single", "Double", "3-Grams", "4-Grams", "ALM", "ALM-Improved"]
# Number of sample-size categories (bars within each group).
GROUP_SIZE = 6
CATEGORY_NAMES = ["0.001%", "0.01%", "0.1%", "1%", "10%", "100%"]
# Each measurement is repeated this many times; the parse loop sums them and
# divides by REPEAT_TIME to average.
REPEAT_TIME = 3
# Records per repetition in the CSV: 34 = 5 categories for all 6 schemes plus
# the last category for only 4 schemes (the parse loop fills the final data
# row, which has GROUP_NUM - 2 slots).
ROUND_SIZE = ((GROUP_NUM - 2) * GROUP_SIZE + 2 * (GROUP_SIZE - 1))
CSV_FILE_PATH = "results/microbench/sample_size_sweep/cpr_url_sample_size_sweep.csv"
GRAPH_OUTPUT_PATH = "figures/microbench/sample_size_sweep/cpr_url_sample_size_sweep.pdf"
# One fill color per sample-size category, light to dark.
COLORS = ['#ffffff', '#fee8c8', '#fc8d59', '#d7301f', '#7f0000', '#4c0000']
Y_LABEL = "Compression Rate"
X_TICK_FONT_SIZE = 16
Y_TICK_FONT_SIZE = 16
# Divisor applied to every raw CSV value before plotting.
Y_MODIFIER = 1.0
LEGEND_FONT_SIZE = 18
LEGEND_POS = 'upper left'
GRAPH_HEIGHT = 4.5 #inches
GRAPH_WIDTH = 8.0 #inches
# Read the raw benchmark numbers and normalize each one by Y_MODIFIER.
f_in = open(CSV_FILE_PATH, 'r')
reader = csv.reader(f_in)
csvrows = list(reader)
csvdata = []
for row in csvrows :
    for item in row :
        csvdata.append((float(item) / Y_MODIFIER))
# data[i][j] accumulates the value for sample-size category i, scheme j.
# The last category row only has GROUP_NUM - 2 slots (see the parse loop).
data = []
for i in range(0, GROUP_SIZE - 1) :
    data.append([0] * GROUP_NUM)
data.append([0] * (GROUP_NUM - 2))
#[[0.01 s, 0.01 d,],[]]
#[0.01 s, 0.01 d, 0.01 3, 0.1 s]
for count, item in enumerate(csvdata):
    idx = count % ROUND_SIZE
    if (idx < (GROUP_SIZE - 1) * GROUP_NUM):
        # BUG FIX: must be floor division so the row index stays an int;
        # the original "idx / GROUP_NUM" raises TypeError on Python 3.
        data[idx // GROUP_NUM][idx % GROUP_NUM] += item
    else:
        # Trailing entries of each round belong to the last category, which
        # covers only the first GROUP_NUM - 2 schemes.
        group_id = 5
        offset = (idx - (GROUP_SIZE - 1) * GROUP_NUM)
        data[group_id][offset] += item
# Average the accumulated sums over the repeated runs.
for i in range(GROUP_SIZE) :
    if i < GROUP_SIZE - 1:
        for j in range(GROUP_NUM):
            data[i][j] = data[i][j] * 1.0 / REPEAT_TIME
    else:
        for j in range(GROUP_NUM - 2):
            data[i][j] = data[i][j] * 1.0 / REPEAT_TIME
#========================================================================================
# Font configuration so the output PDF embeds standard Type-1 fonts.
mpl.rcParams['ps.useafm'] = True
mpl.rcParams['pdf.use14corefonts'] = True
mpl.rcParams['text.usetex'] = False
mpl.rcParams['text.latex.preamble'] = [
       r'\usepackage{siunitx}',   # i need upright \micro symbols, but you need...
       r'\sisetup{detect-all}',   # ...this to force siunitx to actually use your fonts
       r'\usepackage{helvet}',    # set the normal font here
       r'\usepackage{sansmath}',  # load up the sansmath so that math -> helvet
       r'\sansmath'               # <- tricky! -- gotta actually tell tex to use!
]
#========================================================================================
# Width of one bar slot; the extra slots leave gaps between bar groups.
width = GRAPH_WIDTH / (ROUND_SIZE + GROUP_NUM + 1)
# BUG FIX: figsize must be a (width, height) tuple. The original passed the
# SET literal {GRAPH_HEIGHT, GRAPH_WIDTH}, whose iteration order is undefined.
fig = plot.figure(figsize=(GRAPH_WIDTH, GRAPH_HEIGHT))
ax = fig.add_subplot(111)
rect = []
# Draw one bar series per sample-size category across all schemes.
for i in range(0, GROUP_SIZE - 1) :
    pos = []
    for j in range(0, GROUP_NUM - 1) :
        pos.append(width + width * i + width * j * (GROUP_SIZE + 1))
    pos.append(width + width * i + width * (GROUP_NUM - 1) * (GROUP_SIZE) + 4 * width)
    rect.append(ax.bar(pos, data[i], width, color=COLORS[i], label=CATEGORY_NAMES[i]))
    # autolabel(rect[i])
# The last category exists only for the first GROUP_NUM - 2 schemes.
for i in range(GROUP_SIZE-1, GROUP_SIZE):
    pos = []
    for j in range(GROUP_NUM - 2):
        pos.append(width * (GROUP_SIZE - 1) + j * width * (GROUP_SIZE +1) + (i - GROUP_SIZE+2) * width)
    rect.append(ax.bar(pos, data[i], width, color=COLORS[i], label=CATEGORY_NAMES[i]))
    # autolabel(rect[i])
# Center one tick under each group of bars.
xtick_pos = []
for j in range(0, GROUP_NUM - 1) :
    xtick_pos.append(width + width * (GROUP_SIZE / 2.0) + width * j * (GROUP_SIZE + 1))
xtick_pos.append(width * (GROUP_SIZE / 2.0) + width * (GROUP_NUM - 1) * (GROUP_SIZE + 1))
ax.set_xticks(xtick_pos)
ax.set_xticklabels(GROUP_NAMES, rotation=20)
for label in ax.get_xticklabels():
    label.set_fontsize(X_TICK_FONT_SIZE)
y_ticks = [0, 1.0, 2.0, 3.0, 4.0]
ax.set_yticks(y_ticks)
ax.set_ylim(0, 4.0)
for label in ax.get_yticklabels():
    label.set_fontsize(Y_TICK_FONT_SIZE)
# NOTE(review): Y_LABEL_FONT_SIZE is not defined in this file; presumably
# supplied by `from option import *` -- confirm.
ax.set_ylabel(Y_LABEL, fontsize=Y_LABEL_FONT_SIZE)
ax.legend(loc=LEGEND_POS, ncol=2, prop={'size':LEGEND_FONT_SIZE})
outFile = GRAPH_OUTPUT_PATH
plot.savefig(outFile, bbox_inches='tight')
|
{"hexsha": "ec6db0bbe36499a9400d4501dc34797555fdbdd1", "size": 4357, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot/microbench/sample_size_sweep/cpr_url.py", "max_stars_repo_name": "XinYao1994/HOPE", "max_stars_repo_head_hexsha": "99b41b457b67d3e5d6dd182f8aa2ce4ea66e4a68", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 108, "max_stars_repo_stars_event_min_datetime": "2020-04-23T19:06:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T20:05:09.000Z", "max_issues_repo_path": "plot/microbench/sample_size_sweep/cpr_url.py", "max_issues_repo_name": "XinYao1994/HOPE", "max_issues_repo_head_hexsha": "99b41b457b67d3e5d6dd182f8aa2ce4ea66e4a68", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-07T05:58:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-07T05:58:57.000Z", "max_forks_repo_path": "plot/microbench/sample_size_sweep/cpr_url.py", "max_forks_repo_name": "XinYao1994/HOPE", "max_forks_repo_head_hexsha": "99b41b457b67d3e5d6dd182f8aa2ce4ea66e4a68", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-04-24T01:53:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T07:36:14.000Z", "avg_line_length": 30.6830985915, "max_line_length": 103, "alphanum_fraction": 0.6137250402, "include": true, "reason": "import numpy", "num_tokens": 1318}
|
# TODO Rework this whole file for new dataloader format
import os
import glob
import torch
import imageio
import numpy as np
import pickle as pkl
from .process_spin import process_spin_data
from .load_surreal import dilate_masks
from collections import OrderedDict
def read_3dhp_spin_data(data_path, subject='S1', ext_scale=0.001, bbox_res=224):
    """Read SPIN pose-estimation output for one MPI-3DHP subject.

    data_path: path to the SPIN output .h5 file.
    subject: subject directory name used to filter image paths (e.g. 'S1').
    ext_scale: scale factor forwarded to process_spin_data.
    bbox_res: bounding-box resolution used when running the pose estimator.
    Returns the dict produced by process_spin_data, extended with image
    paths, image resolution, ground-truth 3D keypoints, betas, and bboxes.
    """
    # Per-subject image resolutions -- presumably (height, width); confirm
    # against how 'hw' is consumed downstream.
    res_map = {
        'S1': (768, 768),
        'S2': (768, 768),
        'S3': (768, 768),
        'S4': (768, 768),
        'S5': (768, 1365),
        'S6': (768, 1365),
    }
    # NOTE(review): `dd` (deepdish) is used throughout this file but never
    # imported here -- confirm the missing `import deepdish as dd`.
    spin_data = dd.io.load(data_path)
    # Keep only the frames whose path mentions the requested subject.
    img_paths = []
    img_idxs = []
    for i, p in enumerate(spin_data['img_path']):
        if subject in p:
            img_paths.append(p)
            img_idxs.append(i)
    img_idxs = np.array(img_idxs)
    betas = torch.tensor(spin_data['pred_betas'][img_idxs]).cpu()
    joints = torch.cat([spin_data['pred_output'][i].joints for i in img_idxs]).cpu()
    rot_mats = torch.tensor(spin_data['pred_rot_mat'][img_idxs]).cpu()
    bboxes = torch.tensor(spin_data['bbox_params'][img_idxs]).cpu()
    # Older and newer SPIN exports name the camera key differently.
    try:
        cameras = torch.tensor(spin_data['pred_camera'][img_idxs]).cpu()
    except:
        cameras = torch.tensor(spin_data['pred_cam'][img_idxs]).cpu()
    img_res = res_map[subject]
    processed_ret = process_spin_data(betas, cameras, joints, rot_mats,
                                      bboxes, res=img_res, resized_res=bbox_res,
                                      ext_scale=ext_scale, scale_rest_pose=True,
                                      )
    processed_ret['img_path'] = np.array(img_paths)
    processed_ret['hw'] = img_res
    # S5/S6 keep their ground-truth 3D poses in a separate h5 file.
    if subject != 'S5' and subject != 'S6':
        processed_ret['gt_kp3d'] = spin_data['pose_3d'][img_idxs]
    else:
        h5_pose = os.path.join(os.path.dirname(data_path), 'MPI_SPIN_rect_output-maxmin.h5')
        pose_path = dd.io.load(h5_pose, '/img_path')
        pose_3d = dd.io.load(h5_pose, '/pose_3d')
        idxs = []
        for i, p in enumerate(pose_path):
            if subject in p:
                idxs.append(i)
        # for debugging
        # subject_idx = np.array(pose_path)[idxs]
        processed_ret['gt_kp3d'] = pose_3d[idxs]
    processed_ret['betas'] = spin_data['pred_betas'][img_idxs]
    processed_ret['bboxes'] = bboxes.numpy()
    return processed_ret
def extract_background(img_paths, data_path='data/mpi_3dhp/', subject='S1'):
    """Estimate a static background for a subject.

    Loads every frame whose path mentions *subject* and returns the
    per-pixel median over those frames.
    """
    frames = [
        imageio.imread(os.path.join(data_path, path))
        for path in img_paths
        if subject in path
    ]
    return np.median(np.array(frames), axis=0)
# TODO: rework this for new dataloader format
def process_3dhp_data(data_path, subject='S1', ext_scale=0.001, bbox_res=224,
                      extend_iter=5):
    '''
    Preprocess one MPI-3DHP subject: read SPIN estimates, load frames and
    masks from disk, and save everything into {subject}_processed.h5.

    :param data_path: path to h3.6m dataset root
    :param subject: subject directory
    :param ext_scale: to scale human poses and camera location
    :param bbox_res: resolution of bounding box when running the pose estimator
    :param extend_iter: extend mask to obtain sampling mask
    '''
    #spin_h5 = os.path.join(data_path, "MPI_SPIN_rect_output.h5")
    # S5/S6 use a different SPIN export and background location.
    if subject != 'S5' and subject != 'S6':
        spin_h5 = os.path.join(data_path, "MPI_SPIN_rect_output-maxmin.h5")
        bkgd = imageio.imread(os.path.join(data_path, f"{subject}_bkgd.png"))
    else:
        spin_h5 = os.path.join(data_path, "mpi_3dhp", "3DHP-S5S6.h5")
        bkgd = imageio.imread(os.path.join(data_path, "mpi_3dhp", f"{subject}_bkgd.png"))
    print(f"Read from {spin_h5}")
    """
    mask_h5 = os.path.join(data_path, f"{subject}_masks.h5")
    masks = dd.io.load(mask_h5)['masks']
    masks = masks[..., None]
    # mask is not perfect, dilate it a bit
    masks = dilate_masks(masks, extend_iter=2)[..., None]
    # dilate it more to get sampling mask
    sampling_masks = dilate_masks(masks, extend_iter=5)[..., None]
    H, W = masks.shape[1], masks.shape[2]
    """
    #bkgd = imageio.imread(os.path.join(data_path, f"{subject}_bkgd.png"))
    processed_est = read_3dhp_spin_data(spin_h5, subject, ext_scale, bbox_res=bbox_res)
    # Load each frame and its mask; masks live in a parallel "masks" folder.
    imgs, masks = [], []
    img_paths = processed_est['img_path']
    cam_idxs = np.zeros((len(img_paths),))
    for i in range(len(img_paths)):
        img = imageio.imread(os.path.join(data_path, img_paths[i]))
        mask = imageio.imread(os.path.join(data_path, img_paths[i].replace("/imageSequence/", "/masks/")))
        # Binarize: anything >= 2 is foreground.
        mask[mask < 2] = 0
        mask[mask >= 2] = 1
        #mask = masks[i] // 255
        #img = img * mask + (1 - mask) * bkgd
        imgs.append(img)
        masks.append(mask)
    masks = np.array(masks)[..., None]
    # Slightly dilated masks used for ray sampling around the subject.
    sampling_masks = dilate_masks(masks[..., 0], extend_iter=2)[..., None]
    # NOTE(review): np.array(len(imgs)) stores a 0-d scalar, not indices --
    # presumably np.arange(len(imgs)) was intended for 'val_idxs'; confirm.
    data = {'imgs': np.array(imgs).astype(np.uint8),
            'bkgd_idxs': np.array(cam_idxs),
            'train_idxs': np.arange(len(imgs)),
            'val_idxs': np.array(len(imgs)),
            'bkgds': bkgd[None],
            'masks': masks,
            'sampling_masks': sampling_masks,
            **processed_est}
    h5_name = f"{subject}_processed.h5"
    # NOTE(review): `dd` (deepdish) is never imported in this file -- confirm.
    dd.io.save(os.path.join(data_path, h5_name), data)
def load_3dhp_data(data_path, subject='S1'):
    """Load a preprocessed subject and split it into train/val dicts.

    data_path: directory containing {subject}_processed.h5 (produced by
        process_3dhp_data).
    Returns (train_data, val_data); validation uses every 9th frame.
    """
    data_h5 = os.path.join(data_path, f"{subject}_processed.h5")
    print(f"Reading from {data_h5}")
    data = dd.io.load(data_h5)
    imgs = data["imgs"]
    H, W = imgs.shape[1], imgs.shape[2]
    # First frame has no predecessor, so it is marked temporally invalid.
    temporal_validity = np.ones((len(imgs),))
    temporal_validity[0] = 0
    # Per-subject corrections tuned by hand.
    if subject == "S2":
        data["bkgds"][...] = 0 # background tone is too close to the subject..
    if subject == "S5":
        # Rescale camera translation -- factor 0.82 appears empirical.
        data["c2ws"][..., :3, -1] /= 0.82
        pass
    if subject == "S6":
        data["c2ws"][..., :3, -1] /= 0.82
        pass
    train_kp_dict = {
        "bone_poses": data["bones"],
        "cyls": data["cyls"],
        "img_pose_indices": np.arange(len(data["imgs"])),
        "kp3d": data["kp3d"],
        "sampling_masks": data["sampling_masks"],
        "skts": data["skts"],
        "rest_pose": data["rest_pose"],
        "n_views": len(data["imgs"]),
        "gt_kp3d": data["gt_kp3d"],
        # Estimated poses double as "ground truth" copies here.
        "gt_skts": data["skts"].copy(),
        "gt_bone_poses": data["bones"].copy(),
        "temporal_validity": temporal_validity,
        # Mean body shape across all frames, kept with a leading batch dim.
        "betas": data["betas"].mean(0)[None],
    }
    train_data = {
        "c2ws": data["c2ws"],
        "fg_masks": data["masks"],
        "hwf": (H, W, data["focals"]),
        "imgs": data["imgs"],
        "keypoint_dict": train_kp_dict,
        "bg_imgs": data["bkgds"],
        "bg_indices": data["bkgd_idxs"].astype(np.int32),
        "ext_scale": data["ext_scale"],
        "beta_full": data["betas"],
    }
    # Validation set: every 9th frame.
    val_idxs = np.arange(len(data["imgs"]))[::9]
    val_kp_dict = {
        "bone_poses": data["bones"][val_idxs],
        "cyls": data["cyls"][val_idxs],
        "img_pose_indices": val_idxs,
        "kp3d": data["kp3d"][val_idxs],
        "sampling_masks": data["sampling_masks"][val_idxs],
        "skts": data["skts"][val_idxs],
        "rest_pose": data["rest_pose"],
        "n_views": len(val_idxs),
    }
    val_data = {
        "c2ws": data["c2ws"][val_idxs],
        "fg_masks": data["masks"][val_idxs],
        "hwf": (H, W, data["focals"][val_idxs]),
        "imgs": data["imgs"][val_idxs],
        "keypoint_dict": val_kp_dict,
        "bg_imgs": data["bkgds"],
        "bg_indices": data["bkgd_idxs"][val_idxs].astype(np.int32),
        "ext_scale": data["ext_scale"],
    }
    return train_data, val_data
if __name__ == '__main__':
    import argparse

    # CLI entry point: preprocess one MPI-3DHP subject.
    parser = argparse.ArgumentParser(description='Arguments for masks extraction')
    parser.add_argument("-s", "--subject", type=str, default="S1",
                        help='subject to extract')
    parser.add_argument("-b", "--base_path", type=str, default="data/mpi_3dhp/",
                        help='path to dataset')
    args = parser.parse_args()
    process_3dhp_data(args.base_path, args.subject)
|
{"hexsha": "44879a7f4f4edf0c36c40c54d7dcd02a4eb34833", "size": 8216, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/load_3dhp.py", "max_stars_repo_name": "liruilong940607/A-NeRF", "max_stars_repo_head_hexsha": "19cb6c4fd389266214ac0d7215a44011cb1bebf5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 110, "max_stars_repo_stars_event_min_datetime": "2021-12-07T13:30:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:17:09.000Z", "max_issues_repo_path": "core/load_3dhp.py", "max_issues_repo_name": "liruilong940607/A-NeRF", "max_issues_repo_head_hexsha": "19cb6c4fd389266214ac0d7215a44011cb1bebf5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-12-20T10:04:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T18:15:41.000Z", "max_forks_repo_path": "core/load_3dhp.py", "max_forks_repo_name": "liruilong940607/A-NeRF", "max_forks_repo_head_hexsha": "19cb6c4fd389266214ac0d7215a44011cb1bebf5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2021-12-07T13:59:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T09:00:24.000Z", "avg_line_length": 35.4137931034, "max_line_length": 108, "alphanum_fraction": 0.5993184031, "include": true, "reason": "import numpy", "num_tokens": 2322}
|
#tests a post processed version
#!/usr/bin/env python
import numpy as np
from matplotlib import pyplot as plt
import pyefd
import Grasping
import cv2 as cv
import rospy
from std_msgs.msg import String
def find_current_grasp(image_path='test5.png'):
    """Compute a grasp point for the object silhouette in an image.

    image_path: path to the input image (grayscale is enforced on read).
        Defaults to 'test5.png' for backward compatibility with the
        original hard-coded file.
    Returns (xLoc, yLoc): the selected grasp location in image coordinates.

    Note: this opens several OpenCV/matplotlib windows and blocks on
    cv.waitKey, so it is interactive by design.
    """
    # Read inverted so the object appears bright on a dark background.
    img = cv.imread(image_path, 0)
    img = 255 - img
    # Morphological close (dilate then erode) to fill small holes.
    kernel = np.ones((15, 15), np.uint8)
    img = cv.dilate(img, kernel, 1)
    img = cv.erode(img, kernel, 1)
    t = 180
    # create binary image
    blur = cv.GaussianBlur(img, (5, 5), 0)
    (t, binary) = cv.threshold(blur, t, 255, cv.THRESH_BINARY)
    cv.imshow("output", binary)
    # find contours (3-value return: OpenCV 3.x API)
    (_, contours, _) = cv.findContours(binary, cv.RETR_EXTERNAL,
                                       cv.CHAIN_APPROX_NONE)
    # print table of contours and sizes
    print("Found %d objects." % len(contours))
    for (i, c) in enumerate(contours):
        print("\tSize of contour %d: %d" % (i, len(c)))
    # draw contours over original image
    cv.drawContours(img, contours, -1, (0, 0, 255), 5)
    # display original image with contours
    cv.namedWindow("output", cv.WINDOW_NORMAL)
    cv.imshow("output", img)
    cv.waitKey(0)
    edge = cv.Canny(img, 100, 200)
    _, cnts, _ = cv.findContours(edge.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
    cv.imshow('g', edge)
    cv.waitKey(0)
    # Keep only the largest contour by area; flatten it to an (N, 2) array.
    cnts = sorted(cnts, key=cv.contourArea, reverse=True)[:1]
    contour_1 = np.vstack(cnts[0]).squeeze()
    plt.imshow(img, plt.cm.gray)
    plt.plot(contour_1[:, 0], contour_1[:, 1])
    plt.show()
    numPts = 200
    order = 4
    # pre-calculate symbolic variables so we can solve numerically in the loop.
    px, py, zx, zy, nx, ny = pyefd.initEFDModel(order)
    # 1) calculate the EFD silhouette:
    locus = pyefd.calculate_dc_coefficients(contour_1)
    coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order)
    # 2) Build the grasping point model from silhouette data, and compute best grasp.
    P, N, Cbar = pyefd.generateEFDModel(coeffs, locus, numPts, px, py, zx, zy, nx, ny)
    pyefd.plot_efd(P, N, Cbar, img, contour_1, numPts)
    xLoc, yLoc = Grasping.GraspPointFiltering(numPts, P, N, Cbar)
    pyefd.finalPlot(P, xLoc, yLoc, img, contour_1, numPts)
    return xLoc, yLoc
if __name__ == '__main__':
    # Run the grasp-detection pipeline when executed as a script.
    find_current_grasp()
|
{"hexsha": "d51939c061667f076d677a0f175baad5335e5410", "size": 2600, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_pp.py", "max_stars_repo_name": "jpchiodini/Grasp-Planning", "max_stars_repo_head_hexsha": "e31234244b8f934743605ebea59d9d98a258957e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_pp.py", "max_issues_repo_name": "jpchiodini/Grasp-Planning", "max_issues_repo_head_hexsha": "e31234244b8f934743605ebea59d9d98a258957e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_pp.py", "max_forks_repo_name": "jpchiodini/Grasp-Planning", "max_forks_repo_head_hexsha": "e31234244b8f934743605ebea59d9d98a258957e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5882352941, "max_line_length": 86, "alphanum_fraction": 0.6453846154, "include": true, "reason": "import numpy", "num_tokens": 789}
|
# -*- coding: utf-8 -*-
"""
@author: salimt
"""
# Part B: Problem 3
# Bookmark this page
# Part B: Problem 3: Implementing a Simulation With Drugs
# 10.0/10.0 points (graded)
# In this problem, we consider the effects of both administering drugs to the patient and the ability of virus particle offsprings to
# inherit or mutate genetic traits that confer drug resistance. As the virus population reproduces, mutations will occur in the virus
# offspring, adding genetic diversity to the virus population. Some virus particles gain favorable mutations that confer resistance to
# drugs.
# ResistantVirus class
# In order to model this effect, we introduce a subclass of SimpleVirus called ResistantVirus. ResistantVirus maintains the state of a
# virus particle's drug resistances, and accounts for the inheritance of drug resistance traits to offspring. Implement the ResistantVirus
# class.
# Hint: reproduce function child resistances
# If you are really unsure about how to think about what each child resistances should be changed to, here is a different approach. If the
# probability mutProb is successful, the child resistance switches. Otherwise, the child resistance stays the same as the parent
# resistance.
# If you want to use numpy arrays, you should import numpy as np and use np.METHOD_NAME in your code.
# Enter your definition for the ResistantVirus class in this box.
# You'll enter your code for TreatedPatient on the next page.
#
# PROBLEM 3
#
class ResistantVirus(SimpleVirus):
    """
    Representation of a virus which can have drug resistance.
    """

    def __init__(self, maxBirthProb, clearProb, resistances, mutProb):
        """
        Initialize a ResistantVirus instance, saves all parameters as attributes
        of the instance.

        maxBirthProb: Maximum reproduction probability (a float between 0-1)
        clearProb: Maximum clearance probability (a float between 0-1).
        resistances: A dictionary of drug names (strings) mapping to the state
        of this virus particle's resistance (either True or False) to each drug.
        mutProb: Mutation probability for this virus particle (a float). This is
        the probability of the offspring acquiring or losing resistance to a drug.
        """
        SimpleVirus.__init__(self, maxBirthProb, clearProb)
        self.mutProb = mutProb
        self.resistances = resistances

    def getResistances(self):
        """
        Returns the resistances dictionary for this virus.
        """
        return self.resistances

    def getMutProb(self):
        """
        Returns the mutation probability for this virus.
        """
        return self.mutProb

    def isResistantTo(self, drug):
        """
        Get the state of this virus particle's resistance to a drug.

        drug: The drug (a string)
        returns: True if this virus instance is resistant to the drug, False
        otherwise (including drugs absent from the resistances dict).
        """
        # BUG FIX: the original returned None (not False) for drugs that were
        # not keys of self.resistances; .get with a default is always a bool.
        return self.resistances.get(drug, False)

    def reproduce(self, popDensity, activeDrugs):
        """
        Stochastically determines whether this virus particle reproduces at a
        time step. Called by the update() method in the TreatedPatient class.

        A virus particle only reproduces if it is resistant to ALL the drugs
        in activeDrugs, and then with probability
        self.maxBirthProb * (1 - popDensity).

        Each drug-resistance trait of the offspring independently inherits
        the parent's value with probability 1 - mutProb and flips with
        probability mutProb.

        popDensity: current virus population divided by the maximum population.
        activeDrugs: list of drug names acting on this virus particle.
        returns: a new ResistantVirus offspring with the same maxBirthProb,
        clearProb and mutProb. Raises NoChildException if this virus particle
        does not reproduce.
        """
        if not all(self.isResistantTo(drug) for drug in activeDrugs):
            raise NoChildException
        if random.random() <= self.maxBirthProb * (1 - popDensity):
            # BUG FIX: the original mutated the PARENT's resistances dict in
            # place and flipped every trait on a single random draw.  Each
            # trait must mutate independently, and only in the child.
            childResistances = {
                drug: (not resistant) if random.random() <= self.mutProb else resistant
                for drug, resistant in self.resistances.items()
            }
            return ResistantVirus(self.maxBirthProb, self.clearProb,
                                  childResistances, self.mutProb)
        raise NoChildException
|
{"hexsha": "0effa064b9cdcacbd04f0cfc25d88187e5b73275", "size": 6577, "ext": "py", "lang": "Python", "max_stars_repo_path": "MITx-6.00.2x/pset3-virus-patient-simulation/Problem-3-resistant-virus-with-drugs.py", "max_stars_repo_name": "FTiniNadhirah/Coursera-courses-answers", "max_stars_repo_head_hexsha": "d59311917b740a6ce8b8361e9ac79657b103bb75", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 73, "max_stars_repo_stars_event_min_datetime": "2020-08-26T03:03:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T17:35:47.000Z", "max_issues_repo_path": "MITx-6.00.2x/pset3-virus-patient-simulation/Problem-3-resistant-virus-with-drugs.py", "max_issues_repo_name": "FTiniNadhirah/Coursera-courses-answers", "max_issues_repo_head_hexsha": "d59311917b740a6ce8b8361e9ac79657b103bb75", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MITx-6.00.2x/pset3-virus-patient-simulation/Problem-3-resistant-virus-with-drugs.py", "max_forks_repo_name": "FTiniNadhirah/Coursera-courses-answers", "max_forks_repo_head_hexsha": "d59311917b740a6ce8b8361e9ac79657b103bb75", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 44, "max_forks_repo_forks_event_min_datetime": "2020-09-19T09:28:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T18:07:19.000Z", "avg_line_length": 45.3586206897, "max_line_length": 140, "alphanum_fraction": 0.6793370838, "include": true, "reason": "import numpy", "num_tokens": 1468}
|
import numpy as np
import cv2 as cv
import pandas as pd
import os
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import warnings
def _append_class_images(dados, directory, label, obj):
    '''
    Append every image found under *directory* to the dataset dict *dados*.

    Each image is read, converted to grayscale and flattened to a 1-D
    vector; the file path, human-readable label and numeric class are
    recorded alongside it.
    '''
    for archivo in os.listdir(directory):
        path = f"{directory}{os.sep}{archivo}"
        dados["ARCHIVO"].append(path)
        dados["ROTULO"].append(label)
        dados["OBJETO"].append(obj)
        img = cv.cvtColor(cv.imread(path), cv.COLOR_BGR2GRAY).flatten()
        dados["IMAGEN"].append(img)


def load_dataframe():
    '''
    Load a Pandas dataframe with the images for model training.

    Reads the "conmascara" (with mask, class 1) and "sinmascara"
    (without mask, class 0) image directories.
    '''
    dados = {
        "ARCHIVO": [],
        "ROTULO": [],
        "OBJETO": [],
        "IMAGEN": [],
    }
    # The two duplicated loops of the original are factored into one helper.
    _append_class_images(dados, f"imagenes{os.sep}conmascara", "Com mascara", 1)
    _append_class_images(dados, f"imagenes{os.sep}sinmascara", "Sem mascara", 0)
    return pd.DataFrame(dados)
def train_test(dataframe):
    '''
    Return feature vectors (X) and labels (y) from the dataframe.

    X comes from the "IMAGEN" column, y from "OBJETO"; no actual split
    is performed (the train_test_split call was left disabled).
    '''
    features = [image for image in dataframe["IMAGEN"]]
    labels = [target for target in dataframe["OBJETO"]]
    #train_test_split(X, y, train_size=0.40, random_state=13)
    return features, labels
def pca_model(X_train):
    '''
    Fit a 30-component PCA on the training images for feature extraction.
    '''
    model = PCA(n_components=30)
    model.fit(X_train)
    return model
def knn(X_train, y_train):
    '''
    Fit a grid-searched K-Nearest-Neighbors classifier.

    X_train: flattened grayscale image vectors.
    y_train: class labels (1 = with mask, 0 = without mask).
    Returns the fitted GridSearchCV wrapper (best estimator refit on all data).
    '''
    # BUG FIX: the original placed this call BEFORE the triple-quoted string,
    # which made that string a no-op statement instead of the docstring.
    warnings.filterwarnings("ignore")
    grid_params = {
        "n_neighbors": [2, 3, 5, 11, 19, 23, 29],
        "weights": ["uniform", "distance"],
        # BUG FIX: "manhattam" is not a valid sklearn metric name (typo for
        # "manhattan") and would make those grid candidates error out.
        "metric": ["euclidean", "manhattan", "cosine", "l1", "l2"]
    }
    knn_model = GridSearchCV(KNeighborsClassifier(), grid_params, refit=True)
    knn_model.fit(X_train, y_train)
    return knn_model
|
{"hexsha": "365087ee26f3df48c4b7518d3bc06c4331b0b946", "size": 2163, "ext": "py", "lang": "Python", "max_stars_repo_path": "functions.py", "max_stars_repo_name": "kbueso/Python", "max_stars_repo_head_hexsha": "a18a23bbf6ba3f214c2ed751a20348fe415c6dbe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "functions.py", "max_issues_repo_name": "kbueso/Python", "max_issues_repo_head_hexsha": "a18a23bbf6ba3f214c2ed751a20348fe415c6dbe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "functions.py", "max_forks_repo_name": "kbueso/Python", "max_forks_repo_head_hexsha": "a18a23bbf6ba3f214c2ed751a20348fe415c6dbe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0602409639, "max_line_length": 113, "alphanum_fraction": 0.6412390199, "include": true, "reason": "import numpy", "num_tokens": 600}
|
using Accumulo
using Base.Test
# Separator line used to frame section headers in the test output.
const SEP = "\n" * "="^20 * "\n"

# Print `message` framed above and below by the separator.
print_header(message) = println(SEP, message, SEP)
function print_table_config(session, tbl)
    # Fetch and display every configuration property of table `tbl`.
    cfg = table_config(session, tbl)
    println("Configuration of $tbl:")
    for (prop_name, prop_value) in cfg
        println("\t$prop_name => $prop_value")
    end
end
function print_table_iters(session, tbl)
    # List the iterators configured on `tbl` and, for every scope an
    # iterator is attached to, its properties.
    tbliters = iters(session, tbl)
    println("Iterators on $tbl: $tbliters")
    for (itername, scopes) in tbliters, scope in scopes
        props = iter(session, tbl, itername, scope)
        println("Iterator $itername on $tbl in scope $scope: $props")
    end
end
function print_info(session)
    # Dump server-wide information: table list, disk usage, then per-table
    # configuration, iterators and constraints.
    print_header("SERVER INFO")
    tbls = tables(session)
    println("Tables: $tbls")
    println("Disk updage of $tbls: $(table_du(session, tbls...))")
    for tbl in tbls
        println("Table: $tbl")
        println("===========================")
        println("Disk updage of $tbl: $(table_du(session, tbl))")
        print_table_config(session, tbl)
        print_table_iters(session, tbl)
        println("Constraints on $tbl: $(constraints(session, tbl))")
    end
end
function test_table_admin(session)
    # Exercise table create / rename / clone / delete, asserting table
    # existence after each step; cleanup runs even if an assertion fails.
    print_header("TABLE ADMIN")
    try
        println("creating mytable...")
        @test !table_exists(session, "mytable")
        table_create(session, "mytable")
        @test table_exists(session, "mytable")
        println("renaming mytable to mytable1...")
        table_rename(session, "mytable", "mytable1")
        @test table_exists(session, "mytable1")
        @test !table_exists(session, "mytable")
        println("cloning mytable1 to mytable2...")
        table_clone(session, "mytable1", "mytable2")
        @test table_exists(session, "mytable2")
        println("deleting mytable1...")
        table_delete(session, "mytable1")
        @test !table_exists(session, "mytable1")
        @test table_exists(session, "mytable2")
        println("deleting mytable2...")
        table_delete(session, "mytable2")
        @test !table_exists(session, "mytable2")
    finally
        # Best-effort cleanup so a failed assertion does not leak tables.
        for tbl in ("mytable", "mytable1", "mytable2")
            println("cleaning up $tbl...")
            table_exists(session, tbl) && table_delete(session, tbl)
        end
    end
end
function test_table_readwrite(session)
N = 10
TNAME = "mytable"
function _scan_verify(valpfx)
println("creating scanner...")
scanner(session, TNAME) do scan
println("\tstarting iteration...")
nrecs = 0
for rec in records(scan)
row = bytestring(rec.key.row)
colfam = bytestring(rec.key.colFamily)
colqual = bytestring(rec.key.colQualifier)
colvis = bytestring(rec.key.colVisibility)
val = bytestring(rec.value)
println("\trow($(row)), column($(colfam):$(colqual)), visibility($(colvis)), value($(val))")
rownum = parse(Int, row[4:end])
@test colfam == "colf"
@test colqual == "colq"
@test colvis == ""
if isempty(valpfx)
@test val == ""
else
@test val == "$valpfx$rownum"
end
nrecs += 1
end
println("\tfinished iteration ($nrecs records)...")
end
println("closed scanner")
end
function _create()
println("creating $TNAME...")
@test !table_exists(session, TNAME)
table_create(session, TNAME; versioning=false)
@test table_exists(session, TNAME)
table_versions!(session, TNAME, 1)
#print_table_config(session, TNAME)
#print_table_iters(session, TNAME)
end
function _write()
println("creating writer...")
batch_writer(session, TNAME) do writer
println("\tstarting batch...")
upd = batch()
println("\tcollecting mutations...")
for rownum in 1:N
update(upd, "row$rownum", "colf", "colq", "", "value$rownum")
end
println("\tupdating mutations...")
update(writer, upd)
println("\tflushing writer...")
flush(writer)
end
println("closed writer")
end
function _update()
println("creating conditional batch writer...")
conditional_batch_writer(session, TNAME) do writer
println("\tstarting batch...")
upd = conditional_batch()
println("\tcollecting mutations...")
for rownum in 1:N
row = "row$rownum"
val = "value$rownum"
update(where(upd, row, "colf", "colq"; value=val), "colf", "colq", "", "newvalue$rownum")
#update(where(upd, row, "colf", "colq", "public"; value=val), "colf", "colq", "public", "newvalue$rownum")
end
println("\tupdating mutations...")
res = update(writer, upd)
status_counts = Dict(ConditionalStatus.ACCEPTED => 0, ConditionalStatus.REJECTED => 0, ConditionalStatus.VIOLATED => 0, ConditionalStatus.UNKNOWN => 0, ConditionalStatus.INVISIBLE_VISIBILITY => 0)
for v in values(res)
status_counts[v] += 1
end
println("\tstatus counts: $status_counts")
println("\tflushing writer...")
flush(writer)
end
println("closed writer")
end
# Remove the colf:colq cell of every one of the N rows written by _write.
function _delete()
    println("creating writer for deletes...")
    batch_writer(session, TNAME) do writer
        println("\tstarting batch...")
        muts = batch()
        println("\tcollecting mutations...")
        for r in 1:N
            delete(muts, "row$r", "colf", "colq")
        end
        println("\tupdating mutations...")
        update(writer, muts)
        println("\tflushing writer...")
        flush(writer)
    end
    println("closed writer")
end
print_header("TABLE READ WRITE")
try
_create()
_write()
_scan_verify("value")
_update()
_scan_verify("newvalue")
_delete()
_scan_verify("")
finally
println("cleaning up...")
table_exists(session, TNAME) && table_delete(session, TNAME)
end
end
# Entry point: connect to a local Accumulo proxy with SASL plain auth,
# run both test suites, then close the session.
session = AccumuloSession("127.0.1.1", 42424, AccumuloAuthSASLPlain("root", "root"))
print_info(session)
test_table_admin(session)
test_table_readwrite(session)
close(session)
|
{"hexsha": "6113278507b11ac3a164cc861370ecc56c8fa856", "size": 6635, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_accumulo.jl", "max_stars_repo_name": "JuliaPackageMirrors/Accumulo.jl", "max_stars_repo_head_hexsha": "dc0d083bd2f89b470933221b7162155f9ff9577f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_accumulo.jl", "max_issues_repo_name": "JuliaPackageMirrors/Accumulo.jl", "max_issues_repo_head_hexsha": "dc0d083bd2f89b470933221b7162155f9ff9577f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_accumulo.jl", "max_forks_repo_name": "JuliaPackageMirrors/Accumulo.jl", "max_forks_repo_head_hexsha": "dc0d083bd2f89b470933221b7162155f9ff9577f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3658536585, "max_line_length": 208, "alphanum_fraction": 0.5650339111, "num_tokens": 1557}
|
# Velocity Calculation for the Robot based on MDH frames
# Introduction
# Berechnung der Geschwindigkeit von Koordinatensystemen und Schwerpunkten
#
# Dateiname:
# robot -> Berechnung für allgemeinen Roboter
# tree -> Berechnung für eine beliebige Baumstruktur (ohne Schleifen)
# floatb_twist -> floating base wird durch base twist (Geschwindigkeit der Basis) berücksichtigt
# rotmat -> Kinematik wird mit Rotationsmatrizen berechnet
# velocity -> Berechnung der Geschwindigkeit aller Segmente
# linkframe -> Berechnung der Geschwindigkeit im Körper-KS (KSi)
# par1 -> Parametersatz 1 (Schwerpunkt als Parameter: SX,SY,SZ)
# Authors
# Moritz Schappler, schappler@irt.uni-hannover.de, 2016-03
# (C) Institut fuer Regelungstechnik, Leibniz Universitaet Hannover
#
# Sources
# [GautierKhalil1990] Direct Calculation of Minimum Set of Inertial Parameters of Serial Robots
# [KhalilDombre2002] Modeling, Identification and Control of Robots
# [Ortmaier2014] Vorlesungsskript Robotik I (WS 2014/15)
# [Ott2008] Cartesian Impedance Control of Redundant and Flexible-Joint Robots
# Initialization
interface(warnlevel=0): # Suppress the following warning.
restart: # Issues a warning when started from terminal Maple via read.
interface(warnlevel=3):
with(LinearAlgebra):
with(ArrayTools):
with(codegen):
with(CodeGeneration):
with(StringTools):
# Code generation switches: codegen_opt is passed to MatlabExport below.
codegen_act := true:
codegen_opt := 2:
read "../helper/proc_convert_s_t":
read "../helper/proc_convert_t_s":
read "../helper/proc_MatlabExport":
read "../helper/proc_simplify2":
read "../transformation/proc_rotx":
read "../transformation/proc_roty":
read "../transformation/proc_rotz":
read "../transformation/proc_trotx":
read "../transformation/proc_troty":
read "../transformation/proc_trotz":
read "../transformation/proc_transl":
read "../transformation/proc_trafo_mdh":
# Robot environment definitions (presumably provides robot_name and the
# structure parameters NJ, NL, sigma, v used below -- set per robot).
read "../robot_codegen_definitions/robot_env":
printf("%s. Generiere Geschwindigkeit für %s (Herleitung im Körper-KS)\n", FormatTime("%Y-%m-%d %H:%M:%S"), robot_name):
read sprintf("../codeexport/%s/tmp/tree_floatb_definitions", robot_name):
read sprintf("../codeexport/%s/tmp/kinematic_constraints_maple_inert.m", robot_name):
kin_constraints_exist := kin_constraints_exist: # only used to estimate complexity
;
# Configure term simplification
if not assigned(simplify_options) or simplify_options(4)=-1 then # default settings:
  if not kin_constraints_exist then # ordinary serial chains and tree structures
    use_simplify := 0: # off by default
  else # with kinematic constraints
    use_simplify := 1: # apply simplify commands by default
  end if:
else # user-provided settings:
  use_simplify := simplify_options(4): # fourth entry is for velocity
end if:
# Load the results of the kinematics computation
read sprintf("../codeexport/%s/tmp/kinematics_floatb_%s_rotmat_maple.m", robot_name, base_method_name):
Trf := Trf:
Trf_c := Trf_c:
# Load the time derivatives of the MDH joint angles.
# The computation shall be done in one place only. See robot_tree_velocity_mdh_angles.mw.
read sprintf("../codeexport/%s/tmp/velocity_mdh_angles_maple.m", robot_name):
thetaD := thetaD:
# Calculate Velocities
# First assume fixed base model with base velocity and acceleration set to zero
# Define the initial velocity for the floating-base model
# Velocities of Frames
#
# Analogous to [Ortmaier2014], eq. (7.6)
# Linear velocity of the origin of each body frame (expressed in the body frame)
rD_i_i := Matrix(3, NL):
# Angular velocity of body i, expressed in the body frame
omega_i_i := Matrix(3, NL):
# Initial velocity of the base:
# twist: base velocity w.r.t. the world frame, expressed in the base frame
# eulxyz: V_base_t contains the base velocity in the world frame, expressed in the world frame. A rotation is therefore still required for a representation in the body frame.
if base_method_name = "twist" then:
rD_i_i(1..3,1) := V_base_t(1..3,1):
omega_i_i(1..3,1) := V_base_t(4..6,1):
end:
if base_method_name = "eulxyz" then:
rD_i_i(1..3,1) := Transpose(Trf_c(1..3, 1..3, 1)) . V_base_t(1..3,1):
omega_i_i(1..3,1) := Transpose(Trf_c(1..3, 1..3, 1)) . T_basevel . V_base_t(4..6,1):
end:
# Propagate the velocity to each body
# Consider only the joint-driven bodies here, not the base
for i from 1 to NJ do # loop over the joints
  # Body moved by joint angle i: body index i+1
  # Determine the predecessor body
  j := v(i) + 1:
  # Velocity of the predecessor; Trf_c(...,i+1) contains the z axis of the body frame that is moved by qi.
  # [Ortmaier2014] (7.7) (p. 115) (indices there are wrong for MDH), [KhalilDombre2002] (9.14)
  R_j_i := Trf(1..3,1..3,i): # rotation from the predecessor body (j) to this body (i+1)
  R_i_j := Transpose(R_j_i):
  # [GautierKhalil1988], equ.7: omega_jj from [GautierKhalil1988] corresponds to omega_i_i(1 .. 3, i+1) here
  if sigma(i) = 0 then # revolute joint
    omega_i_i(1 .. 3, i+1) := Multiply(R_i_j,Matrix(3,1,omega_i_i(1 .. 3, j))) + thetaD(i,1)*<0;0;1>:
  else: # prismatic joint
    omega_i_i(1 .. 3, i+1) := Multiply(R_i_j,Matrix(3,1,omega_i_i(1 .. 3, j)))
  end if:
  # Simplify terms (part 1)
  if use_simplify>=1 then
    tmp_t11:=time():
    tmp_l11 := length(omega_i_i(1 .. 3, i+1)):
    omega_i_i(1 .. 3, i+1) := simplify2(omega_i_i(1 .. 3, i+1)):
    tmp_l21 := length(omega_i_i(1 .. 3, i+1)):
    tmp_t21:=time():
  end if:
  # Vector from the origin of the previous frame to this frame
  r_j_j_i := Trf(1 .. 3, 4, i):
  # [GautierKhalil1988], equ.8: v_jj from [GautierKhalil1988] corresponds to rD_i_i(1 .. 3, i+1) here
  if sigma(i) = 0 then # revolute joint
    rD_i_i(1 .. 3, i+1) := Multiply( R_i_j, ( rD_i_i(1 .. 3, j) + CrossProduct(omega_i_i(1 .. 3, j), r_j_j_i) ) ):
  else: # prismatic joint
    rD_i_i(1 .. 3, i+1) := Matrix(Multiply( R_i_j, ( rD_i_i(1 .. 3, j) + CrossProduct(omega_i_i(1 .. 3, j), r_j_j_i) ) ) ) + Matrix(dD(i,1)*<0;0;1>):
  end if:
  printf("%s. Geschwindigkeit für Körperkoordinatensystem %d aufgestellt (Herleitung im Körper-KS).\n", \
  FormatTime("%Y-%m-%d %H:%M:%S"), i-1): # 0 = base
  # Simplify terms (part 2)
  if use_simplify>=1 then
    tmp_t12:=time():
    tmp_l12 := length(rD_i_i(1 .. 3, i+1)):
    rD_i_i(1 .. 3, i+1) := simplify2(rD_i_i(1 .. 3, i+1)):
    tmp_l22 := length(rD_i_i(1 .. 3, i+1)):
    tmp_t22:=time():
    printf("%s: Terme für Geschwindigkeiten vereinfacht. Länge: %d->%d / %d->%d. Rechenzeit %1.1fs und %1.1fs.\n", \
    FormatTime("%Y-%m-%d %H:%M:%S"), tmp_l11, tmp_l21, tmp_l12, tmp_l22, tmp_t21-tmp_t11, tmp_t22-tmp_t12):
  end if:
end do:
# Export
# Maple Export: store the symbolic expressions for reuse by later scripts
save omega_i_i, rD_i_i, sprintf("../codeexport/%s/tmp/velocity_linkframe_floatb_%s_maple.m", robot_name, base_method_name):
printf("%s. Maple-Ausdrücke exportiert.\n", FormatTime("%Y-%m-%d %H:%M:%S")):
# Matlab Export: generate Matlab code for both velocity matrices
if codegen_act then
  MatlabExport(convert_t_s(omega_i_i), sprintf("../codeexport/%s/tmp/velocity_omegaii_floatb_%s_linkframe_matlab.m", robot_name, base_method_name), codegen_opt):
  MatlabExport(convert_t_s(rD_i_i), sprintf("../codeexport/%s/tmp/velocity_rDii_floatb_%s_linkframe_matlab.m", robot_name, base_method_name), codegen_opt):
  printf("%s. Geschwindigkeiten in Matlab exportiert.\n", FormatTime("%Y-%m-%d %H:%M:%S")):
end if:
|
{"hexsha": "61b76bd6734d36a6b48a8745f86411f748aa4710", "size": 7261, "ext": "mpl", "lang": "Maple", "max_stars_repo_path": "robot_codegen_kinematics/robot_tree_floatb_rotmat_velocity_linkframe.mpl", "max_stars_repo_name": "SchapplM/robsynth-modelgen", "max_stars_repo_head_hexsha": "33b345ae0dd6ec4aa15499ab3d43edbbded0bea5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-25T07:31:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T09:54:50.000Z", "max_issues_repo_path": "robot_codegen_kinematics/robot_tree_floatb_rotmat_velocity_linkframe.mpl", "max_issues_repo_name": "SchapplM/robsynth-modelgen", "max_issues_repo_head_hexsha": "33b345ae0dd6ec4aa15499ab3d43edbbded0bea5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "robot_codegen_kinematics/robot_tree_floatb_rotmat_velocity_linkframe.mpl", "max_forks_repo_name": "SchapplM/robsynth-modelgen", "max_forks_repo_head_hexsha": "33b345ae0dd6ec4aa15499ab3d43edbbded0bea5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.0860927152, "max_line_length": 171, "alphanum_fraction": 0.7252444567, "num_tokens": 2507}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import sys
LOG_LEVEL_INDEX = sys.argv.index('--log_level') + 1 if '--log_level' in sys.argv else 0
DESIRED_LOG_LEVEL = sys.argv[LOG_LEVEL_INDEX] if 0 < LOG_LEVEL_INDEX < len(sys.argv) else '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = DESIRED_LOG_LEVEL
import absl.app
import numpy as np
import progressbar
import shutil
import tensorflow as tf
import tensorflow.compat.v1 as tfv1
import time
import pickle as pkl
from tensorflow.python.ops import gen_audio_ops as contrib_audio
tfv1.logging.set_verbosity({
'0': tfv1.logging.DEBUG,
'1': tfv1.logging.INFO,
'2': tfv1.logging.WARN,
'3': tfv1.logging.ERROR
}.get(DESIRED_LOG_LEVEL))
from datetime import datetime
from ds_ctcdecoder import ctc_beam_search_decoder, Scorer
from .evaluate import evaluate
from six.moves import zip, range
from .util.config import Config, initialize_globals
from .util.checkpoints import load_or_init_graph_for_training, load_graph_for_evaluation
from .util.evaluate_tools import save_samples_json
from .util.feeding import create_dataset, samples_to_mfccs, audiofile_to_features
from src.flags import create_flags, FLAGS
from .util.helpers import check_ctcdecoder_version, ExceptionBox
from .util.logging import create_progressbar, log_debug, log_error, log_info, log_warn
check_ctcdecoder_version()
# Graph Creation
# ==============
def variable_on_cpu(name, shape, initializer):
    r"""Create (or fetch, under variable reuse) a TF variable pinned to CPU memory.

    Keeping weights on the CPU device gives a single canonical copy when the
    graph is replicated across several GPU towers.
    """
    with tf.device(Config.cpu_device):
        cpu_var = tfv1.get_variable(name=name, shape=shape, initializer=initializer)
    return cpu_var
def create_overlapping_windows(batch_x):
    # Expand each time step of the feature sequence into a window of
    # 2*n_context+1 neighbouring frames using a 1-D convolution.
    # Output shape: [batch_size, n_windows, window_width, n_input].
    batch_size = tf.shape(input=batch_x)[0]
    window_width = 2 * Config.n_context + 1
    num_channels = Config.n_input
    # Create a constant convolution filter using an identity matrix, so that the
    # convolution returns patches of the input tensor as is, and we can create
    # overlapping windows over the MFCCs.
    eye_filter = tf.constant(np.eye(window_width * num_channels)
                             .reshape(window_width, num_channels, window_width * num_channels), tf.float32)  # pylint: disable=bad-continuation
    # Create overlapping windows
    batch_x = tf.nn.conv1d(input=batch_x, filters=eye_filter, stride=1, padding='SAME')
    # Remove dummy depth dimension and reshape into [batch_size, n_windows, window_width, n_input]
    batch_x = tf.reshape(batch_x, [batch_size, -1, window_width, num_channels])
    return batch_x
def dense(name, x, units, dropout_rate=None, relu=True):
    """Fully-connected layer: x @ W + b, optionally followed by a clipped
    ReLU (capped at FLAGS.relu_clip) and dropout."""
    with tfv1.variable_scope(name):
        b = variable_on_cpu('bias', [units], tfv1.zeros_initializer())
        w_init = tfv1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
        w = variable_on_cpu('weights', [x.shape[-1], units], w_init)
        out = tf.nn.bias_add(tf.matmul(x, w), b)
        if relu:
            # Clipped ReLU: min(relu(out), relu_clip)
            out = tf.minimum(tf.nn.relu(out), FLAGS.relu_clip)
        if dropout_rate is not None:
            out = tf.nn.dropout(out, rate=dropout_rate)
        return out
def rnn_impl_lstmblockfusedcell(x, seq_length, previous_state, reuse):
    # Fused LSTM block cell; the 'cudnn_lstm/...' scope and cell name keep its
    # variables compatible with the CuDNN checkpoint layout.
    with tfv1.variable_scope('cudnn_lstm/rnn/multi_rnn_cell/cell_0'):
        fw_cell = tf.contrib.rnn.LSTMBlockFusedCell(Config.n_cell_dim,
                                                    forget_bias=0,
                                                    reuse=reuse,
                                                    name='cudnn_compatible_lstm_cell')
        output, output_state = fw_cell(inputs=x,
                                       dtype=tf.float32,
                                       sequence_length=seq_length,
                                       initial_state=previous_state)
    return output, output_state
def rnn_impl_cudnn_rnn(x, seq_length, previous_state, _):
    # CuDNN-backed RNN implementation. Does not accept an initial state
    # (asserted below), so callers must pass previous_state=None.
    assert previous_state is None  # 'Passing previous state not supported with CuDNN backend'
    # Hack: CudnnLSTM works similarly to Keras layers in that when you instantiate
    # the object it creates the variables, and then you just call it several times
    # to enable variable re-use. Because all of our code is structure in an old
    # school TensorFlow structure where you can just call tf.get_variable again with
    # reuse=True to reuse variables, we can't easily make use of the object oriented
    # way CudnnLSTM is implemented, so we save a singleton instance in the function,
    # emulating a static function variable.
    if not rnn_impl_cudnn_rnn.cell:
        # Forward direction cell:
        fw_cell = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers=1,
                                                 num_units=Config.n_cell_dim,
                                                 input_mode='linear_input',
                                                 direction='unidirectional',
                                                 dtype=tf.float32)
        rnn_impl_cudnn_rnn.cell = fw_cell
    output, output_state = rnn_impl_cudnn_rnn.cell(inputs=x,
                                                   sequence_lengths=seq_length)
    return output, output_state


# Singleton cell instance (see the 'Hack' comment above).
rnn_impl_cudnn_rnn.cell = None
def rnn_impl_static_rnn(x, seq_length, previous_state, reuse):
    # Plain static-unrolled LSTM; the 'cudnn_lstm/...' scope keeps variable
    # names compatible with the CuDNN checkpoint layout.
    with tfv1.variable_scope('cudnn_lstm/rnn/multi_rnn_cell'):
        # Forward direction cell:
        fw_cell = tfv1.nn.rnn_cell.LSTMCell(Config.n_cell_dim,
                                            forget_bias=0,
                                            reuse=reuse,
                                            name='cudnn_compatible_lstm_cell')
        # Split rank N tensor into list of rank N-1 tensors
        x = [x[l] for l in range(x.shape[0])]
        output, output_state = tfv1.nn.static_rnn(cell=fw_cell,
                                                  inputs=x,
                                                  sequence_length=seq_length,
                                                  initial_state=previous_state,
                                                  dtype=tf.float32,
                                                  scope='cell_0')
        # Re-join the per-step outputs along the time axis.
        output = tf.concat(output, 0)
    return output, output_state
def create_model(batch_x, seq_length, dropout, reuse=False, batch_size=None, previous_state=None, overlap=True, rnn_impl=rnn_impl_lstmblockfusedcell):
    r'''Build the acoustic-model graph: three clipped-ReLU dense layers, one
    unidirectional LSTM, one more dense layer and a linear output layer.

    Args:
        batch_x: Input feature tensor (windowed here when ``overlap`` is True).
        seq_length: Per-example sequence lengths.
        dropout: List of six dropout-rate tensors; indices 0-2 feed the first
            three dense layers and index 5 feeds the post-RNN layer.
        reuse: Reuse variables created by a previous tower.
        batch_size: Static batch size; derived dynamically when falsy.
        previous_state: Initial RNN state (used for streaming inference).
        overlap: Whether to build overlapping context windows over the input.
        rnn_impl: Which RNN implementation function to use.

    Returns:
        Tuple ``(logits, layers)``: time-major logits of shape
        [n_steps, batch_size, n_hidden_6] and a dict of intermediate tensors.
    '''
    layers = {}
    # Input shape: [batch_size, n_steps, n_input + 2*n_input*n_context]
    if not batch_size:
        batch_size = tf.shape(input=batch_x)[0]
    layers['input'] = batch_x
    # Create overlapping feature windows if needed
    if overlap:
        batch_x = create_overlapping_windows(batch_x)
    # Reshaping `batch_x` to a tensor with shape `[n_steps*batch_size, n_input + 2*n_input*n_context]`.
    # This is done to prepare the batch for input into the first layer which expects a tensor of rank `2`.
    # Permute n_steps and batch_size
    batch_x = tf.transpose(a=batch_x, perm=[1, 0, 2, 3])
    # Reshape to prepare input for first layer
    batch_x = tf.reshape(batch_x, [-1, Config.n_input + 2*Config.n_input*Config.n_context])  # (n_steps*batch_size, n_input + 2*n_input*n_context)
    layers['input_reshaped'] = batch_x
    layers['input_length'] = seq_length
    # The next three blocks will pass `batch_x` through three hidden layers with
    # clipped RELU activation and dropout.
    layers['layer_1'] = layer_1 = dense('layer_1', batch_x, Config.n_hidden_1, dropout_rate=dropout[0])
    layers['layer_2'] = layer_2 = dense('layer_2', layer_1, Config.n_hidden_2, dropout_rate=dropout[1])
    layers['layer_3'] = layer_3 = dense('layer_3', layer_2, Config.n_hidden_3, dropout_rate=dropout[2])
    # `layer_3` is now reshaped into `[n_steps, batch_size, 2*n_cell_dim]`,
    # as the LSTM RNN expects its input to be of shape `[max_time, batch_size, input_size]`.
    layer_3 = tf.reshape(layer_3, [-1, batch_size, Config.n_hidden_3])
    # Run through parametrized RNN implementation, as we use different RNNs
    # for training and inference
    output, output_state = rnn_impl(layer_3, seq_length, previous_state, reuse)
    # Reshape output from a tensor of shape [n_steps, batch_size, n_cell_dim]
    # to a tensor of shape [n_steps*batch_size, n_cell_dim]
    output = tf.reshape(output, [-1, Config.n_cell_dim])
    layers['rnn_output'] = output
    layers['rnn_output_state'] = output_state
    # Now we feed `output` to the fifth hidden layer with clipped RELU activation
    layers['layer_5'] = layer_5 = dense('layer_5', output, Config.n_hidden_5, dropout_rate=dropout[5])
    # Now we apply a final linear layer creating `n_classes` dimensional vectors, the logits.
    layers['layer_6'] = layer_6 = dense('layer_6', layer_5, Config.n_hidden_6, relu=False)
    # Finally we reshape layer_6 from a tensor of shape [n_steps*batch_size, n_hidden_6]
    # to the slightly more useful shape [n_steps, batch_size, n_hidden_6].
    # Note, that this differs from the input in that it is time-major.
    layer_6 = tf.reshape(layer_6, [-1, batch_size, Config.n_hidden_6], name='raw_logits')
    layers['raw_logits'] = layer_6
    # Output shape: [n_steps, batch_size, n_hidden_6]
    return layer_6, layers
# Accuracy and Loss
# =================
# In accord with 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# the loss function used by our network should be the CTC loss function
# (http://www.cs.toronto.edu/~graves/preprint.pdf).
# Conveniently, this loss function is implemented in TensorFlow.
# Thus, we can simply make use of this implementation to define our loss.
def calculate_mean_edit_distance_and_loss(iterator, dropout, reuse):
    r'''Build the per-tower forward pass and CTC loss for one mini-batch.

    NOTE(review): the upstream docstring claimed beam-search decoding and mean
    edit distance; this variant performs no decoding. It returns
    (avg_loss, non_finite_files, layers, batch_y, batch_audio), where
    batch_audio is currently a constant 0 stub (see below).
    '''
    # Obtain the next batch of data
    batch_filenames, (batch_x, batch_seq_len), batch_y = iterator.get_next()

    def fn2audio(fn):
        # Read and decode a WAV file to a mono audio tensor.
        # Currently unused: the tf.map_fn call below is commented out.
        samples = tf.io.read_file(fn)
        decoded = contrib_audio.decode_wav(samples, desired_channels=1)
        return decoded.audio
    # batch_audio = tf.map_fn(fn2audio, batch_filenames, dtype=tf.float32)
    # Audio decoding disabled; return a constant placeholder instead.
    batch_audio = tf.constant(0)
    if FLAGS.train_cudnn:
        rnn_impl = rnn_impl_cudnn_rnn
    else:
        rnn_impl = rnn_impl_lstmblockfusedcell
    # Calculate the logits of the batch
    logits, layers = create_model(batch_x, batch_seq_len, dropout, reuse=reuse, rnn_impl=rnn_impl)
    # Compute the CTC loss using TensorFlow's `ctc_loss`
    total_loss = tfv1.nn.ctc_loss(labels=batch_y, inputs=logits, sequence_length=batch_seq_len)
    # Check if any files lead to non finite loss
    non_finite_files = tf.gather(batch_filenames, tfv1.where(~tf.math.is_finite(total_loss)))
    # Calculate the average loss across the batch
    avg_loss = tf.reduce_mean(input_tensor=total_loss)
    # Finally we return the average loss
    return avg_loss, non_finite_files, layers, batch_y, batch_audio
# Adam Optimization
# =================
# In contrast to 'Deep Speech: Scaling up end-to-end speech recognition'
# (http://arxiv.org/abs/1412.5567),
# in which 'Nesterov's Accelerated Gradient Descent'
# (www.cs.toronto.edu/~fritz/absps/momentum.pdf) was used,
# we will use the Adam method for optimization (http://arxiv.org/abs/1412.6980),
# because, generally, it requires less fine-tuning.
def create_optimizer(learning_rate_var, opt='sgd'):
    """Build the training optimizer.

    Args:
        learning_rate_var: Scalar tensor/variable holding the learning rate.
        opt: Optimizer name, either ``'adam'`` or ``'sgd'`` (default).

    Returns:
        A ``tf.compat.v1.train`` optimizer instance.

    Raises:
        ValueError: If ``opt`` is not a recognized optimizer name.
    """
    if opt == 'adam':
        return tfv1.train.AdamOptimizer(
            learning_rate=learning_rate_var,
            beta1=FLAGS.beta1,
            beta2=FLAGS.beta2,
            epsilon=FLAGS.epsilon)
    if opt == 'sgd':
        return tfv1.train.GradientDescentOptimizer(learning_rate=learning_rate_var)
    # Fail loudly with the offending value instead of a bare ValueError.
    raise ValueError("Unsupported optimizer '%s' (expected 'adam' or 'sgd')" % opt)
# Towers
# ======
# In order to properly make use of multiple GPU's, one must introduce new abstractions,
# not present when using a single GPU, that facilitate the multi-GPU use case.
# In particular, one must introduce a means to isolate the inference and gradient
# calculations on the various GPU's.
# The abstraction we introduce for this purpose is called a 'tower'.
# A tower is specified by two properties:
# * **Scope** - A scope, as provided by `tf.name_scope()`,
# is a means to isolate the operations within a tower.
# For example, all operations within 'tower 0' could have their name prefixed with `tower_0/`.
# * **Device** - A hardware device, as provided by `tf.device()`,
# on which all operations within the tower execute.
# For example, all operations of 'tower 0' could execute on the first GPU `tf.device('/gpu:0')`.
def get_tower_results(iterator, optimizer, dropout_rates):
    r'''
    With this preliminary step out of the way, we can for each GPU introduce a
    tower for which's batch we calculate and return the optimization gradients
    and the average loss across towers.

    Returns (per-tower gradients, average loss across towers, concatenated
    non-finite file names, and -- from the LAST tower built only -- the
    layers dict, the batch labels and the batch audio).
    '''
    # To calculate the mean of the losses
    tower_avg_losses = []
    # Tower gradients to return
    tower_gradients = []
    # Aggregate any non finite files in the batches
    tower_non_finite_files = []
    with tfv1.variable_scope(tfv1.get_variable_scope()):
        # Loop over available_devices
        for i in range(len(Config.available_devices)):
            # Execute operations of tower i on device i
            device = Config.available_devices[i]
            with tf.device(device):
                # Create a scope for all operations of tower i
                with tf.name_scope('tower_%d' % i):
                    # Calculate the avg_loss and mean_edit_distance and retrieve the decoded
                    # batch along with the original batch's labels (Y) of this tower
                    avg_loss, non_finite_files, layers, batch_y, batch_audio = calculate_mean_edit_distance_and_loss(iterator, dropout_rates, reuse=i > 0)
                    # Allow for variables to be re-used by the next tower
                    tfv1.get_variable_scope().reuse_variables()
                    # Retain tower's avg losses
                    tower_avg_losses.append(avg_loss)
                    # Compute gradients for model parameters using tower's mini-batch
                    gradients = optimizer.compute_gradients(avg_loss)
                    # Retain tower's gradients
                    tower_gradients.append(gradients)
                    tower_non_finite_files.append(non_finite_files)
    avg_loss_across_towers = tf.reduce_mean(input_tensor=tower_avg_losses, axis=0)
    tfv1.summary.scalar(name='step_loss', tensor=avg_loss_across_towers, collections=['step_summaries'])
    all_non_finite_files = tf.concat(tower_non_finite_files, axis=0)
    # Return gradients and the average loss
    return tower_gradients, avg_loss_across_towers, all_non_finite_files, layers, batch_y, batch_audio
def average_gradients(tower_gradients):
    r'''
    A routine for computing each variable's average of the gradients obtained from the GPUs.
    Note also that this code acts as a synchronization point as it requires all
    GPUs to be finished with their mini-batch before it can run to completion.

    Takes a list of per-tower [(gradient, variable), ...] lists and returns a
    single [(averaged_gradient, variable), ...] list.
    '''
    # List of average gradients to return to the caller
    average_grads = []
    # Run this on cpu_device to conserve GPU memory
    with tf.device(Config.cpu_device):
        # Loop over gradient/variable pairs from all towers
        for grad_and_vars in zip(*tower_gradients):
            # Introduce grads to store the gradients for the current variable
            grads = []
            # Loop over the gradients for the current variable
            for g, _ in grad_and_vars:
                # Add 0 dimension to the gradients to represent the tower.
                expanded_g = tf.expand_dims(g, 0)
                # Append on a 'tower' dimension which we will average over below.
                grads.append(expanded_g)
            # Average over the 'tower' dimension
            grad = tf.concat(grads, 0)
            grad = tf.reduce_mean(input_tensor=grad, axis=0)
            # Create a gradient/variable tuple for the current variable with its average gradient
            grad_and_var = (grad, grad_and_vars[0][1])
            # Add the current tuple to average_grads
            average_grads.append(grad_and_var)
    # Return result to caller
    return average_grads
# Logging
# =======
def log_variable(variable, gradient=None):
    r'''
    We introduce a function for logging a tensor variable's current state.
    It logs scalar values for the mean, standard deviation, minimum and maximum.
    Furthermore it logs a histogram of its state and (if given) of an optimization gradient.
    '''
    # Make the tensor name usable as a summary tag (replace the ':' from
    # names like 'weights:0').
    name = variable.name.replace(':', '_')
    mean = tf.reduce_mean(input_tensor=variable)
    tfv1.summary.scalar(name='%s/mean' % name, tensor=mean)
    # NOTE(review): the 'sttdev' tag is misspelled ('stddev'), but it is a
    # runtime summary name -- kept as-is for compatibility with existing logs.
    tfv1.summary.scalar(name='%s/sttdev' % name, tensor=tf.sqrt(tf.reduce_mean(input_tensor=tf.square(variable - mean))))
    tfv1.summary.scalar(name='%s/max' % name, tensor=tf.reduce_max(input_tensor=variable))
    tfv1.summary.scalar(name='%s/min' % name, tensor=tf.reduce_min(input_tensor=variable))
    tfv1.summary.histogram(name=name, values=variable)
    if gradient is not None:
        # IndexedSlices (e.g. from embedding lookups) carry their data in .values
        if isinstance(gradient, tf.IndexedSlices):
            grad_values = gradient.values
        else:
            grad_values = gradient
        if grad_values is not None:
            tfv1.summary.histogram(name='%s/gradients' % name, values=grad_values)
def log_grads_and_vars(grads_and_vars):
    r'''Log every (gradient, variable) pair in the given collection.'''
    for grad, var in grads_and_vars:
        log_variable(var, gradient=grad)
def train():
    """Build the training graph, run a single pass over the training data and
    export the resulting model update / data samples as pickle files.

    NOTE(review): this is a heavily modified variant of the upstream
    DeepSpeech ``train()``: ``run_set`` is invoked exactly once (no epoch
    loop, no dev evaluation, no checkpoint saving), and the model update is
    computed as the difference of the trainable variables before and after
    the step(s), written to ``<output_path>/grads.pkl``.
    """
    do_cache_dataset = True
    # pylint: disable=too-many-boolean-expressions
    # Disable feature caching when any augmentation flag is active.
    if (FLAGS.data_aug_features_multiplicative > 0 or
            FLAGS.data_aug_features_additive > 0 or
            FLAGS.augmentation_spec_dropout_keeprate < 1 or
            FLAGS.augmentation_freq_and_time_masking or
            FLAGS.augmentation_pitch_and_tempo_scaling or
            FLAGS.augmentation_speed_up_std > 0 or
            FLAGS.augmentation_sparse_warp):
        do_cache_dataset = False
    exception_box = ExceptionBox()
    # Create training and validation datasets
    # NOTE(review): batch_size is 1 UNLESS export_sample_only is set -- this
    # looks inverted at first glance but matches the export logic in run_set.
    train_set = create_dataset(FLAGS.train_files.split(','),
                               batch_size=1 if not FLAGS.export_sample_only else FLAGS.train_batch_size,
                               enable_cache=FLAGS.feature_cache and do_cache_dataset,
                               cache_path=FLAGS.feature_cache,
                               train_phase=True,
                               exception_box=exception_box,
                               process_ahead=len(Config.available_devices) * (1 if not FLAGS.export_sample_only else FLAGS.train_batch_size) * 2,
                               buffering=FLAGS.read_buffer)
    iterator = tfv1.data.Iterator.from_structure(tfv1.data.get_output_types(train_set),
                                                 tfv1.data.get_output_shapes(train_set),
                                                 output_classes=tfv1.data.get_output_classes(train_set))
    # Make initialization ops for switching between the two sets
    train_init_op = iterator.make_initializer(train_set)
    # Dropout placeholders, one per dropout site in the model
    dropout_rates = [tfv1.placeholder(tf.float32, name='dropout_{}'.format(i)) for i in range(6)]
    dropout_feed_dict = {
        dropout_rates[0]: FLAGS.dropout_rate,
        dropout_rates[1]: FLAGS.dropout_rate2,
        dropout_rates[2]: FLAGS.dropout_rate3,
        dropout_rates[3]: FLAGS.dropout_rate4,
        dropout_rates[4]: FLAGS.dropout_rate5,
        dropout_rates[5]: FLAGS.dropout_rate6,
    }
    no_dropout_feed_dict = {
        rate: 0. for rate in dropout_rates
    }
    # Building the graph
    # The learning rate is a constant here (no plateau reduction, see the
    # commented-out variable/assign ops below).
    learning_rate_var = tf.constant(FLAGS.model_learning_rate)
    # learning_rate_var = tfv1.get_variable('learning_rate', initializer=FLAGS.model_learning_rate, trainable=False)
    # reduce_learning_rate_op = learning_rate_var.assign(tf.multiply(learning_rate_var, FLAGS.plateau_reduction))
    optimizer = create_optimizer(learning_rate_var)
    # Enable mixed precision training
    if FLAGS.automatic_mixed_precision:
        log_info('Enabling automatic mixed precision training.')
        optimizer = tfv1.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
    gradients, loss, non_finite_files, layers, batch_y, batch_audio = get_tower_results(iterator, optimizer, dropout_rates)
    # Average tower gradients across GPUs
    avg_tower_gradients = average_gradients(gradients)
    log_grads_and_vars(avg_tower_gradients)
    # global_step is automagically incremented by the optimizer
    global_step = tfv1.train.get_or_create_global_step()
    apply_gradient_op = optimizer.apply_gradients(avg_tower_gradients, global_step=global_step)
    # Summaries
    step_summaries_op = tfv1.summary.merge_all('step_summaries')
    step_summary_writers = {
        'train': tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'train'), max_queue=120),
        'dev': tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'dev'), max_queue=120)
    }
    # Save flags next to checkpoints
    os.makedirs(FLAGS.save_checkpoint_dir, exist_ok=True)
    flags_file = os.path.join(FLAGS.save_checkpoint_dir, 'flags.txt')
    with open(flags_file, 'w') as fout:
        fout.write(FLAGS.flags_into_string())
    with tfv1.Session(config=Config.session_config) as session:
        log_debug('Session opened.')
        # Prevent further graph changes
        # tfv1.get_default_graph().finalize()
        # Load checkpoint or initialize variables
        load_or_init_graph_for_training(session)

        def run_set(set_name, epoch, init_op, dataset=None):
            # Run one batch of the given set and export gradients / samples.
            # Returns (mean_loss, step_count).
            is_train = set_name == 'train'
            train_op = apply_gradient_op if is_train else []
            feed_dict = dropout_feed_dict if is_train else no_dropout_feed_dict
            total_loss = 0.0
            step_count = 0
            step_summary_writer = step_summary_writers.get(set_name)
            checkpoint_time = time.time()
            # Initialize iterator to the appropriate dataset
            session.run(init_op)
            # Only a single tower is supported: its raw gradients are exported.
            assert len(gradients) == 1
            grads = gradients[0]
            grads = {v.op.name: g for g, v in grads}
            # Batch loop
            try:
                _batch_y = tf.sparse.to_dense(batch_y)
                model_before = session.run(tf.trainable_variables())
                assert FLAGS.num_steps == 1 or FLAGS.train_batch_size == 1
                if FLAGS.num_steps > 1:  # multi-step
                    for step in range(FLAGS.num_steps):
                        _, current_step, batch_loss, var_grads, audio, bx, bx_len, by, problem_files, step_summary = \
                            session.run([train_op, global_step, loss, grads, batch_audio, layers['input'], layers['input_length'], _batch_y, non_finite_files, step_summaries_op],
                                        feed_dict=feed_dict)
                    # Model update = parameter difference accumulated over all steps.
                    model_after = session.run(tf.trainable_variables())
                    model_update = {v.op.name: model_before[i] - model_after[i] for i, v in
                                    enumerate(tf.trainable_variables())}
                elif FLAGS.train_batch_size > 1 and not FLAGS.export_sample_only:  # multi-sample
                    # Accumulate and average raw gradients over the samples
                    # WITHOUT applying them (no train_op in this branch).
                    # NOTE(review): this branch always feeds dropout_feed_dict,
                    # regardless of set_name.
                    all_var_grads = None
                    for sample_id in range(FLAGS.train_batch_size):
                        current_step, batch_loss, var_grads, problem_files, step_summary = \
                            session.run([global_step, loss, grads, non_finite_files, step_summaries_op],
                                        feed_dict=dropout_feed_dict)
                        if all_var_grads is None:
                            all_var_grads = var_grads
                        else:
                            for op_name in all_var_grads:
                                all_var_grads[op_name] += var_grads[op_name]
                    for op_name in all_var_grads:
                        all_var_grads[op_name] /= FLAGS.train_batch_size
                    model_update = all_var_grads
                else:
                    # Single step / single sample: apply gradients once and
                    # export the parameter difference.
                    _, current_step, batch_loss, var_grads, audio, bx, bx_len, layer_1, by, problem_files, step_summary = \
                        session.run(
                            [train_op, global_step, loss, grads, batch_audio, layers['input'], layers['input_length'], layers['layer_1'],
                             _batch_y, non_finite_files, step_summaries_op],
                            feed_dict=feed_dict)
                    model_after = session.run(tf.trainable_variables())
                    model_update = {v.op.name: model_before[i] - model_after[i] for i, v in enumerate(tf.trainable_variables())}
                os.makedirs(FLAGS.output_path or 'outputs', exist_ok=True)
                if not FLAGS.export_sample_only:
                    fn = os.path.join(FLAGS.output_path or 'outputs', 'grads.pkl')
                    with open(fn, 'wb') as f:
                        pkl.dump(model_update, f)
                    print("Gradients written to %s" % fn)
                if FLAGS.export_sample_only or FLAGS.train_batch_size == 1:
                    fn = os.path.join(FLAGS.output_path or 'outputs', 'samples.pkl')
                    with open(fn, 'wb') as f:
                        pkl.dump([audio, bx, bx_len, by], f)
                    print("Data sample written to %s" % fn)
                if FLAGS.export_dropout_mask:
                    # Debug path: dump the first dropout op's output, then block
                    # on stdin so the process can be inspected.
                    print('hello')
                    dropout_tensors = [n for n in tf.get_default_graph().get_operations() if 'dropout' in n.name]
                    print(dropout_tensors)
                    print(session.run([dropout_tensors[0]], feed_dict=feed_dict))
                    input()
                print("Loss: " + str(batch_loss))
                exception_box.raise_if_set()
            except tf.errors.InvalidArgumentError as err:
                if FLAGS.augmentation_sparse_warp:
                    log_info("Ignoring sparse warp error: {}".format(err))
                raise
            except tf.errors.OutOfRangeError:
                # Dataset exhausted; surface any reader-thread exception.
                exception_box.raise_if_set()
            if problem_files.size > 0:
                problem_files = [f.decode('utf8') for f in problem_files[..., 0]]
                log_error('The following files caused an infinite (or NaN) '
                          'loss: {}'.format(','.join(problem_files)))
            total_loss += batch_loss
            step_count += 1
            step_summary_writer.add_summary(step_summary, current_step)
            mean_loss = total_loss / step_count if step_count > 0 else 0.0
            return mean_loss, step_count

        log_info('STARTING Optimization')
        train_start_time = datetime.utcnow()
        best_dev_loss = float('inf')
        dev_losses = []
        epochs_without_improvement = 0
        try:
            # Training: a single run over the training batch (see docstring).
            train_loss, _ = run_set('train', 0, train_init_op)
        except KeyboardInterrupt:
            pass
        log_debug('Session closed.')
def test():
    """Evaluate the current checkpoint on FLAGS.test_files and optionally dump results."""
    sample_results = evaluate(FLAGS.test_files.split(','), create_model)
    if FLAGS.test_output_file:
        # Persist per-sample decoding results so they can be inspected
        # later without re-running evaluation.
        save_samples_json(sample_results, FLAGS.test_output_file)
def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
    """Build the inference graph used for export and single-file inference.

    Args:
        batch_size: Fixed batch size, or <= 0 for a dynamic batch dimension.
        n_steps: Time steps per inference call, or <= 0 for a dynamic length.
        tflite: If True, build a TF Lite compatible graph (static RNN,
            squeezed logits).

    Returns:
        (inputs, outputs, layers) dicts of graph tensors. For a dynamic batch
        size only 'input'/'input_lengths' and 'outputs' are returned, with no
        streaming state tensors.
    """
    # A non-positive batch size means "dynamic": use None so TF leaves the
    # dimension unspecified.
    batch_size = batch_size if batch_size > 0 else None

    # Create feature computation graph
    input_samples = tfv1.placeholder(tf.float32, [Config.audio_window_samples], 'input_samples')
    samples = tf.expand_dims(input_samples, -1)
    mfccs, _ = samples_to_mfccs(samples, FLAGS.audio_sample_rate)
    mfccs = tf.identity(mfccs, name='mfccs')

    # Input tensor will be of shape [batch_size, n_steps, 2*n_context+1, n_input]
    # This shape is read by the native_client in DS_CreateModel to know the
    # value of n_steps, n_context and n_input. Make sure you update the code
    # there if this shape is changed.
    input_tensor = tfv1.placeholder(tf.float32, [batch_size, n_steps if n_steps > 0 else None, 2 * Config.n_context + 1, Config.n_input], name='input_node')
    seq_length = tfv1.placeholder(tf.int32, [batch_size], name='input_lengths')

    # BUGFIX: batch_size was rebound to None above for the dynamic case, so
    # the original comparison `batch_size <= 0` raised TypeError on Python 3;
    # test for None instead.
    if batch_size is None:
        # no state management since n_step is expected to be dynamic too (see below)
        previous_state = None
    else:
        previous_state_c = tfv1.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c')
        previous_state_h = tfv1.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_h')
        previous_state = tf.nn.rnn_cell.LSTMStateTuple(previous_state_c, previous_state_h)

    # One rate per layer
    no_dropout = [None] * 6

    if tflite:
        rnn_impl = rnn_impl_static_rnn
    else:
        rnn_impl = rnn_impl_lstmblockfusedcell

    logits, layers = create_model(batch_x=input_tensor,
                                  batch_size=batch_size,
                                  seq_length=seq_length if not FLAGS.export_tflite else None,
                                  dropout=no_dropout,
                                  previous_state=previous_state,
                                  overlap=False,
                                  rnn_impl=rnn_impl)

    # TF Lite runtime will check that input dimensions are 1, 2 or 4
    # by default we get 3, the middle one being batch_size which is forced to
    # one on inference graph, so remove that dimension
    if tflite:
        logits = tf.squeeze(logits, [1])

    # Apply softmax for CTC decoder
    logits = tf.nn.softmax(logits, name='logits')

    if batch_size is None:  # dynamic batch (see BUGFIX note above)
        if tflite:
            raise NotImplementedError('dynamic batch_size does not support tflite nor streaming')
        if n_steps > 0:
            raise NotImplementedError('dynamic batch_size expect n_steps to be dynamic too')
        return (
            {
                'input': input_tensor,
                'input_lengths': seq_length,
            },
            {
                'outputs': logits,
            },
            layers
        )

    new_state_c, new_state_h = layers['rnn_output_state']
    new_state_c = tf.identity(new_state_c, name='new_state_c')
    new_state_h = tf.identity(new_state_h, name='new_state_h')

    inputs = {
        'input': input_tensor,
        'previous_state_c': previous_state_c,
        'previous_state_h': previous_state_h,
        'input_samples': input_samples,
    }

    if not FLAGS.export_tflite:
        inputs['input_lengths'] = seq_length

    outputs = {
        'outputs': logits,
        'new_state_c': new_state_c,
        'new_state_h': new_state_h,
        'mfccs': mfccs,
    }

    return inputs, outputs, layers
def file_relative_read(fname):
    """Read and return the text of *fname*, resolved relative to this file's directory.

    Uses a context manager so the file handle is closed deterministically
    (the original left the handle open until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fin:
        return fin.read()
def export():
    r'''
    Restores the trained variables into a simpler graph that will be exported for serving.

    Writes either a frozen .pb graph or a .tflite model (per
    FLAGS.export_tflite) into FLAGS.export_dir, plus a markdown metadata
    file describing the export.
    '''
    log_info('Exporting the model...')
    inputs, outputs, _ = create_inference_graph(batch_size=FLAGS.export_batch_size, n_steps=FLAGS.n_steps, tflite=FLAGS.export_tflite)
    graph_version = int(file_relative_read('GRAPH_VERSION').strip())
    assert graph_version > 0
    # Bake compatibility metadata into the graph as constants so the native
    # client can validate the model without side-channel files.
    outputs['metadata_version'] = tf.constant([graph_version], name='metadata_version')
    outputs['metadata_sample_rate'] = tf.constant([FLAGS.audio_sample_rate], name='metadata_sample_rate')
    outputs['metadata_feature_win_len'] = tf.constant([FLAGS.feature_win_len], name='metadata_feature_win_len')
    outputs['metadata_feature_win_step'] = tf.constant([FLAGS.feature_win_step], name='metadata_feature_win_step')
    outputs['metadata_beam_width'] = tf.constant([FLAGS.export_beam_width], name='metadata_beam_width')
    outputs['metadata_alphabet'] = tf.constant([Config.alphabet.serialize()], name='metadata_alphabet')
    if FLAGS.export_language:
        outputs['metadata_language'] = tf.constant([FLAGS.export_language.encode('utf-8')], name='metadata_language')
    # Prevent further graph changes
    tfv1.get_default_graph().finalize()
    # Collect output node names (constants above are Tensors; some entries
    # may be Operations).
    output_names_tensors = [tensor.op.name for tensor in outputs.values() if isinstance(tensor, tf.Tensor)]
    output_names_ops = [op.name for op in outputs.values() if isinstance(op, tf.Operation)]
    output_names = output_names_tensors + output_names_ops
    with tf.Session() as session:
        # Restore variables from checkpoint
        load_graph_for_evaluation(session)
        output_filename = FLAGS.export_file_name + '.pb'
        if FLAGS.remove_export:
            if os.path.isdir(FLAGS.export_dir):
                log_info('Removing old export')
                shutil.rmtree(FLAGS.export_dir)
        output_graph_path = os.path.join(FLAGS.export_dir, output_filename)
        if not os.path.isdir(FLAGS.export_dir):
            os.makedirs(FLAGS.export_dir)
        # Freeze variables into constants, then prune to the reachable subgraph.
        frozen_graph = tfv1.graph_util.convert_variables_to_constants(
            sess=session,
            input_graph_def=tfv1.get_default_graph().as_graph_def(),
            output_node_names=output_names)
        frozen_graph = tfv1.graph_util.extract_sub_graph(
            graph_def=frozen_graph,
            dest_nodes=output_names)
        if not FLAGS.export_tflite:
            with open(output_graph_path, 'wb') as fout:
                fout.write(frozen_graph.SerializeToString())
        else:
            output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite'))
            converter = tf.lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values())
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            # AudioSpectrogram and Mfcc ops are custom but have built-in kernels in TFLite
            converter.allow_custom_ops = True
            tflite_model = converter.convert()
            with open(output_tflite_path, 'wb') as fout:
                fout.write(tflite_model)
        log_info('Models exported at %s' % (FLAGS.export_dir))
    # Write the human-readable metadata card next to the exported model.
    metadata_fname = os.path.join(FLAGS.export_dir, '{}_{}_{}.md'.format(
        FLAGS.export_author_id,
        FLAGS.export_model_name,
        FLAGS.export_model_version))
    model_runtime = 'tflite' if FLAGS.export_tflite else 'tensorflow'
    with open(metadata_fname, 'w') as f:
        f.write('---\n')
        f.write('author: {}\n'.format(FLAGS.export_author_id))
        f.write('model_name: {}\n'.format(FLAGS.export_model_name))
        f.write('model_version: {}\n'.format(FLAGS.export_model_version))
        f.write('contact_info: {}\n'.format(FLAGS.export_contact_info))
        f.write('license: {}\n'.format(FLAGS.export_license))
        f.write('language: {}\n'.format(FLAGS.export_language))
        f.write('runtime: {}\n'.format(model_runtime))
        f.write('min_ds_version: {}\n'.format(FLAGS.export_min_ds_version))
        f.write('max_ds_version: {}\n'.format(FLAGS.export_max_ds_version))
        f.write('acoustic_model_url: <replace this with a publicly available URL of the acoustic model>\n')
        f.write('scorer_url: <replace this with a publicly available URL of the scorer, if present>\n')
        f.write('---\n')
        f.write('{}\n'.format(FLAGS.export_description))
    log_info('Model metadata file saved to {}. Before submitting the exported model for publishing make sure all information in the metadata file is correct, and complete the URL fields.'.format(metadata_fname))
def package_zip():
    """Bundle the export directory (plus the scorer) into a zip next to it.

    --export_dir path/to/export/LANG_CODE/ => path/to/export/LANG_CODE.zip
    """
    # Joining with '' forces a trailing separator, so dirname() then yields
    # the directory path itself (minus the separator) as the archive base.
    normalized_dir = os.path.join(os.path.abspath(FLAGS.export_dir), '')
    archive_base = os.path.dirname(normalized_dir)
    # Ship the external scorer together with the model files.
    shutil.copy(FLAGS.scorer_path, normalized_dir)
    archive = shutil.make_archive(archive_base, 'zip', normalized_dir)
    log_info('Exported packaged model {}'.format(archive))
def do_single_file_inference(input_file_path):
    """Transcribe one audio file with the current checkpoint and print the result.

    Builds a batch-1 inference graph with a dynamic time dimension
    (n_steps=-1), restores the evaluation checkpoint, runs the acoustic
    model, and decodes the logits with CTC beam search (optionally using an
    external scorer).
    """
    with tfv1.Session(config=Config.session_config) as session:
        inputs, outputs, _ = create_inference_graph(batch_size=1, n_steps=-1)
        # Restore variables from training checkpoint
        load_graph_for_evaluation(session)
        features, features_len = audiofile_to_features(input_file_path)
        # Zero initial LSTM state: this is a fresh, complete utterance.
        previous_state_c = np.zeros([1, Config.n_cell_dim])
        previous_state_h = np.zeros([1, Config.n_cell_dim])
        # Add batch dimension
        features = tf.expand_dims(features, 0)
        features_len = tf.expand_dims(features_len, 0)
        # Evaluate
        features = create_overlapping_windows(features).eval(session=session)
        features_len = features_len.eval(session=session)
        logits = outputs['outputs'].eval(feed_dict={
            inputs['input']: features,
            inputs['input_lengths']: features_len,
            inputs['previous_state_c']: previous_state_c,
            inputs['previous_state_h']: previous_state_h,
        }, session=session)
        # Drop the batch dimension before decoding.
        logits = np.squeeze(logits)
        if FLAGS.scorer_path:
            scorer = Scorer(FLAGS.lm_alpha, FLAGS.lm_beta,
                            FLAGS.scorer_path, Config.alphabet)
        else:
            scorer = None
        decoded = ctc_beam_search_decoder(logits, Config.alphabet, FLAGS.beam_width,
                                          scorer=scorer, cutoff_prob=FLAGS.cutoff_prob,
                                          cutoff_top_n=FLAGS.cutoff_top_n)
        # Print highest probability result
        print(decoded[0][1])
def early_training_checks():
    """Fail-fast sanity checks run before any expensive training work."""
    # Check for proper scorer early
    if FLAGS.scorer_path:
        # Constructing the Scorer validates the scorer package and alphabet
        # up front; the instance itself is not needed afterwards.
        Scorer(FLAGS.lm_alpha, FLAGS.lm_beta,
               FLAGS.scorer_path, Config.alphabet)
    checkpoint_dirs_differ = FLAGS.load_checkpoint_dir != FLAGS.save_checkpoint_dir
    if FLAGS.train_files and FLAGS.test_files and checkpoint_dirs_differ:
        message = ('WARNING: You specified different values for --load_checkpoint_dir '
                   'and --save_checkpoint_dir, but you are running training and testing '
                   'in a single invocation. The testing step will respect --load_checkpoint_dir, '
                   'and thus WILL NOT TEST THE CHECKPOINT CREATED BY THE TRAINING STEP. '
                   'Train and test in two separate invocations, specifying the correct '
                   '--load_checkpoint_dir in both cases, or use the same location '
                   'for loading and saving.')
        log_warn(message)
def main(_):
    """absl entry point: run the stages selected by command-line flags.

    Stage order: train -> test -> export (or export+zip) -> one-shot
    inference. The default graph is reset before each stage so every stage
    builds its graph from scratch.
    """
    initialize_globals()
    early_training_checks()
    if FLAGS.train_files:
        tfv1.reset_default_graph()
        tfv1.set_random_seed(FLAGS.random_seed)
        train()
    if FLAGS.test_files:
        tfv1.reset_default_graph()
        test()
    if FLAGS.export_dir and not FLAGS.export_zip:
        tfv1.reset_default_graph()
        export()
    if FLAGS.export_zip:
        tfv1.reset_default_graph()
        # Zipped exports are always TF Lite models.
        FLAGS.export_tflite = True
        if os.listdir(FLAGS.export_dir):
            log_error('Directory {} is not empty, please fix this.'.format(FLAGS.export_dir))
            sys.exit(1)
        export()
        package_zip()
    if FLAGS.one_shot_infer:
        tfv1.reset_default_graph()
        do_single_file_inference(FLAGS.one_shot_infer)
def run_script():
    """Command-line entry point: register flags, then delegate to absl's app runner."""
    create_flags()
    absl.app.run(main)

if __name__ == '__main__':
    run_script()
|
{"hexsha": "7c591567ecae58cd202dd658e3c125c7601e0422", "size": 39868, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/deepspeech_training/export_gradients.py", "max_stars_repo_name": "googleinterns/deepspeech-reconstruction", "max_stars_repo_head_hexsha": "72f28d1e9064d221b3421c302a8725a8c71859ee", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-08-20T16:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T23:17:52.000Z", "max_issues_repo_path": "src/deepspeech_training/export_gradients.py", "max_issues_repo_name": "googleinterns/deepspeech-reconstruction", "max_issues_repo_head_hexsha": "72f28d1e9064d221b3421c302a8725a8c71859ee", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-22T04:16:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T04:26:03.000Z", "max_forks_repo_path": "src/deepspeech_training/export_gradients.py", "max_forks_repo_name": "googleinterns/deepspeech-reconstruction", "max_forks_repo_head_hexsha": "72f28d1e9064d221b3421c302a8725a8c71859ee", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-28T21:51:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-28T21:51:12.000Z", "avg_line_length": 42.8227712137, "max_line_length": 211, "alphanum_fraction": 0.6540834755, "include": true, "reason": "import numpy", "num_tokens": 8739}
|
import numpy as np
from msdsl import *

# First-order RC low-pass filter model for msdsl.
# Discrete-time update implemented below:
#   y[k] = a * y[k-1] + (1 - a) * x[k-1],  with  a = exp(-dt / (r*c))
# where `a` is read from a tabulated function of the (analog input) timestep.
r, c = 1e3, 1e-9
m = MixedSignalModel('rc')
x = m.add_analog_input('x')
dt = m.add_analog_input('dt')
y = m.add_analog_output('y')
# Tabulate exp(-dt/RC) over [0, 10*RC] with 512 first-order (linear) segments.
func = lambda dt: np.exp(-dt/(r*c))
f = m.make_function(func,
    domain=[0, 10*r*c], numel=512, order=1)
a = m.set_from_sync_func('a', f, dt)
# One-cycle delays provide the previous input and output samples.
x_prev = m.cycle_delay(x, 1)
y_prev = m.cycle_delay(y, 1)
m.set_this_cycle(y, a*y_prev + (1-a)*x_prev)
m.compile_and_print(VerilogGenerator())
|
{"hexsha": "23b1ab54a4086183b31bfb8475dfb02032badfd9", "size": 455, "ext": "py", "lang": "Python", "max_stars_repo_path": "random/func.py", "max_stars_repo_name": "sgherbst/msdsl", "max_stars_repo_head_hexsha": "e38d5ecdb88b3574bda62f22a4f91ce3e4173d12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2019-05-14T10:12:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T15:29:52.000Z", "max_issues_repo_path": "random/func.py", "max_issues_repo_name": "sgherbst/msdsl", "max_issues_repo_head_hexsha": "e38d5ecdb88b3574bda62f22a4f91ce3e4173d12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2020-01-22T21:44:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-05T02:10:41.000Z", "max_forks_repo_path": "random/func.py", "max_forks_repo_name": "sgherbst/msdsl", "max_forks_repo_head_hexsha": "e38d5ecdb88b3574bda62f22a4f91ce3e4173d12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-10-21T09:53:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-10T17:32:20.000Z", "avg_line_length": 30.3333333333, "max_line_length": 44, "alphanum_fraction": 0.6835164835, "include": true, "reason": "import numpy", "num_tokens": 163}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2020-05-21
@author: cook
"""
import numpy as np
# https://en.wikipedia.org/wiki/Adler-32
# Alder 32 algorithm
# try with sample string:
#
# string = 'Wikipedia'
# =============================================================================
# Define variables
# =============================================================================
# =============================================================================
# Define functions
# =============================================================================
def adler32(string):
    """Compute the Adler-32 checksum of an ASCII string (RFC 1950).

    A = 1 + sum(bytes)                  mod 65521
    B = n + sum(prefix sums of bytes)   mod 65521
    checksum = B * 65536 + A

    :param string: text to checksum; must be ASCII-encodable
    :return: lowercase hex digits of the checksum, without the '0x' prefix
    """
    # 65521 is the largest prime below 2**16. The original code used 635521
    # (a typo) as the modulus for A, which gave wrong checksums whenever
    # sum(bytes) + 1 >= 65521.
    mod_adler = 65521
    b = np.array([a for a in string.encode(encoding='ascii')], dtype=int)
    if b.size == 0:
        # Adler-32 of the empty string is 1 (A=1, B=0); the original crashed
        # here on an empty cumsum.
        return hex(1)[2:]
    # (cumsum(b) + 1)[-1] == 1 + sum(b)
    A = (np.cumsum(b) + 1)[-1] % mod_adler
    # Summing the running values of A gives B = n + sum of prefix sums.
    B = np.cumsum(np.cumsum(b) + 1)[-1] % mod_adler
    A32 = B * 65536 + A
    return hex(A32)[2:]
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
    # run main code: demo the checksum on the standard 'Wikipedia' example
    mystring = 'Wikipedia'
    mycode = adler32(mystring)
    print('{0} --> {1}'.format(mystring, mycode))
# =============================================================================
# End of code
# =============================================================================
|
{"hexsha": "6e4fd4a101d5bae60563f82a281ac2825cbe00ab", "size": 1412, "ext": "py", "lang": "Python", "max_stars_repo_path": "updates_to_drs/ea_alder32_code.py", "max_stars_repo_name": "njcuk9999/apero-utils", "max_stars_repo_head_hexsha": "f77de4c9123874e5bb6ed6bd03a7de3b27057402", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-08T17:03:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-09T17:49:44.000Z", "max_issues_repo_path": "misc/updates_to_drs/ea_alder32_code.py", "max_issues_repo_name": "njcuk9999/apero-drs", "max_issues_repo_head_hexsha": "83b043e9f277a011b03e0227c77307961b200901", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 43, "max_issues_repo_issues_event_min_datetime": "2020-10-06T18:42:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T21:23:10.000Z", "max_forks_repo_path": "misc/updates_to_drs/ea_alder32_code.py", "max_forks_repo_name": "njcuk9999/apero-drs", "max_forks_repo_head_hexsha": "83b043e9f277a011b03e0227c77307961b200901", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-04-10T06:41:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-16T21:09:14.000Z", "avg_line_length": 26.641509434, "max_line_length": 79, "alphanum_fraction": 0.3321529745, "include": true, "reason": "import numpy", "num_tokens": 270}
|
"""
    imview(colortype::Type{<:ColorTypes.Colorant}, imgvalues...)

Lazily wrap raw band matrices as an image of `colortype`.

Each matrix is reinterpreted (no copy) as `Normed` fixed-point values sized
to the element type's bit width, fused into colorants with
`ImageCore.colorview`, and the two dimensions are swapped via
`PermutedDimsArray((2, 1))` — presumably converting GDAL's (col, row) pixel
order into the (row, col) image convention; verify against callers.
"""
function imview(colortype::Type{<:ColorTypes.Colorant}, imgvalues...)
    return PermutedDimsArray(
        ImageCore.colorview(
            colortype,
            (
                ImageCore.normedview(
                    ImageCore.Normed{eltype(img),8 * sizeof(eltype(img))},
                    img,
                ) for img in imgvalues
            )...,
        ),
        (2, 1),
    )
end
"""
    imview(gci::GDALColorInterp, imgvalues; colortable = ColorTable(C_NULL))

Render a single band as an image according to its color interpretation.

Gray/undefined bands become grayscale; a lone R/G/B band is padded with zero
matrices for the two missing channels; palette-indexed bands are expanded
through `colortable`, which must be non-null in that case.
"""
function imview(
    gci::GDALColorInterp,
    imgvalues::AbstractMatrix;
    colortable::ColorTable = ColorTable(C_NULL),
)
    return if gci == GCI_GrayIndex
        imview(ColorTypes.Gray, imgvalues)
    elseif gci == GCI_Undefined
        imview(ColorTypes.Gray, imgvalues)
    elseif gci == GCI_RedBand
        zerovalues = zeros(eltype(imgvalues), size(imgvalues))
        imview(GPI_RGB, imgvalues, zerovalues, zerovalues)
    elseif gci == GCI_GreenBand
        zerovalues = zeros(eltype(imgvalues), size(imgvalues))
        imview(GPI_RGB, zerovalues, imgvalues, zerovalues)
    elseif gci == GCI_BlueBand
        zerovalues = zeros(eltype(imgvalues), size(imgvalues))
        imview(GPI_RGB, zerovalues, zerovalues, imgvalues)
    elseif gci == GCI_PaletteIndex
        if colortable.ptr == C_NULL
            error(
                """
                `imview` is only supported for `GCI_PaletteIndex` with non-null
                colortables.
                """,
            )
        end
        gpi = paletteinterp(colortable)
        if gpi == GPI_Gray
            imview(GPI_Gray, imgvalues)
        elseif gpi == GPI_RGB
            # Materialize the RGBA lookup table; GDAL color entries are
            # 0-based, hence `i - 1`.
            colorentries = GDAL.GDALColorEntry[
                getcolorentryasrgb(colortable, i - 1) for
                i in 1:GDAL.gdalgetcolorentrycount(colortable.ptr)
            ]
            # Expand palette indices into four per-channel UInt8 planes.
            c1 = Matrix{UInt8}(undef, size(imgvalues)...)
            c2 = Matrix{UInt8}(undef, size(imgvalues)...)
            c3 = Matrix{UInt8}(undef, size(imgvalues)...)
            c4 = Matrix{UInt8}(undef, size(imgvalues)...)
            for i in eachindex(imgvalues)
                c1[i] = UInt8(colorentries[imgvalues[i]+1].c1)
                c2[i] = UInt8(colorentries[imgvalues[i]+1].c2)
                c3[i] = UInt8(colorentries[imgvalues[i]+1].c3)
                c4[i] = UInt8(colorentries[imgvalues[i]+1].c4)
            end
            imview(GPI_RGB, c1, c2, c3, c4)
        else
            error("""
            Unsupported GPI: $(paletteinterp(colortable)). Please file an
            issue at https://github.com/yeesian/ArchGDAL.jl/issues if it
            should be supported.
            """)
        end
    else
        error("""
        Unknown GCI: $gci. Please file an issue at
        https://github.com/yeesian/ArchGDAL.jl/issues if it should be
        supported.
        """)
    end
end
"""
    imview(gpi::GDALPaletteInterp, imgvalues::AbstractMatrix)

Render a single plane for palette interpretation `gpi`; only `GPI_Gray`
is supported with one plane.
"""
function imview(gpi::GDALPaletteInterp, imgvalues::AbstractMatrix)
    return if gpi == GPI_Gray
        imview(ColorTypes.Gray, imgvalues)
    else
        error("""
        Unsupported GPI: $gpi. Please file an issue at
        https://github.com/yeesian/ArchGDAL.jl/issues if it should be
        supported.
        """)
    end
end
"""
    imview(gpi::GDALPaletteInterp, c1, c2, c3)

Render three planes: as grayscale channels for `GPI_Gray`, or as R, G, B
for `GPI_RGB`.
"""
function imview(
    gpi::GDALPaletteInterp,
    c1::AbstractMatrix,
    c2::AbstractMatrix,
    c3::AbstractMatrix,
)
    return if gpi == GPI_Gray
        imview(ColorTypes.Gray, c1, c2, c3)
    elseif gpi == GPI_RGB
        imview(ColorTypes.RGB, c1, c2, c3)
    else
        error("""
        Unsupported GPI: $gpi. If it should be supported, please file an
        issue at https://github.com/yeesian/ArchGDAL.jl/issues with the
        desired output.
        """)
    end
end
"""
    imview(gpi::GDALPaletteInterp, c1, c2, c3, c4)

Render four planes: as grayscale channels for `GPI_Gray`, or as R, G, B, A
for `GPI_RGB`.
"""
function imview(
    gpi::GDALPaletteInterp,
    c1::AbstractMatrix,
    c2::AbstractMatrix,
    c3::AbstractMatrix,
    c4::AbstractMatrix,
)
    return if gpi == GPI_Gray
        imview(ColorTypes.Gray, c1, c2, c3, c4)
    elseif gpi == GPI_RGB
        imview(ColorTypes.RGBA, c1, c2, c3, c4)
    else
        error("""
        Unsupported GPI: $gpi. If it should be supported, please file an
        issue at https://github.com/yeesian/ArchGDAL.jl/issues with the
        desired output.
        """)
    end
end
"""
    imread(colortype, dataset, i, args...)

Read band `i` of `dataset` and render it with an explicit palette or color
interpretation; extra `args` are forwarded to the band `read`.
"""
function imread(
    colortype::Union{GDALPaletteInterp,GDALColorInterp},
    dataset::AbstractDataset,
    i::Integer,
    args...,
)
    return imread(colortype, getband(dataset, i), args...)
end
# Read a raster band and render it with the given palette interpretation.
function imread(gpi::GDALPaletteInterp, rb::AbstractRasterBand, args...)
    return imview(gpi, read(rb, args...))
end
# Read a raster band and render it with the given color interpretation,
# fetching the band's color table for palette expansion.
function imread(gci::GDALColorInterp, rb::AbstractRasterBand, args...)
    return getcolortable(rb) do colortable
        return imview(gci, read(rb, args...), colortable = colortable)
    end
end
# Read a raster band using its own declared color interpretation.
function imread(rb::AbstractRasterBand, args...)
    return getcolortable(rb) do colortable
        return imview(
            getcolorinterp(rb),
            read(rb, args...),
            colortable = colortable,
        )
    end
end
# Read band `i` of a dataset as an image.
function imread(dataset::AbstractDataset, i::Integer, args...)
    return imread(getband(dataset, i), args...)
end
"""
    _colorindices(dataset, indices)

Classify the color interpretations of the selected bands and return
`(colortype, ordered_indices)`, where `ordered_indices` permutes `indices`
into Gray / palette / R,G,B[,A] order.

NOTE(review): `unique` collapses duplicate interpretations, so this assumes
`indices` holds at most one band per color interpretation — otherwise
`indices[sortperm(gci)]` would drop/misorder bands; verify at call sites.
"""
function _colorindices(dataset::AbstractDataset, indices)
    gci = unique(
        GDALColorInterp[getcolorinterp(getband(dataset, i)) for i in indices],
    )
    gciorder = sort(gci)
    colortype = if gciorder == [GCI_GrayIndex]
        GCI_GrayIndex
    elseif gciorder == [GCI_PaletteIndex]
        GCI_PaletteIndex
    elseif gciorder == [GCI_RedBand, GCI_GreenBand, GCI_BlueBand]
        GPI_RGB
    elseif gciorder == [GCI_RedBand, GCI_GreenBand, GCI_BlueBand, GCI_AlphaBand]
        GPI_RGB
    else
        error("""
        Unknown GCI: $gciorder. Please file an issue at
        https://github.com/yeesian/ArchGDAL.jl/issues if it should be
        supported.
        """)
    end
    # sortperm(gci) reorders the band indices to match the sorted
    # interpretations (e.g. R, G, B regardless of on-disk band order).
    return colortype, Tuple(indices[sortperm(gci)])
end
"""
    imread(dataset, indices, args...)

Read the bands in `indices`, classify their joint color interpretation, and
compose them into a single image. Palette-indexed data uses the color table
of band 1.
"""
function imread(dataset::AbstractDataset, indices, args...)
    colortype, idxs = _colorindices(dataset, indices)
    return if colortype == GCI_PaletteIndex
        getcolortable(getband(dataset, 1)) do colortable
            return imview(
                colortype,
                (read(getband(dataset, i), args...) for i in idxs)...,
                colortable = colortable,
            )
        end
    else
        imview(colortype, (read(getband(dataset, i), args...) for i in idxs)...)
    end
end
# Read every band of `dataset` as a single composed image.
imread(dataset::AbstractDataset) = imread(dataset, 1:nraster(dataset))
"""
    imread(filename::AbstractString)

Open the raster at `filename`, compose it into an image, and close the
dataset when done.
"""
function imread(filename::AbstractString)
    # Equivalent to the do-block form: `read` opens the dataset, applies the
    # function, and handles cleanup.
    return read(ds -> imread(ds), filename)
end
|
{"hexsha": "0b7089122ea67993550a81ff531c784e6383791c", "size": 6557, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/raster/images.jl", "max_stars_repo_name": "mattwigway/ArchGDAL.jl", "max_stars_repo_head_hexsha": "3f63e18545286fadbfbf386a06a148118808e8a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/raster/images.jl", "max_issues_repo_name": "mattwigway/ArchGDAL.jl", "max_issues_repo_head_hexsha": "3f63e18545286fadbfbf386a06a148118808e8a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/raster/images.jl", "max_forks_repo_name": "mattwigway/ArchGDAL.jl", "max_forks_repo_head_hexsha": "3f63e18545286fadbfbf386a06a148118808e8a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6763285024, "max_line_length": 80, "alphanum_fraction": 0.5918865335, "num_tokens": 1733}
|
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from torchvision import models
import zipfile
import io
import tqdm
def uncertain_logits_to_probs(logits):
    """Convert explicit uncertainty modeling logits to probabilities P(is_abnormal).

    Args:
        logits: Input of shape (batch_size, num_tasks * 3).

    Returns:
        probs: Output of shape (batch_size, num_tasks).
            Position (i, j) interpreted as P(example i has pathology j).

    Raises:
        ValueError: if the flat logits dimension is not a multiple of 3.
    """
    num_classes = 3
    batch_size, flat_dim = logits.size()
    if flat_dim % num_classes != 0:
        raise ValueError('Expected logits dimension to be divisible by {}, got size {}.'.format(num_classes, flat_dim))
    num_tasks = flat_dim // num_classes

    # Regroup into one 3-way head per task, drop the first class (index 0),
    # softmax over the remaining pair, and keep the second entry.
    per_task = logits.view(batch_size, num_tasks, num_classes)
    pair_probs = F.softmax(per_task[:, :, 1:], dim=-1)
    return pair_probs[:, :, 1]
class Model(nn.Module):
    """Models from TorchVision's GitHub page of pretrained neural networks:
    https://github.com/pytorch/vision/tree/master/torchvision/models

    Wraps a torchvision backbone whose final classifier is replaced by a
    linear head sized for `task_sequence` (three outputs per task when
    explicit uncertainty modeling is enabled, one otherwise).
    """
    def __init__(self, model_fn, task_sequence, model_uncertainty, use_gpu):
        super(Model, self).__init__()
        # Maps task name -> output index (used by infer()).
        self.task_sequence = task_sequence
        # 3-class uncertainty heads need the softmax-based conversion;
        # otherwise each output is an independent sigmoid probability.
        self.get_probs = uncertain_logits_to_probs if model_uncertainty else torch.sigmoid
        self.use_gpu = use_gpu
        # Set pretrained to False to avoid loading weights which will be overwritten
        self.model = model_fn(pretrained=False)
        self.pool = nn.AdaptiveAvgPool2d(1)
        num_ftrs = self.model.classifier.in_features
        if model_uncertainty:
            num_outputs = 3 * len(task_sequence)
        else:
            num_outputs = len(task_sequence)
        self.model.classifier = nn.Linear(num_ftrs, num_outputs)
    def forward(self, x):
        """Return raw classifier logits for a batch of images."""
        x = self.model.features(x)
        x = F.relu(x, inplace=True)
        x = self.pool(x).view(x.size(0), -1)
        x = self.model.classifier(x)
        return x
    def features2(self, x):
        """Return pooled backbone features (the input to the classifier head)."""
        features = self.model.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1)).view(features.size(0), -1)
        return out
    def infer(self, x, tasks):
        """Run the model on x; return {task: probability tensor} for `tasks`.

        Only the first example of the batch is used (`[0]` below).
        """
        preds = self(x)
        probs = self.get_probs(preds)[0]
        task2results = {}
        for task in tasks:
            idx = self.task_sequence[task]
            task_prob = probs[idx]
            task2results[task] = task_prob
        return task2results
class DenseNet121(Model):
    """`Model` specialization bound to torchvision's densenet121 backbone."""
    def __init__(self, task_sequence, model_uncertainty, use_gpu):
        super(DenseNet121, self).__init__(models.densenet121, task_sequence, model_uncertainty, use_gpu)
def load_individual(weights_zip, ckpt_path, model_uncertainty, use_gpu=False):
    """Load one DenseNet121 checkpoint from an open zip archive.

    Args:
        weights_zip: zipfile.ZipFile containing the checkpoint members.
        ckpt_path: member path of the checkpoint inside the archive.
        model_uncertainty: whether the checkpoint uses 3-class uncertainty heads.
        use_gpu: move the model to 'cuda:0' when True, else keep it on CPU.

    Returns:
        (model, ckpt_info): an eval-mode, nn.DataParallel-wrapped DenseNet121
        and the checkpoint's stored 'ckpt_info' metadata.
    """
    with weights_zip.open(ckpt_path) as file:
        # Buffer the member fully in memory; torch.load needs a seekable stream.
        stream = io.BytesIO(file.read())
        ckpt_dict = torch.load(stream, map_location="cpu")
    device = 'cuda:0' if use_gpu else 'cpu'
    # Build model, load parameters
    task_sequence = ckpt_dict['task_sequence']
    model = DenseNet121(task_sequence, model_uncertainty, use_gpu)
    # Wrap before loading — the saved state dict presumably carries
    # DataParallel's 'module.' key prefix; verify against the checkpoints.
    model = nn.DataParallel(model)
    model.load_state_dict(ckpt_dict['model_state'])
    return model.eval().to(device), ckpt_dict['ckpt_info']
class Tasks2Models(object):
    """
    Main attribute is a (task tuple) -> {iterator, list} dictionary,
    which loads models iteratively depending on the
    specified task.

    Tasks that share the same list of checkpoint dicts are grouped under a
    single tuple key so each ensemble is set up only once.
    """
    def __init__(self, config_path, weights_zip, num_models=1, dynamic=True, use_gpu=False):
        # NOTE(review): `super(Tasks2Models).__init__()` lacks `self`
        # (unbound super), so object.__init__ is not actually invoked;
        # harmless with this base class, but likely intended as
        # `super().__init__()`.
        super(Tasks2Models).__init__()
        self.get_config(config_path)
        self.dynamic = dynamic
        self.use_gpu = use_gpu
        self.weights_zip = zipfile.ZipFile(weights_zip)
        # dynamic=True -> reload models lazily on every iteration;
        # dynamic=False -> load all models up front and reuse them.
        if dynamic:
            model_loader = self.model_iterator
        else:
            model_loader = self.model_list
        # Group tasks that are served by the same (hashable) model-dict list.
        model_dicts2tasks = {}
        for task, model_dicts in self.task2model_dicts.items():
            hashable_model_dict = self.get_hashable(model_dicts)
            if hashable_model_dict in model_dicts2tasks:
                model_dicts2tasks[hashable_model_dict].append(task)
            else:
                model_dicts2tasks[hashable_model_dict] = [task]
        # Initialize the iterators
        self.tasks2models = {}
        for task, model_dicts in self.task2model_dicts.items():
            hashable_model_dict = self.get_hashable(model_dicts)
            tasks = tuple(model_dicts2tasks[hashable_model_dict])
            if tasks not in self.tasks2models:
                self.tasks2models[tasks] = model_loader(model_dicts,
                                                        num_models=num_models,
                                                        desc="Loading weights {}".format(tasks))
        self.tasks = list(self.task2model_dicts.keys())
    def get_hashable(self, model_dicts):
        # Convert a list of dicts into nested tuples so it can key a dict.
        return tuple([tuple(model_dict.items()) for model_dict in model_dicts])
    @property
    def module(self):
        # Lets callers write `obj.module` uniformly — presumably mirroring
        # nn.DataParallel's `.module` attribute.
        return self
    def get_config(self, config_path):
        """Read configuration from a JSON file.

        Args:
            config_path: Path to configuration JSON file.

        Sets:
            self.task2model_dicts: mapping of task name to a list of dicts,
                each with keys 'ckpt_path' and 'is_3class'.
            self.aggregation_fn: torch.max or torch.mean, used to combine
                ensemble predictions.

        Raises:
            ValueError: if 'aggregation_method' is not "max" or "mean".
        """
        with open(config_path, 'r') as json_fh:
            config_dict = json.load(json_fh)
        self.task2model_dicts = config_dict['task2models']
        agg_method = config_dict['aggregation_method']
        if agg_method == 'max':
            self.aggregation_fn = torch.max
        elif agg_method == 'mean':
            self.aggregation_fn = torch.mean
        else:
            raise ValueError('Invalid configuration: {} = {} (expected "max" or "mean")'.format('aggregation_method', agg_method))
    def model_iterator(self, model_dicts, num_models, desc=""):
        """Return a zero-arg callable that lazily loads up to num_models models."""
        def iterator():
            for model_dict in model_dicts[:num_models]:
                ckpt_path = model_dict['ckpt_path']
                model_uncertainty = model_dict['is_3class']
                model, ckpt_info = load_individual(self.weights_zip, ckpt_path, model_uncertainty, self.use_gpu)
                yield model
        return iterator
    def model_list(self, model_dicts, num_models, desc=""):
        """Eagerly load up to num_models models; return a callable yielding the list."""
        loaded_models = []
        toiter = tqdm.tqdm(model_dicts[:num_models])
        toiter.set_description(desc)
        for model_dict in toiter:
            ckpt_path = model_dict['ckpt_path']
            model_uncertainty = model_dict['is_3class']
            model, ckpt_info = load_individual(self.weights_zip, ckpt_path, model_uncertainty, self.use_gpu)
            loaded_models.append(model)
        def iterator():
            return loaded_models
        return iterator
    def infer(self, img, tasks):
        """Run the ensemble for `tasks` on `img` and aggregate per-task results."""
        ensemble_probs = []
        model_iterable = self.tasks2models[tasks]
        # Collect each member's per-task probability.
        task2ensemble_results = {}
        for model in model_iterable():
            individual_task2results = model.module.infer(img, tasks)
            for task in tasks:
                if task not in task2ensemble_results:
                    task2ensemble_results[task] = [individual_task2results[task]]
                else:
                    task2ensemble_results[task].append(individual_task2results[task])
        assert all([task in task2ensemble_results for task in tasks]),\
            "Not all tasks in task2ensemble_results"
        task2results = {}
        for task in tasks:
            ensemble_probs = task2ensemble_results[task]
            # NOTE(review): with agg_method == 'max', torch.max(..., dim=0)
            # returns a (values, indices) pair rather than a bare tensor,
            # unlike torch.mean — confirm callers handle both shapes.
            task2results[task] = self.aggregation_fn(torch.stack(ensemble_probs), dim=0)
        assert all([task in task2results for task in tasks]), "Not all tasks in task2results"
        return task2results
    def features(self, img, tasks):
        """
        Return shape is [3, 30, 1, 1024]
        3 task groups, 30 models each

        NOTE(review): this method stacks only the single ensemble for
        `tasks`, i.e. [num_models, batch, feat_dim]; the shape above
        presumably describes collecting all three task groups — verify.
        """
        ensemble_probs = []
        model_iterable = self.tasks2models[tasks]
        ensemble_results = []
        for model in model_iterable():
            individual_feats = model.module.features2(img)
            ensemble_results.append(individual_feats)
        return torch.stack(ensemble_results)
    def __iter__(self):
        # Iterate over the task-group tuples (keys of tasks2models).
        return iter(self.tasks2models)
|
{"hexsha": "b25dcfc994bbee9a1e5c893d100d4084caab445a", "size": 8674, "ext": "py", "lang": "Python", "max_stars_repo_path": "torchxrayvision/baseline_models/chexpert/model.py", "max_stars_repo_name": "KiLJ4EdeN/torchxrayvision", "max_stars_repo_head_hexsha": "18985291b217d51bd7d46c8a0dc069a78a82755e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 468, "max_stars_repo_stars_event_min_datetime": "2020-03-14T19:07:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T04:41:48.000Z", "max_issues_repo_path": "torchxrayvision/baseline_models/chexpert/model.py", "max_issues_repo_name": "KiLJ4EdeN/torchxrayvision", "max_issues_repo_head_hexsha": "18985291b217d51bd7d46c8a0dc069a78a82755e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2020-03-16T18:20:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T00:00:54.000Z", "max_forks_repo_path": "torchxrayvision/baseline_models/chexpert/model.py", "max_forks_repo_name": "KiLJ4EdeN/torchxrayvision", "max_forks_repo_head_hexsha": "18985291b217d51bd7d46c8a0dc069a78a82755e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 143, "max_forks_repo_forks_event_min_datetime": "2020-03-15T11:41:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T07:16:02.000Z", "avg_line_length": 32.4868913858, "max_line_length": 130, "alphanum_fraction": 0.6286603643, "include": true, "reason": "import numpy", "num_tokens": 1893}
|
#ifndef TIKPP_DETAIL_TYPE_TRAITS_STREAM_HPP
#define TIKPP_DETAIL_TYPE_TRAITS_STREAM_HPP

// Compile-time detection of the Boost.Asio asynchronous stream concepts.
// HAS_MEMBER_FUNCTION (from macros.hpp) generates has_<name>_v<T> traits
// that check whether T exposes a member callable with the given arguments.

#include "tikpp/detail/type_traits/macros.hpp"

#include <boost/asio/buffer.hpp>
#include <boost/system/error_code.hpp>

#include <functional>
#include <type_traits>

namespace tikpp::detail::type_traits {

// Detects T::async_write_some(const_buffer, handler(error_code, bytes)).
HAS_MEMBER_FUNCTION(async_write_some,
                    (std::declval<boost::asio::const_buffer>(),
                     std::declval<std::function<void(
                         const boost::system::error_code &, std::size_t)>>()))

// Detects T::async_read_some(mutable_buffer, handler(error_code, bytes)).
HAS_MEMBER_FUNCTION(async_read_some,
                    (std::declval<boost::asio::mutable_buffer>(),
                     std::declval<std::function<void(
                         const boost::system::error_code &, std::size_t)>>()))

// Detects T::get_executor() — required by every Asio I/O object.
HAS_MEMBER_FUNCTION(get_executor, ())

// T models an async write stream: async_write_some + get_executor.
template <typename T>
constexpr auto is_async_write_stream_v =
    has_async_write_some_v<T> &&has_get_executor_v<T>;

// T models an async read stream: async_read_some + get_executor.
template <typename T>
constexpr auto is_async_read_stream_v =
    has_async_read_some_v<T> &&has_get_executor_v<T>;

// T models a full duplex async stream (both read and write).
template <typename T>
constexpr auto is_async_stream_v =
    is_async_write_stream_v<T> &&is_async_read_stream_v<T>;

} // namespace tikpp::detail::type_traits

#endif
|
{"hexsha": "b8f763e2958e9f5a553801a79764e4c6ce0089d4", "size": 1263, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/tikpp/detail/type_traits/stream.hpp", "max_stars_repo_name": "aymanalqadhi/tikpp", "max_stars_repo_head_hexsha": "8e94abdc4ac8c85dd893780ad4256cdd6690a758", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2021-02-07T08:21:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T04:33:18.000Z", "max_issues_repo_path": "include/tikpp/detail/type_traits/stream.hpp", "max_issues_repo_name": "xSHAD0Wx/tikpp", "max_issues_repo_head_hexsha": "8e94abdc4ac8c85dd893780ad4256cdd6690a758", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/tikpp/detail/type_traits/stream.hpp", "max_forks_repo_name": "xSHAD0Wx/tikpp", "max_forks_repo_head_hexsha": "8e94abdc4ac8c85dd893780ad4256cdd6690a758", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-10-18T20:00:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T09:04:46.000Z", "avg_line_length": 30.8048780488, "max_line_length": 78, "alphanum_fraction": 0.6682501979, "num_tokens": 267}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Consistency check for location at sea QC test.
"""
from numpy import ma
from cotede.qctests.location_at_sea import (
LocationAtSea,
location_at_sea,
get_bathymetry,
)
from data import DummyData
def test_bathymetry_point():
    """Verify the ETOPO elevation returned for individual positions."""
    expected = ((10, 30, -366), (10, -30, 5192), (15, -38, 5036), (12, 222, 4995))
    for lat, lon, z in expected:
        etopo = get_bathymetry(lat, lon, resolution="5min")
        assert "bathymetry" in etopo, "Missing bathymetry from get_bathymetry"
        msg = "For ({},{}) expected {}".format(lat, lon, z)
        assert ma.allclose(etopo["bathymetry"], [z]), msg
def test_bathymetry_track():
    """Verify the ETOPO elevations returned for a whole track at once."""
    track_lat = [10, 10, 15, 12]
    track_lon = [30, -30, -38, 222]
    expected_z = [-366, 5192, 5036, 4995]
    etopo = get_bathymetry(track_lat, track_lon, resolution="5min")
    assert "bathymetry" in etopo, "Missing bathymetry from get_bathymetry"
    assert ma.allclose(etopo["bathymetry"], expected_z), "Unexpected value"
def test_bathymetry_greenwich():
    """Verify elevations for positions at and around longitude zero."""
    for lat, lon, z in ((0, 0, 4876), (6, 0, -76), (-10, 0, 5454)):
        etopo = get_bathymetry(lat, lon, resolution="5min")
        assert "bathymetry" in etopo, "Missing bathymetry from get_bathymetry"
        msg = "For ({},{}) expected {}".format(lat, lon, z)
        assert ma.allclose(etopo["bathymetry"], [z]), msg
def test_attribute():
    """Valid at-sea positions given via attrs must be flagged good (1)."""
    data = DummyData()
    for lat, lon, flag in ((10, -30, 1), (10, 330, 1)):
        data.attrs['LATITUDE'] = lat
        data.attrs['LONGITUDE'] = lon
        assert location_at_sea(data) == flag
def test_attribute_inland():
    """Inland positions given via attrs must be flagged bad (3)."""
    data = DummyData()
    for lat, lon, flag in ((-10, -60, 3), (-10, 300, 3)):
        data.attrs['LATITUDE'] = lat
        data.attrs['LONGITUDE'] = lon
        assert location_at_sea(data) == flag
def notready_test_data():
    """(Disabled) location_at_sea with coordinates stored in the data dict.

    Named without the ``test_`` prefix so pytest skips it until the
    data-based coordinate path is ready.
    """
    data = DummyData()
    for lon in (-30, 330):
        data.data['LATITUDE'] = 10
        data.data['LONGITUDE'] = lon
        flag = location_at_sea(data)
        assert flag == 1
def test_badlocation():
    """Out-of-range coordinates must be flagged bad (3)."""
    data = DummyData()
    cases = ((91, -30, 3), (-91, -30, 3), (10, -361, 3), (10, 1000, 3))
    for lat, lon, flag in cases:
        data.attrs['LATITUDE'] = lat
        data.attrs['LONGITUDE'] = lon
        assert location_at_sea(data) == flag
def test_nonelocation():
    """Missing or None coordinates must return flag 0 (no QC possible)."""
    data = DummyData()
    for lat, lon, flag in ((None, 1, 0), (1, None, 0)):
        data.attrs['LATITUDE'] = lat
        data.attrs['LONGITUDE'] = lon
        assert location_at_sea(data) == flag
    # Also check the attribute being absent entirely, one side at a time.
    del data.attrs['LATITUDE']
    data.attrs['LONGITUDE'] = 1
    assert location_at_sea(data) == 0
    del data.attrs['LONGITUDE']
    data.attrs['LATITUDE'] = 1
    assert location_at_sea(data) == 0
def test_LocationAtSea_attrs():
    """Standard check for a single position taken from the attrs.

    Lat & lon are defined in the attrs. The etopo resolution is locked
    because a different resolution can change the values.
    """
    profile = DummyData()
    check = LocationAtSea(profile, cfg={'resolution': '5min'})
    assert hasattr(check, 'features')
    assert 'bathymetry' in check.features
    assert ma.allclose(check.features['bathymetry'], 5036)
    assert hasattr(check, 'flags')
    assert 'location_at_sea' in check.flags
    assert ma.allclose(check.flags['location_at_sea'], 1)
def test_LocationAtSea_track():
    """Standard check with multiple positions stored in the dataset.

    This is the TSG-like case where each measurement carries its own
    position. The etopo resolution is locked because a different
    resolution can change the values.

    There is no restriction on the number of locations: here there are
    multiple depths but only 3 positions. It is not LocationAtSea's job
    to make sense of that or to decide which variable each position
    matches — that happens once the tests are combined.
    """
    profile = DummyData()
    profile.data['LATITUDE'] = [15, 12, 8]
    profile.data['LONGITUDE'] = [-38, 222, 0]
    check = LocationAtSea(profile, cfg={'resolution': '5min'})
    assert hasattr(check, 'features')
    assert 'bathymetry' in check.features
    assert ma.allclose(check.features['bathymetry'], [5036, 4995, -122])
    assert hasattr(check, 'flags')
    assert 'location_at_sea' in check.flags
    assert ma.allclose(check.flags['location_at_sea'], [1, 1, 4])
|
{"hexsha": "81dd8a969e17be543578018f3092d8d533a7b54a", "size": 4597, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/qctests/test_qc_location_at_sea.py", "max_stars_repo_name": "jessicaaustin/CoTeDe", "max_stars_repo_head_hexsha": "0ca2a1c71de980d91262fd36fd5d8ab8cc09f019", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/qctests/test_qc_location_at_sea.py", "max_issues_repo_name": "jessicaaustin/CoTeDe", "max_issues_repo_head_hexsha": "0ca2a1c71de980d91262fd36fd5d8ab8cc09f019", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/qctests/test_qc_location_at_sea.py", "max_forks_repo_name": "jessicaaustin/CoTeDe", "max_forks_repo_head_hexsha": "0ca2a1c71de980d91262fd36fd5d8ab8cc09f019", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0949367089, "max_line_length": 80, "alphanum_fraction": 0.6243202088, "include": true, "reason": "from numpy", "num_tokens": 1349}
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# ===========
# Libraries
# ===========
import os
import sys
import time
import warnings
import imageio
import matplotlib.pyplot as plt
import numpy as np
from skimage import exposure, img_as_uint
# Custom Libraries
from modules import metrics
from modules.args import args
from modules.utils import settings
# =========================
# [Test] Framework Config
# =========================
# Save predictions/ground truth as uint16 PNGs for the external eval tools.
SAVE_TEST_DISPARITIES = True  # Default: True
# Display matplotlib figures while evaluating.
showImages = True

# ==================
#  Global Variables
# ==================
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # Limits TensorFlow to see only the specified GPU.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Silence TensorFlow C++ logging below ERROR.
warnings.filterwarnings("ignore")  # Suppress Warnings
def read_text_file(filename, dataset_path='/media/nicolas/nicolas_seagate/datasets/kitti/'):
    """Read a two-column split file and prepend the dataset root to each path.

    Args:
        filename: Tab-separated text file; first column lists continuous-depth
            image paths, second column the semi-dense (KITTI Depth) ones.
        dataset_path: Root directory prefixed to every path in the file.

    Returns:
        Tuple ``(depth_continuous_filenames, depth_semidense_filenames)``,
        both lists of full paths.

    Raises:
        OSError: If the split file cannot be read.
    """
    print("\n[Dataloader] Loading '%s'..." % filename)
    try:
        data = np.genfromtxt(filename, dtype='str', delimiter='\t')

        # Parsing Data
        depth_continuous_filenames = list(data[:, 0])
        depth_semidense_filenames = list(data[:, 1])

        timer = -time.time()
        depth_continuous_filenames = [dataset_path + entry for entry in depth_continuous_filenames]
        depth_semidense_filenames = [dataset_path + entry for entry in depth_semidense_filenames]
        timer += time.time()
        print('time:', timer, 's\n')
    except OSError as err:
        # Chain the original exception so the root cause is not lost.
        raise OSError("Could not find the '%s' file." % filename) from err

    return depth_continuous_filenames, depth_semidense_filenames
def read_depth_image(filename, div=1.0):
    """Load a depth image as float32, rescaled by ``div``."""
    raw = imageio.imread(filename)
    return raw.astype('float32') / div
def imsave_as_uint16_png(filename, image_float32):
    """Rescale a float32 image, convert it to uint16, and save it as PNG."""
    rescaled = exposure.rescale_intensity(image_float32, out_range='float')
    imageio.imsave(filename, img_as_uint(rescaled))
def evaluate_densification():
    """Compare close(k)-densified depth maps against KITTI Depth ground truth.

    Loads pairs of depth PNGs (a morphologically-closed map and the matching
    semi-dense KITTI Depth map), inspects the artefacts introduced by the
    close operation, optionally displays them, saves both maps as uint16
    PNGs for the external tools, and finally runs the selected evaluation
    tool (args.eval_tool: 'monodepth' or 'kitti_depth').

    NOTE(review): the filename lists are hard-coded to local paths below;
    the read_text_file() call for the real split is left commented out.
    """
    # Loads split file containing Input and Output filenames
    # input_filenames, output_filenames = read_text_file('data/new_splits/eigen_split_based_on_kitti_depth/eigen_test_kitti_depth_aligned_with_kitti_continuous_files.txt')
    input_filenames = ['/home/nicolas/Downloads/depth_interpolation/close_k_analysis/0000000005_close_k_19.png']
    output_filenames = ['/home/nicolas/Downloads/depth_interpolation/close_k_analysis/0000000005.png']
    assert len(input_filenames) == len(output_filenames)
    print(len(input_filenames), len(output_filenames))

    # Read Images
    input_depths, output_depths = [], []
    num_test_images = len(output_filenames)
    fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1)
    for i, (input_filename, output_filename) in enumerate(list(zip(input_filenames, output_filenames))):
        # div=256.0: PNGs presumably store depth*256 (KITTI convention) — confirm.
        close_depth = read_depth_image(input_filename, 256.0)
        kitti_depth_depth = read_depth_image(output_filename, 256.0)

        # Fix Data shift caused by close operation
        artefacts = close_depth - kitti_depth_depth
        real_proof = kitti_depth_depth + artefacts
        # Sanity check: (gt + artefacts) - close should be all zeros.
        real_proof2 = (close_depth - real_proof)
        print(real_proof2)

        print(close_depth[close_depth > 0.0].size,
              kitti_depth_depth[kitti_depth_depth > 0.0].size,
              artefacts[artefacts > 0.0].size)  # Number of Valid Pixels

        print(close_depth.shape)
        print(kitti_depth_depth.shape)
        print(np.min(close_depth), np.max(close_depth))

        if showImages:
            ax1.imshow(close_depth)
            ax1.set_title('close(k=2)')
            ax2.imshow(kitti_depth_depth)
            ax2.set_title('KITTI Depth')
            ax3.imshow(artefacts)
            ax3.set_title('Artefacts')
            ax4.imshow(real_proof)
            ax4.set_title('KITTI Depth + Artefacts')
            ax5.imshow(real_proof2)
            ax5.set_title('(KITTI Depth + Artefacts)-close(k=2)')

            # plt.draw()
            # plt.pause(0.001)
            plt.show()

        # Saves the Test Predictions as uint16 PNG Images
        if SAVE_TEST_DISPARITIES or args.eval_tool == 'monodepth':
            imsave_as_uint16_png(settings.output_tmp_pred_dir + 'pred' + str(i) + '.png', close_depth)
            imsave_as_uint16_png(settings.output_tmp_gt_dir + 'gt' + str(i) + '.png', kitti_depth_depth)

        input_depths.append(close_depth)
        output_depths.append(kitti_depth_depth)

        print('{}/{}'.format(i + 1, num_test_images))

    # Invokes Evaluation Tools
    if args.eval_tool == 'monodepth':
        metrics.evaluation_tool_monodepth(input_depths, output_depths)
    elif args.eval_tool == 'kitti_depth':
        metrics.evaluation_tool_kitti_depth(num_test_images)
    else:
        raise SystemError("Invalid 'eval_tool' selected. Choose one of the options: 'monodepth' or 'kitti_depth'.")
# ======
# Main
# ======
if __name__ == '__main__':
    # Entry point: run the densification evaluation, then exit explicitly.
    evaluate_densification()
    print("\nDone.")
    sys.exit()
|
{"hexsha": "7d02da9f24305969817a234467ddc22679516553", "size": 5018, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/evaluate_depth_densification_by_close_operation.py", "max_stars_repo_name": "nicolasrosa/Sparse-to-Continuous", "max_stars_repo_head_hexsha": "8664de17d6b6c6cc39bf8fcebfcb829249367f2f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2018-09-25T01:58:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-27T09:54:36.000Z", "max_issues_repo_path": "tensorflow/evaluate_depth_densification_by_close_operation.py", "max_issues_repo_name": "nicolasrosa/Sparse-to-Continuous", "max_issues_repo_head_hexsha": "8664de17d6b6c6cc39bf8fcebfcb829249367f2f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-03-24T18:18:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:35:13.000Z", "max_forks_repo_path": "tensorflow/evaluate_depth_densification_by_close_operation.py", "max_forks_repo_name": "nicolasrosa/Sparse-to-Continuous", "max_forks_repo_head_hexsha": "8664de17d6b6c6cc39bf8fcebfcb829249367f2f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-11-14T02:35:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-27T11:36:02.000Z", "avg_line_length": 35.0909090909, "max_line_length": 171, "alphanum_fraction": 0.6765643683, "include": true, "reason": "import numpy", "num_tokens": 1188}
|
HANSARD RÉVISÉ * NUMÉRO 22
Le mardi 28 octobre 1997
Presentation et premiere lecture>
LOI DE MISE EN OEUVRE DE L'ACCORD CANADA-YUKON SUR LE PETROLE
LOI SUR LA GESTION DES RESSOURCES DE LA VALLEE DU MACKENZIE
L'hon. Ethel Blondin-Andrew
LES SERVICES D'INCENDIE ET D'URGENCE DE MISSISSAUGA
LA CAMETOID ADVANCED TECHNOLOGIES LIMITED
LE MINISTRE DU DEVELOPPEMENT DES RESSOURCES HUMAINES
M. John O'Reilly
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
L'hon. Jean J. Charest
L'hon. Jean J. Charest
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
Le tres hon. Jean Chretien
M. Svend J. Robinson
L'hon. David M. Collenette
L'hon. Pierre S. Pettigrew
L'hon. Ralph E. Goodale
M. Jake E. Hoeppner
L'hon. Ralph E. Goodale
L'hon. David M. Collenette
L'hon. Ralph E. Goodale
LE TRIBUNAL CANADIEN DU COMMERCE EXTERIEUR
L'hon. Jean J. Charest
LOI SUR LA GESTION DES RESSOURCES DE LA VALLEE DU MACKENZIE
La Presidente suppleante (Mme Thibeault)
LOI SUR LA GESTION DES RESSOURCES DE LA VALLEE DU MACKENZIE
Jour designe-L'industrie de la peche au Canada
Adoption de la motion modifiee
LOI SUR LES JUSTES SALAIRES ET HEURES DE TRAVAIL
HANSARD REVISE * NUMERO 22
Le mardi 28 octobre 1997
La séance est ouverte à 10 heures.
M. Jim Pankiw (Saskatoon-Humboldt, Ref.):
Monsieur le Président, j'invoque le Règlement.
Y a-t-il consentement unanime pour adopter la proposition du député?
Il n'y a pas consentement unanime.
M. Werner Schmidt (Kelowna, Ref.):
M. Gordon Earle (Halifax-Ouest, NPD):
Monsieur le President, je suggere que toutes les questions soient reservees.
Les premieres etapes de ce transfert au territoire du Yukon remonte aux annees 1980.
Cela se fera au niveau territorial.
Cela revet une grande importance pour les Premieres Nations du Yukon.
Le Conseil des Premieres Nations du Yukon a indique qu'il appuyait cette loi.
M. Darrel Stinson (Okanagan-Shuswap, Ref.):
Je suis sur que le president connait cela tres bien.
Ce projet de loi revient periodiquement depuis quelque temps.
Le gouvernement federal demontre sa volonte de remettre des pouvoirs politiques au Territoire du Yukon.
Meme si les reformistes appuient cette mesure legislative, ils ont aussi quelques reserves.
Cette date prevue avait ensuite ete reportee a juillet 1997.
La mesure legislative donne au Yukon des pouvoirs d'ordre economique.
La mesure legislative suscitait des craintes bien legitimes.
Sur ce point, je suis force de questionner la sagesse du gouvernement.
Qui plus est, il incorpore les preoccupations et les modifications de la base.
M. Claude Bachand (Saint-Jean, BQ):
On sait que c'est une region qui est supposement tres, tres riche.
C'est un peu malheureux.
C'est l'attitude federale actuelle de centralisation.
Il m'apparait important aussi de situer le Yukon geographiquement.
Le Yukon est borde au nord par l'Inuvialuit.
Je rappelle qu'il y a quatre grandes regions inuit au Canada.
L'Inuvialuit a ete la premiere region reconnue par l'entente sur l'autonomie gouvernementale.
Il y a la des ententes d'autonomie gouvernementale qui sont en progression.
A l'est, on retrouve les Territoires du Nord-Ouest.
Ce sont de grandes nations autochtones qui sont voisines immediates du Yukon.
C'est ce qui nous a preoccupes au depart.
Le gouvernement s'etait d'ailleurs engage a la demenager.
Alors, c'est l'histoire et, malheureusement, l'histoire a tendance a se repeter.
Le Quebec est en avance et tient a le rester.
On veille a ce que les nations autochtones ne soient pas flouees la-dedans.
Le Conseil des nations autochtones du Yukon etait la.
Certaines d'entre elles ont deja signe une entente finale, d'autres sont en progression.
C'etait un voyage extraordinaire a Whitehorse.
Cela a un rapport avec le projet de loi qui est devant nous aujourd'hui.
J'etais avec ma fille et elle n'etait pas grosse non plus.
Il y a une espece de bruit terrible, et tout branle dans l'avion.
Je lui ai repondu: Eh bien non, ca ne se peut pas.
C'est une farce.
Je vais toujours m'en rappeler.
Je me suis donc rendu a Dawson City.
D'ailleurs, Patrimoine Canada est proprietaire de la moitie de la ville de Dawson City.
Les batiments ont tous ete proteges et ils refletent l'epoque.
On se retrouve presque dans le Far West.
Je salue d'ailleurs mes amis autochtones de la-bas.
On a pris un bon verre; il y avait des danseuses de french cancan.
C'est vraiment special.
Tu es toujours dans le DC-3?
Les Premieres Nations m'accueillent a bras ouverts et me font faire la grande tournee.
C'etait vraiment extraordinaire.
On a pris un saumon d'une vingtaine de livres.
Personnellement, je vous dis qu'on a pris un poisson de 20 livres.
Il m'est arrive un incident malheureux.
Je veux rappeler aussi le contrat social de l'epoque.
Aujourd'hui, les gens ont tendance a oublier cela.
On est tannes de payer pour ces gens.
On reglementait systematiquement la vie des autochtones.
Il n'y a pas de droits de succession.
Il y a des choses dans la loi qui sont difficilement applicables dans la realite.
Maintenant, je voudrais vous donner un apercu de l'etat d'avancement des negociations.
Les gens se demandent ce qu'on a dit.
Ce n'est pas facile pour moi non plus.
On disait: Voila, c'est regle pour l'autonomie gouvernementale et les revendications territoriales.
Depuis ce temps-la, on voit que cela progresse toujours.
Ces gens ont joint les rangs de ceux qui avaient deja signe.
Les negociations avec le Conseil Dena de Ross River sont a l'etape preliminaire.
Je me rappelle qu'a l'epoque, c'etait un probleme particulier.
Ces gens-la avaient beaucoup de questions sur la facon dont cela s'etait passe.
Ils formaient la minorite des autochtones du Yukon.
Aujourd'hui, je constate qu'ils ont au moins passe l'etape preliminaire.
Il va falloir y faire attention tantot.
Quant a la Premiere Nation de Carcross-Tagish, des rencontres ont lieu presentement.
Les negociations sur les terres touchent actuellement les terres rurales.
Des ententes ont ete conclues sur un certain nombre de selections.
La Premiere Nation devrait deposer bientot les selections portant sur les sites specifiques.
L'entente definitive devrait etre finalisee d'ici au mois de mars 1998.
Il est prevu que les negociations seront achevees au mois de decembre 1998.
La, il y a un litige.
Il y a donc des negociations qui se poursuivent toujours.
Tout ce qui s'appelle centralisation, forcement, ne rentre pas dans notre philosophie.
Quand je vous disais qu'on avait des reserves, les voici.
Elles disent qu'elles sont d'accord.
Il y a eu un changement de gouvernement, aussi.
On decidera, au comite permanent, s'il y a lieu de proposer des amendements.
Mme Louise Hardy (Yukon, NPD):
Je tiens a remercier les electeurs du Yukon qui m'ont envoyee ici.
C'est tres important.
En 1898, un territoire du Yukon distinct a ete cree.
En 1902, notre premiere fonction publique a ete etablie a une echelle modeste.
Ce dossier touche tous les habitants du Yukon.
En 1996, des consultations ont debute sur le transfert des programmes des Affaires du Nord.
Ces transferts constituent un element essentiel de l'autonomie gouvernementale des autochtones.
Ils permettront egalement d'accroitre l'intendance de notre environnement.
Elle offre des possibilites de developpement economique aux Yukonnais.
Le projet de loi facilite la realisation des objectifs strategiques du gouvernement federal.
La devolution ne signifie pas que le gouvernement federal se decharge de ses responsabilites.
Le transfert est une mesure de bon gouvernement.
J'exhorte la Chambre a adopter le projet de loi rapidement.
M. Gerald Keddy (South Shore, PC):
Je partagerai mon temps de parole avec mon collegue, le depute de Cumberland-Colchester.
Ces revenus sont actuellement evalues a 2 millions de dollars par annee.
J'ai certaines reserves a formuler a l'egard de ce projet de loi.
Or, un tel plan a ete elabore, mais il n'a pas ete adopte.
Parmi les pouvoirs transferes, mentionnons le pouvoir de reglementation.
Nous appuyons le projet de loi.
Nous l'estimons important.
Nous sommes aussi d'avis qu'il faut l'adopter.
M. Bill Casey (Cumberland-Colchester, PC):
La plupart des groupes communautaires et des organisations du Yukon appuient la mesure.
Les articles 6 et 8 du projet de loi portent sur ces questions.
Il nous demande d'agir avec celerite.
Elle est prete a collaborer avec les groupes autochtones.
Il est clair que nous appuyons sa position.
Je crois que les mesures necessaires ont ete prises.
M. Claude Bachand (Saint-Jean, BQ):
J'en ai egalement parle dans mon discours.
Deuxiemement, le gouvernement federal a un lien fiduciaire a l'egard des autochtones.
Toutefois, je crois que les articles 6 et 8 repondent a ses preoccupations.
Je crois que des amendements peuvent etre apportes au projet de loi.
Il n'est pas parfait.
La Chambre est-elle prete a se prononcer?
Plait-il a la Chambre d'adopter la motion?
LOI SUR LA GESTION DES RESSOURCES DE LA VALLEE DU MACKENZIE
Le projet de loi C-6 etablira ces organismes.
Nous avons egalement effectue des consultations exhaustives de la loi en elle-meme.
Ces consultations se sont revelees tres productives.
Il faut regarder l'ensemble d'une situation environnementale.
Ce sont des exemples de bonne planification et de bon gouvernement des affaires publiques.
Le gouvernement territorial appuie vigoureusement le principe d'un office public par vallee.
M. Derrek Konrad (Prince Albert, Ref.):
La riviere Saskatchewan traverse ma circonscription.
Ce cours d'eau fut jadis une importante route servant au commerce des fourrures.
Nous avons de tout.
Notre circonscription est la plus illustre du Canada et je suis fier de la representer.
Tous les droits sont detenus en commun.
J'ai trouve dans ces documents plusieurs lettres d'une page.
Je pretends que cet espoir sera vain lorsque nous examinerons davantage le projet de loi.
Le fardeau de ce legs pesera longtemps sur les contribuables canadiens.
Nous savons que les societes d'exploitation des ressources doivent respecter l'environnement.
Ces faits ne sont pas contestes.
Les decisions doivent etre rendues rapidement et l'arbitraire doit etre minimise.
Le cabinet du ministre a recu la lettre au printemps de 1996.
Nous croyons que des consultations plus larges sont la cle, dans ces negociations.
M. Claude Bachand (Saint-Jean, BQ):
Malheureusement, je ne suis jamais alle dans la vallee du Mackenzie.
Or, on voit que le projet de loi, la-dessus, peut atteindre son objectif.
Ces offices seront composes de cinq membres.
La parite sur les comites se retrouve dans plusieurs projets de loi.
Je trouve que cet effort est quand meme louable.
Encore une fois, l'objectif est la participation des autochtones.
C'est une region assez specifique.
Un comite permanent emettra des permis selon les besoins de la region designee.
Avant ces ententes, certaines lois avaient ete mises de l'avant par le Parlement federal.
Ces lois seront donc modifiees par le projet de loi a l'etude aujourd'hui.
Il faut bien visualiser le territoire que le fleuve Mackenzie traverse de long en large.
Je vais, dans ma conclusion, vous expliquer pourquoi.
Encore une fois, la parite autochtone et gouvernement est presente.
On trouve que cela aussi est meritoire.
Encore une fois, le projet de loi vient modifier une loi deja existante.
Donc, ce n'est pas juste l'environnement.
C'est important de le jauger, et le projet de loi le prevoit.
J'y ai fait reference un peu plus tot.
Cela a vraiment un impact tres negatif sur l'environnement.
C'etait un impact immediat et en plus cumulatif.
Ces fonctions peuvent incomber a un office ou a un ministere.
Alors, je pense que la fameuse parite sur l'ensemble des offices atteint cet objectif.
Alors, effectivement, c'est une region tres riche en petrole et en gaz.
Le Canada et la planete etaient en guerre.
Il fallait alimenter la machine de guerre avec le petrole.
Donc, ils ont atteint leur sommet a ce moment-la.
Norman Wells constitue la limite est du sentier Canol.
Est-ce que cela s'est fait aux depens des autochtones?
Je vous dis oui.
La est la grande question.
Naturellement, il y a des zones grises.
Beaucoup d'Amerindiens y vivent egalement.
Ils ont aussi garde leur propre culture autochtone.
Dernierement, le mot dene a acquis un sens plus restreint en politique.
Je pense que c'est leur rendre justice que de les nommer.
Donc, le probleme n'est pas nouveau, cela a toujours ete le cas.
Je donne souvent un exemple.
Quant aux peuples autochtones, leur vie s'est trouvee a jamais transformee.
Par la suite, ils ont vu leurs biens et leurs terres leur echapper.
La colonisation s'est concentree d'abord sur les terres agricoles du sud.
Cela se passe encore de nos jours.
On voit bien l'esprit qui habitait les traites, c'etait des ententes mutuelles.
Cette effervescence a cree un climat particulierement febrile.
On n'en a pas discute, on les a tout simplement appliquees.
Alors, ce sont les fameuses clauses qu'on appelle les clauses d'extinction.
Il m'apparait important de donner un apercu des objectifs des deux ententes.
Il m'apparait important d'en faire le tour.
La fameuse clause d'extinction dont je vous parlais tantot.
On veut se sortir de la fameuse Loi sur les Indiens.
C'est un autre trait culturel des autochtones; ils pensent souvent aux generations futures.
Les Mohawks, entre autres, parlent souvent de la septieme generation.
C'est partout pareil.
Ils souhaitent revoir les accords originaux et leur donner une interpretation.
Et le gouvernement refuse cela completement ou en partie.
J'en arrive finalement a la position du Bloc quebecois.
Je vous demande donc de verifier le quorum.
Le president suppleant (M. McClelland):
Un depute a demande de verifier s'il y avait quorum.
A-t-on quorum?
Le president suppleant (M. McClelland):
La presidence a determine qu'il y avait quorum.
C'est peut-etre le reflet du comportement gouvernemental en matiere autochtone depuis quelques annees.
Alors, je pense que c'est assez symptomatique de ce qui s'est passe.
Je desire quand meme remercier mon collegue.
M. Bob Kilger (Stormont-Dundas, Lib.):
Monsieur le President, j'invoque le Reglement.
Cela n'apporte rien a un cote ou l'autre de la Chambre.
Je felicite mon collegue d'en face qui accomplit enormement de boulot dans ce domaine.
Le president suppleant (M. McClelland):
Le whip du gouvernement a raison.
Cela veut plutot dire le contraire.
Alors, je pense que c'est ce que mon honorable collegue a voulu dire.
Alors, cela nous apparait etre un probleme majeur.
Aujourd'hui, ils se retrouvent avec un projet de loi qui les englobe.
Ils demandent la complete souverainete sur leurs terres.
Il m'apparaissait important de le dire.
Mme Louise Hardy (Yukon, NPD):
Le Conseil tribal des Gwich'ins appuie officiellement le projet de loi.
Nous approuvons le projet de loi et souhaitons qu'il soit adopte sans delai.
Les liberaux ont confie aux pollueurs le soin de s'autodiscipliner.
Le Canada n'honore pas ses obligations internationales en matiere de protection de l'environnement.
C'est excellent pour les autochtones de notre pays et pour l'Etat canadien.
M. Gerald Keddy (South Shore, PC):
Il ne s'agit pas ici d'un projet de loi ordinaire.
Le Mackenzie et son enorme vallee meritent d'etre respectes et proteges.
Je commence a peine a apprendre l'histoire de ce territoire.
Ce geste a certainement attire l'attention du gouvernement.
Evidemment, le gouvernement federal a interjete appel.
La demarche a certes ete lente.
Beaucoup de gens en ont certes eu assez d'attendre.
Nous esperons tous que ses nombreuses annees de discussion ont porte fruit.
Toutefois, je suis impatient d'examiner la chose de plus pres aux audiences du comite.
Ce sont des choses excellentes qui ont surement permis de realiser des progres depuis 1973.
Pourtant, si tout cela traduisait un principe, ce serait celui d'un bon gouvernement.
Cette vallee s'etend sur une longueur de 4 241 kilometres.
Le Parti progressiste-conservateur appuie ce projet de loi.
Nous sommes d'accord avec son objet.
M. Derrek Konrad (Prince Albert, Ref.):
Elle veut evidemment negocier.
Il a parle des droits inalienables de refus a l'egard de nombreuses activites.
Est-ce la la bonne facon de proceder?
Monsieur le President, je remercie le depute de ses questions.
Il souleve des questions valables.
Nous en sommes a la deuxieme lecture du projet de loi.
Il nous reste encore du chemin a faire.
Bien des gens sont alles s'etablir dans le Nord depuis 200 ans.
Le projet de loi s'applique a tous les habitants du Nord.
Il englobe tout le monde et reflete les droits de tous.
L'hon. Ethel Blondin-Andrew (secretaire d'Etat (Enfance et Jeunesse), Lib.):
C'est assez clair.
Ces dispositions doivent etre interpretees.
Chaque personne et chaque groupe donne sa propre interpretation.
Tout d'abord, le paragraphe 5(2) doit reconforter ces gens.
Au paragraphe 5(1), il est dit que la Loi sur les Indiens demeure entiere.
Il est clair qu'il appuie la mesure.
Le gouvernement ne tient pas a promulguer une reglementation excessive qui paralyserait l'industrie.
Ce n'est pas du tout son intention.
Ce n'est pas ainsi que veut fonctionner le gouvernement.
Ces offices sont extremement importants.
Le gouvernement veut servir tout le monde equitablement.
Nous sommes a l'etape de la deuxieme lecture.
C'est extremement important.
Nous n'entendons pas beaucoup parler de cela.
L'industrie du diamant n'a rien de comparable aux autres.
Combien d'entreprises en font autant?
Et il y a d'autres projets miniers.
Cela va leur rapporter seulement 200 millions de dollars.
Ce sont les renseignements que nous avons jusqu'a maintenant, et ceux que j'ai.
Quoi qu'en disent les annonces publicitaires, les diamants ne sont pas eternels.
C'est apparemment la norme.
Neanmoins, ce genre de developpement se produit dans le Nord.
Dans les Territoires du Nord-Ouest nous avons d'autres activites qui en resultent.
Nous savons qu'il faut etablir cet equilibre.
J'ai rencontre des representants de l'industrie qui ne l'appuient pas necessairement.
Le projet de loi confere certains pouvoirs aux nouveaux offices.
En ce sens, ils auront des pouvoirs assimilables a ceux d'un tribunal.
Apparemment, c'est la loi.
Les lois ne sont pas censees etre brutales.
Elles sont censees etre appliquees en fonction du facteur humain.
Il y a diverses cultures, divers groupes.
Il y a differents peuples, avec des niveaux varies de scolarite et de capacites.
Il y a des gens qui travaillent dans l'industrie.
Il y a des gens qui sont extremement preoccupes par les questions environnementales.
Nous continuons, a notre facon, de rechercher l'equilibre dans les Territoires du Nord-Ouest.
En ma qualite de legislatrice, c'est mon mandat.
Que ces gens fassent valoir leurs points de vue.
Ils sont les mieux places pour les exprimer.
Je ne peux parler en leur nom.
Nous avons l'occasion de batir.
Nous avons l'occasion de partager.
Nous avons l'occasion de travailler ensemble.
Je ne vois aucun facteur attenuant qui pourrait nous empecher de le faire.
M. Werner Schmidt (Kelowna, Ref.):
L'hon. Ethel Blondin-Andrew:
Nous insistons fortement sur les principes qui sous-tendent ce projet de loi.
Je n'ai pas entendu proposer d'amendements.
Je n'ai pas de liste d'amendements.
Car ce serait inacceptable.
Ce serait faire fi de l'objet initial du projet de loi.
Dans ce cas-la, elles seront prises en consideration.
On peut prendre en consideration des amendements a differentes etapes du processus.
Mais je ne sais rien pour l'instant des intentions des groupes interesses.
M. Gerald Keddy (South Shore, PC):
Le gouvernement a-t-il l'intention d'aller dans ce sens?
L'hon. Ethel Blondin-Andrew:
Cependant, nous devons etre prudents.
Nous devons en etre conscients et ce n'est pas facile.
M. John Williams (St. Albert, Ref.):
Je suis assez inquiet du nombre d'offices et d'organismes competents que nous creons.
Ce projet de loi cree cinq ou six offices differents pour une tres petite population.
Je sais qu'il y a des preoccupations environnementales et que l'environnement est fragile.
Je voudrais poser a la deputee deux questions.
Ensuite, je m'inquiete du transfert de pouvoirs par le ministre competent.
L'hon. Ethel Blondin-Andrew:
Monsieur le President, mon collegue a fait valoir des points tres importants.
C'est plus facile a dire qu'a faire.
Nous n'avons pas toujours a traiter avec des partenaires bienveillants.
Une fois adopte, ce projet de loi appuiera leur demarche.
La question du transfert de pouvoirs est un peu plus compliquee.
On ne dictera pas aux offices tout ce qu'ils doivent faire.
Il faut adopter des lois pour creer ces offices.
Toutefois, ces offices ont le pouvoir de diriger leurs propres activites.
M. Darrel Stinson (Okanagan-Shuswap, Ref.):
Elle a parle avec fierte des mines de diamants du Nord, avec raison d'ailleurs.
Je tiens a ce que les deputes en soient parfaitement conscients.
Ce projet de loi souleve de nombreuses preoccupations dans le Nord.
Les lettres que nous avons recues prouvent que ce n'est pas le cas ici.
Nous savons que le libelle juridique fait probleme.
Les interesses ne peuvent se permettre de venir a Ottawa pour faire valoir leurs craintes.
Certaines ne peuvent meme pas se rendre a Yellowknife.
Je m'y suis rendu.
Je suis certain que d'autres deputes l'ont fait egalement.
Ces gens meritent qu'on les ecoute.
Il faut que la Chambre y pense serieusement.
Je m'interroge et je m'inquiete a ce sujet.
Nous savons tous ce qui se passe dans l'industrie miniere.
Nous devons repondre a certaines de ces inquietudes.
J'ai siege, et je continue de sieger au comite des ressources naturelles.
Nous avons travaille tres fort.
Ce rapport a ete remis sur une tablette ou il amasse la poussiere.
Le manque de clarte est certainement la premiere preoccupation.
De meme, nous devons nous preoccuper de la possibilite du manque de ressources.
Beaucoup de ces questions devraient nous preoccuper.
Ce sont des preoccupations tres legitimes.
Monsieur le President, il a effectivement des obligations.
Mais il doit aussi ecouter tout le monde.
C'est la raison d'etre de la Chambre des communes.
Nous devons etudier tout cela avant d'adopter la loi les yeux fermes.
Pourquoi adopter une mesure qui a des imperfections?
Elle devrait etre bonne d'un bout a l'autre, si possible.
C'est ca, une bonne mesure legislative.
M. Werner Schmidt (Kelowna, Ref.):
Monsieur le President, j'ai trouve les remarques de mon collegue fort interessantes.
Monsieur le President, au moins trois nouveaux offices seront crees.
Cela ne peut absolument pas accelerer les choses.
Nous savons tous ce qui arrive lorsque nous nous enlisons dans la bureaucratie.
J'ai siege au Comite des ressources naturelles.
Nous avons cherche a etablir un guichet unique.
Ce n'est pas ce que nous faisons maintenant.
C'est exactement ce que nous faisons ici, alourdir la bureaucratie.
Est-ce que cela coutera plus cher au gouvernement?
Cela coutera certainement plus cher au gouvernement.
Il n'y aucun doute sur ce point.
Ce systeme coutera plus cher au gouvernement pour une plus grande incertitude.
C'est exactement ce que nous faisons.
M. John Finlay (Oxford, Lib.):
J'ai une ou deux questions a poser au depute d'Okanagan-Shuswap.
Je ne comprends pas tres bien.
Nous voulons quelque chose de simple que comprendra le milieu des ressources naturelles.
Je ne crois pas que mon collegue puisse gagner sur tous les tableaux.
Il ne peut pas avoir raison tout le temps.
Nous savons tous cela.
Puis-je avoir une explication?
Monsieur le President, je vais d'abord repondre a la derniere question.
Ils n'ont pas maintenant les ressources qu'il faut pour regler ce probleme.
Il ne faut pas l'oublier.
Je ne sais pas ce qui en est pour les deputes qui sont decedes.
M. John Finlay (Oxford, Lib.):
Des services de soutien seront assures par une petite equipe technique et administrative.
Il sera revise tous les cinq ans.
Un office regional des terres et des eaux sera cree dans chacune des regions designees.
Dans ce cas, des formations regionales speciales pourront etre constituees.
Cette evaluation permettra d'etablir si une etude d'impact complete s'impose.
Une fois la decision prise, elle sera mise en oeuvre par les autorites reglementaires appropriees.
Ces principes sont la reconnaissance, le respect, le partage et la responsabilite.
Ils participent avec nous a la cogestion du developpement et des ressources.
La mesure met en place un systeme de gestion des ressources plus transparent et local.
Ma question est la suivante.
Je pense qu'il a parle du nombre des formations.
J'ai explique qu'il y en avait six.
Nous reprendrons le debat apres la periode des questions.
Comme il est presque 14 heures, la Chambre passe maintenant aux declarations de deputes.
M. Keith Martin (Esquimalt-Juan de Fuca, Ref.):
Monsieur le President, quel est le resultat de l'apartheid institutionnalise au Canada?
La parole est au depute de Scarborough-Est.
M. John McKay (Scarborough-Est, Lib.):
Mme Christiane Gagnon (Quebec, BQ):
C'est une femme sur neuf qui se verra un jour diagnostiquer cette maladie.
J'invite le gouvernement a garder cela en tete.
Mme Colleen Beaumier (Brampton-Ouest-Mississauga, Lib.):
Je crois que ce sera une caline de belle fete.
LES SERVICES D'INCENDIE ET D'URGENCE DE MISSISSAUGA
M. Steve Mahoney (Mississauga-Ouest, Lib.):
L'equipe de Mississauga est sortie victorieuse avec le temps de reaction le plus rapide.
M. Claude Drouin (Beauce, Lib.):
M. Gary Lunn (Saanich-Gulf Islands, Ref.):
Monsieur le President, j'aimerais feliciter Brian MacKenzie, juge de la cour provinciale de Victoria.
La decision du juge MacKenzie tranche en faveur des victimes.
Bravo donc au juge MacKenzie.
LA CAMETOID ADVANCED TECHNOLOGIES LIMITED
Mme Judi Longfield (Whitby-Ajax, Lib.):
Nous sommes en train d'etablir les bases...
La deputee de Vancouver Kingsway.
Mme Sophia Leung (Vancouver Kingsway, Lib.):
Quelle magnifique maniere de montrer combien il aime et apprecie le Canada!
Ils ont construit le chemin de fer et combattu pour le Canada a la guerre.
Je veux leur rendre hommage, ainsi qu'a M. Lee.
M. Jack Ramsay (Crowfoot, Ref.):
Un conducteur en etat d'ebriete a tue son ami.
Une femme a tente d'engager quelqu'un pour assassiner sa fille.
Aucun de ces criminels n'a purge de peine d'emprisonnement.
En raison de la loi des liberaux sur la condamnation avec sursis.
Mme Carolyn Bennett (St. Paul's, Lib.):
En ce premier anniversaire, le gouvernement a bien des raisons d'etre fier.
LE MINISTRE DU DEVELOPPEMENT DES RESSOURCES HUMAINES
M. Paul Crete (Kamouraska-Riviere-du-Loup-Temiscouata-Les Basques, BQ):
Cyniquement, le ministre faisait la morale au gouvernement du Quebec.
M. Lorne Nystrom (Qu'Appelle, NPD):
Les epargnes canadiennes ne devraient pas etre confiees aux joueurs inveteres de la societe casino.
Les Canadiens veulent que leur argent soit place en lieu sur.
M. Jacques Saada (Brossard-La Prairie, Lib.):
Le cheminement de M. Beaudoin est intimement lie a la croissance de Bombardier.
La compagnie excelle dans ses travaux de conception, de fabrication et de commercialisation.
Mme Elsie Wayne (Saint John, PC):
M. John O'Reilly (Victoria-Haliburton, Lib.):
Continuez de proposer des projets.
M. Preston Manning (chef de l'opposition, Ref.):
Leur politique sera-t-elle fondee sur la science?
Sera-t-elle fondee sur une entente avec les provinces, les consommateurs et les contribuables?
La verite est enfin sortie hier.
Cela donne une nouvelle signification a l'expression taxe verte.
Le tres hon. Jean Chretien (premier ministre, Lib.):
Il y aura bientot une conference a Kyoto.
Il y a un ecart important entre les differentes regions du monde.
Une chose est claire.
Pendant que j'etais en Grande-Bretagne, j'ai...
Le chef de l'opposition.
M. Preston Manning (chef de l'opposition, Ref.):
Tous les autres pays du G7 ont deja publie leurs objectifs.
Le tres hon. Jean Chretien (premier ministre, Lib.):
M. Preston Manning (chef de l'opposition, Ref.):
Jusqu'ou le premier ministre est-il pret a hausser...
Le tres hon. Jean Chretien (premier ministre, Lib.):
Le tres hon. Jean Chretien:
Oui, c'est ce qu'il a dit.
Je n'ai jamais employe ce genre de langage inacceptable pour parler de mes deputes.
Mme Deborah Grey (Edmonton-Nord, Ref.):
Certains deputes ne seraient pas de cet avis.
Nous sommes devant deux enfants qui se vantent d'avoir la plus grosse taxe ecologique.
Je pose la question au premier ministre.
Le tres hon. Jean Chretien (premier ministre, Lib.):
Il s'agit d'un probleme mondial tres serieux.
Comme toujours, la deputee d'Edmonton-Nord ne peut pas etre serieuse.
Mme Deborah Grey (Edmonton-Nord, Ref.):
Les Canadiens veulent des assurances sur ce point tres serieux.
Le tres hon. Jean Chretien (premier ministre, Lib.):
Nous adopterons une position responsable pour tous les Canadiens.
Le probleme est grave, malgre l'absence de politiques de l'opposition a ce sujet.
M. Richard Marceau (Charlesbourg, BQ):
Monsieur le President, ma question s'adresse au solliciteur general.
Or, M. Michel Deslauriers est directeur de l'Institut Leclerc, un penitencier federal de Laval.
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
Monsieur le President, je devrai confirmer ces allegations et repondre plus tard au depute.
M. Richard Marceau (Charlesbourg, BQ):
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
M. Gilles Duceppe (Laurier-Sainte-Marie, BQ):
Voila maintenant qu'il ignore ce qui se passe dans son propre ministere.
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
Monsieur le President, il faut assurer l'application reguliere de la loi.
Je vais examiner la situation et j'en reparlerai au depute.
M. Gilles Duceppe (Laurier-Sainte-Marie, BQ):
Monsieur le President, la realite depasse la fiction.
Cette situation me rappelle presque un episode de la serie Omerta.
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
Mme Alexa McDonough (Halifax, NPD):
Monsieur le President, ma question s'adresse au premier ministre.
D'abord, le ministre de l'Environnement parle de cibles en termes tres vagues.
Peut-etre que le premier ministre est maintenant pret a se produire.
Le tres hon. Jean Chretien (premier ministre, Lib.):
La position du Canada est tres claire.
Nous ne pouvons pas aller a Kyoto s'il ne s'y passe rien.
Nous voudrions vraiment voir de reels progres a Kyoto.
Nous preparons une position canadienne avec tous ceux qui peuvent participer.
La deputee de Halifax.
Mme Alexa McDonough (Halifax, NPD):
C'est comme cela partout, sauf au Canada.
Le tres hon. Jean Chretien (premier ministre, Lib.):
Monsieur le President, la reponse est oui.
L'hon. Jean J. Charest (Sherbrooke, PC):
Il a d'ailleurs pris un engagement en ce sens dans le livre rouge.
L'hon. Christine Stewart (ministre de l'Environnement, Lib.):
L'hon. Jean J. Charest (Sherbrooke, PC):
Nous pouvons peut-etre aider le gouvernement aujourd'hui a regler ce probleme.
L'hon. Paul Martin (ministre des Finances, Lib.):
Monsieur le President, un travail enorme a ete accompli au sein du ministere des Finances.
Nous avons fait enormement de progres.
M. Monte Solberg (Medicine Hat, Ref.):
Il trepignait d'enthousiasme a propos de cette TPS detestee.
C'est votre mulligan pour aujourd'hui.
Le tres hon. Jean Chretien (premier ministre, Lib.):
On le trouvera tot ou tard la tete dans le sable d'une fosse.
La TPS existe au Canada depuis longtemps.
Nous nous y sommes opposes.
Ma position demeure inchangee.
Nous l'avons harmonisee avec celles de plusieurs des provinces pour la rendre plus efficace.
M. Monte Solberg (Medicine Hat, Ref.):
Le premier ministre est alle jusqu'a dire qu'il avait fait adopter la taxe.
Je crois que cela va laisser les Canadiens pantois.
Nous savons maintenant comment il arrive a avoir une bonne carte de pointage.
Le tres hon. Jean Chretien (premier ministre, Lib.):
C'est exactement ce que j'ai fait.
M. Michel Bellehumeur (Berthier-Montcalm, BQ):
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
Monsieur le President, les accusations qui ont ete portees cet apres-midi sont tres graves.
Je vais les examiner.
Si les faits sont exacts, des mesures seront prises.
M. Michel Bellehumeur (Berthier-Montcalm, BQ):
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
Mme Diane Ablonczy (Calgary-Nose Hill, Ref.):
L'hon. Paul Martin (ministre des Finances, Lib.):
Mme Diane Ablonczy (Calgary-Nose Hill, Ref.):
Pourquoi le ministre n'est-il pas de cet avis?
L'hon. Paul Martin (ministre des Finances, Lib.):
Mme Helene Alarie (Louis-Hebert, BQ):
Monsieur le President, ma question s'adresse au solliciteur general.
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
Le travail y est tres difficile.
C'est dur pour les employes.
Nous allons voir ce qu'il en est.
Ces allegations sont tres serieuses et si elles s'averent, des mesures seront prises.
Mme Helene Alarie (Louis-Hebert, BQ):
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
Monsieur le President, il est tres important que la loi suive son cours.
Il faut examiner cela.
Si les allegations s'averent alors, des mesures seront prises.
M. John Reynolds (West Vancouver-Sunshine Coast, Ref.):
L'hon. Lucienne Robillard (ministre de la Citoyennete et de l'Immigration, Lib.):
M. John Reynolds (West Vancouver-Sunshine Coast, Ref.):
Monsieur le President, je crois que le premier ministre a raison.
Le chomage chez les jeunes atteint 16 p. 100 au Canada a l'heure actuelle.
L'hon. Lucienne Robillard (ministre de la Citoyennete et de l'Immigration, Lib.):
Et si le Parti reformiste a une etude de cette nature, qu'il le prouve.
M. Pierre Brien (Temiscamingue, BQ):
Monsieur le President, ma question s'adresse au premier ministre.
Le tres hon. Jean Chretien (premier ministre, Lib.):
Le gouvernement l'appuie et nous l'avons etabli clairement.
Alors, je me demande pourquoi l'honorable depute n'a pas compris plus tot.
Nous avons eu un vote, sur la question de Terre-Neuve, de nature independante.
Mme Beth Phinney (Hamilton Mountain, Lib.):
L'hon. Lloyd Axworthy (ministre des Affaires etrangeres, Lib.):
M. Garry Breitkreuz (Yorkton-Melville, Ref.):
Nous sommes donc heureux de savoir quelle est son opinion.
Il a eu une journee pour faire enquete.
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
M. Garry Breitkreuz (Yorkton-Melville, Ref.):
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
M. Svend J. Robinson (Burnaby-Douglas, NPD):
Le ministre repondra-t-il positivement a cette requete?
L'hon. David M. Collenette (ministre des Transports, Lib.):
Je crois que c'est tres louable.
Nous partageons les preoccupations du depute.
Mme Wendy Lill (Dartmouth, NPD):
Ce groupe de travail avait ete cree pour...
Je demande a la deputee de poser directement sa question.
Ma question s'adresse au ministre du Developpement des ressources humaines.
L'hon. Pierre S. Pettigrew (ministre du Developpement des ressources humaines, Lib.):
Monsieur le President, je remercie la deputee pour sa tres bonne question.
Nous avons avance sur tous les fronts.
Le depute de Brandon-Souris.
M. Rick Borotsik (Brandon-Souris, PC):
Monsieur le President, ma question s'adresse au ministre des Peches et Oceans.
Le ministre a-t-il consulte le president du conseil d'administration, ses membres et...
Le ministre des Peches et Oceans.
L'hon. David Anderson (ministre des Peches et des Oceans, Lib.):
Quand je regarde les deputes conservateurs, je suis cependant tente de revenir sur mon opinion.
Nous avons nomme des anciens deputes competents d'autres partis a des offices et commissions.
M. Rick Borotsik (Brandon-Souris, PC):
L'office se retrouvera donc avec deux pdg.
Pendant que M. Dunn fera le veritable travail, M. Fewchuck appatera les hamecons.
L'hon. David Anderson (ministre des Peches et des Oceans, Lib.):
Et quant au Parti reformiste, il souhaite surement que nous congediions Jack Fraser.
M. Reg Alcock (Winnipeg-Sud, Lib.):
Monsieur le President, la proposition du gouverneur du Dakota du Nord etait tres interessante.
M. Jake E. Hoeppner (Portage-Lisgar, Ref.):
Le ministre responsable de la commission est-il d'accord avec ce jugement?
Mme Pauline Picard (Drummond, BQ):
Monsieur le President, ma question s'adresse au ministre de la Sante.
L'hon. Allan Rock (ministre de la Sante, Lib.):
Alors, nous avons l'intention de deposer sous peu la modification pour respecter l'engagement.
M. Gordon Earle (Halifax-Ouest, NPD):
Monsieur le President, ma question s'adresse au ministre de l'Industrie.
L'hon. John Manley (ministre de l'Industrie, Lib.):
Mme Elsie Wayne (Saint John, PC):
L'hon. Sheila Copps (ministre du Patrimoine canadien, Lib.):
M. David Iftody (Provencher, Lib.):
Monsieur le President, ma question s'adresse au ministre des Transports.
Les agriculteurs de l'Ouest sont legitimement inquiets au sujet du transport de leur grain.
L'hon. David M. Collenette (ministre des Transports, Lib.):
Des preparatifs sont en cours.
M. Darrel Stinson (Okanagan-Shuswap, Ref.):
Le gouvernement est en train de les etudier.
LE TRIBUNAL CANADIEN DU COMMERCE EXTERIEUR
M. Benoit Sauvageau (Repentigny, BQ):
Ma question s'adresse au ministre des Finances.
Quels interets protege-t-il?
L'hon. Paul Martin (ministre des Finances, Lib.):
Nous avons recu de nouvelles informations.
Nous sommes en train de les analyser.
Nous allons en discuter avec le Tribunal et avant longtemps, une decision sera rendue.
M. Rick Laliberte (Riviere Churchill, NPD):
Monsieur le President, ma question s'adresse a la ministre de l'Environnement.
Cela fait quatre jours consecutifs que les matieres toxiques font la une des informations.
L'hon. Christine Stewart (ministre de l'Environnement, Lib.):
L'hon. Jean J. Charest (Sherbrooke, PC):
L'hon. Paul Martin (ministre des Finances, Lib.):
M. John Nunziata (York-Sud-Weston, Ind.):
Monsieur le President, ma question s'adresse a la ministre de la Justice.
L'hon. Anne McLellan (ministre de la Justice et procureur general du Canada, Lib.):
Monsieur le President, il m'est impossible de commenter une cause en particulier.
Cette affaire pourrait faire l'objet d'un appel.
La decision revient au procureur general de l'Ontario.
Toutefois, je rappelle au depute que mon predecesseur a modifie le Code criminel.
Il suffit de les appliquer.
LOI SUR LA GESTION DES RESSOURCES DE LA VALLEE DU MACKENZIE
Mme Michelle Dockrill (Bras d'Or, NPD):
Les autochtones considerent la terre et ses ressources comme des biens communs.
M. Keith Martin (Esquimalt-Juan de Fuca, Ref.):
Nous, du Parti reformiste, ne serions absolument pas en desaccord avec cela.
Il ne va tout simplement pas donner lieu a cet effort concerte et coordonne.
De nombreux habitants des Territoires du Nord-Ouest disent la meme chose.
Mais ce n'est pas ce qui se produit.
D'autres accords avaient nettement mieux defini et unifie le systeme.
Mais ce n'est pas le cas.
Les Territoires du Nord-Ouest n'ont pas l'exclusivite de ce probleme.
Il est present dans tout le Canada.
La Colombie-Britannique sera entierement balkanisee par le reglement des revendications territoriales.
Imaginez le cauchemar, quand on essaie de faire des affaires dans une situation semblable.
Le developpement economique en devient quasi impossible.
Or, il n'en est tout simplement rien.
Les taux ont-ils diminue?
Sont-ils demeures au meme niveau?
Quelles sont les consequences sociales, quel est le cout social de cette horrible situation?
Elle empire meme dans certaines localites.
Nous devons envisager la situation sous un jour nouveau.
C'est la population qui en assume les couts sociaux.
Nous devons mettre un terme a ce developpement parallele.
Cela ne doit pas se repeter.
Il faut cesser de voir les choses en termes d'eux et nous.
Il faut comprendre que nous sommes tous des etres humains.
Nous devons tirer profit de ce qui nous distingue.
Nous pouvons beaucoup apprendre les uns des autres.
Nous pouvons tirer beaucoup de la beaute des cultures autochtones de notre pays.
Cela est a peu pres inconnu dans les milieux medicaux.
On note des proportions epidemiques d'alcoolisme et de toxicomanie.
M. Garry Breitkreuz (Yorkton-Melville, Ref.):
J'invoque le Reglement, monsieur le President.
On m'avait assure que j'en aurais une copie.
La Presidente suppleante (Mme Thibeault):
L'hon. Andy Scott (solliciteur general du Canada, Lib.):
LOI SUR LA GESTION DES RESSOURCES DE LA VALLEE DU MACKENZIE
Madame la Presidente, je remercie le depute d'Esquimalt-Juan de Fuca pour son discours.
J'ai quelques observations a faire.
Il a demande pourquoi nous avions trois nouveaux offices.
Le premier est l'office d'amenagement territorial.
C'est tres simple.
C'est pourquoi nous avons des offices d'amenagement territorial.
Nous devons nous occuper de l'amenagement du territoire.
Ma question est: que veut l'industrie?
J'ai une question simple a poser a mon collegue d'Esquimalt-Juan de Fuca.
Madame la Presidente, je remercie le depute liberal pour sa question.
Il a demande trois choses.
Il a mentionne les offices.
Cependant, les choses se gatent lorsqu'il y en a trois.
Les personnes en cause demandent pourquoi il leur faut investir dans trois offices.
C'est de la bureaucratisation a outrance.
Peut-etre l'argent des contribuables serait-il ainsi mieux utilise.
C'est ce que nous, reformistes, voudrions que le gouvernement fasse.
Nous sommes donc pour les deux.
C'est avec de la collaboration que les deux groupes pourront y trouver leur compte.
Le depute a parle des revendications territoriales.
Les negociations se deroulent uniquement avec les autochtones et, souvent, a huis clos.
C'est un probleme qui existe dans de nombreuses negociations de revendications territoriales.
M. Jim Gouk (West Kootenay-Okanagan, Ref.):
Quatriemement, permettra-t-il la protection de l'environnement d'une maniere efficace et rentable?
Madame la Presidente, je remercie mon collegue de ses questions succinctes et precises.
Tout le processus de mise en valeur est maintenant au point mort.
Certains deputes de la majorite opinent.
Mon collegue mentionnait egalement l'emploi pour les autochtones et les autres.
Est-ce que cela conduira a l'autosuffisance economique?
Le projet de loi en a le potentiel.
M. Grant Hill (Macleod, Ref.):
Sait-il ce qui est arrive?
Ou est passe cet argent?
La personne la mieux placee pour y repondre est la ministre des Affaires indiennes.
Nous savons que le taux de chomage augmente chez les autochtones.
C'est encore pire que ce qu'on pourrait imaginer.
M. Deepak Obhrai (Calgary-Est, Ref.):
Je voudrais aussi remercier les infatigables benevoles qui m'ont aide dans ma campagne.
L'accord actuel avait ete conclu par le gouvernement Mulroney.
Plus precisement, la chambre des mines a quatre grandes preoccupations.
Nous ne pouvons pas definir les regles au fur et a mesure.
Notre appareil judiciaire croule deja sous les nombreux litiges qu'il doit traiter.
On ne peut pas et on ne devrait pas tolerer cette situation.
Mais a quel prix, voila la question.
Nous devrions examiner de plus pres les repercussions de cette mesure legislative.
La deuxieme lecture porte sur le principe du projet de loi.
Madame la Presidente, je voudrais adresser quelques remarques a mon collegue.
Le Parti reformiste se plaint de l'existence de trois offices.
Je voudrais expliquer leurs activites.
La distance entre Yellowknife et Inuvik est enorme.
Le fleuve Mackenzie est le plus long du Canada.
Ce sera ma derniere observation avant ma question.
Il ne parle que d'exploitation miniere.
Cependant, c'est plus qu'une simple loi sur la gestion des ressources.
Notre probleme ne se situe pas au niveau de l'objet du projet de loi.
Il s'emploie a creer un autre palier de bureaucratie.
Voila pourquoi nous exprimons une reserve a ce sujet.
Le depute devrait preter une oreille attentive aux propos du Parti reformiste.
Voila ou est le probleme quant a nous.
M. John Finlay (Oxford, Lib.):
La vallee du fleuve Mackenzie represente l'un des plus importants reseaux hydrographiques au monde.
On me dit qu'il y a trop d'offices.
Comme le secretaire parlementaire l'a mentionne, les Gwich'ins veulent un office.
Ils veulent etre representes.
Ils veulent avoir la maitrise sur leur grande region.
Les Denes et les Metis du Sahtu veulent aussi un office.
Il pourrait y en avoir trois.
Il pourrait y en avoir un.
C'est comme cela que les choses se font.
Il faut laisser s'exercer la responsabilite des gens.
Je l'ai dit ce matin et je le repete encore.
Je n'ai rien entendu d'autre que des belles paroles.
Je remercie le depute d'avoir pose cette question.
Nous n'avons aucun probleme avec la vallee du Mackenzie.
Le depute a raison.
C'est un beau cours d'eau propre.
Les premieres nations ont parfaitement le droit d'en profiter au maximum pour devenir prosperes.
M. Mike Scott (Skeena, Ref.):
Nous devrions tous etre motives par le souci de batir un bel avenir.
A premiere vue, le projet de loi semble aller dans cette direction.
On peut se dire qu'il permettra cette collaboration entre les deux groupes.
Dans ces regimes, qui avait des droits?
Nous savons tous comment ces droits etaient determines.
Les rois possedaient tout le pouvoir.
Ils n'etaient pas elus.
Lorsqu'ils naissaient, la couronne leur etait deja destinee.
Ils n'etaient pas obliges de se presenter a des elections.
Ils n'avaient besoin du consentement de personne.
Les serfs formaient la vaste majorite de la population.
Ils n'avaient aucun pouvoir, aucune voix au chapitre.
C'etait des gens qui appartenaient pratiquement au roi.
Ils etaient la propriete du roi.
Le roi pouvait faire d'eux ce qu'il voulait.
Il n'avait pas besoin de demander la permission.
Il etait monarque absolu et ils en etaient les serviteurs absolus.
Prenons l'exemple de l'Union sovietique, qui dispose de beaucoup de ressources.
La plupart des gens y vivent dans la misere.
Ce n'est pas un hasard.
Certains signes nous laissent maintenant esperer l'emergence d'une democratie la-bas.
La presidente suppleante (Mme Thibeault):
On va faire le decompte.
Et les deputes ayant ete comptes:
La presidente suppleante (Mme Thibeault):
En effet, il n'y a pas de quorum.
Et la sonnerie s'etant arretee:
La presidente suppleante (Mme Thibeault):
Nous avons maintenant le quorum.
Je donne la parole a l'honorable depute de Skeena.
Comparons donc notre situation a celle de l'ancienne Union sovietique.
Nous avons la democratie en Amerique du Nord.
Je dois ajouter que la plupart de ces gouvernements etaient liberaux.
Les liberaux ont detenu le pouvoir au Canada pendant la majeure partie du siecle.
Ce sont les liberaux qui ont edifie l'Etat providence et cree la dependance.
Ils sont tres en colere et cherchent des reponses a leurs questions.
Ils veulent etre respectes.
Comment dire que c'est la temoigner du respect a un etre humain?
Je rejette completement cette facon de penser.
Elle ne devrait pas etre liee a l'appartenance a une bande autochtone.
Cela ne vient pas de moi, mais bien du verificateur general.
Autrement, cela n'a eu aucun effet positif.
Le gouvernement est au courant du probleme depuis dix ans.
Le gouvernement n'assume pas ses responsabilites.
En realite, il ne fait strictement rien.
Regardez ce qui arrive dans la reserve Stony, en Alberta.
Les gens de la reserve ont du se plaindre dans les medias.
Et voici que, par suite de cette verification judiciaire, des accusations sont portees.
La verite est faite.
Esperons que toute la verite sera faite.
Je me suis entretenu hier avec des autochtones du sud de l'Ontario.
Je n'entrerai pas dans le detail de l'affaire.
Je crois que les autochtones de tout le Canada ont perce le systeme.
Je crois qu'ils connaissent le systeme mieux que le gouvernement.
Je crois qu'une majorite de Canadiens appuie cette notion.
Je crois que cela constitue une richesse pour notre pays.
Je ne crois pas que ce soit la solution pour l'avenir de notre pays.
La presidente suppleante (Mme Thibeault):
M. Bernard Patry (Pierrefonds-Dollard, Lib.):
Madame la Presidente, je remercie le depute de Skeena de son discours.
En francais, nous dirions qu'il a fait une affirmation gratuite.
Je ne sais pas comment traduire cette expression.
Il a parle de favoritisme.
Le suivant, l'office des terres et des eaux, comportera 17 membres.
Nous les avons nommees parce qu'elles etaient competentes.
Ce sont des gens devoues et qualifies.
Toutes les nominations dans les Territoires du Nord-Ouest sont excellentes.
Ma question est tres simple.
Est-ce que partenaires egaux et a part entiere signifie assimilation a ses yeux?
Nous avons vu les resultats et nous devons composer avec.
Que fait le gouvernement aujourd'hui?
Je n'arrive pas a le croire.
Violer ce principe n'est pas sans consequences.
J'aimerais que le depute cesse de me fixer.
M. Rick Laliberte (Riviere Churchill, NPD):
Ils auraient la majorite au Senat.
Nous formions la majorite de la population canadienne.
C'est un progres.
Les ressources sont la richesse du Canada.
Sans revenus, il n'y aurait pas de cycle economique.
Pour creer de la richesse, il faut exploiter les ressources.
Si les autochtones peuvent participer a cette exploitation, c'est deja un debut.
Les offices de gestion sont un progres.
Madame la Presidente, j'essaierai d'etre bref.
Je remercie le depute pour son intervention.
Je ne suis pas d'accord avec lui sur un point.
Il dit que les ressources sont la richesse de notre pays.
Je me permets de ne pas partager cet avis.
Les gens sont la richesse du pays et les ressources en sont les outils.
Il y a des regions a forte concentration autochtone.
Ils sont de la region.
M. Grant McNally (Dewdney-Alouette, Ref.):
C'est certes un bel endroit a visiter.
Je dirais que c'est la plus belle circonscription du Canada.
La famille constitue une partie tres importante de ma vie.
Nous faisons tous des sacrifices pour etre ici a la Chambre.
La famille est la vie de notre pays.
L'apathie n'a jamais ete aussi grande.
C'est une chose qui merite sans aucun doute d'etre examinee.
Nous constatons que les gens sont touches par les mesures legislatives.
Ce sont des choses auxquelles nous devons nous attaquer.
Nous appuyons egalement la participation des autochtones au processus.
C'est a ce niveau que se situent nos objections.
Dans les faits, cette structure de pouvoir interdit toute volonte d'engagement reel.
C'etait une tres bonne analogie.
Nous savons qu'il est important que tous les Canadiens soient egaux.
Mon pere etait un ancien combattant.
Il a servi son pays en combattant dans la Seconde Guerre mondiale.
Il m'a vraiment fait comprendre le sens du mot democratie.
Nous avons tenu maints et maints debats sur la liberte et la democratie.
Il a perdu beaucoup de ses amis.
C'est le principe qu'ont defendu mon pere et ses amis.
Nous souhaiterions qu'on mette en oeuvre l'egalite.
La vallee du Mackenzie est une superbe region qui fait partie de notre merveilleux pays.
C'est ma principale objection.
Madame la Presidente, je vous remercie de m'avoir ecoute avec attention.
D'autres s'inquietent de ce qu'il en coutera.
Le fleuve Mackenzie est long de 4 000 kilometres.
On ne se rend pas de Yellowknife a Inuvik en criant ciseaux.
Nous transferons leurs pouvoirs aux peuples des premieres nations.
J'ai une question a l'adresse de mon collegue.
Je remercie le depute pour sa question.
D'abord, je comparerais l'opinion des leaders autochtones a celle des simples autochtones.
Voila comment je repondrais a cette question.
Ce n'est pas l'objet du projet de loi qui nous preoccupe.
M. John Finlay (Oxford, Lib.):
Je suis sur que nous nous souvenons tous d'avoir tenu des propos aussi elogieux.
Je lui en sais gre car ca nous rappelle pourquoi nous sommes ici.
J'ai travaille avec l'ancien depute de Churchill, Elijah Harper.
C'est un Indien cri.
Vous n'avez pas la moindre idee de ce dont je parle.
Le depute a eu recours a une bien mauvaise analogie.
Elles ont chacune leur systeme de gouvernement, leur facon de fonctionner.
Elles s'arrangeaient ensemble.
Bien sur, elles se battaient de temps a autre et faisaient quelques prisonniers.
Nous devons apprendre cela.
Puis il y a cet illogisme selon lequel ils veulent tous l'egalite.
Il est bien difficile de s'entendre sur la definition de ce terme.
Les deputes reformistes l'utilisent sans meme y penser.
Ailleurs, ils suivront les regles en place.
Ce n'est pas du tout ce que les autochtones veulent.
Nous n'en avons pas fait grand cas.
Oui, nous avons lu des choses sur certains premiers traites.
C'est ce que nous tentons de faire encore une fois.
C'est precisement l'objet de ce projet de loi.
Il faudra du temps, de la bonne volonte.
J'ai plutot entendu des slogans et des mots a la mode.
Madame la Presidente, je suis heureux de repliquer aux observations de mon collegue.
Je ne vois pas M. Harper aujourd'hui.
Le depute a parle de divagations et il a traite les autochtones de sauvages.
Il a parle des structures de pouvoir et des abus des gouvernements precedents.
Nous cherchons a etablir un nouvel equilibre.
Nous ecoutons les autochtones de la base.
Ils se preoccupent des structures de pouvoir au sein de leurs reserves.
L'egalite n'est pas un mot a la mode chez nous.
C'est un des principes de notre politique.
Mme Diane Ablonczy (Calgary-Nose Hill, Ref.):
Les deputes doivent examiner objectivement certaines de ces dispositions.
J'imagine que ce titre decrit bien la teneur du projet de loi.
Lorsque pareille situation se presente, nous le soulignons sans macher nos mots.
Premierement, le projet de loi prevoit la creation de quatre nouveaux offices.
Il cree un office d'amenagement territorial pour la region designee de Gwich'ins.
Il cree aussi un office d'amenagement territorial pour celle du Sahtu.
Il y a donc une proliferation d'offices.
Puis, a quelque distance de la, un autre groupe pourra faire la meme chose.
C'est un bel objet.
Un systeme unifie, cela sonne bien.
Ce systeme est-il unifie?
Le projet de loi ne dit pas comment les membres des offices seront choisis.
Il ne donne aucun critere d'admissibilite.
Ce processus decisionnel manque de ressources.
Encore une fois, cela n'est pas prevu dans le projet de loi.
En passant, ce n'est pas la la seule structure decisionnelle dans cette region.
Il y a aussi le gouvernement territorial et les conseils autochtones.
Il y aura tous ces niveaux de decideurs, de gouvernements et d'autorites.
Je ne suis pas du tout convaincue que cette structure peut fonctionner.
C'est la facon dont les institutions, les organisations et les organes de decision fonctionnent.
L'objectif est louable.
Aucun depute ne dit cela.
On doit definir clairement son mode de fonctionnement.
Il n'en est absolument pas question dans le projet de loi.
M. Jim Karygiannis (Scarborough-Agincourt, Lib.):
Monsieur le President, j'ai ecoute avec un vif interet la deputee d'en face.
Elle a aussi declare que c'etait une mesure irresponsable.
La responsabilite ne consiste pas a critiquer et a detruire.
C'est malheureusement ce que font toujours les reformistes.
La responsabilite consiste a etre elu a la Chambre des communes.
On ne peut se contenter de critiquer.
Quand on a des idees constructives, on les presente au lieu de critiquer.
La critique etait leur specialite.
Sur un dossier apres l'autre, nous avons presente des solutions constructives.
Monsieur le President, pour commencer, j'aimerais remercier mon collegue de son discours.
Il a parle des criteres applicables a la nomination des membres des offices.
Il n'y a qu'un seul critere, la competence.
Deuxiemement, le depute a parle de tribunaux, de retards et de frustrations.
Nous ne faisons que remplacer ce que nous faisons maintenant.
Le depute pretend que les offices ne sont pas integres.
C'est exactement l'inverse.
Il est integre, completement.
Il est concu pour les gens qui y vivent.
Ma question est tres simple.
Les bandes qui habitent la sont d'accord.
Le ministre des ressources renouvelables est d'accord.
Les gens et le gouvernement des Territoires du Nord-Ouest sont d'accord.
Pourquoi la deputee ne prend-elle pas tout cela en consideration?
Voila ce qu'il importe de rappeler.
Ils meritent ce qu'il y a de mieux.
Voila le genre de tactiques que les liberaux utilisent constamment quand ils se font critiquer.
C'est completement absurde.
Si la competence est le critere, en quoi donc consiste la competence?
Il y a toutes sortes de gens qui sont competents, mais competents a differents egards.
Quel genre de formation doivent-ils posseder?
Quel genre de connaissances leur faut-il?
Quel genre de perspective doivent-ils avoir a l'egard de la region?
Voila le genre de choses qu'il faut definir.
M. John Duncan (Ile de Vancouver-Nord, Ref.):
Certains l'appellent l'Arctique de l'Ouest.
Divers interets opposent les groupements tribaux, les Metis et les non-autochtones.
Il est certes tout, sauf etrange.
Nous n'avons pas suffisamment l'occasion de debattre cette question fort importante.
Toutefois, il existe une meilleure facon de faire.
Le projet de loi vise des objectifs louables.
Les ressources sont nombreuses et principalement minieres.
Le secteur minier possede une longue experience dans ce domaine.
Ils observent les choses de plusieurs points de vue differents.
Puis, il y a l'absence de transparence dans le choix des membres.
Ce projet de loi doit comporter des dispositions tres claires sur ces points.
Le president suppleant (M. McClelland):
Plusieurs deputes sont venus me parler, et je voudrais tirer quelque chose au clair.
La plupart d'entre vous le savez sans doute deja.
JOUR DESIGNE-L'INDUSTRIE DE LA PECHE AU CANADA
(L'amendement, mis aux voix, est adopte.)
Je declare l'amendement adopte.
Le prochain vote porte sur la motion principale modifiee.
Le vote porte sur la motion suivante:
M. Charest, appuye par Mme Wayne, propose:
Que la Chambre reconnaisse le besoin imperieux de reagir...
Ai-je bien entendu un non?
Plait-il a la Chambre d'adopter la motion modifiee?
Monsieur le President, j'invoque le Reglement.
Y a-t-il consentement unanime de la Chambre?
Monsieur le President, les deputes reformistes presents votent contre la motion.
Monsieur le President, les deputes du Bloc quebecois voteront contre cette motion.
Monsieur le President, les deputes neo-democrates presents votent contre la motion.
Nous n'avons pas entendu le vote du Parti conservateur.
Voir liste sous le vote no 017 ]
Je declare la motion modifiee adoptee.
Le depute invoque le Reglement.
Le chiffre indique en ce qui concerne les liberaux est-il exact?
Y a-t-il consentement unanime de la Chambre?
Monsieur le President, les deputes du Bloc quebecois voteront en faveur de cette motion.
Monsieur le President, les deputes du NPD presents ce soir voteront contre cette motion.
Monsieur le President, nous voterons en faveur de cette motion.
(La motion, mise aux voix, est adoptee.)
Je declare la motion adoptee.
Le projet de loi est donc renvoye au Comite permanent de l'industrie.
(Le projet de loi est lu pour la deuxieme fois et renvoye a un comite.)
La question est la suivante:
M. Dion, appuye par Mme Stewart (Northumberland), propose:
Qu'un comite mixte special de la Chambre...
M. Manning, appuye par Mlle Gray (Edmonton-Nord), propose l'amendement suivant:
Que la motion soit modifiee...
M. John Nunziata (York-Sud-Weston, Ind.):
Monsieur le President, j'invoque le Reglement.
M. Preston Manning, appuye par Mlle Gray (Edmonton-Nord), propose l'amendement suivant:
Qu'on modifie la motion
Le critere du consentement democratique,
2. Le critere de l'interet national canadien et
c) en supprimant les mots et sept senateurs dans le deuxieme paragraphe;
f) en supprimant les mots au Senat et au neuvieme paragraphe;
h) en supprimant tout le dernier paragraphe.
Le vote porte sur l'amendement.
Y a-t-il consentement unanime?
Monsieur le President, les deputes reformistes presents appuient l'amendement.
Monsieur le President, les deputes du Bloc quebecois voteront contre cet amendement.
Monsieur le President, les deputes du NPD qui sont presents appuient l'amendement.
Monsieur le President, les deputes de notre parti voteront contre l'amendement.
Cependant, je vais voter pour l'amendement.
(L'amendement, mis aux voix, est rejete.)
Je declare l'amendement rejete.
Le vote porte maintenant sur la motion principale.
Plait-il a la Chambre d'adopter la motion?
C'est ainsi qu'il faut proceder.
Que tous ceux qui sont en faveur de la motion veuillent bien dire oui.
Que tous ceux qui sont contre veuillent bien dire non.
A mon avis, les oui l'emportent.
Y a-t-il consentement unanime?
Monsieur le President, les deputes du Bloc quebecois voteront en faveur de cette motion.
Monsieur le President, les deputes du Nouveau Parti democratique appuient la motion.
Monsieur le President, nous voterons en faveur de la motion.
(La motion, mise aux voix, est adoptee.)
Je declare la motion adoptee.
Le hansard indiquera la maniere dont le depute a vote.
LOI SUR LES JUSTES SALAIRES ET HEURES DE TRAVAIL
Apparemment, les promesses rompues jonchent le parcours des liberaux.
Passons rapidement sur d'autres details pour arriver au 24 avril 1997.
Apparemment, les liberaux ont un peu panique.
Le Parti reformiste gagnait du terrain.
Nous estimons qu'il ne doit pas se meler de cela.
Comment les entrepreneurs reagiront-ils?
Ils tricheront dans les contrats.
Ils y insereront les couts supplementaires.
Le retablissement de ces echelles de salaires nous vaudra encore plus de bureaucrates.
Revenons un peu plus en arriere.
Il n'y a aucune logique dans cela.
Voici des exemples de travailleurs qui disent ne pas avoir ete remuneres de facon juste.
Cela depasse mon entendement.
La logique d'un tel raisonnement m'echappe.
Mme Brenda Chamberlain (secretaire parlementaire du ministre du Travail, Lib.):
Il a souvent des observations interessantes a nous faire.
C'etait en 1900.
La loi proprement dite ne vint qu'en 1935.
C'est une tres bonne chose a savoir pour les entrepreneurs.
C'etait un autre tres bon processus juste et transparent.
Toutefois, l'obligation de payer un juste salaire a continue d'exister.
Il est tres important de comprendre ce nouveau processus.
Cette methode semble constituer un compromis raisonnable.
Nous voulons tous agir pour le mieux.
Qu'est-ce que le depute propose dans le cas des autres?
Le depute a mis beaucoup de reflexion dans cette motion.
Je le felicite de lancer cet important debat, mais nous en sommes au debut.
Il n'y aurait de surprise pour personne.
Je pense qu'il faut rejeter la motion du depute.
M. Yves Rocheleau (Trois-Rivieres, BQ):
L'objectif vise par cette loi est clair et noble, d'ailleurs.
Et la TPS recemment.
On sait combien ces gens-la sont des girouettes.
Donc, moi, ca m'inquiete.
On nous transmet le communique de presse du 24 avril.
Cela demontre un peu le serieux de ce gouvernement.
M. Gordon Earle (Halifax-Ouest, NPD):
Le gouvernement a cesse d'afficher les justes salaires en 1987.
Le 24 avril dernier, le concept des justes salaires a ete retabli.
Cette motion reformiste va a l'encontre des interets du contribuable.
Je vais expliquer pourquoi a la Chambre.
C'est une question d'honnetete, de responsabilite et de loyaute.
C'est comme ca que les entrepreneurs consciencieux se font avoir.
C'est comme ca que les contribuables se font avoir.
Il convient toujours d'obtenir la meilleure valeur pour chaque dollar depense.
Il convient toujours aussi d'appuyer les entrepreneurs justes.
Cela favorise la sante et la securite de tous les travailleurs canadiens de la construction.
Cette motion merite d'etre rejetee avec vigueur.
La presidente suppleante (Mme Thibeault):
M. Dale Johnston (Wetaskiwin, Ref.):
J'ai tellement de choses a dire que je ne sais pas par ou commencer.
Elle dit qu'elle voudrait voir cela et je le voudrais aussi.
Toutefois, je dirai a ma collegue que les gens recoivent un juste salaire.
Je crois que cela prouve que les gens sont bien payes pour l'epoque.
J'estime que le systeme actuel fonctionne tres bien.
En fait, il ne fonctionne pas dans .00002
p. 100 des cas.
Tout le reste serait du domaine des heures supplementaires.
Ils ont alors plus de temps a consacrer a leur famille.
Mon collegue du Bloc a parle d'un vide.
Or, il n'y a pas de vide.
Je voudrais terminer la-dessus.
Je pense que mon temps tire a sa fin.
La presidente suppleante (Mme Thibeault):
M. Andrew Telegdi (Kitchener-Waterloo, Lib.):
C'etait le dernier jour de seance de la derniere legislature.
Comment peut-on prevenir le crime?
Comment pouvons-nous accroitre la securite dans nos collectivites?
Toutefois, cela exige un partenariat.
La meilleure facon de lutter contre le crime au Canada consiste a le prevenir.
Je remercie le depute de sa question.
Mme Libby Davies (Vancouver-Est, NPD):
J'ai souleve cette question a plusieurs reprises depuis l'ete.
Aucune reponse n'a ete donnee, aucune mesure n'a ete prise.
Combien faudra-t-il de morts avant que le gouvernement n'intervienne?
M. Joseph Volpe (secretaire parlementaire du ministre de la Sante, Lib.):
La presidente suppleante (Mme Thibeault):
La motion portant que la Chambre s'ajourne est maintenant reputee adoptee.
(La seance est levee a 19 h 22.)
|
{"hexsha": "4921655c48ccaa11eb4b597df4c3372e6c4251e6", "size": 66318, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "data/Hansard/Training/hansard.36.1.house.debates.022.f", "max_stars_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_stars_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/Hansard/Training/hansard.36.1.house.debates.022.f", "max_issues_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_issues_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/Hansard/Training/hansard.36.1.house.debates.022.f", "max_forks_repo_name": "j1ai/Canadian_Hansards_Neural_Machine_Translation", "max_forks_repo_head_hexsha": "554666a89090fc1b1d1fb83601a2e9da132e6ad0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.537084399, "max_line_length": 104, "alphanum_fraction": 0.7799240025, "num_tokens": 18956}
|
# ---------------------------------------------------------------------------
# DSSM (Deep Structured Semantic Model) trainer: setup, CLI flags, data load.
# ---------------------------------------------------------------------------
import pickle
import random
import time
import sys
import numpy as np
import tensorflow as tf
import process_text
from scipy import spatial
import argparse
# Legacy TF1 flags; of these only summaries_dir is read later in this script.
flags = tf.app.flags
FLAGS = flags.FLAGS
# NOTE(review): backslash in the default path -- '\d' is an invalid escape
# sequence (DeprecationWarning on newer Pythons); a forward slash or raw
# string would be safer.
flags.DEFINE_string('summaries_dir', 'data\dssm-400-120-relu', 'Summaries directory')
flags.DEFINE_float('learning_rate', 0.1, 'Initial learning rate.')
flags.DEFINE_integer('max_steps', 408, 'Number of steps to run trainer.')
flags.DEFINE_integer('epoch_steps', 408, "Number of steps in one epoch.")
flags.DEFINE_integer('pack_size', 20, "Number of batches in one pickle pack.")
flags.DEFINE_bool('gpu', 1, "Enable GPU or not")
# Command-line interface: inputs are pre-vectorized, pickled sparse matrices.
parser = argparse.ArgumentParser(description='dssm trainer', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--train_query', type=str, dest='train_query_file', required=True, help='training query file')
parser.add_argument('--test_query', type=str, dest='test_query_file', required=True, help='test query file')
parser.add_argument('--train_doc', type=str, dest='train_doc_file', required=True, help='training doc file')
parser.add_argument('--test_doc', type=str, dest='test_doc_file', required=True, help='test doc file')
parser.add_argument('--out', type=str, dest='out_file', default='pred.txt', help='pred output')
parser.add_argument('--epoch', type=int, dest='epoch_num', default=5, help='pred output')
parser.add_argument('--lr', type=float, dest='learning_rate',default=0.1)
parser.add_argument('--bs', type=int, dest='batch_size', default=1024)
args = parser.parse_args()
# Load the query/doc feature matrices from pickle (raw-text path disabled).
start = time.time()
#query_train_data, doc_train_data = process_text.build_data(args.train_file)
#query_test_data, doc_test_data = process_text.build_data(args.test_file)
query_train_data = pickle.load(open(args.train_query_file, 'rb'))
doc_train_data = pickle.load(open(args.train_doc_file, 'rb'))
query_test_data = pickle.load(open(args.test_query_file, 'rb'))
doc_test_data = pickle.load(open(args.test_doc_file, 'rb'))
end = time.time()
print("Loading data from HDD to memory: %.2fs" % (end - start))
# Model hyper-parameters.
TRIGRAM_D = 27000  # input dimensionality (letter-trigram vocabulary size, per the name)
NEG = 50           # negative documents sampled per query (see FD_rotate below)
BS = args.batch_size
L1_N = 400         # hidden layer 1 width
L2_N = 120         # hidden layer 2 width == semantic vector size
# Whole batches per epoch for each split (remainder rows are dropped).
train_iter_num_epoch = int(query_train_data.shape[0] / BS)
test_iter_num_epoch = int(query_test_data.shape[0] / BS)
print(train_iter_num_epoch, test_iter_num_epoch)
query_in_shape = np.array([BS, TRIGRAM_D], np.int64)
doc_in_shape = np.array([BS, TRIGRAM_D], np.int64)
def variable_summaries(var, name):
    """Attach a standard set of TensorBoard summaries to a Tensor.

    Emits scalar summaries for the mean, standard deviation, max and min of
    ``var``, plus a histogram of its values.

    Parameters
    ----------
    var : tf.Tensor
        Tensor (typically a weight or bias variable) to summarize.
    name : str
        Name suffix used in the summary tags.
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean/' + name, mean)
        with tf.name_scope('stddev'):
            # Fix: use reduce_mean, not reduce_sum -- the original computed
            # the sqrt of the *sum* of squared deviations, which grows with
            # the number of elements and is not a standard deviation.
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        # NOTE(review): 'sttdev' tag typo kept on purpose so existing
        # TensorBoard dashboards keyed on this tag keep working.
        tf.summary.scalar('sttdev/' + name, stddev)
        tf.summary.scalar('max/' + name, tf.reduce_max(var))
        tf.summary.scalar('min/' + name, tf.reduce_min(var))
        tf.summary.histogram(name, var)
# ---------------------------------------------------------------------------
# Graph definition: two-layer towers with shared weights for queries and
# documents, negative sampling by batch rotation, cosine softmax loss.
# ---------------------------------------------------------------------------
with tf.name_scope('input'):
    # Shape [BS, TRIGRAM_D]: sparse feature vectors, fed one batch at a time.
    query_batch = tf.sparse_placeholder(tf.float32, shape=(None, TRIGRAM_D), name='QueryBatch')
    # Shape [BS, TRIGRAM_D]
    doc_batch = tf.sparse_placeholder(tf.float32, shape=(None, TRIGRAM_D), name='DocBatch')
with tf.name_scope('L1'):
    # Glorot-style uniform init range: sqrt(6 / (fan_in + fan_out)).
    l1_par_range = np.sqrt(6.0 / (TRIGRAM_D + L1_N))
    weight1 = tf.Variable(tf.random_uniform([TRIGRAM_D, L1_N], -l1_par_range, l1_par_range))
    bias1 = tf.Variable(tf.random_uniform([L1_N], -l1_par_range, l1_par_range))
    variable_summaries(weight1, 'L1_weights')
    variable_summaries(bias1, 'L1_biases')
    # Both towers share weight1/bias1; sparse-dense matmul for the sparse input.
    # query_l1 = tf.matmul(tf.to_float(query_batch),weight1)+bias1
    query_l1 = tf.sparse_tensor_dense_matmul(query_batch, weight1) + bias1
    # doc_l1 = tf.matmul(tf.to_float(doc_batch),weight1)+bias1
    doc_l1 = tf.sparse_tensor_dense_matmul(doc_batch, weight1) + bias1
    query_l1_out = tf.nn.relu(query_l1)
    doc_l1_out = tf.nn.relu(doc_l1)
with tf.name_scope('L2'):
    l2_par_range = np.sqrt(6.0 / (L1_N + L2_N))
    weight2 = tf.Variable(tf.random_uniform([L1_N, L2_N], -l2_par_range, l2_par_range))
    bias2 = tf.Variable(tf.random_uniform([L2_N], -l2_par_range, l2_par_range))
    variable_summaries(weight2, 'L2_weights')
    variable_summaries(bias2, 'L2_biases')
    query_l2 = tf.matmul(query_l1_out, weight2) + bias2
    doc_l2 = tf.matmul(doc_l1_out, weight2) + bias2
    # Final semantic vectors (dimension L2_N) for queries and documents.
    query_y = tf.nn.relu(query_l2)
    doc_y = tf.nn.relu(doc_l2)
with tf.name_scope('FD_rotate'):
    n_doc_y = doc_y
    print(query_y.shape)
    # Rotate FD+ to produce 50 FD-: each rotation of the positive-document
    # batch pairs every query with some other query's document, yielding NEG
    # negatives per query with no extra data.
    temp = tf.tile(n_doc_y, [1, 1])
    for i in range(NEG):
        rand = int((random.random() + i) * BS / NEG)
        n_doc_y = tf.concat([n_doc_y,
                             tf.slice(temp, [rand, 0], [BS - rand, -1]),
                             tf.slice(temp, [0, 0], [rand, -1])], 0)
with tf.name_scope('Cosine_Similarity'):
    # Cosine similarity between each query and its 1 positive + NEG negatives.
    query_norm = tf.tile(tf.sqrt(tf.reduce_sum(tf.square(query_y), 1, True)), [NEG + 1, 1])
    doc_norm = tf.sqrt(tf.reduce_sum(tf.square(n_doc_y), 1, True))
    prod = tf.reduce_sum(tf.multiply(tf.tile(query_y, [NEG + 1, 1]), n_doc_y), 1, True)
    norm_prod = tf.multiply(query_norm, doc_norm)
    cos_sim_raw = tf.truediv(prod, norm_prod)
    # Reshape to [BS, NEG + 1] with the positive doc in column 0; the factor
    # 20 is presumably the DSSM softmax smoothing parameter gamma -- confirm.
    cos_sim = tf.transpose(tf.reshape(tf.transpose(cos_sim_raw), [NEG + 1, BS])) * 20
with tf.name_scope('Loss'):
    # Train loss: mean negative log-likelihood of the positive document
    # under a softmax over its NEG + 1 candidates.
    prob = tf.nn.softmax((cos_sim))
    hit_prob = tf.slice(prob, [0, 0], [-1, 1])
    loss = -tf.reduce_sum(tf.log(hit_prob)) / BS
    tf.summary.scalar('loss', loss)
with tf.name_scope('Training'):
    # Plain SGD on the softmax loss.
    train_step = tf.train.GradientDescentOptimizer(args.learning_rate).minimize(loss)
# with tf.name_scope('Accuracy'):
#     correct_prediction = tf.equal(tf.argmax(prob, 1), 0)
#     accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#     tf.scalar_summary('accuracy', accuracy)
merged = tf.summary.merge_all()
with tf.name_scope('Test'):
    # Scalar placeholder so an externally averaged loss can be logged.
    average_loss = tf.placeholder(tf.float32)
    loss_summary = tf.summary.scalar('average_loss', average_loss)
def pull_batch(query_data, doc_data, batch_idx, batch_size):
    """Slice one mini-batch out of the sparse query/doc matrices and convert
    it to ``tf.SparseTensorValue``s for feeding the sparse placeholders.

    Parameters
    ----------
    query_data, doc_data : scipy.sparse matrix
        Full datasets, one row of features per sample.
    batch_idx : int
        Zero-based index of the batch to extract.
    batch_size : int
        Number of rows per batch.

    Returns
    -------
    tuple of tf.SparseTensorValue
        (query batch, document batch) in COO form, each with dense shape
        ``(rows_in_slice, TRIGRAM_D)``.
    """
    start, end = batch_idx * batch_size, (batch_idx + 1) * batch_size
    # COO format exposes the row/col/data triplets SparseTensorValue needs.
    query_in = query_data[start:end, :].tocoo()
    doc_in = doc_data[start:end, :].tocoo()
    # Fix: np.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; np.float64 is the exact equivalent.
    query_in = tf.SparseTensorValue(
        np.transpose([np.array(query_in.row, dtype=np.int64), np.array(query_in.col, dtype=np.int64)]),
        np.array(query_in.data, dtype=np.float64),
        np.array((query_in.shape[0], TRIGRAM_D), dtype=np.int64))
    doc_in = tf.SparseTensorValue(
        np.transpose([np.array(doc_in.row, dtype=np.int64), np.array(doc_in.col, dtype=np.int64)]),
        np.array(doc_in.data, dtype=np.float64),
        np.array((doc_in.shape[0], TRIGRAM_D), dtype=np.int64))
    return query_in, doc_in
def feed_dict(Train, batch_idx, batch_size):
    """Build a feed_dict mapping the sparse placeholders to one data batch.

    Selects the training or test split depending on ``Train`` and feeds the
    requested mini-batch into ``query_batch`` / ``doc_batch``.
    """
    queries, docs = (
        (query_train_data, doc_train_data) if Train
        else (query_test_data, doc_test_data)
    )
    query_in, doc_in = pull_batch(queries, docs, batch_idx, batch_size)
    return {query_batch: query_in, doc_batch: doc_in}
# ---------------------------------------------------------------------------
# Session: training loop, per-epoch loss summaries, prediction dump, weights.
# ---------------------------------------------------------------------------
# Grow GPU memory on demand instead of reserving it all up front.
config = tf.ConfigProto() # log_device_placement=True)
config.gpu_options.allow_growth = True
# if not FLAGS.gpu:
# config = tf.ConfigProto(device_count= {'GPU' : 0})
# NOTE(review): saver is created but the save call further down is commented out.
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train', sess.graph)
    test_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/test', sess.graph)
    # Actual execution
    start = time.time()
    for epoch in range(args.epoch_num):
        for batch_idx in range(train_iter_num_epoch):
            # Single-line console progress indicator.
            progress = 100.0 * (batch_idx+1) / train_iter_num_epoch
            sys.stdout.write("\r%.2f%% Epoch %d" % (progress, epoch))
            sys.stdout.flush()
            sess.run(train_step, feed_dict=feed_dict(True, batch_idx % train_iter_num_epoch, BS))
            if batch_idx == train_iter_num_epoch - 1:
                # End of epoch: average training loss over all train batches.
                end = time.time()
                epoch_loss = 0
                for i in range(train_iter_num_epoch):
                    loss_v = sess.run(loss, feed_dict=feed_dict(True, i, BS))
                    epoch_loss += loss_v
                epoch_loss /= train_iter_num_epoch
                train_loss = sess.run(loss_summary, feed_dict={average_loss: epoch_loss})
                train_writer.add_summary(train_loss, epoch * train_iter_num_epoch + 1)
                print("Epoch #%-5d | Train Loss: %-4.3f | PureTrainTime: %-3.3fs" %
                      (epoch, epoch_loss, end - start))
                # Average test loss over all test batches.
                epoch_loss = 0
                for i in range(test_iter_num_epoch):
                    loss_v = sess.run(loss, feed_dict=feed_dict(False, i, BS))
                    epoch_loss += loss_v
                epoch_loss /= test_iter_num_epoch
                test_loss = sess.run(loss_summary, feed_dict={average_loss: epoch_loss})
                test_writer.add_summary(test_loss, epoch * train_iter_num_epoch + 1)
                # Restart the timer for the next epoch.
                # NOTE(review): start is reset *before* the print below, so
                # "CalLossTime" shows end - start with the fresh start value --
                # likely not the intended duration; confirm.
                start = time.time()
                print("Epoch #%-5d | Batch: %d | Test Loss: %-4.3f | CalLossTime: %-3.3fs" %
                      (epoch, batch_idx, epoch_loss, end - start))
    # saver = saver.save(sess, "data/model.ckpt")
    # Dump cosine similarity for test pairs, one value per line.
    # NOTE(review): uses batch size 1 but only test_iter_num_epoch iterations,
    # so only the first test_iter_num_epoch pairs are scored -- confirm intended.
    with open(args.out_file, 'w') as o:
        for i in range(test_iter_num_epoch):
            data = feed_dict(False, i, 1)
            q = sess.run(query_y, feed_dict=data)
            d = sess.run(doc_y, feed_dict=data)
            sim = 1.0 - spatial.distance.cosine(q.reshape(L2_N), d.reshape(L2_N))
            o.write('{0}\n'.format(sim))
    # Persist learned parameters as both text and pickle.
    np.savetxt("weight1_08.txt", weight1.eval())
    np.savetxt("bias1_08.txt", bias1.eval())
    np.savetxt("weight2_08.txt", weight2.eval())
    np.savetxt("bias2_08.txt", bias2.eval())
    pickle.dump(weight1.eval(), open('weight1_08.pickle', 'wb', True))
    pickle.dump(bias1.eval(), open('bias1_08.pickle', 'wb', True))
    pickle.dump(weight2.eval(), open('weight2_08.pickle', 'wb', True))
    pickle.dump(bias2.eval(), open('bias2_08.pickle', 'wb', True))
|
{"hexsha": "69d391cc243f738d17a04ba8943ec383b0b561ab", "size": 10332, "ext": "py", "lang": "Python", "max_stars_repo_path": "DLScripts/samples/dssm.py", "max_stars_repo_name": "StanleyLeiSun/PlayGround", "max_stars_repo_head_hexsha": "e8774ef41043e88cc64fc1eacbf0edd99a40ba35", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-12T11:40:28.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-12T11:40:28.000Z", "max_issues_repo_path": "DLScripts/samples/dssm.py", "max_issues_repo_name": "StanleyLeiSun/PlayGround", "max_issues_repo_head_hexsha": "e8774ef41043e88cc64fc1eacbf0edd99a40ba35", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-18T20:30:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:19:54.000Z", "max_forks_repo_path": "DLScripts/samples/dssm.py", "max_forks_repo_name": "StanleyLeiSun/PlayGround", "max_forks_repo_head_hexsha": "e8774ef41043e88cc64fc1eacbf0edd99a40ba35", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4939759036, "max_line_length": 116, "alphanum_fraction": 0.6704413473, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2740}
|
# Standalone example: render a JSON schema as Dash forms and collect the data.
import dash
import dash_bootstrap_components as dbc
import dash_html_components as html
import json
from pathlib import Path
from json_schema_to_dash_forms import SchemaFormContainer
from dash.dependencies import Input, Output, State
import numpy as np
import json  # NOTE(review): duplicate import -- json is already imported above
# Font Awesome and bootstrap CSS required
FONT_AWESOME = "https://use.fontawesome.com/releases/v5.7.2/css/all.css"
external_stylesheets = [dbc.themes.BOOTSTRAP, FONT_AWESOME]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Required if using file browsing component
app.server.config['DATA_PATH'] = '.'
# The JSON schema that drives the generated forms.
path_schema = Path.cwd() / "schema.json"
with open(path_schema, 'r') as inp:
    my_schema = json.load(inp)
# Initialize the form container that renders the schema as Dash components.
my_form = SchemaFormContainer(
    id='myform',
    schema=my_schema,
    parent_app=app
)
# Page layout: the generated form, a "Show data" button, a dismissable alert
# for validation errors, and a read-only textarea for the resulting JSON.
app.layout = dbc.Container([
    dbc.Row(
        dbc.Col(
            my_form,
            width={'size': 12}
        ),
    ),
    dbc.Row(
        dbc.Col(
            dbc.Button('Show data', id='show_data'),
            width={'size': 12}
        ),
        style={"margin-top": "10px", "margin-bottom": "10px"}
    ),
    dbc.Row([
        dbc.Col(
            dbc.Alert(
                children=[],
                id="alerts",
                dismissable=True,
                is_open=False,
                color='danger'
            )
        )
    ]),
    dbc.Row([
        dbc.Col(
            dbc.Textarea(
                id='display_results',
                className='string_input',
                bs_size="lg",
                readOnly=True,
                style={'font-size': '16px', 'min-height': '250px', 'max-height': '500px'}
            ),
        )
    ])
], style={"margin-bottom": '30px'})
@app.callback(
    Output('myform-external-trigger-update-internal-dict', 'children'),
    Input('show_data', 'n_clicks')
)
def update_internal_form_dict(click):
    """Trigger the form container to refresh its internal data dict.

    Fires only for a real button click (the initial page-load callback has
    an empty trigger context); emitting a fresh random string forces the
    downstream update. When the refresh finishes, a flag appears on
    "myform-output-update-finished-verification".
    """
    triggered = dash.callback_context.triggered
    return str(np.random.rand()) if triggered else dash.no_update
@app.callback(
    [
        Output('alerts', 'is_open'),
        Output('alerts', 'children'),
        Output('display_results', 'value')
    ],
    [Input('myform-output-update-finished-verification', 'children')],
    [State('alerts', 'is_open')]
)
def show_data_from_internal_dict(trigger, is_open):
    """Collect the form data into a nested dict and display it.

    Runs once the form's internal dict has finished updating. If required
    fields are missing, opens the alert box with the validation messages;
    otherwise renders the schema-shaped output dict as pretty-printed JSON.
    """
    if not trigger:
        return dash.no_update
    # data_to_nested() reports missing required fields (alerts) and builds
    # the nested output dict following the schema.
    alerts, output = my_form.data_to_nested()
    if alerts is not None:
        # Validation failed: open the alert box, leave the textarea empty.
        return True, alerts, ''
    return False, [], json.dumps(output, indent=4)
# Run the Dash development server when executed as a script.
if __name__ == '__main__':
    app.run_server(debug=True)
|
{"hexsha": "37a8ed068e71e77a7b2b9087f0d2d12ad25e816b", "size": 3199, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/standalone_example.py", "max_stars_repo_name": "catalystneuro/json-schema-to-dash-forms", "max_stars_repo_head_hexsha": "a6e83a02f3ac2dd0ec2c7d09ec4327f3c6512ee4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-11-11T12:05:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T21:33:18.000Z", "max_issues_repo_path": "examples/standalone_example.py", "max_issues_repo_name": "catalystneuro/json-schema-to-dash-forms", "max_issues_repo_head_hexsha": "a6e83a02f3ac2dd0ec2c7d09ec4327f3c6512ee4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-10-23T15:31:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-09T09:06:59.000Z", "max_forks_repo_path": "examples/standalone_example.py", "max_forks_repo_name": "catalystneuro/json-schema-to-dash-forms", "max_forks_repo_head_hexsha": "a6e83a02f3ac2dd0ec2c7d09ec4327f3c6512ee4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.6583333333, "max_line_length": 121, "alphanum_fraction": 0.6195686152, "include": true, "reason": "import numpy", "num_tokens": 727}
|
# Minimal SimpleCG example: render a sphere enclosed by three axis-aligned
# planes, lit by a spot light.
using SimpleCG
# Scene geometry: three planes (given by normal and offset) and a sphere
# (center and radius).
plane_x = Plane(Vector3(0, 1, 0), 0)
plane_y = Plane(Vector3(0, 0, 1), -50)
plane_z = Plane(Vector3(1, 0, 0), -20)
sphere = Sphere(Vector3(0, 10, -10), 10)
# Alternative light sources kept for reference:
# light = DirectionalLight(White, Vector3(-1.75, -2, -1.5))
# light = PointLight(White * 2000, Vector3(30, 40, 20))
# Spot light: intensity, position, direction, then π/9, π/6, 0.5 -- presumably
# inner/outer cone angles and a falloff factor; confirm against SimpleCG docs.
light = SpotLight(White * 2000, Vector3(30, 40, 20), Vector3(-1, -1, -1), π/9, π/6, 0.5)
canvas = Canvas(400, 400);
# Perspective camera at (0, 10, 10) looking along -z; π/2 is presumably the
# field of view.
camera = PerspectiveCamera(Vector3(0, 10, 10), Vector3(0, 0, -1), Vector3(0, 1, 0), π/2)
scene = UnionGeometry([plane_x, plane_y, plane_z, sphere])
# Render with the light-based shader and display the result.
SimpleCG.inner_render!(canvas, scene, camera, SimpleCG.light_render!, Lights([light]))
imshow(canvas)
|
{"hexsha": "ec5c489c7c3a639076f551592e597edd88330459", "size": 657, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/render_light.jl", "max_stars_repo_name": "sunoru/SimpleeCG.jl", "max_stars_repo_head_hexsha": "07a1fb747973e6b09ec71dcceba66e1f93bcbc7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/render_light.jl", "max_issues_repo_name": "sunoru/SimpleeCG.jl", "max_issues_repo_head_hexsha": "07a1fb747973e6b09ec71dcceba66e1f93bcbc7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/render_light.jl", "max_forks_repo_name": "sunoru/SimpleeCG.jl", "max_forks_repo_head_hexsha": "07a1fb747973e6b09ec71dcceba66e1f93bcbc7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5, "max_line_length": 88, "alphanum_fraction": 0.6666666667, "num_tokens": 257}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 11:46:10 2019
@author: roshanprakash
"""
import pandas as pd
import numpy as np
import tensorflow as tf
# Fix the TF and NumPy seeds so training runs are reproducible.
tf.set_random_seed(35)
np.random.seed(3)
import networkx as nx
from GraphUtils import *
from Loss import *
class Component:
    """Per-node regression network for the causal graph.

    Each graph node is modelled as a small fully-connected network mapping the
    outputs of the node's parents (plus a noise input) to the node's value.
    """

    def __init__(self, dimensions):
        """
        Build the layer stack for one node.

        PARAMETERS
        ----------
        - dimensions(list) : a list of the form
          [number_parents, num_hidden_layers, [number_hidden_units,...], output_dim]

        RETURNS
        -------
        - None
        """
        self.num_parents = dimensions[0]
        # num_layers counts the hidden layers plus the final output layer.
        self.num_layers = dimensions[1] + 1
        hidden_units = dimensions[2]
        assert len(hidden_units) == self.num_layers - 1, \
            "Number of hidden units must be specified for each fully-connected hidden layer!"
        self.layers = {}
        for layer_idx, units in enumerate(hidden_units, start=1):
            self.layers['layer_{}'.format(layer_idx)] = tf.keras.layers.Dense(
                units,
                kernel_initializer=tf.variance_scaling_initializer,
                activation=tf.nn.relu)
        # Final linear output layer (no activation).
        self.layers['layer_{}'.format(self.num_layers)] = tf.keras.layers.Dense(
            dimensions[-1], kernel_initializer=tf.truncated_normal_initializer)

    def forward_pass(self, x):
        """
        Computes a forward pass through this node network.

        PARAMETERS
        ----------
        - x(tf.tensor of shape (N, 1+number_of_parents)) : parent outputs
          concatenated with this node's noise input

        RETURNS
        -------
        - the node output (tf.tensor of shape (N, output_dim))
        """
        out = x
        for layer_idx in range(1, self.num_layers + 1):
            out = self.layers['layer_{}'.format(layer_idx)](out)
        return out

    def reset_weights(self):
        """Resets the weights of the network (not implemented)."""
        pass
class CausalNet:
    """
    A Causal Generative Network that collectively holds component-regression
    networks for every node in the causal graph.

    A directed causal graph (DAG) is inferred from the observational data and
    the TF computation graph is built by wiring one ``Component`` network per
    node, fed by its parents' outputs plus a per-node noise input.

    REFERENCE
    ---------
    [1.] Learning Functional Causal Models with Generative Neural Networks
    [2.] Link : https://arxiv.org/pdf/1709.05321.pdf
    """

    def __init__(self, data, batch_size=256, lr=0.001, num_hidden_layers=1, nh_per_layer=None):
        """
        Initialize the causal generative network.

        PARAMETERS
        ----------
        - data(pandas DataFrame) : the input observational data
        - batch_size(int, default=256) : the size of mini-batches used while training the network
        - lr(float, default=0.001) : the learning rate for this Causal Generative Neural Network
        - num_hidden_layers(int, default=1) : the number of hidden layers to be used in each of the component networks
        - nh_per_layer(list, default=[64]) : the number of hidden units in each layer of the component
          networks (Requires : <num_hidden_layers>=len(<nh_per_layer>))

        RETURNS
        -------
        - None
        """
        if nh_per_layer is None:
            nh_per_layer = [64]  # avoid a shared mutable default argument
        # Graph-specific initializations
        self.data = data
        assert batch_size <= self.data.values.shape[0], "Not enough data instances. Reduce the batch size and try again!"
        self.batch_size = batch_size
        self.causal_graph = infer_DAG(self.data)
        # NOTE: nx.topological_sort returns a generator; it is consumed exactly once below.
        self.topological_order = nx.topological_sort(self.causal_graph)
        # Network-specific initializations
        tf.reset_default_graph()
        self.Network = self._build_causal_network(num_hidden_layers, nh_per_layer)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        self.output_layer = num_hidden_layers + 1
        self.observed_data = tf.placeholder(dtype=tf.float32, shape=[None, self.data.shape[1]])
        self.noise_inputs = tf.placeholder(dtype=tf.float32, shape=[None, self.data.shape[1]])
        self.node_inputs = {}
        self.node_outputs = {}
        # Forward-pass wiring: each node consumes its parents' outputs plus its
        # own column of the noise tensor; topological order guarantees parents
        # are wired before their children.
        for node in self.topological_order:
            node_idx = list(self.data.columns).index(node)
            parent_outs = [self.node_outputs[parent] for parent in self.causal_graph.predecessors(node)]
            noise_col = tf.slice(self.noise_inputs, begin=[0, node_idx], size=[-1, 1])
            self.node_inputs[node] = tf.concat(parent_outs + [noise_col], axis=1)
            self.node_outputs[node] = self.Network[node].forward_pass(self.node_inputs[node])
        self.generated_data = tf.concat([self.node_outputs[node] for node in self.causal_graph.nodes], axis=1)
        self.loss = compute_loss(self.generated_data, self.observed_data)
        self.train_step = self.optimizer.minimize(self.loss)

    def _build_causal_network(self, num_hidden_layers, nh_per_layer):
        """
        Helper to build one ``Component`` regression network per graph node.

        PARAMETERS
        ----------
        - num_hidden_layers(int) : the number of hidden layers to be used in each of the component networks
        - nh_per_layer(list) : the number of hidden units in each layer of the component networks

        RETURNS
        -------
        - a dictionary of the form {node_id : Component-Network-object} ; all nodes included.
        """
        return {node: Component([len(list(self.causal_graph.predecessors(node))),
                                 num_hidden_layers, nh_per_layer, 1])
                for node in self.causal_graph.nodes}

    def run(self, ground_truth_data, sess, is_training=False):
        """
        Computes a forward pass through the network (generates data from noise)
        and optionally performs one weight update.

        PARAMETERS
        ----------
        - ground_truth_data(numpy array) : the ground truth data
        - sess : a tensorflow session
        - is_training(bool, default=False) : if True, also runs one optimizer step

        RETURNS
        -------
        - a numpy array of generated data and the MMD loss.
        """
        # One independent N(3, 1) noise column per observed feature.
        n = ground_truth_data.shape[0]
        noise = np.hstack([np.reshape(np.random.normal(3, 1, n), (n, 1))
                           for _ in range(self.data.shape[1])])
        feed = {self.observed_data: ground_truth_data, self.noise_inputs: noise}
        if is_training:
            generated_data, loss, _ = sess.run([self.generated_data, self.loss, self.train_step], feed_dict=feed)
            print('Updated weights')
        else:
            generated_data, loss = sess.run([self.generated_data, self.loss], feed_dict=feed)
        return generated_data, loss

    def _sample_batches(self):
        """
        Samples shuffled mini-batches from the first 75% of the data (training split).

        RETURNS
        -------
        - a list containing batches wherein each batch is a numpy array of shape
          [<batch_size>, d] where d is the dimension of the data.
        """
        n_train = int(0.75 * self.data.shape[0])
        shuffled = np.random.permutation(self.data.values[:n_train])
        return [shuffled[k * self.batch_size:(k + 1) * self.batch_size]
                for k in range(n_train // self.batch_size)]

    def train_evaluate(self, num_epochs=100, print_every=1000, plot_losses=False, save_path='../model'):
        """
        Trains the Causal Generative Network and evaluates it on the held-out 25%.

        PARAMETERS
        ----------
        - num_epochs(int, default=100) : the number of training epochs
        - print_every(int, default=1000) : prints loss after every <print_every> training steps
        - plot_losses(bool, default=False) : placeholder flag for plotting the training curve
        - save_path(str, default='../model') : placeholder path for saving the model

        RETURNS
        -------
        - None
        """
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            sess.run(tf.global_variables_initializer())
            training_losses = []
            # Cumulative step counter (the original printed iteration*epoch,
            # which over-counts steps across epochs).
            global_step = 0
            for epoch in range(1, num_epochs + 1):
                mini_batches = self._sample_batches()
                for batch in mini_batches:
                    generated_data, loss = self.run(batch, sess, is_training=True)
                    global_step += 1
                    if global_step % print_every == 0:
                        print('Completed training step for iteration {}, loss = {}'.format(global_step, loss))
                    training_losses.append(loss)
                print(generated_data)
            print('Completed training the model!')
            if plot_losses:
                # plot losses here
                pass
            # Model evaluation on the held-out 25% split.
            print('Validating model..')
            # BUGFIX: the original called self.forward_pass(...), which does not
            # exist on CausalNet (only Component defines it) and raised an
            # AttributeError at validation time; self.run is the intended call.
            _, test_loss = self.run(self.data.values[int(0.75 * self.data.shape[0]):], sess, is_training=False)
            print('Mean test-time loss = {}'.format(test_loss))
            # save model here
if __name__=='__main__':
    # Smoke test: fit the CGNN on 100 random 4-dimensional samples with
    # arbitrary integer column labels (2..5).
    sample_data = 1000*np.random.rand(100, 4)
    df = pd.DataFrame(sample_data, columns = np.arange(2, 6))
    CGNN = CausalNet(df, lr=0.0001, batch_size=10, num_hidden_layers=3, nh_per_layer=[100, 80, 20])
    CGNN.train_evaluate()
|
{"hexsha": "c0392d77fdf413696e4918e36d50401be33f8021", "size": 9720, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/CausalGraphicalModel.py", "max_stars_repo_name": "roshan19041/Causal-Discovery", "max_stars_repo_head_hexsha": "900cfc94d9fc3ff3d75366b00bda3acd044ed638", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-20T00:00:31.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-20T00:00:31.000Z", "max_issues_repo_path": "src/CausalGraphicalModel.py", "max_issues_repo_name": "roshan19041/Causal-Discovery", "max_issues_repo_head_hexsha": "900cfc94d9fc3ff3d75366b00bda3acd044ed638", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/CausalGraphicalModel.py", "max_forks_repo_name": "roshan19041/Causal-Discovery", "max_forks_repo_head_hexsha": "900cfc94d9fc3ff3d75366b00bda3acd044ed638", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.2857142857, "max_line_length": 172, "alphanum_fraction": 0.6261316872, "include": true, "reason": "import numpy,import networkx", "num_tokens": 2130}
|
import matplotlib.pyplot as plt
import pandas as pd
from numpy import arange, array
import os
import logging
# Module-level logger; basicConfig attaches a default stderr handler.
logging.basicConfig()
logger = logging.getLogger('PlotTimeCost')
logger.setLevel('INFO')
class PlotTimeCostBar:
    """Bar chart of time costs (converted to hours), one bar per strategy.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain a 'Time Cost' column (seconds); the index supplies the
        x-axis tick labels.
    path : str
        Output image path; the file extension selects the save format.
    show : bool, default False
        If True, display the figure interactively before saving.
    """

    def __init__(self, data, path, show=False):
        self.data = data
        self.path = path
        self.show_flag = show
        (filepath, tempfilename) = os.path.split(path)
        # Only create the parent directory when the path actually has one:
        # os.makedirs('') raises FileNotFoundError for a bare filename.
        if filepath and not os.path.exists(filepath):
            os.makedirs(filepath)
        (filename, extension) = os.path.splitext(tempfilename)
        self.format = extension[1:]

    def plot(self):
        """Render the bar chart and save it to ``self.path``."""
        # Leading zero keeps an empty slot before the first labelled bar.
        # Float dtype: the original int array silently truncated fractional
        # seconds. Sized from the data instead of hard-coding 3 slots.
        data = array([0.0] * (len(self.data) + 1))
        data[1:] = self.data['Time Cost'].values

        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_subplot(111)

        width = 0.5
        xticks = self.data.index
        n = data.shape[0]
        ind = arange(n)

        data = data / 3600  # seconds -> hours
        colors = ['black', 'tab:blue', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown']
        plt.bar(x=ind, height=data, width=width, color=colors)

        # Skip the empty leading slot when labelling.
        ax.set_xticks(ind[1:])
        ax.set_xticklabels(xticks)
        ax.tick_params(labelsize=12)
        ax.set_ylabel('Time Cost (h)', fontsize=16)

        if self.show_flag:
            plt.show()
        fig.savefig(self.path, format=self.format, dpi=80, bbox_inches='tight')
|
{"hexsha": "f706739fa5fbb45a327cc6482a6f9e131f810e2c", "size": 1404, "ext": "py", "lang": "Python", "max_stars_repo_path": "visualizer/plot_mf_param_opt/plot_time_cost_bar.py", "max_stars_repo_name": "buctlab/NIO", "max_stars_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-23T09:12:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T08:43:32.000Z", "max_issues_repo_path": "visualizer/plot_mf_param_opt/plot_time_cost_bar.py", "max_issues_repo_name": "buctlab/NIO", "max_issues_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visualizer/plot_mf_param_opt/plot_time_cost_bar.py", "max_forks_repo_name": "buctlab/NIO", "max_forks_repo_head_hexsha": "094e688dd1cd3def7f31cd16ff927d4324651422", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-12-02T08:03:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T18:04:00.000Z", "avg_line_length": 28.6530612245, "max_line_length": 89, "alphanum_fraction": 0.6082621083, "include": true, "reason": "from numpy", "num_tokens": 354}
|
from pathlib import Path
from typing import Dict, Union
import numpy as np
from wai_data_tools.io import load_frames, save_frames
import matplotlib.pyplot as plt
def temporal_encoding(frame_dicts: Dict[int, Dict[str, Union[bool, np.ndarray]]], window_size, rgb=False):
    """
    Apply temporal normalization (and optional RGB frame stacking) to frames.

    :param frame_dicts: Dictionary where key is frame index and value is a
        dictionary with the label class and frame image
    :param window_size: Size of the sliding window for the voxelwise-mean
        subtraction
    :param rgb: when True, additionally encode the two previous frames into
        the colour channels of each frame
    :return: the transformed frames dictionary
    """
    normalized = remove_mean_from_frames(frame_dicts, window_size=window_size)
    return create_3_frame_rgb(frame_dicts=normalized) if rgb else normalized
def remove_mean_from_frames(frame_dicts: Dict[int, Dict[str, Union[bool, np.ndarray]]], window_size: int):
    """
    Normalizes each frame by subtracting the voxelwise mean of a sliding window.

    :param frame_dicts: Dictionary where key is frame index and value is a
        dictionary with the label class and frame image (modified in place)
    :param window_size: Size of sliding window
    :return: Modified frames dictionary where the window mean has been subtracted
    """
    window_inds_dict = calculate_window_inds(window_size=window_size,
                                             n_frames=len(frame_dicts))
    # BUGFIX(review): snapshot the original channel-0 frames first. The loop
    # below overwrites frame images in place, so the original computed window
    # means for later frames from already-normalized earlier frames -- this
    # looks unintended; confirm against expected output.
    originals = {ind: {"img": frame_dict["img"]} for ind, frame_dict in frame_dicts.items()}
    for frame_ind, frame_dict in frame_dicts.items():
        window_dict = {ind: originals[ind] for ind in window_inds_dict[frame_ind]}
        mean_image = calculate_voxelwise_mean_for_frames(frame_dicts=window_dict)
        proc_img = originals[frame_ind]["img"][:, :, 0].astype(float) - mean_image
        proc_img = np.expand_dims(proc_img, axis=-1)
        proc_img = scale_array_to_8bit(proc_img)
        # Replicate the single processed channel into all three channels.
        frame_dict["img"] = np.tile(proc_img, (1, 1, 3)).astype(np.uint8)
    return frame_dicts
def scale_array_to_8bit(img_array: np.ndarray) -> np.ndarray:
    """
    Scales array values to the uint8 range (0-255).

    :param img_array: image array (not modified; the scaling works on a copy)
    :return: image array scaled and converted to uint8 format; a constant
        input maps to all zeros
    """
    # Work on a float copy: the original mutated the caller's array in place
    # (and in-place float ops would fail on integer inputs).
    scaled = img_array.astype(float)
    scaled -= np.min(scaled)
    peak = np.max(scaled)
    if peak > 0:
        # Guard: a constant image would otherwise divide by zero.
        scaled *= 255 / peak
    return scaled.astype(np.uint8)
def calculate_window_inds(window_size: int, n_frames: int) -> Dict[int, np.ndarray]:
    """
    Calculates the frame indices for each sliding window.

    :param window_size: Size of window
    :param n_frames: Total number of frames in sequence
    :return: Dictionary mapping each frame index to an integer array of the
        frame indices in that frame's window
    """
    window_ind_dict = {}
    for frame_ind in range(n_frames):
        if frame_ind + window_size <= n_frames:
            # Window roughly centred on the frame, clipped at the sequence
            # start. int(...) keeps the indices integral -- the original's
            # np.floor produced float arrays that were then used as dict keys.
            start_ind = max(0, int(np.floor(frame_ind - window_size / 2)))
        else:
            # Near the sequence end, anchor the window to the last frames.
            # NOTE(review): this switch happens once frame_ind + window_size
            # exceeds n_frames (not frame_ind + window_size / 2), so some
            # late frames share the final window off-centre -- preserved
            # as-is; confirm intent.
            start_ind = n_frames - window_size
        end_ind = min(n_frames, start_ind + window_size)
        window_ind_dict[frame_ind] = np.arange(start_ind, end_ind)
    return window_ind_dict
def calculate_voxelwise_mean_for_frames(frame_dicts: Dict[int, Dict[str, Union[bool, np.ndarray]]]) -> np.ndarray:
    """
    Computes the voxelwise (per-pixel) mean over channel 0 of the given frames.

    :param frame_dicts: Dictionary where key is frame index and value is a
        dictionary with the label class and frame image
    :return: 2-D array holding the mean of channel 0 across all frames
    """
    channel_stack = np.stack(
        [entry["img"][:, :, 0].astype(float) for entry in frame_dicts.values()],
        axis=0,
    )
    return channel_stack.mean(axis=0)
def display_image(image_array: np.ndarray):
    """
    Displays an image using matplotlib.

    ``plt.show`` typically blocks until the figure window is closed.

    :param image_array: n-dimensional array to show.
    """
    plt.imshow(image_array)
    plt.show()
def create_3_frame_rgb(frame_dicts: Dict[int, Dict[str, Union[bool, np.ndarray]]]):
    """
    Encode each frame together with its two predecessors as an RGB image.

    Channel 0 holds the current frame, channel 1 the previous frame and
    channel 2 the frame before that (all taken from channel 0 of the inputs).
    The first two frames are dropped since they lack two predecessors.

    :param frame_dicts: Dictionary where key is frame index and value is a
        dictionary with the label class and frame image
    :return: new dictionary of encoded frames, keyed from index 2 upwards
    """
    encoded = {}
    for idx in range(2, len(frame_dicts)):
        stacked = np.zeros_like(frame_dicts[idx]["img"]).astype(float)
        for offset in (0, 1, 2):
            stacked[:, :, offset] = frame_dicts[idx - offset]["img"][:, :, 0]
        encoded[idx] = {
            "img": stacked.astype(np.uint8),
            "label": frame_dicts[idx]["label"],
        }
    return encoded
def main():
    """Batch-apply temporal RGB encoding (window 40) to every frame directory."""
    src_root = Path(r"C:\Users\david\Desktop\wildlife.ai\curated-datasets\rat-cleaned\background-test")
    dst_root = Path(
        r"C:\Users\david\Desktop\wildlife.ai\curated-datasets\temporal-encoding-trials\rat_rgb_40\background-test")
    for frame_dir in list(src_root.glob("*")):
        print(frame_dir.name)
        frames = load_frames(frame_dir=frame_dir)
        encoded = temporal_encoding(frame_dicts=frames, window_size=40, rgb=True)
        save_frames(video_name=frame_dir,
                    dst_root_dir=dst_root,
                    frames_dict=encoded)


if __name__ == "__main__":
    main()
|
{"hexsha": "edf7b5b818007b2b511014c849236ac406a6af4b", "size": 5420, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/wai_data_tools/temporal_encoding.py", "max_stars_repo_name": "davidaderup/wai_data_tools", "max_stars_repo_head_hexsha": "3057c2be43e05cc88c086c45e0d58eece27b5af0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wai_data_tools/temporal_encoding.py", "max_issues_repo_name": "davidaderup/wai_data_tools", "max_issues_repo_head_hexsha": "3057c2be43e05cc88c086c45e0d58eece27b5af0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wai_data_tools/temporal_encoding.py", "max_forks_repo_name": "davidaderup/wai_data_tools", "max_forks_repo_head_hexsha": "3057c2be43e05cc88c086c45e0d58eece27b5af0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4397163121, "max_line_length": 132, "alphanum_fraction": 0.6885608856, "include": true, "reason": "import numpy", "num_tokens": 1228}
|
"""Operations to support categorical data."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import OrderedDict
from typing import List, NamedTuple, Optional, Tuple
import numpy as np
from tqdm import tqdm
from landshark import iteration
from landshark.basetypes import CategoricalArraySource, CategoricalType, Worker
log = logging.getLogger(__name__)
class CategoryInfo(NamedTuple):
    """
    Information about categorical features.

    The mappings contain the original numerical categories in an array.
    Their index in that array is the number they have been mapped to.
    The counts list gives, for each categorical feature, the numbers of
    appearances in the data of that category.
    """

    # One array per feature; mappings[f][i] is the original category value
    # that was mapped to integer i for feature f.
    mappings: List[np.ndarray]
    # One array per feature; counts[f][i] is how often mappings[f][i] appears.
    counts: List[np.ndarray]
def _unique_values(x: np.ndarray) -> Tuple[List[np.ndarray], List[int]]:
"""Provide the unique entries and their counts for each column x."""
x = x.reshape((-1), x.shape[-1])
unique_vals, counts = zip(*[np.unique(c, return_counts=True) for c in x.T])
return unique_vals, counts
class _CategoryAccumulator:
    """Accumulates categorical values and their observation counts."""

    def __init__(self, missing_value: CategoricalType) -> None:
        """Create an empty accumulator; ``missing_value`` is never counted."""
        self.counts: OrderedDict = OrderedDict()
        self.missing = missing_value

    def update(self, values: np.ndarray, counts: np.ndarray) -> None:
        """Fold a batch of (value, count) pairs into the running totals."""
        assert values.ndim == 1
        assert counts.ndim == 1
        assert values.shape == counts.shape
        assert counts.dtype == int
        assert np.all(counts >= 0)
        for value, count in zip(values, counts):
            self.counts[value] = self.counts.get(value, 0) + count
        # The missing value is excluded from the reported categories.
        self.counts.pop(self.missing, None)
def get_maps(src: CategoricalArraySource, batchrows: int) -> CategoryInfo:
    """
    Extract the unique categorical values and their counts from ``src``.

    The k arbitrary numerical categories of each column are mapped to the
    integers (0..k-1) -- a category's new id is its position in the sorted
    mapping array -- along with counts of how often each value appeared.

    Arguments
    ---------
    src : CategoricalArraySource
        The ArraySource from which to extract the data.
    batchrows : int
        The number of rows to read from src in a single batch. Larger
        values are probably faster but will use more memory.

    Returns
    -------
    result : CategoryInfo
        The mappings and counts for each categorical column in the dataset.
    """
    n_rows = src.shape[0]
    n_features = src.shape[-1]
    missing_value = src.missing
    if missing_value is not None and missing_value > 0:
        raise ValueError("Missing value must be negative")
    accumulators = [_CategoryAccumulator(missing_value) for _ in range(n_features)]

    with tqdm(total=n_rows) as progress:
        with src:
            for batch_slice in iteration.batch_slices(batchrows, n_rows):
                batch = src(batch_slice)
                uniques, batch_counts = _unique_values(batch)
                for acc, vals, cnts in zip(accumulators, uniques, batch_counts):
                    acc.update(vals, cnts)
                progress.update(batch.shape[0])

    per_column = [acc.counts for acc in accumulators]
    raw_mappings = [np.array(list(c.keys())) for c in per_column]
    raw_counts = [np.array(list(c.values()), dtype=np.int64) for c in per_column]
    # Stable mergesort keeps the ordering deterministic across runs.
    orderings = [np.argsort(m, kind="mergesort") for m in raw_mappings]
    mappings = [m[o] for m, o in zip(raw_mappings, orderings)]
    counts = [c[o] for c, o in zip(raw_counts, orderings)]
    return CategoryInfo(mappings=mappings, counts=counts)
class CategoryMapper(Worker):
    """
    Worker class to perform a categorical data remapping.

    Arguments
    ---------
    mappings : List[np.ndarray]
        Sorted arrays giving the map from arbitrary numerical categories in
        each column to the numbers 0..n-1 (a category's new id is its index
        in the array).
    missing_value : Optional[int]
        The dataset's missing value, if any. Missing entries are excluded
        from remapping and left as ``missing_value`` in the output.
    """

    def __init__(
        self, mappings: List[np.ndarray], missing_value: Optional[int]
    ) -> None:
        """Initialise the worker object."""
        for m in mappings:
            # The remapping in __call__ relies on each mapping being sorted.
            is_sorted = np.all(m[:-1] <= m[1:])
            assert is_sorted
        self._mappings = mappings
        self._missing = missing_value

    def __call__(self, x: np.ndarray) -> np.ndarray:
        """Map the data in x into the new categories.

        Arguments
        ---------
        x : np.ndarray
            The categorical data to remap. Assuming the last dimension
            of x is where the separate features are indexed.

        Returns
        -------
        x_new : np.ndarray
            The version of x in which the remappings have been applied.
        """
        # BUGFIX: compare against None explicitly. The original used bare
        # truthiness (``if self._missing``), so a legitimate missing value of
        # 0 was silently treated as "no missing value".
        has_missing = self._missing is not None
        fill = self._missing if has_missing else 0
        x_new = np.empty_like(x)
        for i, cats in enumerate(self._mappings):
            x_i = x[..., i].ravel()
            mask = (
                x_i != self._missing if has_missing else np.ones_like(x_i, dtype=bool)
            )
            x_i_valid = x_i[mask].flatten()
            # Appending the valid values after the known categories means
            # np.unique's inverse indices give each value's category id.
            flat = np.hstack((cats, x_i_valid))
            actual_cat, remap = np.unique(flat, return_inverse=True)
            x_i_new_valid = remap[len(cats) :]
            x_i_new = np.full_like(x_i, fill)
            x_i_new[mask] = x_i_new_valid
            x_new[..., i] = x_i_new.reshape(x[..., i].shape)
            # Every non-missing value must already be a known category.
            assert np.all(actual_cat == cats)
        return x_new
|
{"hexsha": "0c06f753c512a2d8a61d38d98e7b0b22e8ac6e36", "size": 6330, "ext": "py", "lang": "Python", "max_stars_repo_path": "landshark/category.py", "max_stars_repo_name": "basaks/landshark", "max_stars_repo_head_hexsha": "87ec1fada74addd58f37bdaf3b1adbc10b1544b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "landshark/category.py", "max_issues_repo_name": "basaks/landshark", "max_issues_repo_head_hexsha": "87ec1fada74addd58f37bdaf3b1adbc10b1544b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "landshark/category.py", "max_forks_repo_name": "basaks/landshark", "max_forks_repo_head_hexsha": "87ec1fada74addd58f37bdaf3b1adbc10b1544b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7802197802, "max_line_length": 88, "alphanum_fraction": 0.64328594, "include": true, "reason": "import numpy", "num_tokens": 1446}
|
#include <boost/algorithm/string.hpp>
#include <boost/archive/xml_iarchive.hpp>
#include <boost/archive/xml_oarchive.hpp>
#include "MeshingParametersDataIO.h"
#include "MeshingParametersBoostIO.h"
#include <MeshingParametersData.h>
#include <IO/MeshingKernelIOMimeTypes.h>
#include <IO/IOUtilDataSerializer.h>
REGISTER_IOUTILDATA_SERIALIZER(MeshingParametersData, crimson::MeshingKernelIOMimeTypes::MESHINGPARAMETERSDATA_DEFAULT_EXTENSION())
namespace crimson {
MeshingParametersDataIO::MeshingParametersDataIO()
: AbstractFileIO(MeshingParametersData::GetStaticNameOfClass(),
MeshingKernelIOMimeTypes::MESHINGPARAMETERSDATA_MIMETYPE(),
"Meshing parameters data")
{
RegisterService();
}
MeshingParametersDataIO::MeshingParametersDataIO(const MeshingParametersDataIO& other)
: AbstractFileIO(other)
{
}
MeshingParametersDataIO::~MeshingParametersDataIO()
{
}
std::vector< itk::SmartPointer<mitk::BaseData> > MeshingParametersDataIO::Read()
{
std::vector< itk::SmartPointer<mitk::BaseData> > result;
std::istream* inStream = GetInputStream();
std::shared_ptr<std::istream> fileInStream;
if (!inStream) {
fileInStream.reset(new std::ifstream(GetInputLocation()));
inStream = fileInStream.get();
}
boost::archive::xml_iarchive inArchive(*inStream);
auto data = MeshingParametersData::New();
auto& dataRef = *data;
inArchive >> BOOST_SERIALIZATION_NVP(dataRef);
result.push_back(data.GetPointer());
return result;
}
void MeshingParametersDataIO::Write()
{
auto data = static_cast<const MeshingParametersData*>(this->GetInput());
if (!data) {
MITK_ERROR << "Input MeshingParameters data has not been set!";
return;
}
std::ostream* outStream = GetOutputStream();
std::shared_ptr<std::ostream> fileOutStream;
if (!outStream) {
fileOutStream.reset(new std::ofstream(GetOutputLocation()));
outStream = fileOutStream.get();
}
auto& dataRef = *data;
boost::archive::xml_oarchive out(*outStream);
out << BOOST_SERIALIZATION_NVP(dataRef);
}
}
|
{"hexsha": "5059f6f13c40a2a5e3e103903540a0e386f1cb75", "size": 2099, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Modules/CGALVMTKMeshingKernel/IO/MeshingParametersDataIO.cpp", "max_stars_repo_name": "carthurs/CRIMSONGUI", "max_stars_repo_head_hexsha": "1464df9c4d04cf3ba131ca90b91988a06845c68e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2020-09-17T18:55:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T02:52:38.000Z", "max_issues_repo_path": "Modules/CGALVMTKMeshingKernel/IO/MeshingParametersDataIO.cpp", "max_issues_repo_name": "carthurs/CRIMSONGUI", "max_issues_repo_head_hexsha": "1464df9c4d04cf3ba131ca90b91988a06845c68e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Modules/CGALVMTKMeshingKernel/IO/MeshingParametersDataIO.cpp", "max_forks_repo_name": "carthurs/CRIMSONGUI", "max_forks_repo_head_hexsha": "1464df9c4d04cf3ba131ca90b91988a06845c68e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2021-05-19T09:02:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-26T17:39:57.000Z", "avg_line_length": 27.2597402597, "max_line_length": 131, "alphanum_fraction": 0.7313006193, "num_tokens": 516}
|
# -*- coding:utf-8 -*-
# Created Time: 2018/05/10 17:22:38
# Author: Taihong Xiao <xiaotaihong@126.com>
import os
import scipy.ndimage as nd
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
class Config:
    """Experiment configuration: data/output directories plus hyper-parameters.

    Directory properties create their directory on first access, so callers
    can use the returned path immediately.
    """

    @property
    def data_dir(self):
        """Root of the ModelNet volumetric data (created if absent)."""
        # data_dir = '/home/xiaoth/datasets/ModelNet/volumetric_data'
        data_dir = '/gpfs/share/home/1501210096/datasets/ModelNet/volumetric_data'
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(data_dir, exist_ok=True)
        return data_dir

    @property
    def exp_dir(self):
        """Root directory for all experiment artifacts."""
        exp_dir = os.path.join('train_log')
        os.makedirs(exp_dir, exist_ok=True)
        return exp_dir

    @property
    def model_dir(self):
        """Directory for saved models."""
        model_dir = os.path.join(self.exp_dir, 'model')
        os.makedirs(model_dir, exist_ok=True)
        return model_dir

    @property
    def log_dir(self):
        """Directory for training logs."""
        log_dir = os.path.join(self.exp_dir, 'log')
        os.makedirs(log_dir, exist_ok=True)
        return log_dir

    @property
    def img_dir(self):
        """Directory for generated images."""
        img_dir = os.path.join(self.exp_dir, 'img')
        os.makedirs(img_dir, exist_ok=True)
        return img_dir

    # nchw[0] is the batch size and nchw[-1] the voxel-grid resolution
    # (see how Single uses config.nchw).
    nchw = [32, 64, 64, 64]
    G_lr = 2.5e-3      # generator learning rate
    D_lr = 1e-5        # discriminator learning rate
    step_size = 2000   # scheduler step interval
    gamma = 0.95       # decay factor
    shuffle = True     # shuffle the DataLoader each epoch
    num_workers = 0    # DataLoader worker processes
    max_iter = 20000   # total training iterations
# Shared module-level configuration instance.
config = Config()
class Single(Dataset):
    """Dataset over a list of .mat voxel files for one split of a category.

    Each item is a (1, D, H, W) float32 voxel grid, zero-padded by one voxel
    on every side and optionally zoomed to the configured resolution.
    """

    def __init__(self, filenames, config):
        self.filenames = filenames
        self.config = config

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, idx):
        grid = sio.loadmat(self.filenames[idx])['instance']
        # Zero-pad one voxel on every side.
        grid = np.pad(grid, (1, 1), 'constant', constant_values=(0, 0))
        target = self.config.nchw[-1]
        if target != 32:
            # Nearest-neighbour zoom to the configured resolution.
            scale = target / 32.
            grid = nd.zoom(grid, (scale, scale, scale), mode='constant', order=0)
        return np.expand_dims(grid.astype(np.float32), 0)

    def gen(self):
        """Yield batches forever, reshuffling every epoch via the DataLoader."""
        loader = DataLoader(self,
                            batch_size=self.config.nchw[0],
                            shuffle=self.config.shuffle,
                            num_workers=self.config.num_workers,
                            drop_last=True)
        while True:
            for batch in loader:
                yield batch
class ShapeNet(object):
    """Endless train/test batch generators for a single ModelNet category."""

    def __init__(self, category, config=config):
        """Scan the category's train/test .mat files and build one generator per split."""
        self.category = category
        self.config = config
        # Infinite batch generators keyed by is_train (True -> train split).
        self.dict = {}
        for is_train in (True, False):
            prefix = os.path.join(self.config.data_dir, category, '30')
            split_dir = prefix + ('/train' if is_train else '/test')
            mat_files = [os.path.join(split_dir, name)
                         for name in os.listdir(split_dir) if name.endswith('.mat')]
            self.dict[is_train] = Single(mat_files, self.config).gen()

    def gen(self, is_train):
        """Return the infinite batch generator for the requested split."""
        return self.dict[is_train]
def test():
    """Smoke-test the data pipeline: draw 10 batches, alternating splits, under cProfile."""
    dataset = ShapeNet('chair')
    import cProfile
    pr = cProfile.Profile()
    pr.enable()
    for i in range(10):
        # BUGFIX: the original tested ``1 % 2 == 0``, which is always False,
        # so the train-split generator was never exercised; alternate on i.
        if i % 2 == 0:
            voxel = next(dataset.gen(True))
        else:
            voxel = next(dataset.gen(False))
        print(i)
        print(voxel.shape)
    pr.disable()
    pr.print_stats()


if __name__ == "__main__":
    test()
|
{"hexsha": "b7c912072faa432bfc580636c866eb82dd8a52ee", "size": 3462, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset.py", "max_stars_repo_name": "Prinsphield/3D-GAN-pytorch", "max_stars_repo_head_hexsha": "06b4b34e499a5c5187b6a67a8c8dfc35b2e1ce62", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 36, "max_stars_repo_stars_event_min_datetime": "2018-05-11T11:10:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T12:46:00.000Z", "max_issues_repo_path": "dataset.py", "max_issues_repo_name": "biomedicalengineering/3D-GAN-pytorch-1", "max_issues_repo_head_hexsha": "06b4b34e499a5c5187b6a67a8c8dfc35b2e1ce62", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-12-06T14:06:41.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-06T14:06:41.000Z", "max_forks_repo_path": "dataset.py", "max_forks_repo_name": "biomedicalengineering/3D-GAN-pytorch-1", "max_forks_repo_head_hexsha": "06b4b34e499a5c5187b6a67a8c8dfc35b2e1ce62", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-05-11T18:53:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-24T04:47:15.000Z", "avg_line_length": 26.427480916, "max_line_length": 151, "alphanum_fraction": 0.6140958983, "include": true, "reason": "import numpy,import scipy", "num_tokens": 884}
|
\section{\module{types} ---
Names for all built-in types}
\declaremodule{standard}{types}
\modulesynopsis{Names for all built-in types.}
This module defines names for all object types that are used by the
standard Python interpreter, but not for the types defined by various
extension modules. It is safe to use \samp{from types import *} ---
the module does not export any names besides the ones listed here.
New names exported by future versions of this module will all end in
\samp{Type}.
Typical use is for functions that do different things depending on
their argument types, like the following:
\begin{verbatim}
from types import *
def delete(list, item):
if type(item) is IntType:
del list[item]
else:
list.remove(item)
\end{verbatim}
The module defines the following names:
\begin{datadesc}{NoneType}
The type of \code{None}.
\end{datadesc}
\begin{datadesc}{TypeType}
The type of type objects (such as those returned by
\function{type()}\bifuncindex{type}).
\end{datadesc}
\begin{datadesc}{IntType}
The type of integers (e.g. \code{1}).
\end{datadesc}
\begin{datadesc}{LongType}
The type of long integers (e.g. \code{1L}).
\end{datadesc}
\begin{datadesc}{FloatType}
The type of floating point numbers (e.g. \code{1.0}).
\end{datadesc}
\begin{datadesc}{ComplexType}
The type of complex numbers (e.g. \code{1.0j}).
\end{datadesc}
\begin{datadesc}{StringType}
The type of character strings (e.g. \code{'Spam'}).
\end{datadesc}
\begin{datadesc}{UnicodeType}
The type of Unicode character strings (e.g. \code{u'Spam'}).
\end{datadesc}
\begin{datadesc}{TupleType}
The type of tuples (e.g. \code{(1, 2, 3, 'Spam')}).
\end{datadesc}
\begin{datadesc}{ListType}
The type of lists (e.g. \code{[0, 1, 2, 3]}).
\end{datadesc}
\begin{datadesc}{DictType}
The type of dictionaries (e.g. \code{\{'Bacon': 1, 'Ham': 0\}}).
\end{datadesc}
\begin{datadesc}{DictionaryType}
An alternate name for \code{DictType}.
\end{datadesc}
\begin{datadesc}{FunctionType}
The type of user-defined functions and lambdas.
\end{datadesc}
\begin{datadesc}{LambdaType}
An alternate name for \code{FunctionType}.
\end{datadesc}
\begin{datadesc}{CodeType}
The type of code objects such as those returned by
\function{compile()}\bifuncindex{compile}.
\end{datadesc}
\begin{datadesc}{ClassType}
The type of user-defined classes.
\end{datadesc}
\begin{datadesc}{InstanceType}
The type of instances of user-defined classes.
\end{datadesc}
\begin{datadesc}{MethodType}
The type of methods of user-defined class instances.
\end{datadesc}
\begin{datadesc}{UnboundMethodType}
An alternate name for \code{MethodType}.
\end{datadesc}
\begin{datadesc}{BuiltinFunctionType}
The type of built-in functions like \function{len()} or
\function{sys.exit()}.
\end{datadesc}
\begin{datadesc}{BuiltinMethodType}
An alternate name for \code{BuiltinFunctionType}.
\end{datadesc}
\begin{datadesc}{ModuleType}
The type of modules.
\end{datadesc}
\begin{datadesc}{FileType}
The type of open file objects such as \code{sys.stdout}.
\end{datadesc}
\begin{datadesc}{XRangeType}
The type of range objects returned by
\function{xrange()}\bifuncindex{xrange}.
\end{datadesc}
\begin{datadesc}{SliceType}
The type of objects returned by
\function{slice()}\bifuncindex{slice}.
\end{datadesc}
\begin{datadesc}{EllipsisType}
The type of \code{Ellipsis}.
\end{datadesc}
\begin{datadesc}{TracebackType}
The type of traceback objects such as found in
\code{sys.exc_traceback}.
\end{datadesc}
\begin{datadesc}{FrameType}
The type of frame objects such as found in \code{tb.tb_frame} if
\code{tb} is a traceback object.
\end{datadesc}
\begin{datadesc}{BufferType}
The type of buffer objects created by the
\function{buffer()}\bifuncindex{buffer} function.
\end{datadesc}
|
{"hexsha": "8ade4a6be956aa18e0fe0a49e9e2b22fbde2cb7e", "size": 3749, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Doc/lib/libtypes.tex", "max_stars_repo_name": "marcosptf/cpython-2.0.1", "max_stars_repo_head_hexsha": "73c739a764e8b1dc84640e73b880bc66e1916bca", "max_stars_repo_licenses": ["PSF-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-03-26T21:53:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T21:47:20.000Z", "max_issues_repo_path": "Doc/lib/libtypes.tex", "max_issues_repo_name": "marcosptf/cpython-2.0.1", "max_issues_repo_head_hexsha": "73c739a764e8b1dc84640e73b880bc66e1916bca", "max_issues_repo_licenses": ["PSF-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-11-18T15:48:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-03T21:20:50.000Z", "max_forks_repo_path": "Doc/lib/libtypes.tex", "max_forks_repo_name": "marcosptf/cpython-2.0.1", "max_forks_repo_head_hexsha": "73c739a764e8b1dc84640e73b880bc66e1916bca", "max_forks_repo_licenses": ["PSF-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-07-16T08:14:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T01:55:17.000Z", "avg_line_length": 24.6644736842, "max_line_length": 69, "alphanum_fraction": 0.7441984529, "num_tokens": 1152}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.