code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import annotations
import warnings
import itertools
import inspect
from abc import ABC, abstractmethod
from typing import List, Optional, Union, Callable, Dict, Tuple, Any, Sequence, Iterable
import pandas as pd
import numpy as np
import joblib
from tqdm import tqdm
from sklearn.base import ClassifierMixin, RegressorMixin, TransformerMixin, clone
from sklearn.model_selection import (
BaseCrossValidator,
BaseShuffleSplit,
train_test_split,
)
from sklearn.preprocessing import (
StandardScaler,
MinMaxScaler,
RobustScaler,
OneHotEncoder,
OrdinalEncoder,
)
from sklearn.ensemble import (
VotingClassifier,
VotingRegressor,
StackingClassifier,
StackingRegressor,
)
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import (
cross_validate,
cross_val_predict,
GridSearchCV,
RandomizedSearchCV,
)
from sklearn.impute import SimpleImputer
from sklearn.exceptions import UndefinedMetricWarning
from poniard.utils import cramers_v
from poniard.utils import GRID
from poniard.plot import PoniardPlotFactory
class PoniardBaseEstimator(ABC):
"""Base estimator that sets up all the functionality for the classifier and regressor.
Parameters
----------
estimators :
Estimators to evaluate.
metrics :
Metrics to compute for each estimator. This is more restrictive than sklearn's scoring
parameter, as it does not allow callable scorers. Single strings are cast to lists
automatically.
preprocess : bool, optional
If True, impute missing values, standard scale numeric data and one-hot or ordinal
encode categorical data.
scaler :
Numeric scaler method. Either "standard", "minmax", "robust" or scikit-learn Transformer.
numeric_imputer :
Imputation method. Either "simple", "iterative" or scikit-learn Transformer.
custom_preprocessor :
Preprocessor used instead of the default preprocessing pipeline. It must be able to be
included directly in a scikit-learn Pipeline.
numeric_threshold :
Features with unique values above a certain threshold will be treated as numeric. If
float, the threshold is `numeric_threshold * samples`.
cardinality_threshold :
Non-numeric features with cardinality above a certain threshold will be treated as
ordinal encoded instead of one-hot encoded. If float, the threshold is
`cardinality_threshold * samples`.
cv :
Cross validation strategy. Either an integer, a scikit-learn cross validation object,
or an iterable.
verbose :
Verbosity level. Propagated to every scikit-learn function and estimator.
random_state :
RNG. Propagated to every scikit-learn function and estimator. The default None sets
random_state to 0 so that cross_validate results are comparable.
n_jobs :
Controls parallel processing. -1 uses all cores. Propagated to every scikit-learn
function.
plugins :
Plugin instances that run in set moments of setup, fit and plotting.
plot_options :
:class:poniard.plot.plot_factory.PoniardPlotFactory instance specifying Plotly format
options or None, which sets the default factory.
cache_transformations :
Whether to cache transformations and set the `memory` parameter for Pipelines. This can speed up slow transformations as they are not recalculated for each estimator.
Attributes
----------
estimators_ :
Estimators used for scoring.
preprocessor_ :
Pipeline that preprocesses the data.
metrics_ :
Metrics used for scoring estimators during fit and hyperparameter optimization.
cv_ :
Cross validation strategy.
"""
    def __init__(
        self,
        estimators: Optional[
            Union[
                List[ClassifierMixin],
                Dict[str, ClassifierMixin],
                List[RegressorMixin],
                Dict[str, RegressorMixin],
            ]
        ] = None,
        metrics: Optional[Union[str, Dict[str, Callable], List[str]]] = None,
        preprocess: bool = True,
        scaler: Optional[Union[str, TransformerMixin]] = None,
        numeric_imputer: Optional[Union[str, TransformerMixin]] = None,
        custom_preprocessor: Union[None, Pipeline, TransformerMixin] = None,
        numeric_threshold: Union[int, float] = 0.1,
        cardinality_threshold: Union[int, float] = 20,
        cv: Optional[Union[int, BaseCrossValidator, BaseShuffleSplit, Sequence]] = None,
        verbose: int = 0,
        random_state: Optional[int] = None,
        n_jobs: Optional[int] = None,
        plugins: Optional[List[Any]] = None,
        plot_options: Optional[PoniardPlotFactory] = None,
        cache_transformations: bool = False,
    ):
        # TODO: Ugly check that metrics conforms to expected types. Should improve.
        # Accepted shapes: str | Sequence[str] | Dict[str, str] | Dict[str, Callable] | None.
        if metrics and (
            (
                isinstance(metrics, (List, Tuple))
                and not all(isinstance(m, str) for m in metrics)
            )
            or (
                isinstance(metrics, Dict)
                and not all(isinstance(m, str) for m in metrics.values())
                and not all(isinstance(m, Callable) for m in metrics.values())
            )
        ):
            raise ValueError(
                "metrics can only be a string, a sequence of strings, a dict with "
                "strings as keys and callables as values, or None."
            )
        self.metrics = metrics
        self.preprocess = preprocess
        # Fall back to default strategy names when none are provided.
        self.scaler = scaler or "standard"
        self.numeric_imputer = numeric_imputer or "simple"
        self.numeric_threshold = numeric_threshold
        self.custom_preprocessor = custom_preprocessor
        self.cardinality_threshold = cardinality_threshold
        self.cv = cv
        self.verbose = verbose
        # None becomes 0 so that repeated cross_validate runs are comparable.
        self.random_state = random_state or 0
        self.estimators = estimators
        self.n_jobs = n_jobs
        # Normalize a single plugin instance to a one-element list.
        self.plugins = (
            plugins if isinstance(plugins, Sequence) or plugins is None else [plugins]
        )
        self.plot_options = plot_options or PoniardPlotFactory()
        # id()s of estimators already scored; fit() skips these on re-runs.
        self._fitted_estimator_ids = []
        self._build_initial_estimators()
        if self.plugins:
            # Give every plugin a back-reference to this Poniard instance.
            [setattr(plugin, "_poniard", self) for plugin in self.plugins]
        self.plot = self.plot_options
        self.plot._poniard = self
        if cache_transformations:
            # Reused across estimators via Pipeline(memory=...) to avoid refitting
            # slow transformers for every estimator.
            self._memory = joblib.Memory("transformation_cache", verbose=self.verbose)
        else:
            self._memory = None
    def fit(self) -> PoniardBaseEstimator:
        """This is the main Poniard method. It uses scikit-learn's `cross_validate` function to
        score all :attr:`metrics_` for every :attr:`preprocessor_` | :attr:`estimators_`, using
        :attr:`cv` for cross validation.

        :attr:`X` and :attr:`y` must have been set previously by :meth:`setup`.

        Returns
        -------
        PoniardBaseEstimator
            Self.

        Raises
        ------
        ValueError
            If :meth:`setup` was not called beforehand.
        """
        if not hasattr(self, "cv_"):
            raise ValueError("`setup` must be called before `fit`.")
        self._run_plugin_methods("on_fit_start")
        results = {}
        # Skip estimator instances (tracked by id) that were already cross
        # validated in a previous fit() call.
        filtered_estimators = {
            name: estimator
            for name, estimator in self.estimators_.items()
            if id(estimator) not in self._fitted_estimator_ids
        }
        pbar = tqdm(filtered_estimators.items())
        for i, (name, estimator) in enumerate(pbar):
            pbar.set_description(f"{name}")
            if self.preprocess:
                final_estimator = Pipeline(
                    [("preprocessor", self.preprocessor_), (name, estimator)],
                    memory=self._memory,
                )
            else:
                final_estimator = Pipeline([(name, estimator)])
            with warnings.catch_warnings():
                # Silence noisy per-fold warnings from degenerate folds/encodings.
                warnings.filterwarnings("ignore", category=UndefinedMetricWarning)
                warnings.filterwarnings(
                    "ignore", message=".*will be encoded as all zeros"
                )
                result = cross_validate(
                    final_estimator,
                    self.X,
                    self.y,
                    scoring=self.metrics_,
                    cv=self.cv_,
                    return_train_score=True,
                    return_estimator=True,
                    verbose=self.verbose,
                    n_jobs=self.n_jobs,
                )
            results.update({name: result})
            self._fitted_estimator_ids.append(id(estimator))
            if i == len(pbar) - 1:
                pbar.set_description("Completed")
        # Merge with results from previous fit() calls if present.
        if hasattr(self, "_experiment_results"):
            self._experiment_results.update(results)
        else:
            self._experiment_results = results
        self._process_results()
        self._process_long_results()
        self._run_plugin_methods("on_fit_end")
        return self
    def _predict(
        self, method: str, estimator_names: Optional[List[str]] = None
    ) -> Dict[str, np.ndarray]:
        """Helper method for predicting targets or target probabilities with cross validation.

        Accepts predict, predict_proba, predict_log_proba or decision_function.

        Raises
        ------
        ValueError
            If :meth:`setup` was not called beforehand.
        """
        if not hasattr(self, "cv_"):
            raise ValueError("`setup` must be called before `predict`.")
        X, y = self.X, self.y
        if not estimator_names:
            # Default to every registered estimator.
            estimator_names = [estimator for estimator in self.estimators_.keys()]
        results = {}
        pbar = tqdm(estimator_names)
        for i, name in enumerate(pbar):
            pbar.set_description(f"{name}")
            estimator = self.estimators_[name]
            if self.preprocess:
                final_estimator = Pipeline(
                    [("preprocessor", self.preprocessor_), (name, estimator)],
                    memory=self._memory,
                )
            else:
                final_estimator = estimator
            try:
                result = cross_val_predict(
                    final_estimator,
                    X,
                    y,
                    cv=self.cv_,
                    method=method,
                    verbose=self.verbose,
                    n_jobs=self.n_jobs,
                )
            except AttributeError:
                # Estimator lacks the requested method (e.g. no predict_proba);
                # warn and fill with nan so downstream tables keep their shape.
                warnings.warn(
                    f"{name} does not support `{method}` method. Filling with nan.",
                    stacklevel=2,
                )
                result = np.empty(self.y.shape)
                result[:] = np.nan
            results.update({name: result})
            # Store predictions alongside any existing experiment results for `name`.
            if not hasattr(self, "_experiment_results"):
                self._experiment_results = {}
                self._experiment_results.update({name: {method: result}})
            elif name not in self._experiment_results:
                self._experiment_results.update({name: {method: result}})
            else:
                self._experiment_results[name][method] = result
            if i == len(pbar) - 1:
                pbar.set_description("Completed")
        return results
def predict(
self, estimator_names: Optional[List[str]] = None
) -> Dict[str, np.ndarray]:
"""Get cross validated target predictions where each sample belongs to a single test set.
Parameters
----------
estimator_names :
Estimators to include. If None, predict all estimators.
Returns
-------
Dict
Dict where keys are estimator names and values are numpy arrays of predictions.
"""
return self._predict(method="predict", estimator_names=estimator_names)
def predict_proba(
self, estimator_names: Optional[List[str]] = None
) -> Dict[str, np.ndarray]:
"""Get cross validated target probability predictions where each sample belongs to a
single test set.
Returns
-------
Dict
Dict where keys are estimator names and values are numpy arrays of prediction
probabilities.
"""
return self._predict(method="predict_proba", estimator_names=estimator_names)
def decision_function(
self, estimator_names: Optional[List[str]] = None
) -> Dict[str, np.ndarray]:
"""Get cross validated decision function predictions where each sample belongs to a
single test set.
Parameters
----------
estimator_names :
Estimators to include. If None, predict all estimators.
Returns
-------
Dict
Dict where keys are estimator names and values are numpy arrays of prediction
probabilities.
"""
return self._predict(
method="decision_function", estimator_names=estimator_names
)
def predict_all(
self, estimator_names: Optional[List[str]] = None
) -> Tuple[Dict[str, np.ndarray]]:
"""Get cross validated target predictions, probabilities and decision functions
where each sample belongs to all test sets.
Parameters
----------
estimator_names :
Estimators to include. If None, predict all estimators.
Returns
-------
Dict
Dict where keys are estimator names and values are numpy arrays of prediction
probabilities.
"""
return (
self._predict(method="predict", estimator_names=estimator_names),
self._predict(method="predict_proba", estimator_names=estimator_names),
self._predict(method="decision_function", estimator_names=estimator_names),
)
    @property
    @abstractmethod
    def _base_estimators(self) -> List[ClassifierMixin]:
        """Default estimators used when none are passed during construction.
        Overridden by the classifier/regressor subclasses."""
        return [
            DummyRegressor(),
            DummyClassifier(),
        ]
    def _build_initial_estimators(
        self,
    ) -> Dict[str, Union[ClassifierMixin, RegressorMixin]]:
        """Build :attr:`estimators_` dict where keys are the estimator class names.

        Adds dummy estimators if not included during construction. Does nothing if
        :attr:`estimators_` exists.
        """
        if hasattr(self, "estimators_"):
            return
        if isinstance(self.estimators, dict):
            # Copy so later mutation of estimators_ does not alter user input.
            initial_estimators = self.estimators.copy()
        elif self.estimators:
            # Key a user-supplied list by class name.
            initial_estimators = {
                estimator.__class__.__name__: estimator for estimator in self.estimators
            }
        else:
            initial_estimators = {
                estimator.__class__.__name__: estimator
                for estimator in self._base_estimators
            }
        # Always include a dummy baseline matching the estimator type.
        if (
            self._check_estimator_type() == "classifier"
            and "DummyClassifier" not in initial_estimators.keys()
        ):
            initial_estimators.update(
                {"DummyClassifier": DummyClassifier(strategy="prior")}
            )
        elif (
            self._check_estimator_type() == "regressor"
            and "DummyRegressor" not in initial_estimators.keys()
        ):
            initial_estimators.update(
                {"DummyRegressor": DummyRegressor(strategy="mean")}
            )
        # Propagate random_state/verbosity settings to each estimator.
        for estimator in initial_estimators.values():
            self._pass_instance_attrs(estimator)
        self.estimators_ = initial_estimators
        return
    def setup(
        self,
        X: Union[pd.DataFrame, np.ndarray, List],
        y: Union[pd.DataFrame, np.ndarray, List],
    ) -> PoniardBaseEstimator:
        """Orchestrator.

        Converts inputs to arrays if necessary, sets :attr:`metrics_`,
        :attr:`preprocessor_`, :attr:`cv_` and :attr:`estimators_`.

        Parameters
        ----------
        X :
            Features.
        y :
            Target.
        """
        self._run_plugin_methods("on_setup_start")
        # Coerce non-pandas/non-numpy inputs (e.g. nested lists) to arrays.
        if not isinstance(X, (pd.DataFrame, pd.Series, np.ndarray)):
            X = np.array(X)
        if not isinstance(y, (pd.DataFrame, pd.Series, np.ndarray)):
            y = np.array(y)
        self.X = X
        self.y = y
        if self.metrics:
            # A single metric name is wrapped in a list.
            self.metrics_ = (
                self.metrics if not isinstance(self.metrics, str) else [self.metrics]
            )
        else:
            self.metrics_ = self._build_metrics()
        print(f"Main metric: {self._first_scorer(sklearn_scorer=False)}")
        if self.preprocess:
            # A user-provided preprocessor takes precedence over the default one.
            if self.custom_preprocessor:
                self.preprocessor_ = self.custom_preprocessor
            else:
                self.preprocessor_ = self._build_preprocessor()
        self.cv_ = self._build_cv()
        self._run_plugin_methods("on_setup_end")
        return self
    def _infer_dtypes(self) -> Tuple[List[str], List[str], List[str]]:
        """Infer feature types (numeric, low-cardinality categorical or high-cardinality
        categorical).

        Returns
        -------
        List[str], List[str], List[str]
            Three lists with column names or indices.
        """
        X = self.X
        numeric = []
        categorical_high = []
        categorical_low = []
        # Float thresholds are interpreted as a fraction of the sample count.
        if isinstance(self.cardinality_threshold, int):
            self.cardinality_threshold_ = self.cardinality_threshold
        else:
            self.cardinality_threshold_ = int(self.cardinality_threshold * X.shape[0])
        if isinstance(self.numeric_threshold, int):
            self.numeric_threshold_ = self.numeric_threshold
        else:
            self.numeric_threshold_ = int(self.numeric_threshold * X.shape[0])
        print(
            "Minimum unique values to consider an integer feature numeric:",
            self.numeric_threshold_,
        )
        print(
            "Minimum unique values to consider a non-float feature high cardinality:",
            self.cardinality_threshold_,
            end="\n\n",
        )
        if isinstance(X, pd.DataFrame):
            # NOTE(review): the elif ordering implicitly assumes
            # cardinality_threshold_ < numeric_threshold_ — confirm defaults.
            numbers = X.select_dtypes(include="number").columns
            for column in numbers:
                if X[column].nunique() > self.numeric_threshold_:
                    numeric.append(column)
                elif X[column].nunique() > self.cardinality_threshold_:
                    categorical_high.append(column)
                else:
                    categorical_low.append(column)
            # Non-numeric dtypes are categorical; split only by cardinality.
            strings = X.select_dtypes(exclude="number").columns
            for column in strings:
                if X[column].nunique() > self.cardinality_threshold_:
                    categorical_high.append(column)
                else:
                    categorical_low.append(column)
        else:
            # For arrays, features are referenced by column index.
            if np.issubdtype(X.dtype, np.number):
                for i in range(X.shape[1]):
                    if np.unique(X[:, i]).shape[0] > self.numeric_threshold_:
                        numeric.append(i)
                    elif np.unique(X[:, i]).shape[0] > self.cardinality_threshold_:
                        categorical_high.append(i)
                    else:
                        categorical_low.append(i)
            else:
                for i in range(X.shape[1]):
                    if np.unique(X[:, i]).shape[0] > self.cardinality_threshold_:
                        categorical_high.append(i)
                    else:
                        categorical_low.append(i)
        self._inferred_dtypes = {
            "numeric": numeric,
            "categorical_high": categorical_high,
            "categorical_low": categorical_low,
        }
        print(
            "Inferred feature types:",
            pd.DataFrame.from_dict(self._inferred_dtypes, orient="index").T.fillna(""),
            sep="\n",
        )
        return numeric, categorical_high, categorical_low
    def _build_preprocessor(self) -> Pipeline:
        """Build default preprocessor.

        The preprocessor imputes missing values, scales numeric features and encodes categorical
        features according to inferred types.
        """
        X = self.X
        if hasattr(self, "preprocessor_"):
            return self.preprocessor_
        numeric, categorical_high, categorical_low = self._infer_dtypes()
        # Resolve scaler: user transformer > named strategy > robust fallback.
        if isinstance(self.scaler, TransformerMixin):
            scaler = self.scaler
        elif self.scaler == "standard":
            scaler = StandardScaler()
        elif self.scaler == "minmax":
            scaler = MinMaxScaler()
        else:
            scaler = RobustScaler()
        cat_imputer = SimpleImputer(strategy="most_frequent")
        # Resolve numeric imputer: user transformer > iterative > simple mean.
        if isinstance(self.numeric_imputer, TransformerMixin):
            num_imputer = self.numeric_imputer
        elif self.numeric_imputer == "iterative":
            # IterativeImputer is experimental; the enabling import is required.
            from sklearn.experimental import enable_iterative_imputer
            from sklearn.impute import IterativeImputer
            num_imputer = IterativeImputer(random_state=self.random_state)
        else:
            num_imputer = SimpleImputer(strategy="mean")
        numeric_preprocessor = Pipeline(
            [("numeric_imputer", num_imputer), ("scaler", scaler)]
        )
        # Low-cardinality categoricals: one-hot (binary columns collapse to one).
        cat_low_preprocessor = Pipeline(
            [
                ("categorical_imputer", cat_imputer),
                (
                    "one-hot_encoder",
                    OneHotEncoder(drop="if_binary", handle_unknown="ignore"),
                ),
            ]
        )
        # High-cardinality categoricals: ordinal codes, unseen values -> 99999.
        cat_high_preprocessor = Pipeline(
            [
                ("categorical_imputer", cat_imputer),
                (
                    "ordinal_encoder",
                    OrdinalEncoder(
                        handle_unknown="use_encoded_value", unknown_value=99999
                    ),
                ),
            ],
        )
        if isinstance(X, pd.DataFrame):
            preprocessor = ColumnTransformer(
                [
                    ("numeric_preprocessor", numeric_preprocessor, numeric),
                    (
                        "categorical_low_preprocessor",
                        cat_low_preprocessor,
                        categorical_low,
                    ),
                    (
                        "categorical_high_preprocessor",
                        cat_high_preprocessor,
                        categorical_high,
                    ),
                ],
                n_jobs=self.n_jobs,
            )
        else:
            # For homogeneous arrays, route by dtype: float -> all numeric,
            # int -> mixed, anything else -> all categorical.
            if np.issubdtype(X.dtype, float):
                preprocessor = numeric_preprocessor
            elif np.issubdtype(X.dtype, int):
                preprocessor = ColumnTransformer(
                    [
                        ("numeric_preprocessor", numeric_preprocessor, numeric),
                        (
                            "categorical_low_preprocessor",
                            cat_low_preprocessor,
                            categorical_low,
                        ),
                        (
                            "categorical_high_preprocessor",
                            cat_high_preprocessor,
                            categorical_high,
                        ),
                    ],
                    n_jobs=self.n_jobs,
                )
            else:
                preprocessor = ColumnTransformer(
                    [
                        (
                            "categorical_low_preprocessor",
                            cat_low_preprocessor,
                            categorical_low,
                        ),
                        (
                            "categorical_high_preprocessor",
                            cat_high_preprocessor,
                            categorical_high,
                        ),
                    ],
                    n_jobs=self.n_jobs,
                )
        return preprocessor
    @abstractmethod
    def _build_metrics(self) -> Union[Dict[str, Callable], List[str]]:
        """Build default metrics for the estimator type. Overridden by the
        classifier/regressor subclasses."""
        return ["accuracy"]
def show_results(
self,
std: bool = False,
wrt_dummy: bool = False,
) -> Union[Tuple[pd.DataFrame, pd.DataFrame], pd.DataFrame]:
"""Return dataframe containing scoring results. By default returns the mean score and fit
and score times. Optionally returns standard deviations as well.
Parameters
----------
std :
Whether to return standard deviation of the scores. Default False.
wrt_dummy :
Whether to compute each score/time with respect to the dummy estimator results. Default
False.
Returns
-------
Union[Tuple[pd.DataFrame, pd.DataFrame], pd.DataFrame]
Results
"""
means = self._means
stds = self._stds
if wrt_dummy:
dummy_means = means.loc[means.index.str.contains("Dummy")]
dummy_stds = stds.loc[stds.index.str.contains("Dummy")]
means = means / dummy_means.squeeze()
stds = stds / dummy_stds.squeeze()
if std:
return means, stds
else:
return means
    @abstractmethod
    def _build_cv(self):
        """Build the cross validation strategy. Overridden by the
        classifier/regressor subclasses."""
        return self.cv
def add_estimators(
self, estimators: Union[Dict[str, ClassifierMixin], List[ClassifierMixin]]
) -> PoniardBaseEstimator:
"""Include new estimator. This is the recommended way of adding an estimator (as opposed
to modifying :attr:`estimators_` directly), since it also injects random state, n_jobs
and verbosity.
Parameters
----------
estimators :
Estimators to add.
Returns
-------
PoniardBaseEstimator
Self.
"""
if not isinstance(estimators, (Sequence, dict)):
estimators = [estimators]
if not isinstance(estimators, dict):
new_estimators = {
estimator.__class__.__name__: estimator for estimator in estimators
}
else:
new_estimators = estimators
for new_estimator in new_estimators.values():
self._pass_instance_attrs(new_estimator)
self.estimators_.update(new_estimators)
return self
def remove_estimators(
self, estimator_names: List[str], drop_results: bool = True
) -> PoniardBaseEstimator:
"""Remove estimators. This is the recommended way of removing an estimator (as opposed
to modifying :attr:`estimators_` directly), since it also removes the associated rows from
the results tables.
Parameters
----------
estimator_names :
Estimators to remove.
drop_results :
Whether to remove the results associated with the estimators. Default True.
Returns
-------
PoniardBaseEstimator
Self.
"""
pruned_estimators = {
k: v for k, v in self.estimators_.items() if k not in estimator_names
}
if len(pruned_estimators) == 0:
raise ValueError("Cannot remove all estimators.")
self.estimators_ = pruned_estimators
if drop_results and hasattr(self, "_means"):
self._means = self._means.loc[~self._means.index.isin(estimator_names)]
self._stds = self._stds.loc[~self._stds.index.isin(estimator_names)]
self._experiment_results = {
k: v
for k, v in self._experiment_results.items()
if k not in estimator_names
}
self._run_plugin_methods("on_remove_estimators")
return self
    def get_estimator(
        self,
        estimator_name: str,
        include_preprocessor: bool = True,
        retrain: bool = False,
    ) -> Union[Pipeline, ClassifierMixin, RegressorMixin]:
        """Obtain an estimator in :attr:`estimators_` by name.

        This is useful for extracting default estimators or hyperparameter-optimized
        estimators (after using :meth:`tune_estimator`).

        Parameters
        ----------
        estimator_name :
            Estimator name.
        include_preprocessor :
            Whether to return a pipeline with a preprocessor or just the estimator. Default True.
        retrain :
            Whether to retrain with full data. Default False.

        Returns
        -------
        Union[Pipeline, ClassifierMixin, RegressorMixin]
            A clone of the estimator fitted on the first CV fold (or on the full
            data if `retrain` is True).
        """
        # Take the estimator fitted on the first cross validation fold.
        model = self._experiment_results[estimator_name]["estimator"][0]
        if not include_preprocessor:
            model = model._final_estimator
        # Clone so callers never mutate the stored experiment results.
        model = clone(model)
        if retrain:
            model.fit(self.X, self.y)
        self._run_plugin_methods(
            "on_get_estimator", estimator=model, name=estimator_name
        )
        return model
    def build_ensemble(
        self,
        method: str = "stacking",
        estimator_names: Optional[List[str]] = None,
        top_n: Optional[int] = 3,
        sort_by: Optional[str] = None,
        ensemble_name: Optional[str] = None,
        **kwargs,
    ) -> PoniardBaseEstimator:
        """Combine estimators into an ensemble.

        By default, orders estimators according to the first metric.

        Parameters
        ----------
        method :
            Ensemble method. Either "stacking" or "voting". Default "stacking".
        estimator_names :
            Names of estimators to include. Default None, which uses `top_n`.
        top_n :
            How many of the best estimators to include.
        sort_by :
            Which metric to consider for ordering results. Default None, which uses the first metric.
        ensemble_name :
            Ensemble name when adding to :attr:`estimators_`. Default None.
        kwargs :
            Passed to the underlying scikit-learn ensemble constructor.

        Returns
        -------
        PoniardBaseEstimator
            Self.

        Raises
        ------
        ValueError
            If `method` is not "stacking" or "voting".
        """
        if method not in ["voting", "stacking"]:
            raise ValueError("Method must be either voting or stacking.")
        if estimator_names:
            models = [
                (name, self._experiment_results[name]["estimator"][0]._final_estimator)
                for name in estimator_names
            ]
        else:
            # Pick the top_n estimators by the chosen (or first) metric.
            if sort_by:
                sorter = sort_by
            else:
                sorter = self._means.columns[0]
            models = [
                (name, self._experiment_results[name]["estimator"][0]._final_estimator)
                for name in self._means.sort_values(sorter, ascending=False).index[
                    :top_n
                ]
            ]
        if method == "voting":
            if self._check_estimator_type() == "classifier":
                ensemble = VotingClassifier(
                    estimators=models, verbose=self.verbose, **kwargs
                )
            else:
                ensemble = VotingRegressor(
                    estimators=models, verbose=self.verbose, **kwargs
                )
        else:
            if self._check_estimator_type() == "classifier":
                ensemble = StackingClassifier(
                    estimators=models, verbose=self.verbose, cv=self.cv_, **kwargs
                )
            else:
                ensemble = StackingRegressor(
                    estimators=models, verbose=self.verbose, cv=self.cv_, **kwargs
                )
        ensemble_name = ensemble_name or ensemble.__class__.__name__
        self.add_estimators(estimators={ensemble_name: ensemble})
        return self
    def get_predictions_similarity(
        self,
        on_errors: bool = True,
    ) -> pd.DataFrame:
        """Compute correlation/association between cross validated predictions for each estimator.

        This can be useful for ensembling.

        Parameters
        ----------
        on_errors :
            Whether to compute similarity on prediction errors instead of predictions. Default
            True.

        Returns
        -------
        pd.DataFrame
            Similarity.

        Raises
        ------
        ValueError
            If the target is not 1-dimensional.
        """
        if self.y.ndim > 1:
            raise ValueError("y must be a 1-dimensional array.")
        raw_results = self.predict()
        results = raw_results.copy()
        for name, result in raw_results.items():
            if on_errors:
                # Regression: signed residuals. Classification: 1 if correct, else 0.
                if self._check_estimator_type() == "regressor":
                    results[name] = self.y - result
                else:
                    results[name] = np.where(result == self.y, 1, 0)
        results = pd.DataFrame(results)
        if self._check_estimator_type() == "classifier":
            # Pairwise Cramér's V between estimators, excluding the dummy baseline.
            estimator_names = [x for x in results.columns if x != "DummyClassifier"]
            table = pd.DataFrame(
                data=np.nan, index=estimator_names, columns=estimator_names
            )
            for row, col in itertools.combinations_with_replacement(
                table.index[::-1], 2
            ):
                cramer = cramers_v(results[row], results[col])
                if row == col:
                    table.loc[row, col] = 1
                else:
                    # Association is symmetric; fill both triangles.
                    table.loc[row, col] = cramer
                    table.loc[col, row] = cramer
        else:
            # Regression: plain Pearson correlation, excluding the dummy baseline.
            table = results.drop("DummyRegressor", axis=1).corr()
        return table
    def tune_estimator(
        self,
        estimator_name: str,
        grid: Optional[Dict] = None,
        mode: str = "grid",
        tuned_estimator_name: Optional[str] = None,
    ) -> PoniardBaseEstimator:
        """Hyperparameter tuning for a single estimator.

        The tuned estimator is added to :attr:`estimators_` and self is returned.

        Parameters
        ----------
        estimator_name :
            Estimator to tune.
        grid :
            Hyperparameter grid. Default None, which uses the grids available for default
            estimators.
        mode :
            Type of search. Either "grid", "halving" or "random". Default "grid".
        tuned_estimator_name :
            Estimator name when adding to :attr:`estimators_`. Default None.

        Returns
        -------
        PoniardBaseEstimator
            Self.

        Raises
        ------
        KeyError
            If no grid is defined and the estimator is not a default one.
        """
        X, y = self.X, self.y
        estimator = clone(self._experiment_results[estimator_name]["estimator"][0])
        if not grid:
            try:
                grid = GRID[estimator_name]
                # Prefix keys so they address the estimator step inside the pipeline.
                grid = {f"{estimator_name}__{k}": v for k, v in grid.items()}
            except KeyError:
                raise KeyError(
                    f"Estimator {estimator_name} has no predefined hyperparameter grid, so it has to be supplied."
                )
        self._pass_instance_attrs(estimator)
        # Optimize for the first metric only.
        scoring = self._first_scorer(sklearn_scorer=True)
        if mode == "random":
            search = RandomizedSearchCV(
                estimator,
                grid,
                scoring=scoring,
                cv=self.cv_,
                verbose=self.verbose,
                n_jobs=self.n_jobs,
                random_state=self.random_state,
            )
        elif mode == "halving":
            # Halving search is experimental; the enabling import is required.
            from sklearn.experimental import enable_halving_search_cv
            from sklearn.model_selection import HalvingGridSearchCV
            search = HalvingGridSearchCV(
                estimator,
                grid,
                scoring=scoring,
                cv=self.cv_,
                verbose=self.verbose,
                n_jobs=self.n_jobs,
                random_state=self.random_state,
            )
        else:
            search = GridSearchCV(
                estimator,
                grid,
                scoring=scoring,
                cv=self.cv_,
                verbose=self.verbose,
                n_jobs=self.n_jobs,
            )
        search.fit(X, y)
        tuned_estimator_name = tuned_estimator_name or f"{estimator_name}_tuned"
        self.add_estimators(
            estimators={
                tuned_estimator_name: clone(search.best_estimator_._final_estimator)
            }
        )
        return self
    def _process_results(self) -> None:
        """Compute mean and standard deviations of experiment results."""
        # TODO: This processes every result, even those that were processed
        # in previous runs (before add_estimators). Should be made more efficient
        results = pd.DataFrame(self._experiment_results).T
        # Keep only metric columns; drop fitted estimators and cached predictions.
        results = results.loc[
            :,
            [
                x
                for x in results.columns
                if x
                not in ["estimator", "predict", "predict_proba", "decision_function"]
            ],
        ]
        # Each cell holds one value per CV fold; aggregate across folds.
        means = results.apply(lambda x: np.mean(x.values.tolist(), axis=1))
        stds = results.apply(lambda x: np.std(x.values.tolist(), axis=1))
        # Move timings to the end; assumes the first two columns are fit_time
        # and score_time (cross_validate output order).
        means = means[list(means.columns[2:]) + ["fit_time", "score_time"]]
        stds = stds[list(stds.columns[2:]) + ["fit_time", "score_time"]]
        # Sort by the first metric, best first, and keep stds row-aligned.
        self._means = means.sort_values(means.columns[0], ascending=False)
        self._stds = stds.reindex(self._means.index)
        return
    def _process_long_results(self) -> None:
        """Prepare experiment results for plotting."""
        base = pd.DataFrame(self._experiment_results).T.drop(["estimator"], axis=1)
        # Long format: one row per (model, metric, fold score).
        melted = (
            base.rename_axis("Model")
            .reset_index()
            .melt(id_vars="Model", var_name="Metric", value_name="Score")
            .explode("Score")
        )
        melted["Type"] = "Fold"
        # Append per-model/metric means as additional rows tagged "Mean".
        means = melted.groupby(["Model", "Metric"])["Score"].mean().reset_index()
        means["Type"] = "Mean"
        melted = pd.concat([melted, means])
        # Shorten model labels for plot legends.
        melted["Model"] = melted["Model"].str.replace(
            "Classifier|Regressor", "", regex=True
        )
        self._long_results = melted
        return
def _first_scorer(self, sklearn_scorer: bool) -> Union[str, Callable]:
"""Helper method to get the first scoring function or name."""
if isinstance(self.metrics_, (List, Tuple)):
return self.metrics_[0]
elif isinstance(self.metrics_, dict):
if sklearn_scorer:
return list(self.metrics_.values())[0]
else:
return list(self.metrics_.keys())[0]
else:
raise ValueError(
"self.metrics_ can only be a sequence of str or dict of str: callable."
)
    def _train_test_split_from_cv(self):
        """Split data in a 80/20 fashion following the cross-validation strategy defined in the constructor."""
        # Integer or iterable CV carries no shuffle/random_state to propagate.
        if isinstance(self.cv_, (int, Iterable)):
            cv_params_for_split = {}
        else:
            # Copy shuffle/random_state from the CV object so the holdout split
            # behaves like the CV folds.
            cv_params_for_split = {
                k: v
                for k, v in vars(self.cv_).items()
                if k in ["shuffle", "random_state"]
            }
        # Stratify only when the CV strategy is a stratified variant.
        stratify = self.y if "Stratified" in self.cv_.__class__.__name__ else None
        cv_params_for_split.update({"stratify": stratify})
        return train_test_split(self.X, self.y, test_size=0.2, **cv_params_for_split)
def _check_estimator_type(self) -> Optional[str]:
"""Utility to check whether self is a Poniard regressor or classifier.
Returns
-------
Optional[str]
"classifier", "regressor" or None
"""
from poniard import PoniardRegressor, PoniardClassifier
if isinstance(self, PoniardRegressor):
return "regressor"
elif isinstance(self, PoniardClassifier):
return "classifier"
else:
return None
def _pass_instance_attrs(self, obj: Union[ClassifierMixin, RegressorMixin]):
"""Helper method to propagate instance attributes to objects."""
for attr, value in zip(
["random_state", "verbose", "verbosity"],
[self.random_state, self.verbose, self.verbose],
):
if hasattr(obj, attr):
setattr(obj, attr, value)
return
def _run_plugin_methods(self, method: str, **kwargs):
"""Helper method to run plugin methods by name."""
if not self.plugins:
return
for plugin in self.plugins:
fetched_method = getattr(plugin, method, None)
if callable(fetched_method):
accepted_kwargs = inspect.getargs(fetched_method.__code__).args
kwargs = {k: v for k, v in kwargs.items() if k in accepted_kwargs}
fetched_method(**kwargs)
return
    def __repr__(self):
        # Multi-line repr listing every constructor argument and its current value.
        return f"""{self.__class__.__name__}(estimators={self.estimators}, metrics={self.metrics},
preprocess={self.preprocess}, scaler={self.scaler}, numeric_imputer={self.numeric_imputer},
custom_preprocessor={self.custom_preprocessor}, numeric_threshold={self.numeric_threshold},
cardinality_threshold={self.cardinality_threshold}, cv={self.cv}, verbose={self.verbose},
random_state={self.random_state}, n_jobs={self.n_jobs}, plugins={self.plugins},
plot_options={str(self.plot_options)})
"""
def __add__(
self, estimators: Union[Dict[str, ClassifierMixin], List[ClassifierMixin]]
) -> PoniardBaseEstimator:
"""Add estimators to a Poniard Estimator.
Parameters
----------
estimators :
List or dict of estimators to add.
Returns
-------
PoniardBaseEstimator
Self.
"""
return self.add_estimators(estimators)
def __sub__(self, estimator_names: List[str]) -> PoniardBaseEstimator:
"""Remove an estimator and its results.
Parameters
----------
estimator :
List of estimators names.
Returns
-------
PoniardBaseEstimator
Self.
"""
return self.remove_estimators(estimator_names, drop_results=True)
def __getitem__(self, estimators: Union[str, List[str]]) -> pd.DataFrame:
"""Get results by indexing with estimator names.
Parameters
----------
estimators :
Estimator name(s) as string or list of strings.
Returns
-------
pd.DataFrame
Filtered results.
"""
return self.show_results().loc[estimators, :]
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.HalvingGridSearchCV",
"sklearn.model_selection.cross_validate",
"sklearn.model_selection.train_test_split",
"poniard.plot.PoniardPlotFactory",
"numpy.empty",
"sklearn.preprocessing.MinMaxScaler",
... | [((9861, 9882), 'tqdm.tqdm', 'tqdm', (['estimator_names'], {}), '(estimator_names)\n', (9865, 9882), False, 'from tqdm import tqdm\n'), ((20803, 20842), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (20816, 20842), False, 'from sklearn.impute import SimpleImputer\n'), ((21309, 21373), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('numeric_imputer', num_imputer), ('scaler', scaler)]"], {}), "([('numeric_imputer', num_imputer), ('scaler', scaler)])\n", (21317, 21373), False, 'from sklearn.pipeline import Pipeline\n'), ((28830, 28842), 'sklearn.base.clone', 'clone', (['model'], {}), '(model)\n', (28835, 28842), False, 'from sklearn.base import ClassifierMixin, RegressorMixin, TransformerMixin, clone\n'), ((32781, 32802), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (32793, 32802), True, 'import pandas as pd\n'), ((34533, 34596), 'sklearn.base.clone', 'clone', (["self._experiment_results[estimator_name]['estimator'][0]"], {}), "(self._experiment_results[estimator_name]['estimator'][0])\n", (34538, 34596), False, 'from sklearn.base import ClassifierMixin, RegressorMixin, TransformerMixin, clone\n'), ((37940, 37966), 'pandas.concat', 'pd.concat', (['[melted, means]'], {}), '([melted, means])\n', (37949, 37966), True, 'import pandas as pd\n'), ((39314, 39384), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'test_size': '(0.2)'}), '(self.X, self.y, test_size=0.2, **cv_params_for_split)\n', (39330, 39384), False, 'from sklearn.model_selection import BaseCrossValidator, BaseShuffleSplit, train_test_split\n'), ((6304, 6324), 'poniard.plot.PoniardPlotFactory', 'PoniardPlotFactory', ([], {}), '()\n', (6322, 6324), False, 'from poniard.plot import PoniardPlotFactory\n'), ((6641, 6700), 'joblib.Memory', 'joblib.Memory', (['"""transformation_cache"""'], {'verbose': 'self.verbose'}), "('transformation_cache', verbose=self.verbose)\n", 
(6654, 6700), False, 'import joblib\n'), ((14111, 14127), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {}), '()\n', (14125, 14127), False, 'from sklearn.dummy import DummyClassifier, DummyRegressor\n'), ((14141, 14158), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {}), '()\n', (14156, 14158), False, 'from sklearn.dummy import DummyClassifier, DummyRegressor\n'), ((16286, 16297), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (16294, 16297), True, 'import numpy as np\n'), ((16383, 16394), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (16391, 16394), True, 'import numpy as np\n'), ((18985, 19018), 'numpy.issubdtype', 'np.issubdtype', (['X.dtype', 'np.number'], {}), '(X.dtype, np.number)\n', (18998, 19018), True, 'import numpy as np\n'), ((22101, 22358), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('numeric_preprocessor', numeric_preprocessor, numeric), (\n 'categorical_low_preprocessor', cat_low_preprocessor, categorical_low),\n ('categorical_high_preprocessor', cat_high_preprocessor, categorical_high)]"], {'n_jobs': 'self.n_jobs'}), "([('numeric_preprocessor', numeric_preprocessor, numeric),\n ('categorical_low_preprocessor', cat_low_preprocessor, categorical_low),\n ('categorical_high_preprocessor', cat_high_preprocessor,\n categorical_high)], n_jobs=self.n_jobs)\n", (22118, 22358), False, 'from sklearn.compose import ColumnTransformer\n'), ((22692, 22721), 'numpy.issubdtype', 'np.issubdtype', (['X.dtype', 'float'], {}), '(X.dtype, float)\n', (22705, 22721), True, 'import numpy as np\n'), ((32965, 33038), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'np.nan', 'index': 'estimator_names', 'columns': 'estimator_names'}), '(data=np.nan, index=estimator_names, columns=estimator_names)\n', (32977, 33038), True, 'import pandas as pd\n'), ((33097, 33158), 'itertools.combinations_with_replacement', 'itertools.combinations_with_replacement', (['table.index[::-1]', '(2)'], {}), '(table.index[::-1], 2)\n', (33136, 33158), 
False, 'import itertools\n'), ((35105, 35249), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', (['estimator', 'grid'], {'scoring': 'scoring', 'cv': 'self.cv_', 'verbose': 'self.verbose', 'n_jobs': 'self.n_jobs', 'random_state': 'self.random_state'}), '(estimator, grid, scoring=scoring, cv=self.cv_, verbose=\n self.verbose, n_jobs=self.n_jobs, random_state=self.random_state)\n', (35123, 35249), False, 'from sklearn.model_selection import cross_validate, cross_val_predict, GridSearchCV, RandomizedSearchCV\n'), ((36661, 36699), 'pandas.DataFrame', 'pd.DataFrame', (['self._experiment_results'], {}), '(self._experiment_results)\n', (36673, 36699), True, 'import pandas as pd\n'), ((7896, 7989), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('preprocessor', self.preprocessor_), (name, estimator)]"], {'memory': 'self._memory'}), "([('preprocessor', self.preprocessor_), (name, estimator)], memory=\n self._memory)\n", (7904, 7989), False, 'from sklearn.pipeline import Pipeline\n'), ((8096, 8125), 'sklearn.pipeline.Pipeline', 'Pipeline', (['[(name, estimator)]'], {}), '([(name, estimator)])\n', (8104, 8125), False, 'from sklearn.pipeline import Pipeline\n'), ((8143, 8168), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8166, 8168), False, 'import warnings\n'), ((8186, 8252), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UndefinedMetricWarning'}), "('ignore', category=UndefinedMetricWarning)\n", (8209, 8252), False, 'import warnings\n'), ((8269, 8344), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '""".*will be encoded as all zeros"""'}), "('ignore', message='.*will be encoded as all zeros')\n", (8292, 8344), False, 'import warnings\n'), ((8408, 8591), 'sklearn.model_selection.cross_validate', 'cross_validate', (['final_estimator', 'self.X', 'self.y'], {'scoring': 'self.metrics_', 'cv': 'self.cv_', 'return_train_score': '(True)', 'return_estimator': 
'(True)', 'verbose': 'self.verbose', 'n_jobs': 'self.n_jobs'}), '(final_estimator, self.X, self.y, scoring=self.metrics_, cv=\n self.cv_, return_train_score=True, return_estimator=True, verbose=self.\n verbose, n_jobs=self.n_jobs)\n', (8422, 8591), False, 'from sklearn.model_selection import cross_validate, cross_val_predict, GridSearchCV, RandomizedSearchCV\n'), ((10080, 10173), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('preprocessor', self.preprocessor_), (name, estimator)]"], {'memory': 'self._memory'}), "([('preprocessor', self.preprocessor_), (name, estimator)], memory=\n self._memory)\n", (10088, 10173), False, 'from sklearn.pipeline import Pipeline\n'), ((10332, 10446), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['final_estimator', 'X', 'y'], {'cv': 'self.cv_', 'method': 'method', 'verbose': 'self.verbose', 'n_jobs': 'self.n_jobs'}), '(final_estimator, X, y, cv=self.cv_, method=method,\n verbose=self.verbose, n_jobs=self.n_jobs)\n', (10349, 10446), False, 'from sklearn.model_selection import cross_validate, cross_val_predict, GridSearchCV, RandomizedSearchCV\n'), ((20639, 20655), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (20653, 20655), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, OneHotEncoder, OrdinalEncoder\n'), ((21157, 21205), 'sklearn.impute.IterativeImputer', 'IterativeImputer', ([], {'random_state': 'self.random_state'}), '(random_state=self.random_state)\n', (21173, 21205), False, 'from sklearn.impute import IterativeImputer\n'), ((21246, 21276), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""'}), "(strategy='mean')\n", (21259, 21276), False, 'from sklearn.impute import SimpleImputer\n'), ((22792, 22819), 'numpy.issubdtype', 'np.issubdtype', (['X.dtype', 'int'], {}), '(X.dtype, int)\n', (22805, 22819), True, 'import numpy as np\n'), ((31004, 31071), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], 
{'estimators': 'models', 'verbose': 'self.verbose'}), '(estimators=models, verbose=self.verbose, **kwargs)\n', (31020, 31071), False, 'from sklearn.ensemble import VotingClassifier, VotingRegressor, StackingClassifier, StackingRegressor\n'), ((31155, 31221), 'sklearn.ensemble.VotingRegressor', 'VotingRegressor', ([], {'estimators': 'models', 'verbose': 'self.verbose'}), '(estimators=models, verbose=self.verbose, **kwargs)\n', (31170, 31221), False, 'from sklearn.ensemble import VotingClassifier, VotingRegressor, StackingClassifier, StackingRegressor\n'), ((31362, 31449), 'sklearn.ensemble.StackingClassifier', 'StackingClassifier', ([], {'estimators': 'models', 'verbose': 'self.verbose', 'cv': 'self.cv_'}), '(estimators=models, verbose=self.verbose, cv=self.cv_, **\n kwargs)\n', (31380, 31449), False, 'from sklearn.ensemble import VotingClassifier, VotingRegressor, StackingClassifier, StackingRegressor\n'), ((31528, 31614), 'sklearn.ensemble.StackingRegressor', 'StackingRegressor', ([], {'estimators': 'models', 'verbose': 'self.verbose', 'cv': 'self.cv_'}), '(estimators=models, verbose=self.verbose, cv=self.cv_, **\n kwargs)\n', (31545, 31614), False, 'from sklearn.ensemble import VotingClassifier, VotingRegressor, StackingClassifier, StackingRegressor\n'), ((33215, 33252), 'poniard.utils.cramers_v', 'cramers_v', (['results[row]', 'results[col]'], {}), '(results[row], results[col])\n', (33224, 33252), False, 'from poniard.utils import cramers_v\n'), ((35564, 35709), 'sklearn.model_selection.HalvingGridSearchCV', 'HalvingGridSearchCV', (['estimator', 'grid'], {'scoring': 'scoring', 'cv': 'self.cv_', 'verbose': 'self.verbose', 'n_jobs': 'self.n_jobs', 'random_state': 'self.random_state'}), '(estimator, grid, scoring=scoring, cv=self.cv_, verbose=\n self.verbose, n_jobs=self.n_jobs, random_state=self.random_state)\n', (35583, 35709), False, 'from sklearn.model_selection import HalvingGridSearchCV\n'), ((35867, 35973), 'sklearn.model_selection.GridSearchCV', 
'GridSearchCV', (['estimator', 'grid'], {'scoring': 'scoring', 'cv': 'self.cv_', 'verbose': 'self.verbose', 'n_jobs': 'self.n_jobs'}), '(estimator, grid, scoring=scoring, cv=self.cv_, verbose=self.\n verbose, n_jobs=self.n_jobs)\n', (35879, 35973), False, 'from sklearn.model_selection import cross_validate, cross_val_predict, GridSearchCV, RandomizedSearchCV\n'), ((10653, 10749), 'warnings.warn', 'warnings.warn', (['f"""{name} does not support `{method}` method. Filling with nan."""'], {'stacklevel': '(2)'}), "(f'{name} does not support `{method}` method. Filling with nan.',\n stacklevel=2)\n", (10666, 10749), False, 'import warnings\n'), ((10830, 10852), 'numpy.empty', 'np.empty', (['self.y.shape'], {}), '(self.y.shape)\n', (10838, 10852), True, 'import numpy as np\n'), ((15225, 15258), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""prior"""'}), "(strategy='prior')\n", (15240, 15258), False, 'from sklearn.dummy import DummyClassifier, DummyRegressor\n'), ((20715, 20729), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (20727, 20729), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, OneHotEncoder, OrdinalEncoder\n'), ((20765, 20779), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {}), '()\n', (20777, 20779), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, OneHotEncoder, OrdinalEncoder\n'), ((21582, 21638), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'drop': '"""if_binary"""', 'handle_unknown': '"""ignore"""'}), "(drop='if_binary', handle_unknown='ignore')\n", (21595, 21638), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, OneHotEncoder, OrdinalEncoder\n'), ((21871, 21942), 'sklearn.preprocessing.OrdinalEncoder', 'OrdinalEncoder', ([], {'handle_unknown': '"""use_encoded_value"""', 'unknown_value': '(99999)'}), "(handle_unknown='use_encoded_value', unknown_value=99999)\n", 
(21885, 21942), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, OneHotEncoder, OrdinalEncoder\n'), ((22852, 23109), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('numeric_preprocessor', numeric_preprocessor, numeric), (\n 'categorical_low_preprocessor', cat_low_preprocessor, categorical_low),\n ('categorical_high_preprocessor', cat_high_preprocessor, categorical_high)]"], {'n_jobs': 'self.n_jobs'}), "([('numeric_preprocessor', numeric_preprocessor, numeric),\n ('categorical_low_preprocessor', cat_low_preprocessor, categorical_low),\n ('categorical_high_preprocessor', cat_high_preprocessor,\n categorical_high)], n_jobs=self.n_jobs)\n", (22869, 23109), False, 'from sklearn.compose import ColumnTransformer\n'), ((23523, 23719), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('categorical_low_preprocessor', cat_low_preprocessor, categorical_low), (\n 'categorical_high_preprocessor', cat_high_preprocessor, categorical_high)]"], {'n_jobs': 'self.n_jobs'}), "([('categorical_low_preprocessor', cat_low_preprocessor,\n categorical_low), ('categorical_high_preprocessor',\n cat_high_preprocessor, categorical_high)], n_jobs=self.n_jobs)\n", (23540, 23719), False, 'from sklearn.compose import ColumnTransformer\n'), ((32730, 32762), 'numpy.where', 'np.where', (['(result == self.y)', '(1)', '(0)'], {}), '(result == self.y, 1, 0)\n', (32738, 32762), True, 'import numpy as np\n'), ((36278, 36324), 'sklearn.base.clone', 'clone', (['search.best_estimator_._final_estimator'], {}), '(search.best_estimator_._final_estimator)\n', (36283, 36324), False, 'from sklearn.base import ClassifierMixin, RegressorMixin, TransformerMixin, clone\n'), ((37511, 37549), 'pandas.DataFrame', 'pd.DataFrame', (['self._experiment_results'], {}), '(self._experiment_results)\n', (37523, 37549), True, 'import pandas as pd\n'), ((40636, 40676), 'inspect.getargs', 'inspect.getargs', (['fetched_method.__code__'], {}), 
'(fetched_method.__code__)\n', (40651, 40676), False, 'import inspect\n'), ((15496, 15527), 'sklearn.dummy.DummyRegressor', 'DummyRegressor', ([], {'strategy': '"""mean"""'}), "(strategy='mean')\n", (15510, 15527), False, 'from sklearn.dummy import DummyClassifier, DummyRegressor\n'), ((19906, 19967), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['self._inferred_dtypes'], {'orient': '"""index"""'}), "(self._inferred_dtypes, orient='index')\n", (19928, 19967), True, 'import pandas as pd\n'), ((19087, 19105), 'numpy.unique', 'np.unique', (['X[:, i]'], {}), '(X[:, i])\n', (19096, 19105), True, 'import numpy as np\n'), ((19480, 19498), 'numpy.unique', 'np.unique', (['X[:, i]'], {}), '(X[:, i])\n', (19489, 19498), True, 'import numpy as np\n'), ((19209, 19227), 'numpy.unique', 'np.unique', (['X[:, i]'], {}), '(X[:, i])\n', (19218, 19227), True, 'import numpy as np\n')] |
#############################################################################
# Basic Input/Output Utility Functions
# v 0.02
#############################################################################
from copy import *
#**********************Auxiliary function to write auxiliary/debugging information to an ASCII file:
def write_text(_text, _file_path):
    """Write a single block of text (plus a trailing newline) to an ASCII file.

    :param _text: text to write
    :param _file_path: full path (including file name) to the file to be (over-)written
    """
    # Context manager guarantees the handle is closed even if the write fails
    # (the original left the file open on exception).
    with open(_file_path, 'w') as f:
        f.write(_text + '\n')
#**********************Auxiliary function to read-in data comumns from ASCII file (2D table):
def read_ascii_data_cols(_file_path, _str_sep, _i_col_start=0, _i_col_end=-1, _n_line_skip=0, _float=True): #OC24112019
    """
    Auxiliary function to read-in data columns from ASCII file (2D table)
    :param _file_path: full path (including file name) to the file
    :param _str_sep: column separation symbol(s) (string)
    :param _i_col_start: initial data column to read (negative means "from the first")
    :param _i_col_end: final data column to read (negative means "to the last")
    :param _n_line_skip: number of lines to skip in the beginning of the file
    :param _float: if True, parse cells as float; otherwise keep the raw strings
    :return: 2D list containing data columns read (attn: lists, not arrays!)
    """
    # Context manager so the handle is released even if parsing raises.
    with open(_file_path, 'r') as f:
        lines = f.readlines()
    resCols = []
    nRows = len(lines) - _n_line_skip
    for i in range(nRows):
        curLine = lines[_n_line_skip + i]
        colCount = 0       # index among non-empty cells of this line
        colCountTrue = 0   # index among cells actually kept (selected range)
        for curPart in curLine.split(_str_sep):
            # Empty cells (e.g. from repeated separators) are skipped and do
            # not advance the column counter.
            if len(curPart) > 0:
                inRange = (((_i_col_start <= colCount) or (_i_col_start < 0))
                           and ((colCount <= _i_col_end) or (_i_col_end < 0)))
                if inRange:
                    # Grow the output lazily: lines may have varying widths.
                    if len(resCols) < (colCountTrue + 1): resCols.append([])
                    if(_float): resCols[colCountTrue].append(float(curPart)) #OC24112019
                    else: resCols[colCountTrue].append(curPart)
                    colCountTrue += 1
                colCount += 1
    return resCols #attn: returns lists, not arrays!
#**********************Auxiliary function to write (save) data comumns to ASCII file (2D table):
def write_ascii_data_cols(_file_path, _cols, _str_sep, _str_head=None, _i_col_start=0, _i_col_end=-1):
    """
    Auxiliary function to write tabulated data (columns, i.e 2D table) to ASCII file
    :param _file_path: full path (including file name) to the file to be (over-)written
    :param _cols: array of data columns to be saved to file
    :param _str_sep: column separation symbol(s) (string); tab is used if empty/None
    :param _str_head: header (string) to write before data columns
    :param _i_col_start: initial data column to write
    :param _i_col_end: final data column to write (negative means "to the last")
    """
    # Context manager replaces the original's scattered f.close() calls and
    # guarantees the file is closed on any exit path.
    with open(_file_path, 'w') as f:
        if _str_head is not None and len(_str_head) > 0:
            # Ensure the header ends with a newline before the data starts.
            f.write(_str_head if _str_head.endswith('\n') else _str_head + '\n')
        if _cols is None:
            return
        nCols = len(_cols)
        if nCols <= 0:
            return
        # Number of output lines = length of the longest column.
        nLines = max(len(col) for col in _cols)
        strSep = _str_sep if (_str_sep is not None and len(_str_sep) > 0) else '\t'
        iColEndP1 = _i_col_end + 1 if (0 <= _i_col_end < nCols) else nCols
        outLines = []
        for i in range(nLines):
            elems = []
            for j in range(_i_col_start, iColEndP1):
                # Pad shorter columns with a single space placeholder.
                elems.append(repr(_cols[j][i]) if i < len(_cols[j]) else ' ')
            outLines.append(strSep.join(elems))
        # Join with '\n' so there is no trailing newline, as before.
        f.write('\n'.join(outLines))
#**********************Auxiliary function to write (save) data rows to ASCII file (2D table):
def write_ascii_data_rows(_file_path, _rows, _str_sep, _str_head=None, _i_col_start=0, _i_col_end=-1, _i_row_start=0, _i_row_end=-1): #OC16112019
    """
    Auxiliary function to write tabulated data (rows, i.e 2D table) to ASCII file
    :param _file_path: full path (including file name) to the file to be (over-)written
    :param _rows: array of data rows (/lines) to be saved to file
    :param _str_sep: column separation symbol(s) (string); tab is used if empty/None
    :param _str_head: header (string) to write before data columns
    :param _i_col_start: initial data column to write
    :param _i_col_end: final data column to write (negative means "to the last")
    :param _i_row_start: initial data row to write
    :param _i_row_end: final data row to write (negative means "to the last")
    """
    with open(_file_path, 'w') as f:
        if _str_head is not None and len(_str_head) > 0:
            # Ensure the header ends with a newline before the data starts.
            f.write(_str_head if _str_head.endswith('\n') else _str_head + '\n')
        if _rows is None:
            return
        nRows = len(_rows)
        # Fix: check for an empty row list BEFORE indexing _rows[0]; the
        # original raised IndexError when _rows == [].
        if nRows <= 0:
            return
        nCols = len(_rows[0])
        if nCols <= 0:
            return
        strSep = _str_sep if (_str_sep is not None and len(_str_sep) > 0) else '\t'
        iColEndP1 = _i_col_end + 1 if (0 <= _i_col_end < nCols) else nCols
        iRowEndP1 = _i_row_end + 1 if (0 <= _i_row_end < nRows) else nRows
        outLines = []
        for i in range(_i_row_start, iRowEndP1):
            curDataRow = _rows[i]
            outLines.append(strSep.join(repr(curDataRow[j]) for j in range(_i_col_start, iColEndP1)))
        # Join with '\n' so there is no trailing newline, as before.
        f.write('\n'.join(outLines))
#********************** Read data from an image file:
def read_image(image_path, _do8bit=True): #OC23102019
#def read_image(image_path): # MR26102017
    """Read data from an image file.

    :param image_path: full path to the image.
    :param _do8bit: if True, convert the image to 8-bit grayscale ('L' mode)
        before extracting pixel data; if False, keep the original mode.
    :return: dict with keys 'data' (NumPy array of pixel values),
        'raw_image' (the PIL Image object) and 'limit_value'
        (maximum representable pixel value, float(2**bpp - 1)).
    """
    msg = '{0} library is not installed. Use "pip install {0}" to install it.'
    # Lazy imports with explicit ImportError messages so the module itself can
    # be imported without numpy/pillow installed.
    try:
        import numpy as np
    except:
        raise ImportError(msg.format('numpy'))
    try:
        from PIL import Image
    except:
        raise ImportError(msg.format('pillow'))
    #OC11112018 (as suggested by <NAME>, to walk around image size limit)
    # Disables Pillow's pixel-count limit so very large images can be loaded.
    Image.MAX_IMAGE_PIXELS = None
    # Read the image:
    raw_image = Image.open(image_path)
    # Capture the source format before any conversion below.
    image_format = raw_image.format
    #OC23102019
    #raw_image = raw_image.convert('L')
    if(_do8bit): raw_image = raw_image.convert('L')
    # Convert it to NumPy array:
    data = np.array(raw_image)
    if image_format not in ('TIFF', 'PNG', 'BMP', 'GIF', 'JPEG'):
        raise ValueError('"{}" format is not supported at the moment.'.format(raw_image.format))
    # Get bits per point:
    # NOTE(review): after convert('L') the mode is 'L' (8 bpp), so with
    # _do8bit=True limit_value is 255 regardless of the source bit depth —
    # presumably intentional; confirm if 16-bit limits are ever needed.
    mode_to_bpp = {'1': 1, 'L': 8, 'P': 8, 'I;16': 16, 'RGB': 24, 'RGBA': 32, 'CMYK': 32, 'YCbCr': 24, 'I': 32, 'F': 32}
    bpp = mode_to_bpp[raw_image.mode]
    limit_value = float(2 ** bpp - 1)
    return {
        'data': data,
        'raw_image': raw_image,
        'limit_value': limit_value,
    }
| [
"numpy.array",
"PIL.Image.open"
] | [((6955, 6977), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (6965, 6977), False, 'from PIL import Image\n'), ((7168, 7187), 'numpy.array', 'np.array', (['raw_image'], {}), '(raw_image)\n', (7176, 7187), True, 'import numpy as np\n')] |
import numpy as np
def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x))."""
    return 1.0 / (1.0 + np.exp(-x))
# Single-neuron forward pass: sigmoid(weights . inputs + bias).
inputs = np.array([0.7, -0.3])  # feature values fed into the neuron
weights = np.array([0.1, 0.8])  # one weight per input feature
bias = -0.1                     # additive bias term
# Weighted sum of the inputs plus bias, squashed through the sigmoid.
output = sigmoid(np.dot(weights, inputs) + bias)
print('Output:')
print(output)
| [
"numpy.dot",
"numpy.array",
"numpy.exp"
] | [((133, 154), 'numpy.array', 'np.array', (['[0.7, -0.3]'], {}), '([0.7, -0.3])\n', (141, 154), True, 'import numpy as np\n'), ((165, 185), 'numpy.array', 'np.array', (['[0.1, 0.8]'], {}), '([0.1, 0.8])\n', (173, 185), True, 'import numpy as np\n'), ((245, 268), 'numpy.dot', 'np.dot', (['weights', 'inputs'], {}), '(weights, inputs)\n', (251, 268), True, 'import numpy as np\n'), ((95, 105), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (101, 105), True, 'import numpy as np\n')] |
from unityagents import UnityEnvironment
import numpy as np
import os
from collections import deque
import matplotlib.pyplot as plt
import torch
def dqn_network(max_t=1000, episode_num=2000, eps_start=1.0, eps_end=0.01, decay=0.995):
    """Run a DQN training loop against the module-level `env` and `agent`.

    Plays up to `episode_num` episodes of at most `max_t` steps each, with an
    epsilon-greedy schedule decaying from `eps_start` toward `eps_end` by a
    factor of `decay` per episode.  Training stops early — and the local
    Q-network weights are saved to 'checkpoint.pth' — once the 100-episode
    moving average score reaches 13.0.

    Returns a tuple (scores, scores_mean): the per-episode scores and the
    running 100-episode mean recorded after each episode.
    """
    eps = eps_start
    scores = []
    scores_mean = []
    recent_scores = deque(maxlen=100)
    for episode in range(1, episode_num + 1):
        # Fresh episode: reset the environment and take the first observation.
        state = env.reset(train_mode=True)[brain_name].vector_observations[0]
        episode_score = 0
        for _ in range(max_t):
            action = agent.act(state, eps)
            env_info = env.step(action)[brain_name]
            next_state = env_info.vector_observations[0]
            reward = env_info.rewards[0]
            done = env_info.local_done[0]
            agent.step(state, action, reward, next_state, done)
            state = next_state
            episode_score += reward
            if done:
                break
        scores.append(episode_score)
        recent_scores.append(episode_score)
        scores_mean.append(np.mean(recent_scores))
        print('\rEpisode {}\tAverage Score: {:.2f}\teps: {:.4f}\tLR: {}'
              .format(episode, scores_mean[-1], eps, agent.lr_scheduler.get_lr()), end="")
        eps = max(eps_end, decay * eps)
        if episode % 100 == 0:
            # Persist a full status line every 100 episodes (with the decayed eps).
            print('\rEpisode {}\tAverage Score: {:.2f}\teps: {:.4f}\tLR: {}'
                  .format(episode, scores_mean[-1], eps, agent.lr_scheduler.get_lr()))
        if np.mean(recent_scores) >= 13.0:
            print(
                '\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(episode - 100, np.mean(recent_scores)))
            torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
            break
    return scores, scores_mean
### Create and train the Agent
## Create the environment
# NOTE(review): `environment_path` and `Agent` are not defined anywhere in this
# file — presumably supplied by an earlier notebook cell or a missing import;
# confirm before running this as a standalone script.
env = UnityEnvironment(environment_path)
# Get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# Get the environment info
env_info = env.reset(train_mode=True)[brain_name]
action_size = brain.vector_action_space_size
state = env_info.vector_observations[0]
state_size = len(state)
## Create the DQN Agent
agent = Agent(state_size=state_size, action_size=action_size, seed=0, lr_decay=0.9999)
## Train the Agent
# Fix: dqn_network declares the parameter `episode_num`, not `n_episodes`;
# the original keyword argument raised a TypeError.
scores, mean = dqn_network(episode_num=200, eps_start=0.10, max_t=300, eps_end=0.01, decay=0.987)
## Plot the per-episode scores and the 100-episode running mean
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.plot(np.arange(len(mean)), mean)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.legend(('Score', 'Mean'), fontsize='xx-large')
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"unityagents.UnityEnvironment",
"collections.deque"
] | [((1789, 1823), 'unityagents.UnityEnvironment', 'UnityEnvironment', (['environment_path'], {}), '(environment_path)\n', (1805, 1823), False, 'from unityagents import UnityEnvironment\n'), ((2354, 2382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2364, 2382), True, 'import matplotlib.pyplot as plt\n'), ((2488, 2507), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (2498, 2507), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2531), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode #"""'], {}), "('Episode #')\n", (2518, 2531), True, 'import matplotlib.pyplot as plt\n'), ((2532, 2582), 'matplotlib.pyplot.legend', 'plt.legend', (["('Score', 'Mean')"], {'fontsize': '"""xx-large"""'}), "(('Score', 'Mean'), fontsize='xx-large')\n", (2542, 2582), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2592, 2594), True, 'import matplotlib.pyplot as plt\n'), ((318, 335), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (323, 335), False, 'from collections import deque\n'), ((999, 1021), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (1006, 1021), True, 'import numpy as np\n'), ((1426, 1448), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (1433, 1448), True, 'import numpy as np\n'), ((1573, 1595), 'numpy.mean', 'np.mean', (['scores_window'], {}), '(scores_window)\n', (1580, 1595), True, 'import numpy as np\n')] |
from __future__ import division
from builtins import object
from past.utils import old_div
from proteus import *
from proteus.default_p import *
from proteus.mprans import SW2D
from proteus.mprans import SW2DCV
from proteus.Domain import RectangularDomain
import numpy as np
from proteus import (Domain, Context,
MeshTools as mt)
from proteus.Profiling import logEvent
import proteus.SWFlows.SWFlowProblem as SWFlowProblem
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
opts= Context.Options([
    ('sw_model',0,"sw_model = {0,1} for {SWEs,DSWEs}"),
    ("final_time",3.0,"Final time for simulation"),
    ("dt_output",1.0,"Time interval to output solution"),
    ("refinement",2,"Level of refinement"),
    ("cfl",0.33,"Desired CFL restriction"),
    ("reflecting_BCs",True,"Use reflecting BCs")
])
###################
# DOMAIN AND MESH #
###################
# Rectangular 75 x 30 domain with a structured triangulation.
L=(75.0,30.0)
refinement = opts.refinement
domain = RectangularDomain(L=L)
# CREATE REFINEMENT #
# Node count along x doubles (minus shared endpoints) per refinement level;
# ny keeps the 2:1 aspect ratio of the domain.
nnx0=6
nnx = (nnx0-1)*(2**refinement)+1
nny = old_div((nnx-1),2)+1
# Characteristic element size along x.
he = old_div(L[0],float(nnx-1))
triangleOptions="pAq30Dena%f" % (0.5*he**2,)
######################
##### BATHYMETRY #####
######################
# Physical constants for the benchmark; several (B, p, s) appear unused in
# this excerpt — presumably kept from a related analytic test case; confirm.
h0=10
a=3000
B=5
k=0.002
g = SWFlowProblem.default_physical_parameters['gravity']
p = old_div(np.sqrt(8*g*h0),a)
s = old_div(np.sqrt(p**2 - k**2),2.)
# NOTE(review): mannings is set to k (0.002) here, but the problem object at
# the bottom of this file overrides physical_parameters['mannings'] to 0.02 —
# verify which value is intended.
mannings = k
def bathymetry_function(X):
    """Bottom elevation: flat bed with three conical bumps.

    Two cones of height 1 are centered at (30, 6) and (30, 24); a taller cone
    of height 3 is centered at (47.5, 15).  The returned elevation is the
    pointwise maximum of the cones and the flat bed at 0.
    """
    x, y = X[0], X[1]
    cone_a = 1. - np.sqrt((x - 30) ** 2 + (y - 6) ** 2) * (1. / 8)
    cone_b = 1. - np.sqrt((x - 30) ** 2 + (y - 24) ** 2) * (1. / 8)
    cone_c = 3. - np.sqrt((x - 47.5) ** 2 + (y - 15) ** 2) * (3. / 10)
    return np.maximum(np.maximum(np.maximum(0., cone_a), cone_b), cone_c)
##############################
##### INITIAL CONDITIONS #####
##############################
class water_height_at_t0(object):
    """Initial water depth: free surface at 1.875 for x <= 16, dry elsewhere.

    The depth is the surface elevation minus the bathymetry, clipped at zero
    so bumps poking above the surface yield a dry (zero-depth) state.
    """
    def uOfXT(self, X, t):
        surface = 1.875 if X[0] <= 16 else 0.
        depth = surface - bathymetry_function(X)
        return max(depth, 0.)
class Zero(object):
    """Identically-zero initial condition (used for both momentum components)."""
    def uOfXT(self, x, t):
        return 0.0
# ********************************** #
# ***** Create mySWFlowProblem ***** #
# ********************************** #
outputStepping = SWFlowProblem.OutputStepping(opts.final_time,dt_output=opts.dt_output)
# Water is held behind x = 16 at elevation 1.875 (see water_height_at_t0);
# both momentum components start at rest.
initialConditions = {'water_height': water_height_at_t0(),
                     'x_mom': Zero(),
                     'y_mom': Zero()}
# No Dirichlet boundary conditions are imposed (None everywhere); wall
# behavior comes from the reflectingBCs flag below.
boundaryConditions = {'water_height': lambda x,flag: None,
                      'x_mom': lambda x,flag: None,
                      'y_mom': lambda x,flag: None}
# NOTE(review): sw_model and cfl are hard-coded to 0 and 0.33 instead of using
# opts.sw_model / opts.cfl declared above — confirm the Context options are
# intentionally ignored here.
mySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=0,
                                            cfl=0.33,
                                            outputStepping=outputStepping,
                                            structured=True,
                                            he=he,
                                            nnx=nnx,
                                            nny=nny,
                                            domain=domain,
                                            initialConditions=initialConditions,
                                            boundaryConditions=boundaryConditions,
                                            reflectingBCs=opts.reflecting_BCs,
                                            bathymetry=bathymetry_function)
mySWFlowProblem.physical_parameters['LINEAR_FRICTION']=0
# NOTE(review): overrides mannings = k (0.002) defined earlier with 0.02 —
# verify which friction coefficient is intended.
mySWFlowProblem.physical_parameters['mannings']=0.02
| [
"proteus.SWFlows.SWFlowProblem.SWFlowProblem",
"numpy.maximum",
"past.utils.old_div",
"proteus.SWFlows.SWFlowProblem.OutputStepping",
"proteus.Domain.RectangularDomain",
"proteus.Context.Options",
"numpy.sqrt"
] | [((548, 875), 'proteus.Context.Options', 'Context.Options', (["[('sw_model', 0, 'sw_model = {0,1} for {SWEs,DSWEs}'), ('final_time', 3.0,\n 'Final time for simulation'), ('dt_output', 1.0,\n 'Time interval to output solution'), ('refinement', 2,\n 'Level of refinement'), ('cfl', 0.33, 'Desired CFL restriction'), (\n 'reflecting_BCs', True, 'Use reflecting BCs')]"], {}), "([('sw_model', 0, 'sw_model = {0,1} for {SWEs,DSWEs}'), (\n 'final_time', 3.0, 'Final time for simulation'), ('dt_output', 1.0,\n 'Time interval to output solution'), ('refinement', 2,\n 'Level of refinement'), ('cfl', 0.33, 'Desired CFL restriction'), (\n 'reflecting_BCs', True, 'Use reflecting BCs')])\n", (563, 875), False, 'from proteus import Domain, Context, MeshTools as mt\n'), ((989, 1011), 'proteus.Domain.RectangularDomain', 'RectangularDomain', ([], {'L': 'L'}), '(L=L)\n', (1006, 1011), False, 'from proteus.Domain import RectangularDomain\n'), ((2197, 2268), 'proteus.SWFlows.SWFlowProblem.OutputStepping', 'SWFlowProblem.OutputStepping', (['opts.final_time'], {'dt_output': 'opts.dt_output'}), '(opts.final_time, dt_output=opts.dt_output)\n', (2225, 2268), True, 'import proteus.SWFlows.SWFlowProblem as SWFlowProblem\n'), ((2584, 2883), 'proteus.SWFlows.SWFlowProblem.SWFlowProblem', 'SWFlowProblem.SWFlowProblem', ([], {'sw_model': '(0)', 'cfl': '(0.33)', 'outputStepping': 'outputStepping', 'structured': '(True)', 'he': 'he', 'nnx': 'nnx', 'nny': 'nny', 'domain': 'domain', 'initialConditions': 'initialConditions', 'boundaryConditions': 'boundaryConditions', 'reflectingBCs': 'opts.reflecting_BCs', 'bathymetry': 'bathymetry_function'}), '(sw_model=0, cfl=0.33, outputStepping=\n outputStepping, structured=True, he=he, nnx=nnx, nny=nny, domain=domain,\n initialConditions=initialConditions, boundaryConditions=\n boundaryConditions, reflectingBCs=opts.reflecting_BCs, bathymetry=\n bathymetry_function)\n', (2611, 2883), True, 'import proteus.SWFlows.SWFlowProblem as SWFlowProblem\n'), ((1081, 
1100), 'past.utils.old_div', 'old_div', (['(nnx - 1)', '(2)'], {}), '(nnx - 1, 2)\n', (1088, 1100), False, 'from past.utils import old_div\n'), ((1344, 1363), 'numpy.sqrt', 'np.sqrt', (['(8 * g * h0)'], {}), '(8 * g * h0)\n', (1351, 1363), True, 'import numpy as np\n'), ((1375, 1399), 'numpy.sqrt', 'np.sqrt', (['(p ** 2 - k ** 2)'], {}), '(p ** 2 - k ** 2)\n', (1382, 1399), True, 'import numpy as np\n'), ((1488, 1525), 'numpy.sqrt', 'np.sqrt', (['((x - 30) ** 2 + (y - 6) ** 2)'], {}), '((x - 30) ** 2 + (y - 6) ** 2)\n', (1495, 1525), True, 'import numpy as np\n'), ((1535, 1573), 'numpy.sqrt', 'np.sqrt', (['((x - 30) ** 2 + (y - 24) ** 2)'], {}), '((x - 30) ** 2 + (y - 24) ** 2)\n', (1542, 1573), True, 'import numpy as np\n'), ((1584, 1624), 'numpy.sqrt', 'np.sqrt', (['((x - 47.5) ** 2 + (y - 15) ** 2)'], {}), '((x - 47.5) ** 2 + (y - 15) ** 2)\n', (1591, 1624), True, 'import numpy as np\n'), ((1648, 1670), 'numpy.maximum', 'np.maximum', (['(0.0)', 'bump1'], {}), '(0.0, bump1)\n', (1658, 1670), True, 'import numpy as np\n')] |
from doubleml import DoubleMLIIVM
import numpy as np
from sklearn.utils import check_X_y
from ._helper import _get_cond_smpls
from .double_ml_aws_lambda import DoubleMLLambda
from ._helper import _attach_learner, _attach_smpls
class DoubleMLIIVMServerless(DoubleMLIIVM, DoubleMLLambda):
    def __init__(self,
                 lambda_function_name,
                 aws_region,
                 obj_dml_data,
                 ml_g,
                 ml_m,
                 ml_r,
                 n_folds=5,
                 n_rep=1,
                 score='ATE',
                 subgroups=None,
                 dml_procedure='dml2',
                 trimming_rule='truncate',
                 trimming_threshold=1e-12,
                 draw_sample_splitting=True,
                 apply_cross_fitting=True):
        """Set up a serverless interactive-IV double-ML model.

        All econometric configuration (data, learners for g/m/r, folds,
        score, trimming, sample splitting) is delegated unchanged to
        ``DoubleMLIIVM.__init__``; the AWS Lambda invocation settings are
        delegated to ``DoubleMLLambda.__init__``.

        :param lambda_function_name: name of the deployed AWS Lambda function
            that fits the nuisance learners remotely.
        :param aws_region: AWS region the Lambda function is deployed in.
        Remaining parameters match ``doubleml.DoubleMLIIVM``.
        """
        # Econometric setup is handled entirely by the DoubleMLIIVM parent.
        DoubleMLIIVM.__init__(self,
                              obj_dml_data,
                              ml_g,
                              ml_m,
                              ml_r,
                              n_folds,
                              n_rep,
                              score,
                              subgroups,
                              dml_procedure,
                              trimming_rule,
                              trimming_threshold,
                              draw_sample_splitting,
                              apply_cross_fitting)
        # AWS Lambda connection details are handled by the DoubleMLLambda mixin.
        DoubleMLLambda.__init__(self,
                                lambda_function_name,
                                aws_region)
def _ml_nuisance_aws_lambda(self, cv_params):
assert self._dml_data.n_treat == 1
self._i_treat = 0
x, y = check_X_y(self._dml_data.x, self._dml_data.y)
x, z = check_X_y(x, np.ravel(self._dml_data.z))
x, d = check_X_y(x, self._dml_data.d)
# get train indices for z == 0 and z == 1
smpls_z0, smpls_z1 = _get_cond_smpls(self.smpls, z)
payload = self._dml_data.get_payload()
payload_ml_g0 = payload.copy()
payload_ml_g1 = payload.copy()
payload_ml_m = payload.copy()
payload_ml_r0 = payload.copy()
payload_ml_r1 = payload.copy()
_attach_learner(payload_ml_g0,
'ml_g0', self.learner['ml_g'],
self._dml_data.y_col, self._dml_data.x_cols)
_attach_learner(payload_ml_g1,
'ml_g1', self.learner['ml_g'],
self._dml_data.y_col, self._dml_data.x_cols)
_attach_learner(payload_ml_m,
'ml_m', self.learner['ml_m'],
self._dml_data.z_cols[0], self._dml_data.x_cols,
method='predict_proba')
all_payloads = [payload_ml_g0, payload_ml_g1, payload_ml_m]
all_smpls = [smpls_z0, smpls_z1, self.smpls]
send_train_ids = [True, True, False]
params_names = ['ml_g0', 'ml_g1', 'ml_m']
if self.subgroups['always_takers']:
_attach_learner(payload_ml_r0,
'ml_r0', self.learner['ml_r'],
self._dml_data.d_cols[0], self._dml_data.x_cols,
method='predict_proba')
all_payloads.append(payload_ml_r0)
all_smpls.append(smpls_z0)
send_train_ids.append(True)
params_names.append('ml_r0')
if self.subgroups['never_takers']:
_attach_learner(payload_ml_r1,
'ml_r1', self.learner['ml_r'],
self._dml_data.d_cols[0], self._dml_data.x_cols,
method='predict_proba')
all_payloads.append(payload_ml_r1)
all_smpls.append(smpls_z1)
send_train_ids.append(True)
params_names.append('ml_r1')
payloads = _attach_smpls(all_payloads,
all_smpls,
self.n_folds,
self.n_rep,
self._dml_data.n_obs,
cv_params['n_lambdas_cv'],
send_train_ids,
cv_params['seed'])
preds = self.invoke_lambdas(payloads, self.smpls, params_names,
self._dml_data.n_obs, self.n_rep,
cv_params['n_lambdas_cv'])
if not self.subgroups['always_takers']:
preds['ml_r0'] = np.zeros_like(preds['ml_g0'])
if not self.subgroups['never_takers']:
preds['ml_r1'] = np.ones_like(preds['ml_g1'])
for i_rep in range(self.n_rep):
# compute score elements
self._psi_a[:, i_rep, self._i_treat], self._psi_b[:, i_rep, self._i_treat] = \
self._score_elements(y, z, d,
preds['ml_g0'][:, i_rep],
preds['ml_g1'][:, i_rep],
preds['ml_m'][:, i_rep],
preds['ml_r0'][:, i_rep],
preds['ml_r1'][:, i_rep],
self.smpls[i_rep])
return
| [
"doubleml.DoubleMLIIVM.__init__",
"numpy.ones_like",
"numpy.zeros_like",
"numpy.ravel",
"sklearn.utils.check_X_y"
] | [((821, 1016), 'doubleml.DoubleMLIIVM.__init__', 'DoubleMLIIVM.__init__', (['self', 'obj_dml_data', 'ml_g', 'ml_m', 'ml_r', 'n_folds', 'n_rep', 'score', 'subgroups', 'dml_procedure', 'trimming_rule', 'trimming_threshold', 'draw_sample_splitting', 'apply_cross_fitting'], {}), '(self, obj_dml_data, ml_g, ml_m, ml_r, n_folds, n_rep,\n score, subgroups, dml_procedure, trimming_rule, trimming_threshold,\n draw_sample_splitting, apply_cross_fitting)\n', (842, 1016), False, 'from doubleml import DoubleMLIIVM\n'), ((1671, 1716), 'sklearn.utils.check_X_y', 'check_X_y', (['self._dml_data.x', 'self._dml_data.y'], {}), '(self._dml_data.x, self._dml_data.y)\n', (1680, 1716), False, 'from sklearn.utils import check_X_y\n'), ((1788, 1818), 'sklearn.utils.check_X_y', 'check_X_y', (['x', 'self._dml_data.d'], {}), '(x, self._dml_data.d)\n', (1797, 1818), False, 'from sklearn.utils import check_X_y\n'), ((1745, 1771), 'numpy.ravel', 'np.ravel', (['self._dml_data.z'], {}), '(self._dml_data.z)\n', (1753, 1771), True, 'import numpy as np\n'), ((4500, 4529), 'numpy.zeros_like', 'np.zeros_like', (["preds['ml_g0']"], {}), "(preds['ml_g0'])\n", (4513, 4529), True, 'import numpy as np\n'), ((4606, 4634), 'numpy.ones_like', 'np.ones_like', (["preds['ml_g1']"], {}), "(preds['ml_g1'])\n", (4618, 4634), True, 'import numpy as np\n')] |
from fltk import *
import copy
import numpy as np
import sys
if '../PyCommon/modules' not in sys.path:
sys.path.append('../PyCommon/modules')
from PyCommon.modules.Math import mmMath as mm
# from PyCommon.modules.Resource import ysMotionLoader as yf
from PyCommon.modules.Renderer import ysRenderer as yr
# from PyCommon.modules.Renderer import csVpRenderer as cvr
from PyCommon.modules.Simulator import csVpWorld as cvw
from PyCommon.modules.Simulator import csVpModel as cvm
# from PyCommon.modules.GUI import ysSimpleViewer as ysv
from PyCommon.modules.GUI import hpSimpleViewer as hsv
from PyCommon.modules.Optimization import ysAnalyticConstrainedOpt as yac
from PyCommon.modules.ArticulatedBody import ysJacobian as yjc
from PyCommon.modules.Util import ysPythonEx as ype
from PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp
from PyCommon.modules.ArticulatedBody import ysMomentum as ymt
from PyCommon.modules.ArticulatedBody import ysControl as yct
from MomentumProject.working_example import mtOptimize as mot
from MomentumProject.working_example import mtInitialize as mit
# Module-level simulation state, mutated from inside main()'s simulateCallback
# via `global` declarations.
g_initFlag = 0  # 0 until first-frame initialization runs in the callback, then 1
forceShowTime = 0  # elapsed time the external push force has been applied
JsysPre = 0  # previous-step system Jacobian (initialized to arrays on first frame)
JsupPreL = 0  # previous-step left-support Jacobian
JsupPreR = 0  # previous-step right-support Jacobian
JconstPre = 0  # previous-step soft-constraint Jacobian
contactChangeCount = 0  # frames remaining in a contact-transition blend
contactChangeType = 0  # 'StoD' / 'DtoS' during a transition, else 0
contact = 0  # contact state: 0 flying, 1 right only, 2 left only, 3 double
maxContactChangeCount = 30  # blend length (frames) for a contact change
preFootCenter = [None]  # previous-frame foot center, used to blend footCenter on 'StoD'
def main():
    """Set up and run the momentum-based biped balance-control simulation.

    Builds a VP world with a motion model (kinematic reference) and a control
    model (dynamic), precomputes Jacobian/momentum structures, creates the
    FLTK viewer with gain sliders and an external-force widget, and installs
    a per-frame callback that solves a constrained least-squares problem for
    desired joint accelerations (tracking + linear/angular momentum terms +
    foot contact constraints) before stepping the physics.
    """
    np.set_printoptions(precision=4, linewidth=200)
    # motion, mcfg, wcfg, stepsPerFrame, config = mit.create_vchain_5()
    motion, mcfg, wcfg, stepsPerFrame, config = mit.create_biped_zygote()
    # motion, mcfg, wcfg, stepsPerFrame, config = mit.create_biped()
    # motion, mcfg, wcfg, stepsPerFrame, config = mit.create_jump_biped()
    vpWorld = cvw.VpWorld(wcfg)
    motionModel = cvm.VpMotionModel(vpWorld, motion[0], mcfg)
    controlModel = cvm.VpControlModel(vpWorld, motion[0], mcfg)
    vpWorld.initialize()
    controlModel.initializeHybridDynamics()
    # controlToMotionOffset = (1.5, -0.02, 0)
    controlToMotionOffset = (1.5, 0, 0)
    controlModel.translateByOffset(controlToMotionOffset)
    totalDOF = controlModel.getTotalDOF()
    DOFs = controlModel.getDOFs()
    # parameter
    Kt = config['Kt']; Dt = config['Dt'] # tracking gain
    Kl = config['Kl']; Dl = config['Dl'] # linear balance gain
    Kh = config['Kh']; Dh = config['Dh'] # angular balance gain
    Ks = config['Ks']; Ds = config['Ds'] # penalty force spring gain
    Bt = config['Bt']
    Bl = config['Bl']
    Bh = config['Bh']
    w = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap'])
    supL = motion[0].skeleton.getJointIndex(config['supLink1'])
    supR = motion[0].skeleton.getJointIndex(config['supLink2'])
    # selectedBody = motion[0].skeleton.getJointIndex(config['end'])
    selectedBody = motion[0].skeleton.getJointIndex('Spine')
    constBody = motion[0].skeleton.getJointIndex('RightFoot')
    # jacobian
    JsupL = yjc.makeEmptyJacobian(DOFs, 1)
    dJsupL = JsupL.copy()
    JsupPreL = JsupL.copy()
    JsupR = yjc.makeEmptyJacobian(DOFs, 1)
    dJsupR = JsupR.copy()
    JsupPreR = JsupR.copy()
    Jconst = yjc.makeEmptyJacobian(DOFs, 1)
    dJconst = Jconst.copy()
    JconstPre = Jconst.copy()
    Jsys = yjc.makeEmptyJacobian(DOFs, controlModel.getBodyNum())
    dJsys = Jsys.copy()
    JsysPre = Jsys.copy()
    supLJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, supL)]
    supRJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, supR)]
    constJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, constBody)]
    allLinkJointMasks = yjc.getAllLinkJointMasks(motion[0].skeleton)
    # momentum matrix
    linkMasses = controlModel.getBodyMasses()
    totalMass = controlModel.getTotalMass()
    TO = ymt.make_TO(linkMasses)
    dTO = ymt.make_dTO(len(linkMasses))
    # optimization
    problem = yac.LSE(totalDOF, 12)
    # a_sup = (0,0,0, 0,0,0) #ori
    # a_sup = (0,0,0, 0,0,0) #L
    a_supL = (0,0,0, 0,0,0)
    a_supR = (0,0,0, 0,0,0)
    a_sup_2 = (0,0,0, 0,0,0, 0,0,0, 0,0,0)
    CP_old = [mm.v3(0.,0.,0.)]
    # penalty method
    bodyIDsToCheck = list(range(vpWorld.getBodyNum()))
    # mus = [1.]*len(bodyIDsToCheck)
    mus = [.5]*len(bodyIDsToCheck)
    # flat data structure
    ddth_des_flat = ype.makeFlatList(totalDOF)
    dth_flat = ype.makeFlatList(totalDOF)
    ddth_sol = ype.makeNestedList(DOFs)
    # viewer
    # Single-element lists below are render buffers: the callback mutates
    # element [0] in place so the renderers (which hold the list) see updates.
    rd_footCenter = [None]
    rd_footCenterL = [None]
    rd_footCenterR = [None]
    rd_CM_plane = [None]
    rd_CM = [None]
    rd_CP = [None]
    rd_CP_des = [None]
    rd_dL_des_plane = [None]
    rd_dH_des = [None]
    rd_grf_des = [None]
    rd_exf_des = [None]
    rd_exfen_des = [None]
    rd_root_des = [None]
    rd_foot_ori = [None]
    rd_foot_pos = [None]
    rd_CF = [None]
    rd_CF_pos = [None]
    rootPos = [None]
    selectedBodyId = [selectedBody]
    extraForce = [None]
    extraForcePos = [None]
    rightFootVectorX = [None]
    rightFootVectorY = [None]
    rightFootVectorZ = [None]
    rightFootPos = [None]
    rightVectorX = [None]
    rightVectorY = [None]
    rightVectorZ = [None]
    rightPos = [None]
    # viewer = ysv.SimpleViewer()
    viewer = hsv.hpSimpleViewer(rect=(0, 0, 960+300, 1080+56), viewForceWnd=False)
    # viewer.record(False)
    # viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,255,255), yr.LINK_BONE))
    viewer.doc.addObject('motion', motion)
    viewer.doc.addRenderer('motionModel', yr.VpModelRenderer(motionModel, (150,150,255), yr.POLYGON_FILL))
    viewer.doc.setRendererVisible('motionModel', False)
    # viewer.doc.addRenderer('controlModel', cvr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_LINE))
    viewer.doc.addRenderer('controlModel', yr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_FILL))
    viewer.doc.addRenderer('rd_footCenter', yr.PointsRenderer(rd_footCenter))
    # viewer.doc.setRendererVisible('rd_footCenter', False)
    viewer.doc.addRenderer('rd_CM_plane', yr.PointsRenderer(rd_CM_plane, (255,255,0)))
    # viewer.doc.setRendererVisible('rd_CM_plane', False)
    viewer.doc.addRenderer('rd_CP', yr.PointsRenderer(rd_CP, (0,255,0)))
    # viewer.doc.setRendererVisible('rd_CP', False)
    viewer.doc.addRenderer('rd_CP_des', yr.PointsRenderer(rd_CP_des, (255,0,255)))
    # viewer.doc.setRendererVisible('rd_CP_des', False)
    viewer.doc.addRenderer('rd_dL_des_plane', yr.VectorsRenderer(rd_dL_des_plane, rd_CM, (255,255,0)))
    # viewer.doc.setRendererVisible('rd_dL_des_plane', False)
    viewer.doc.addRenderer('rd_dH_des', yr.VectorsRenderer(rd_dH_des, rd_CM, (0,255,0)))
    # viewer.doc.setRendererVisible('rd_dH_des', False)
    # viewer.doc.addRenderer('rd_grf_des', yr.ForcesRenderer(rd_grf_des, rd_CP_des, (0,255,0), .001))
    viewer.doc.addRenderer('rd_CF', yr.VectorsRenderer(rd_CF, rd_CF_pos, (255,255,0)))
    # viewer.doc.setRendererVisible('rd_CF', False)
    viewer.doc.addRenderer('rd_foot_ori', yr.OrientationsRenderer(rd_foot_ori, rd_foot_pos, (255,255,0)))
    # viewer.doc.setRendererVisible('rd_foot_ori', False)
    viewer.doc.addRenderer('extraForce', yr.VectorsRenderer(rd_exf_des, extraForcePos, (0,255,0)))
    viewer.doc.setRendererVisible('extraForce', False)
    # viewer.doc.addRenderer('extraForceEnable', yr.VectorsRenderer(rd_exfen_des, extraForcePos, (255,0,0)))
    viewer.doc.addRenderer('extraForceEnable', yr.WideArrowRenderer(rd_exfen_des, extraForcePos, (255,0,0), lineWidth=.05, fromPoint=False))
    # viewer.doc.addRenderer('right_foot_oriX', yr.VectorsRenderer(rightFootVectorX, rightFootPos, (255,0,0)))
    # viewer.doc.addRenderer('right_foot_oriY', yr.VectorsRenderer(rightFootVectorY, rightFootPos, (0,255,0)))
    # viewer.doc.addRenderer('right_foot_oriZ', yr.VectorsRenderer(rightFootVectorZ, rightFootPos, (0,0,255)))
    # viewer.doc.addRenderer('right_oriX', yr.VectorsRenderer(rightVectorX, rightPos, (255,0,0)))
    # viewer.doc.addRenderer('right_oriY', yr.VectorsRenderer(rightVectorY, rightPos, (0,255,0)))
    # viewer.doc.addRenderer('right_oriZ', yr.VectorsRenderer(rightVectorZ, rightPos, (0,0,255)))
    # Gain presets kept from past tuning sessions (commented out below).
    # success!!
    # initKt = 50
    # initKl = 10.1
    # initKh = 3.1
    # initBl = .1
    # initBh = .1
    # initSupKt = 21.6
    # initFm = 100.0
    # success!! -- 2015.2.12. double stance
    # initKt = 50
    # initKl = 37.1
    # initKh = 41.8
    # initBl = .1
    # initBh = .13
    # initSupKt = 21.6
    # initFm = 165.0
    # single stance
    # initKt = 25
    # initKl = 80.1
    # initKh = 10.8
    # initBl = .1
    # initBh = .13
    # initSupKt = 21.6
    # initFm = 50.0
    # single stance -> double stance
    # initKt = 25
    # initKl = 60.
    # initKh = 20.
    # initBl = .1
    # initBh = .13
    # initSupKt = 21.6
    # initFm = 50.0
    initKt = 25
    initKl = 100.
    initKh = 100.
    initBl = .1
    initBh = .13
    initSupKt = 17
    # initKt = 25
    # initKl = 60.
    # initKh = 20.
    #
    # initBl = .1
    # initBh = .13
    # NOTE: this overrides the initSupKt = 17 assignment just above.
    initSupKt = 21.6
    initFm = 50.0
    initComX = 0.
    initComY = 0.
    initComZ = 0.
    viewer.objectInfoWnd.add1DSlider("Kt", 0., 300., 1., initKt)
    viewer.objectInfoWnd.add1DSlider("Kl", 0., 300., 1., initKl)
    viewer.objectInfoWnd.add1DSlider("Kh", 0., 300., 1., initKh)
    viewer.objectInfoWnd.add1DSlider("Bl", 0., 1., .001, initBl)
    viewer.objectInfoWnd.add1DSlider("Bh", 0., 1., .001, initBh)
    viewer.objectInfoWnd.add1DSlider("SupKt", 0., 100., 0.1, initSupKt)
    viewer.objectInfoWnd.add1DSlider("Fm", 0., 1000., 1., initFm)
    viewer.objectInfoWnd.add1DSlider("com X offset", -1., 1., 0.01, initComX)
    viewer.objectInfoWnd.add1DSlider("com Y offset", -1., 1., 0.01, initComY)
    viewer.objectInfoWnd.add1DSlider("com Z offset", -1., 1., 0.01, initComZ)
    viewer.force_on = False
    def viewer_SetForceState(object):
        # Callback for the 'Force on' button; `object` is the FLTK widget.
        viewer.force_on = True
    def viewer_GetForceState():
        return viewer.force_on
    def viewer_ResetForceState():
        viewer.force_on = False
    viewer.objectInfoWnd.addBtn('Force on', viewer_SetForceState)
    viewer_ResetForceState()
    offset = 60
    viewer.objectInfoWnd.begin()
    viewer.objectInfoWnd.labelForceX = Fl_Value_Input(20, 30+offset*9, 40, 20, 'X')
    viewer.objectInfoWnd.labelForceX.value(0)
    viewer.objectInfoWnd.labelForceY = Fl_Value_Input(80, 30+offset*9, 40, 20, 'Y')
    viewer.objectInfoWnd.labelForceY.value(0)
    viewer.objectInfoWnd.labelForceZ = Fl_Value_Input(140, 30+offset*9, 40, 20, 'Z')
    viewer.objectInfoWnd.labelForceZ.value(-1)
    viewer.objectInfoWnd.labelForceDur = Fl_Value_Input(220, 30+offset*9, 40, 20, 'Dur')
    viewer.objectInfoWnd.labelForceDur.value(0.4)
    viewer.objectInfoWnd.end()
    # self.sliderFm = Fl_Hor_Nice_Slider(10, 42+offset*6, 250, 10)
    def getParamVal(paramname):
        return viewer.objectInfoWnd.getVal(paramname)
    def getParamVals(paramnames):
        return (getParamVal(name) for name in paramnames)
    ###################################
    # simulate
    ###################################
    def simulateCallback(frame):
        # Per-frame control + physics step, invoked by the viewer timer.
        if frame == 200:
            viewer.force_on = True
        motionModel.update(motion[frame])
        global g_initFlag
        global forceShowTime
        global JsysPre
        global JsupPreL
        global JsupPreR
        global JsupPre
        global JconstPre
        global preFootCenter
        global maxContactChangeCount
        global contactChangeCount
        global contact
        global contactChangeType
        # Kt, Kl, Kh, Bl, Bh, kt_sup = viewer.GetParam()
        Kt, Kl, Kh, Bl, Bh, kt_sup = getParamVals(['Kt', 'Kl', 'Kh', 'Bl', 'Bh', 'SupKt'])
        # Critically-damped derivative gains derived from the spring gains.
        Dt = 2*(Kt**.5)
        Dl = 2*(Kl**.5)
        Dh = 2*(Kh**.5)
        dt_sup = 2*(kt_sup**.5)
        doubleTosingleOffset = 0.15
        singleTodoubleOffset = 0.30
        # doubleTosingleOffset = 0.09
        doubleTosingleVelOffset = 0.0
        # tracking
        th_r = motion.getDOFPositions(frame)
        th = controlModel.getDOFPositions()
        dth_r = motion.getDOFVelocities(frame)
        dth = controlModel.getDOFVelocities()
        ddth_r = motion.getDOFAccelerations(frame)
        ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt)
        ype.flatten(ddth_des, ddth_des_flat)
        ype.flatten(dth, dth_flat)
        #################################################
        # jacobian
        #################################################
        # desire footCenter[1] = 0.041135
        # desire footCenter[1] = 0.0197
        footCenterL = controlModel.getBodyPositionGlobal(supL)
        footCenterR = controlModel.getBodyPositionGlobal(supR)
        footBodyOriL = controlModel.getBodyOrientationGlobal(supL)
        footBodyOriR = controlModel.getBodyOrientationGlobal(supR)
        footBodyVelL = controlModel.getBodyVelocityGlobal(supL)
        footBodyVelR = controlModel.getBodyVelocityGlobal(supR)
        footBodyAngVelL = controlModel.getBodyAngVelocityGlobal(supL)
        footBodyAngVelR = controlModel.getBodyAngVelocityGlobal(supR)
        refFootL = motionModel.getBodyPositionGlobal(supL)
        refFootR = motionModel.getBodyPositionGlobal(supR)
        refFootVelL = motionModel.getBodyVelocityGlobal(supL)
        refFootVelR = motionModel.getBodyVelocityGlobal(supR)
        refFootAngVelL = motionModel.getBodyAngVelocityGlobal(supL)
        refFootAngVelR = motionModel.getBodyAngVelocityGlobal(supR)
        refFootOriL = motionModel.getBodyOrientationGlobal(supL)
        refFootOriR = motionModel.getBodyOrientationGlobal(supR)
        # Reference foot velocity at the body center: joint velocity plus
        # the rotational contribution omega x r.
        refFootJointVelR = motion.getJointVelocityGlobal(supR, frame)
        refFootJointAngVelR = motion.getJointAngVelocityGlobal(supR, frame)
        refFootJointR = motion.getJointPositionGlobal(supR, frame)
        refFootVelR = refFootJointVelR + np.cross(refFootJointAngVelR, (refFootR-refFootJointR))
        refFootJointVelL = motion.getJointVelocityGlobal(supL, frame)
        refFootJointAngVelL = motion.getJointAngVelocityGlobal(supL, frame)
        refFootJointL = motion.getJointPositionGlobal(supL, frame)
        refFootVelL = refFootJointVelL + np.cross(refFootJointAngVelL, (refFootL-refFootJointL))
        # Predict contact from the reference feet: a foot moving away from
        # the ground above the threshold height is treated as off-contact.
        contactR = 1
        contactL = 1
        if refFootVelR[1] < 0 and refFootVelR[1]/30. + refFootR[1] > singleTodoubleOffset:
            contactR = 0
        if refFootVelL[1] < 0 and refFootVelL[1]/30. + refFootL[1] > singleTodoubleOffset:
            contactL = 0
        if refFootVelR[1] > 0 and refFootVelR[1]/30. + refFootR[1] > doubleTosingleOffset:
            contactR = 0
        if refFootVelL[1] > 0 and refFootVelL[1]/30. + refFootL[1] > doubleTosingleOffset:
            contactL = 0
        # if 32 < frame < 147:
        #     contactR = 0
        contMotionOffset = th[0][0] - th_r[0][0]
        linkPositions = controlModel.getBodyPositionsGlobal()
        linkVelocities = controlModel.getBodyVelocitiesGlobal()
        linkAngVelocities = controlModel.getBodyAngVelocitiesGlobal()
        linkInertias = controlModel.getBodyInertiasGlobal()
        jointPositions = controlModel.getJointPositionsGlobal()
        jointAxeses = controlModel.getDOFAxeses()
        CM = yrp.getCM(linkPositions, linkMasses, totalMass)
        dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)
        CM_plane = copy.copy(CM); CM_plane[1]=0.
        dCM_plane = copy.copy(dCM); dCM_plane[1]=0.
        P = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions, CM, linkInertias)
        dP = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities, dCM, linkAngVelocities, linkInertias)
        # calculate jacobian
        Jsys, dJsys = controlModel.computeCom_J_dJdq()
        JsupL = Jsys[6*supL:6*supL+6, :]
        dJsupL = dJsys[6*supL:6*supL+6]
        JsupR = Jsys[6*supR:6*supR+6, :]
        dJsupR = dJsys[6*supR:6*supR+6]
        # calculate contact state
        # if g_initFlag == 1 and contact == 1 and refFootR[1] < doubleTosingleOffset and footCenterR[1] < 0.08:
        if g_initFlag == 1:
            # contact state
            # 0: flying 1: right only 2: left only 3: double
            # if contact == 2 and refFootR[1] < doubleTosingleOffset:
            if contact == 2 and contactR == 1:
                contact = 3
                maxContactChangeCount+=30
                contactChangeCount += maxContactChangeCount
                contactChangeType = 'StoD'
            # elif contact == 3 and refFootL[1] < doubleTosingleOffset:
            elif contact == 1 and contactL == 1:
                contact = 3
                maxContactChangeCount+=30
                contactChangeCount += maxContactChangeCount
                contactChangeType = 'StoD'
            # elif contact == 3 and refFootR[1] > doubleTosingleOffset:
            elif contact == 3 and contactR == 0:
                contact = 2
                contactChangeCount += maxContactChangeCount
                contactChangeType = 'DtoS'
            # elif contact == 3 and refFootL[1] > doubleTosingleOffset:
            elif contact == 3 and contactL == 0:
                contact = 1
                contactChangeCount += maxContactChangeCount
                contactChangeType = 'DtoS'
            else:
                contact = 0
                # if refFootR[1] < doubleTosingleOffset:
                if contactR == 1:
                    contact +=1
                # if refFootL[1] < doubleTosingleOffset:
                if contactL == 1:
                    contact +=2
        # initialization
        if g_initFlag == 0:
            JsysPre = Jsys.copy()
            JsupPreL = JsupL.copy()
            JsupPreR = JsupR.copy()
            JconstPre = Jconst.copy()
            softConstPoint = footCenterR.copy()
            # yjc.computeJacobian2(JsysPre, DOFs, jointPositions, jointAxeses, linkPositions, allLinkJointMasks)
            # yjc.computeJacobian2(JsupPreL, DOFs, jointPositions, jointAxeses, [footCenterL], supLJointMasks)
            # yjc.computeJacobian2(JsupPreR, DOFs, jointPositions, jointAxeses, [footCenterR], supRJointMasks)
            # yjc.computeJacobian2(JconstPre, DOFs, jointPositions, jointAxeses, [softConstPoint], constJointMasks)
            footCenter = footCenterL + (footCenterR - footCenterL)/2.0
            footCenter[1] = 0.
            preFootCenter = footCenter.copy()
            # footToBodyFootRotL = np.dot(np.transpose(footOriL), footBodyOriL)
            # footToBodyFootRotR = np.dot(np.transpose(footOriR), footBodyOriR)
            if refFootR[1] < doubleTosingleOffset:
                contact +=1
            if refFootL[1] < doubleTosingleOffset:
                contact +=2
            g_initFlag = 1
        # calculate footCenter
        footCenter = footCenterL + (footCenterR - footCenterL)/2.0
        # if refFootR[1] >doubleTosingleOffset:
        # if refFootR[1] > doubleTosingleOffset or footCenterR[1] > 0.08:
        # if contact == 1 or footCenterR[1] > 0.08:
        # if contact == 2 or footCenterR[1] > doubleTosingleOffset/2:
        if contact == 2:
            footCenter = footCenterL.copy()
        # elif contact == 1 or footCenterL[1] > doubleTosingleOffset/2:
        if contact == 1:
            footCenter = footCenterR.copy()
        footCenter[1] = 0.
        if contactChangeCount >0 and contactChangeType == 'StoD':
            # change footcenter gradually
            footCenter = preFootCenter + (maxContactChangeCount - contactChangeCount)*(footCenter-preFootCenter)/maxContactChangeCount
        preFootCenter = footCenter.copy()
        # linear momentum
        # TODO:
        # We should consider dCM_ref, shouldn't we?
        # add getBodyPositionGlobal and getBodyPositionsGlobal in csVpModel!
        # todo that, set joint velocities to vpModel
        CM_ref_plane = footCenter
        # CM_ref_plane[1] += motionModel.getCOM()[1]
        CM_ref = footCenter + np.array([getParamVal('com X offset'), motionModel.getCOM()[1] + getParamVal('com Y offset'), getParamVal('com Z offset')])
        # dL_des_plane = Kl*totalMass*(CM_ref_plane - CM_plane) - Dl*totalMass*dCM_plane
        dL_des_plane = Kl * totalMass * (CM_ref - CM) - Dl * totalMass * dCM
        # dL_des_plane[1] = 0.
        # angular momentum
        CP_ref = footCenter
        bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
        # bodyIDs, contactPositions, contactPositionLocals, contactForces, contactVelocities = vpWorld.calcManyPenaltyForce(0, bodyIDsToCheck, mus, Ks, Ds)
        CP = yrp.getCP(contactPositions, contactForces)
        if CP_old[0] is None or CP is None:
            dCP = None
        else:
            dCP = (CP - CP_old[0])/(1/30.)
        CP_old[0] = CP
        if CP is not None and dCP is not None:
            ddCP_des = Kh*(CP_ref - CP) - Dh*dCP
            CP_des = CP + dCP*(1/30.) + .5*ddCP_des*((1/30.)**2)
            dH_des = np.cross((CP_des - CM), (dL_des_plane + totalMass*mm.s2v(wcfg.gravity)))
            if contactChangeCount >0:# and contactChangeType == 'DtoS':
                # dH_des *= (maxContactChangeCount - contactChangeCount)/(maxContactChangeCount*10)
                dH_des *= (maxContactChangeCount - contactChangeCount)/(maxContactChangeCount)
                # dH_des *= (contactChangeCount)/(maxContactChangeCount)*.9+.1
        else:
            dH_des = None
        # H = np.dot(P, np.dot(Jsys, dth_flat))
        # dH_des = -Kh* H[3:]
        # soft point constraint
        #softConstPoint = refFootR.copy()
        ##softConstPoint[0] += 0.2
        #Ksc = 50
        #Dsc = 2*(Ksc**.5)
        #Bsc = 1.
        #P_des = softConstPoint
        #P_cur = controlModel.getBodyPositionGlobal(constBody)
        #dP_des = [0, 0, 0]
        #dP_cur = controlModel.getBodyVelocityGlobal(constBody)
        #ddP_des1 = Ksc*(P_des - P_cur) + Dsc*(dP_des - dP_cur)
        #r = P_des - P_cur
        #I = np.vstack(([1,0,0],[0,1,0],[0,0,1]))
        #Z = np.hstack((I, mm.getCrossMatrixForm(-r)))
        #yjc.computeJacobian2(Jconst, DOFs, jointPositions, jointAxeses, [softConstPoint], constJointMasks)
        #dJconst = (Jconst - Jconst)/(1/30.)
        #JconstPre = Jconst.copy()
        ##yjc.computeJacobianDerivative2(dJconst, DOFs, jointPositions, jointAxeses, linkAngVelocities, [softConstPoint], constJointMasks, False)
        #JL, JA = np.vsplit(Jconst, 2)
        #Q1 = np.dot(Z, Jconst)
        #q1 = np.dot(JA, dth_flat)
        #q2 = np.dot(mm.getCrossMatrixForm(q1), np.dot(mm.getCrossMatrixForm(q1), r))
        #q_bias1 = np.dot(np.dot(Z, dJconst), dth_flat) + q2
        #set up equality constraint
        L_ddq = mm.logSO3(np.dot(footBodyOriL.T, np.dot(refFootOriL, mm.getSO3FromVectors(np.dot(refFootOriL, mm.unitY()), mm.unitY()))))
        R_ddq = mm.logSO3(np.dot(footBodyOriR.T, np.dot(refFootOriR, mm.getSO3FromVectors(np.dot(refFootOriR, mm.unitY()), mm.unitY()))))
        L_q = mm.logSO3(footBodyOriL)
        R_q = mm.logSO3(footBodyOriR)
        L_ang = np.dot(footBodyOriL, footBodyAngVelL)
        R_ang = np.dot(footBodyOriR, footBodyAngVelR)
        L_dq = mm.vel2qd(L_ang, L_q)
        R_dq = mm.vel2qd(R_ang, R_q)
        a_oriL = np.dot(footBodyOriL, mm.qdd2accel(L_dq, L_dq, L_q))
        a_oriR = np.dot(footBodyOriR, mm.qdd2accel(R_dq, R_dq, R_q))
        # body_ddqs = list(map(mm.logSO3, [np.dot(contact_body_ori[i].T, np.dot(ref_body_ori[i], mm.getSO3FromVectors(np.dot(ref_body_ori[i], up_vec_in_each_link[contact_ids[i]]), mm.unitY()))) for i in range(len(contact_body_ori))]))
        # body_qs = list(map(mm.logSO3, contact_body_ori))
        # body_angs = [np.dot(contact_body_ori[i], contact_body_angvel[i]) for i in range(len(contact_body_ori))]
        # body_dqs = [mm.vel2qd(body_angs[i], body_qs[i]) for i in range(len(body_angs))]
        # a_oris = [np.dot(contact_body_ori[i], mm.qdd2accel(body_ddqs[i], body_dqs[i], body_qs[i])) for i in range(len(contact_body_ori))]
        #
        # NOTE(review): these reassign the a_oriL/a_oriR computed just above,
        # replacing the qdd-based values with an upright-alignment error.
        a_oriL = mm.logSO3(mm.getSO3FromVectors(np.dot(footBodyOriL, mm.unitY()), mm.unitY()))
        a_oriR = mm.logSO3(mm.getSO3FromVectors(np.dot(footBodyOriR, mm.unitY()), mm.unitY()))
        #if contact == 3 and contactChangeCount < maxContactChangeCount/4 and contactChangeCount >=1:
            #kt_sup = 30
            #viewer.objectInfoWnd.labelSupKt.value(kt_sup)
            #viewer.objectInfoWnd.sliderSupKt.value(initSupKt*10)
        # a_supL = np.append(kt_sup*(refFootL - footCenterL + contMotionOffset) + dt_sup*(refFootVelL - footBodyVelL), kt_sup*a_oriL+dt_sup*(refFootAngVelL-footBodyAngVelL))
        # a_supR = np.append(kt_sup*(refFootR - footCenterR + contMotionOffset) + dt_sup*(refFootVelR - footBodyVelR), kt_sup*a_oriR+dt_sup*(refFootAngVelR-footBodyAngVelR))
        a_supL = np.append(kt_sup*(refFootL - footCenterL + contMotionOffset) - dt_sup*footBodyVelL, kt_sup*a_oriL - dt_sup*footBodyAngVelL)
        # a_supL[1] = kt_sup*(0.028-footCenterL[1]) -dt_sup*footBodyVelL[1]
        a_supL[1] = kt_sup*(0.0-footCenterL[1]) - dt_sup*footBodyVelL[1]
        a_supR = np.append(kt_sup*(refFootR - footCenterR + contMotionOffset) - dt_sup*footBodyVelR, kt_sup*a_oriR - dt_sup*footBodyAngVelR)
        # a_supR[1] = kt_sup*(0.028-footCenterR[1]) -dt_sup*footBodyVelR[1]
        a_supR[1] = kt_sup*(0.0-footCenterR[1]) - dt_sup*footBodyVelR[1]
        ##if contact == 2:
        #if refFootR[1] <doubleTosingleOffset :
            #Jsup = np.vstack((JsupL, JsupR))
            #dJsup = np.vstack((dJsupL, dJsupR))
            #a_sup = np.append(a_supL, a_supR)
        #else:
            #Jsup = JsupL.copy()
            #dJsup = dJsupL.copy()
            #a_sup = a_supL.copy()
        # momentum matrix
        RS = np.dot(P, Jsys)
        R, S = np.vsplit(RS, 2)
        # rs = np.dot((np.dot(dP, Jsys) + np.dot(P, dJsys)), dth_flat)
        rs = np.dot(dP, np.dot(Jsys, dth_flat)) + np.dot(P, dJsys)
        r_bias, s_bias = np.hsplit(rs, 2)
        #######################################################
        # optimization
        #######################################################
        # Raise tracking weight on the swing leg, relax it on the stance leg.
        #if contact == 2 and footCenterR[1] > doubleTosingleOffset/2:
        if contact == 2:
            config['weightMap']['RightUpLeg'] = .8
            config['weightMap']['RightLeg'] = .8
            config['weightMap']['RightFoot'] = .8
        else:
            config['weightMap']['RightUpLeg'] = .1
            config['weightMap']['RightLeg'] = .25
            config['weightMap']['RightFoot'] = .2
        #if contact == 1 and footCenterL[1] > doubleTosingleOffset/2:
        if contact == 1:
            config['weightMap']['LeftUpLeg'] = .8
            config['weightMap']['LeftLeg'] = .8
            config['weightMap']['LeftFoot'] = .8
        else:
            config['weightMap']['LeftUpLeg'] = .1
            config['weightMap']['LeftLeg'] = .25
            config['weightMap']['LeftFoot'] = .2
        w = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap'])
        #if contact == 2:
            #mot.addSoftPointConstraintTerms(problem, totalDOF, Bsc, ddP_des1, Q1, q_bias1)
        mot.addTrackingTerms(problem, totalDOF, Bt, w, ddth_des_flat)
        if dH_des is not None:
            mot.addLinearTerms(problem, totalDOF, Bl, dL_des_plane, R, r_bias)
            mot.addAngularTerms(problem, totalDOF, Bh, dH_des, S, s_bias)
        #if contact & 1 and contactChangeCount == 0:
        if contact & 1:
            #if refFootR[1] < doubleTosingleOffset:
            mot.addConstraint2(problem, totalDOF, JsupR, dJsupR, dth_flat, a_supR)
        if contact & 2:
            #if refFootL[1] < doubleTosingleOffset:
            mot.addConstraint2(problem, totalDOF, JsupL, dJsupL, dth_flat, a_supL)
        if contactChangeCount > 0:
            contactChangeCount -= 1
            if contactChangeCount == 0:
                maxContactChangeCount = 30
                contactChangeType = 0
        r = problem.solve()
        problem.clear()
        ype.nested(r['x'], ddth_sol)
        rootPos[0] = controlModel.getBodyPositionGlobal(selectedBody)
        localPos = [[0, 0, 0]]
        for i in range(stepsPerFrame):
            # apply penalty force
            bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
            # print(contactForces)
            #bodyIDs, contactPositions, contactPositionLocals, contactForces, contactVelocities = vpWorld.calcManyPenaltyForce(0, bodyIDsToCheck, mus, Ks, Ds)
            vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)
            controlModel.setDOFAccelerations(ddth_sol)
            controlModel.solveHybridDynamics()
            if forceShowTime > viewer.objectInfoWnd.labelForceDur.value():
                forceShowTime = 0
                viewer_ResetForceState()
            forceforce = np.array([viewer.objectInfoWnd.labelForceX.value(), viewer.objectInfoWnd.labelForceY.value(), viewer.objectInfoWnd.labelForceZ.value()])
            extraForce[0] = getParamVal('Fm') * mm.normalize2(forceforce)
            # extraForce[0] = viewer.objectInfoWnd.labelFm.value() * mm.normalize2(forceforce)
            if viewer_GetForceState():
                forceShowTime += wcfg.timeStep
                vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
            vpWorld.step()
        # rendering
        rightVectorX[0] = np.dot(footBodyOriL, np.array([.1,0,0]))
        rightVectorY[0] = np.dot(footBodyOriL, np.array([0,.1,0]))
        rightVectorZ[0] = np.dot(footBodyOriL, np.array([0,0,.1]))
        rightPos[0] = footCenterL + np.array([.1,0,0])
        rd_footCenter[0] = footCenter
        rd_footCenterL[0] = footCenterL
        rd_footCenterR[0] = footCenterR
        rd_CM[0] = CM
        rd_CM_plane[0] = CM.copy()
        rd_CM_plane[0][1] = 0.
        if CP is not None and dCP is not None:
            rd_CP[0] = CP
            rd_CP_des[0] = CP_des
            rd_dL_des_plane[0] = [dL_des_plane[0]/100, dL_des_plane[1]/100, dL_des_plane[2]/100]
            rd_dH_des[0] = dH_des
            rd_grf_des[0] = dL_des_plane - totalMass*mm.s2v(wcfg.gravity)
        rd_root_des[0] = rootPos[0]
        del rd_foot_ori[:]
        del rd_foot_pos[:]
        rd_foot_ori.append(controlModel.getBodyOrientationGlobal(supL))
        rd_foot_ori.append(controlModel.getBodyOrientationGlobal(supR))
        rd_foot_pos.append(controlModel.getBodyPositionGlobal(supL))
        rd_foot_pos.append(controlModel.getBodyPositionGlobal(supR))
        del rd_CF[:]
        del rd_CF_pos[:]
        for i in range(len(contactPositions)):
            rd_CF.append( contactForces[i]/400)
            rd_CF_pos.append(contactPositions[i].copy())
        if viewer_GetForceState():
            rd_exfen_des[0] = [extraForce[0][0]/100, extraForce[0][1]/100, extraForce[0][2]/100]
            rd_exf_des[0] = [0,0,0]
        else:
            rd_exf_des[0] = [extraForce[0][0]/100, extraForce[0][1]/100, extraForce[0][2]/100]
            rd_exfen_des[0] = [0,0,0]
        extraForcePos[0] = controlModel.getBodyPositionGlobal(selectedBody) - 0.1 * np.array([viewer.objectInfoWnd.labelForceX.value(), 0., viewer.objectInfoWnd.labelForceZ.value()])
    viewer.setSimulateCallback(simulateCallback)
    viewer.startTimer(1/30.)
    viewer.show()
    Fl.run()
main()
| [
"PyCommon.modules.Util.ysPythonEx.makeFlatList",
"PyCommon.modules.ArticulatedBody.ysReferencePoints.getCM",
"PyCommon.modules.ArticulatedBody.ysReferencePoints.getCP",
"PyCommon.modules.Util.ysPythonEx.nested",
"PyCommon.modules.ArticulatedBody.ysJacobian.makeEmptyJacobian",
"PyCommon.modules.Renderer.ys... | [((108, 146), 'sys.path.append', 'sys.path.append', (['"""../PyCommon/modules"""'], {}), "('../PyCommon/modules')\n", (123, 146), False, 'import sys\n'), ((1319, 1366), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'linewidth': '(200)'}), '(precision=4, linewidth=200)\n', (1338, 1366), True, 'import numpy as np\n'), ((1488, 1513), 'MomentumProject.working_example.mtInitialize.create_biped_zygote', 'mit.create_biped_zygote', ([], {}), '()\n', (1511, 1513), True, 'from MomentumProject.working_example import mtInitialize as mit\n'), ((1672, 1689), 'PyCommon.modules.Simulator.csVpWorld.VpWorld', 'cvw.VpWorld', (['wcfg'], {}), '(wcfg)\n', (1683, 1689), True, 'from PyCommon.modules.Simulator import csVpWorld as cvw\n'), ((1708, 1751), 'PyCommon.modules.Simulator.csVpModel.VpMotionModel', 'cvm.VpMotionModel', (['vpWorld', 'motion[0]', 'mcfg'], {}), '(vpWorld, motion[0], mcfg)\n', (1725, 1751), True, 'from PyCommon.modules.Simulator import csVpModel as cvm\n'), ((1771, 1815), 'PyCommon.modules.Simulator.csVpModel.VpControlModel', 'cvm.VpControlModel', (['vpWorld', 'motion[0]', 'mcfg'], {}), '(vpWorld, motion[0], mcfg)\n', (1789, 1815), True, 'from PyCommon.modules.Simulator import csVpModel as cvm\n'), ((2457, 2525), 'MomentumProject.working_example.mtOptimize.getTrackingWeight', 'mot.getTrackingWeight', (['DOFs', 'motion[0].skeleton', "config['weightMap']"], {}), "(DOFs, motion[0].skeleton, config['weightMap'])\n", (2478, 2525), True, 'from MomentumProject.working_example import mtOptimize as mot\n'), ((2876, 2906), 'PyCommon.modules.ArticulatedBody.ysJacobian.makeEmptyJacobian', 'yjc.makeEmptyJacobian', (['DOFs', '(1)'], {}), '(DOFs, 1)\n', (2897, 2906), True, 'from PyCommon.modules.ArticulatedBody import ysJacobian as yjc\n'), ((2974, 3004), 'PyCommon.modules.ArticulatedBody.ysJacobian.makeEmptyJacobian', 'yjc.makeEmptyJacobian', (['DOFs', '(1)'], {}), '(DOFs, 1)\n', (2995, 3004), True, 'from 
PyCommon.modules.ArticulatedBody import ysJacobian as yjc\n'), ((3073, 3103), 'PyCommon.modules.ArticulatedBody.ysJacobian.makeEmptyJacobian', 'yjc.makeEmptyJacobian', (['DOFs', '(1)'], {}), '(DOFs, 1)\n', (3094, 3103), True, 'from PyCommon.modules.ArticulatedBody import ysJacobian as yjc\n'), ((3520, 3564), 'PyCommon.modules.ArticulatedBody.ysJacobian.getAllLinkJointMasks', 'yjc.getAllLinkJointMasks', (['motion[0].skeleton'], {}), '(motion[0].skeleton)\n', (3544, 3564), True, 'from PyCommon.modules.ArticulatedBody import ysJacobian as yjc\n'), ((3687, 3710), 'PyCommon.modules.ArticulatedBody.ysMomentum.make_TO', 'ymt.make_TO', (['linkMasses'], {}), '(linkMasses)\n', (3698, 3710), True, 'from PyCommon.modules.ArticulatedBody import ysMomentum as ymt\n'), ((3785, 3806), 'PyCommon.modules.Optimization.ysAnalyticConstrainedOpt.LSE', 'yac.LSE', (['totalDOF', '(12)'], {}), '(totalDOF, 12)\n', (3792, 3806), True, 'from PyCommon.modules.Optimization import ysAnalyticConstrainedOpt as yac\n'), ((4199, 4225), 'PyCommon.modules.Util.ysPythonEx.makeFlatList', 'ype.makeFlatList', (['totalDOF'], {}), '(totalDOF)\n', (4215, 4225), True, 'from PyCommon.modules.Util import ysPythonEx as ype\n'), ((4241, 4267), 'PyCommon.modules.Util.ysPythonEx.makeFlatList', 'ype.makeFlatList', (['totalDOF'], {}), '(totalDOF)\n', (4257, 4267), True, 'from PyCommon.modules.Util import ysPythonEx as ype\n'), ((4283, 4307), 'PyCommon.modules.Util.ysPythonEx.makeNestedList', 'ype.makeNestedList', (['DOFs'], {}), '(DOFs)\n', (4301, 4307), True, 'from PyCommon.modules.Util import ysPythonEx as ype\n'), ((5112, 5185), 'PyCommon.modules.GUI.hpSimpleViewer.hpSimpleViewer', 'hsv.hpSimpleViewer', ([], {'rect': '(0, 0, 960 + 300, 1080 + 56)', 'viewForceWnd': '(False)'}), '(rect=(0, 0, 960 + 300, 1080 + 56), viewForceWnd=False)\n', (5130, 5185), True, 'from PyCommon.modules.GUI import hpSimpleViewer as hsv\n'), ((3302, 3348), 'PyCommon.modules.ArticulatedBody.ysJacobian.getLinkJointMask', 
'yjc.getLinkJointMask', (['motion[0].skeleton', 'supL'], {}), '(motion[0].skeleton, supL)\n', (3322, 3348), True, 'from PyCommon.modules.ArticulatedBody import ysJacobian as yjc\n'), ((3372, 3418), 'PyCommon.modules.ArticulatedBody.ysJacobian.getLinkJointMask', 'yjc.getLinkJointMask', (['motion[0].skeleton', 'supR'], {}), '(motion[0].skeleton, supR)\n', (3392, 3418), True, 'from PyCommon.modules.ArticulatedBody import ysJacobian as yjc\n'), ((3443, 3494), 'PyCommon.modules.ArticulatedBody.ysJacobian.getLinkJointMask', 'yjc.getLinkJointMask', (['motion[0].skeleton', 'constBody'], {}), '(motion[0].skeleton, constBody)\n', (3463, 3494), True, 'from PyCommon.modules.ArticulatedBody import ysJacobian as yjc\n'), ((3986, 4006), 'PyCommon.modules.Math.mmMath.v3', 'mm.v3', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (3991, 4006), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((5392, 5457), 'PyCommon.modules.Renderer.ysRenderer.VpModelRenderer', 'yr.VpModelRenderer', (['motionModel', '(150, 150, 255)', 'yr.POLYGON_FILL'], {}), '(motionModel, (150, 150, 255), yr.POLYGON_FILL)\n', (5410, 5457), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((5668, 5734), 'PyCommon.modules.Renderer.ysRenderer.VpModelRenderer', 'yr.VpModelRenderer', (['controlModel', '(255, 240, 255)', 'yr.POLYGON_FILL'], {}), '(controlModel, (255, 240, 255), yr.POLYGON_FILL)\n', (5686, 5734), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((5778, 5810), 'PyCommon.modules.Renderer.ysRenderer.PointsRenderer', 'yr.PointsRenderer', (['rd_footCenter'], {}), '(rd_footCenter)\n', (5795, 5810), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((5914, 5959), 'PyCommon.modules.Renderer.ysRenderer.PointsRenderer', 'yr.PointsRenderer', (['rd_CM_plane', '(255, 255, 0)'], {}), '(rd_CM_plane, (255, 255, 0))\n', (5931, 5959), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((6053, 6090), 
'PyCommon.modules.Renderer.ysRenderer.PointsRenderer', 'yr.PointsRenderer', (['rd_CP', '(0, 255, 0)'], {}), '(rd_CP, (0, 255, 0))\n', (6070, 6090), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((6182, 6225), 'PyCommon.modules.Renderer.ysRenderer.PointsRenderer', 'yr.PointsRenderer', (['rd_CP_des', '(255, 0, 255)'], {}), '(rd_CP_des, (255, 0, 255))\n', (6199, 6225), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((6327, 6384), 'PyCommon.modules.Renderer.ysRenderer.VectorsRenderer', 'yr.VectorsRenderer', (['rd_dL_des_plane', 'rd_CM', '(255, 255, 0)'], {}), '(rd_dL_des_plane, rd_CM, (255, 255, 0))\n', (6345, 6384), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((6486, 6535), 'PyCommon.modules.Renderer.ysRenderer.VectorsRenderer', 'yr.VectorsRenderer', (['rd_dH_des', 'rd_CM', '(0, 255, 0)'], {}), '(rd_dH_des, rd_CM, (0, 255, 0))\n', (6504, 6535), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((6729, 6780), 'PyCommon.modules.Renderer.ysRenderer.VectorsRenderer', 'yr.VectorsRenderer', (['rd_CF', 'rd_CF_pos', '(255, 255, 0)'], {}), '(rd_CF, rd_CF_pos, (255, 255, 0))\n', (6747, 6780), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((6874, 6938), 'PyCommon.modules.Renderer.ysRenderer.OrientationsRenderer', 'yr.OrientationsRenderer', (['rd_foot_ori', 'rd_foot_pos', '(255, 255, 0)'], {}), '(rd_foot_ori, rd_foot_pos, (255, 255, 0))\n', (6897, 6938), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((7038, 7096), 'PyCommon.modules.Renderer.ysRenderer.VectorsRenderer', 'yr.VectorsRenderer', (['rd_exf_des', 'extraForcePos', '(0, 255, 0)'], {}), '(rd_exf_des, extraForcePos, (0, 255, 0))\n', (7056, 7096), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((7307, 7407), 'PyCommon.modules.Renderer.ysRenderer.WideArrowRenderer', 'yr.WideArrowRenderer', (['rd_exfen_des', 'extraForcePos', '(255, 0, 0)'], {'lineWidth': '(0.05)', 'fromPoint': 
'(False)'}), '(rd_exfen_des, extraForcePos, (255, 0, 0), lineWidth=\n 0.05, fromPoint=False)\n', (7327, 7407), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((12139, 12207), 'PyCommon.modules.ArticulatedBody.ysControl.getDesiredDOFAccelerations', 'yct.getDesiredDOFAccelerations', (['th_r', 'th', 'dth_r', 'dth', 'ddth_r', 'Kt', 'Dt'], {}), '(th_r, th, dth_r, dth, ddth_r, Kt, Dt)\n', (12169, 12207), True, 'from PyCommon.modules.ArticulatedBody import ysControl as yct\n'), ((12217, 12253), 'PyCommon.modules.Util.ysPythonEx.flatten', 'ype.flatten', (['ddth_des', 'ddth_des_flat'], {}), '(ddth_des, ddth_des_flat)\n', (12228, 12253), True, 'from PyCommon.modules.Util import ysPythonEx as ype\n'), ((12262, 12288), 'PyCommon.modules.Util.ysPythonEx.flatten', 'ype.flatten', (['dth', 'dth_flat'], {}), '(dth, dth_flat)\n', (12273, 12288), True, 'from PyCommon.modules.Util import ysPythonEx as ype\n'), ((15168, 15215), 'PyCommon.modules.ArticulatedBody.ysReferencePoints.getCM', 'yrp.getCM', (['linkPositions', 'linkMasses', 'totalMass'], {}), '(linkPositions, linkMasses, totalMass)\n', (15177, 15215), True, 'from PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp\n'), ((15230, 15278), 'PyCommon.modules.ArticulatedBody.ysReferencePoints.getCM', 'yrp.getCM', (['linkVelocities', 'linkMasses', 'totalMass'], {}), '(linkVelocities, linkMasses, totalMass)\n', (15239, 15278), True, 'from PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp\n'), ((15298, 15311), 'copy.copy', 'copy.copy', (['CM'], {}), '(CM)\n', (15307, 15311), False, 'import copy\n'), ((15348, 15362), 'copy.copy', 'copy.copy', (['dCM'], {}), '(dCM)\n', (15357, 15362), False, 'import copy\n'), ((15393, 15466), 'PyCommon.modules.ArticulatedBody.ysMomentum.getPureInertiaMatrix', 'ymt.getPureInertiaMatrix', (['TO', 'linkMasses', 'linkPositions', 'CM', 'linkInertias'], {}), '(TO, linkMasses, linkPositions, CM, linkInertias)\n', (15417, 15466), True, 'from 
PyCommon.modules.ArticulatedBody import ysMomentum as ymt\n'), ((15480, 15589), 'PyCommon.modules.ArticulatedBody.ysMomentum.getPureInertiaMatrixDerivative', 'ymt.getPureInertiaMatrixDerivative', (['dTO', 'linkMasses', 'linkVelocities', 'dCM', 'linkAngVelocities', 'linkInertias'], {}), '(dTO, linkMasses, linkVelocities, dCM,\n linkAngVelocities, linkInertias)\n', (15514, 15589), True, 'from PyCommon.modules.ArticulatedBody import ysMomentum as ymt\n'), ((20558, 20600), 'PyCommon.modules.ArticulatedBody.ysReferencePoints.getCP', 'yrp.getCP', (['contactPositions', 'contactForces'], {}), '(contactPositions, contactForces)\n', (20567, 20600), True, 'from PyCommon.modules.ArticulatedBody import ysReferencePoints as yrp\n'), ((22945, 22968), 'PyCommon.modules.Math.mmMath.logSO3', 'mm.logSO3', (['footBodyOriL'], {}), '(footBodyOriL)\n', (22954, 22968), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((22983, 23006), 'PyCommon.modules.Math.mmMath.logSO3', 'mm.logSO3', (['footBodyOriR'], {}), '(footBodyOriR)\n', (22992, 23006), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((23023, 23060), 'numpy.dot', 'np.dot', (['footBodyOriL', 'footBodyAngVelL'], {}), '(footBodyOriL, footBodyAngVelL)\n', (23029, 23060), True, 'import numpy as np\n'), ((23077, 23114), 'numpy.dot', 'np.dot', (['footBodyOriR', 'footBodyAngVelR'], {}), '(footBodyOriR, footBodyAngVelR)\n', (23083, 23114), True, 'import numpy as np\n'), ((23130, 23151), 'PyCommon.modules.Math.mmMath.vel2qd', 'mm.vel2qd', (['L_ang', 'L_q'], {}), '(L_ang, L_q)\n', (23139, 23151), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((23167, 23188), 'PyCommon.modules.Math.mmMath.vel2qd', 'mm.vel2qd', (['R_ang', 'R_q'], {}), '(R_ang, R_q)\n', (23176, 23188), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((24785, 24920), 'numpy.append', 'np.append', (['(kt_sup * (refFootL - footCenterL + contMotionOffset) - dt_sup * footBodyVelL)', '(kt_sup * a_oriL - dt_sup * footBodyAngVelL)'], {}), 
'(kt_sup * (refFootL - footCenterL + contMotionOffset) - dt_sup *\n footBodyVelL, kt_sup * a_oriL - dt_sup * footBodyAngVelL)\n', (24794, 24920), True, 'import numpy as np\n'), ((25075, 25210), 'numpy.append', 'np.append', (['(kt_sup * (refFootR - footCenterR + contMotionOffset) - dt_sup * footBodyVelR)', '(kt_sup * a_oriR - dt_sup * footBodyAngVelR)'], {}), '(kt_sup * (refFootR - footCenterR + contMotionOffset) - dt_sup *\n footBodyVelR, kt_sup * a_oriR - dt_sup * footBodyAngVelR)\n', (25084, 25210), True, 'import numpy as np\n'), ((25724, 25739), 'numpy.dot', 'np.dot', (['P', 'Jsys'], {}), '(P, Jsys)\n', (25730, 25739), True, 'import numpy as np\n'), ((25755, 25771), 'numpy.vsplit', 'np.vsplit', (['RS', '(2)'], {}), '(RS, 2)\n', (25764, 25771), True, 'import numpy as np\n'), ((25936, 25952), 'numpy.hsplit', 'np.hsplit', (['rs', '(2)'], {}), '(rs, 2)\n', (25945, 25952), True, 'import numpy as np\n'), ((26933, 27001), 'MomentumProject.working_example.mtOptimize.getTrackingWeight', 'mot.getTrackingWeight', (['DOFs', 'motion[0].skeleton', "config['weightMap']"], {}), "(DOFs, motion[0].skeleton, config['weightMap'])\n", (26954, 27001), True, 'from MomentumProject.working_example import mtOptimize as mot\n'), ((27129, 27190), 'MomentumProject.working_example.mtOptimize.addTrackingTerms', 'mot.addTrackingTerms', (['problem', 'totalDOF', 'Bt', 'w', 'ddth_des_flat'], {}), '(problem, totalDOF, Bt, w, ddth_des_flat)\n', (27149, 27190), True, 'from MomentumProject.working_example import mtOptimize as mot\n'), ((28021, 28049), 'PyCommon.modules.Util.ysPythonEx.nested', 'ype.nested', (["r['x']", 'ddth_sol'], {}), "(r['x'], ddth_sol)\n", (28031, 28049), True, 'from PyCommon.modules.Util import ysPythonEx as ype\n'), ((13800, 13855), 'numpy.cross', 'np.cross', (['refFootJointAngVelR', '(refFootR - refFootJointR)'], {}), '(refFootJointAngVelR, refFootR - refFootJointR)\n', (13808, 13855), True, 'import numpy as np\n'), ((14111, 14166), 'numpy.cross', 'np.cross', 
(['refFootJointAngVelL', '(refFootL - refFootJointL)'], {}), '(refFootJointAngVelL, refFootL - refFootJointL)\n', (14119, 14166), True, 'import numpy as np\n'), ((23227, 23256), 'PyCommon.modules.Math.mmMath.qdd2accel', 'mm.qdd2accel', (['L_dq', 'L_dq', 'L_q'], {}), '(L_dq, L_dq, L_q)\n', (23239, 23256), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((23296, 23325), 'PyCommon.modules.Math.mmMath.qdd2accel', 'mm.qdd2accel', (['R_dq', 'R_dq', 'R_q'], {}), '(R_dq, R_dq, R_q)\n', (23308, 23325), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((25894, 25910), 'numpy.dot', 'np.dot', (['P', 'dJsys'], {}), '(P, dJsys)\n', (25900, 25910), True, 'import numpy as np\n'), ((27234, 27300), 'MomentumProject.working_example.mtOptimize.addLinearTerms', 'mot.addLinearTerms', (['problem', 'totalDOF', 'Bl', 'dL_des_plane', 'R', 'r_bias'], {}), '(problem, totalDOF, Bl, dL_des_plane, R, r_bias)\n', (27252, 27300), True, 'from MomentumProject.working_example import mtOptimize as mot\n'), ((27313, 27374), 'MomentumProject.working_example.mtOptimize.addAngularTerms', 'mot.addAngularTerms', (['problem', 'totalDOF', 'Bh', 'dH_des', 'S', 's_bias'], {}), '(problem, totalDOF, Bh, dH_des, S, s_bias)\n', (27332, 27374), True, 'from MomentumProject.working_example import mtOptimize as mot\n'), ((29485, 29506), 'numpy.array', 'np.array', (['[0.1, 0, 0]'], {}), '([0.1, 0, 0])\n', (29493, 29506), True, 'import numpy as np\n'), ((29552, 29573), 'numpy.array', 'np.array', (['[0, 0.1, 0]'], {}), '([0, 0.1, 0])\n', (29560, 29573), True, 'import numpy as np\n'), ((29619, 29640), 'numpy.array', 'np.array', (['[0, 0, 0.1]'], {}), '([0, 0, 0.1])\n', (29627, 29640), True, 'import numpy as np\n'), ((29675, 29696), 'numpy.array', 'np.array', (['[0.1, 0, 0]'], {}), '([0.1, 0, 0])\n', (29683, 29696), True, 'import numpy as np\n'), ((24058, 24068), 'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (24066, 24068), True, 'from PyCommon.modules.Math import mmMath as mm\n'), 
((24153, 24163), 'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (24161, 24163), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((25868, 25890), 'numpy.dot', 'np.dot', (['Jsys', 'dth_flat'], {}), '(Jsys, dth_flat)\n', (25874, 25890), True, 'import numpy as np\n'), ((27529, 27599), 'MomentumProject.working_example.mtOptimize.addConstraint2', 'mot.addConstraint2', (['problem', 'totalDOF', 'JsupR', 'dJsupR', 'dth_flat', 'a_supR'], {}), '(problem, totalDOF, JsupR, dJsupR, dth_flat, a_supR)\n', (27547, 27599), True, 'from MomentumProject.working_example import mtOptimize as mot\n'), ((27696, 27766), 'MomentumProject.working_example.mtOptimize.addConstraint2', 'mot.addConstraint2', (['problem', 'totalDOF', 'JsupL', 'dJsupL', 'dth_flat', 'a_supL'], {}), '(problem, totalDOF, JsupL, dJsupL, dth_flat, a_supL)\n', (27714, 27766), True, 'from MomentumProject.working_example import mtOptimize as mot\n'), ((29102, 29127), 'PyCommon.modules.Math.mmMath.normalize2', 'mm.normalize2', (['forceforce'], {}), '(forceforce)\n', (29115, 29127), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((24045, 24055), 'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (24053, 24055), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((24140, 24150), 'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (24148, 24150), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((30197, 30217), 'PyCommon.modules.Math.mmMath.s2v', 'mm.s2v', (['wcfg.gravity'], {}), '(wcfg.gravity)\n', (30203, 30217), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((20981, 21001), 'PyCommon.modules.Math.mmMath.s2v', 'mm.s2v', (['wcfg.gravity'], {}), '(wcfg.gravity)\n', (20987, 21001), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((22778, 22788), 'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (22786, 22788), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((22916, 22926), 
'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (22924, 22926), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((22765, 22775), 'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (22773, 22775), True, 'from PyCommon.modules.Math import mmMath as mm\n'), ((22903, 22913), 'PyCommon.modules.Math.mmMath.unitY', 'mm.unitY', ([], {}), '()\n', (22911, 22913), True, 'from PyCommon.modules.Math import mmMath as mm\n')] |
import numpy as np
import cv2
import os, glob
import _init_paths
import caffe
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, info_syn_transform_inv_h, info_syn_transform_inv_w
from fast_rcnn.nms_wrapper import nms, pnms
from utils.blob import im_list_to_blob
from shapely.geometry import *
# --- Runtime configuration -------------------------------------------------
# Run Caffe on GPU 0.
caffe.set_mode_gpu()
caffe.set_device(0)
# Network definition and trained weights for the CTD curved-text detector.
net_prototxt = "../models/ctd/test_ctd_tloc.prototxt"
model = "../output/ctd_tloc.caffemodel"
# NOTE(review): name is a typo for "config_file"; it is also read in the
# __main__ block below, so renaming requires updating both sites together.
cofig_file = "../experiments/cfgs/rfcn_ctd.yml"
# Demo images the script runs detection on.
images = glob.glob("../images/demo/*.jpg")
def _get_image_blob(im):
    """Convert an image into a network input blob.

    Subtracts the configured pixel means, then resizes the image once per
    configured test scale so the shorter side matches the target size,
    capping the factor so the longer side never exceeds ``cfg.TEST.MAX_SIZE``.

    Returns a tuple ``(blob, scale_factors)`` where ``blob`` packs the
    resized images and ``scale_factors`` is an ndarray with one resize
    factor per test scale.
    """
    mean_subtracted = im.astype(np.float32, copy=True)
    mean_subtracted -= cfg.PIXEL_MEANS
    shorter_side = np.min(mean_subtracted.shape[0:2])
    longer_side = np.max(mean_subtracted.shape[0:2])
    resized_images = []
    scale_factors = []
    for scale_target in cfg.TEST.SCALES:
        factor = float(scale_target) / float(shorter_side)
        # Cap the factor so the longer axis stays within MAX_SIZE.
        if np.round(factor * longer_side) > cfg.TEST.MAX_SIZE:
            factor = float(cfg.TEST.MAX_SIZE) / float(longer_side)
        resized = cv2.resize(mean_subtracted, None, None, fx=factor, fy=factor,
                             interpolation=cv2.INTER_LINEAR)
        scale_factors.append(factor)
        resized_images.append(resized)
    # Pack all resized images into a single input blob.
    return im_list_to_blob(resized_images), np.array(scale_factors)
def _get_blobs(im, rois):
    """Build the network input dict for *im*.

    ``rois`` is accepted for API compatibility but unused here: the network
    proposes its own regions, so only the 'data' entry is filled and 'rois'
    stays None.
    """
    data_blob, scale_factors = _get_image_blob(im)
    return {'data': data_blob, 'rois': None}, scale_factors
def im_detect(net, im, boxes=None):
    """Run the detection network on one image.

    Returns a 4-tuple ``(scores, pred_boxes, pred_infos_h, pred_infos_w)``:
    class probabilities, clipped box predictions, and the per-box curve
    offset predictions for height and width.  ``boxes`` is accepted for API
    compatibility; it is overwritten with the network's own RoIs below.
    """
    blobs, im_scales = _get_blobs(im, boxes)
    im_blob = blobs['data']
    # im_info = (height, width, scale) of the (single) resized input image.
    blobs['im_info'] = np.array(
        [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
        dtype=np.float32)
    # reshape network inputs
    net.blobs['data'].reshape(*(blobs['data'].shape))
    net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
    # do forward
    forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
    forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
    blobs_out = net.forward(**forward_kwargs)
    # RoIs come back in input-image coordinates; rescale to the original image.
    rois = net.blobs['rois'].data.copy()
    boxes = rois[:, 1:5] / im_scales[0]
    scores = blobs_out['cls_prob']
    # Apply predicted deltas to the RoIs and clip to the image bounds.
    box_deltas = blobs_out['bbox_pred']
    pred_boxes = bbox_transform_inv(boxes, box_deltas)
    pred_boxes = clip_boxes(pred_boxes, im.shape)
    ############################################### curve
    # Decode the curve (polygon offset) predictions for height and width.
    info_deltas_h = blobs_out['info_pred_h']
    pred_infos_h = info_syn_transform_inv_h(boxes, info_deltas_h)
    info_deltas_w = blobs_out['info_pred_w']
    pred_infos_w = info_syn_transform_inv_w(boxes, info_deltas_w)
    assert len(boxes) == len(pred_infos_h) == len(pred_infos_w)
    ###############################################
    return scores, pred_boxes, pred_infos_h, pred_infos_w
def vis(im, dets, thresh=0.3):
    """Draw detected curved-text polygons on *im* and display the result.

    Parameters
    ----------
    im : ndarray
        BGR image the detections refer to.
    dets : ndarray, shape (N, 33)
        Each row is [x1, y1, x2, y2, score, p0x, p0y, ..., p13x, p13y];
        the 28 trailing values are 14 polygon points relative to the box's
        top-left corner.
    thresh : float
        Minimum score for a detection to be drawn.
    """
    for det_idx in xrange(np.minimum(100, dets.shape[0])):
        score = dets[det_idx, 4]
        if score <= thresh:
            # Skip low-confidence detections before building the point list.
            continue
        bbox = dets[det_idx, :4]
        info_bbox = dets[det_idx, 5:33]  # syn: 14 (x, y) offsets
        # Use a distinct index name so the outer loop index is not shadowed
        # (Python 2 list comprehensions leak their variable into this scope).
        pts = [info_bbox[k] for k in xrange(28)]
        assert len(pts) == 28, 'wrong length.'
        x0, y0 = int(bbox[0]), int(bbox[1])
        for p in xrange(0, 28, 2):
            # Connect each point to the next; the modulo wrap closes the polygon.
            cv2.line(im,
                     (x0 + int(pts[p % 28]), y0 + int(pts[(p + 1) % 28])),
                     (x0 + int(pts[(p + 2) % 28]), y0 + int(pts[(p + 3) % 28])),
                     (0, 0, 255), 2)
    im = cv2.resize(im, (1280, 720))  # visualization
    cv2.imshow('Dectecting results syn.', im)
    cv2.waitKey(0)
def nps(dets, cdets):
    """Drop detections whose 14-point polygon is invalid or too small.

    Rows are removed from ``dets`` and ``cdets`` in lockstep; the filtered
    pair is returned.
    """
    bad_rows = []
    for row in xrange(cdets.shape[0]):
        box = cdets[row, :4]
        offsets = cdets[row, 5:33]
        base_x, base_y = int(box[0]), int(box[1])
        corners = [(base_x + offsets[k], base_y + offsets[k + 1])
                   for k in xrange(0, 28, 2)]
        poly = Polygon(corners)
        if not poly.is_valid:
            print('non-ploygon detected')
            bad_rows.append(row)
        # Tiny polygons are rejected too; a row failing both checks is listed
        # twice, which np.delete tolerates.
        if int(poly.area) < 10:
            print('neg-ploygon')
            bad_rows.append(row)
    return np.delete(dets, bad_rows, 0), np.delete(cdets, bad_rows, 0)
if __name__ == "__main__":
    # Load the experiment config and the trained detector once.
    cfg_from_file(cofig_file)
    net = caffe.Net(net_prototxt, model, caffe.TEST)
    for image in images:
        im = cv2.imread(image)
        scores, boxes, infos_h, infos_w = im_detect(net, im, None)
        assert(scores.shape[0] == infos_h.shape[0] == infos_w.shape[0]) , 'length mismatch'
        # Keep detections with foreground probability above 0.5.
        inds = np.where(scores[:, 1] > 0.5)[0]
        cls_scores = scores[inds, 1]
        cls_boxes = boxes[inds, 4:8]
        ## curve
        cls_infos_h = infos_h[inds, :14]
        cls_infos_w = infos_w[inds, :14]
        # Plain detections: [x1, y1, x2, y2, score].
        cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                .astype(np.float32, copy=False)
        # stack h and w pred.
        # Interleave the 14 width and 14 height offsets into 28 values per row.
        cls_infos = np.zeros((cls_infos_h.shape[0], 28))
        wh_stack_temp = np.dstack((cls_infos_w, cls_infos_h))
        assert(wh_stack_temp.shape[0] == cls_infos.shape[0]), 'wh stack length mismatch.'
        for ixstack, row_cls_infos in enumerate(cls_infos):
            cls_infos[ixstack] = wh_stack_temp[ixstack].ravel()
        # Curve-augmented detections: [x1, y1, x2, y2, score, 28 offsets].
        cls_dets_withInfo = np.hstack((cls_boxes, cls_scores[:, np.newaxis], cls_infos)) \
                .astype(np.float32, copy=False)
        # Drop rows with degenerate polygons before suppression.
        cls_dets, cls_dets_withInfo = nps(cls_dets, cls_dets_withInfo)
        # Polygon NMS when enabled in the config, otherwise box NMS.
        if cfg.TEST.USE_PNMS:
            keep = pnms(cls_dets_withInfo, cfg.TEST.PNMS)
        else:
            keep = nms(cls_dets, cfg.TEST.NMS)
        cls_dets = cls_dets[keep, :]
        cls_dets_withInfo = cls_dets_withInfo[keep, :]
        vis(im, cls_dets_withInfo, 0.1)
| [
"fast_rcnn.bbox_transform.info_syn_transform_inv_h",
"fast_rcnn.bbox_transform.bbox_transform_inv",
"glob.glob",
"cv2.imshow",
"numpy.round",
"caffe.set_device",
"numpy.max",
"fast_rcnn.nms_wrapper.pnms",
"fast_rcnn.nms_wrapper.nms",
"fast_rcnn.bbox_transform.clip_boxes",
"cv2.resize",
"numpy.... | [((378, 398), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (396, 398), False, 'import caffe\n'), ((399, 418), 'caffe.set_device', 'caffe.set_device', (['(0)'], {}), '(0)\n', (415, 418), False, 'import caffe\n'), ((573, 606), 'glob.glob', 'glob.glob', (['"""../images/demo/*.jpg"""'], {}), "('../images/demo/*.jpg')\n", (582, 606), False, 'import os, glob\n'), ((760, 781), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (766, 781), True, 'import numpy as np\n'), ((800, 821), 'numpy.max', 'np.max', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (806, 821), True, 'import numpy as np\n'), ((1430, 1460), 'utils.blob.im_list_to_blob', 'im_list_to_blob', (['processed_ims'], {}), '(processed_ims)\n', (1445, 1460), False, 'from utils.blob import im_list_to_blob\n'), ((1878, 1963), 'numpy.array', 'np.array', (['[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]]'], {'dtype': 'np.float32'}), '([[im_blob.shape[2], im_blob.shape[3], im_scales[0]]], dtype=np.float32\n )\n', (1886, 1963), True, 'import numpy as np\n'), ((2525, 2562), 'fast_rcnn.bbox_transform.bbox_transform_inv', 'bbox_transform_inv', (['boxes', 'box_deltas'], {}), '(boxes, box_deltas)\n', (2543, 2562), False, 'from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, info_syn_transform_inv_h, info_syn_transform_inv_w\n'), ((2580, 2612), 'fast_rcnn.bbox_transform.clip_boxes', 'clip_boxes', (['pred_boxes', 'im.shape'], {}), '(pred_boxes, im.shape)\n', (2590, 2612), False, 'from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, info_syn_transform_inv_h, info_syn_transform_inv_w\n'), ((2736, 2782), 'fast_rcnn.bbox_transform.info_syn_transform_inv_h', 'info_syn_transform_inv_h', (['boxes', 'info_deltas_h'], {}), '(boxes, info_deltas_h)\n', (2760, 2782), False, 'from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, info_syn_transform_inv_h, info_syn_transform_inv_w\n'), ((2847, 2893), 
'fast_rcnn.bbox_transform.info_syn_transform_inv_w', 'info_syn_transform_inv_w', (['boxes', 'info_deltas_w'], {}), '(boxes, info_deltas_w)\n', (2871, 2893), False, 'from fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, info_syn_transform_inv_h, info_syn_transform_inv_w\n'), ((3630, 3657), 'cv2.resize', 'cv2.resize', (['im', '(1280, 720)'], {}), '(im, (1280, 720))\n', (3640, 3657), False, 'import cv2\n'), ((3678, 3719), 'cv2.imshow', 'cv2.imshow', (['"""Dectecting results syn."""', 'im'], {}), "('Dectecting results syn.', im)\n", (3688, 3719), False, 'import cv2\n'), ((3724, 3738), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3735, 3738), False, 'import cv2\n'), ((4281, 4312), 'numpy.delete', 'np.delete', (['dets', 'delete_inds', '(0)'], {}), '(dets, delete_inds, 0)\n', (4290, 4312), True, 'import numpy as np\n'), ((4325, 4357), 'numpy.delete', 'np.delete', (['cdets', 'delete_inds', '(0)'], {}), '(cdets, delete_inds, 0)\n', (4334, 4357), True, 'import numpy as np\n'), ((4413, 4438), 'fast_rcnn.config.cfg_from_file', 'cfg_from_file', (['cofig_file'], {}), '(cofig_file)\n', (4426, 4438), False, 'from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list\n'), ((4449, 4491), 'caffe.Net', 'caffe.Net', (['net_prototxt', 'model', 'caffe.TEST'], {}), '(net_prototxt, model, caffe.TEST)\n', (4458, 4491), False, 'import caffe\n'), ((1184, 1278), 'cv2.resize', 'cv2.resize', (['im_orig', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2\n .INTER_LINEAR)\n', (1194, 1278), False, 'import cv2\n'), ((1479, 1505), 'numpy.array', 'np.array', (['im_scale_factors'], {}), '(im_scale_factors)\n', (1487, 1505), True, 'import numpy as np\n'), ((3121, 3151), 'numpy.minimum', 'np.minimum', (['(100)', 'dets.shape[0]'], {}), '(100, dets.shape[0])\n', (3131, 3151), True, 'import numpy as np\n'), ((4531, 4548), 'cv2.imread', 'cv2.imread', (['image'], {}), 
'(image)\n', (4541, 4548), False, 'import cv2\n'), ((5112, 5148), 'numpy.zeros', 'np.zeros', (['(cls_infos_h.shape[0], 28)'], {}), '((cls_infos_h.shape[0], 28))\n', (5120, 5148), True, 'import numpy as np\n'), ((5173, 5210), 'numpy.dstack', 'np.dstack', (['(cls_infos_w, cls_infos_h)'], {}), '((cls_infos_w, cls_infos_h))\n', (5182, 5210), True, 'import numpy as np\n'), ((1048, 1080), 'numpy.round', 'np.round', (['(im_scale * im_size_max)'], {}), '(im_scale * im_size_max)\n', (1056, 1080), True, 'import numpy as np\n'), ((4730, 4758), 'numpy.where', 'np.where', (['(scores[:, 1] > 0.5)'], {}), '(scores[:, 1] > 0.5)\n', (4738, 4758), True, 'import numpy as np\n'), ((5692, 5730), 'fast_rcnn.nms_wrapper.pnms', 'pnms', (['cls_dets_withInfo', 'cfg.TEST.PNMS'], {}), '(cls_dets_withInfo, cfg.TEST.PNMS)\n', (5696, 5730), False, 'from fast_rcnn.nms_wrapper import nms, pnms\n'), ((5764, 5791), 'fast_rcnn.nms_wrapper.nms', 'nms', (['cls_dets', 'cfg.TEST.NMS'], {}), '(cls_dets, cfg.TEST.NMS)\n', (5767, 5791), False, 'from fast_rcnn.nms_wrapper import nms, pnms\n'), ((4965, 5014), 'numpy.hstack', 'np.hstack', (['(cls_boxes, cls_scores[:, np.newaxis])'], {}), '((cls_boxes, cls_scores[:, np.newaxis]))\n', (4974, 5014), True, 'import numpy as np\n'), ((5456, 5516), 'numpy.hstack', 'np.hstack', (['(cls_boxes, cls_scores[:, np.newaxis], cls_infos)'], {}), '((cls_boxes, cls_scores[:, np.newaxis], cls_infos))\n', (5465, 5516), True, 'import numpy as np\n')] |
import pytest
import sys
sys.path.insert(0,"..")
import autogenes as ag
import numpy as np
import pandas as pd
import anndata
def test_pareto_fitness():
  """Pareto front is exposed consistently and respects highly-variable genes."""
  # The public pareto accessor must mirror the internal optimizer state.
  ag.init(np.zeros((3, 10)))
  ag.optimize(mode='fixed', nfeatures=5, verbose=False,
              weights=(1, -1), objectives=('distance', 'correlation'))
  assert np.array_equal(ag.pareto(), ag.main.main.pareto)

  # With use_highly_variable, only the first four genes may ever be selected.
  np.random.seed(20)
  counts = np.random.randint(-5, 5, (2, 10))
  adata = anndata.AnnData(counts)
  adata.obs['celltype'] = ['1', '2']
  hv_mask = np.array([True, True, True, True] + [False] * 6)
  adata.var['highly_variable'] = hv_mask
  ag.init(adata, use_highly_variable=True)
  ag.optimize(ngen=3, mode='fixed', nfeatures=2, verbose=False,
              weights=(1, -1), objectives=('distance', 'correlation'))
  for solution in ag.pareto():
    assert np.array_equal(solution[4:], [False] * 6)
  print(ag.pareto())
  print(ag.fitness_matrix())
  assert ag.fitness_matrix().shape == (len(ag.pareto()), 2)
assert ag.fitness_matrix().shape == (len(ag.pareto()), 2)
def test_process_selection():
  """__process_selection expands a selection over the pre-selected genes only."""
  # Five genes survive pre-selection (indices 0, 1, 3, 5, 6).
  ag.main.pre_selection = np.array([True, True, False, True, False, True, True])
  expand = ag.main._Interface__process_selection

  keep_middle = np.array([False, True, True, False, False])
  keep_front = np.array([True, True, False, False, False])
  wrong_length = np.array([True, True, False, False, False, False])

  assert np.all(expand(keep_middle) == [False, True, False, True, False, False, False])
  assert np.all(expand(keep_front) == [True, True, False, False, False, False, False])
  # A selection whose length disagrees with pre_selection must raise.
  with pytest.raises(Exception):
    assert expand(wrong_length)
def test_select():
  """select() stores the processed selection and supports copy semantics."""
  # Three "cell types", each expressing a disjoint pair of genes
  # (block-diagonal 3x6 matrix of ones).
  expr = np.repeat(np.eye(3), 2, axis=1)
  adata = anndata.AnnData(expr)
  adata.obs['celltype'] = ['c1', 'c2', 'c3']
  adata.var['highly_variable'] = [True, True, True, True, False, False]
  ag.init(adata, use_highly_variable=True)
  ag.optimize(ngen=10, seed=0, mode='fixed', nfeatures=3, verbose=False,
              weights=(1, -1), objectives=('distance', 'correlation'))

  raw_selection = ag.main.main.select(weights=(1, 0))
  expected_raw = [True, True, False, True]
  expected_full = [True, True, False, True, False, False]
  assert np.array_equal(expected_raw, raw_selection)
  assert np.array_equal(expected_full,
                        ag.main._Interface__process_selection(raw_selection))

  # In-place select annotates adata and returns the processed mask.
  returned = ag.select(weights=(1, 0))
  assert np.array_equal(ag.adata().var['autogenes'], expected_full)
  assert np.array_equal(returned, expected_full)
  assert np.array_equal(ag.selection(), expected_full)

  # copy=True must return a new object carrying the same annotation.
  copied = ag.select(weights=(1, 0), copy=True)
  assert not (copied is ag.main.adata)
  assert np.array_equal(copied.var['autogenes'], expected_full)
def test_select2():
  """Different weights stored under distinct keys give distinct annotations."""
  np.random.seed(0)
  counts = np.random.randint(-5, 5, (3, 5))
  adata = anndata.AnnData(counts)
  adata.obs['celltype'] = ['c1', 'c2', 'c3']
  ag.init(adata)
  ag.optimize(ngen=10, offspring_size=100, verbose=False, mode='fixed',
              nfeatures=3, weights=(1, -1), objectives=('distance', 'correlation'))

  ag.select(weights=(1, -1), key_added='autogenes', copy=False)
  ag.select(weights=(1, 0), key_added='autogenes2', copy=False)
  assert not np.array_equal(ag.adata().var['autogenes'], ag.adata().var['autogenes2'])

  first_copy = ag.select(weights=(1, -1), key_added='autogenes', copy=True)
  second_copy = ag.select(weights=(1, 0), key_added='autogenes2', copy=True)
  # NOTE(review): var_names holds gene identifiers, so these membership checks
  # look vacuously true; the intent may have been the .var columns — confirm.
  assert not ('autogenes2' in first_copy.var_names)
  assert not ('autogenes' in second_copy.var_names)
| [
"autogenes.selection",
"numpy.random.seed",
"numpy.array_equal",
"autogenes.main.main.select",
"numpy.zeros",
"sys.path.insert",
"autogenes.fitness_matrix",
"pytest.raises",
"autogenes.optimize",
"numpy.array",
"numpy.random.randint",
"autogenes.main._Interface__process_selection",
"autogene... | [((26, 50), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (41, 50), False, 'import sys\n'), ((187, 301), 'autogenes.optimize', 'ag.optimize', ([], {'mode': '"""fixed"""', 'nfeatures': '(5)', 'verbose': '(False)', 'weights': '(1, -1)', 'objectives': "('distance', 'correlation')"}), "(mode='fixed', nfeatures=5, verbose=False, weights=(1, -1),\n objectives=('distance', 'correlation'))\n", (198, 301), True, 'import autogenes as ag\n'), ((353, 371), 'numpy.random.seed', 'np.random.seed', (['(20)'], {}), '(20)\n', (367, 371), True, 'import numpy as np\n'), ((499, 547), 'numpy.array', 'np.array', (['([True, True, True, True] + [False] * 6)'], {}), '([True, True, True, True] + [False] * 6)\n', (507, 547), True, 'import numpy as np\n'), ((545, 585), 'autogenes.init', 'ag.init', (['adata'], {'use_highly_variable': '(True)'}), '(adata, use_highly_variable=True)\n', (552, 585), True, 'import autogenes as ag\n'), ((587, 710), 'autogenes.optimize', 'ag.optimize', ([], {'ngen': '(3)', 'mode': '"""fixed"""', 'nfeatures': '(2)', 'verbose': '(False)', 'weights': '(1, -1)', 'objectives': "('distance', 'correlation')"}), "(ngen=3, mode='fixed', nfeatures=2, verbose=False, weights=(1, -\n 1), objectives=('distance', 'correlation'))\n", (598, 710), True, 'import autogenes as ag\n'), ((710, 721), 'autogenes.pareto', 'ag.pareto', ([], {}), '()\n', (719, 721), True, 'import autogenes as ag\n'), ((937, 991), 'numpy.array', 'np.array', (['[True, True, False, True, False, True, True]'], {}), '([True, True, False, True, False, True, True])\n', (945, 991), True, 'import numpy as np\n'), ((1063, 1106), 'numpy.array', 'np.array', (['[False, True, True, False, False]'], {}), '([False, True, True, False, False])\n', (1071, 1106), True, 'import numpy as np\n'), ((1110, 1153), 'numpy.array', 'np.array', (['[True, True, False, False, False]'], {}), '([True, True, False, False, False])\n', (1118, 1153), True, 'import numpy as np\n'), ((1157, 1207), 
'numpy.array', 'np.array', (['[True, True, False, False, False, False]'], {}), '([True, True, False, False, False, False])\n', (1165, 1207), True, 'import numpy as np\n'), ((1470, 1486), 'numpy.zeros', 'np.zeros', (['(3, 6)'], {}), '((3, 6))\n', (1478, 1486), True, 'import numpy as np\n'), ((1551, 1572), 'anndata.AnnData', 'anndata.AnnData', (['data'], {}), '(data)\n', (1566, 1572), False, 'import anndata\n'), ((1686, 1726), 'autogenes.init', 'ag.init', (['adata'], {'use_highly_variable': '(True)'}), '(adata, use_highly_variable=True)\n', (1693, 1726), True, 'import autogenes as ag\n'), ((1728, 1859), 'autogenes.optimize', 'ag.optimize', ([], {'ngen': '(10)', 'seed': '(0)', 'mode': '"""fixed"""', 'nfeatures': '(3)', 'verbose': '(False)', 'weights': '(1, -1)', 'objectives': "('distance', 'correlation')"}), "(ngen=10, seed=0, mode='fixed', nfeatures=3, verbose=False,\n weights=(1, -1), objectives=('distance', 'correlation'))\n", (1739, 1859), True, 'import autogenes as ag\n'), ((1857, 1892), 'autogenes.main.main.select', 'ag.main.main.select', ([], {'weights': '(1, 0)'}), '(weights=(1, 0))\n', (1876, 1892), True, 'import autogenes as ag\n'), ((1998, 2025), 'numpy.array_equal', 'np.array_equal', (['s_target', 's'], {}), '(s_target, s)\n', (2012, 2025), True, 'import numpy as np\n'), ((2121, 2146), 'autogenes.select', 'ag.select', ([], {'weights': '(1, 0)'}), '(weights=(1, 0))\n', (2130, 2146), True, 'import autogenes as ag\n'), ((2228, 2267), 'numpy.array_equal', 'np.array_equal', (['res', 's_target_processed'], {}), '(res, s_target_processed)\n', (2242, 2267), True, 'import numpy as np\n'), ((2343, 2379), 'autogenes.select', 'ag.select', ([], {'weights': '(1, 0)', 'copy': '(True)'}), '(weights=(1, 0), copy=True)\n', (2352, 2379), True, 'import autogenes as ag\n'), ((2430, 2492), 'numpy.array_equal', 'np.array_equal', (["res_adata.var['autogenes']", 's_target_processed'], {}), "(res_adata.var['autogenes'], s_target_processed)\n", (2444, 2492), True, 'import numpy as 
np\n'), ((2517, 2534), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2531, 2534), True, 'import numpy as np\n'), ((2640, 2654), 'autogenes.init', 'ag.init', (['adata'], {}), '(adata)\n', (2647, 2654), True, 'import autogenes as ag\n'), ((2657, 2800), 'autogenes.optimize', 'ag.optimize', ([], {'ngen': '(10)', 'offspring_size': '(100)', 'verbose': '(False)', 'mode': '"""fixed"""', 'nfeatures': '(3)', 'weights': '(1, -1)', 'objectives': "('distance', 'correlation')"}), "(ngen=10, offspring_size=100, verbose=False, mode='fixed',\n nfeatures=3, weights=(1, -1), objectives=('distance', 'correlation'))\n", (2668, 2800), True, 'import autogenes as ag\n'), ((2792, 2853), 'autogenes.select', 'ag.select', ([], {'weights': '(1, -1)', 'key_added': '"""autogenes"""', 'copy': '(False)'}), "(weights=(1, -1), key_added='autogenes', copy=False)\n", (2801, 2853), True, 'import autogenes as ag\n'), ((2853, 2914), 'autogenes.select', 'ag.select', ([], {'weights': '(1, 0)', 'key_added': '"""autogenes2"""', 'copy': '(False)'}), "(weights=(1, 0), key_added='autogenes2', copy=False)\n", (2862, 2914), True, 'import autogenes as ag\n'), ((3007, 3067), 'autogenes.select', 'ag.select', ([], {'weights': '(1, -1)', 'key_added': '"""autogenes"""', 'copy': '(True)'}), "(weights=(1, -1), key_added='autogenes', copy=True)\n", (3016, 3067), True, 'import autogenes as ag\n'), ((3072, 3132), 'autogenes.select', 'ag.select', ([], {'weights': '(1, 0)', 'key_added': '"""autogenes2"""', 'copy': '(True)'}), "(weights=(1, 0), key_added='autogenes2', copy=True)\n", (3081, 3132), True, 'import autogenes as ag\n'), ((167, 184), 'numpy.zeros', 'np.zeros', (['(3, 10)'], {}), '((3, 10))\n', (175, 184), True, 'import numpy as np\n'), ((316, 327), 'autogenes.pareto', 'ag.pareto', ([], {}), '()\n', (325, 327), True, 'import autogenes as ag\n'), ((398, 431), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(5)', '(2, 10)'], {}), '(-5, 5, (2, 10))\n', (415, 431), True, 'import numpy as np\n'), 
((734, 768), 'numpy.array_equal', 'np.array_equal', (['p[4:]', '([False] * 6)'], {}), '(p[4:], [False] * 6)\n', (748, 768), True, 'import numpy as np\n'), ((776, 787), 'autogenes.pareto', 'ag.pareto', ([], {}), '()\n', (785, 787), True, 'import autogenes as ag\n'), ((797, 816), 'autogenes.fitness_matrix', 'ag.fitness_matrix', ([], {}), '()\n', (814, 816), True, 'import autogenes as ag\n'), ((1381, 1405), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1394, 1405), False, 'import pytest\n'), ((2070, 2110), 'autogenes.main._Interface__process_selection', 'ag.main._Interface__process_selection', (['s'], {}), '(s)\n', (2107, 2110), True, 'import autogenes as ag\n'), ((2292, 2306), 'autogenes.selection', 'ag.selection', ([], {}), '()\n', (2304, 2306), True, 'import autogenes as ag\n'), ((2561, 2593), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(5)', '(3, 5)'], {}), '(-5, 5, (3, 5))\n', (2578, 2593), True, 'import numpy as np\n'), ((828, 847), 'autogenes.fitness_matrix', 'ag.fitness_matrix', ([], {}), '()\n', (845, 847), True, 'import autogenes as ag\n'), ((862, 873), 'autogenes.pareto', 'ag.pareto', ([], {}), '()\n', (871, 873), True, 'import autogenes as ag\n'), ((2170, 2180), 'autogenes.adata', 'ag.adata', ([], {}), '()\n', (2178, 2180), True, 'import autogenes as ag\n'), ((2940, 2950), 'autogenes.adata', 'ag.adata', ([], {}), '()\n', (2948, 2950), True, 'import autogenes as ag\n'), ((2969, 2979), 'autogenes.adata', 'ag.adata', ([], {}), '()\n', (2977, 2979), True, 'import autogenes as ag\n')] |
#-------------------------------------------------------------------------------
# pnorm
# A class to compute the "p norm" of a given vector of data. This is defined as
# ||x||_p = ( \sum_i |x_i|^p )^(1/p)
#
# The special case of p -> \infinity is given by
# ||x||_\infinity = max( |x_i| )
#
# Optionally we can use the "grid p norm" modification, which takes into account
# resolution as
# ||x||_gp = ( \sum_i dx (|x_i|)^p )^(1/p)
#-------------------------------------------------------------------------------
import numpy as np
from numpy import linalg as la
class Pnorm:
    """Compute the "p norm" of a data vector.

    ||x||_p = ( sum_i |x_i|^p )^(1/p), with the special case p == "inf"
    giving ||x||_inf = max(|x_i|).  The "grid" variants additionally weight
    each point by a local grid spacing dx derived from the position data.

    Bug fixes relative to the original: `xrange` and `zip(...).sort()` are
    Python-2-only (`zip` returns an iterator in Python 3); replaced with
    `range` and `sorted(zip(...))`.
    """
    #---------------------------------------------------------------------------
    # Constructor.
    #---------------------------------------------------------------------------
    def __init__(self,
                 vectorData,
                 positionData = None,
                 ansData = None):
        """Store |vectorData|, or the pointwise error |vectorData - ansData| if ansData is given."""
        self.positionData = np.array(positionData)
        if ansData is None:
            self.vectorData = np.absolute(np.array(vectorData))
        else:
            self.vectorData = np.absolute(np.array(vectorData) - np.array(ansData))
        return
    #---------------------------------------------------------------------------
    # Compute the slice weighting, i.e., checking if the points are in range.
    #---------------------------------------------------------------------------
    def computeSliceWeighting(self, positionData, rmin, rmax):
        """Return 0/1 weights selecting points whose position lies in [rmin, rmax].

        A missing rmin/rmax defaults to the min/max of the position data.
        """
        n = len(positionData)
        # Python 3 fix: zip() yields an iterator with no .sort(); use sorted().
        rData = sorted(zip(positionData[:], range(n)))
        if rmin is None:
            rmin = min(x[0] for x in rData)
        if rmax is None:
            rmax = max(x[0] for x in rData)
        weightData = np.zeros(n)
        for i in range(n):  # Python 3 fix: xrange -> range
            if rmin <= positionData[i] <= rmax:
                weightData[i] = 1.0
        return weightData
    #---------------------------------------------------------------------------
    # Compute the grid weighting based on the given position data.
    #---------------------------------------------------------------------------
    def computeGridWeighting(self, positionData, rmin, rmax):
        """Return per-point grid spacings dx, with cell edges clipped to [rmin, rmax].

        Each point's weight is the length of its cell, bounded by the
        midpoints to its sorted neighbours (half-cells at the ends).
        """
        n = len(positionData)
        # Python 3 fix: zip() yields an iterator with no .sort(); use sorted().
        rData = sorted(zip(positionData[:], range(n)))
        if rmin is None:
            rmin = min(x[0] for x in rData)
        if rmax is None:
            rmax = max(x[0] for x in rData)
        # Now build up the grid weighting based on the dr steps.
        weightData = np.zeros(n)
        for j in range(n):  # Python 3 fix: xrange -> range
            i = rData[j][1]
            if j == 0:
                r0 = max(rmin, min(rmax, rData[j][0]))
            else:
                r0 = max(rmin, min(rmax, 0.5*(rData[j-1][0] + rData[j][0])))
            if j == n - 1:
                r1 = max(rmin, min(rmax, rData[j][0]))
            else:
                r1 = max(rmin, min(rmax, 0.5*(rData[j][0] + rData[j+1][0])))
            weightData[i] = r1 - r0
            assert weightData[i] >= 0.0
        # That's it, we now have the grid weighting.
        assert len(weightData) == len(positionData)
        assert min(weightData) >= 0.0
        return weightData
    #---------------------------------------------------------------------------
    # Compute the p norm.
    #---------------------------------------------------------------------------
    def pnorm(self, p,
              rmin = None,
              rmax = None):
        """Compute the p norm over points whose position lies in [rmin, rmax].

        p may be a number or the string "inf" (max norm).
        """
        weightData = self.computeSliceWeighting(self.positionData, rmin, rmax)
        if p == "inf":
            Ln = la.norm(weightData*self.vectorData, np.inf)
        else:
            # Normalize by the number of selected points (guarding against zero).
            Ln = la.norm(weightData*self.vectorData, p)/max(1e-30, sum(weightData))**(1.0/p)
        return Ln
    #---------------------------------------------------------------------------
    # Compute the grid p norm.
    #---------------------------------------------------------------------------
    def gridpnorm(self, p,
                  rmin = None,
                  rmax = None):
        """Compute the grid-weighted p norm; "inf" falls back to the slice weighting."""
        if p == "inf":
            weightData = self.computeSliceWeighting(self.positionData, rmin, rmax)
            Ln = la.norm(weightData*self.vectorData, np.inf)
        else:
            weightData = self.computeGridWeighting(self.positionData, rmin, rmax)
            Ln = la.norm(weightData*self.vectorData, p)/max(1e-30, sum(weightData))**(1.0/p)
        return Ln
| [
"numpy.zeros",
"numpy.linalg.norm",
"numpy.array"
] | [((921, 943), 'numpy.array', 'np.array', (['positionData'], {}), '(positionData)\n', (929, 943), True, 'import numpy as np\n'), ((1794, 1805), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1802, 1805), True, 'import numpy as np\n'), ((2632, 2643), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2640, 2643), True, 'import numpy as np\n'), ((3683, 3728), 'numpy.linalg.norm', 'la.norm', (['(weightData * self.vectorData)', 'np.inf'], {}), '(weightData * self.vectorData, np.inf)\n', (3690, 3728), True, 'from numpy import linalg as la\n'), ((4260, 4305), 'numpy.linalg.norm', 'la.norm', (['(weightData * self.vectorData)', 'np.inf'], {}), '(weightData * self.vectorData, np.inf)\n', (4267, 4305), True, 'from numpy import linalg as la\n'), ((1014, 1034), 'numpy.array', 'np.array', (['vectorData'], {}), '(vectorData)\n', (1022, 1034), True, 'import numpy as np\n'), ((3758, 3798), 'numpy.linalg.norm', 'la.norm', (['(weightData * self.vectorData)', 'p'], {}), '(weightData * self.vectorData, p)\n', (3765, 3798), True, 'from numpy import linalg as la\n'), ((4417, 4457), 'numpy.linalg.norm', 'la.norm', (['(weightData * self.vectorData)', 'p'], {}), '(weightData * self.vectorData, p)\n', (4424, 4457), True, 'from numpy import linalg as la\n'), ((1092, 1112), 'numpy.array', 'np.array', (['vectorData'], {}), '(vectorData)\n', (1100, 1112), True, 'import numpy as np\n'), ((1115, 1132), 'numpy.array', 'np.array', (['ansData'], {}), '(ansData)\n', (1123, 1132), True, 'import numpy as np\n')] |
import anndata
import numpy as np
import pandas as pd
import scanpy.api as sc
import utils.hgnc
import utils.ontology
def basic_curation(adata):
    """Annotate `adata` in place with Corpora schema metadata.

    Adds per-cell fields (assay, disease) to ``adata.obs`` and dataset-level
    fields (organism, title, contributors, project info) to ``adata.uns``.
    """
    # Per-cell annotations (a scalar assignment broadcasts to every obs row).
    adata.obs["assay"] = "Microwell-seq"
    adata.obs["assay_ontology"] = ""
    adata.obs["disease_ontology"] = "PATO:0000461"
    adata.obs["disease"] = utils.ontology.get_ontology_label("PATO:0000461")
    # Dataset-level annotations.
    adata.uns["organism_ontology"] = "NCBITaxon:9606"
    adata.uns["organism"] = utils.ontology.get_ontology_label("NCBITaxon:9606")
    adata.uns["title"] = "Construction of a human cell landscape at single-cell level"
    adata.uns["contributors"] = [
        {"name": "<NAME>", "institution": "Zhejiang University School of Medicine", "email": "<EMAIL>"},
        {"name": "<NAME>", "institution": "Zhejiang University School of Medicine", "email": "<EMAIL>"},
    ]
    adata.uns["publication_doi"] = "https://doi.org/10.1038/s41586-020-2157-4"
    adata.uns["project_name"] = adata.uns["title"]
    # Bug fix: the original literal ended "...provide a" with no trailing
    # space, so implicit concatenation produced "provide auseful resource".
    # NOTE(review): "diferentiated" is kept as spelled in the source text (sic).
    adata.uns["project_description"] = (
        "Single-cell analysis is a valuable tool for dissecting cellular heterogeneity in complex systems. "
        "However, a comprehensive single-cell atlas has not been achieved for humans. Here we use single-cell "
        "mRNA sequencing to determine the cell-type composition of all major human organs and construct a scheme "
        "for the human cell landscape (HCL). We have uncovered a single-cell hierarchy for many tissues that have "
        "not been well characterized. We established a 'single-cell HCL analysis' pipeline that helps to define "
        "human cell identity. Finally, we performed a single-cell comparative analysis of landscapes from human "
        "and mouse to identify conserved genetic networks. We found that stem and progenitor cells exhibit "
        "strong transcriptomic stochasticity, whereas diferentiated cells are more distinct. Our results provide a "
        "useful resource for the study of human biology."
    )
    adata.uns["project_protocol_links"] = []
    adata.uns["project_raw_data_links"] = ["https://figshare.com/articles/HCL_DGE_Data/7235471"]
    adata.uns["project_other_links"] = [
        "https://db.cngb.org/HCL/",
        "https://github.com/ggjlab/HCL/",
    ]
def remix(adata):
    """Remap `adata` metadata onto ontology terms and collapse genes to HGNC symbols.

    Adds tissue, ethnicity, sex and development-stage annotations to
    ``adata.obs``, then returns a new AnnData whose var index uses upgraded
    HGNC gene symbols (duplicate symbols summed in linear expression space).
    """
    # Handle tissue. This one has lots of them, and the original name collides with the corpora name
    adata.obs.rename(columns={"tissue": "original_tissue"}, inplace=True)
    # Source sample name -> UBERON tissue term ("HESC" has no UBERON term).
    tissue_ontology_map = {
        "AdultLung": "UBERON:0002048",
        "FetalIntestine": "UBERON:0000160",
        "AdultAdrenalGland": "UBERON:0002369",
        "AdultKidney": "UBERON:0002113",
        "FetalKidney": "UBERON:0002113",
        "AdultPleura": "UBERON:0000977",
        "FetalPancreas": "UBERON:0001264",
        "FetalMuscle": "UBERON:0001630",
        "FetalLiver": "UBERON:0002107",
        "AdultPeripheralBlood": "UBERON:0000178",
        "AdultTransverseColon": "UBERON:0001157",
        "CordBloodCD34P": "UBERON:0012168",
        "AdultSpleen": "UBERON:0002106",
        "AdultStomach": "UBERON:0000945",
        "FetalAdrenalGland": "UBERON:0002369",
        "FetalBrain": "UBERON:0000955",
        "FetalMaleGonad": "UBERON:0000473",
        "AdultOmentum": "UBERON:0003688",
        "AdultThyroid": "UBERON:0002046",
        "AdultEsophagus": "UBERON:0001043",
        "AdultLiver": "UBERON:0002107",
        "AdultTrachea": "UBERON:0003126",
        "ChorionicVillus": "UBERON:0007106",
        "AdultGallbladder": "UBERON:0002110",
        "AdultPancreas": "UBERON:0001264",
        "AdultArtery": "UBERON:0001637",
        "FetalLung": "UBERON:0002048",
        "Placenta": "UBERON:0001987",
        "AdultTemporalLobe": "UBERON:0001871",
        "AdultBladder": "UBERON:0018707",
        "AdultBoneMarrow": "UBERON:0002371",
        "AdultCervix": "UBERON:0000002",
        "FetalHeart": "UBERON:0000948",
        "FetalStomach": "UBERON:0000945",
        "AdultMuscle": "UBERON:0001630",
        "AdultUterus": "UBERON:0000995",
        "AdultCerebellum": "UBERON:0002037",
        "FetalSkin": "UBERON:0002097",
        "FetalFemaleGonad": "UBERON:0000992",
        "CordBlood": "UBERON:0012168",
        "AdultFallopiantube": "UBERON:0003889",
        "FetalRib": "UBERON:0002228",
        "FetalSpinalCord": "UBERON:0002240",
        "NeonatalAdrenalGland": "UBERON:0002369",
        "AdultRectum": "UBERON:0001052",
        "AdultJeJunum": "UBERON:0002115",
        "FetalCalvaria": "UBERON:0004339",
        "AdultDuodenum": "UBERON:0002114",
        "FetalThymus": "UBERON:0002370",
        "AdultEpityphlon": "UBERON:0001154",
        "AdultIleum": "UBERON:0002116",
        "AdultSigmoidColon": "UBERON:0001159",
        "AdultHeart": "UBERON:0000948",
        "AdultProstate": "UBERON:0002367",
        "AdultUreter": "UBERON:0000056",
        "AdultAscendingColon": "UBERON:0001156",
        "FetalEyes": "UBERON:0000970",
        "HESC": "",
        "AdultAdipose": "UBERON:0001013",
    }
    # Resolve each ontology id to its human-readable label; HESC keeps its name.
    tissue_map = {k: utils.ontology.get_ontology_label(v) for k, v in tissue_ontology_map.items()}
    tissue_map["HESC"] = "HESC"
    adata.obs["tissue_ontology"] = adata.obs["original_tissue"].replace(tissue_ontology_map, inplace=False)
    adata.obs["tissue"] = adata.obs["original_tissue"].replace(tissue_map, inplace=False)
    # Cell types are left unannotated here.
    adata.obs["cell_type_ontology"] = ""
    adata.obs["cell_type"] = ""
    adata.obs["ethnicity_ontology"] = "HANCESTRO:0027"
    adata.obs["ethnicity"] = utils.ontology.get_ontology_label("HANCESTRO:0027")
    adata.obs["sex"] = "unknown"
    # Derive an HsapDv development-stage term from each sample-name prefix.
    development_stage_ontology_map = {}
    for k in tissue_ontology_map:
        if k.startswith("Adult") or k in (
            "CordBlood",
            "Placenta",
            "ChorionicVillus",
            "CordBloodCD34P",
        ):
            development_stage_ontology_map[k] = "HsapDv:0000087"
        elif k.startswith("Fetal"):
            development_stage_ontology_map[k] = "HsapDv:0000037"
        elif k.startswith("Neonatal"):
            development_stage_ontology_map[k] = "HsapDv:0000082"
        elif k == "HESC":
            development_stage_ontology_map[k] = "HsapDv:0000002"
    development_stage_map = {k: utils.ontology.get_ontology_label(v) for k, v in development_stage_ontology_map.items()}
    adata.obs["development_stage_ontology"] = adata.obs["original_tissue"].replace(
        development_stage_ontology_map, inplace=False
    )
    adata.obs["development_stage"] = adata.obs["original_tissue"].replace(development_stage_map, inplace=False)
    adata.uns["layer_descriptions"] = {"X": "log1p CPM"}
    # Collapse duplicate upgraded gene symbols: X holds log1p CPM, so undo the
    # log (expm1), sum duplicate columns in linear space, then re-apply log1p.
    upgraded_var_index = utils.hgnc.get_upgraded_var_index(adata.var)
    merged_df = pd.DataFrame(np.expm1(adata.X), index=adata.obs.index, columns=upgraded_var_index).sum(
        axis=1, level=0, skipna=False
    )
    remix_adata = anndata.AnnData(
        X=np.log1p(merged_df.to_numpy()),
        obs=adata.obs,
        var=merged_df.columns.to_frame(name="hgnc_gene_symbol"),
        uns=adata.uns,
        obsm=adata.obsm,
        varm=adata.varm,
    )
    return remix_adata
def main():
    """Run the full HCL pipeline: read raw h5ad, curate, write, remix, write."""
    original_filename = "human_cell_landscape-3-original.h5ad"
    curated_filename = "human_cell_landscape-3-curated.h5ad"
    remixed_filename = "human_cell_landscape-3-remixed.h5ad"
    # Read raw, X has most of the genes filtered out
    adata = sc.read_h5ad(original_filename).raw.to_adata()
    basic_curation(adata)
    adata.write(curated_filename, compression="gzip")
    remix_adata = remix(adata)
    remix_adata.write(remixed_filename, compression="gzip")


# Guard the entry point so importing this module does not trigger the pipeline.
if __name__ == "__main__":
    main()
| [
"scanpy.api.read_h5ad",
"numpy.expm1"
] | [((6674, 6691), 'numpy.expm1', 'np.expm1', (['adata.X'], {}), '(adata.X)\n', (6682, 6691), True, 'import numpy as np\n'), ((7327, 7358), 'scanpy.api.read_h5ad', 'sc.read_h5ad', (['original_filename'], {}), '(original_filename)\n', (7339, 7358), True, 'import scanpy.api as sc\n')] |
"""
Starts a Websocket server, with 3 datasets:
MNIST
eICU mortality classification
eICU length of stay regression
Each server keeps a subset of these dataset and has a teparate test set too.
"""
import logging
import syft as sy
from syft.workers import WebsocketServerWorker
import torch
import argparse
from torchvision import datasets
from torchvision import transforms
from sklearn.preprocessing import RobustScaler
import numpy as np
import pandas as pd
def get_mnist_dataset(keep_labels, training=True):
    """
    Sets up MNIST dataset for training or testing.

    Downloads MNIST (if needed), keeps only the examples whose label is in
    `keep_labels`, and wraps the selection in a syft BaseDataset.
    """
    mnist_dataset = datasets.MNIST(
        root="./data",
        train=training,
        download=True,
        transform=transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        ),
    )
    # create mnist training
    # Boolean mask (as uint8) over all examples: 1 where the label is kept.
    indices = np.isin(mnist_dataset.targets, keep_labels).astype("uint8")
    # NOTE(review): `logger` is created in the __main__ block below, so this
    # helper only works when the module is run as a script — confirm callers.
    logger.info("number of true indices: %s", indices.sum())
    # masked_select flattens its result, so transpose to put the example axis
    # last, reshape back to (28, 28, N), then transpose to (N, 28, 28).
    selected_data = (
        torch.masked_select(
            mnist_dataset.data.transpose(0, 2),
            torch.tensor(indices)
        ).view(28, 28, -1).transpose(2, 0)
    )
    logger.info("after selection: %s", selected_data.shape)
    # Apply the same mask to the labels.
    selected_targets = torch.masked_select(
        mnist_dataset.targets,
        torch.tensor(indices)
    )
    return sy.BaseDataset(
        data=selected_data,
        targets=selected_targets,
        transform=mnist_dataset.transform
    )
def get_eicu_dataset(hospitalid, outcome):
    """
    Sets up the eICU dataset for training or testing.

    Reads features from x.csv and outcomes from y.csv, keeps only rows with
    complete outcomes belonging to the given hospital, robust-scales the
    features, and returns them as a syft BaseDataset for `outcome`.
    """
    features = pd.read_csv('x.csv')
    labels = pd.read_csv('y.csv')
    # Drop rows where any outcome value is missing.
    complete_rows = ~(pd.isnull(labels).sum(axis=1) > 0)
    features = features[complete_rows]
    labels = labels[complete_rows]
    # Keep only the rows belonging to the requested hospital (or test split).
    hospital_mask = features.hospitalid.values == hospitalid
    features.drop('hospitalid', axis=1, inplace=True)
    features = features[hospital_mask]
    # Scale robustly so outliers don't dominate.
    x = RobustScaler(quantile_range=(10.0, 90.0)).fit_transform(features.values)
    y = labels[outcome][hospital_mask].values
    return sy.BaseDataset(
        data=torch.from_numpy(x.astype('float32')),
        targets=torch.from_numpy(y.astype('float32'))
    )
def start_websocket_server_worker(id, host, port, hook, verbose, keep_labels=None):
    """
    Helper function for spinning up a websocket server and setting up the local
    datasets: MNIST, eICU for classification and for regression.

    Args:
        id: worker name; must be 'h1', 'h2' or 'h3' (selects the local eICU
            hospital partition below).
        host: host address the websocket server binds to.
        port: port number the websocket server binds to.
        hook: the syft TorchHook instance.
        verbose: forwarded to WebsocketServerWorker.
        keep_labels: MNIST digit labels kept in this worker's training split.

    Returns:
        The WebsocketServerWorker, after server.start() has been called.
    """
    server = WebsocketServerWorker(
        id=id,
        host=host,
        port=port,
        hook=hook,
        verbose=verbose
    )
    # add mnist train & test
    # The test set always contains all 10 digits; only training is partitioned.
    server.add_dataset(
        get_mnist_dataset(keep_labels, training=True),
        key='mnist_train'
    )
    server.add_dataset(
        get_mnist_dataset(list(range(10)), training=False),
        key='mnist_test'
    )
    # add eicu train & test for classification
    # Map the worker id to its eICU hospital; hospital 4 is the shared test set.
    id2hospitalid = {
        'h1': 1,
        'h2': 2,
        'h3': 3,
    }
    server.add_dataset(
        get_eicu_dataset(hospitalid=id2hospitalid[id], outcome='hosp_mort'),
        key='eicu_class_train'
    )
    server.add_dataset(
        get_eicu_dataset(hospitalid=4, outcome='hosp_mort'),
        key='eicu_class_test'
    )
    # add eicu train & test for regression
    server.add_dataset(
        get_eicu_dataset(hospitalid=id2hospitalid[id], outcome='icu_los_hours'),
        key='eicu_reg_train'
    )
    server.add_dataset(
        get_eicu_dataset(hospitalid=4, outcome='icu_los_hours'),
        key='eicu_reg_test'
    )
    server.start()
    return server
if __name__ == "__main__":
    # Logging setup
    # NOTE(review): `logger` is created at module scope here, and the dataset
    # helpers above reference it — they only work when run as a script.
    logger = logging.getLogger("run_websocket_server")
    FORMAT = ("%(asctime)s %(levelname)s %(filename)s(l:%(lineno)d, p:%(process)d) "
              "- %(message)s")
    logging.basicConfig(format=FORMAT)
    logger.setLevel(level=logging.DEBUG)
    # Parse args
    parser = argparse.ArgumentParser(description="Run websocket server worker.")
    parser.add_argument(
        "--port",
        "-p",
        type=int,
        help="port number of the websocket server worker, e.g. --port 8777",
    )
    parser.add_argument(
        "--host", type=str,
        default="localhost",
        help="host for the connection"
    )
    parser.add_argument(
        "--id",
        type=str,
        help="name (id) of the websocket server worker, e.g. --id hospital1"
    )
    parser.add_argument(
        "--verbose",
        "-v",
        action="store_true",
        help="if set, websocket server worker will be started in verbose mode",
    )
    args = parser.parse_args()
    # define which hospital gets which mnist examples for training
    mnist_keep_labels = {
        "h1": [0, 1, 2, 3],
        "h2": [4, 5, 6],
        "h3": [7, 8, 9],
    }
    # Hook and start server
    hook = sy.TorchHook(torch)
    server = start_websocket_server_worker(
        id=args.id,
        host=args.host,
        port=args.port,
        hook=hook,
        verbose=args.verbose,
        keep_labels=mnist_keep_labels[args.id]
    )
| [
"syft.workers.WebsocketServerWorker",
"numpy.isin",
"argparse.ArgumentParser",
"logging.basicConfig",
"pandas.read_csv",
"sklearn.preprocessing.RobustScaler",
"torchvision.transforms.Normalize",
"pandas.isnull",
"syft.TorchHook",
"syft.BaseDataset",
"torch.tensor",
"logging.getLogger",
"torc... | [((1372, 1472), 'syft.BaseDataset', 'sy.BaseDataset', ([], {'data': 'selected_data', 'targets': 'selected_targets', 'transform': 'mnist_dataset.transform'}), '(data=selected_data, targets=selected_targets, transform=\n mnist_dataset.transform)\n', (1386, 1472), True, 'import syft as sy\n'), ((1624, 1644), 'pandas.read_csv', 'pd.read_csv', (['"""x.csv"""'], {}), "('x.csv')\n", (1635, 1644), True, 'import pandas as pd\n'), ((1656, 1676), 'pandas.read_csv', 'pd.read_csv', (['"""y.csv"""'], {}), "('y.csv')\n", (1667, 1676), True, 'import pandas as pd\n'), ((2024, 2065), 'sklearn.preprocessing.RobustScaler', 'RobustScaler', ([], {'quantile_range': '(10.0, 90.0)'}), '(quantile_range=(10.0, 90.0))\n', (2036, 2065), False, 'from sklearn.preprocessing import RobustScaler\n'), ((2547, 2625), 'syft.workers.WebsocketServerWorker', 'WebsocketServerWorker', ([], {'id': 'id', 'host': 'host', 'port': 'port', 'hook': 'hook', 'verbose': 'verbose'}), '(id=id, host=host, port=port, hook=hook, verbose=verbose)\n', (2568, 2625), False, 'from syft.workers import WebsocketServerWorker\n'), ((3721, 3762), 'logging.getLogger', 'logging.getLogger', (['"""run_websocket_server"""'], {}), "('run_websocket_server')\n", (3738, 3762), False, 'import logging\n'), ((3883, 3917), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT'}), '(format=FORMAT)\n', (3902, 3917), False, 'import logging\n'), ((3990, 4057), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run websocket server worker."""'}), "(description='Run websocket server worker.')\n", (4013, 4057), False, 'import argparse\n'), ((4910, 4929), 'syft.TorchHook', 'sy.TorchHook', (['torch'], {}), '(torch)\n', (4922, 4929), True, 'import syft as sy\n'), ((1332, 1353), 'torch.tensor', 'torch.tensor', (['indices'], {}), '(indices)\n', (1344, 1353), False, 'import torch\n'), ((878, 921), 'numpy.isin', 'np.isin', (['mnist_dataset.targets', 'keep_labels'], {}), '(mnist_dataset.targets, 
keep_labels)\n', (885, 921), True, 'import numpy as np\n'), ((751, 772), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (770, 772), False, 'from torchvision import transforms\n'), ((774, 816), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (794, 816), False, 'from torchvision import transforms\n'), ((1741, 1756), 'pandas.isnull', 'pd.isnull', (['df_y'], {}), '(df_y)\n', (1750, 1756), True, 'import pandas as pd\n'), ((1110, 1131), 'torch.tensor', 'torch.tensor', (['indices'], {}), '(indices)\n', (1122, 1131), False, 'import torch\n')] |
import sys
import logging
import heapq
import numpy
import random
import time
import re
import math
from Bio.PDB import *
from Bio import SeqIO
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# python 3 compatibility
from functools import reduce
from builtins import input
sys.path.append('../../')
from config import *
sys.path.append(scripts_dir)
from utils import *
from image_helper import *
# if draw_figures == True:
try:
import pymol
from pymol import stored
except Exception as e:
try:
sys.path.append(pymol_py3_dir)
import pymol
from pymol import stored
except Exception as e:
pass
# logger.error('PyMOL not found.')
# using z-value for all rmsd
# def generate_merging_rmsd_threshold(cluster_pairwise_alignment_details, rmsd_zscore_threshold=-0.75):
# rmsd_list = []
# for (i, r1) in cluster_pairwise_alignment_details:
# average_rmsd, pairwise_scores = cluster_pairwise_alignment_details[(i, r1)]
# for j, r2, rmsd, align_len in pairwise_scores:
# rmsd_list.append(rmsd)
# mean = get_mean(rmsd_list)
# std = numpy.std(rmsd_list)
# merging_rmsd_threshold = round(mean + std * rmsd_zscore_threshold, 4)
# logger.info('Merging threshold is set to: ' + str(merging_rmsd_threshold))
# # mean = get_mean(rmsd_list, True)
# # std = numpy.std(rmsd_list)
# # merging_rmsd_threshold_temp = round(mean + std * rmsd_zscore_threshold, 4)
# # logger.info('Merging threshold could be set to (for median): ' + str(merging_rmsd_threshold_temp))
# return merging_rmsd_threshold
# # previous stable version with condition: align_len_threshold = min(max_align_length * x%, 10)
# def generate_align_length_threshold(cluster_pairwise_alignment_details, threshold_defining_coefficient=0.75):
# align_len_threshold_upper_limit = 10
# max_align_len = 0
# for (i, r1) in cluster_pairwise_alignment_details:
# average_rmsd, pairwise_scores = cluster_pairwise_alignment_details[(i, r1)]
# for j, r2, rmsd, align_len in pairwise_scores:
# if align_len > max_align_len:
# max_align_len = align_len
# # consider doing it using z-value of align lenth distribution
# align_len_threshold = min(round(max_align_len * threshold_defining_coefficient),align_len_threshold_upper_limit)
# return align_len_threshold
# using z-value for all align_len
def generate_align_length_threshold(cluster_pairwise_alignment_details):
    """Derive the minimum alignment-length threshold from config settings.

    Returns the fixed configured value when align_len_threshold_type is
    'length'; for 'z-score' computes mean + std * z over all pairwise
    alignment lengths, optionally capped by 2/3 of the maximum length, and
    floored at min_align_len_threshold. Exits on an unknown type.
    """
    if align_len_threshold_type == 'length':
        return align_len_threshold_value
    if align_len_threshold_type != 'z-score':
        logger.error('Invalid align_len_threshold_type. Exiting...')
        sys.exit()
    all_lengths = []
    longest = 0
    for key in cluster_pairwise_alignment_details:
        _avg_rmsd, pairwise_scores = cluster_pairwise_alignment_details[key]
        for _j, _r2, _rmsd, align_len in pairwise_scores:
            all_lengths.append(align_len)
            longest = max(longest, align_len)
    threshold = int(round(get_mean(all_lengths) + numpy.std(all_lengths) * align_len_zscore_threshold))
    if use_max_align_len_in_equation == True:
        threshold = min(threshold, int(round(longest * 0.66)))
    # logger.info('Align len threshold is set to: ' + str(threshold))
    return max(threshold, min_align_len_threshold)
# # using z-value for all align_len and loop length ratio
# def generate_align_length_threshold(cluster_pairwise_alignment_details, align_len_zscore_threshold=0.0):
# ratio_list = []
# loop_length_list = []
# loop_list = []
# for (i, r1) in cluster_pairwise_alignment_details:
# loop_len1 = get_loop_length(r1.strip().split(':')[1].strip().split('_'))
# if r1 not in loop_list:
# loop_list.append(r1)
# loop_length_list.append(loop_len1)
# average_rmsd, pairwise_scores = cluster_pairwise_alignment_details[(i, r1)]
# for j, r2, rmsd, align_len in pairwise_scores:
# loop_len2 = get_loop_length(r2.strip().split(':')[1].strip().split('_'))
# if r2 not in loop_list:
# loop_list.append(r2)
# loop_length_list.append(loop_len2)
# ratio1 = align_len / float(loop_len1)
# ratio2 = align_len / float(loop_len2)
# ratio_list.append(ratio1)
# ratio_list.append(ratio2)
# # print(get_z_scores(align_len_list))
# # print(get_z_scores(align_len_list, True))
# # sys.exit()
# mean = get_mean(ratio_list)
# # sys.exit()
# std = numpy.std(ratio_list)
# align_len_threshold = int(round(get_mean(loop_length_list) * mean))
# logger.info('Align len threshold is set to: ' + str(align_len_threshold))
# return align_len_threshold
def is_better_rmsd(rmsd_a, rmsd_b, is_normalized_score = False):
    """Return True if rmsd_a beats rmsd_b.

    For RMSD, lower is better; a normalized score reverses the comparison
    (higher is better).
    """
    comparison = compare_rmsd(rmsd_a, rmsd_b)
    if is_normalized_score:
        return comparison < 0
    return comparison > 0
#For RMSD, the lower, the better
#Returns 1 if better RMSD, -1 for worse and 0 for equal
def compare_rmsd(rmsd_a, rmsd_b):
    """Three-way RMSD comparison with a small float tolerance.

    Returns 1 when rmsd_a is definitively better (lower), -1 when
    definitively worse (higher), and 0 when equal within tolerance.
    """
    tolerance = 0.000001
    if rmsd_a < rmsd_b - tolerance:
        return 1
    elif rmsd_a > rmsd_b + tolerance:
        return -1
    return 0
def is_better_alignment_score_length_priority(rmsd_a, alignment_length_a, rmsd_b, alignment_length_b, is_normalized_score = False):
    """Prefer the longer alignment; fall back to RMSD only on a length tie."""
    if alignment_length_a != alignment_length_b:
        return alignment_length_a > alignment_length_b
    return is_better_rmsd(rmsd_a, rmsd_b, is_normalized_score)
def is_better_alignment_score_RMSD_priority(rmsd_a, alignment_length_a, rmsd_b, alignment_length_b, is_normalized_score):
    """Prefer the better RMSD; fall back to alignment length only on an RMSD tie."""
    comparison = compare_rmsd(rmsd_a, rmsd_b)
    if comparison != 0:
        # A normalized score is "the higher, the better", so the sense flips.
        if is_normalized_score:
            return comparison < 0
        return comparison > 0
    return alignment_length_a > alignment_length_b
def is_acceptable_align_len(align_len, align_len_threshold):
    """True when the alignment length meets the minimum threshold."""
    return align_len >= align_len_threshold
def is_acceptable_rmsd(rmsd, align_len, is_length_adjusted_score, merging_rmsd_threshold):
    """True when the (optionally length-adjusted) RMSD is within the merging threshold.

    A length-adjusted score is un-normalized by multiplying with
    sqrt(align_len) before comparing.
    """
    effective_rmsd = rmsd * math.sqrt(align_len) if is_length_adjusted_score else rmsd
    return effective_rmsd <= merging_rmsd_threshold
def extract_alignment_scores(scores):
    """Unpack a 1-4 element score tuple into (rmsd, alignment_length, zscore, alignment_score).

    Fields absent from `scores` default to 0 (ints) / 0.0 (floats).
    Logs an error and returns all defaults for an unsupported length.
    """
    alignment_length = alignment_score = 0
    rmsd = zscore = 0.0
    if len(scores) == 1:
        # Bug fix: the original assigned the whole 1-tuple to rmsd
        # instead of its single element.
        rmsd = scores[0]
    elif len(scores) == 2:
        rmsd, alignment_length = scores
    elif len(scores) == 3:
        rmsd, alignment_length, zscore = scores
    elif len(scores) == 4:
        rmsd, alignment_length, zscore, alignment_score = scores
    else:
        logger.error('Invalid "scores" value.')
    return rmsd, alignment_length, zscore, alignment_score
#scores can be (rmsd, [align_length, [zscore, [alignment_score]]])
def is_better_alignment_score(scores_a, scores_b, align_len_threshold, is_normalized_score, priority = 'balance'):
    """Compare two alignment score tuples; True when scores_a wins.

    When both alignments reach the length threshold, RMSD takes priority;
    otherwise alignment length does. `priority` is currently unused beyond
    the default 'balance' behavior.
    """
    rmsd_a, len_a, _zscore_a, _score_a = extract_alignment_scores(scores_a)
    rmsd_b, len_b, _zscore_b, _score_b = extract_alignment_scores(scores_b)
    both_long_enough = (is_acceptable_align_len(len_a, align_len_threshold)
                        and is_acceptable_align_len(len_b, align_len_threshold))
    if both_long_enough:
        return is_better_alignment_score_RMSD_priority(rmsd_a, len_a, rmsd_b, len_b, is_normalized_score)
    return is_better_alignment_score_length_priority(rmsd_a, len_a, rmsd_b, len_b, is_normalized_score)
# Other options can be added (not considered here yet)
def find_best_aligned_pair(pairwise_align_details, align_len_threshold):
    """Return the best-aligned partner among pairwise alignment records.

    pairwise_align_details: iterable of (index, loop_id, rmsd, align_length)
    tuples.  Returns (index, loop_id, rmsd, align_length) for the winner;
    the seed (0, '', 1000.0, 0) is returned when the list is empty or no
    entry beats it.

    NOTE(review): `is_normalized_score` is read from module globals, not a
    parameter - confirm it is configured before this is called.
    """
    # max align loop
    max_align_length = max_loop_index = 0
    max_length_loop = ''
    max_loop_rmsd = 1000.0  # seed with a very bad RMSD so any real edge wins
    for (j, r2, rmsd, align_length) in pairwise_align_details:
        if is_better_alignment_score((rmsd, align_length), (max_loop_rmsd, max_align_length), align_len_threshold, is_normalized_score):
            max_align_length = align_length
            max_length_loop = r2
            max_loop_index = j
            max_loop_rmsd = rmsd
    return max_loop_index, max_length_loop, max_loop_rmsd, max_align_length
def get_weighted_avg_rmsd(rmsd_align_len_list):
    """Return (avg_rmsd, total_alignment_length) over (rmsd, align_len) pairs.

    The average is a length-weighted root-mean-square of the per-pair RMSDs.

    NOTE(review): `is_length_adjusted_score` is a module-level flag, not a
    parameter; when set, each rmsd is rescaled by sqrt(align_len) before
    squaring and the final value is divided by sqrt(total length).
    """
    total_alignment_length = 0
    total_rmsd = 0.0
    for rmsd, align_len in rmsd_align_len_list:
        total_alignment_length += align_len
        if is_length_adjusted_score == True:
            rmsd = rmsd * math.sqrt(align_len)
        # Accumulate the squared rmsd weighted by alignment length.
        total_rmsd += rmsd * rmsd * align_len
    avg_rmsd = 0.0
    if total_alignment_length > 0:
        avg_rmsd = math.sqrt(total_rmsd / total_alignment_length)
        if is_length_adjusted_score == True:
            avg_rmsd /= math.sqrt(total_alignment_length)
    return avg_rmsd, total_alignment_length
def centroid(coord_list):
    """Return the component-wise centroid of 3-D coordinates, or None if empty.

    Each entry must be an (x, y, z) triple; indexable objects such as
    Bio.PDB vectors (which iterate via __getitem__) work as well.
    Replaces the original reduce/map one-liner with a readable zip/sum
    form, removing the dependency on `reduce`.
    """
    if not coord_list:
        return None
    n = len(coord_list)
    return [sum(axis) / n for axis in zip(*coord_list)]
def get_atom_coordinate(pdb_fn, residue_list):
    """Compute backbone and sugar atom centroids for the listed residues.

    pdb_fn: path to an mmCIF structure file (first 4 chars of the basename
    are used as the pdb id).  residue_list: (chain_id, index, icode)
    triples.  Returns (backbone, sugar, structure) where backbone/sugar map
    (pdb_id, chain_id, index, icode) -> centroid (or 0. when the residue is
    not present in the chain).
    """
    backbone_atoms, sugar_atoms = get_backbone_and_sugar_atoms()
    pdb_id = os.path.basename(pdb_fn)[:4]
    parser = FastMMCIFParser()
    structure = parser.get_structure('struct', pdb_fn)
    backbone = {}
    sugar = {}
    for chain_id, index, icd in residue_list:
        # if chain_id == 'n' and index == 'a':
        if chain_id == '':
            # Placeholder produced for unmapped indices upstream; skip it.
            continue
        chain = structure[0][chain_id]
        residues = chain.get_residues()
        my_residues = {}
        # Index the chain's residues by (sequence number, insertion code).
        for r in residues:
            hetflag, resseq, icode = r.get_id()
            my_residues[(resseq, icode)] = r
        i = int(index)
        icode = icd if len(icd) > 0 else ' '  # Biopython uses ' ' for "no insertion code"
        if (i, icode) not in my_residues:
            # ret.append(0)
            # Residue missing from the structure: record 0. as a sentinel.
            backbone[(pdb_id, chain_id, index, icd)] = 0.
            sugar[(pdb_id, chain_id, index, icd)] = 0.
        else:
            atom_coord = []
            for atom in backbone_atoms:
                if atom in my_residues[(i, icode)]:
                    atom_coord.append(my_residues[(i, icode)][atom].get_vector())
            backbone[(pdb_id, chain_id, index, icd)] = centroid(atom_coord)
            atom_coord = []
            for atom in sugar_atoms:
                if atom in my_residues[(i, icode)]:
                    atom_coord.append(my_residues[(i, icode)][atom].get_vector())
            sugar[(pdb_id, chain_id, index, icd)] = centroid(atom_coord)
    return backbone, sugar, structure
def pdb_pos_map(pdb_res_map, m):
    """Map reference-sequence indices in *m* to pdb residue identifiers.

    Indices missing from pdb_res_map produce an ('', '', '') placeholder
    and are logged as a warning.
    """
    ret = []
    for ref_index in m:
        if ref_index in pdb_res_map:
            ret.append(pdb_res_map[ref_index])
            continue
        # ret.append('na')
        ret.append(('', '', ''))
        logger.warning('!!!!!!!!!!!!!!!!!!!!!ALERT: APPENDING EMPTY TUPLE (NA) !!!!!!!!!!!!!!!!!!!!')
    return ret
def get_pdb_index_list(lp):
    """Expand a loop string 'chain:s1-e1_s2-e2_...' into pdb residue ids.

    Each 'start-end' segment is inclusive; the flattened reference indices
    are translated through the chain's ref->pdb residue map.
    """
    pdb_chain, regions = lp.split(':')
    index_list = []
    for segment in regions.split('_'):
        start, end = segment.split('-')
        index_list.extend(range(int(start), int(end) + 1))
    pdb_res_map = load_pdb_res_map(pdb_chain)
    return pdb_pos_map(pdb_res_map, index_list)
def aln_map(aln1, aln2, aln_start, aln_end):
    """Return per-sequence indices of nucleotides aligned in [aln_start, aln_end].

    The two alignment strings must have equal length; '.' and '~' filler
    characters are stripped first and '-' marks a gap.  Returns
    (indices_in_seq1, indices_in_seq2), or None when the lengths differ or
    a column is a gap in both sequences.
    """
    if len(aln1) != len(aln2):
        return None
    for filler in ('.', '~'):
        aln1 = aln1.replace(filler, '')
        aln2 = aln2.replace(filler, '')
    ret1 = []
    ret2 = []
    i = j = 0
    for index, (c1, c2) in enumerate(zip(aln1, aln2)):
        gap1 = c1 == '-'
        gap2 = c2 == '-'
        if gap1 and gap2:
            # A column gapped on both sides is malformed input.
            return None
        if not gap1 and not gap2 and aln_start <= index <= aln_end:
            ret1.append(i)
            ret2.append(j)
        # Advance each sequence cursor past its non-gap characters.
        if not gap1:
            i += 1
        if not gap2:
            j += 1
    return ret1, ret2
def pos_map(region, m, extend):
    """Convert aligned-position indices *m* into ref-sequence indices.

    *region* is a '_'-separated list of inclusive 'start-end' segments in
    ref_index coordinates; *m* indexes into the flattened region (output of
    aln_map).  Returns (mapped_indices, extended_indices) where the second
    list widens every segment by *extend* positions on both sides.
    """
    ref_indices = []
    extended_indices = []
    for segment in region.split('_'):
        start_s, end_s = segment.split('-')
        start, end = int(start_s), int(end_s)
        ref_indices.extend(range(start, end + 1))
        extended_indices.extend(range(start - extend, end + 1 + extend))
    return [ref_indices[pos] for pos in m], extended_indices
def pos_map_is_safe(region, m, extend):
    """Check that every aligned position in *m* fits inside *region*.

    *region* is a '_'-separated list of inclusive 'start-end' ref_index
    segments; *m* holds indices into the flattened region (output of
    aln_map, which produces them in increasing order, so the last element
    is the maximum).  *extend* is unused but kept for interface
    compatibility.  Like the original, this assumes *m* is non-empty.

    Fixes: the original materialized the full index lists (including an
    unused extended list) just to take a length; only the total segment
    length is needed.
    """
    region_length = 0
    for segment in region.split('_'):
        start, end = segment.split('-')
        region_length += int(end) - int(start) + 1
    return m[-1] < region_length
def aln_residue(r1, r2, loop1, loop2, aln1, aln2, aln_start, aln_end, extend):
    """Map an alignment between two loops onto pdb residue identifiers.

    loop strings are 'chain:regions'.  Returns (pdb1_pm, pdb2_pm, i1_pm,
    i2_pm): the pdb residue ids of the aligned nucleotides of each loop and
    of the extended regions.  r1/r2 are unused but kept for interface
    compatibility.

    Fix: the original issued each pos_map call twice back-to-back with
    identical arguments; pos_map is pure, so the duplicate pair was removed.
    """
    chain1, region1 = loop1.split(':')
    chain2, region2 = loop2.split(':')
    # load the ref_index to pdb_index mapping
    pdb1_res_map = load_pdb_res_map(chain1)
    pdb2_res_map = load_pdb_res_map(chain2)
    # find the index of mapping nucleotides
    am1, am2 = aln_map(aln1, aln2, aln_start, aln_end)
    # get the ref_index for aligned nucleotides
    (pm1, i1) = pos_map(region1, am1, extend)
    (pm2, i2) = pos_map(region2, am2, extend)
    # get the pdb_index for aligned nucleotides
    pdb1_pm = pdb_pos_map(pdb1_res_map, pm1)
    pdb2_pm = pdb_pos_map(pdb2_res_map, pm2)
    i1_pm = pdb_pos_map(pdb1_res_map, i1)
    i2_pm = pdb_pos_map(pdb2_res_map, i2)
    return pdb1_pm, pdb2_pm, i1_pm, i2_pm
def write_alignment_fixing_log(fw, fix_cnt, r1, r2, loop1, loop2, aln1, aln2, status):
    """Append a before/after alignment-fixing record to *fw*.

    Only active when the module-level output_env is 'local'.  *status* is
    'before' for the pre-fix record (header plus alignment strings) or
    anything else for the post-fix record.
    """
    if output_env != 'local':
        return
    if status == 'before':
        lines = [
            'fix count: ' + str(fix_cnt) + '\n',
            r1 + '\n',
            r2 + '\n',
            'Aligned region(s):\n',
            loop1 + '\n',
            loop2 + '\n',
            'Alignment string BEFORE fixing:' + '\n',
            aln1 + '\n',
            aln2 + '\n',
        ]
    else:
        lines = [
            'Alignment string AFTER fixing:' + '\n',
            aln1 + '\n',
            aln2 + '\n\n',
        ]
    for line in lines:
        fw.write(line)
def remove_hiphen_and_corresponding_character(aln1, aln2):
    """Drop every alignment column where either string has a gap ('-').

    Returns the two strings rebuilt from the gap-free columns only.
    Fix: builds via join instead of quadratic string +=, and pairs columns
    with zip instead of indexing (tolerating, rather than crashing on,
    unequal lengths by stopping at the shorter string).
    """
    kept = [(c1, c2) for c1, c2 in zip(aln1, aln2) if c1 != '-' and c2 != '-']
    new_aln1 = ''.join(c1 for c1, _ in kept)
    new_aln2 = ''.join(c2 for _, c2 in kept)
    return new_aln1, new_aln2
def get_segment_fasta_seq(fasta_seq, regions):
    """Concatenate the fasta subsequences covered by *regions*.

    *regions* is a '_'-separated list of inclusive 'start-end' index pairs,
    e.g. '1-2_4-5'.  Fix: uses slices plus join instead of a quadratic
    character-by-character += loop (a slice also tolerates an end index
    past the sequence instead of raising IndexError).
    """
    parts = []
    for region in regions.strip().split('_'):
        s, e = region.strip().split('-')
        parts.append(fasta_seq[int(s):int(e) + 1])
    return ''.join(parts)
def fix_alignment(fasta_seq, aln1, aln2):
    """Delete one spurious alignment column at the latest gap seen in aln2.

    Scans aln1 against fasta_seq position by position, remembering the most
    recent '-' in aln2; at the first mismatch, the column at that gap is
    removed from both strings and scanning stops.  If no gap was seen yet,
    the last column is removed (index -1), matching the original behavior.
    """
    fixed1 = list(aln1)
    fixed2 = list(aln2)
    last_gap = -1
    for pos, symbol in enumerate(aln1):
        if aln2[pos] == '-':
            last_gap = pos
        if fasta_seq[pos] != symbol:
            del fixed1[last_gap]
            del fixed2[last_gap]
            break
    return ''.join(fixed1), ''.join(fixed2)
def fix_alignment_pair(fasta_seq1, fasta_seq2, aln1, aln2):
    """Repair both alignment strings against their '#'-padded fasta segments.

    A '#' in a fasta sequence marks padding added when the gap-free
    alignment string is longer than the actual sequence; fix_alignment
    removes one spurious column per call.

    Fixes two defects in the original:
    - when neither sequence contained '#', new_aln1/new_aln2 were never
      bound and the function raised UnboundLocalError; the inputs are now
      returned unchanged in that case;
    - when both sequences contained '#', the second fix was applied to the
      original strings, silently discarding the first fix; the fixes are
      now chained.
    """
    new_aln1, new_aln2 = aln1, aln2
    if '#' in fasta_seq1:
        new_aln1, new_aln2 = fix_alignment(fasta_seq1, new_aln1, new_aln2)
    if '#' in fasta_seq2:
        new_aln2, new_aln1 = fix_alignment(fasta_seq2, new_aln2, new_aln1)
    return new_aln1, new_aln2
def aln_residue_temp(pdb_res_mapping_dict, fasta_seq_dict, r1, r2, loop1, loop2, aln1, aln2, aln_start, aln_end, extend):
    """Map an alignment between two loops onto pdb residue ids, repairing
    length-mismatched alignment strings on the fly.

    Unlike aln_residue, the ref->pdb maps and fasta sequences come from the
    pre-loaded dicts (keyed by chain).  While the aligned indices would run
    past the region (pos_map_is_safe fails), one spurious column is removed
    per iteration via fix_alignment_pair; each attempt is logged locally.
    Exits the process if fixing cannot converge.  Returns
    (pdb1_pm, pdb2_pm, i1_pm, i2_pm).
    """
    if output_env == 'local':
        fw = open('alignment_fixing_log.log', 'a')
    # print('loops: ')
    # print(loop1)
    # print(loop2)
    chain1, region1 = loop1.split(':')
    chain2, region2 = loop2.split(':')
    # load the ref_index to pdb_index mapping
    # pdb1_res_map = load_pdb_res_map(chain1)
    # pdb2_res_map = load_pdb_res_map(chain2)
    pdb1_res_map = pdb_res_mapping_dict[chain1]
    pdb2_res_map = pdb_res_mapping_dict[chain2]
    # find the index of mapping nucleotides
    am1, am2 = aln_map(aln1, aln2, aln_start, aln_end)
    # print am1, am2
    # sys.exit()
    fix_cnt = 0
    while pos_map_is_safe(region1, am1, extend) == False or pos_map_is_safe(region2, am2, extend) == False:
        loop1_expected_length = get_loop_length(region1.strip().split('_'))
        loop2_expected_length = get_loop_length(region2.strip().split('_'))
        # fix alignment
        fix_cnt += 1
        write_alignment_fixing_log(fw, fix_cnt, r1, r2, loop1, loop2, aln1, aln2, 'before')
        fasta_seq1 = get_segment_fasta_seq(fasta_seq_dict[chain1], region1)
        fasta_seq2 = get_segment_fasta_seq(fasta_seq_dict[chain2], region2)
        hyphen_free_seq1 = aln1.replace('-', '')
        hyphen_free_seq2 = aln2.replace('-', '')
        # Pad with '#' so any alignment character beyond the real sequence
        # shows up as a mismatch for fix_alignment_pair.
        fasta_seq1 = fasta_seq1.ljust(len(hyphen_free_seq1), '#')
        fasta_seq2 = fasta_seq2.ljust(len(hyphen_free_seq2), '#')
        aln1, aln2 = fix_alignment_pair(fasta_seq1, fasta_seq2, aln1, aln2)
        write_alignment_fixing_log(fw, fix_cnt, r1, r2, loop1, loop2, aln1, aln2, 'after')
        am1, am2 = aln_map(aln1, aln2, aln_start, aln_end)
        if fix_cnt > max(len(aln1), len(aln2)):
            # Safety valve: more fix attempts than columns means we are stuck.
            break
    if pos_map_is_safe(region1, am1, extend) == False or pos_map_is_safe(region2, am2, extend) == False:
        logger.error('Alignment length missmatch fixing failed. r1: ' + r1 + ', r2: ' + r2)
        logger.error(aln1)
        logger.error(aln2)
        sys.exit()
    # NOTE(review): the second pair of pos_map calls below duplicates the
    # first - pos_map is pure, so the repetition is redundant.
    (pm1, i1) = pos_map(region1, am1, extend)
    (pm2, i2) = pos_map(region2, am2, extend)
    # get the ref_index for aligned nucleotides
    (pm1, i1) = pos_map(region1, am1, extend)
    (pm2, i2) = pos_map(region2, am2, extend)
    # if loop1 == '3J7Q_5:3277-3280' and loop2 == '4V9F_0:1699-1703':
    #     sys.exit()
    # get the pdb_index for aligned nucleotides
    pdb1_pm = pdb_pos_map(pdb1_res_map, pm1)
    pdb2_pm = pdb_pos_map(pdb2_res_map, pm2)
    i1_pm = pdb_pos_map(pdb1_res_map, i1)
    i2_pm = pdb_pos_map(pdb2_res_map, i2)
    if output_env == 'local':
        fw.close()
    return pdb1_pm, pdb2_pm, i1_pm, i2_pm
def load_fasta_seq(pdb_id, chains):
    """Load chain_id -> sequence for *pdb_id* from the fasta directory.

    Parses '<pdb_id>.fasta' (fasta_dir is module-level configuration).  A
    record description such as '...|Chains A, B|...' assigns the same
    sequence to every listed chain id.  *chains* is currently unused.
    """
    fasta_seq_dict = {}
    fasta_fn = os.path.join(fasta_dir, pdb_id + '.fasta')
    for record in SeqIO.parse(fasta_fn, 'fasta'):
        # fasta_seq_dict[pdb_id + '_' + record.id.strip().split('|')[0].strip().split(':')[1]] = str(record.seq)
        # Chain ids come from the second '|' field of the description.
        chain_ids = record.description.strip().split('|')[1].strip().split(' ')[1].strip().split(',')
        for chain_id in chain_ids:
            fasta_seq_dict[chain_id] = str(record.seq)
    return fasta_seq_dict
def load_pdb_res_map(chain):
    """Load the sequence-index -> pdb-residue-id map for *chain*.

    Reads '<chain>.rmsx.nch' from pdb_fasta_mapping_dir (module-level
    config); each line is '<pdb_residue_token>\\t<ref_index>'.  The token is
    parsed PDBx-style into (chain_id, seqnum, icode).

    Returns {ref_index (int): (chain_id, seqnum, icode)}.

    Fixes: the file handle is now closed deterministically via a context
    manager (the original leaked it), the file is streamed line by line
    instead of readlines(), and the regex is a raw string.
    """
    ret = {}
    with open(os.path.join(pdb_fasta_mapping_dir, chain + '.rmsx.nch')) as fp:
        for line in fp:
            decom = line.strip().split('\t')
            token = decom[0]
            ##################### for PDBx ####################
            if token[0] == "'":
                # Quoted chain id: everything between the quotes.
                chain_id = token[1:].strip().split("'")[0]
                i = len(chain_id) + 2
            else:
                # Chain id is the leading prefix before the (signed) number.
                chain_id = re.split(r'-?(\d+)', token)[0]
                i = len(chain_id)
            if token[-1].isalpha():
                # Trailing letter is an insertion code.
                icode = token[-1]
                # NOTE(review): the original strips TWO trailing chars when
                # an icode is present (len-2) - presumably a separator
                # precedes the icode in this format; kept as-is, confirm.
                j = len(token) - 2
            else:
                icode = ''
                j = len(token)
            seqnum = token[i:j]
            ret[int(decom[1])] = (chain_id, seqnum, icode)
            ##################### for PDBx ####################
    return ret
def create_best_alignment_graph(cluster_pairwise_alignment_details, align_len_threshold):
    """Build best-alignment graphs over a cluster's loops.

    Every loop contributes one edge to its best-aligned partner.  Returns
    (adjacency_list, directed_adjacency_list,
    transpose_directed_adjacency_list): the undirected neighbor lists, the
    weighted directed edges keyed best-partner -> loop with
    (rmsd, align_length) weights, and the reversed unweighted edges.
    """
    ordering = sorted(cluster_pairwise_alignment_details,
                      key=lambda loop: cluster_pairwise_alignment_details[loop][0])
    adjacency_list = {loop: [] for loop in ordering}
    directed_adjacency_list = {loop: {} for loop in ordering}
    transpose_directed_adjacency_list = {loop: [] for loop in ordering}
    for loop in ordering:
        avg_rmsd, pairwise_align_details = cluster_pairwise_alignment_details[loop]
        if len(pairwise_align_details) > 0:
            j, r2, best_rmsd, best_length = find_best_aligned_pair(pairwise_align_details, align_len_threshold)
            partner = (j, r2)
            adjacency_list[loop].append(partner)
            adjacency_list[partner].append(loop)
            directed_adjacency_list[partner][loop] = (best_rmsd, best_length)
            transpose_directed_adjacency_list[loop].append(partner)
    return adjacency_list, directed_adjacency_list, transpose_directed_adjacency_list
def create_weighted_complete_directed_graph(cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score):
    """Build a weighted directed adjacency matrix from all pairwise alignments.

    Each record (j, r2, rmsd, align_length) stored under loop (i, r1) adds
    the edge (j, r2) -> (i, r1) with weight (rmsd, align_length).
    align_len_threshold and is_normalized_score are unused but kept for
    interface compatibility.
    """
    ordering = sorted(cluster_pairwise_alignment_details,
                      key=lambda loop: cluster_pairwise_alignment_details[loop][0])
    matrix = {loop: {} for loop in ordering}
    for loop in ordering:
        _, pairwise_align_details = cluster_pairwise_alignment_details[loop]
        for j, r2, rmsd, align_length in pairwise_align_details:
            matrix[(j, r2)][loop] = (rmsd, align_length)
    return matrix
def get_connected_components(adjacency_list):
    """Return the connected components of an undirected graph as node lists."""
    components = []
    seen = set()
    for node in adjacency_list:
        if node in seen:
            continue
        # DFS fills `component` with every node reachable from `node`.
        component = []
        dfs(adjacency_list, node, component, None, [])
        components.append(component)
        seen.update(component)
    return components
def extract_cycle(traverse_list_with_parent, node, initial_node, cycle):
    """Append to *cycle* the parent chain from *node* back to *initial_node*.

    Walks the parent pointers recorded during traversal, appending each node
    visited; *initial_node* itself is not appended.  Iterative equivalent of
    the original recursion; mutates *cycle* in place and returns None.
    """
    while node != initial_node:
        cycle.append(node)
        node = traverse_list_with_parent[node]
# Take the transpose of the directed connected graph and find dependency order (on the best alignment component)
# Feature 1: each node will have exactly one out-edge in the reverse graph
# Feature 1 implies one node can be part of at most one cycle
# Feature 2: There will always be exactly 1 cycle
# Feature 2 can be proved by contradiction
# Contradiction Case 1 (no cycle): n nodes, n edge cannot for a tree, where (n-1) can be accomodated
# Contradiction Case 2 (2 or more cycles): cycles can have shared node, can't have connected edge
def bfs_find_cycle(graph, start):
    """BFS from *start*; return the first cycle found (in reverse order), else [].

    Neighbours are enqueued in ascending order of their own degree.  When a
    node is dequeued a second time, a cycle has been closed: it is rebuilt
    by walking the recorded parents back to that node.
    """
    visited = []
    parent_of = {}
    queue = [(start, None)]
    while queue:
        node, parent = queue.pop(0)
        if node in visited:
            # Second arrival at `node` closes a cycle through `parent`.
            cycle = [node]
            extract_cycle(parent_of, parent, node, cycle)
            return cycle
        visited.append(node)
        parent_of[node] = parent
        for neighbour in sorted(graph[node], key=lambda x: len(graph[x])):
            queue.append((neighbour, node))
    return []
def get_central_node_in_the_component(component, directed_adjacency_list, cluster_pairwise_alignment_details):
    """Pick the component's central loop.

    Every node has exactly one in-edge but zero or more out-edges; the
    winner is the node with the highest out-degree, ties broken by the
    lowest weighted average RMSD of its outgoing edges.
    cluster_pairwise_alignment_details is unused here.
    """
    central_node = ''
    best_out_degree = 0
    best_avg_rmsd = 100.0
    for loop in component:
        neighbours = directed_adjacency_list[loop]
        out_degree = len(neighbours)
        edge_weights = [neighbours[other] for other in neighbours]
        avg_rmsd, _ = get_weighted_avg_rmsd(edge_weights)
        # avg_rmsd = cluster_pairwise_alignment_details[loop]
        wins = (out_degree > best_out_degree
                or (out_degree == best_out_degree and avg_rmsd < best_avg_rmsd))
        if wins:
            central_node = loop
            best_out_degree = out_degree
            best_avg_rmsd = avg_rmsd
    return central_node
def get_component_features(cluster_pairwise_alignment_details, directed_adjacency_list, transpose_directed_adjacency_list, connected_components):
    """Compute per-component features of the best-alignment graph.

    Returns {component_id: (central_node, cycle_nodes_of_the_component,
    component_nodes, component_directed_adjacency_list)}.
    """
    ### Find features (central_node, cycle_nodes_of_the_component, component_nodes) of components
    connected_components_features = {}
    # NOTE(review): the sort key len(connected_components) is a constant,
    # so this sorted() is a stable no-op; len(x) (the component's size) was
    # likely intended - confirm before changing, since fixing it would
    # shift component ids downstream.
    for component_id, component_nodes in enumerate(sorted(connected_components, key=lambda x:len(connected_components), reverse = True)):
        ## Find the nodes that can be drawn without dependency
        start_node = component_nodes[0]
        # Returns the cycle in reverse order
        cycle_nodes_of_the_component = bfs_find_cycle(transpose_directed_adjacency_list, start_node)
        # extract the adjaceny list of the current component
        component_directed_adjacency_list = {}
        for (i, r1) in directed_adjacency_list:
            if (i, r1) in component_nodes:
                component_directed_adjacency_list[(i, r1)] = directed_adjacency_list[(i, r1)]
        central_node = start_node
        ## If the graph has more than one node, there will the a cycle and we can find a sutable node_to_start there
        if len(cycle_nodes_of_the_component) > 0:
            if len(cycle_nodes_of_the_component) > 2 and output_env == 'local':
                logger.info('### CYCLE FOUND WITH MORE THAN TWO MOTIFS ###')
                print(component_id)
                print('Cycle nodes: ')
                print(cycle_nodes_of_the_component)
                sys.exit()
            # Find the most important loop in the cycle of the current connected component
            # Returns the node that has most outgoing edge, then best average rmsd
            central_node = get_central_node_in_the_component(cycle_nodes_of_the_component, component_directed_adjacency_list, cluster_pairwise_alignment_details)
        connected_components_features[component_id] = (central_node, cycle_nodes_of_the_component, component_nodes, component_directed_adjacency_list)
    return connected_components_features
def find_best_edge_between_components(cycle_nodes_of_the_component2, component1, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score):
    """Find the best alignment edge from component1 into component2's cycle.

    For every cycle node of component 2, only its alignments whose partner
    belongs to component1 are considered.  Returns
    ((source_index, source_loop), (dest_index, dest_loop), align_length,
    rmsd) of the winning edge; defaults are ((0, ''), (0, ''), 0, 100.0).
    """
    best_length = 0
    best_rmsd = 100.0
    best_source = (0, '')
    best_dest = (0, '')
    for (i, r1) in cycle_nodes_of_the_component2:
        _, pairwise_align_details = cluster_pairwise_alignment_details[(i, r1)]
        # Keep only the alignments whose partner lies inside component1.
        candidates = [detail for detail in pairwise_align_details
                      if (detail[0], detail[1]) in component1]
        best_j, best_r2, rmsd, align_length = find_best_aligned_pair(candidates, align_len_threshold)
        if is_better_alignment_score((rmsd, align_length), (best_rmsd, best_length), align_len_threshold, is_normalized_score):
            best_length = align_length
            best_rmsd = rmsd
            best_source = (best_j, best_r2)
            best_dest = (i, r1)
    return (best_source, best_dest, best_length, best_rmsd)
def find_best_edges_to_another_component(component1, component2, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score):
    """For every loop in component1, find its best alignment into component2.

    is_normalized_score is unused here (the per-loop choice is delegated to
    find_best_aligned_pair).  Returns
    {(i, r1): ((j, r2), rmsd, align_length)}.
    """
    best_edges = {}
    for node in component1:
        _, pairwise_align_details = cluster_pairwise_alignment_details[node]
        # Restrict the candidates to partners inside component2.
        candidates = [detail for detail in pairwise_align_details
                      if (detail[0], detail[1]) in component2]
        best_j, best_r2, best_rmsd, best_length = find_best_aligned_pair(candidates, align_len_threshold)
        best_edges[node] = (best_j, best_r2), best_rmsd, best_length
    return best_edges
# Implements Prim's MST approach
def get_component_ordering(component_adj_matrix, start, align_len_threshold, is_normalized_score):
    """Greedy (Prim-style) traversal order over components from *start*.

    component_adj_matrix[i][j] = (source_loop, dest_loop, align_length,
    rmsd) holds the best edge from component i to component j.  Returns a
    list of (component, entry_loop, parent_loop, rmsd, align_length) tuples
    in visit order; the first tuple carries the seed values (None loops,
    rmsd 0.0, length 0).
    """
    visited = []
    component_ordering = []
    best_next_node = start
    best_edge_loop = None
    best_edge_parent = None
    best_align_length = 0
    best_align_loop_rmsd = 0.0  # seed recorded for the start component only
    while best_next_node != None:
        visited.append(best_next_node)
        component_ordering.append((best_next_node, best_edge_loop, best_edge_parent, best_align_loop_rmsd, best_align_length))
        best_next_node = None
        best_align_length = 0
        best_align_loop_rmsd = 100.0
        #Find which edge is the best among all the outgoing edge from the already visited nodes
        #Time efficiency was not considered as the number of nodes (component) is small
        for i in visited:
            for j in component_adj_matrix[i]:
                if j not in visited:
                    (loop_source, loop_dest, align_length, rmsd) = component_adj_matrix[i][j]
                    # if align_len > best_align_len or (align_len == best_align_len and is_better_rmsd(rmsd, best_align_loop_rsmd, is_normalized_score)):
                    if is_better_alignment_score((rmsd, align_length), (best_align_loop_rmsd, best_align_length), align_len_threshold, is_normalized_score):
                        best_align_length = align_length
                        best_align_loop_rmsd = rmsd
                        best_next_node = j
                        best_edge_loop = loop_dest
                        best_edge_parent = loop_source
    return component_ordering
# Test if at least given percentage/count of the best edges conecting components passes the merging threshold
def assess_merging_two_components(best_edges, align_len_threshold):
    """Decide whether two components are connected well enough to merge.

    best_edges: {(i, r1): ((j, r2), rmsd, align_len)} best edges between
    the two components.  Returns (is_mergable, avg_rmsd): the average is
    taken over the best match_count acceptable edges; (False, 1000.0) is
    returned (sentinel bad RMSD) when too few edges are acceptable.

    NOTE(review): merge_components, connectivity_test_type,
    connectivity_test_threshold and is_length_adjusted_score are module
    globals - confirm they are configured before calling.
    """
    acceptable_edge_rmsd = []
    for (i, r1) in best_edges:
        (j, r2), rmsd, align_len = best_edges[(i,r1)]
        if merge_components and acceptable_edge_for_merging(rmsd, align_len, align_len_threshold, is_length_adjusted_score):
            acceptable_edge_rmsd.append(rmsd)
    # For the case of connectivity_test_type = "count", consider the given value or # of loops
    match_count = min(connectivity_test_threshold, len(best_edges))
    if connectivity_test_type == "percent":
        match_count = math.ceil(len(best_edges)*(connectivity_test_threshold/100.0))
    if len(acceptable_edge_rmsd) < match_count:
        # Returning really bad RMSD
        return False, 1000.0
    else:
        # Average the best (lowest) match_count acceptable RMSDs.
        RMSD_sum = sum(sorted(acceptable_edge_rmsd)[:match_count])
        if match_count > 0:
            return True, RMSD_sum/match_count
        else:
            return True, 0.0
# Apply guided-tree based approach to merge the components
def merge_and_order_connected_components(connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score):
    """Greedily merge well-connected components, then order the result.

    connected_components_features: {id: (central_node, cycle_nodes,
    component_nodes, component_directed_adjacency_list)}.  Components are
    merged pairwise (lowest average RMSD first) while
    assess_merging_two_components approves; the merged groups are then
    ordered, and each group's internal components ordered in turn.
    Returns a list of per-group ordering lists of
    (component, entry_loop, parent_loop, rmsd, align_length) tuples.

    NOTE(review): connectivity_direction and
    start_traversal_with_largest_subfamily are module globals.
    """
    ### Find relational features among components
    component_count = len(connected_components_features)
    component_adj_matrix = {}
    for i in connected_components_features:
        component_adj_matrix[i] = {}
        for j in connected_components_features:
            component_adj_matrix[i][j] = []
    component_nodes_dict = {}
    ## Build graph with pair-wise edge of the best loop-alignment choice among the components
    for i in connected_components_features:
        _, _, component_nodes1, _ = connected_components_features[i]
        component_nodes_dict[i] = copy.deepcopy(component_nodes1)
        for j in connected_components_features:
            if i != j:
                _, _, component_nodes2, _ = connected_components_features[j]
                # Find the best edge from all nodes in the component1 to any node in the component 2
                best_edges = find_best_edges_to_another_component(component_nodes1, component_nodes2, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
                component_adj_matrix[i][j] = best_edges
    component_merged = {}
    # Repeatedly merge the pair of unmerged components with the best
    # (lowest) average connecting RMSD until no pair is mergable.
    while(True):
        got_mergable_components = False
        mergable_component_id_min = None
        mergable_component_id_max = None
        best_component_merge_avg_RMSD = 1000.0
        for i in connected_components_features:
            if i not in component_merged:
                for j in range(i+1, len(connected_components_features)):
                    if j not in component_merged:
                        is_mergable1, component_merge_avg_RMSD1 = assess_merging_two_components(component_adj_matrix[i][j], align_len_threshold)
                        is_mergable2, component_merge_avg_RMSD2 = assess_merging_two_components(component_adj_matrix[j][i], align_len_threshold)
                        if (is_mergable1 and is_mergable2) or (connectivity_direction == "one-way" and (is_mergable1 or is_mergable2)):
                            got_mergable_components = True
                            component_merge_avg_RMSD = (component_merge_avg_RMSD1 + component_merge_avg_RMSD2) / 2.0
                            if connectivity_direction == "one-way":
                                component_merge_avg_RMSD = min(component_merge_avg_RMSD1, component_merge_avg_RMSD2)
                                if not (is_mergable1 and is_mergable2):
                                    component_merge_avg_RMSD = component_merge_avg_RMSD1 if is_mergable1 else component_merge_avg_RMSD2
                            if component_merge_avg_RMSD < best_component_merge_avg_RMSD:
                                best_component_merge_avg_RMSD = component_merge_avg_RMSD
                                mergable_component_id_min = i
                                mergable_component_id_max = j
        if got_mergable_components == False:
            break
        else:
            # Merge two components
            component_merged[mergable_component_id_max] = mergable_component_id_min
            component_nodes_dict[mergable_component_id_min] += component_nodes_dict[mergable_component_id_max]
            component1 = component_nodes_dict[mergable_component_id_min]
            # Update the matrix
            for i in connected_components_features:
                if i != mergable_component_id_min and (i not in component_merged):
                    best_edges1 = find_best_edges_to_another_component(component1, component_nodes_dict[i], cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
                    component_adj_matrix[mergable_component_id_min][i] = best_edges1
                    best_edges2 = find_best_edges_to_another_component(component_nodes_dict[i], component1, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
                    component_adj_matrix[i][mergable_component_id_min] = best_edges2
    merged_components_dict = {}
    # Include the components that was not merged
    for i in sorted(connected_components_features):
        if i not in component_merged:
            merged_components_dict[i] = []
            merged_components_dict[i].append(i)
    # Complete the list with the merged components
    for i in sorted(component_merged):
        key = component_merged[i]
        # Find the smallest component id that it is connected to (the component that was not merged)
        while key in component_merged:
            key = component_merged[key]
        merged_components_dict[key].append(i)
    merged_components_features = {}
    for i in sorted(merged_components_dict):
        merged_components_nodes = []
        merged_cycle_nodes_of_the_components = []
        for j in sorted(merged_components_dict[i]):
            _, cycle_nodes_of_the_component, component_nodes, _ = connected_components_features[j]
            merged_cycle_nodes_of_the_components += cycle_nodes_of_the_component
            merged_components_nodes += component_nodes
        merged_components_features[i] = (merged_cycle_nodes_of_the_components, merged_components_nodes)
    start_node_list = None
    if start_traversal_with_largest_subfamily == True:
        start_node_list = get_largest_subfamily(merged_components_features, cluster_pairwise_alignment_details)
    merged_component_ordering = get_best_component_ordering(merged_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score, start_node_list)
    merged_component_ordering_list = []
    first_component = True
    # Expand every merged group back into an ordering of its own components.
    for a_component in merged_component_ordering:
        i, best_edge_next_loop, best_edge_parent, best_align_loop_rmsd, best_align_length = a_component
        if len(merged_components_dict[i]) > 1:
            next_loop_component = None
            next_loop_component_list = None
            filtered_connected_components_features = {}
            for j in merged_components_dict[i]:
                _, cycle_nodes_of_the_component, component_nodes, _ = connected_components_features[j]
                # Find which is the next component to traverse given the next loop we know
                if first_component == False and best_edge_next_loop in component_nodes:
                    next_loop_component = j
                    next_loop_component_list = [j]
                filtered_connected_components_features[j] = cycle_nodes_of_the_component, component_nodes
            if start_traversal_with_largest_subfamily == True and first_component == True:
                next_loop_component_list = get_largest_subfamily(filtered_connected_components_features, cluster_pairwise_alignment_details)
            component_ordering = get_best_component_ordering(filtered_connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score, next_loop_component_list)
            if first_component == False:
                component_ordering[0] = (next_loop_component, best_edge_next_loop, best_edge_parent, best_align_loop_rmsd, best_align_length)
            merged_component_ordering_list.append(component_ordering)
        else:
            merged_component_ordering_list.append([a_component])
        first_component = False
    return merged_component_ordering_list
def get_largest_subfamily(connected_components_features, cluster_pairwise_alignment_details):
    """Return the ids of the component(s) with the most member loops.

    connected_components_features: {id: (cycle_nodes, component_nodes)}.
    All ids tied for the largest node count are returned; None for an
    empty input.  cluster_pairwise_alignment_details is unused but kept
    for interface compatibility.
    """
    best_ids = None
    best_count = -1
    for component_id in connected_components_features:
        _, component_nodes = connected_components_features[component_id]
        size = len(component_nodes)
        if size > best_count:
            best_count = size
            best_ids = [component_id]
        elif size == best_count:
            best_ids.append(component_id)
    return best_ids
def get_best_component_ordering(connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score, start_node_list = None):
    """Find the component traversal order with the lowest total edge RMSD.

    connected_components_features: {id: (cycle_nodes, component_nodes)}.
    Builds a complete directed matrix of best inter-component edges, then
    tries a Prim-style ordering (get_component_ordering) from every
    candidate start (start_node_list, or every component when None) and
    keeps the cheapest traversal.
    """
    ### Find relational features among components
    component_count = len(connected_components_features)
    component_adj_matrix = {}
    for i in connected_components_features:
        component_adj_matrix[i] = {}
        for j in connected_components_features:
            component_adj_matrix[i][j] = ((0,''),(0,''),0,0.0)
    best_edge_index_i = 0
    overall_best_align_length = 0
    overall_best_align_rmsd = 100.0
    ## Build graph with pair-wise edge of the best loop-alignment choice among the components
    for i in connected_components_features:
        cycle_nodes_of_the_component1, component_nodes1 = connected_components_features[i]
        best_align_length = 0
        best_align_rmsd = 100.0
        best_align__j = 0
        for j in connected_components_features:
            if i != j:
                cycle_nodes_of_the_component2, component_nodes2 = connected_components_features[j]
                # Find the best edge from any node in the component1 to any node in the cycle of component 2 (best align from component2 to component 1)
                (source_loop, dest_loop, align_length, rmsd) = find_best_edge_between_components(cycle_nodes_of_the_component2, component_nodes1, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
                component_adj_matrix[i][j] = (source_loop, dest_loop, align_length, rmsd)
                # Find the component that has the best incoming edge
                # if is_better_alignment_score((rmsd, align_length), (best_align_rmsd, best_align_length), align_len_threshold, is_normalized_score):
                #     best_align_rmsd = rmsd
                #     best_align_length = align_length
                #     best_align__j = j
        # Find the component that has the best outgoing edge
        # if is_better_alignment_score((best_align_rmsd, best_align_length), (overall_best_align_rmsd, overall_best_align_length), align_len_threshold, is_normalized_score):
        #     overall_best_align_rmsd = best_align_rmsd
        #     overall_best_align_length = best_align_length
        #     best_edge_index_i = i
    # print best_edge_index_i
    best_traversal_rmsd = 1000000.0
    best_component_ordering = None
    # Find the traversal that reduces the overall traversal rmsd
    if start_node_list == None:
        start_node_list = connected_components_features.keys()
    for i in start_node_list:
        # Order components with traversal order of Prim's MST algorithm
        # NOTE(review): `sum` shadows the builtin here.
        component_ordering = get_component_ordering(component_adj_matrix, i, align_len_threshold, is_normalized_score)
        sum = 0.0
        for _,_,_,rmsd,_ in component_ordering:
            sum += rmsd
        if sum < best_traversal_rmsd:
            best_traversal_rmsd = sum
            best_component_ordering = copy.deepcopy(component_ordering)
            # print('Best traversal RMSD ' + str(best_traversal_rmsd))
    # else:
    #     best_component_ordering = get_component_ordering(component_adj_matrix, start_node, align_len_threshold, is_normalized_score)
    return best_component_ordering
# def get_best_subfamily_and_component_ordering(merged_components_dict, connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score):
# ### Find relational features among components
# merged_components_adj_matrix = {}
# for i in merged_components_dict:
# merged_components_adj_matrix[i] = {}
# for j in merged_components_dict:
# merged_components_adj_matrix[i][j] = ((0,''),(0,''),0,0.0)
# max_edge_index_i = 0
# overall_best_align_length = 0
# overall_best_align_rmsd = 100.0
# merged_components_features = {}
# for k in sorted(merged_components_dict):
# merged_components_nodes = []
# merged_cycle_nodes_of_the_components = []
# for i in sorted(merged_components_dict[k]):
# _, cycle_nodes_of_the_component, component_nodes, _ = connected_components_features[i]
# merged_cycle_nodes_of_the_components += cycle_nodes_of_the_component
# merged_components_nodes += component_nodes
# merged_components_features[k] = (merged_cycle_nodes_of_the_components, merged_components_nodes)
# ## Build graph with pair-wise edge of the best loop-alignment choice among the merged components (subfamily)
# for i in sorted(merged_components_dict):
# merged_cycle_nodes_of_the_components1, merged_components_nodes1 = merged_components_features[i]
# best_align_length = 0
# best_align_rmsd = 100.0
# max_j = 0
# for j in sorted(merged_components_dict):
# if i != j:
# merged_cycle_nodes_of_the_components2, merged_components_nodes2 = merged_components_features[j]
# # Find the best edge from any node in the component1 to any node in the cycle of component 2 (best align from component2 to component 1)
# (source_loop, dest_loop, align_length, rmsd) = find_best_edge_between_components(merged_cycle_nodes_of_the_components2, merged_components_nodes1, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
# merged_components_adj_matrix[i][j] = (source_loop, dest_loop, align_length, rmsd)
# # Find the merged component that has the best incoming edge
# if is_better_alignment_score((rmsd, align_length), (best_align_rmsd, best_align_length), align_len_threshold, is_normalized_score):
# best_align_rmsd = rmsd
# best_align_length = align_length
# max_j = j
# # Find the component that has the best outgoing edge
# if is_better_alignment_score((best_align_rmsd, best_align_length), (overall_best_align_rmsd, overall_best_align_length), align_len_threshold, is_normalized_score):
# overall_best_align_rmsd = best_align_rmsd
# overall_best_align_length = best_align_length
# max_edge_index_i = i
# ## Build graph with pair-wise edge of the best loop-alignment choice among the components inside merged components
# for k in sorted(merged_components_dict):
# for k in sorted(merged_components_dict):
# central_node1, cycle_nodes_of_the_component1, component_nodes1, component_directed_adjacency_list1 = connected_components_features[i]
# best_align_length = 0
# best_align_rmsd = 100.0
# max_j = 0
# for j in merged_components_dict[k]:
# if i != j:
# central_node2, cycle_nodes_of_the_component2, component_nodes2, component_directed_adjacency_list2 = connected_components_features[j]
# # Find the best edge from any node in the component1 to any node in the cycle of component 2 (best align from component2 to component 1)
# (source_loop, dest_loop, align_length, rmsd) = find_best_edge_between_components(cycle_nodes_of_the_component2, component_nodes1, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
# component_adj_matrix[i][j] = (source_loop, dest_loop, align_length, rmsd)
# # Find the component that has the best incoming edge
# if is_better_alignment_score((rmsd, align_length), (best_align_rmsd, best_align_length), align_len_threshold, is_normalized_score):
# best_align_rmsd = rmsd
# best_align_length = align_length
# max_j = j
# # Find the component that has the best outgoing edge
# if is_better_alignment_score((best_align_rmsd, best_align_length), (overall_best_align_rmsd, overall_best_align_length), align_len_threshold, is_normalized_score):
# overall_best_align_rmsd = best_align_rmsd
# overall_best_align_length = best_align_length
# max_edge_index_i = i
# best_traversal_rmsd = 1000000.0
# best_component_ordering = None
# for i in connected_components_features:
# # Order components with traversal order of Prim's MST algorithm
# component_ordering = get_component_ordering(component_adj_matrix, i, align_len_threshold, is_normalized_score)
# sum = 0.0
# for _,_,_,rmsd,_ in component_ordering:
# sum += rmsd
# # print sum
# if sum < best_traversal_rmsd:
# best_traversal_rmsd = sum
# best_component_ordering = copy.deepcopy(component_ordering)
# print('Best traversal RMSD ' + str(best_traversal_rmsd))
# return best_component_ordering
def dfs(graph, node, visited, parent, traverse_list_with_parent):
if node not in visited:
visited.append(node)
traverse_list_with_parent.append((node, parent))
for n in sorted(graph[node], key = lambda x:len(graph[x])):
dfs(graph, n, visited, node, traverse_list_with_parent)
def bfs(graph, start, visited, parent, traverse_list_with_parent):
queue = [(start, parent)]
while queue:
(node, parent) = queue.pop(0)
if node not in visited:
visited.append(node)
traverse_list_with_parent.append((node, parent))
for n in sorted(graph[node], key = lambda x:len(graph[x])):
queue.append((n, node))
def sort_value(edge, align_len_threshold, is_length_adjusted_score, max_threshold = 10000.0):
rmsd, align_len = edge
# if is_length_adjusted_score:
# return rmsd
inverted_val_of_align_len = (max_threshold - align_len)
if is_acceptable_align_len(align_len, align_len_threshold):
return (rmsd * 1000 * max_threshold) + inverted_val_of_align_len
else:
# Returning a pseudo RMSD, considering 100000 as max possible RMSD
# Subtracting align_len as lower RMSD is better, while larger align_len is better
return inverted_val_of_align_len
def dijkstra(graph, weighted_directed_adjacency_matrix, start, parent, align_len_threshold, is_length_adjusted_score):
visited = []
traverse_list_with_parent = []
traverse_list_with_parent_and_weight = []
heap = []
heapq.heappush(heap, (0.0, (0.0, 0), start, parent))
while len(heap) > 0:
(_, edge_weight, node, parent) = heapq.heappop(heap)
visited.append(node)
traverse_list_with_parent.append((node, parent))
traverse_list_with_parent_and_weight.append((node, parent, edge_weight))
for n in graph[node]:
if n not in visited:
edge_weight = graph[node][n]
traversal_weight = None
if traversal_algorithm == "dijkstra":
traversal_weight = sort_value(edge_weight, align_len_threshold, is_length_adjusted_score)
elif traversal_algorithm == "root-oriented":
traversal_weight = sort_value(weighted_directed_adjacency_matrix[start][n], align_len_threshold, is_length_adjusted_score)
heapq.heappush(heap, (traversal_weight, edge_weight, n, node))
return traverse_list_with_parent, traverse_list_with_parent_and_weight
def acceptable_edge_for_merging(rmsd, align_len, align_len_threshold, is_length_adjusted_score):
if is_acceptable_align_len(align_len, align_len_threshold):
# need to adjust rmsd rank
if is_length_adjusted_score:
rmsd = rmsd * math.sqrt(align_length)
if rmsd <= rmsd_threshold_for_merging:
return True
# rmsd_rank = get_rmsd_rank(rmsd, align_len, is_length_adjusted_score)
# # if rmsd_rank <= 2:
# if rmsd_rank < 2:
# # if rmsd <= acceptable_RMSD_threshold_for_merging:
# return True
return False
# def acceptable_edge_for_merging(rmsd, align_len, align_len_threshold, merging_rmsd_threshold, is_length_adjusted_score):
# if is_acceptable_align_len(align_len, align_len_threshold):
# if is_acceptable_rmsd(rmsd, align_len, is_length_adjusted_score, merging_rmsd_threshold):
# return True
# return False
def align_to_ordering(component_ordering, connected_components_features, cluster_pairwise_alignment_details):
### Order loops according to the best component first, and align all of them on it
merged_component_features = []
merged_component_features.append([])
edges_in_merged_components = []
edges_in_merged_components.append([])
loop_ordering = []
loop_ordering.append([])
edges_among_all_components = []
if len(component_ordering) > 0:
(i, _, _, _, _) = component_ordering[0][0]
central_node, _, _, _ = connected_components_features[i]
node_to_start = central_node
loop_ordering[-1].append((node_to_start, None))
i, r1 = node_to_start
for component_list in component_ordering:
for (k, _, _, _, _) in component_list:
# central_node, _, component = connected_components_features[i]
_, _, component_nodes, _ = connected_components_features[k]
_, _, component_nodes, _ = connected_components_features[k]
first_item = True
for j, r2 in component_nodes:
if (j, r2) != node_to_start:
align_rmsd, align_len = get_rmsd_align_len(i, r1, j, r2, cluster_pairwise_alignment_details)
loop_ordering[-1].append(((j, r2), node_to_start))
if first_item == True:
edges_in_merged_components[-1].append((node_to_start, (j, r2), (align_rmsd, align_len)))
first_item = False
merged_component_features[-1].append(connected_components_features[k])
return loop_ordering, merged_component_features, edges_among_all_components, edges_in_merged_components
def get_align_to_rmsd_info(cluster_id, cluster_pairwise_alignment_details, alignment_data, fp_align_len_threshold = None):
global scanx_align_to_superimposition
prev_status = scanx_align_to_superimposition
scanx_align_to_superimposition = True
ordered_dependency_list, merged_components_features, edges_among_all_components, edges_in_merged_components = generate_loop_print_dependency_v2(cluster_id, cluster_pairwise_alignment_details, alignment_data, fp_align_len_threshold)
avg_rmsd, total_alignment_length = get_family_rmsd_and_alignment_summary(ordered_dependency_list, cluster_pairwise_alignment_details)
scanx_align_to_superimposition = prev_status
return avg_rmsd, total_alignment_length
def generate_loop_print_dependency_v2(cluster_id, cluster_pairwise_alignment_details, alignment_data, fp_align_len_threshold = None):
align_len_threshold = generate_align_length_threshold(cluster_pairwise_alignment_details)
if fp_align_len_threshold != None:
fp_align_len_threshold.write(cluster_id + ',' + str(align_len_threshold) + '\n')
adjacency_list, directed_adjacency_list, transpose_directed_adjacency_list = create_best_alignment_graph(cluster_pairwise_alignment_details, align_len_threshold)
weighted_directed_adjacency_matrix = create_weighted_complete_directed_graph(cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
connected_components = get_connected_components(adjacency_list)
# print('\nConnected Components')
# print('Component count = ' + str(len(connected_components)))
# Find features (central_node, cycle_nodes_of_the_component, component_nodes, component_directed_adjacency_list) of components
connected_components_features = get_component_features(cluster_pairwise_alignment_details, directed_adjacency_list, transpose_directed_adjacency_list, connected_components)
# Write some analysis on the length and rmsd of components to variation data log file
# analyze_component_features(cluster_id, alignment_data, connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, align_dir, log_file_list, is_length_adjusted_score)
component_ordering = merge_and_order_connected_components(connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
# component_ordering = get_best_component_ordering(connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
if scanx_align_to_superimposition:
return align_to_ordering(component_ordering, connected_components_features, cluster_pairwise_alignment_details)
### Order loops according to the component first, and then the weighted BFS. Merge components accordingly.
merged_component_features = []
edges_among_all_components = []
edges_in_merged_components = []
loop_ordering = []
first_component = True
# rmsd_list = []
# rmsd_list1 = []
# print_a_list(component_ordering)
for component_list in component_ordering:
loop_ordering.append([])
merged_component_features.append([])
edges_in_merged_components.append([])
first_item = True
for (i, node_to_start, parent, align_rmsd, align_len) in component_list:
if not first_item:
edges_in_merged_components[-1].append((parent, node_to_start, (align_rmsd, align_len)))
first_item = False
# central_node, _, component = connected_components_features[i]
central_node, _, component_nodes, component_directed_adjacency_list = connected_components_features[i]
if first_component:
node_to_start = central_node
else:
edges_among_all_components.append((parent, node_to_start, (align_rmsd, align_len)))
# rmsd_list.append(align_rmsd)
# rmsd_list1.append((align_rmsd, node_to_start, parent))
# dfs(component_directed_adjacency_list, node_to_start, visited, parent, traverse_list_with_parent)
# traverse_list_with_parent, traverse_list_with_parent_and_weight = bfs_weighted(component_directed_adjacency_list, node_to_start, parent, align_len_threshold, is_length_adjusted_score)
# traverse_list_with_parent = Prims_MST(component_directed_adjacency_list, node_to_start, parent, align_len_threshold, is_length_adjusted_score)
traverse_list_with_parent, traverse_list_with_parent_and_weight = dijkstra(component_directed_adjacency_list, weighted_directed_adjacency_matrix, node_to_start, parent, align_len_threshold, is_length_adjusted_score)
# for node, parent, weight in traverse_list_with_parent_and_weight:
# # print node, parent, weight
# if parent != None:
# rmsd_list.append(weight[0])
# rmsd_list1.append((weight[0], node, parent))
merged_component_features[-1].append(connected_components_features[i])
loop_ordering[-1] += traverse_list_with_parent
first_component = False
# generate_subfamily_representative(cluster_id, loop_ordering, alignment_data, cluster_pairwise_alignment_details, align_len_threshold)
return loop_ordering, merged_component_features, edges_among_all_components, edges_in_merged_components
# def generate_loop_print_dependency(cluster_id, cluster_pairwise_alignment_details, alignment_data, fp_align_len_threshold = None):
# print(cluster_id)
# align_len_threshold = generate_align_length_threshold(cluster_pairwise_alignment_details)
# # merging_rmsd_threshold = generate_merging_rmsd_threshold(cluster_pairwise_alignment_details)
# # fp_rmsd_threshold = open("Familywise_RMSD_Threshold_" + loop_type + ".txt", 'a')
# # fp_rmsd_threshold.write(cluster_id + ',' + str(merging_rmsd_threshold) + '\n')
# # fp_rmsd_threshold.close()
# if fp_align_len_threshold != None:
# fp_align_len_threshold.write(cluster_id + ',' + str(align_len_threshold) + '\n')
# adjacency_list, directed_adjacency_list, transpose_directed_adjacency_list = create_best_alignment_graph(cluster_pairwise_alignment_details, align_len_threshold)
# weighted_directed_adjacency_matrix = create_weighted_complete_directed_graph(cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
# connected_components = get_connected_components(adjacency_list)
# print('\nConnected Components')
# print('Component count = ' + str(len(connected_components)))
# # Find features (central_node, cycle_nodes_of_the_component, component_nodes, component_directed_adjacency_list) of components
# connected_components_features = get_component_features(cluster_pairwise_alignment_details, directed_adjacency_list, transpose_directed_adjacency_list, connected_components)
# # Write some analysis on the length and rmsd of components to variation data log file
# # analyze_component_features(cluster_id, alignment_data, connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, align_dir, log_file_list, is_length_adjusted_score)
# filtered_connected_components_features = {}
# for i in connected_components_features:
# _, cycle_nodes_of_the_component, component_nodes, _ = connected_components_features[i]
# filtered_connected_components_features[i] = cycle_nodes_of_the_component, component_nodes
# component_ordering = get_best_component_ordering(filtered_connected_components_features, cluster_pairwise_alignment_details, align_len_threshold, is_normalized_score)
# # if scanx_align_to_superimposition:
# # return align_to_ordering(component_ordering, connected_components_features, cluster_pairwise_alignment_details)
# ### Order loops according to the component first, and then the weighted BFS. Merge components accordingly.
# merged_component_features = []
# edges_among_all_components = []
# edges_in_merged_components = []
# loop_ordering = []
# first_component = True
# rmsd_list = []
# rmsd_list1 = []
# for (i, node_to_start, parent, align_rmsd, align_len) in component_ordering:
# # central_node, _, component = connected_components_features[i]
# central_node, _, component_nodes, component_directed_adjacency_list = connected_components_features[i]
# if first_component:
# node_to_start = central_node
# else:
# edges_among_all_components.append((parent, node_to_start, (align_rmsd, align_len)))
# rmsd_list.append(align_rmsd)
# rmsd_list1.append((align_rmsd, node_to_start, parent))
# # dfs(component_directed_adjacency_list, node_to_start, visited, parent, traverse_list_with_parent)
# # traverse_list_with_parent, traverse_list_with_parent_and_weight = bfs_weighted(component_directed_adjacency_list, node_to_start, parent, align_len_threshold, is_length_adjusted_score)
# # traverse_list_with_parent = Prims_MST(component_directed_adjacency_list, node_to_start, parent, align_len_threshold, is_length_adjusted_score)
# traverse_list_with_parent, traverse_list_with_parent_and_weight = dijkstra(component_directed_adjacency_list, weighted_directed_adjacency_matrix, node_to_start, parent, align_len_threshold, is_length_adjusted_score)
# for node, parent, weight in traverse_list_with_parent_and_weight:
# # print node, parent, weight
# if parent != None:
# rmsd_list.append(weight[0])
# rmsd_list1.append((weight[0], node, parent))
# if first_component or not merge_components or not acceptable_edge_for_merging(align_rmsd, align_len, align_len_threshold, is_length_adjusted_score):
# # if first_component or not merge_components or not acceptable_edge_for_merging(align_rmsd, align_len, align_len_threshold, merging_rmsd_threshold, is_length_adjusted_score):
# loop_ordering.append([])
# merged_component_features.append([])
# edges_in_merged_components.append([])
# print('Component ' + str(i) + ' Not Merged')
# else:
# edges_in_merged_components[-1].append((parent, node_to_start, (align_rmsd, align_len)))
# print('Component ' + str(i) + ' merged')
# # parent = None
# # node_to_start = central_node
# # print(align_rmsd)
# # print(align_len)
# merged_component_features[-1].append(connected_components_features[i])
# loop_ordering[-1] += traverse_list_with_parent
# first_component = False
# # print('RMSD Z-Scores')
# # print(cluster_id)
# # z_scores = list(map(lambda x: (round(x[0],1),round(x[1],1)), get_z_scores(rmsd_list)))
# # rmsd_with_loop = list(map(lambda x: (round(x[0],1),x[1][1],x[2][1]), rmsd_list1))
# # if cluster_id == 'kink-turn':
# # if cluster_id == 'sarcin-ricin':
# # if cluster_id == 'reverse-kink-turn':
# # sys.exit()
# generate_subfamily_representative(cluster_id, loop_ordering, alignment_data, cluster_pairwise_alignment_details, align_len_threshold)
# return loop_ordering, merged_component_features, edges_among_all_components, edges_in_merged_components
def get_component_organism_stat(ordered_dependency_list, pdb_organism_details): #stat by org_type only
component_organism_stat = {}
for component_id, component in enumerate(ordered_dependency_list):
component_organism_stat[component_id] = {}
for component_loop_id, ((i, loop), parent) in enumerate(component):
pdb_chain = loop.strip().split(':')[0]
if pdb_chain in pdb_organism_details:
RNA_Types, organism, org_class, org_type, pdb_source = pdb_organism_details[pdb_chain]
else:
org_type = 'Unknown'
if org_type not in component_organism_stat[component_id]:
component_organism_stat[component_id][org_type] = 0
component_organism_stat[component_id][org_type] += 1
return component_organism_stat
def write_alignments_to_file(i, r1, j, r2, aln1, aln2, rmsd, parent_load_id, load_id, f):
###########################################################
f.write(str(parent_load_id).ljust(25) + '\t\t')
if input_index_type == 'fasta':
f.write(r2.rjust(30) + '(FASTA)\t\t')
r2_pdb_ind = convert_a_loop_from_FASTA_to_PDB(r2)
f.write(r2_pdb_ind.rjust(30) + '(PDB)\t\t' + aln2.rjust(40) + '\t\t' + str(round(rmsd, 2)).rjust(15) + '\n')
f.write(str(load_id).ljust(25) + '\t\t')
if input_index_type == 'fasta':
f.write(r1.rjust(30) + '(FASTA)\t\t')
r1_pdb_ind = convert_a_loop_from_FASTA_to_PDB(r1)
f.write(r1_pdb_ind.rjust(30) + '(PDB)\t\t' + aln1.rjust(40) + '\n')
f.write('\n')
###########################################################
def get_boundary_canonical_interactions(boundary_index_list, pdb_index_list, index_residue_dict):
boundary_interaction_list = []
temp_s = None
prev_e = None
for i, (s, e) in enumerate(boundary_index_list):
if i == 0:
temp_s = s
prev_e = e
continue
a = prev_e
b = s
bp = a + "-" + b
line = bp + "," + index_residue_dict[a] + "-" + index_residue_dict[b] + ",W/W" + ",cis" + "\n"
boundary_interaction_list.append(line)
prev_e = e
a = temp_s
b = prev_e
bp = a + "-" + b
line = bp + "," + index_residue_dict[a] + "-" + index_residue_dict[b] + ",W/W" + ",cis" + "\n"
boundary_interaction_list.append(line)
return boundary_interaction_list
def write_alignments_with_interactions_to_file(load_id, r, aln, rmsd, pdb_res_map_dict, f, categorize_annotation = True):
r_pdb_ind = convert_a_loop_from_FASTA_to_PDB(r)
if(len(load_id) > 0):
f.write(str(load_id).ljust(25) + '\t\t')
if input_index_type == 'fasta':
f.write(r + '(FASTA)')
f.write(r_pdb_ind.rjust(50) + '(PDB)')
else:
if input_index_type == 'fasta':
f.write(r + ' (FASTA)\n')
f.write(r_pdb_ind + ' (PDB)')
if len(aln) > 0:
f.write('\t\t' + aln.rjust(40))
f.write('\t\t' + str(round(rmsd, 2)).rjust(8))
f.write('\n')
# f4.write(str(parent_load_id).ljust(25) + '\t\t' + r2.rjust(30) + '\t\t')
# r2_pdb_ind = convert_a_loop_from_FASTA_to_PDB(r2)
# f4.write(r2_pdb_ind.rjust(25) + '\t\t' + aln2.rjust(40) + '\t\t' + str(round(rmsd, 2)).rjust(15) + '\n')
f_loop = open(os.path.join(loop_dir, r + ".smf"))
loop_info_lines = f_loop.readlines()
f_loop.close()
pdb_chain, loop_regions = r.strip().split(":")
if pdb_chain not in pdb_res_map_dict:
pdb_res_map_dict[pdb_chain] = load_pdb_res_map(pdb_chain)
pdb_res_map = pdb_res_map_dict[pdb_chain]
loop_regions = loop_regions.strip().split("_")
loop_regions_seq = "".join(loop_info_lines[1].strip().split("..."))
loop_regions_pdb_ind = []
# index_residue_dict = {}
pdb_index_list = []
boundary_index_list = []
for part, region in enumerate(loop_regions):
region_pdb_index = []
s, e = region.strip().split("-")
s = int(s)
e = int(e)
for fasta_indx in range(s, e+1):
if len(pdb_res_map[fasta_indx][2]) > 0:
pdb_index_list.append(str(pdb_res_map[fasta_indx][1]) + '.' + str(pdb_res_map[fasta_indx][2]))
else:
pdb_index_list.append(pdb_res_map[fasta_indx][1])
start_ind = str(pdb_res_map[s][1])
if len(pdb_res_map[s][2]) > 0:
start_ind += '.' + str(pdb_res_map[s][2])
end_ind = pdb_res_map[e][1]
if len(pdb_res_map[e][2]) > 0:
end_ind += '.' + str(pdb_res_map[e][2])
loop_regions_pdb_ind.append(start_ind + "-" + end_ind)
boundary_index_list.append((start_ind, end_ind))
index_residue_dict = dict(zip(pdb_index_list, loop_regions_seq))
cat1_lines = []
cat2_lines = []
other_lines = []
stack_lines = []
in_stack = False
for line in loop_info_lines:
if line.startswith(">"):
continue
elif line.startswith("#info=stacking") or in_stack == True:
# break
pieces = line.strip().split(",")
if len(pieces) == 2:
a, b = pieces[0].strip().split("-")
a = pdb_index_list[int(a)]
b = pdb_index_list[int(b)]
bp = a + "-" + b
line = bp + "," + index_residue_dict[a] + "-" + index_residue_dict[b] + "," + pieces[1] + '\n'
stack_lines.append(line)
in_stack = True
else:
pieces = line.strip().split(",")
if len(pieces) == 3:
a, b = pieces[0].strip().split("-")
a = pdb_index_list[int(a)]
b = pdb_index_list[int(b)]
bp = a + "-" + b
line = bp + "," + index_residue_dict[a] + "-" + index_residue_dict[b] + "," + pieces[1] + "," + pieces[2] + '\n'
if categorize_annotation == True and (pieces[1].strip() == 'S/H' or pieces[1].strip() == 'H/S' or pieces[1].strip() == 'S/S'):
cat1_lines.append(line)
else:
cat2_lines.append(line)
# f4.write(line)
else:
other_lines.append(line)
# f4.write(line)
# f.write(line)
if len(other_lines) > 0:
f.write(''.join(other_lines))
if len(cat1_lines) > 0:
f.write('\n')
f.write(''.join(cat1_lines))
f.write('\n')
boundary_interaction_list = get_boundary_canonical_interactions(boundary_index_list, pdb_index_list, index_residue_dict)
f.write(boundary_interaction_list[-1])
if len(cat2_lines) > 0:
f.write(''.join(cat2_lines))
f.write(''.join(boundary_interaction_list[:-1]))
if len(stack_lines) > 0:
# f.write('\n')
f.write(''.join(stack_lines))
def extract_rmsd_from_dict(rmsd_data_list_dict, i, r1, j, r2):
_, rmsd_list = rmsd_data_list_dict[(i, r1)]
rmsd = -1.0
for (x, rx, rmsdx, _) in rmsd_list:
if j == x and rx == r2:
rmsd = rmsdx
break
return rmsd
def get_align_len(aln1, aln2):
len1 = len(aln1.replace('-',''))
len2 = len(aln2.replace('-',''))
return len1 if len1 < len2 else len2;
# def is_better_rmsd_len(rmsd_len1, rmsd_len2, align_loop_count):
# rmsd1, len1 = rmsd_len1
# rmsd2, len2 = rmsd_len2
# len1 /= align_loop_count
# len2 /= align_loop_count
# if abs(rmsd1 - rmsd2) < 0.000000001:
# return len1 > len2
# return rmsd1 < rmsd2
def is_better_rmsd_len(rmsd_len1, rmsd_len2, align_loop_count):
rmsd1, len1 = rmsd_len1
rmsd2, len2 = rmsd_len2
len1 /= align_loop_count
len2 /= align_loop_count
if abs(len1 - len2) < 0.000000001:
return rmsd1 < rmsd2
return len1 > len2
def generate_subfamily_representative(fp_representative, cluster_id, component_id, component, alignment_data, rmsd_data_list_dict, pdb_res_map_dict, align_len_threshold):
# if is_generate_subfamily_representative == False:
# return
# output_dir = os.path.join(superimposition_output_dir, 'componentwise_analysis')
# output_dir = representative_dir
# create_directory(output_dir)
# f = open(os.path.join(output_dir, str(cluster_id) + "_representatives.txt"), "w")
# pdb_res_map_dict = {}
# for component_id, component in enumerate(ordered_dependency_list):
representative_loop = ''
represetative_i = -1
max_acceptable_align_count = 0
best_weighted_avg_rmsd = (20000.0, 0)
loop_count = len(component) - 1
for component_loop_id1, ((i, r1), _) in enumerate(component):
rmsd_align_len_list = []
for component_loop_id2, ((j, r2), _) in enumerate(component):
if component_loop_id1 != component_loop_id2:
(t1, t2, zscore, cr1, cr2, aln2, aln1, score) = alignment_data[cluster_id][strToNode(r2)][strToNode(r1)]
rmsd = extract_rmsd_from_dict(rmsd_data_list_dict, i, r1, j, r2)
align_len = get_align_len(aln1, aln2);
if is_acceptable_align_len(align_len, align_len_threshold):
rmsd_align_len_list.append((rmsd,align_len))
weighted_avg_rmsd = get_weighted_avg_rmsd(rmsd_align_len_list)
acceptable_align_count = len(rmsd_align_len_list)
# Try to make sure the represetative has acceptable alignment with at least half of the members
if ((max_acceptable_align_count < int((loop_count + 1) / 2) and
(acceptable_align_count > max_acceptable_align_count or
(acceptable_align_count == max_acceptable_align_count and is_better_rmsd_len(weighted_avg_rmsd , best_weighted_avg_rmsd, loop_count))))
or
(acceptable_align_count >= int((loop_count + 1) / 2) and is_better_rmsd_len(weighted_avg_rmsd , best_weighted_avg_rmsd, loop_count))):
max_acceptable_align_count = acceptable_align_count
best_weighted_avg_rmsd = weighted_avg_rmsd
representative_loop = r1
represetative_i = i
avg_rmsd, total_align_len = best_weighted_avg_rmsd
if cluster_id in known_motif_fullname:
fp_representative.write(known_motif_fullname[cluster_id] + '-Sub' + str(component_id + 1) + ':\n')
else:
fp_representative.write(cluster_id + '-Sub' + str(component_id + 1) + ':\n')
if output_env == 'local':
fp_representative.write("Acceptable Align Count = " + str(max_acceptable_align_count) + "/" + str(loop_count) + ",\nWeighted Average RMSD = (" + str(round(avg_rmsd, 2)) + ',' + str(total_align_len) + ")\n")
write_alignments_with_interactions_to_file("", representative_loop, "", best_weighted_avg_rmsd, pdb_res_map_dict, fp_representative, False)
fp_representative.write("\n\n")
return represetative_i, representative_loop
def generate_representative_loop_image(time_in_distance_calc, representative_dir, rotation_version, cluster_id, component_id, i, r, loop_display_info_dict, draw_figures, show_extended_loop, show_label):
if draw_figures == False:
return 0
image_fname = os.path.join(representative_dir, add_rotation_version_prefix(rotation_version) + cluster_id + '-Sub' + str(component_id + 1) + '_repr.png')
display_load_name, align_load_name, chain_load_name = loop_display_info_dict[(i,r)]
# print(display_load_name, align_load_name, chain_load_name)
# pymol.cmd._do('hide all')
# pymol.cmd.sync()
# wait_for_certain_time_according_to_wait_factor()
# time.sleep(.200)
display_color = 'gray'
cano_atom_color = 'orange'
other_atom_color = 'blue'
bp_atom_color = 'green'
bp_line_color = 'red'
cano_seqnums = []
other_seqnums = []
bp_seqnums = []
bp_list = []
pdb_chain, loop_regions = r.strip().split(":")
pdb_res_map = load_pdb_res_map(pdb_chain)
loop_regions = loop_regions.strip().split('_')
pdb_index_list = []
for region in loop_regions:
s, e = region.strip().split("-")
s = int(s)
e = int(e)
cano_seqnums.append(pdb_res_map[s][1] + pdb_res_map[s][2])
cano_seqnums.append(pdb_res_map[e][1] + pdb_res_map[e][2])
for fasta_indx in range(s+1, e):
other_seqnums.append(pdb_res_map[fasta_indx][1] + pdb_res_map[fasta_indx][2])
for fasta_indx in range(s, e+1):
if fasta_indx != s and fasta_indx != e:
other_seqnums.append(pdb_res_map[fasta_indx][1] + pdb_res_map[fasta_indx][2])
pdb_index_list.append((pdb_res_map[fasta_indx][1], pdb_res_map[fasta_indx][2]))
f_loop = open(os.path.join(loop_dir, r + ".smf"))
loop_info_lines = f_loop.readlines()
f_loop.close()
for line in loop_info_lines:
if line.startswith(">"):
continue
elif line.startswith("#info=stacking"):
break
else:
pieces = line.strip().split(",")
if len(pieces) == 3:
a, b = pieces[0].strip().split("-")
seqnum1, icode1 = pdb_index_list[int(a)]
seqnum2, icode2 = pdb_index_list[int(b)]
a = seqnum1 + icode1
b = seqnum2 + icode2
bp_seqnums.append(a)
bp_seqnums.append(b)
bp_list.append((a, b))
other_bp_load_name = cluster_id + '_sub' + str(component_id + 1) + '_other_bp'
bp_atoms_load_name = cluster_id + '_sub' + str(component_id + 1) + '_bp_atoms'
cano_bp_load_name = cluster_id + '_sub' + str(component_id + 1) + '_cano_bp'
dist_load_name = ''
pymol.cmd.select(other_bp_load_name, chain_load_name + ' and (%s)' % ' or '.join(list(map(lambda x: 'resi '+x, other_seqnums))))
pymol.cmd.select(bp_atoms_load_name, chain_load_name + ' and (%s)' % ' or '.join(list(map(lambda x: 'resi '+x, bp_seqnums))))
pymol.cmd.select(cano_bp_load_name, chain_load_name + ' and (%s)' % ' or '.join(list(map(lambda x: 'resi '+x, cano_seqnums))))
config_pymol_cartoon(display_color, True)
# pymol.cmd._do('set stick_transparency, 0.5')
if show_extended_loop:
pymol.cmd.show('cartoon', chain_load_name)
else:
pymol.cmd.show('cartoon', display_load_name)
# pymol.cmd.show('stick', bp_atoms_load_name)
pymol.cmd.color(display_color, chain_load_name)
pymol.cmd.color(other_atom_color, other_bp_load_name)
pymol.cmd.color(bp_atom_color, bp_atoms_load_name)
pymol.cmd.color(cano_atom_color, cano_bp_load_name)
a_load_name = 'a_atoms'
b_load_name = 'b_atoms'
# print(bp_list)
dist_load_names = []
atom_list_for_distance_measure = ["N1", "N3", "N7", "C2", "C4", "C5" "C6", "C8"]
for a, b in bp_list:
# print(a,b)
a_name_list = []
b_name_list = []
pymol.cmd.select(a_load_name, chain_load_name + ' and resi ' + a)
pymol.cmd.select(b_load_name, chain_load_name + ' and resi ' + b)
name_dict = { 'a_name_list' : [], 'b_name_list' : [] }
pymol.cmd.iterate(a_load_name, 'a_name_list.append(name)', space=name_dict)
pymol.cmd.iterate(b_load_name, 'b_name_list.append(name)', space=name_dict)
# print(name_dict)
if len(name_dict['a_name_list']) == 0 or len(name_dict['b_name_list']) == 0:
logger.warning('Skipping min_distance calculation. Check.')
logger.warning(r)
logger.warning(str(a) + ', ' + str(b))
continue
min_distance = 1000
min_a_name = ''
min_b_name = ''
a_single_name = 'a_atom'
b_single_name = 'b_atom'
time_ss = time.time()
for a_name in name_dict['a_name_list']:
# if not (a_name.startswith('C') or a_name.startswith('N')):
if a_name not in atom_list_for_distance_measure:
continue
for b_name in name_dict['b_name_list']:
# if not (b_name.startswith('C') or b_name.startswith('N')):
if b_name not in atom_list_for_distance_measure:
continue
pymol.cmd.select(a_single_name, a_load_name + ' and name ' + a_name)
pymol.cmd.select(b_single_name, b_load_name + ' and name ' + b_name)
distance = pymol.cmd.distance('dist', a_single_name, b_single_name)
if distance < min_distance:
min_distance = distance
min_a_name = a_name
min_b_name = b_name
pymol.cmd.delete('dist')
# print(min_distance, min_a_name, min_b_name)
time_dd = time.time() - time_ss
time_in_distance_calc += time_dd
pymol.cmd.select(a_single_name, a_load_name + ' and name ' + min_a_name)
pymol.cmd.select(b_single_name, b_load_name + ' and name ' + min_b_name)
dist_load_name = cluster_id + '_sub' + str(component_id + 1) + 'dist_' + min_a_name.replace("'", "p") + '_' + min_b_name.replace("'", "p")
pymol.cmd.distance(dist_load_name, a_single_name, b_single_name)
# pymol.cmd._do('hide labels, ' + dist_load_name)
pymol.cmd.hide('labels', dist_load_name)
# print(bp_line_color, dist_load_name)
pymol.cmd.color(bp_line_color, dist_load_name)
pymol.cmd.delete(a_single_name)
pymol.cmd.delete(b_single_name)
dist_load_names.append(dist_load_name)
# pymol.cmd._do('zoom')
pymol.cmd.zoom()
wait_for_certain_time_according_to_wait_factor(len(atom_list_for_distance_measure)) #remove after changing distance code
pymol.cmd.sync()
# time.sleep(.100)
pymol.cmd.png(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)
# time.sleep(.100)
wait_for_certain_files_to_be_generated([image_fname], False)
pymol.cmd.sync()
if save_pymol_session == True:
pymol.cmd.deselect()
session_fname = os.path.join(representative_dir, cluster_id + '-Sub' + str(component_id + 1) + '_repr.pse')
pymol.cmd._do('save ' + os.path.join(representative_dir, session_fname))
# time.sleep(.100)
wait_for_certain_files_to_be_generated([session_fname], False)
pymol.cmd.sync()
# sys.exit()
pymol.cmd.delete(a_load_name)
pymol.cmd.delete(b_load_name)
pymol.cmd.delete(other_bp_load_name)
pymol.cmd.delete(bp_atoms_load_name)
pymol.cmd.delete(cano_bp_load_name)
for item in dist_load_names:
pymol.cmd.delete(item)
config_pymol_cartoon(display_color, show_label)
pymol.cmd.hide()
# print('deleting ', a_load_name)
# print('deleting ', b_load_name)
# print('deleting ', other_bp_load_name)
# print('deleting ', bp_atoms_load_name)
# print('deleting ', cano_bp_load_name)
# print('deleting ', dist_load_names)
wait_for_certain_time_according_to_wait_factor(1)
pymol.cmd.sync()
# time.sleep(.100)
return time_in_distance_calc
def generate_table(summary_dir, subfamily_details_table_data, loop_type, is_latex=False):
    """Write the subfamily summary table for all motif families.

    Parameters
    ----------
    summary_dir : output directory for the table file.
    subfamily_details_table_data : dict mapping cluster_id to a pre-formatted
        row (list of str) as built by generate_subfamily_details_table_data.
    loop_type : loop-type label ('IL', 'HL', or a custom non-empty label)
        written once before the first data row.
    is_latex : if True, emit a LaTeX table* ('.txt'); otherwise a TSV ('.tsv').

    Relies on module-level threshold globals (rmsd_threshold_for_merging,
    align_len_zscore_threshold, connectivity_test_threshold,
    use_max_align_len_in_equation) for the caption text.
    """
    if is_latex == True:
        subfamily_tbl_fn = os.path.join(summary_dir, 'Subfamily_summary_table.txt')
    else:
        subfamily_tbl_fn = os.path.join(summary_dir, 'Subfamily_summary_table.tsv')
    fw = open(subfamily_tbl_fn, 'w')
    _write_subfamily_table_header(fw, is_latex)
    # Table Data: the loop-type label appears once, in front of the first row.
    if loop_type == 'IL':
        fw.write('Internal Loop (IL)')
    elif loop_type == 'HL':
        fw.write('Hairpin Loop (HL)')
    elif len(loop_type.strip()) > 0:
        fw.write(loop_type)
    priority_order = ["GNRA", "GNAA", "GNGA", "Kink-turn", "reverse-Kink-turn", "Sarcin-ricin", "C-loop", "E-loop", "Hook-turn", "Tandem-shear",
    "Tetraloop-receptor", "L1-complex", "T-loop", "Rope-sling"]
    for row in _rows_in_report_order(subfamily_details_table_data, priority_order):
        # The leading separator stands in for the (empty) loop-type column.
        if is_latex == True:
            fw.write(' & ')
            fw.write(' & '.join(row) + ' \\\\\n')
        else:
            fw.write('\t')
            fw.write('\t'.join(row) + '\n')
    # Blank spacer row after the data.
    if is_latex == True:
        fw.write('& & & & & & \\\\\n')
    else:
        fw.write('\n')
    if is_latex == True:
        # Table Ending
        fw.write('\\botrule\n')
        fw.write('\\end{tabular*}%\n')
        fw.write('}\n')
        fw.write('{}\n')
        fw.write('% {This is a table footnote}\n')
        fw.write('\\end{table*}\n')
    fw.close()
def _rows_in_report_order(subfamily_details_table_data, priority_order):
    """Return rows in report order: families with multiple subfamilies
    (comma in the count column, row[2]) come before single-subfamily
    families; within each group, priority families precede the rest."""
    ordered_rows = []
    for want_multiple in (True, False):
        for cluster_id in priority_order:
            if cluster_id in subfamily_details_table_data:
                row = subfamily_details_table_data[cluster_id]
                if (',' in row[2]) == want_multiple:
                    ordered_rows.append(row)
        for cluster_id in subfamily_details_table_data:
            if cluster_id not in priority_order:
                row = subfamily_details_table_data[cluster_id]
                if (',' in row[2]) == want_multiple:
                    ordered_rows.append(row)
    return ordered_rows
def _write_subfamily_table_header(fw, is_latex):
    """Write the table preamble/caption and the column-title rows."""
    if is_latex == True:
        fw.write('\\begin{table*}[b]\n')
        fw.write('\\tableparts{%\n')
        fw.write('\\caption{Subfamilies of the known motif families (Merging Threshold: RMSD = ' + str(rmsd_threshold_for_merging) + ', Align Len Z-Score = ' + str(align_len_zscore_threshold) + ', Connectivity = ' + str(connectivity_test_threshold) + ', Max_align_len_in_equation? = ' + str(use_max_align_len_in_equation) + ')}\n')
        fw.write('\\label{table:subfamily}%\n')
        fw.write('}{%\n')
        fw.write('\\begin{tabular*}{0.83\\textwidth}{@{}llclccc@{}}\n')
        fw.write('\\toprule\n')
        fw.write('% \\cline{1-7}\n')
        # Two-row column titles.
        fw.write('\\multirow{2}{*}{Loop Type} & \\multirow{2}{*}{Motif Family} & \n')
        fw.write('\\multirow{2}{*}{\\begin{tabular}[c]{@{}c@{}} No of \\\\ Motifs\\end{tabular}} & \n')
        fw.write('\\multirow{2}{*}{\\begin{tabular}[l]{@{}l@{}} Subfamily \\\\ Count (Sizes)\\end{tabular}} & \n')
        fw.write('\\multicolumn{3}{c}{Superimposition Avg. RMSD / Aligned Length} \\\\ \n')
        fw.write('& & & & No Ordering & Ordered & Subfamilies \\\\\n')
        fw.write('\\colrule\n')
    else:
        fw.write('Subfamilies of the known motif families (Merging Threshold: RMSD = ' + str(rmsd_threshold_for_merging) + ', Align Len Z-Score = ' + str(align_len_zscore_threshold) + ', Connectivity = ' + str(connectivity_test_threshold) + ')\n')
        fw.write('Loop Type \tMotif Family \tNo of Motifs \tSubfamily Count (Sizes) \tSuperimposition Avg. RMSD / Aligned Length\n')
        fw.write('\t\t\t\tNo Ordering \tOrdered \tSubfamilies\n')
def generate_subfamily_details_table_data(cluster_id, ordered_dependency_list, avg_rmsd_align_to, total_alignment_length_align_to, avg_rmsd, total_alignment_length, avg_rmsd_sufamily_only, total_alignment_length_subfamily_only, subfamily_details_table_data):
    """Build the summary-table row for one motif family and store it under
    cluster_id in subfamily_details_table_data (mutated in place).

    The row holds: display name, total loop count, subfamily count with
    per-subfamily sizes, and three 'avg RMSD / avg aligned length' columns
    (align-to-representative, ordered traversal, subfamily-internal edges).

    Bug fix: average lengths divide by alignment-pair counts; a family with a
    single loop (or only singleton subfamilies) has zero pairs, which is now
    reported as 0 instead of raising ZeroDivisionError.
    """
    subfamily_instance_count = []
    total_loop_count = 0
    for component_id, component in enumerate(ordered_dependency_list):
        subfamily_instance_count.append(str(len(component)))
        total_loop_count += len(component)
    instance_count_string = ','.join(subfamily_instance_count)
    # Prefer the human-readable family name/shortcode when known
    # (module-level lookup tables).
    cluster_full_name = cluster_id
    cluster_shortcode = cluster_id
    if cluster_id in known_motif_fullname:
        cluster_full_name = known_motif_fullname[cluster_id]
    if cluster_id in known_motif_shortcode:
        cluster_shortcode = known_motif_shortcode[cluster_id]
    cluster_name = cluster_full_name
    if cluster_full_name != cluster_shortcode:
        cluster_name += ' (' + cluster_shortcode + ')'
    # Pair counts for the averaging schemes; guard against zero pairs.
    pair_count = total_loop_count - 1
    subfamily_pair_count = total_loop_count - len(ordered_dependency_list)
    avg_alignment_length_align_to = total_alignment_length_align_to / pair_count if pair_count > 0 else 0.0
    avg_alignment_length = total_alignment_length / pair_count if pair_count > 0 else 0.0
    avg_alignment_length_subfamily_only = total_alignment_length_subfamily_only / subfamily_pair_count if subfamily_pair_count > 0 else 0.0
    row_items = []
    row_items.append(cluster_name)
    row_items.append(str(total_loop_count))
    row_items.append(str(len(ordered_dependency_list)) + ' (' + instance_count_string + ')')
    row_items.append("{:.3f}".format(round(avg_rmsd_align_to, 3)) + " / " + "{:.0f}".format(round(avg_alignment_length_align_to)))
    row_items.append("{:.3f}".format(round(avg_rmsd, 3)) + " / " + "{:.0f}".format(round(avg_alignment_length)))
    row_items.append("{:.3f}".format(round(avg_rmsd_sufamily_only, 3)) + " / " + "{:.0f}".format(round(avg_alignment_length_subfamily_only)))
    subfamily_details_table_data[cluster_id] = row_items
def generate_componentwise_analysis_files(superimposition_output_dir, cluster_id, ordered_dependency_list, load_id_dict, alignment_data, rmsd_data):
    """Write per-subfamily alignment/interaction text files for one family.

    For each component (subfamily) of cluster_id, four files are produced
    under <superimposition_output_dir>/componentwise_analysis: alignments
    only (f1), alignments with grouped interactions (f2), inter-subfamily
    alignments only (f3), and alignments with ungrouped interactions (f4).
    When the module-level generate_loop_source_info flag is set, two extra
    files summarizing each loop's source database (DeNovo/R3D/Both/N-A) are
    also written. No-op unless generate_bp_ann_files is True.
    """
    if generate_bp_ann_files == False:
        return
    output_dir = os.path.join(superimposition_output_dir, 'componentwise_analysis')
    create_directory(output_dir)
    if generate_loop_source_info == True:
        f_source1 = open(os.path.join(output_dir, str(cluster_id) + "_loop_source_data_summary.txt"), "w")
        f_source2 = open(os.path.join(output_dir, str(cluster_id) + "_loop_source_data_details.txt"), "w")
        source_dict = {}
    # rmsd_data[cluster_id] is a pair; only the per-pair RMSD dict is needed here.
    _, rmsd_data_list_dict = rmsd_data[cluster_id]
    # Cache of per-chain PDB residue maps, shared by the writer helpers below.
    pdb_res_map_dict = {}
    for component_id, component in enumerate(ordered_dependency_list):
        f1 = open(os.path.join(output_dir, str(cluster_id) + "_" + str(component_id+1) + "_alignments_only.txt"), "w")
        f2 = open(os.path.join(output_dir, str(cluster_id) + "_" + str(component_id+1) + "_alignments_with_grouped_interactions.txt"), "w")
        f3 = open(os.path.join(output_dir, str(cluster_id) + "_" + str(component_id+1) + "_inter_subfam_alignments_only.txt"), "w")
        f4 = open(os.path.join(output_dir, str(cluster_id) + "_" + str(component_id+1) + "_alignments_with_interactions.txt"), "w")
        if generate_loop_source_info == True:
            # Reset the per-component source tally.
            source_dict['DeNovo'] = []
            source_dict['R3D'] = []
            source_dict['Both'] = []
            source_dict['N/A'] = []
        is_first = True
        for component_loop_id, ((i, r1), parent) in enumerate(component):
            if generate_loop_source_info == True:
                source_dict[get_loop_cluster_source(r1)].append(r1)
            if is_first == True:
                is_first = False
                # First loop of the component: its parent (if any) is an
                # inter-subfamily edge, recorded in f3 only.
                if parent != None:
                    (j, r2) = parent
                    parent_load_id, parent_loop_id = load_id_dict[(j, r2)]
                    load_id, loop_id = load_id_dict[(i, r1)]
                    # alignment_data is keyed target-node then mobile-node;
                    # note aln2/aln1 come back in swapped positions.
                    (t1, t2, zscore, cr1, cr2, aln2, aln1, score) = alignment_data[cluster_id][strToNode(r2)][strToNode(r1)]
                    rmsd = extract_rmsd_from_dict(rmsd_data_list_dict, i, r1, j, r2)
                    write_alignments_to_file(i, r1, j, r2, aln1, aln2, rmsd, parent_load_id, load_id, f3)
            else:
                # Non-first loops: parent is assumed non-None here (an
                # intra-subfamily edge) — TODO confirm; a None parent would
                # raise on the unpack below.
                (j, r2) = parent
                parent_load_id, parent_loop_id = load_id_dict[(j, r2)]
                load_id, loop_id = load_id_dict[(i, r1)]
                (t1, t2, zscore, cr1, cr2, aln2, aln1, score) = alignment_data[cluster_id][strToNode(r2)][strToNode(r1)]
                rmsd = extract_rmsd_from_dict(rmsd_data_list_dict, i, r1, j, r2)
                write_alignments_to_file(i, r1, j, r2, aln1, aln2, rmsd, parent_load_id, load_id, f1)
                ###########################################################
                # Parent (r2) side, then child (r1) side, in both the
                # grouped (f2) and ungrouped (f4) interaction files.
                write_alignments_with_interactions_to_file(parent_load_id, r2, aln2, rmsd, pdb_res_map_dict, f2)
                write_alignments_with_interactions_to_file(parent_load_id, r2, aln2, rmsd, pdb_res_map_dict, f4, False)
                f2.write('\n')
                f4.write('\n')
                write_alignments_with_interactions_to_file(load_id, r1, aln1, rmsd, pdb_res_map_dict, f2)
                write_alignments_with_interactions_to_file(load_id, r1, aln1, rmsd, pdb_res_map_dict, f4, False)
                # Visual separator between alignment pairs.
                f2.write('\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n')
                f2.write('\n\n\n')
                f4.write('\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n')
                f4.write('\n\n\n')
                ###########################################################
        f1.close()
        f2.close()
        f4.close()
        f3.close()
        if generate_loop_source_info == True:
            # Per-component source summary (counts) and details (loop ids).
            f_source1.write(str(cluster_id) + '_' + str(component_id+1) + ' loop source summary: \n')
            if len(source_dict['DeNovo']) > 0:
                f_source1.write('From DeNovo: ' + str(len(source_dict['DeNovo'])) + '\n')
            if len(source_dict['R3D']) > 0:
                f_source1.write('From R3D: ' + str(len(source_dict['R3D'])) + '\n')
            if len(source_dict['Both']) > 0:
                f_source1.write('From Both: ' + str(len(source_dict['Both'])) + '\n')
            if len(source_dict['N/A']) > 0:
                f_source1.write('N/A: ' + str(len(source_dict['N/A'])) + '\n')
            f_source1.write('Total: ' + str(len(component)) + '\n')
            f_source1.write('\n\n')
            f_source2.write(str(cluster_id) + '_' + str(component_id+1) + ' loop source details: (FASTA index)\n')
            if len(source_dict['DeNovo']) > 0:
                f_source2.write('From DeNovo: (' + str(len(source_dict['DeNovo'])) + ')\n' + ', '.join(source_dict['DeNovo']) + '\n')
            if len(source_dict['R3D']) > 0:
                f_source2.write('From R3D: (' + str(len(source_dict['R3D'])) + ')\n' + ', '.join(source_dict['R3D']) + '\n')
            if len(source_dict['Both']) > 0:
                f_source2.write('From Both: (' + str(len(source_dict['Both'])) + ')\n' + ', '.join(source_dict['Both']) + '\n')
            if len(source_dict['N/A']) > 0:
                f_source2.write('N/A: (' + str(len(source_dict['N/A'])) + ')\n' + ', '.join(source_dict['N/A']) + '\n')
            f_source2.write('Total: ' + str(len(component)) + '\n')
            f_source2.write('\n\n')
    if generate_loop_source_info == True:
        f_source1.close()
        f_source2.close()
def generate_componentwise_bp_annotation_files(subfamily_details_dir, cluster_id, ordered_dependency_list, load_id_dict):
    """Write per-subfamily base-pair annotation files plus one aggregated
    base-pair details file for the family cluster_id.

    For each component (subfamily), the '#info=basepair' section of every
    member loop's .smf file (under the module-level loop_dir) is parsed and
    counts per normalized base-pair type (orientation initial + sorted edge
    pair) are aggregated into <cluster_id>_bp_details.txt. When the
    module-level output_env is 'local', each member's annotated alignment is
    also written to the per-subfamily _bp_ann.txt file. No-op unless the
    generate_bp_ann_files flag is True.
    """
    if generate_bp_ann_files == False:
        return
    create_directory(subfamily_details_dir)
    # Cache of per-chain PDB residue maps used by the interaction writer.
    pdb_res_map_dict = {}
    fw_bp_details = open(os.path.join(subfamily_details_dir, str(cluster_id) + "_bp_details.txt"), "w")
    for component_id, component in enumerate(ordered_dependency_list):
        fw = open(os.path.join(subfamily_details_dir, str(cluster_id) + "-Sub" + str(component_id+1) + "_bp_ann.txt"), "w")
        fw_bp_details.write(str(cluster_id) + "-Sub" + str(component_id+1) + ':\n')
        fw_bp_details.write('Total motifs: ' + str(len(component)) + '\n')
        # bp_dict: normalized base-pair type -> per-loop occurrence counts.
        bp_dict = {}
        for component_loop_id, ((i, r1), parent) in enumerate(component):
            load_id, loop_id = load_id_dict[(i, r1)]
            # Read the loop's annotation (.smf) file.
            f_loop = open(os.path.join(loop_dir, r1 + ".smf"))
            loop_info_lines = f_loop.readlines()
            f_loop.close()
            # Count this loop's base pairs, restricted to the basepair
            # section (between '#info=basepair' and '#info=stacking').
            t_bp_dict = {}
            in_section = False
            for line in loop_info_lines:
                if line.startswith('#info=basepair'):
                    in_section = True
                elif line.startswith('#info=stacking'):
                    in_section = False
                    break
                elif in_section == True:
                    _, bp, orientation = line.strip().split(',')
                    # Sort the two edges so e.g. 'S/H' and 'H/S' are counted
                    # as the same pair type; prefix the orientation initial.
                    bp_edges = bp.strip().split('/')
                    bp_edges.sort()
                    bp = '/'.join(bp_edges)
                    if len(orientation) > 0:
                        bp = orientation[0] + bp
                    if bp not in t_bp_dict:
                        t_bp_dict[bp] = 0
                    t_bp_dict[bp] += 1
            # Merge this loop's counts into the subfamily aggregation.
            for bp in t_bp_dict:
                if bp not in bp_dict:
                    bp_dict[bp] = []
                bp_dict[bp].append(t_bp_dict[bp])
            if output_env == 'local':
                fw.write(str(load_id) + "\n")
                write_alignments_with_interactions_to_file("", r1, "", 0.0, pdb_res_map_dict, fw, False)
                fw.write('\n\n')
        # BUG FIX: the per-subfamily file was previously never closed,
        # leaking one file handle per component.
        fw.close()
        for bp in sorted(bp_dict):
            fw_bp_details.write(bp + ' (' + str(len(bp_dict[bp])) + ' motifs, ' + str(sum(bp_dict[bp])) + ' occurences): ' + ','.join(map(lambda x: str(x), bp_dict[bp])) + '\n')
        fw_bp_details.write('\n')
    fw_bp_details.close()
# return ordered_dependency_list_with_bp_ann
# def generate_componentwise_bp_annotation_files(cluster_id, ordered_dependency_list, load_id_dict):
# if generate_bp_ann_files == False:
# return ordered_dependency_list
# # mapping_dir = pdb_fasta_mapping_dir
# # loop_dir = loop_dir
# output_dir = os.path.join(superimposition_output_dir, 'subfamilywise_bp_ann')
# create_directory(output_dir)
# ordered_dependency_list_with_bp_ann = []
# pdb_res_map_dict = {}
# for component_id, component in enumerate(ordered_dependency_list):
# new_component = []
# fw = open(os.path.join(output_dir, str(cluster_id) + "_" + str(component_id+1) + ".txt"), "w")
# for component_loop_id, ((i, r1), parent) in enumerate(component):
# load_id, loop_id = load_id_dict[(i, r1)]
# f_loop = open(os.path.join(loop_dir, r1 + ".smf"))
# loop_info_lines = f_loop.readlines()
# f_loop.close()
# fw.write(str(load_id) + "\t" + r1 + "\t\t")
# pdb_chain, loop_regions = r1.strip().split(":")
# if pdb_chain not in pdb_res_map_dict:
# pdb_res_map_dict[pdb_chain] = load_pdb_res_map(pdb_chain)
# pdb_res_map = pdb_res_map_dict[pdb_chain]
# loop_regions = loop_regions.strip().split("_")
# loop_regions_seq = "".join(loop_info_lines[1].strip().split("..."))
# loop_regions_pdb_ind = []
# # index_residue_dict = {}
# pdb_index_list = []
# for part, region in enumerate(loop_regions):
# region_pdb_index = []
# s, e = region.strip().split("-")
# s = int(s)
# e = int(e)
# for fasta_indx in range(s, e+1):
# pdb_index_list.append(pdb_res_map[fasta_indx][1])
# start_ind = str(pdb_res_map[s][1])
# if len(pdb_res_map[s][2]) > 0:
# start_ind += '.' + str(pdb_res_map[s][2])
# end_ind = pdb_res_map[e][1]
# if len(pdb_res_map[e][2]) > 0:
# end_ind += '.' + str(pdb_res_map[e][2])
# loop_regions_pdb_ind.append(start_ind + "-" + end_ind)
# index_residue_dict = dict(zip(pdb_index_list, loop_regions_seq))
# r1_pdb_ind = pdb_chain + ":" + "_".join(loop_regions_pdb_ind)
# fw.write(r1_pdb_ind + "\n")
# bp_ann_list = []
# for line in loop_info_lines:
# if line.startswith(">"):
# continue
# elif line.startswith("#info=stacking"):
# break
# else:
# pieces = line.strip().split(",")
# if len(pieces) == 3:
# a, b = pieces[0].strip().split("-")
# a = pdb_index_list[int(a)]
# b = pdb_index_list[int(b)]
# bp = a + "-" + b
# line = bp + "," + index_residue_dict[a] + "-" + index_residue_dict[b] + "," + pieces[1] + "," + pieces[2] + "\n"
# bp_ann_list.append(((a, b), index_residue_dict[a] + "-" + index_residue_dict[b], pieces[1], pieces[2]))
# fw.write(line)
# if "..." in line:
# print_a_dict_sorted(index_residue_dict, fw, separator=": ")
# new_component.append(((i, r1), parent, index_residue_dict, bp_ann_list))
# fw.write("\n")
# fw.close()
# ordered_dependency_list_with_bp_ann.append((component_id, new_component))
# return ordered_dependency_list_with_bp_ann
def get_rmsd_align_len(i, r1, j, r2, cluster_pairwise_alignment_details):
    """Look up the (rmsd, align_length) recorded for loop (i, r1) against
    loop (j, r2); returns (0, 0) when no such pairing is recorded."""
    _, pairwise_align_details = cluster_pairwise_alignment_details[(i, r1)]
    match = next(
        (
            (rmsd_val, aln_len)
            for cand_j, cand_r2, rmsd_val, aln_len in pairwise_align_details
            if cand_j == j and cand_r2 == r2
        ),
        None,
    )
    return match if match is not None else (0, 0)
def get_family_rmsd_and_alignment_summary(ordered_dependency_list, cluster_pairwise_alignment_details, subfamily_only = False):
    """Collect (rmsd, align_len) for every parent edge in the dependency
    forest and return their weighted-average summary.

    When subfamily_only is True, the first loop of each component is skipped
    so that inter-subfamily edges are excluded from the statistics.
    """
    pair_stats = []
    for component in ordered_dependency_list:
        for position, ((i, r1), parent) in enumerate(component):
            if parent is None:
                continue
            j, r2 = parent
            # First member's edge is inter-subfamily; include it only when
            # subfamily_only is off.
            if position > 0 or not subfamily_only:
                pair_stats.append(get_rmsd_align_len(i, r1, j, r2, cluster_pairwise_alignment_details))
    return get_weighted_avg_rmsd(pair_stats)
def similar_to_existing_color(subfamily_colors_rgb, new_color):
    """Return True when new_color is near-identical to ANY color already in
    subfamily_colors_rgb (every channel within 0.01 of it), else False.

    Bug fix: the original's `return` sat outside the loop, so only the LAST
    color's verdict was ever returned (similarity to earlier colors was
    silently ignored) and an empty list raised NameError. The check now
    short-circuits to True on the first similar color and returns False for
    an empty list.
    """
    for color in subfamily_colors_rgb:
        # Channel-wise comparison: any channel differing by more than 0.01
        # makes this pair dissimilar.
        if all(abs(existing - candidate) <= 0.01 for existing, candidate in zip(color, new_color)):
            return True
    return False
def get_sub_family_colors(max_component_count = 100):
    """Return parallel color palettes for up to max_component_count subfamilies.

    Returns
    -------
    (colors_for_pymol, colors_for_pyplot) : the first list holds PyMOL color
    names (str) for the predefined palette and [r, g, b] lists for generated
    colors; the second holds the same entries, with generated colors converted
    to (r, g, b) tuples as pyplot expects.
    """
    # Named colors understood by both PyMOL and pyplot, used first.
    subfamily_colors_by_name = ['red', 'green', 'blue', 'cyan', 'brown', 'lightpink', 'magenta', 'wheat', 'teal', 'orange', 'purple', 'lightblue', 'violet', 'darksalmon', 'olive', 'yellow']
    subfamily_colors_dict = {'red': [1.0, 0.0, 0.0],
    'green': [0.0, 1.0, 0.0],
    'blue': [0.0, 0.0, 1.0],
    'cyan': [0.0, 1.0, 1.0],
    'brown': [0.65, 0.32, 0.17],
    'lightpink': [1.00, 0.75, 0.87],
    'purple': [0.75, 0.00, 0.75],
    'wheat': [0.99, 0.82, 0.65],
    'teal': [0.00, 0.75, 0.75],
    'orange': [1.0, 0.5, 0.0],
    'lightblue': [0.75, 0.75, 1.0],
    'violet': [1.0, 0.5, 1.0],
    'magenta': [1.0, 0.0, 1.0],
    'darksalmon': [0.73, 0.55, 0.52],
    'olive': [0.77, 0.70, 0.00],
    'yellow': [1.0, 1.0, 0.0]}
    subfamily_colors_rgb = [subfamily_colors_dict[color_name] for color_name in subfamily_colors_by_name]
    # Fixed seed keeps the generated palette deterministic across runs.
    random.seed(3)
    for _ in range(max_component_count * 10):
        if len(subfamily_colors_rgb) >= max_component_count:
            break
        rand_ind1 = random.randint(0, len(subfamily_colors_rgb) - 1)
        rand_ind2 = random.randint(0, len(subfamily_colors_rgb) - 1)
        rand_ind3 = random.randint(0, len(subfamily_colors_rgb) - 1)
        # Mix three random existing colors to produce a candidate color.
        new_color = [(x + y + z)/3.0 for x, y, z in zip(subfamily_colors_rgb[rand_ind1], subfamily_colors_rgb[rand_ind2], subfamily_colors_rgb[rand_ind3])]
        if not similar_to_existing_color(subfamily_colors_rgb, new_color):
            subfamily_colors_rgb.append(new_color)
            subfamily_colors_by_name.append(new_color)
    # If still short of max_component_count, fall back to random colors.
    for _ in range(max_component_count * 100):
        if len(subfamily_colors_rgb) >= max_component_count:
            break
        new_color = [random.random(), random.random(), random.random()]
        if not similar_to_existing_color(subfamily_colors_rgb, new_color):
            subfamily_colors_rgb.append(new_color)
            subfamily_colors_by_name.append(new_color)
    # pyplot variant: generated [r, g, b] lists become (r, g, b) tuples.
    # BUG FIX: the original compared type(item) == 'list' (a type against a
    # string), which is never true, so the tuple conversion never happened.
    subfamily_colors_tuple = []
    for item in subfamily_colors_by_name:
        if isinstance(item, list) and len(item) == 3:
            subfamily_colors_tuple.append((item[0], item[1], item[2]))
        else:
            subfamily_colors_tuple.append(item)
    return subfamily_colors_by_name, subfamily_colors_tuple
def get_component_graph(component_features, load_id_dict, color):
    """Build a plottable (cycle_nodes, other_nodes, weighted_edges, color)
    tuple for one subfamily component, using short display names of the form
    '<family shortcode>_<loop id>'."""
    _, original_cycle_nodes, component_nodes, directed_adjacency = component_features
    def short_name(node):
        # Display name: family shortcode + loop id, derived from the load id.
        family_name, loop_id = separate_family_name_and_loop_id(load_id_dict[node][0])
        return get_motif_family_short_code(family_name) + '_' + loop_id
    cycle_node_list = []
    uncycled_node_list = []
    for node in component_nodes:
        bucket = cycle_node_list if node in original_cycle_nodes else uncycled_node_list
        bucket.append(short_name(node))
    edge_list = []
    for src in directed_adjacency:
        for dst in directed_adjacency[src]:
            # Edge weight: first element of the adjacency payload, rounded.
            edge_list.append((short_name(src), short_name(dst), round(directed_adjacency[src][dst][0], 2)))
    return (cycle_node_list, uncycled_node_list, edge_list, color)
def generate_subfamily_image(image_file_list, pdb_organism_details, cluster_id, subfamily_dir, draw_figures, suffix = '', is_graph_image=False, show_pdb_info=False, show_image_caption=True):
    """Assemble the given per-subfamily images into one collage PNG under
    subfamily_dir; does nothing when draw_figures is False."""
    if draw_figures == False:
        return
    create_directory(subfamily_dir)
    # Optional suffix distinguishes collage variants for the same family.
    if len(suffix) > 0:
        collage_name = str(cluster_id) + '_' + suffix + '.png'
    else:
        collage_name = str(cluster_id) + '.png'
    create_collage(image_file_list, pdb_organism_details, os.path.join(subfamily_dir, collage_name), show_pdb_info, is_graph_image, show_image_caption)
def get_index_list(loop):
    """Parse a loop id like 'PDB_CHAIN:10-20_30-40' into a list of integer
    (start, end) segment tuples, e.g. [(10, 20), (30, 40)]."""
    region_part = loop.strip().split(':')[1]
    segment_bounds = (segment.strip().split('-') for segment in region_part.strip().split('_'))
    return [(int(start), int(end)) for start, end in segment_bounds]
def union_segments(r1_union_ind_list, r1_ind_list, cr1_ind_list):
    """Grow each running-union segment in r1_union_ind_list to cover the
    parts of the corresponding r1 segment that overlap any cr1 segment.

    r1_union_ind_list holds one (start, end) entry per r1 segment; it is
    updated in place and also returned. Exits the process on a reversed
    (start > end) input range.
    """
    for index, (a, b) in enumerate(r1_ind_list):
        if a > b:
            logger.error('ERROR: Range is reversed (' + str(a) + '-' + str(b) + ')')
            sys.exit()
        for (c, d) in cr1_ind_list:
            if c > d:
                logger.error('ERROR: Range is reversed (' + str(c) + '-' + str(d) + ')')
                sys.exit()
            if c <= b and d >= a: # segments overlap
                overlap_start = max(a, c)
                overlap_end = min(b, d)
                current_start, current_end = r1_union_ind_list[index]
                # Widen the union entry to include this overlap.
                r1_union_ind_list[index] = (min(current_start, overlap_start), max(current_end, overlap_end))
    return r1_union_ind_list
def generate_loop_boundary(items, cluster_alignment_data):
    """Compute, per loop, the sub-boundary actually covered by alignments.

    Parameters
    ----------
    items : dict mapping (i, loop_id) -> (avg_rmsd, pairwise_align_details);
        loops are processed in increasing avg_rmsd order.
    cluster_alignment_data : nested dict keyed by target node then mobile
        node, whose tuple's 4th element (cr1) is the aligned sub-region of
        the target loop.

    Returns (loop_boundary, loop_boundary_original): the aligned-union
    segment lists and the full parsed segment lists, both keyed by (i, loop).
    """
    loop_boundary = {}
    loop_boundary_original = {}
    for (i, r1) in sorted(items, key=lambda x: items[x][0]):
        r1_ind_list = get_index_list(r1)
        loop_boundary_original[(i, r1)] = r1_ind_list
        if len(items) > 1:
            # Seed each union entry REVERSED as (end, start) so the first
            # overlap always replaces both bounds; entries that never see an
            # overlap stay reversed and are filtered out below.
            r1_union_ind_list = []
            for (a, b) in r1_ind_list:
                r1_union_ind_list.append((b, a))
            target_loop = strToNode(r1)
            _, pairwise_align_details = items[(i, r1)]
            for (j, r2, _, _) in pairwise_align_details:
                mobile_loop = strToNode(r2)
                # cr1 is the target-side aligned region of this pair.
                (_, _, _, cr1, cr2, _, _, _) = cluster_alignment_data[target_loop][mobile_loop]
                cr1_ind_list = get_index_list(cr1)
                r1_union_ind_list = union_segments(r1_union_ind_list, r1_ind_list, cr1_ind_list)
            # Keep only entries that were touched by at least one alignment.
            r1_final_union_ind_list = []
            for (a, b) in r1_union_ind_list:
                if a <= b:
                    r1_final_union_ind_list.append((a,b))
            loop_boundary[(i, r1)] = r1_final_union_ind_list
        else:
            # Single loop: no alignments exist, use the full boundary.
            loop_boundary[(i, r1)] = r1_ind_list
    return loop_boundary, loop_boundary_original
def get_loop_boundary_pdb_index(loop_boundary_fasta):
    """Translate FASTA-index loop boundaries into PDB residue-map entries.

    FASTA indices absent from the chain's residue map are silently skipped.
    """
    loop_boundary_pdb = {}
    for key in loop_boundary_fasta:
        _, r1 = key
        # One residue map per loop's chain (chain id precedes the ':').
        pdb_res_map = load_pdb_res_map(r1.strip().split(':')[0])
        mapped_residues = []
        for (start, end) in loop_boundary_fasta[key]:
            for fasta_ind in range(start, end + 1):
                if fasta_ind in pdb_res_map:
                    mapped_residues.append(pdb_res_map[fasta_ind])
        loop_boundary_pdb[key] = mapped_residues
    return loop_boundary_pdb
def reset_pymol():
    """Return PyMOL to a clean, empty session with a white background.

    Syncs first so pending commands finish before everything is deleted.
    """
    pymol.cmd.sync()
    pymol.cmd.deselect()
    pymol.cmd.delete('all')
    pymol.cmd.reinitialize()
    pymol.cmd.bg_color('white')
def load_pdb_in_pymol(partial_pdbx_dir, pdb_chain, pdb_align_ind_list, pdb_disp_ind_list, backbone_atom_list, sugar_atom_list, load_name_id, is_cif, loop_name):
    """Load one loop's structure into PyMOL and build its named selections.

    Creates (all suffixed with load_name_id): the loaded object ('pdb_'),
    the chain selection ('rna_'), the alignment-residue selection
    ('target_'), and the display-residue selection ('display_target_').
    Everything is hidden after loading. Returns those four names plus the
    centroid of the alignment residues' backbone/sugar coordinates.
    """
    pdb_id, chain_id = pdb_chain.strip().split('_')
    pdb_load_name = 'pdb_' + str(load_name_id)
    chain_load_name = 'rna_' + str(load_name_id)
    target_load_name = 'target_' + str(load_name_id)
    display_load_name = 'display_target_' + str(load_name_id)
    if is_cif:
        # Partial CIF files are stored per-loop, keyed by the normalized
        # loop name rather than the bare PDB id.
        loop_name = str(strToNode(loop_name))
        pymol.cmd.load(os.path.join(partial_pdbx_dir, loop_name+'.cif'), pdb_load_name)
    else:
        pymol.cmd.load(os.path.join(partial_pdbx_dir, pdb_id+'.pdb'), pdb_load_name)
    pymol.cmd.select(chain_load_name, pdb_load_name + ' and chain %s' % chain_id)
    # Residue selections are built as 'resi A or resi B or ...' strings.
    pymol.cmd.select(target_load_name, chain_load_name + ' and (%s)' % ' or '.join(list(map(lambda x: 'resi '+x, pdb_align_ind_list))))
    pymol.cmd.select(display_load_name, chain_load_name + ' and (%s)' % ' or '.join(list(map(lambda x: 'resi '+x, pdb_disp_ind_list))))
    pymol.cmd.hide('everything', pdb_load_name)
    centroid = get_centroid_of_loop(target_load_name, pdb_align_ind_list, backbone_atom_list, sugar_atom_list)
    return pdb_load_name, chain_load_name, target_load_name, display_load_name, centroid
# return test, pdb_load_name
def get_ordered_loop_coordinates(coordinates, atom_list, res, target_load_name):
    """Append the mean position of residue res's atoms (atom_list[res]) in
    the PyMOL selection target_load_name to coordinates.

    Uses the PyMOL `stored` scratch object to collect atom positions; if
    none of the listed atoms are found, nothing is appended.
    """
    stored.sel = []
    for atom in atom_list[res]:
        pymol.cmd.select('t', target_load_name + ' and resi %s and name %s' % (res, atom))
        pymol.cmd.iterate_state(1, 't', 'stored.sel.append([x,y,z])')
    if len(stored.sel) > 0:
        # Average of the collected atom positions for this residue.
        coordinates.append(numpy.sum(stored.sel, axis=0)/float(len(stored.sel)))
def get_ordered_loop_backbone_coordinates(target_load_name, pdb_align_ind_list, backbone_atom_list, sugar_atom_list):
    """Collect the averaged backbone and sugar coordinates of each residue,
    in residue order, from the PyMOL selection target_load_name."""
    collected = []
    for residue in pdb_align_ind_list:
        # Backbone first, then sugar, for every residue.
        for atom_groups in (backbone_atom_list, sugar_atom_list):
            get_ordered_loop_coordinates(collected, atom_groups, residue, target_load_name)
    return collected
def get_centroid_of_loop(target_load_name, pdb_align_ind_list, backbone_atom_list, sugar_atom_list):
    """Mean position of the loop's ordered backbone/sugar coordinate set."""
    coords = get_ordered_loop_backbone_coordinates(target_load_name, pdb_align_ind_list, backbone_atom_list, sugar_atom_list)
    return numpy.sum(coords, axis=0) / float(len(coords))
def get_seqnums_from_indices(indices):
    """Convert (chain, seqnum, insertion_code) triples into PyMOL-style
    'seqnum+icode' residue identifier strings."""
    return [seqnum + icode for _, seqnum, icode in indices]
def load_one_pdb_in_pymol(partial_pdbx_dir, i, r1, loop_boundary_dict, loop_boundary_original_dict, load_name_id, is_cif, load_color):
    """Load loop r1 into PyMOL, color its chain, and return its load info.

    The alignment residues come from loop_boundary_dict (the aligned-union
    boundary) and the display residues from loop_boundary_original_dict
    (the full boundary). Returns the tuple consumed by the superimposition
    steps: (pdb_load_name, chain_load_name, target_load_name,
    display_load_name, centroid, pdb_chain, r1, load_name_id).
    """
    pdb_chain = r1.strip().split(':')[0]
    align_target = get_seqnums_from_indices(loop_boundary_dict[(i, r1)])
    align_mobile = get_seqnums_from_indices(loop_boundary_original_dict[(i, r1)])
    # Per-residue atom name lists used for centroid/backbone extraction.
    backbone_atom_list = {}
    sugar_atom_list = {}
    for index in align_target:
        backbone_atom_list[index], sugar_atom_list[index] = get_backbone_and_sugar_atoms()
    pdb_load_name, chain_load_name, target_load_name, display_load_name, centroid = load_pdb_in_pymol(partial_pdbx_dir, pdb_chain, align_target, align_mobile, backbone_atom_list, sugar_atom_list, load_name_id, is_cif, r1)
    pymol.cmd.sync()
    pymol.cmd.color(load_color, chain_load_name)
    return (pdb_load_name, chain_load_name, target_load_name, display_load_name, centroid, pdb_chain, r1, load_name_id)
def translate_coords(pdb_data, target_load_name, pdb_align_ind_list, backbone_atom_list, sugar_atom_list, centroid):
    """Center both the loop's backbone coordinate set and the full structure
    on the given centroid; returns (centered_backbone_coords, centered_pdb)."""
    backbone_coords = get_ordered_loop_backbone_coordinates(target_load_name, pdb_align_ind_list, backbone_atom_list, sugar_atom_list)
    # Subtracting the numpy centroid promotes the coordinate list to an array.
    centered_backbone = backbone_coords - centroid
    centered_pdb = pdb_data - centroid
    return centered_backbone, centered_pdb
def alter_structure(pdb_translated, pdb_load_name):
    """Write the translated coordinates back into the loaded PyMOL object.

    PyMOL's alter_state pops one [x, y, z] entry per atom from
    stored.res_list, so the list order must match the object's atom order.
    """
    stored.res_list = pdb_translated.tolist()
    pymol.cmd.alter_state(1, pdb_load_name, '(x,y,z)=stored.res_list.pop(0)')
def translate_and_show_single_loop(pymol_load_info, align_target, disp_target, load_id, image_fname, show_extended_loop, show_label, display_color = 'gray', align_color = 'red'):
    """Center one loaded loop on its centroid and (optionally) render it.

    Translates the whole structure so the loop centroid sits at the origin,
    then either renders and saves a PNG (when the module-level
    draw_input_images flag is True) or just applies the colors. Returns the
    display-related load names plus an updated load-info tuple whose
    centroid is reset to the origin (np.zeros(3)), reflecting the translation.
    """
    pdb_load_name, chain_load_name, target_load_name, display_load_name, centroid, pdb_chain, lp, load_name_id = pymol_load_info
    # Convert residue-index triples into PyMOL 'resi' identifier strings.
    disp_target = get_seqnums_from_indices(disp_target)
    align_target = get_seqnums_from_indices(align_target)
    pdb_data = get_pdb_coordinates(pdb_load_name)
    backbone_atom_list = {}
    sugar_atom_list = {}
    for index in align_target:
        backbone_atom_list[index], sugar_atom_list[index] = get_backbone_and_sugar_atoms()
    # Translate coordinates around the centroid (used later to find the
    # superimposition matrix), then push them back into the PyMOL object.
    sel, pdb_translated = translate_coords(pdb_data, target_load_name, align_target, backbone_atom_list, sugar_atom_list, centroid)
    alter_structure(pdb_translated, pdb_load_name)
    if draw_input_images == True:
        show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, target_load_name, image_fname, show_extended_loop, show_label, "C2'", display_color, align_color)
    else:
        set_loop_color(display_color, align_color, display_load_name, target_load_name)
    return (display_load_name, target_load_name, chain_load_name), (pdb_load_name, chain_load_name, target_load_name, display_load_name, np.zeros(3), pdb_chain, lp, load_name_id)
def config_pymol_cartoon(display_color, show_label):
    """Tune PyMOL cartoon ring rendering around residue labels.

    With labels shown, base rings are drawn semi-transparent (mode 1,
    transparency 0.5) so labels stay readable; otherwise ring drawing is
    disabled. display_color is currently unused here — kept for call-site
    compatibility.
    """
    if show_label:
        pymol.cmd.set(name='cartoon_ring_mode',value=1,quiet=1)
        pymol.cmd.set(name='cartoon_ring_transparency',value=0.5,quiet=1)
    else:
        pymol.cmd.set(name='cartoon_ring_mode',value=0,quiet=1)
    pass
def set_loop_color(display_color, align_color, display_load_name, align_load_name):
    """Color the displayed loop and its aligned region in PyMOL.

    Each color may be a PyMOL color name or an [r, g, b] triple; triples are
    first registered as a named custom color ('<load_name>_color').
    """
    def _as_color_name(color, load_name):
        # Register an RGB triple as a named color; pass names through as-is.
        if type(color) is list and len(color) == 3:
            named = load_name + '_color'
            pymol.cmd.set_color(named, color)
            return named
        return color

    pymol.cmd.color(_as_color_name(display_color, display_load_name), display_load_name)
    pymol.cmd.color(_as_color_name(align_color, align_load_name), align_load_name)
def show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, label_atom, display_color = 'gray', align_color = 'red'):
    """Color, optionally label, show and render one loop, saving a 1200x1200 PNG.

    chain_load_name: PyMOL object holding the whole chain (shown when
    show_extended_loop is True). display_load_name: the loop itself.
    align_load_name: the aligned region, drawn in align_color.
    label_atom: atom name used to anchor residue labels (e.g. "C2'").
    """
    set_loop_color(display_color, align_color, display_load_name, align_load_name)
    if show_label:
        # Label residues as "<resn>-<resi>" anchored on the chosen atom.
        pymol.cmd.label(display_load_name + " and name " + label_atom, "'%s-%s' %(resn, resi)")
    config_pymol_cartoon(display_color, show_label)
    # else:
    #     pymol.cmd.label(display_load_name + " and name " + "dummy", "'%s-%s' %(resn, resi)")
    if show_extended_loop:
        pymol.cmd.show('cartoon', chain_load_name)
        # print('showing cartoon')
    else:
        pymol.cmd.show('cartoon', display_load_name)
    # pymol.cmd._do('zoom')
    # Sync around zoom and render so the PNG captures the final scene state.
    pymol.cmd.sync()
    pymol.cmd.zoom()
    # pymol.commanding.sync()
    pymol.cmd.sync()
    # pymol.cmd._do('set ray_opaque_background, 0')
    # pymol.cmd.set(name='ray_opaque_background',value=0,quiet=1)
    pymol.cmd.png(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)
    # pymol.commanding.sync()
    # wait_for_certain_files_to_be_generated([image_fname], True)
    pymol.cmd.sync()
def get_rotatation_matrix(axis = 'x', angle = 0):
    """Return the 3x3 rotation matrix for a rotation of `angle` degrees
    about `axis` ('x', 'y' or 'z', case-insensitive).

    Defaults give the identity matrix. Exits the program on an invalid axis.
    """
    cos_t = math.cos(math.radians(angle))
    sin_t = math.sin(math.radians(angle))
    axis_key = axis.lower()
    if axis_key == 'x':
        rows = [[1.0, 0.0, 0.0],
                [0.0, cos_t, -sin_t],
                [0.0, sin_t, cos_t]]
    elif axis_key == 'y':
        rows = [[cos_t, 0.0, sin_t],
                [0.0, 1.0, 0.0],
                [-sin_t, 0.0, cos_t]]
    elif axis_key == 'z':
        rows = [[cos_t, -sin_t, 0.0],
                [sin_t, cos_t, 0.0],
                [0.0, 0.0, 1.0]]
    else:
        logger.error('Invalid axis. Please choose x, y, or z.')
        sys.exit()
    return np.array(rows)
def get_multiple_orientation_rotation_matrices():
    """Build the list of view orientations to render.

    Always contains the identity orientation; when the module-level flag
    `generate_multiple_orientation` is set, a second tilted view
    (45 degrees about x, then 45 about y) is added.
    """
    matrices = [get_rotatation_matrix()]
    if generate_multiple_orientation:
        tilted = numpy.dot(get_rotatation_matrix('x', 45),
                           get_rotatation_matrix('y', 45))
        matrices.append(tilted)
    return matrices
# def get_rotation_matrices():
# rotation_matrices = []
# # rotation_matrices.append(get_rotatation_matrix())
# rotation_start, rotation_end, step = rotation_angle_start_end_step
# for angle_x in range(rotation_start, rotation_end, step):
# for angle_y in range(rotation_start, rotation_end, step):
# for angle_z in range(rotation_start, rotation_end, step):
# rotation_matrices.append(numpy.dot(get_rotatation_matrix('x', angle_x), get_rotatation_matrix('y', angle_y), get_rotatation_matrix('z', angle_z)))
# return rotation_matrices
# def generate_matrices_and_loop_rotations(ordered_dependency_list):
# first_motif = None
# if len(ordered_dependency_list) > 0 and len(ordered_dependency_list[0]) > 0:
# first_motif = ordered_dependency_list[0][0][1]
# else:
# logger.warning('No motif found in component.')
# sys.exit()
# rotation_matrices = get_rotation_matrices()
# for v, rotation_matrix in enumerate(rotation_matrices):
# rotation_version = 'rotation_v' + str('{:05d}'.format(v+1))
# ### Rotate, group and show the subfamilies
# pymol.cmd._do('hide all')
# pymol.commanding.sync()
# time.sleep(.100)
# pdb_load_name1, chain_load_name1, target_load_name1, display_load_name1, centroid1, pdb_chain1, lp1, load_name_id = pymol_load_info_dict[(i, r1)]
# ### superimposition subfamilies
# # file_name = os.path.join(superimposition_output_dir, str(cluster_id) + '_' + str(component_id + 1) + '_' + str(component_loop_id + 1) + '_' + str(family_loop_id))
# image_fname = os.path.join(rotated_loop_image_dir, rotation_version + '_' + load_name_id + '_3.png')
# text_fname = os.path.join(superimposition_output_dir, str(cluster_id) + '_' + str(component_id + 1) + '_' + str(component_loop_id + 1) + '_' + str(family_loop_id) + '.txt')
# # Draw the first loop of the first component independent of any other loops
# if component_id == 0 and component_loop_id == 0:
# if draw_pymol_figure:
# display_load_name, align_load_name, chain_load_name = loop_display_info_dict[(i,r1)]
# rotate_first_loop(pymol_load_info_dict[(i, r1)], rotation_matrix)
# show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_label, "C2'", 'gray', subfamily_colors[component_id])
def get_pdb_coordinates(pdb_load_name):
    # Pull the [x, y, z] coordinates of every atom of the given PyMOL object
    # (state 1) into a plain Python list via PyMOL's `stored` scratch space.
    stored.pdb = []
    pymol.cmd.iterate_state(1, pdb_load_name, 'stored.pdb.append([x,y,z])')
    return stored.pdb
# Rotate the first loop of the cluster to define the orientation
def rotate_first_loop(pymol_load_info, rotation_matrix):
    """Re-orient the cluster's reference loop.

    Centres the loaded structure's coordinates on its centroid, applies
    `rotation_matrix`, and writes the new coordinates back into the PyMOL
    object named in `pymol_load_info`.
    """
    (pdb_load_name, _chain_name, _target_name, _display_name,
     centroid, _pdb_chain, _lp, _load_name_id) = pymol_load_info
    coords = get_pdb_coordinates(pdb_load_name)
    rotated = numpy.dot(coords - centroid, rotation_matrix)
    alter_structure(rotated, pdb_load_name)
def write_pdb_organisgm_details(pdb_organism_details, loop, fp):
    """Write the tab-separated organism annotation of `loop`'s PDB chain to
    `fp` (no trailing newline).

    Writes "chain\\tRNA_type\\torganism\\tclass\\ttype\\tsource\\t" when the
    chain is annotated, otherwise the chain followed by six empty fields.
    """
    pdb_chain = loop.strip().split(':')[0]
    details = pdb_organism_details.get(pdb_chain)
    if details is None:
        fp.write(pdb_chain + '\t\t\t\t\t\t')
    else:
        rna_types, organism, org_class, org_type, pdb_source = details
        fp.write('\t'.join([pdb_chain, rna_types, organism, org_class,
                            org_type, pdb_source]) + '\t')
def write_dock_file_list(c_id, m_id, i, r1, j, r2, align_len, zscore, score, fp1, cluster_pairwise_alignment_details, pdb_organism_details, t1, t2):
    """Append one row describing how loop (i, r1) entered the superimposition.

    With a parent (r2 != 'None'): writes the child <- parent relation plus
    rmsd / align-len / score / zscore. Without a parent (the subfamily
    root): writes a header row first when c_id == 1, then just the loop.
    Exits on an alignment-length mismatch against the cached pairwise data.
    """
    has_parent = r2 != 'None'
    rmsd = 0.0
    if has_parent:
        rmsd, align_len_check = get_rmsd_align_len(i, r1, j, r2, cluster_pairwise_alignment_details)
        # Sanity check: the caller-provided length must match the cache.
        if align_len != align_len_check:
            logger.error('ERROR!!! Align length inconsistency!!!')
            sys.exit()
    if has_parent:
        fp1.write(f'{c_id}\t')
        write_pdb_organisgm_details(pdb_organism_details, r1, fp1)
        fp1.write(f'{m_id},({i},{r1}) <- ({j},{r2}), rmsd: {round(rmsd, 3)}, align len: {align_len}, score: {score}, zscore: {zscore}')
        fp1.write(f'\t{t1},{t2}\n')
    else:
        if c_id == 1:
            # First subfamily: emit the column headers once.
            fp1.write('c_id \t PDB_chain \t RNA_Types \t Organism \t Org_class \t Org_type \t PDB_source \t Loop \t Align Order \n')
        fp1.write(f'{c_id}\t')
        write_pdb_organisgm_details(pdb_organism_details, r1, fp1)
        fp1.write(f'{m_id},({i},{r1})\n')
def get_pdb_residues(pdb_dir, pdb_chain, residue_list, structures, is_cif, loop):
    """Return {(seqnum, icode): Residue} for the chain that contains `loop`.

    Parses the mmCIF/PDB file on first use and memoises the result in the
    caller-owned `structures` cache, keyed by loop.
    """
    pdb_id, chain_id = pdb_chain.strip().split('_')
    if loop in structures:
        return structures[loop]
    # Cache miss: parse the structure file. Note the differing file-name
    # conventions: CIF files are named after the loop node, PDB files after
    # the PDB id.
    if is_cif:
        pdb_fn = os.path.join(pdb_dir, str(strToNode(loop)) + '.cif')
        parser = FastMMCIFParser()
    else:
        pdb_fn = os.path.join(pdb_dir, pdb_id + '.pdb')
        parser = PDBParser()
    chain = parser.get_structure('struct', pdb_fn)[0][chain_id]
    residue_dict = {}
    for res in chain.get_residues():
        _hetflag, ind, icode = res.get_id()
        residue_dict[(ind, icode)] = res
    structures[loop] = residue_dict
    return residue_dict
def get_atom_list(atom_names, residue_dict, residue_list):
    """For each residue index in `residue_list`, list which of `atom_names`
    that residue actually contains.

    Residues absent from `residue_dict` map to an empty list, so callers can
    iterate the result without key checks.
    """
    atom_list_dict = {}
    for index in residue_list:
        key = get_separated_index_icode(index)
        if key in residue_dict:
            residue = residue_dict[key]
            atom_list_dict[index] = [atom for atom in atom_names if atom in residue]
        else:
            atom_list_dict[index] = []
    return atom_list_dict
def get_backbone_atom_list(pdb_dir, pdb_chain, residue_list, structures, is_cif, loop):
    """Return (backbone_atom_list, sugar_atom_list): per-residue dicts of the
    backbone/sugar atom names actually present in the parsed structure for
    `loop` (atoms can be missing from experimental models). `structures` is a
    parse cache shared across calls."""
    backbone_atoms, sugar_atoms = get_backbone_and_sugar_atoms()
    residue_dict = get_pdb_residues(pdb_dir, pdb_chain, residue_list, structures, is_cif, loop)
    backbone_atom_list = get_atom_list(backbone_atoms, residue_dict, residue_list)
    sugar_atom_list = get_atom_list(sugar_atoms, residue_dict, residue_list)
    return backbone_atom_list, sugar_atom_list
def get_common_atom_list(atom_list1, atom_list2, pdb1_align_ind_list, pdb2_align_ind_list, lp1, lp2):
    """Intersect per-residue atom lists of two aligned loops.

    Residues are paired positionally via the two index lists. Returns two
    dicts (one per loop) mapping residue index -> atoms present on BOTH
    sides of the pair; a pair where either residue is missing from its atom
    list yields empty lists. `lp1`/`lp2` identify the loops (unused here,
    kept for signature compatibility).
    """
    common_atoms1 = {}
    common_atoms2 = {}
    for pos, index1 in enumerate(pdb1_align_ind_list):
        index2 = pdb2_align_ind_list[pos]
        shared = []
        if index1 in atom_list1 and index2 in atom_list2:
            atoms2 = atom_list2[index2]
            shared = [atom for atom in atom_list1[index1] if atom in atoms2]
        # Each dict gets its own list object with the shared atom names.
        common_atoms1[index1] = shared
        common_atoms2[index2] = list(shared)
    return common_atoms1, common_atoms2
def collect_common_backbone_atom_list(pdb_dir, pdb_chain1, pdb_chain2, pdb1_align_ind_list, pdb2_align_ind_list, is_cif, structures, lp1, lp2):
    """For two aligned loops, return the per-residue backbone and sugar atoms
    present in BOTH structures (backbone1, backbone2, sugar1, sugar2), so
    the superimposition only uses atom pairs that exist on each side."""
    # pdb_id, chain_id = pdb_chain.strip().split('_')
    backbone_atom_list1, sugar_atom_list1 = get_backbone_atom_list(pdb_dir, pdb_chain1, pdb1_align_ind_list, structures, is_cif, lp1)
    backbone_atom_list2, sugar_atom_list2 = get_backbone_atom_list(pdb_dir, pdb_chain2, pdb2_align_ind_list, structures, is_cif, lp2)
    common_backbone_atom_list1, common_backbone_atom_list2 = get_common_atom_list(backbone_atom_list1, backbone_atom_list2, pdb1_align_ind_list, pdb2_align_ind_list, lp1, lp2)
    common_sugar_atom_list1, common_sugar_atom_list2 = get_common_atom_list(sugar_atom_list1, sugar_atom_list2, pdb1_align_ind_list, pdb2_align_ind_list, lp1, lp2)
    return common_backbone_atom_list1, common_backbone_atom_list2, common_sugar_atom_list1, common_sugar_atom_list2
def rotate_pdb(sel1, sel2, pdb_translated):
    """Rotate `pdb_translated` so the mobile selection `sel2` best fits the
    reference selection `sel1` (Kabsch algorithm; both selections are
    assumed already centred on the origin).

    sel1, sel2 : (n, 3) arrays of paired, centred coordinates.
    pdb_translated : (m, 3) array of the full centred structure to rotate.
    Returns the rotated (m, 3) coordinates.
    """
    # Optimal rotation from the SVD of the covariance of the paired
    # selections. (The unused e0 sum-of-squares term of the original was
    # dead code and has been removed.)
    v, s, wt = numpy.linalg.svd(numpy.dot(numpy.transpose(sel2), sel1))
    # If the best orthogonal transform is a reflection (det < 0), flip the
    # smallest singular direction to obtain a proper rotation. The previous
    # exact comparison `== -1.0` could miss this under floating-point
    # round-off, since the determinant product is only approximately +/-1.
    if numpy.linalg.det(v) * numpy.linalg.det(wt) < 0:
        s[-1] = -s[-1]
        v[:, -1] = -v[:, -1]
    u = numpy.dot(v, wt)
    return numpy.dot(pdb_translated, u)
# Rotates the second loop
def rotate_loop(partial_pdbx_dir, pymol_load_info1, pymol_load_info2, align_target, align_mobile, disp_mobile, is_cif=True):
    """Superimpose the mobile loop (pymol_load_info2) onto the target loop
    (pymol_load_info1) inside PyMOL.

    The rotation is derived only from backbone/sugar atoms of the aligned
    residues that exist in BOTH structures, then applied to the mobile
    structure's full coordinates. Mutates PyMOL state; returns None.
    """
    # Stores the atoms of loops
    structures = {}
    # Convert alignment indices to residue sequence numbers.
    align_target = get_seqnums_from_indices(align_target)
    align_mobile = get_seqnums_from_indices(align_mobile)
    disp_mobile = get_seqnums_from_indices(disp_mobile)
    pdb_load_name1, chain_load_name1, target_load_name1, display_load_name1, centroid1, pdb_chain1, lp1, load_name_id = pymol_load_info1
    pdb_load_name2, chain_load_name2, target_load_name2, display_load_name2, centroid2, pdb_chain2, lp2, load_name_id = pymol_load_info2
    #Make a list of common atoms for all the aligned residues
    backbone_atom_list1, backbone_atom_list2, sugar_atom_list1, sugar_atom_list2 = collect_common_backbone_atom_list(partial_pdbx_dir, pdb_chain1, pdb_chain2, align_target, align_mobile, is_cif, structures, lp1, lp2)
    #load 1st pdb data from pymol
    pdb1_data = get_pdb_coordinates(pdb_load_name1)
    #Translate coordinates around centroid (to be used to find superimposition matrix)
    # sel1 = get_ordered_loop_backbone_coordinates(chain_load_name1, align_target, backbone_atom_list1, sugar_atom_list1)
    sel1, pdb1_translated = translate_coords(pdb1_data, chain_load_name1, align_target, backbone_atom_list1, sugar_atom_list1, centroid1)
    # load 2nd pdb data from pymol and translate
    pdb2_data = get_pdb_coordinates(pdb_load_name2)
    # sel2 = get_ordered_loop_backbone_coordinates(chain_load_name2, align_mobile, backbone_atom_list2, sugar_atom_list2)
    # NOTE(review): the centroid from pymol_load_info2 is discarded and
    # recomputed over the aligned region only — presumably because the
    # common-atom subset differs from the originally loaded one; confirm.
    centroid2 = get_centroid_of_loop(target_load_name2, align_mobile, backbone_atom_list2, sugar_atom_list2)
    sel2, pdb2_translated = translate_coords(pdb2_data, chain_load_name2, align_mobile, backbone_atom_list2, sugar_atom_list2, centroid2)
    #rotate the 2nd pdb to align with the 1st one
    pdb2_rotated = rotate_pdb(sel1, sel2, pdb2_translated)
    #Adjust coordinate to fit to the centroid shift due to local alignment
    centroid_resolve = get_centroid_of_loop(target_load_name1, align_target, backbone_atom_list1, sugar_atom_list1)
    pdb2_rotated += centroid_resolve
    # display_load_name_cur = alter_and_display_structure(pdb2_rotated, load_id + "_2", pdb_load_name2, chain_load_name2, disp_mobile, 'gray', target_load_name2, color, show_label, "O4'")
    alter_structure(pdb2_rotated, pdb_load_name2)
def generate_and_add_family_image(superimposition_output_dir, cluster_id, image_file_list, component_list, display_load_name_list, rotation_version, draw_figures, show_extended_loop):
    """Render one PNG containing every loop of the family (all subfamilies
    together) and record it.

    Appends (None, image_fname, total_loop_count) to `image_file_list`
    (bookkeeping happens even when `draw_figures` is False) and returns the
    list.
    """
    image_fname = os.path.join(
        superimposition_output_dir,
        add_rotation_version_prefix(rotation_version) + cluster_id + '__all.png')
    total_loop_count = sum(len(component) for component in component_list)
    if draw_figures:
        if show_extended_loop:
            pymol.cmd.show('cartoon', 'all')
        else:
            # Show each loop's display object individually.
            for component in component_list:
                for (i, r1), _parent in component:
                    display_load_name = display_load_name_list[(i, r1)][0]
                    pymol.cmd.show('cartoon', display_load_name)
        wait_for_certain_time_according_to_wait_factor(total_loop_count)
        pymol.cmd.zoom()
        pymol.cmd.sync()
        pymol.cmd.png(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)
        wait_for_certain_files_to_be_generated([image_fname], False)
        pymol.cmd.sync()
    image_file_list.append((None, image_fname, total_loop_count))
    return image_file_list
def get_loop_count_for_rmsd_data_dict(current_rmsd_data_dict):
    """Total number of loops across all clusters.

    Each dict value is a (rmsd_data, cluster_pairwise_alignment_details)
    pair; every loop of a cluster has one entry in the pairwise-alignment
    details, so their sizes sum to the loop count.
    """
    # Summing is order-independent, so the previous sorted() over the keys
    # was unnecessary work.
    return sum(len(details) for _rmsd_data, details in current_rmsd_data_dict.values())
def write_rmsd_and_alignment_summary(rmsd_and_alignment_summary_dict, current_rmsd_data_dict):
    """Print the overall summary: total loop count and the alignment-length
    weighted average RMSD over all families.

    Values of `rmsd_and_alignment_summary_dict` are (rmsd, align_len) pairs
    consumed by get_weighted_avg_rmsd.
    """
    rmsd_align_len_list = list(rmsd_and_alignment_summary_dict.values())
    avg_rmsd, total_alignment_length = get_weighted_avg_rmsd(rmsd_align_len_list)
    loop_count = get_loop_count_for_rmsd_data_dict(current_rmsd_data_dict)
    print('Total Loops: ' + str(loop_count))
    print('Total superimposition average RMSD: ' + str(round(avg_rmsd, 3)) + '\n')
def load_view_file(ordered_dependency_list):
    """Return the saved PyMOL view string for the cluster's first motif, or
    None when no motif exists or no .view file was saved for it.

    The view of the first motif in the first subfamily defines the camera
    orientation reused for every motif of the cluster.
    """
    if len(ordered_dependency_list) < 1 or len(ordered_dependency_list[0]) < 1:
        # NOTE(review): `cluster_id` is a module-level global here, not a
        # parameter — confirm callers always set it before this point.
        logger.info('No motif found for superimposition in ' + cluster_id + '.')
        return None
    (i, r1), _ = ordered_dependency_list[0][0]
    view_fn = os.path.join(views_dir, str(r1) + '.view')
    if os.path.isfile(view_fn):
        if input_index_type == 'pdb':
            logger.info('View file found for ' + convert_a_loop_from_FASTA_to_PDB(r1) + '. Loading view of this motif to set orientation for all motifs in this cluster.')
        else:
            logger.info('View file found for ' + r1 + '. Loading view of this motif to set orientation for all motifs in this cluster.')
        # Context manager guarantees the handle is closed even on error;
        # only the first line (the serialized view matrix) is used.
        with open(view_fn) as fv:
            view_lines = fv.readlines()
        return view_lines[0]
    return None
# def get_view_from_user(partial_pdbx_dir, cluster_id, i, r, loop_boundary_dict, loop_boundary_original_dict, load_name_id, show_extended_loop, is_cif, load_color):
def get_view_from_user(partial_pdbx_dir, cluster_id, ordered_dependency_list, loop_boundary_dict, loop_boundary_original_dict, show_extended_loop, is_cif):
    """Interactively let the user choose the camera orientation for a cluster.

    Loads the cluster's first loop into PyMOL, restores any previously saved
    view from `views_dir`, then prompts on stdin: answering yes overwrites
    the per-loop .view file with the current PyMOL view. Cleans up all PyMOL
    objects before returning. Returns None.
    """
    if len(ordered_dependency_list) < 1 or len(ordered_dependency_list[0]) < 1:
        logger.info('No loop found for superimposition in ' + cluster_id + '.')
        return
    # The first loop of the first subfamily defines the cluster orientation.
    (i, r), _ = ordered_dependency_list[0][0]
    load_name_id = 'first_loop'
    display_color = 'gray'
    align_color = 'red'
    # pymol.finish_launching()
    # pymol.cmd.hide('all')
    # pymol.cmd.hide()
    (pdb_load_name, chain_load_name, target_load_name, display_load_name, centroid, pdb_chain, r, load_name_id) = load_one_pdb_in_pymol(partial_pdbx_dir, i, r, loop_boundary_dict, loop_boundary_original_dict, load_name_id, is_cif, display_color)
    pymol.cmd.color(align_color, target_load_name)
    pymol.cmd.deselect()
    view_fname = os.path.join(views_dir, r + '.view')
    if os.path.isfile(view_fname):
        # A view was saved earlier; restore it so the user can refine it.
        if input_index_type == 'pdb':
            logger.info('View file found for ' + convert_a_loop_from_FASTA_to_PDB(r))
        else:
            logger.info('View file found for ' + r)
        fp = open(view_fname)
        lines = fp.readlines()
        fp.close()
        if len(lines) > 0:
            pymol.cmd.set_view(lines[0])
    if show_extended_loop:
        pymol.cmd.show('cartoon', chain_load_name)
        # pymol.cmd.show('cartoon', pdb_load_name)
    else:
        pymol.cmd.show('cartoon', display_load_name)
    pymol.cmd.zoom(chain_load_name)
    set_adj_res_view(r, (pdb_load_name, chain_load_name, target_load_name, display_load_name, centroid, pdb_chain, r, load_name_id))
    inp = 'N'
    # while(True):
    print('Please provide desired orientation for ' + cluster_id + '.')
    print('(Yes: To save current orientation / No: To keep previous orientation)')
    inp = input('Continue? (Yes/No): ')
    inp = inp.lower()
    if inp == 'y' or inp == 'yes':
        # Persist the current camera matrix for reuse by load_view_file().
        view = pymol.cmd.get_view()
        # print('got view')
        # print(view)
        fp = open(view_fname, 'w')
        fp.write(str(view))
        fp.close()
    # pymol.cmd.hide('all')
    pymol.cmd.hide()
    # Remove every object created for this preview before returning.
    pymol.cmd.delete(pdb_load_name)
    pymol.cmd.delete(chain_load_name)
    pymol.cmd.delete(target_load_name)
    pymol.cmd.delete(display_load_name)
    wait_for_certain_time_according_to_wait_factor(1)
    pymol.cmd.sync()
    # sys.exit()
def generate_formatted_superimposition_details(superimposition_details_dir, cluster_id, prev_text_name, pdb_organism_details):
    """Re-format the raw superimposition report `prev_text_name` into a TSV
    (<cluster_id>_superimposition_details.tsv): one row per loop with
    subfamily id, optional organism columns, child/parent loops in PDB
    numbering, the rmsd/align_len/score/zscore summary and (for FASTA input)
    the original FASTA-indexed loops. The input lines are those produced by
    write_dock_file_list (line 0 = header, line 1 = parentless root loop)."""
    fp_t = open(prev_text_name)
    fp_superimposition_details = open(os.path.join(superimposition_details_dir, str(cluster_id) + '_superimposition_details.tsv'), 'w')
    # Organism columns only exist when organism annotations were loaded.
    if len(pdb_organism_details) == 0:
        fp_superimposition_details.write('Subfamily Id\tLoop (Child)\tSuperimposition Reference (Parent)\tAlignment/Superimposition Details')
    else:
        fp_superimposition_details.write('Subfamily Id\tPDB Chain\tRNA Types\tOrganism Name\tLoop (Child)\tSuperimposition Reference (Parent)\tAlignment/Superimposition Details')
    if input_index_type == 'fasta':
        fp_superimposition_details.write('\tLoop (Child) (FASTA)\tSuperimposition Reference (Parent) (FASTA)')
    fp_superimposition_details.write('\n')
    lines = fp_t.readlines()
    fp_t.close()
    # lines[1] holds the root loop, written as "m_id,(i,loop)" — it has no
    # parent, so only the child column is filled below.
    pieces = lines[1].split('\t')
    child_loop = pieces[-1].split(',')[-1].strip().split(')')[0].strip()
    # if input_index_type == 'pdb':
    child_loop_pdb = convert_a_loop_from_FASTA_to_PDB(child_loop)
    if len(pdb_organism_details) == 0:
        fp_superimposition_details.write('\t'.join(pieces[:1]) + '\t' + child_loop_pdb + '\t\t')
    else:
        fp_superimposition_details.write('\t'.join(pieces[:4]) + '\t' + child_loop_pdb + '\t\t')
    if input_index_type == 'fasta':
        fp_superimposition_details.write('\t' + child_loop)
    fp_superimposition_details.write('\n')
    # Remaining lines each describe a child superimposed onto a parent.
    for line in lines[2:]:
        pieces = line.split('\t')
        child_loop, parent_loop = pieces[-1].split(',')
        child_loop = child_loop.strip()
        parent_loop = parent_loop.strip()
        # if input_index_type == 'pdb':
        child_loop_pdb = convert_a_loop_from_FASTA_to_PDB(child_loop)
        parent_loop_pdb = convert_a_loop_from_FASTA_to_PDB(parent_loop)
        # print(pieces[7])
        # pieces[7] is "m_id,(i,r1) <- (j,r2), rmsd: x, align len: n,
        # score: s, zscore: z" (see write_dock_file_list), so comma-fields
        # 4..7 carry the four metrics.
        rmsd, align_len, score, zscore = pieces[7].split(',')[4:8]
        rmsd = str(round(float(rmsd.strip().split(':')[1].strip()), 2))
        align_len = align_len.strip().split(':')[1].strip()
        score = str(round(float(score.strip().split(':')[1].strip()), 2))
        zscore = str(round(float(zscore.strip().split(':')[1].strip()), 2))
        if len(pdb_organism_details) == 0:
            fp_superimposition_details.write('\t'.join(pieces[:1]))
        else:
            fp_superimposition_details.write('\t'.join(pieces[:4]))
        fp_superimposition_details.write('\t' + child_loop_pdb + '\t' + parent_loop_pdb + '\t' + 'rmsd: ' + rmsd + ', align_len: ' + align_len + ', score: ' + score + ', zscore: ' + zscore)
        if input_index_type == 'fasta':
            fp_superimposition_details.write('\t' + child_loop + '\t' + parent_loop)
        fp_superimposition_details.write('\n')
    fp_superimposition_details.close()
def load_pdb_fasta_mapping_and_fasta_seq_dict(cluster_id, alignment_data):
    """For every loop of the cluster, load the per-chain PDB residue mapping
    and the per-PDB-entry FASTA sequences.

    Returns (pdb_res_mapping_dict, fasta_seq_dict): the first keyed by
    "pdbid_chain", the second filled by load_fasta_seq.
    """
    pdb_chain_dict = {}
    pdb_res_mapping_dict = {}
    for loop in alignment_data[cluster_id]:
        pdb_chain, _ = str(loop).strip().split(':')
        pdb_id, chain_id = pdb_chain.strip().split('_')
        pdb_chain_dict.setdefault(pdb_id, []).append(chain_id)
        # Load each chain's residue mapping only once.
        if pdb_chain not in pdb_res_mapping_dict:
            pdb_res_mapping_dict[pdb_chain] = load_pdb_res_map(pdb_chain)
    fasta_seq_dict = {}
    for pdb_id, chain_ids in pdb_chain_dict.items():
        fasta_seq_dict.update(load_fasta_seq(pdb_id, chain_ids))
    return pdb_res_mapping_dict, fasta_seq_dict
def get_inter_subfamily_parent_info(ordered_dependency_list, edges_among_all_components):
    """Count, for each loop, how many similarity edges leave its own
    subfamily (component).

    `ordered_dependency_list` is a list of components, each a list of
    ((i, r1), parent) pairs; edges are ((i, r1), (j, r2), weight) triples.
    Returns {(i, r1): count} containing only loops with at least one
    cross-component edge.
    """
    parent_usage = {}
    for component in ordered_dependency_list:
        members = set(loop for loop, _parent in component)
        for src, dst, _weight in edges_among_all_components:
            # An edge counts when it starts inside this component and ends
            # outside of it.
            if src in members and dst not in members:
                parent_usage[src] = parent_usage.get(src, 0) + 1
    return parent_usage
def delete_motif_from_pymol(pymol_load_info_motif):
    """Remove every PyMOL object created for one motif (display, target,
    chain and whole-PDB objects), deleted in reverse order of creation."""
    (pdb_load_name, chain_load_name, target_load_name, display_load_name,
     _centroid, _pdb_chain, _lp, _load_name_id) = pymol_load_info_motif
    for object_name in (display_load_name, target_load_name,
                        chain_load_name, pdb_load_name):
        pymol.cmd.delete(object_name)
def add_rotation_version_prefix(rotation_version):
    """Return '<rotation_version>__' for a non-empty label, else ''."""
    return rotation_version + '__' if rotation_version else ''
def add_rotation_version_suffix(rotation_version):
    """Return '__<rotation_version>' for a non-empty label, else ''."""
    return '__' + rotation_version if rotation_version else ''
def split_subfamily_cumulative_count(ordered_dependency_list):
    """For each subfamily (component), compute the cumulative motif counts at
    which its superimposition images are split so that no single image holds
    more than `max_no_of_motifs_in_superimposition` motifs.

    When the remainder would leave a small tail (fewer than two full chunks
    left), the last two chunks are balanced by halving. Returns one list of
    cumulative counts per component; each list ends at len(component).
    """
    splitted_subfamily_cumulative_count = []
    for component in ordered_dependency_list:
        current_list = []
        remaining_motifs = len(component)
        previous_val = 0
        while remaining_motifs > max_no_of_motifs_in_superimposition:
            next_split_size = max_no_of_motifs_in_superimposition
            if remaining_motifs < 2 * max_no_of_motifs_in_superimposition:
                # Balance the final two chunks instead of leaving a tiny
                # tail. Must be floor division: under Python 3 the original
                # `(remaining_motifs + 1) / 2` produced floats, propagating
                # float cumulative counts (the Python 2 `/` was integer
                # division).
                next_split_size = (remaining_motifs + 1) // 2
            remaining_motifs -= next_split_size
            current_list.append(previous_val + next_split_size)
            previous_val += next_split_size
        current_list.append(previous_val + remaining_motifs)
        splitted_subfamily_cumulative_count.append(current_list)
    return splitted_subfamily_cumulative_count
def set_adj_res_view(r, pymol_load_info):
    """If an <r>.adj_info file exists, label and zoom the view on loop `r`
    plus its adjacent chains (presumably nearby proteins, per the data
    directory name — confirm) and return True; return False without touching
    PyMOL otherwise.

    .adj_info layout used here (0-based): lines[2] = comma-separated
    "chain:..." items naming adjacent chains; lines[3] =
    "ring_mode,ring_transparency" cartoon settings.
    """
    fname = os.path.join(neares_protein_data_dir, r + '.adj_info')
    if os.path.isfile(fname):
        pdb_load_name, chain_load_name, target_load_name, display_load_name, centroid, pdb_chain, lp, load_name_id = pymol_load_info
        fp = open(fname)
        lines = fp.readlines()
        fp.close()
        ring_mode, ring_transparency = lines[3].strip().split(',')
        ring_mode, ring_transparency = int(ring_mode), float(ring_transparency)
        items = lines[2].strip().split(',')
        # Build "(target or chain A or chain B ...)"; fall back to just the
        # target selection when no adjacent chains are listed.
        adj_chain_str = ''
        for item in items:
            if len(item) > 0:
                chain_id = item.strip().split(':')[0]
                adj_chain_str += ' or chain ' + chain_id
        if len(adj_chain_str) == 0:
            adj_chain_str = target_load_name
        else:
            adj_chain_str = "(" + target_load_name + adj_chain_str + ")"
        # Base/ring atoms used to locate residues near the loop's bases.
        base_atoms = ["C1'", "C2", "C4", "C5", "C6", "C8", "N1", "N3", "N7", "N9"]
        Giovanni_Ciriello_2010 = ["P", "OP1", "OP2", "O5'", "C5'", "C4'"]  # unused here
        select_str = ''
        for i, atom in enumerate(base_atoms):
            if i > 0:
                select_str += " or"
            select_str += " name " + atom
        # _temp: the loop's base atoms; _temp2/_temp3: whole residues within
        # 5 A / 10 A of them; intersect with the adjacent chains to obtain
        # the label (close) and zoom (wider) selections.
        pymol.cmd.select(display_load_name + '_temp', target_load_name + " and (" + select_str + ")")
        pymol.cmd.select(display_load_name + '_temp2', 'byres ' + pdb_load_name + ' within 5 of ' + display_load_name + '_temp')
        pymol.cmd.select(display_load_name + '_temp3', 'byres ' + pdb_load_name + ' within 10 of ' + display_load_name + '_temp')
        pymol.cmd.select(display_load_name + '_label', display_load_name + '_temp2' + ' and ' + adj_chain_str)
        pymol.cmd.select(display_load_name + '_zoom', display_load_name + '_temp3' + ' and ' + adj_chain_str)
        pymol.cmd.set(name='cartoon_ring_mode',value=ring_mode,quiet=1)
        pymol.cmd.set(name='cartoon_ring_transparency',value=ring_transparency,quiet=1)
        pymol.cmd.label(display_load_name + '_label' + " and (name C2' or name CA)", "'%s-%s' %(resn, resi)")
        pymol.cmd.zoom(display_load_name + '_zoom')
        pymol.cmd.deselect()
        pymol.cmd.show('cartoon', pdb_load_name)
        # Drop the scratch selections once labels/zoom have been applied.
        pymol.cmd.delete(display_load_name + '_zoom')
        pymol.cmd.delete(display_load_name + '_label')
        pymol.cmd.delete(display_load_name + '_temp3')
        pymol.cmd.delete(display_load_name + '_temp2')
        pymol.cmd.delete(display_load_name + '_temp')
        return True
    return False
def check_and_save_pymol_figure_of_a_loop_with_protein(r, pymol_load_info, image_fname, show_extended_loop, show_label, label_atom, display_color, align_color, superimposition_output_dir):
    """If adjacency info exists for loop `r`, re-render the current figure
    zoomed on the loop plus its adjacent chains and save the PNG (and,
    when enabled, the PyMOL session) under
    rotated_loop_images_with_adj_info/. No-op when the loop has no
    adjacency data.

    (A large block of superseded, commented-out selection code that
    duplicated set_adj_res_view was removed.)
    """
    # set_adj_res_view applies the adjacency selection/zoom/labels; it
    # returns False when no <r>.adj_info file exists.
    if set_adj_res_view(r, pymol_load_info):
        image_dir_with_adj_info = os.path.join(superimposition_output_dir, 'rotated_loop_images_with_adj_info/')
        create_directory(image_dir_with_adj_info)
        # Derive "<base>_<r>.png" / "<base>_<r>.pse" names inside the
        # adjacency-image directory.
        image_fname = os.path.join(image_dir_with_adj_info, os.path.basename(image_fname))
        image_fname = '.'.join(image_fname.split('.')[:-1]) + '_' + r + '.png'
        session_fname = '.'.join(image_fname.split('.')[:-1]) + '.pse'
        pymol.cmd.png(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)
        pymol.cmd.sync()
        if save_pymol_session == True:
            pymol.cmd.save(session_fname)
            pymol.cmd.sync()
        pymol.cmd.hide()
        # Restore default cartoon ring settings for subsequent renders.
        config_pymol_cartoon('red', show_label)
def generate_pymol_images(time_in_distance_calc, removable_text_file_list, partial_pdbx_dir, summary_dir, superimposition_output_dir, subfamily_details_dir, superimposition_details_dir, representative_dir, pymol_session_dir, current_rmsd_data_dict, alignment_data, pdb_organism_details, loop_type, set_view_manually, draw_figures, show_extended_loop, is_cif=True):
    """Generate per-family/subfamily PyMOL superimposition images and summary files.

    For every cluster (family) in ``current_rmsd_data_dict`` this routine:
    builds loop boundaries, derives subfamilies via
    ``generate_loop_print_dependency_v2``, loads each loop into PyMOL,
    rotates/superimposes it onto its parent, saves initial / rotated /
    progressively superimposed images (optionally PyMOL ``.pse`` sessions),
    and writes the subfamily CSV, alignment-length-threshold file, and the
    subfamily details table.

    NOTE(review): behavior also depends on several module-level flags not
    passed as parameters (``generate_similarity_graph_image``,
    ``save_pymol_session``, ``draw_input_images``, ``output_env``,
    ``input_index_type``, ``scanx_align_to_superimposition``,
    ``whole_family_superimpose``, ``known_motif_fullname``) — confirm their
    definitions at module scope.

    Parameters mostly name output directories and precomputed data
    structures. ``time_in_distance_calc`` is a running timing accumulator
    that is increased and returned; ``set_view_manually`` switches the
    function into an interactive view-capturing mode that returns early.

    Returns:
        The updated ``time_in_distance_calc`` accumulator, or ``None`` when
        ``set_view_manually`` is True (PyMOL is quit before the normal exit).
    """
    if generate_similarity_graph_image:
        # NOTE(review): the first figure is immediately replaced by the next
        # plt.figure() call and never closed — looks like leftover code.
        plt_fig = plt.figure(frameon = False)
        plt_fig = plt.figure()
        plt_fig.set_size_inches(9, 9)
    fp_align_len_threshold = None
    subfamily_cluster_fp = None
    show_label = False
    # Launch PyMOL: interactive GUI when views are to be set manually,
    # otherwise headless ('-cqQ') and only if figures will be drawn.
    if set_view_manually == True:
        pymol.finish_launching()
    else:
        if draw_figures == True: # or generate_rotation_matrices:
            # pymol.finish_launching()
            pymol.finish_launching(['pymol', '-cqQ'])
    # If all cluster length is <= 2, show labels (this is supposed to happen for testing purpose clusters)
    show_label = True
    for cluster_id in current_rmsd_data_dict:
        _, cluster_pairwise_alignment_details = current_rmsd_data_dict[cluster_id]
        if len(cluster_pairwise_alignment_details) > 2:
            show_label = False
            break
    if save_pymol_session == True:
        create_directory(pymol_session_dir)
    # To save alignment length threshold
    familywise_align_len_threshold_fn = os.path.join(summary_dir, 'Familywise_Align_Length_Threshold.txt')
    fp_align_len_threshold = open(familywise_align_len_threshold_fn, 'w')
    # File to write components(subfamilies) as cluster
    subfamily_cluster_fname = os.path.join(superimposition_output_dir, 'subfamily_cluster.csv')
    subfamily_cluster_fp = open(subfamily_cluster_fname, 'w')
    initial_loop_image_dir = os.path.join(superimposition_output_dir, 'initial_loop_images/')
    rotated_loop_image_dir = os.path.join(superimposition_output_dir, 'rotated_loop_images/')
    if draw_input_images == True:
        create_directory(initial_loop_image_dir)
        create_directory(rotated_loop_image_dir)
    create_directory(representative_dir)
    # Per-cluster accumulators used after the main loop to emit collages,
    # summaries, and to optimize saved PyMOL sessions.
    cluster_image_file_list = {}
    rmsd_and_alignment_summary_dict = {}
    subfamily_details_table_data = {}
    pdb_res_map_dict = {}
    representative_pymol_load_info = {}
    subfamily_pymol_load_info = {}
    all_pymol_load_info = {}
    # ---- Process each family (cluster) independently ----
    for cluster_id in sorted(current_rmsd_data_dict):
        # logger.info('Started processing ' + cluster_id)
        time_start_for_cluster = time.time()
        representative_pymol_load_info[cluster_id] = {}
        subfamily_pymol_load_info[cluster_id] = {}
        cluster_image_file_list[cluster_id] = {}
        logger.info('Loading pdb-fasta seq dict')
        pdb_res_mapping_dict, fasta_seq_dict = load_pdb_fasta_mapping_and_fasta_seq_dict(cluster_id, alignment_data)
        _, cluster_pairwise_alignment_details = current_rmsd_data_dict[cluster_id]
        load_id_dict = {} # Store the id assigned for each loop
        loop_list = cluster_pairwise_alignment_details.keys()
        # Assign a sequential PyMOL load id '<cluster>__<n>' to every loop.
        for loop_id, (i, r1) in enumerate(loop_list):
            load_id = str(cluster_id) + '__' + str(loop_id + 1)
            load_id_dict[(i, r1)] = (load_id, loop_id + 1)
        logger.info('Generating loop boundaries')
        ### Generate boundary of each loop in the context of the cluster (family)
        # loop_boundary_dict, loop_boundary_original_dict = generate_loop_boundary(cluster_pairwise_alignment_details, alignment_data[cluster_id.strip().split('_')[0]])
        loop_boundary_dict, loop_boundary_original_dict = generate_loop_boundary(cluster_pairwise_alignment_details, alignment_data[cluster_id])
        loop_boundary_dict = get_loop_boundary_pdb_index(loop_boundary_dict)
        loop_boundary_original_dict = get_loop_boundary_pdb_index(loop_boundary_original_dict)
        # loop_coord_dict = get_translated_coordinates(loop_boundary_dict, pdb_dir, is_cif)
        logger.info('Generating subfamilies for ' + cluster_id)
        ### Generate the optimal ordering of loops to show the best possible superimposition
        # To Do: Return the components of the graph
        # merged_components_features -> (central_node, cycle_nodes_of_the_component, component_nodes, component_directed_adjacency_list) of components
        time_start_for_dependency_list = time.time()
        ordered_dependency_list, merged_components_features, edges_among_all_components, edges_in_merged_components = generate_loop_print_dependency_v2(cluster_id, cluster_pairwise_alignment_details, alignment_data, fp_align_len_threshold)
        logger.info('Completed generating subfamilies. Time taken: ' + str(round(time.time() - time_start_for_dependency_list, 3)) + ' seconds.' )
        print('')
        splitted_subfamily_cumulative_count = split_subfamily_cumulative_count(ordered_dependency_list)
        parent_usage = get_inter_subfamily_parent_info(ordered_dependency_list, edges_among_all_components)
        # set view for the first loop of traversal
        if set_view_manually == True:
            get_view_from_user(partial_pdbx_dir, cluster_id, ordered_dependency_list, loop_boundary_dict, loop_boundary_original_dict, show_extended_loop, is_cif)
            continue
        r1_view = None
        if draw_figures == True:
            r1_view = load_view_file(ordered_dependency_list)
        generate_componentwise_bp_annotation_files(subfamily_details_dir, cluster_id, ordered_dependency_list, load_id_dict)
        if output_env == 'local':
            generate_componentwise_analysis_files(superimposition_output_dir, cluster_id, ordered_dependency_list, load_id_dict, alignment_data, current_rmsd_data_dict)
        avg_rmsd, total_alignment_length = get_family_rmsd_and_alignment_summary(ordered_dependency_list, cluster_pairwise_alignment_details)
        rmsd_and_alignment_summary_dict[cluster_id] = (avg_rmsd, total_alignment_length)
        subfamily_colors, subfamily_colors_tuple = get_sub_family_colors(len(ordered_dependency_list))
        # Generate subfamily details table data
        avg_rmsd_align_to, total_alignment_length_align_to = get_align_to_rmsd_info(cluster_id, cluster_pairwise_alignment_details, alignment_data)
        avg_rmsd_sufamily_only, total_alignment_length_subfamily_only = get_family_rmsd_and_alignment_summary(ordered_dependency_list, cluster_pairwise_alignment_details, True)
        generate_subfamily_details_table_data(cluster_id, ordered_dependency_list, avg_rmsd_align_to, total_alignment_length_align_to, avg_rmsd, total_alignment_length, avg_rmsd_sufamily_only, total_alignment_length_subfamily_only, subfamily_details_table_data)
        if generate_similarity_graph_image:
            # Generate component graph image
            graph_image_list = []
            subfamily_dir = os.path.join(superimposition_output_dir, 'subfamily')
            graph_dir = os.path.join(subfamily_dir, 'graph')
            create_directory(graph_dir)
            # all_components_info_list = []
            for merged_component_id, a_merged_component_features in enumerate(merged_components_features):
                component_id = 1
                # merged_components_info_list = []
                # NOTE(review): total_loop is accumulated below but not read
                # afterwards in this loop — possibly dead code.
                total_loop = 0
                for component_features in a_merged_component_features:
                    image_fname = os.path.join(graph_dir, str(cluster_id) + '__' + str(merged_component_id + 1) + '_' + str(component_id)+'.png')
                    graph_image_list.append((None, image_fname, len(component_features[2])))
                    component_info = get_component_graph(component_features, load_id_dict, subfamily_colors_tuple[merged_component_id])
                    # merged_components_info_list.append(component_info)
                    # all_components_info_list.append(component_info)
                    draw_graph([component_info], [], None, image_fname, plt_fig)
                    component_id += 1
                    total_loop += len(component_features[2])
            generate_subfamily_image(graph_image_list, pdb_organism_details, cluster_id, os.path.join(superimposition_output_dir, 'subfamily'), draw_figures, 'step2_subfamilies_identified', is_graph_image=True)
        ### Load all the loops in PDB and save corresponding images
        pymol_load_info_dict = {} # Store all pymol data load related info
        loop_display_info_dict = {} # Store only the info to show the loops in pymol
        if draw_figures:
            reset_pymol()
        fp_representative = open(os.path.join(representative_dir, str(cluster_id) + "_representatives.txt"), "w")
        text_fname = os.path.join(superimposition_output_dir, str(cluster_id) + '.txt')
        fp_text_fname = open(text_fname, 'w')
        removable_text_file_list.append(text_fname)
        prev_component_count = 0
        initial_loop_image_dict = {}
        rotated_loop_image_list_dict = {}
        component_image_file_list_dict = {}
        displayed_motifs = {}
        # ---- Iterate subfamilies (components) in print-dependency order ----
        for component_id, component in enumerate(ordered_dependency_list):
            # write subcluster_id to file
            if cluster_id in known_motif_fullname:
                subfamily_cluster_fp.write(known_motif_fullname[cluster_id] + '-Sub' + str(component_id + 1))
            else:
                subfamily_cluster_fp.write(str(cluster_id) + '-Sub' + str(component_id + 1))
            ###### To Do: add code for deleting loops from pymol
            # Free motifs from previous subfamilies that no later subfamily
            # still needs as a superimposition parent.
            if draw_figures == True and whole_family_superimpose == False:
                for (i, r1) in displayed_motifs:
                    if (i, r1) not in parent_usage or parent_usage[(i, r1)] < 1:
                        delete_motif_from_pymol(displayed_motifs[(i, r1)])
            # if draw_figures == True:
            #     # pymol.cmd._do('hide all')
            #     pymol.cmd.hide()
            #     wait_for_certain_time_according_to_wait_factor(prev_component_count)
            #     pymol.cmd.sync()
            if len(component) > 0:
                _, parent = component[0]
                if parent != None:
                    if parent not in parent_usage:
                        logger.error('Parent not found.')
                        sys.exit()
                    parent_usage[parent] -= 1
            subfamily_pymol_load_info[cluster_id][component_id] = []
            # Show the list in the original loop_id order
            # Write subfamily members to the CSV row and load each into PyMOL.
            for (i, r1), parent in component:
                if input_index_type == 'pdb':
                    r1_pdb_ind = convert_a_loop_from_FASTA_to_PDB(r1)
                    subfamily_cluster_fp.write(',' + r1_pdb_ind)
                else:
                    subfamily_cluster_fp.write(',' + r1)
                load_id, _ = load_id_dict[(i, r1)]
                image_fname = os.path.join(initial_loop_image_dir, load_id + '.png')
                initial_loop_image_dict[(i, r1)] = (r1, image_fname, 1)
                if draw_figures:
                    pymol_load_info_dict[(i, r1)] = load_one_pdb_in_pymol(partial_pdbx_dir, i, r1, loop_boundary_dict, loop_boundary_original_dict, load_id, is_cif, 'gray')
                    loop_display_info_dict[(i, r1)], pymol_load_info_dict[(i, r1)] = translate_and_show_single_loop(pymol_load_info_dict[(i, r1)], loop_boundary_dict[(i,r1)], loop_boundary_original_dict[(i, r1)], load_id, image_fname, show_extended_loop, show_label, 'gray', 'gray')
                    displayed_motifs[(i, r1)] = pymol_load_info_dict[(i, r1)]
                subfamily_pymol_load_info[cluster_id][component_id].append((i, r1))
                if draw_input_images == True:
                    # pymol.cmd._do('hide all')
                    pymol.cmd.hide()
                    pymol.cmd.sync()
            subfamily_cluster_fp.write('\n')
            represetative_i, representative_loop = generate_subfamily_representative(fp_representative, cluster_id, component_id, component, alignment_data, cluster_pairwise_alignment_details, pdb_res_map_dict, generate_align_length_threshold(cluster_pairwise_alignment_details))
            representative_pymol_load_info[cluster_id][component_id] = (represetative_i, representative_loop)
            rotation_matrices = get_multiple_orientation_rotation_matrices()
            ## Generate images for different orientation
            for v, rotation_matrix in enumerate(rotation_matrices):
                rotation_version = ''
                if len(rotation_matrices) > 1:
                    rotation_version = 'v' + str(v + 1)
                # if draw_figures == True:
                #     pymol.cmd.hide()
                #     pymol.cmd.sync()
                ### Rotate, group and show the subfamilies
                # family_loop_id = 0
                if rotation_version not in rotated_loop_image_list_dict:
                    rotated_loop_image_list_dict[rotation_version] = {}
                if component_id not in rotated_loop_image_list_dict[rotation_version]:
                    rotated_loop_image_list_dict[rotation_version][component_id] = []
                if rotation_version not in cluster_image_file_list[cluster_id]:
                    cluster_image_file_list[cluster_id][rotation_version] = []
                if rotation_version not in component_image_file_list_dict:
                    component_image_file_list_dict[rotation_version] = []
                ## Save rotated motifs to generate side-by-side image
                # current_cumulative_index = 0
                for component_loop_id, ((i, r1), parent) in enumerate(component):
                    # family_loop_id += 1
                    load_name_id, _ = load_id_dict[(i, r1)]
                    # if draw_figures:
                    #     pymol.cmd._do('hide all')
                    #     pymol.cmd.hide()
                    #     pymol.cmd.sync()
                    # pdb_load_name1, chain_load_name1, target_load_name1, display_load_name1, centroid1, pdb_chain1, lp1, load_name_id = pymol_load_info_dict[(i, r1)]
                    ### superimposition subfamilies
                    image_fname = os.path.join(rotated_loop_image_dir, add_rotation_version_prefix(rotation_version) + load_name_id + '__3.png')
                    component_id_str = str(component_id + 1)
                    # if len(splitted_subfamily_cumulative_count[component_id]) > 1:
                    #     component_id_str += get_string_equivalent_index(current_cumulative_index)
                    file_name = os.path.join(superimposition_output_dir, add_rotation_version_prefix(rotation_version) + str(cluster_id) + '__' + component_id_str + '_' + str(component_loop_id + 1))
                    adj_image_fname = file_name + '.png'
                    # Draw the first loop of the first component independent of any other loops
                    if component_id == 0 and component_loop_id == 0:
                        if draw_figures:
                            display_load_name, align_load_name, chain_load_name = loop_display_info_dict[(i,r1)]
                            # Prefer a user-captured view for the very first
                            # loop (only at the base orientation v == 0).
                            if r1_view != None and v == 0:
                                pymol.cmd.set_view(r1_view.strip())
                            else:
                                rotate_first_loop(pymol_load_info_dict[(i, r1)], rotation_matrix)
                            if scanx_align_to_superimposition == False:
                                show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, "C2'", 'gray', subfamily_colors[component_id])
                                check_and_save_pymol_figure_of_a_loop_with_protein(r1, pymol_load_info_dict[(i, r1)], adj_image_fname, show_extended_loop, show_label, "C2'", 'gray', subfamily_colors[component_id], superimposition_output_dir)
                            else:
                                show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, "C2'", 'gray', 'red')
                        rotated_loop_image_list_dict[rotation_version][component_id].append((r1, image_fname, 1))
                        if v == 0:
                            write_dock_file_list(component_id + 1, component_loop_id + 1, i, r1, 0, 'None', 0, 0, 0, fp_text_fname, cluster_pairwise_alignment_details, pdb_organism_details, '', '')
                    else:
                        mobile_loop = strToNode(r1)
                        # mobile loop has the best superimposition feature (e.g. rmsd, alignment length) with target loop
                        j, r2 = parent
                        target_loop = strToNode(r2)
                        (t1, t2, zscore, cr1, cr2, aln1, aln2, score) = alignment_data[cluster_id][mobile_loop][target_loop]
                        # if output_env == 'local':
                        pdb1_pm, pdb2_pm, i1_pm, i2_pm = aln_residue_temp(pdb_res_mapping_dict, fasta_seq_dict, r1, r2, cr1, cr2, aln1, aln2, 0, len(aln1)-1, 0)
                        # else:
                        #     pdb1_pm, pdb2_pm, i1_pm, i2_pm = aln_residue(r1, r2, cr1, cr2, aln1, aln2, 0, len(aln1)-1, 0)
                        rmsd, align_len = get_rmsd_align_len(i, r1, j, r2, cluster_pairwise_alignment_details)
                        # rmsd_rank = get_rmsd_rank(rmsd, align_len, is_length_adjusted_score)
                        if draw_figures:
                            display_load_name, align_load_name, chain_load_name = loop_display_info_dict[(i,r1)]
                            # pymol_load_info_dict[(i, r1)], display_load_name, display_load_name_cur = superimposition_pair_of_loops(pymol_load_info_dict[(j, r2)], pymol_load_info_dict[(i, r1)], load_id, pdb2_pm, pdb1_pm, loop_boundary_original_dict[(i, r1)], pdb_dir, display_load_name, image_fname, is_cif, rmsd_rank, show_label, component_id, component_loop_id)
                            rotate_loop(partial_pdbx_dir, pymol_load_info_dict[(j, r2)], pymol_load_info_dict[(i, r1)], pdb2_pm, pdb1_pm, loop_boundary_original_dict[(i, r1)])
                            if scanx_align_to_superimposition == False:
                                show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, "C2'", 'gray', subfamily_colors[component_id])
                                check_and_save_pymol_figure_of_a_loop_with_protein(r1, pymol_load_info_dict[(i, r1)], adj_image_fname, show_extended_loop, show_label, "C2'", 'gray', subfamily_colors[component_id], superimposition_output_dir)
                            else:
                                show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, "C2'", 'gray', 'green')
                        rotated_loop_image_list_dict[rotation_version][component_id].append((r1, image_fname, 1))
                        # subfamily_colors[component_id]
                        if v == 0:
                            write_dock_file_list(component_id + 1, component_loop_id + 1, i, r1, j, r2, align_len, zscore, score, fp_text_fname, cluster_pairwise_alignment_details, pdb_organism_details, t1, t2)
                if draw_figures:
                    # pymol.cmd._do('hide all')
                    pymol.cmd.hide()
                    pymol.cmd.sync()
                time_in_distance_calc += generate_representative_loop_image(time_in_distance_calc, representative_dir, rotation_version, cluster_id, component_id, represetative_i, representative_loop, loop_display_info_dict, draw_figures, show_extended_loop, show_label)
                # time.sleep(.100)
                #### Draw subfamilies superimposed
                # component_image_file_list = []
                # if draw_figures == True:
                #     # pymol.cmd._do('hide all')
                #     pymol.cmd.hide()
                #     wait_for_certain_time_according_to_wait_factor(prev_component_count)
                #     pymol.cmd.sync()
                image_fname = None
                r1 = ''
                current_cumulative_index = 0
                previous_cumulative_value = 0
                ## Generate progressive images and subfamily superimposition
                for component_loop_id, ((i, r1), parent) in enumerate(component):
                    ### superimposition subfamilies
                    component_id_str = str(component_id + 1)
                    if len(splitted_subfamily_cumulative_count[component_id]) > 1:
                        component_id_str += get_string_equivalent_index(current_cumulative_index)
                    file_name = os.path.join(superimposition_output_dir, add_rotation_version_prefix(rotation_version) + str(cluster_id) + '__' + component_id_str + '_' + str(component_loop_id + 1))
                    image_fname = file_name + '.png'
                    if draw_figures:
                        display_load_name, align_load_name, chain_load_name = loop_display_info_dict[(i,r1)]
                        if scanx_align_to_superimposition == True:
                            if component_id == 0 and component_loop_id == 0:
                                show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, "C2'", 'gray', 'red')
                            else:
                                show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, "C2'", 'gray', 'green')
                        else:
                            # print('showing and saving ' + r1)
                            show_and_save_pymol_fig_of_a_loop(chain_load_name, display_load_name, align_load_name, image_fname, show_extended_loop, show_label, "C2'", 'gray', subfamily_colors[component_id])
                    # Emit one collage entry each time a cumulative-count
                    # boundary of this (possibly split) subfamily is reached.
                    if component_loop_id + 1 == splitted_subfamily_cumulative_count[component_id][current_cumulative_index]:
                        number_of_motifs = splitted_subfamily_cumulative_count[component_id][current_cumulative_index] - previous_cumulative_value
                        component_image_file_list_dict[rotation_version].append((r1, image_fname, number_of_motifs))
                        if draw_figures:
                            wait_for_certain_files_to_be_generated([image_fname], True)
                            pymol.cmd.sync()
                            pymol.cmd.hide()
                            wait_for_certain_time_according_to_wait_factor(number_of_motifs)
                            pymol.cmd.sync()
                        previous_cumulative_value = splitted_subfamily_cumulative_count[component_id][current_cumulative_index]
                        current_cumulative_index += 1
                # if image_fname != None:
                #     number_of_motifs = splitted_subfamily_cumulative_count[component_id][current_cumulative_index] - previous_cumulative_value
                #     component_image_file_list_dict[rotation_version].append((r1, image_fname, number_of_motifs))
            if draw_figures == True:
                if save_pymol_session == True:
                    pymol.cmd.deselect()
                    # pymol.cmd._do('save ' + os.path.join(pymol_session_dir, str(cluster_id) + '-Sub' + str(component_id+1) + '.pse'))
                    pymol.cmd.save(os.path.join(pymol_session_dir, str(cluster_id) + '-Sub' + str(component_id+1) + '.pse'))
                    pymol.cmd.sync()
                # pymol.cmd._do('hide all')
                pymol.cmd.hide()
                wait_for_certain_time_according_to_wait_factor(len(component))
                pymol.cmd.sync()
            # prev_component_count = len(component)
        fp_text_fname.close()
        generate_formatted_superimposition_details(superimposition_details_dir, cluster_id, text_fname, pdb_organism_details)
        all_pymol_load_info[cluster_id] = pymol_load_info_dict
        # Optionally prepend a whole-family superimposition image before the
        # per-subfamily images for each rotation version.
        for rotation_version in component_image_file_list_dict:
            if len(component_image_file_list_dict[rotation_version]) > 0:
                if whole_family_superimpose == True and len(component_image_file_list_dict[rotation_version]) > 1:
                    component_image_file_list_dict[rotation_version] = generate_and_add_family_image(superimposition_output_dir, cluster_id, component_image_file_list_dict[rotation_version], ordered_dependency_list, loop_display_info_dict, rotation_version, draw_figures, show_extended_loop)
                cluster_image_file_list[cluster_id][rotation_version] = component_image_file_list_dict[rotation_version]
        fp_representative.close()
        if draw_input_images == True:
            initial_loop_image_list = [] # Store the list of ungrouped loop
            for i, r1 in loop_list:
                initial_loop_image_list.append(initial_loop_image_dict[(i, r1)])
            # Collage of input motifs
            generate_subfamily_image(initial_loop_image_list, pdb_organism_details, cluster_id, os.path.join(superimposition_output_dir, 'subfamily'), draw_figures, 'step1_initial', show_pdb_info=True, show_image_caption=False)
        for rotation_version in rotated_loop_image_list_dict:
            combined_rotated_image_list = []
            for component_id in rotated_loop_image_list_dict[rotation_version]:
                component_collage_image_fname = '-Sub' + str(component_id + 1) + add_rotation_version_suffix(rotation_version)
                generate_subfamily_image(rotated_loop_image_list_dict[rotation_version][component_id], pdb_organism_details, cluster_id, subfamily_details_dir, draw_figures, component_collage_image_fname, show_image_caption=subfamily_side_by_side_image_caption)
                # generate_subfamily_image(rotated_loop_image_list_dict[rotation_version][component_id], pdb_organism_details, cluster_id, subfamily_details_dir, draw_figures, component_collage_image_fname, show_pdb_info=True, show_image_caption=True)
                combined_rotated_image_list += rotated_loop_image_list_dict[rotation_version][component_id]
            generate_subfamily_image(combined_rotated_image_list, pdb_organism_details, cluster_id, os.path.join(superimposition_output_dir, 'subfamily'), draw_figures, 'step3_loops_rotate_and_grouped' + add_rotation_version_suffix(rotation_version), show_image_caption=family_side_by_side_image_caption)
        time_diff_for_cluster = time.time() - time_start_for_cluster
        logger.info('Processed cluster: ' + cluster_id)
        logger.info('Total loops: ' + str(len(loop_list)))
        # logger.info('Time elapsed: ' + str(round(time_diff_for_cluster/60)) + ' minutes.')
        logger.info('Time elapsed: ' + str(round(time_diff_for_cluster, 3)) + ' seconds.')
    # Interactive view-capture mode: all work per cluster was done in the
    # loop above; quit PyMOL and return nothing.
    if set_view_manually == True:
        pymol.cmd.quit()
        return
    if output_env == 'local':
        generate_table(summary_dir, subfamily_details_table_data, loop_type, True)
    else:
        generate_table(summary_dir, subfamily_details_table_data, loop_type)
    subfamily_cluster_fp.close()
    fp_align_len_threshold.close()
    write_rmsd_and_alignment_summary(rmsd_and_alignment_summary_dict, current_rmsd_data_dict)
    # Collages of superimposed subfamilies, one per rotation version.
    for cluster_id in sorted(cluster_image_file_list):
        for rotation_version in sorted(cluster_image_file_list[cluster_id]):
            generate_subfamily_image(cluster_image_file_list[cluster_id][rotation_version], pdb_organism_details, cluster_id, os.path.join(superimposition_output_dir, 'subfamily'), draw_figures, 'step4_subfamily_superimposed' + add_rotation_version_suffix(rotation_version))
    subfamily_out_dir = os.path.join(superimposition_output_dir, 'subfamily')
    # Combined step1/step3/step4 collage per family and rotation version.
    for cluster_id in sorted(cluster_image_file_list):
        for rotation_version in sorted(cluster_image_file_list[cluster_id]):
            image_file_list = []
            if draw_input_images == True:
                image_file_list.append(os.path.join(subfamily_out_dir, cluster_id + '_step1_initial.png'))
            image_file_list.append(os.path.join(subfamily_out_dir, cluster_id + '_step3_loops_rotate_and_grouped' + add_rotation_version_suffix(rotation_version) + '.png'))
            image_file_list.append(os.path.join(subfamily_out_dir, cluster_id + '_step4_subfamily_superimposed' + add_rotation_version_suffix(rotation_version) + '.png'))
            create_combined_subfamily_collage(os.path.join(subfamily_out_dir, 'Subfamily_Combined'), image_file_list, cluster_id, draw_figures, 'step5_combined_subfamily_output' + add_rotation_version_suffix(rotation_version))
    if draw_figures == True and save_pymol_session == True:
        optimize_pymol_session(pymol_session_dir, representative_dir, all_pymol_load_info, subfamily_pymol_load_info, representative_pymol_load_info)
    return time_in_distance_calc
def optimize_pymol_session(pymol_session_dir, representative_dir, all_pymol_load_info, subfamily_pymol_load_info, representative_pymol_load_info):
    """Shrink saved PyMOL sessions by removing motifs that do not belong.

    Each per-subfamily session keeps only the motifs of that subfamily; each
    representative session keeps only the single representative motif.
    Every session file is loaded, pruned, and saved back in place.

    Parameters:
        pymol_session_dir: directory holding '<cluster>-Sub<n>.pse' files.
        representative_dir: directory holding '<cluster>-Sub<n>_repr.pse' files.
        all_pymol_load_info: cluster_id -> {(i, r): load info} for every motif
            loaded while generating the sessions.
        subfamily_pymol_load_info: cluster_id -> component_id -> list of (i, r)
            motifs belonging to that subfamily.
        representative_pymol_load_info: cluster_id -> component_id -> single
            (i, r) representative motif.
    """
    logger.info('Optimizing PyMol sessions.')
    # Subfamily sessions: keep every motif listed for the component.
    _prune_pymol_sessions(pymol_session_dir, '.pse', subfamily_pymol_load_info,
                          all_pymol_load_info,
                          keep=lambda motif, kept: motif in kept,
                          show_cartoon=True)
    # Representative sessions: keep only the representative motif.
    # NOTE(review): the original code did not re-issue show('cartoon') for
    # representative sessions; that asymmetry is preserved here.
    _prune_pymol_sessions(representative_dir, '_repr.pse', representative_pymol_load_info,
                          all_pymol_load_info,
                          keep=lambda motif, kept: motif == kept,
                          show_cartoon=False)

def _prune_pymol_sessions(session_dir, session_suffix, kept_info, all_pymol_load_info, keep, show_cartoon):
    """Load each '<cluster>-Sub<n><session_suffix>' session from session_dir,
    delete every motif for which keep(motif, kept_info[cluster][component]) is
    False, optionally re-show cartoons, and save the session back in place."""
    for cluster_id in kept_info:
        for component_id in kept_info[cluster_id]:
            pymol_session_name = str(cluster_id) + '-Sub' + str(component_id + 1) + session_suffix
            session_path = os.path.join(session_dir, pymol_session_name)
            pymol.cmd.load(session_path)
            pymol.cmd.sync()
            kept = kept_info[cluster_id][component_id]
            for (i, r) in all_pymol_load_info[cluster_id]:
                if not keep((i, r), kept):
                    delete_motif_from_pymol(all_pymol_load_info[cluster_id][(i, r)])
            # NOTE(review): deletes an object literally named 't' — presumably
            # a temporary left over from earlier processing; confirm.
            pymol.cmd.delete('t')
            pymol.cmd.sync()
            if show_cartoon:
                pymol.cmd.show('cartoon')
                pymol.cmd.sync()
            pymol.cmd.save(session_path)
            pymol.cmd.sync()
| [
"pymol.cmd.png",
"numpy.sum",
"heapq.heappush",
"pymol.cmd.iterate_state",
"pymol.cmd.save",
"pymol.cmd.alter_state",
"pymol.finish_launching",
"matplotlib.pyplot.figure",
"pymol.cmd.zoom",
"pymol.cmd.color",
"sys.path.append",
"pymol.cmd.select",
"math.radians",
"numpy.std",
"numpy.tran... | [((162, 183), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (176, 183), False, 'import matplotlib\n'), ((300, 325), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (315, 325), False, 'import sys\n'), ((347, 375), 'sys.path.append', 'sys.path.append', (['scripts_dir'], {}), '(scripts_dir)\n', (362, 375), False, 'import sys\n'), ((20228, 20258), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta_fn', '"""fasta"""'], {}), "(fasta_fn, 'fasta')\n", (20239, 20258), False, 'from Bio import SeqIO\n'), ((52145, 52197), 'heapq.heappush', 'heapq.heappush', (['heap', '(0.0, (0.0, 0), start, parent)'], {}), '(heap, (0.0, (0.0, 0), start, parent))\n', (52159, 52197), False, 'import heapq\n'), ((81578, 81625), 'pymol.cmd.color', 'pymol.cmd.color', (['display_color', 'chain_load_name'], {}), '(display_color, chain_load_name)\n', (81593, 81625), False, 'import pymol\n'), ((81630, 81683), 'pymol.cmd.color', 'pymol.cmd.color', (['other_atom_color', 'other_bp_load_name'], {}), '(other_atom_color, other_bp_load_name)\n', (81645, 81683), False, 'import pymol\n'), ((81688, 81738), 'pymol.cmd.color', 'pymol.cmd.color', (['bp_atom_color', 'bp_atoms_load_name'], {}), '(bp_atom_color, bp_atoms_load_name)\n', (81703, 81738), False, 'import pymol\n'), ((81743, 81794), 'pymol.cmd.color', 'pymol.cmd.color', (['cano_atom_color', 'cano_bp_load_name'], {}), '(cano_atom_color, cano_bp_load_name)\n', (81758, 81794), False, 'import pymol\n'), ((84702, 84718), 'pymol.cmd.zoom', 'pymol.cmd.zoom', ([], {}), '()\n', (84716, 84718), False, 'import pymol\n'), ((84852, 84868), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (84866, 84868), False, 'import pymol\n'), ((84896, 84959), 'pymol.cmd.png', 'pymol.cmd.png', (['image_fname', '(1200)', '(1200)'], {'dpi': '(300)', 'ray': '(1)', 'quiet': '(1)'}), '(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)\n', (84909, 84959), False, 'import pymol\n'), ((85052, 85068), 'pymol.cmd.sync', 
'pymol.cmd.sync', ([], {}), '()\n', (85066, 85068), False, 'import pymol\n'), ((85477, 85506), 'pymol.cmd.delete', 'pymol.cmd.delete', (['a_load_name'], {}), '(a_load_name)\n', (85493, 85506), False, 'import pymol\n'), ((85511, 85540), 'pymol.cmd.delete', 'pymol.cmd.delete', (['b_load_name'], {}), '(b_load_name)\n', (85527, 85540), False, 'import pymol\n'), ((85546, 85582), 'pymol.cmd.delete', 'pymol.cmd.delete', (['other_bp_load_name'], {}), '(other_bp_load_name)\n', (85562, 85582), False, 'import pymol\n'), ((85587, 85623), 'pymol.cmd.delete', 'pymol.cmd.delete', (['bp_atoms_load_name'], {}), '(bp_atoms_load_name)\n', (85603, 85623), False, 'import pymol\n'), ((85628, 85663), 'pymol.cmd.delete', 'pymol.cmd.delete', (['cano_bp_load_name'], {}), '(cano_bp_load_name)\n', (85644, 85663), False, 'import pymol\n'), ((85785, 85801), 'pymol.cmd.hide', 'pymol.cmd.hide', ([], {}), '()\n', (85799, 85801), False, 'import pymol\n'), ((86113, 86129), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (86127, 86129), False, 'import pymol\n'), ((110781, 110795), 'random.seed', 'random.seed', (['(3)'], {}), '(3)\n', (110792, 110795), False, 'import random\n'), ((117840, 117856), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (117854, 117856), False, 'import pymol\n'), ((117891, 117911), 'pymol.cmd.deselect', 'pymol.cmd.deselect', ([], {}), '()\n', (117909, 117911), False, 'import pymol\n'), ((117916, 117939), 'pymol.cmd.delete', 'pymol.cmd.delete', (['"""all"""'], {}), "('all')\n", (117932, 117939), False, 'import pymol\n'), ((117944, 117968), 'pymol.cmd.reinitialize', 'pymol.cmd.reinitialize', ([], {}), '()\n', (117966, 117968), False, 'import pymol\n'), ((117973, 118000), 'pymol.cmd.bg_color', 'pymol.cmd.bg_color', (['"""white"""'], {}), "('white')\n", (117991, 118000), False, 'import pymol\n'), ((118754, 118831), 'pymol.cmd.select', 'pymol.cmd.select', (['chain_load_name', "(pdb_load_name + ' and chain %s' % chain_id)"], {}), "(chain_load_name, pdb_load_name + ' and 
chain %s' % chain_id)\n", (118770, 118831), False, 'import pymol\n'), ((119109, 119152), 'pymol.cmd.hide', 'pymol.cmd.hide', (['"""everything"""', 'pdb_load_name'], {}), "('everything', pdb_load_name)\n", (119123, 119152), False, 'import pymol\n'), ((121558, 121574), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (121572, 121574), False, 'import pymol\n'), ((121579, 121623), 'pymol.cmd.color', 'pymol.cmd.color', (['load_color', 'chain_load_name'], {}), '(load_color, chain_load_name)\n', (121594, 121623), False, 'import pymol\n'), ((122203, 122276), 'pymol.cmd.alter_state', 'pymol.cmd.alter_state', (['(1)', 'pdb_load_name', '"""(x,y,z)=stored.res_list.pop(0)"""'], {}), "(1, pdb_load_name, '(x,y,z)=stored.res_list.pop(0)')\n", (122224, 122276), False, 'import pymol\n'), ((125913, 125962), 'pymol.cmd.color', 'pymol.cmd.color', (['display_color', 'display_load_name'], {}), '(display_color, display_load_name)\n', (125928, 125962), False, 'import pymol\n'), ((125967, 126012), 'pymol.cmd.color', 'pymol.cmd.color', (['align_color', 'align_load_name'], {}), '(align_color, align_load_name)\n', (125982, 126012), False, 'import pymol\n'), ((126789, 126805), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (126803, 126805), False, 'import pymol\n'), ((126810, 126826), 'pymol.cmd.zoom', 'pymol.cmd.zoom', ([], {}), '()\n', (126824, 126826), False, 'import pymol\n'), ((126862, 126878), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (126876, 126878), False, 'import pymol\n'), ((127001, 127064), 'pymol.cmd.png', 'pymol.cmd.png', (['image_fname', '(1200)', '(1200)'], {'dpi': '(300)', 'ray': '(1)', 'quiet': '(1)'}), '(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)\n', (127014, 127064), False, 'import pymol\n'), ((127165, 127181), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (127179, 127181), False, 'import pymol\n'), ((131239, 131310), 'pymol.cmd.iterate_state', 'pymol.cmd.iterate_state', (['(1)', 'pdb_load_name', '"""stored.pdb.append([x,y,z])"""'], 
{}), "(1, pdb_load_name, 'stored.pdb.append([x,y,z])')\n", (131262, 131310), False, 'import pymol\n'), ((131694, 131736), 'numpy.dot', 'numpy.dot', (['pdb_translated', 'rotation_matrix'], {}), '(pdb_translated, rotation_matrix)\n', (131703, 131736), False, 'import numpy\n'), ((137965, 137981), 'numpy.dot', 'numpy.dot', (['v', 'wt'], {}), '(v, wt)\n', (137974, 137981), False, 'import numpy\n'), ((137994, 138022), 'numpy.dot', 'numpy.dot', (['pdb_translated', 'u'], {}), '(pdb_translated, u)\n', (138003, 138022), False, 'import numpy\n'), ((145120, 145166), 'pymol.cmd.color', 'pymol.cmd.color', (['align_color', 'target_load_name'], {}), '(align_color, target_load_name)\n', (145135, 145166), False, 'import pymol\n'), ((145171, 145191), 'pymol.cmd.deselect', 'pymol.cmd.deselect', ([], {}), '()\n', (145189, 145191), False, 'import pymol\n'), ((145828, 145859), 'pymol.cmd.zoom', 'pymol.cmd.zoom', (['chain_load_name'], {}), '(chain_load_name)\n', (145842, 145859), False, 'import pymol\n'), ((146197, 146226), 'builtins.input', 'input', (['"""Continue? (Yes/No): """'], {}), "('Continue? 
(Yes/No): ')\n", (146202, 146226), False, 'from builtins import input\n'), ((146485, 146501), 'pymol.cmd.hide', 'pymol.cmd.hide', ([], {}), '()\n', (146499, 146501), False, 'import pymol\n'), ((146506, 146537), 'pymol.cmd.delete', 'pymol.cmd.delete', (['pdb_load_name'], {}), '(pdb_load_name)\n', (146522, 146537), False, 'import pymol\n'), ((146542, 146575), 'pymol.cmd.delete', 'pymol.cmd.delete', (['chain_load_name'], {}), '(chain_load_name)\n', (146558, 146575), False, 'import pymol\n'), ((146580, 146614), 'pymol.cmd.delete', 'pymol.cmd.delete', (['target_load_name'], {}), '(target_load_name)\n', (146596, 146614), False, 'import pymol\n'), ((146619, 146654), 'pymol.cmd.delete', 'pymol.cmd.delete', (['display_load_name'], {}), '(display_load_name)\n', (146635, 146654), False, 'import pymol\n'), ((146714, 146730), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (146728, 146730), False, 'import pymol\n'), ((151057, 151092), 'pymol.cmd.delete', 'pymol.cmd.delete', (['display_load_name'], {}), '(display_load_name)\n', (151073, 151092), False, 'import pymol\n'), ((151097, 151131), 'pymol.cmd.delete', 'pymol.cmd.delete', (['target_load_name'], {}), '(target_load_name)\n', (151113, 151131), False, 'import pymol\n'), ((151136, 151169), 'pymol.cmd.delete', 'pymol.cmd.delete', (['chain_load_name'], {}), '(chain_load_name)\n', (151152, 151169), False, 'import pymol\n'), ((151174, 151205), 'pymol.cmd.delete', 'pymol.cmd.delete', (['pdb_load_name'], {}), '(pdb_load_name)\n', (151190, 151205), False, 'import pymol\n'), ((9441, 9487), 'math.sqrt', 'math.sqrt', (['(total_rmsd / total_alignment_length)'], {}), '(total_rmsd / total_alignment_length)\n', (9450, 9487), False, 'import math\n'), ((19432, 19442), 'sys.exit', 'sys.exit', ([], {}), '()\n', (19440, 19442), False, 'import sys\n'), ((52265, 52284), 'heapq.heappop', 'heapq.heappop', (['heap'], {}), '(heap)\n', (52278, 52284), False, 'import heapq\n'), ((81416, 81458), 'pymol.cmd.show', 'pymol.cmd.show', 
(['"""cartoon"""', 'chain_load_name'], {}), "('cartoon', chain_load_name)\n", (81430, 81458), False, 'import pymol\n'), ((81477, 81521), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', 'display_load_name'], {}), "('cartoon', display_load_name)\n", (81491, 81521), False, 'import pymol\n'), ((82087, 82152), 'pymol.cmd.select', 'pymol.cmd.select', (['a_load_name', "(chain_load_name + ' and resi ' + a)"], {}), "(a_load_name, chain_load_name + ' and resi ' + a)\n", (82103, 82152), False, 'import pymol\n'), ((82161, 82226), 'pymol.cmd.select', 'pymol.cmd.select', (['b_load_name', "(chain_load_name + ' and resi ' + b)"], {}), "(b_load_name, chain_load_name + ' and resi ' + b)\n", (82177, 82226), False, 'import pymol\n'), ((82299, 82374), 'pymol.cmd.iterate', 'pymol.cmd.iterate', (['a_load_name', '"""a_name_list.append(name)"""'], {'space': 'name_dict'}), "(a_load_name, 'a_name_list.append(name)', space=name_dict)\n", (82316, 82374), False, 'import pymol\n'), ((82383, 82458), 'pymol.cmd.iterate', 'pymol.cmd.iterate', (['b_load_name', '"""b_name_list.append(name)"""'], {'space': 'name_dict'}), "(b_load_name, 'b_name_list.append(name)', space=name_dict)\n", (82400, 82458), False, 'import pymol\n'), ((82905, 82916), 'time.time', 'time.time', ([], {}), '()\n', (82914, 82916), False, 'import time\n'), ((83954, 84026), 'pymol.cmd.select', 'pymol.cmd.select', (['a_single_name', "(a_load_name + ' and name ' + min_a_name)"], {}), "(a_single_name, a_load_name + ' and name ' + min_a_name)\n", (83970, 84026), False, 'import pymol\n'), ((84035, 84107), 'pymol.cmd.select', 'pymol.cmd.select', (['b_single_name', "(b_load_name + ' and name ' + min_b_name)"], {}), "(b_single_name, b_load_name + ' and name ' + min_b_name)\n", (84051, 84107), False, 'import pymol\n'), ((84263, 84327), 'pymol.cmd.distance', 'pymol.cmd.distance', (['dist_load_name', 'a_single_name', 'b_single_name'], {}), '(dist_load_name, a_single_name, b_single_name)\n', (84281, 84327), False, 'import pymol\n'), 
((84394, 84434), 'pymol.cmd.hide', 'pymol.cmd.hide', (['"""labels"""', 'dist_load_name'], {}), "('labels', dist_load_name)\n", (84408, 84434), False, 'import pymol\n'), ((84490, 84536), 'pymol.cmd.color', 'pymol.cmd.color', (['bp_line_color', 'dist_load_name'], {}), '(bp_line_color, dist_load_name)\n', (84505, 84536), False, 'import pymol\n'), ((84546, 84577), 'pymol.cmd.delete', 'pymol.cmd.delete', (['a_single_name'], {}), '(a_single_name)\n', (84562, 84577), False, 'import pymol\n'), ((84586, 84617), 'pymol.cmd.delete', 'pymol.cmd.delete', (['b_single_name'], {}), '(b_single_name)\n', (84602, 84617), False, 'import pymol\n'), ((85113, 85133), 'pymol.cmd.deselect', 'pymol.cmd.deselect', ([], {}), '()\n', (85131, 85133), False, 'import pymol\n'), ((85437, 85453), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (85451, 85453), False, 'import pymol\n'), ((85705, 85727), 'pymol.cmd.delete', 'pymol.cmd.delete', (['item'], {}), '(item)\n', (85721, 85727), False, 'import pymol\n'), ((119561, 119647), 'pymol.cmd.select', 'pymol.cmd.select', (['"""t"""', "(target_load_name + ' and resi %s and name %s' % (res, atom))"], {}), "('t', target_load_name + ' and resi %s and name %s' % (res,\n atom))\n", (119577, 119647), False, 'import pymol\n'), ((119652, 119713), 'pymol.cmd.iterate_state', 'pymol.cmd.iterate_state', (['(1)', '"""t"""', '"""stored.sel.append([x,y,z])"""'], {}), "(1, 't', 'stored.sel.append([x,y,z])')\n", (119675, 119713), False, 'import pymol\n'), ((120457, 120486), 'numpy.sum', 'numpy.sum', (['coord_list'], {'axis': '(0)'}), '(coord_list, axis=0)\n', (120466, 120486), False, 'import numpy\n'), ((124805, 124862), 'pymol.cmd.set', 'pymol.cmd.set', ([], {'name': '"""cartoon_ring_mode"""', 'value': '(1)', 'quiet': '(1)'}), "(name='cartoon_ring_mode', value=1, quiet=1)\n", (124818, 124862), False, 'import pymol\n'), ((124869, 124936), 'pymol.cmd.set', 'pymol.cmd.set', ([], {'name': '"""cartoon_ring_transparency"""', 'value': '(0.5)', 'quiet': '(1)'}), 
"(name='cartoon_ring_transparency', value=0.5, quiet=1)\n", (124882, 124936), False, 'import pymol\n'), ((124953, 125010), 'pymol.cmd.set', 'pymol.cmd.set', ([], {'name': '"""cartoon_ring_mode"""', 'value': '(0)', 'quiet': '(1)'}), "(name='cartoon_ring_mode', value=0, quiet=1)\n", (124966, 125010), False, 'import pymol\n'), ((125570, 125634), 'pymol.cmd.set_color', 'pymol.cmd.set_color', (["(display_load_name + '_color')", 'display_color'], {}), "(display_load_name + '_color', display_color)\n", (125589, 125634), False, 'import pymol\n'), ((125798, 125858), 'pymol.cmd.set_color', 'pymol.cmd.set_color', (["(align_load_name + '_color')", 'align_color'], {}), "(align_load_name + '_color', align_color)\n", (125817, 125858), False, 'import pymol\n'), ((126328, 126419), 'pymol.cmd.label', 'pymol.cmd.label', (["(display_load_name + ' and name ' + label_atom)", '"""\'%s-%s\' %(resn, resi)"""'], {}), '(display_load_name + \' and name \' + label_atom,\n "\'%s-%s\' %(resn, resi)")\n', (126343, 126419), False, 'import pymol\n'), ((126615, 126657), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', 'chain_load_name'], {}), "('cartoon', chain_load_name)\n", (126629, 126657), False, 'import pymol\n'), ((126711, 126755), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', 'display_load_name'], {}), "('cartoon', display_load_name)\n", (126725, 126755), False, 'import pymol\n'), ((127254, 127273), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (127266, 127273), False, 'import math\n'), ((127296, 127315), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (127308, 127315), False, 'import math\n'), ((141545, 141561), 'pymol.cmd.zoom', 'pymol.cmd.zoom', ([], {}), '()\n', (141559, 141561), False, 'import pymol\n'), ((141609, 141625), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (141623, 141625), False, 'import pymol\n'), ((141634, 141697), 'pymol.cmd.png', 'pymol.cmd.png', (['image_fname', '(1200)', '(1200)'], {'dpi': '(300)', 'ray': '(1)', 
'quiet': '(1)'}), '(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)\n', (141647, 141697), False, 'import pymol\n'), ((141775, 141791), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (141789, 141791), False, 'import pymol\n'), ((145662, 145704), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', 'chain_load_name'], {}), "('cartoon', chain_load_name)\n", (145676, 145704), False, 'import pymol\n'), ((145774, 145818), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', 'display_load_name'], {}), "('cartoon', display_load_name)\n", (145788, 145818), False, 'import pymol\n'), ((146299, 146319), 'pymol.cmd.get_view', 'pymol.cmd.get_view', ([], {}), '()\n', (146317, 146319), False, 'import pymol\n'), ((154042, 154139), 'pymol.cmd.select', 'pymol.cmd.select', (["(display_load_name + '_temp')", "(target_load_name + ' and (' + select_str + ')')"], {}), "(display_load_name + '_temp', target_load_name + ' and (' +\n select_str + ')')\n", (154058, 154139), False, 'import pymol\n'), ((154144, 154268), 'pymol.cmd.select', 'pymol.cmd.select', (["(display_load_name + '_temp2')", "('byres ' + pdb_load_name + ' within 5 of ' + display_load_name + '_temp')"], {}), "(display_load_name + '_temp2', 'byres ' + pdb_load_name +\n ' within 5 of ' + display_load_name + '_temp')\n", (154160, 154268), False, 'import pymol\n'), ((154273, 154398), 'pymol.cmd.select', 'pymol.cmd.select', (["(display_load_name + '_temp3')", "('byres ' + pdb_load_name + ' within 10 of ' + display_load_name + '_temp')"], {}), "(display_load_name + '_temp3', 'byres ' + pdb_load_name +\n ' within 10 of ' + display_load_name + '_temp')\n", (154289, 154398), False, 'import pymol\n'), ((154403, 154509), 'pymol.cmd.select', 'pymol.cmd.select', (["(display_load_name + '_label')", "(display_load_name + '_temp2' + ' and ' + adj_chain_str)"], {}), "(display_load_name + '_label', display_load_name + '_temp2' +\n ' and ' + adj_chain_str)\n", (154419, 154509), False, 'import pymol\n'), ((154514, 154619), 
'pymol.cmd.select', 'pymol.cmd.select', (["(display_load_name + '_zoom')", "(display_load_name + '_temp3' + ' and ' + adj_chain_str)"], {}), "(display_load_name + '_zoom', display_load_name + '_temp3' +\n ' and ' + adj_chain_str)\n", (154530, 154619), False, 'import pymol\n'), ((154626, 154691), 'pymol.cmd.set', 'pymol.cmd.set', ([], {'name': '"""cartoon_ring_mode"""', 'value': 'ring_mode', 'quiet': '(1)'}), "(name='cartoon_ring_mode', value=ring_mode, quiet=1)\n", (154639, 154691), False, 'import pymol\n'), ((154698, 154783), 'pymol.cmd.set', 'pymol.cmd.set', ([], {'name': '"""cartoon_ring_transparency"""', 'value': 'ring_transparency', 'quiet': '(1)'}), "(name='cartoon_ring_transparency', value=ring_transparency,\n quiet=1)\n", (154711, 154783), False, 'import pymol\n'), ((154786, 154891), 'pymol.cmd.label', 'pymol.cmd.label', (['(display_load_name + \'_label\' + " and (name C2\' or name CA)")', '"""\'%s-%s\' %(resn, resi)"""'], {}), '(display_load_name + \'_label\' + " and (name C2\' or name CA)",\n "\'%s-%s\' %(resn, resi)")\n', (154801, 154891), False, 'import pymol\n'), ((154896, 154939), 'pymol.cmd.zoom', 'pymol.cmd.zoom', (["(display_load_name + '_zoom')"], {}), "(display_load_name + '_zoom')\n", (154910, 154939), False, 'import pymol\n'), ((154949, 154969), 'pymol.cmd.deselect', 'pymol.cmd.deselect', ([], {}), '()\n', (154967, 154969), False, 'import pymol\n'), ((154978, 155018), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', 'pdb_load_name'], {}), "('cartoon', pdb_load_name)\n", (154992, 155018), False, 'import pymol\n'), ((155028, 155073), 'pymol.cmd.delete', 'pymol.cmd.delete', (["(display_load_name + '_zoom')"], {}), "(display_load_name + '_zoom')\n", (155044, 155073), False, 'import pymol\n'), ((155082, 155128), 'pymol.cmd.delete', 'pymol.cmd.delete', (["(display_load_name + '_label')"], {}), "(display_load_name + '_label')\n", (155098, 155128), False, 'import pymol\n'), ((155137, 155183), 'pymol.cmd.delete', 'pymol.cmd.delete', 
(["(display_load_name + '_temp3')"], {}), "(display_load_name + '_temp3')\n", (155153, 155183), False, 'import pymol\n'), ((155192, 155238), 'pymol.cmd.delete', 'pymol.cmd.delete', (["(display_load_name + '_temp2')"], {}), "(display_load_name + '_temp2')\n", (155208, 155238), False, 'import pymol\n'), ((155247, 155292), 'pymol.cmd.delete', 'pymol.cmd.delete', (["(display_load_name + '_temp')"], {}), "(display_load_name + '_temp')\n", (155263, 155292), False, 'import pymol\n'), ((158768, 158831), 'pymol.cmd.png', 'pymol.cmd.png', (['image_fname', '(1200)', '(1200)'], {'dpi': '(300)', 'ray': '(1)', 'quiet': '(1)'}), '(image_fname, 1200, 1200, dpi=300, ray=1, quiet=1)\n', (158781, 158831), False, 'import pymol\n'), ((158840, 158856), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (158854, 158856), False, 'import pymol\n'), ((158975, 158991), 'pymol.cmd.hide', 'pymol.cmd.hide', ([], {}), '()\n', (158989, 158991), False, 'import pymol\n'), ((159465, 159490), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'frameon': '(False)'}), '(frameon=False)\n', (159475, 159490), True, 'import matplotlib.pyplot as plt\n'), ((159511, 159523), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (159521, 159523), True, 'import matplotlib.pyplot as plt\n'), ((159695, 159719), 'pymol.finish_launching', 'pymol.finish_launching', ([], {}), '()\n', (159717, 159719), False, 'import pymol\n'), ((161626, 161637), 'time.time', 'time.time', ([], {}), '()\n', (161635, 161637), False, 'import time\n'), ((163463, 163474), 'time.time', 'time.time', ([], {}), '()\n', (163472, 163474), False, 'import time\n'), ((186385, 186401), 'pymol.cmd.quit', 'pymol.cmd.quit', ([], {}), '()\n', (186399, 186401), False, 'import pymol\n'), ((542, 572), 'sys.path.append', 'sys.path.append', (['pymol_py3_dir'], {}), '(pymol_py3_dir)\n', (557, 572), False, 'import sys\n'), ((3115, 3140), 'numpy.std', 'numpy.std', (['align_len_list'], {}), '(align_len_list)\n', (3124, 3140), False, 'import numpy\n'), 
((3663, 3673), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3671, 3673), False, 'import sys\n'), ((6932, 6952), 'math.sqrt', 'math.sqrt', (['align_len'], {}), '(align_len)\n', (6941, 6952), False, 'import math\n'), ((9557, 9590), 'math.sqrt', 'math.sqrt', (['total_alignment_length'], {}), '(total_alignment_length)\n', (9566, 9590), False, 'import math\n'), ((83882, 83893), 'time.time', 'time.time', ([], {}), '()\n', (83891, 83893), False, 'import time\n'), ((111733, 111748), 'random.random', 'random.random', ([], {}), '()\n', (111746, 111748), False, 'import random\n'), ((111750, 111765), 'random.random', 'random.random', ([], {}), '()\n', (111763, 111765), False, 'import random\n'), ((111767, 111782), 'random.random', 'random.random', ([], {}), '()\n', (111780, 111782), False, 'import random\n'), ((115365, 115375), 'sys.exit', 'sys.exit', ([], {}), '()\n', (115373, 115375), False, 'import sys\n'), ((132741, 132751), 'sys.exit', 'sys.exit', ([], {}), '()\n', (132749, 132751), False, 'import sys\n'), ((137642, 137672), 'numpy.sum', 'numpy.sum', (['(sel1 * sel1)'], {'axis': '(0)'}), '(sel1 * sel1, axis=0)\n', (137651, 137672), False, 'import numpy\n'), ((137693, 137723), 'numpy.sum', 'numpy.sum', (['(sel2 * sel2)'], {'axis': '(0)'}), '(sel2 * sel2, axis=0)\n', (137702, 137723), False, 'import numpy\n'), ((137775, 137796), 'numpy.transpose', 'numpy.transpose', (['sel2'], {}), '(sel2)\n', (137790, 137796), False, 'import numpy\n'), ((141083, 141115), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', '"""all"""'], {}), "('cartoon', 'all')\n", (141097, 141115), False, 'import pymol\n'), ((145593, 145621), 'pymol.cmd.set_view', 'pymol.cmd.set_view', (['lines[0]'], {}), '(lines[0])\n', (145611, 145621), False, 'import pymol\n'), ((158908, 158937), 'pymol.cmd.save', 'pymol.cmd.save', (['session_fname'], {}), '(session_fname)\n', (158922, 158937), False, 'import pymol\n'), ((158950, 158966), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (158964, 158966), False, 
'import pymol\n'), ((159848, 159889), 'pymol.finish_launching', 'pymol.finish_launching', (["['pymol', '-cqQ']"], {}), "(['pymol', '-cqQ'])\n", (159870, 159889), False, 'import pymol\n'), ((186002, 186013), 'time.time', 'time.time', ([], {}), '()\n', (186011, 186013), False, 'import time\n'), ((188894, 188910), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (188908, 188910), False, 'import pymol\n'), ((189155, 189176), 'pymol.cmd.delete', 'pymol.cmd.delete', (['"""t"""'], {}), "('t')\n", (189171, 189176), False, 'import pymol\n'), ((189189, 189205), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (189203, 189205), False, 'import pymol\n'), ((189219, 189244), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""'], {}), "('cartoon')\n", (189233, 189244), False, 'import pymol\n'), ((189257, 189273), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (189271, 189273), False, 'import pymol\n'), ((189367, 189383), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (189381, 189383), False, 'import pymol\n'), ((189698, 189714), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (189712, 189714), False, 'import pymol\n'), ((189960, 189981), 'pymol.cmd.delete', 'pymol.cmd.delete', (['"""t"""'], {}), "('t')\n", (189976, 189981), False, 'import pymol\n'), ((189995, 190011), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (190009, 190011), False, 'import pymol\n'), ((190105, 190121), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (190119, 190121), False, 'import pymol\n'), ((9299, 9319), 'math.sqrt', 'math.sqrt', (['align_len'], {}), '(align_len)\n', (9308, 9319), False, 'import math\n'), ((9747, 9819), 'functools.reduce', 'reduce', (['(lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2]))', 'coord_list'], {}), '(lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2]), coord_list)\n', (9753, 9819), False, 'from functools import reduce\n'), ((21480, 21510), 're.split', 're.split', (['"""-?(\\\\d+)"""', 'decom[0]'], {}), "('-?(\\\\d+)', 
decom[0])\n", (21488, 21510), False, 'import re\n'), ((28261, 28271), 'sys.exit', 'sys.exit', ([], {}), '()\n', (28269, 28271), False, 'import sys\n'), ((53001, 53063), 'heapq.heappush', 'heapq.heappush', (['heap', '(traversal_weight, edge_weight, n, node)'], {}), '(heap, (traversal_weight, edge_weight, n, node))\n', (53015, 53063), False, 'import heapq\n'), ((53401, 53424), 'math.sqrt', 'math.sqrt', (['align_length'], {}), '(align_length)\n', (53410, 53424), False, 'import math\n'), ((83363, 83431), 'pymol.cmd.select', 'pymol.cmd.select', (['a_single_name', "(a_load_name + ' and name ' + a_name)"], {}), "(a_single_name, a_load_name + ' and name ' + a_name)\n", (83379, 83431), False, 'import pymol\n'), ((83448, 83516), 'pymol.cmd.select', 'pymol.cmd.select', (['b_single_name', "(b_load_name + ' and name ' + b_name)"], {}), "(b_single_name, b_load_name + ' and name ' + b_name)\n", (83464, 83516), False, 'import pymol\n'), ((83544, 83600), 'pymol.cmd.distance', 'pymol.cmd.distance', (['"""dist"""', 'a_single_name', 'b_single_name'], {}), "('dist', a_single_name, b_single_name)\n", (83562, 83600), False, 'import pymol\n'), ((83785, 83809), 'pymol.cmd.delete', 'pymol.cmd.delete', (['"""dist"""'], {}), "('dist')\n", (83801, 83809), False, 'import pymol\n'), ((115539, 115549), 'sys.exit', 'sys.exit', ([], {}), '()\n', (115547, 115549), False, 'import sys\n'), ((119774, 119803), 'numpy.sum', 'numpy.sum', (['stored.sel'], {'axis': '(0)'}), '(stored.sel, axis=0)\n', (119783, 119803), False, 'import numpy\n'), ((127914, 127924), 'sys.exit', 'sys.exit', ([], {}), '()\n', (127922, 127924), False, 'import sys\n'), ((183123, 183139), 'pymol.cmd.hide', 'pymol.cmd.hide', ([], {}), '()\n', (183137, 183139), False, 'import pymol\n'), ((183235, 183251), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (183249, 183251), False, 'import pymol\n'), ((137835, 137854), 'numpy.linalg.det', 'numpy.linalg.det', (['v'], {}), '(v)\n', (137851, 137854), False, 'import numpy\n'), ((137857, 
137877), 'numpy.linalg.det', 'numpy.linalg.det', (['wt'], {}), '(wt)\n', (137873, 137877), False, 'import numpy\n'), ((141378, 141422), 'pymol.cmd.show', 'pymol.cmd.show', (['"""cartoon"""', 'display_load_name'], {}), "('cartoon', display_load_name)\n", (141392, 141422), False, 'import pymol\n'), ((182743, 182763), 'pymol.cmd.deselect', 'pymol.cmd.deselect', ([], {}), '()\n', (182761, 182763), False, 'import pymol\n'), ((183045, 183061), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (183059, 183061), False, 'import pymol\n'), ((169390, 169400), 'sys.exit', 'sys.exit', ([], {}), '()\n', (169398, 169400), False, 'import sys\n'), ((170912, 170928), 'pymol.cmd.hide', 'pymol.cmd.hide', ([], {}), '()\n', (170926, 170928), False, 'import pymol\n'), ((170953, 170969), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (170967, 170969), False, 'import pymol\n'), ((178815, 178831), 'pymol.cmd.hide', 'pymol.cmd.hide', ([], {}), '()\n', (178829, 178831), False, 'import pymol\n'), ((178856, 178872), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (178870, 178872), False, 'import pymol\n'), ((181951, 181967), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (181965, 181967), False, 'import pymol\n'), ((181997, 182013), 'pymol.cmd.hide', 'pymol.cmd.hide', ([], {}), '()\n', (182011, 182013), False, 'import pymol\n'), ((182135, 182151), 'pymol.cmd.sync', 'pymol.cmd.sync', ([], {}), '()\n', (182149, 182151), False, 'import pymol\n'), ((163796, 163807), 'time.time', 'time.time', ([], {}), '()\n', (163805, 163807), False, 'import time\n')] |
from __future__ import absolute_import
try:
import unittest2 as unittest
except ImportError:
import unittest
import neo.io.blackrockio
import os
import numpy as np
import quantities as pq
import glob
from .common_io_test import BaseTestIO
from ...io import tools
from ..tools import assert_arrays_almost_equal
import struct
import tempfile
#~ class testRead(unittest.TestCase):
#~ """Tests that data can be read from KlustaKwik files"""
#~ def test1(self):
#~ """Tests that data and metadata are read correctly"""
#~ pass
#~ def test2(self):
#~ """Checks that cluster id autosets to 0 without clu file"""
#~ pass
#~ dirname = os.path.normpath('./files_for_tests/klustakwik/test2')
#~ kio = neo.io.KlustaKwikIO(filename=os.path.join(dirname, 'base2'),
#~ sampling_rate=1000.)
#~ block = kio.read()
#~ seg = block.segments[0]
#~ self.assertEqual(len(seg.spiketrains), 1)
#~ self.assertEqual(seg.spiketrains[0].name, 'unit 0 from group 5')
#~ self.assertEqual(seg.spiketrains[0].annotations['cluster'], 0)
#~ self.assertEqual(seg.spiketrains[0].annotations['group'], 5)
#~ self.assertEqual(seg.spiketrains[0].t_start, 0.0)
#~ self.assertTrue(np.all(seg.spiketrains[0].times == np.array(
#~ [0.026, 0.122, 0.228])))
class testWrite(unittest.TestCase):
    """Round-trip test for BlackrockIO: write a neo Block to a .ns5 file,
    then verify the raw on-disk bytes (text header, channel indexes and
    int16 sample data) by hand."""

    def setUp(self):
        # Test data lives under the system temp dir; skip the test (rather
        # than fail) when the shared data directory has not been set up.
        self.datadir = os.path.join(tempfile.gettempdir(),
            'files_for_testing_neo',
            'blackrock/test2/')
        self.fn = os.path.join(self.datadir, 'test.write.ns5')
        if not os.path.exists(self.datadir):
            raise unittest.SkipTest('data directory does not exist: ' +
                                    self.datadir)

    def test1(self):
        """Write data to binary file, then read it back in and verify"""
        # delete temporary file before trying to write to it
        if os.path.exists(self.fn):
            os.remove(self.fn)
        block = neo.Block()
        full_range = 234 * pq.mV
        # Create segment1 with analogsignals
        segment1 = neo.Segment()
        sig1 = neo.AnalogSignal([3, 4, 5], units='mV', channel_index=3,
                                sampling_rate=30000. * pq.Hz)
        sig2 = neo.AnalogSignal([6, -4, -5], units='mV', channel_index=4,
                                sampling_rate=30000. * pq.Hz)
        segment1.analogsignals.append(sig1)
        segment1.analogsignals.append(sig2)
        # Create segment2 with analogsignals
        segment2 = neo.Segment()
        sig3 = neo.AnalogSignal([-3, -4, -5], units='mV', channel_index=3,
                                sampling_rate=30000. * pq.Hz)
        sig4 = neo.AnalogSignal([-6, 4, 5], units='mV', channel_index=4,
                                sampling_rate=30000. * pq.Hz)
        segment2.analogsignals.append(sig3)
        segment2.analogsignals.append(sig4)
        # Link segments to block
        block.segments.append(segment1)
        block.segments.append(segment2)
        # Create the hardware view and the many-to-one relationships that
        # the writer expects to find on the block.
        tools.populate_RecordingChannel(block)
        tools.create_many_to_one_relationship(block)
        # Check that blackrockio is correctly extracting channel indexes
        self.assertEqual(neo.io.blackrockio.channel_indexes_in_segment(
            segment1), [3, 4])
        self.assertEqual(neo.io.blackrockio.channel_indexes_in_segment(
            segment2), [3, 4])
        # Create writer. Write block, then read back in.
        bio = neo.io.BlackrockIO(filename=self.fn, full_range=full_range)
        bio.write_block(block)
        # BUGFIX: replaced the Python-2-only builtin ``file()`` with
        # ``open(..., 'rb')`` in a ``with`` block (guaranteed close even when
        # an assertion fails).  Expected values are byte strings, so the
        # comparisons behave identically on Python 2 and Python 3.
        with open(self.fn, 'rb') as fi:
            # Text header
            self.assertEqual(fi.read(16), b'NEURALSG30 kS/s\x00')
            self.assertEqual(fi.read(8), b'\x00\x00\x00\x00\x00\x00\x00\x00')
            # Integers: period, channel count, channel index1, channel index2
            self.assertEqual(struct.unpack('<4I', fi.read(16)), (1, 2, 3, 4))
            # What should the signals be after conversion?
            conv = float(full_range) / 2 ** 16
            sigs = np.array(
                [np.concatenate((sig1, sig3)), np.concatenate((sig2, sig4))])
            # BUGFIX: ``np.int`` was removed from numpy (deprecated in 1.20);
            # the builtin ``int`` is the documented replacement.
            sigs_converted = np.rint(sigs / conv).astype(int)
            # Check that each time point is the same
            for time_slc in sigs_converted.transpose():
                written_data = struct.unpack('<2h', fi.read(4))
                self.assertEqual(list(time_slc), list(written_data))
            # Check that we read to the end
            currentpos = fi.tell()
            fi.seek(0, 2)
            truelen = fi.tell()
            self.assertEqual(currentpos, truelen)
class testRead(unittest.TestCase):
    """Read a known two-channel .ns5 fixture back through BlackrockIO and
    check header fields and per-channel sample values."""

    def setUp(self):
        # Fixture lives in the shared test-data area; skip when missing.
        self.fn = os.path.join(tempfile.gettempdir(),
            'files_for_testing_neo',
            'blackrock/test2/test.ns5')
        if not os.path.exists(self.fn):
            raise unittest.SkipTest('data file does not exist:' + self.fn)

    def test1(self):
        """Read data into one big segment (default)"""
        reader = neo.io.BlackrockIO(filename=self.fn, full_range=8192 * pq.mV)
        blk = reader.read_block(n_starts=[0], n_stops=[6])
        self.assertEqual(reader.header.Channel_Count, 2)
        self.assertEqual(reader.header.n_samples, 6)
        # The whole recording lands in a single segment.
        self.assertEqual(len(blk.segments), 1)
        only_seg = blk.segments[0]
        self.assertEqual(len(only_seg.analogsignals), 2)
        expected_channels = ([3., 4., 5., -3., -4., -5.],
                             [6., -4., -5., -6., 4., 5.])
        for signal, values in zip(only_seg.analogsignals, expected_channels):
            assert_arrays_almost_equal(signal, values * pq.mV, .0001)

    def test2(self):
        """Read data into two segments instead of just one"""
        reader = neo.io.BlackrockIO(filename=self.fn, full_range=8192 * pq.mV)
        blk = reader.read_block(n_starts=[0, 3], n_stops=[2, 6])
        self.assertEqual(reader.header.Channel_Count, 2)
        self.assertEqual(reader.header.n_samples, 6)
        # The two (start, stop) windows produce two segments.
        self.assertEqual(len(blk.segments), 2)
        expected_per_segment = [
            ([3., 4.], [6., -4.]),
            ([-3., -4., -5.], [-6., 4., 5.]),
        ]
        for seg, (chan_a, chan_b) in zip(blk.segments, expected_per_segment):
            self.assertEqual(len(seg.analogsignals), 2)
            assert_arrays_almost_equal(seg.analogsignals[0],
                chan_a * pq.mV, .0001)
            assert_arrays_almost_equal(seg.analogsignals[1],
                chan_b * pq.mV, .0001)
class CommonTests(BaseTestIO, unittest.TestCase ):
    # Generic IO-compliance tests inherited from BaseTestIO, parametrized
    # for BlackrockIO via the class attributes below.
    ioclass = neo.io.BlackrockIO
    # Writing a block and reading it back does not reproduce the input
    # exactly, so the generic round-trip (bijectivity) check is disabled.
    read_and_write_is_bijective = False
    # These are the files it tries to read and test for compliance
    files_to_test = [
        'test2/test.ns5'
        ]
    # Will fetch from g-node if they don't already exist locally
    # How does it know to do this before any of the other tests?
    files_to_download = [
        'test2/test.ns5'
        ]
#~ def delete_test_session():
#~ """Removes all file in directory so we can test writing to it"""
#~ for fi in glob.glob(os.path.join(
#~ './files_for_tests/klustakwik/test3', '*')):
#~ os.remove(fi)
if __name__ == '__main__':
    # Discover and run every test case in this module when executed directly.
    unittest.main()
| [
"unittest.main",
"os.remove",
"tempfile.gettempdir",
"os.path.exists",
"numpy.rint",
"unittest.SkipTest",
"os.path.join",
"numpy.concatenate"
] | [((7975, 7990), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7988, 7990), False, 'import unittest\n'), ((1626, 1670), 'os.path.join', 'os.path.join', (['self.datadir', '"""test.write.ns5"""'], {}), "(self.datadir, 'test.write.ns5')\n", (1638, 1670), False, 'import os\n'), ((2005, 2028), 'os.path.exists', 'os.path.exists', (['self.fn'], {}), '(self.fn)\n', (2019, 2028), False, 'import os\n'), ((1468, 1489), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1487, 1489), False, 'import tempfile\n'), ((1686, 1714), 'os.path.exists', 'os.path.exists', (['self.datadir'], {}), '(self.datadir)\n', (1700, 1714), False, 'import os\n'), ((1734, 1801), 'unittest.SkipTest', 'unittest.SkipTest', (["('data directory does not exist: ' + self.datadir)"], {}), "('data directory does not exist: ' + self.datadir)\n", (1751, 1801), False, 'import unittest\n'), ((2042, 2060), 'os.remove', 'os.remove', (['self.fn'], {}), '(self.fn)\n', (2051, 2060), False, 'import os\n'), ((5215, 5236), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (5234, 5236), False, 'import tempfile\n'), ((5368, 5391), 'os.path.exists', 'os.path.exists', (['self.fn'], {}), '(self.fn)\n', (5382, 5391), False, 'import os\n'), ((5411, 5467), 'unittest.SkipTest', 'unittest.SkipTest', (["('data file does not exist:' + self.fn)"], {}), "('data file does not exist:' + self.fn)\n", (5428, 5467), False, 'import unittest\n'), ((4519, 4547), 'numpy.concatenate', 'np.concatenate', (['(sig1, sig3)'], {}), '((sig1, sig3))\n', (4533, 4547), True, 'import numpy as np\n'), ((4548, 4576), 'numpy.concatenate', 'np.concatenate', (['(sig2, sig4)'], {}), '((sig2, sig4))\n', (4562, 4576), True, 'import numpy as np\n'), ((4604, 4624), 'numpy.rint', 'np.rint', (['(sigs / conv)'], {}), '(sigs / conv)\n', (4611, 4624), True, 'import numpy as np\n')] |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import matplotlib.pyplot as plt
FLAGS = tf.app.flags.FLAGS
import sys
def MLP(input_x, output_size, name, weights_regularizer):
    """Four-layer fully connected network (1024 -> 1024 -> 512 -> output).

    The first three layers use slim's default activation; the final layer
    produces raw logits (no activation).  All variables are created under
    the variable scope ``name``, with layer scopes fc_1..fc_3 and logits.
    """
    hidden_widths = (1024, 1024, 512)
    with tf.variable_scope(name):
        net = input_x
        for layer_idx, width in enumerate(hidden_widths, start=1):
            net = slim.fully_connected(
                net, width,
                weights_regularizer=weights_regularizer,
                scope='fc_%d' % layer_idx)
        net = slim.fully_connected(
            net, output_size,
            activation_fn=None,
            weights_regularizer=weights_regularizer,
            scope='logits')
    return net
def decoder_net(input_x, weights_regularizer, epoch, shift_indicator, is_training):
    """Build the Gaussian-mixture XRD decoder graph.

    Peak locations (mu), log-variances (logvar) and stick intensities (c)
    start from known stick patterns loaded from FLAGS.stick_bases_dir and
    are corrected by small, input-conditioned learned shifts whose allowed
    range is annealed with `epoch`.

    Returns the tuple (x, mu, mu_shift, logvar, c, intensity_shift,
    intensity_shift_weighted); per the inline shape notes, x is laid out as
    (Q.size, batch, n_bases) and each basis pattern is max-normalized.
    """
    # initialize the basis with a known stick pattern
    #lim = 10.0
    Q = np.load(FLAGS.Q_dir)
    stick_bases = np.load(FLAGS.stick_bases_dir)
    mu_init = np.zeros([FLAGS.n_bases, FLAGS.n_spike])
    c_init = np.zeros([FLAGS.n_bases, FLAGS.n_spike])
    #logvar_init = np.zeros([FLAGS.n_bases, FLAGS.n_spike]) + FLAGS.init_logvar
    # Copy each known basis' (position, intensity) peak pairs into dense arrays.
    for num in range(FLAGS.n_bases):
        basis = stick_bases[num]
        for (i, peak) in enumerate(basis):
            mu_init[num][i] = peak[0]
            c_init[num][i] = peak[1]
        # normalize this basis' stick intensities to a maximum of ~1
        c_init[num] /= np.max(c_init[num]) + FLAGS.eps
    #
    X = Q
    X = tf.reshape(tf.constant(X, dtype= "float32"), [-1, 1, 1, 1]) # X, 1, 1
    X = tf.tile(X, [1, tf.shape(input_x)[0], FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike]) # Q.size, bs, n_bases, n_spike
    #print(X.shape)
    #mu_init = np.random.uniform(-lim, lim, size = FLAGS.n_spike)
    # Trainable peaks/intensities for the extra (unknown) bases; positions are
    # initialized randomly inside the Q range and kept positive via exp().
    mu_new_bases = tf.exp(tf.Variable(np.log(np.random.rand(FLAGS.n_new_bases, FLAGS.n_spike)*(Q[-1] - Q[0]) + Q[0]), trainable = True, dtype = "float32"))
    c_new_bases = tf.abs(tf.Variable(np.random.rand(FLAGS.n_new_bases, FLAGS.n_spike), trainable = True, dtype = "float32"))
    mu_init = tf.concat([tf.Variable(mu_init, trainable = False, dtype="float32"), mu_new_bases], axis = 0) #n_bases + n_new_bases, n_spike
    c_init = tf.concat([tf.Variable(c_init, trainable = False, dtype="float32"), c_new_bases], axis = 0) #n_bases + n_new_bases, n_spike
    # NOTE(review): shift_indicator is broadcast here but only appears in the
    # commented-out mu_shift gating below -- confirm whether that gating was
    # meant to be active.
    shift_indicator = tf.reshape(shift_indicator, [-1, 1, 1])
    shift_indicator = tf.tile(shift_indicator, [1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike])
    with tf.variable_scope('decoder'):
        ################ mu
        mu_shift = MLP(input_x, FLAGS.n_bases + FLAGS.n_new_bases, "spike_shift", weights_regularizer)
        # anneal the allowed peak-shift range over the first 10 epochs
        r1 = (tf.minimum(epoch, 10.0) /10.0)
        #alpha = FLAGS.max_shift
        alpha = r1 * FLAGS.max_shift
        if ("refine" in sys.argv):
            # refine mode: freeze the learned shift (no gradient through it)
            mu_shift = tf.stop_gradient(tf.tanh(mu_shift)* alpha + 1.0)
        else:
            mu_shift = tf.tanh(mu_shift)* alpha + 1.0
        mu_shift = tf.reshape(mu_shift, [-1, FLAGS.n_bases + FLAGS.n_new_bases, 1])
        mu_shift = tf.tile(mu_shift, [1, 1, FLAGS.n_spike]) #bs, n_bases, n_spike
        #mu_shift = (shift_indicator * mu_shift + (1.0 - shift_indicator) * tf.clip_by_value(mu_shift, 1.0 - FLAGS.shift_unit, 1.0 + FLAGS.shift_unit))
        mu = mu_init * mu_shift#bs, n_bases, n_spike
        #mu = tf.clip_by_value(mu, 11, 60)
        ################ variance
        logvar_shift = MLP(input_x, FLAGS.n_bases + FLAGS.n_new_bases, "spike_logvar", weights_regularizer)
        #logvar_shift = tf.Variable(np.zeros((1, FLAGS.n_bases + FLAGS.n_new_bases)), trainable = True, dtype = tf.float32, name = "logvar_shift")
        logvar_shift = tf.tanh(logvar_shift) * FLAGS.logvar_range
        #logvar_shift = tf.tile(logvar_shift, [tf.shape(mu_shift)[0], 1]) #bs, n_bases
        logvar = logvar_shift + tf.Variable(np.zeros(FLAGS.n_bases + FLAGS.n_new_bases) + FLAGS.init_logvar, trainable = False, dtype = tf.float32, name = "logvar") #bs, n_bases
        logvar = tf.reshape(logvar, [-1, FLAGS.n_bases + FLAGS.n_new_bases, 1]) #bs, n_bases, 1
        # peaks farther out in Q get progressively wider ("gradual fattening")
        extra_logvar = tf.tanh(tf.abs(tf.Variable(0.1, dtype = tf.float32, trainable = True, name = "gradual_fattening_ratio"))) * FLAGS.max_extra_logvar
        gradual_fattening = mu_init / Q[-1] * extra_logvar # bs, n_bases, n_spike: 0~1
        if (is_training):
            tf.summary.scalar("extra_logvar", extra_logvar)
        logvar = tf.tile(logvar, [1, 1, FLAGS.n_spike]) + gradual_fattening
        #logvar = FLAGS.init_logvar
        ################ intensity
        c = tf.reshape(c_init, [1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike])
        #lim = tf.minimum(epoch, 10.0) /10.0 * np.log(FLAGS.intensity_shift)
        #r2 = (tf.minimum(epoch, 20.0) /20.0)
        #lim = 0.0 * (1-r2) + r2 * np.log(FLAGS.intensity_shift)
        lim = np.log(FLAGS.intensity_shift)
        if ("refine" in sys.argv):
            # refine mode allows a doubled intensity-shift range
            lim = np.log(FLAGS.intensity_shift * 2.0)
        with tf.variable_scope("intensity_shift_net"):
            x = slim.fully_connected(input_x, 512, weights_regularizer = weights_regularizer, scope = 'fc_1')
            x = slim.fully_connected(x, 512, weights_regularizer = weights_regularizer, scope = 'fc_2')
            x = slim.fully_connected(x, 32, weights_regularizer = weights_regularizer, scope = 'fc_3')
            x = slim.fully_connected(x, (FLAGS.n_bases + FLAGS.n_new_bases) * FLAGS.n_spike, activation_fn = None, weights_regularizer = weights_regularizer, scope = 'logits')
        # per-sample (local) and shared (global) multiplicative intensity shifts
        local_intensity_shift = tf.tanh(tf.reshape(x, [-1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike])) * lim
        global_intensity_shift = tf.tanh(tf.Variable(np.zeros((FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike)), trainable = True, dtype = tf.float32, name = "global_IS")) * np.log(FLAGS.global_intensity_shift)
        intensity_shift = tf.exp(local_intensity_shift + global_intensity_shift) #intensity_shift = tf.exp(tf.clip_by_value(tf.Variable(np.zeros((FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike)), trainable = True, dtype = tf.float32, name = "I_shift"), -lim, lim))
        c = tf.tile(c, [tf.shape(input_x)[0], 1, 1])
        intensity_shift_weighted = tf.reduce_sum(tf.abs(tf.log(intensity_shift)) * c, axis = 2) #bs, n_bases
        c_shifted = c * intensity_shift ##bs, n_bases, n_spike
        ################ Gaussian Mixtures
        x = tf.exp( - ((X - mu)**2)/(2.0 * tf.exp(logvar)) - logvar * 0.5) * c_shifted #Gaussian xrd, bs, n_bases, n_spike
        #x = tf.exp( - (tf.abs(X - mu))/(tf.exp(logvar) + FLAGS.eps) - logvar) * c_shifted #Laplace xrd, bs, n_bases, n_spike
        x = tf.reduce_sum(x, axis = 3) #xrd, bs, n_bases
        x = x / (tf.reduce_max(x, axis = 0) + FLAGS.eps) #xrd, bs, n_bases
    return x, mu, mu_shift, logvar, c, intensity_shift, intensity_shift_weighted
def gaussian_KL(recog_mu, recog_logvar, prior_mu, prior_logvar):
    """KL(N(recog_mu, exp(recog_logvar)) || N(prior_mu, exp(prior_logvar))),
    summed over axis 1 (eps-stabilized denominators)."""
    prior_var = tf.exp(prior_logvar) + FLAGS.eps
    mean_term = tf.pow(prior_mu - recog_mu, 2) / prior_var
    var_term = tf.exp(recog_logvar) / prior_var
    inner = 1 + (recog_logvar - prior_logvar) - mean_term - var_term
    return -0.5 * tf.reduce_sum(inner, axis = 1)
def avg_n(x):
    """Element-wise mean of a list of same-shape tensors."""
    stacked = tf.stack(x, axis=0)
    return tf.reduce_mean(stacked, axis=0)
def sum_normalize(x):
    """Scale each row of x so its entries sum to (almost) 1 (eps-stabilized)."""
    row_totals = tf.reduce_sum(x, axis = 1) + FLAGS.eps
    return tf.transpose(tf.transpose(x) / row_totals)
def max_normalize(x):
    """Scale each row of x by its maximum entry (eps-stabilized)."""
    row_peaks = tf.reduce_max(x, axis = 1) + FLAGS.eps
    return tf.transpose(tf.transpose(x) / row_peaks)
class VAE:
    """Wraps decoder_net and defines the distance/divergence helpers
    (KL, JS, L1, L2) used as reconstruction losses.

    Exposes the decoder outputs as attributes: bases, mu, mu_shift, logvar,
    intensity, intensity_shift, intensity_shift_weighted.
    """
    def __init__(self, epoch, input_xrd, input_feature, input_indicator, distance, weights_regularizer, shift_indicator, is_training):
        """Build the decoder graph over the concatenated (feature, xrd) input."""
        #tf.set_random_seed(19950420)
        z_dim = FLAGS.z_dim  # unused; kept to preserve FLAGS access order
        n_sample = FLAGS.n_sample  # unused; kept to preserve FLAGS access order
        name = "VAE"
        with tf.variable_scope(name):
            ############## Q(z|x) ###############
            x = tf.concat([input_feature, input_xrd], 1)
            self.bases, self.mu, self.mu_shift, self.logvar, self.intensity, self.intensity_shift, self.intensity_shift_weighted = decoder_net(x, weights_regularizer, epoch, shift_indicator, is_training) # all positive #xrd, bs, n_bases
        # NOTE(review): the helpers below are local to __init__, yet they are
        # also referenced from MODEL.__init__ -- confirm scoping in the
        # original source.
        def KL_dis(input_xrd, xrd_prime, eps = None, importance = None):
            # Smoothed, scaled KL divergence between row-normalized patterns.
            if (eps is None):  # fixed: identity comparison with None (was '==')
                eps = FLAGS.KL_eps
            #P = input_xrd
            input_xrd += FLAGS.eps
            P = tf.transpose(tf.transpose(input_xrd) / (tf.reduce_sum(input_xrd, axis = 1) + FLAGS.eps))
            #Q = xrd_prime
            xrd_prime += FLAGS.eps
            Q = tf.transpose(tf.transpose(xrd_prime) / (tf.reduce_sum(xrd_prime, axis = 1) + FLAGS.eps))
            if (importance is None):  # fixed: identity comparison with None (was '==')
                importance = 1.0 #tf.constant(np.load(FLAGS.importance_dir), dtype = tf.float32)
            tmp = P * tf.log((P + eps)/(Q + eps)) * importance
            res = tf.reduce_sum(tmp, axis = 1) - (tf.reduce_sum(input_xrd, axis = 1) - tf.reduce_sum(xrd_prime, axis = 1)) * 0
            res = res * 100
            return res
        def norm2(x):
            # Row-wise Euclidean norm, eps-stabilized under the sqrt.
            return tf.sqrt(FLAGS.eps + tf.reduce_sum(tf.square(x), axis = 1))
        def norm1(x):
            # Row-wise L1 norm.
            return tf.reduce_sum(tf.abs(x), axis = 1)
        def L1_dis(x, y):
            return norm1(x - y) * 100 * FLAGS.L1_weight
        def L2_dis(x, y):
            return norm2(x - y) * 100 * FLAGS.L2_weight
        def JS_dis(input_xrd, xrd_prime, fac):
            # Symmetrized KL with the smoothing eps scaled by fac.
            return (0.5 * KL_dis(input_xrd, xrd_prime, FLAGS.KL_eps * fac) + 0.5 * KL_dis(xrd_prime, input_xrd, FLAGS.KL_eps * fac)) * FLAGS.KL_weight
class MODEL:
    """TF1 graph that decomposes XRD patterns into weighted basis phases.

    Wires input placeholders, the VAE decoder, and all training losses
    (reconstruction, composition, gibbs/alloy, smoothness, L2) into
    self.total_loss / self.optimizer_loss.
    """
    def __init__(self, is_training):
        """Build the full graph; `is_training` gates dropout and summaries.

        NOTE(review): norm1/KL_dis/JS_dis/L1_dis/L2_dis used below appear to
        be defined inside VAE.__init__ above -- verify they are actually in
        scope here in the original source.
        """
        tf.set_random_seed(19950420)
        #batch_size = FLAGS.batch_size
        #if (not is_training):
        #    batch_size = FLAGS.testing_size
        self.input_feature = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.feature_dim], name='input_feature')
        self.input_xrd = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.compressed_xrd_dim], name='input_xrd') # 4096
        self.input_indicator = tf.placeholder(dtype=tf.float32, shape=[None, FLAGS.n_bases + FLAGS.n_new_bases], name='input_indicator')
        self.shift_indicator = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='shift_indicator')
        self.degree_of_freedom = tf.placeholder(dtype=tf.float32, shape=[None], name='degree_of_freedom')
        self.keep_prob = tf.placeholder(tf.float32) #keep probability for the dropout
        self.epoch = tf.placeholder(tf.float32)
        weights_regularizer = slim.l2_regularizer(FLAGS.weight_decay)
        ############## feature extractor ###############
        # Pairwise (sample i, sample i+1) slices used for smoothness terms.
        self.prev_feature = tf.slice(self.input_feature, [0, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.feature_dim])
        self.next_feature = tf.slice(self.input_feature, [1, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.feature_dim])
        self.distance = norm1(self.prev_feature - self.next_feature)
        #self.kl_losses = []
        #self.similarity_losses = []
        ### normalize and denoise
        self.normalized_input_xrd = max_normalize(self.input_xrd)
        #self.normalized_input_xrd = max_normalize(tf.maximum(0.0, self.normalized_input_xrd - FLAGS.intensity_th))
        self.VAE = VAE(self.epoch, self.normalized_input_xrd, self.input_feature, self.input_indicator, self.distance, weights_regularizer, self.shift_indicator, is_training)
        self.bases = self.VAE.bases #xrd, bs, n_bases
        self.mu = self.VAE.mu
        self.mu_shift = tf.reduce_mean(self.VAE.mu_shift, axis = 2) # bs, n_bases
        self.logvar = tf.reduce_mean(self.VAE.logvar, axis = 2) # bs, n_bases
        self.intensity = self.VAE.intensity
        self.intensity_shift = self.VAE.intensity_shift #bs, n_bases, n_sticks
        tf.summary.histogram("intensity_shift", tf.reshape(self.intensity_shift, [-1]))
        tf.summary.histogram("mu_shift", tf.reshape(self.mu_shift, [-1]))
        ##### compute weights for each basis ######
        x = tf.concat([self.normalized_input_xrd, self.input_feature], axis = 1)
        x = MLP(x, FLAGS.n_bases + FLAGS.n_new_bases, "classifier", weights_regularizer)
        # subtract the row max before exp() for numerical stability (softmax trick)
        x = tf.transpose(tf.transpose(x) - tf.reduce_max(x, axis = 1))
        #self.amplifier = tf.exp(x)
        #self.partition = tf.reduce_sum(self.amplifier, axis = 1)
        s = slim.dropout(tf.exp(x), keep_prob = self.keep_prob, is_training = is_training)
        #s = tf.nn.sigmoid(x)
        s = s * self.input_indicator #
        sum_s = tf.reduce_sum(s, axis = 1) + FLAGS.eps #bs
        #self.weights = s
        self.weights = tf.transpose(tf.transpose(s) / sum_s)
        #### intensity_shift_loss ###
        self.intensity_shift_loss = tf.reduce_mean(tf.reduce_sum(self.VAE.intensity_shift_weighted * tf.stop_gradient(self.weights), axis = 1))
        if (is_training):
            tf.summary.scalar("train/intensity_shift_loss", self.intensity_shift_loss)
        ##### reconstruction loss ######
        tmp = self.bases * self.weights #xrd, bs, n_bases
        self.decomp = tf.transpose(tmp, perm = [1, 2, 0]) # bs, n_bases, xrd
        tmp2 = tf.reduce_sum(tmp, axis=2) # xrd, bs
        # learned, globally-shared additive background noise pattern
        noise = tf.abs(tf.Variable(np.random.randn(FLAGS.compressed_xrd_dim)/100.0, trainable = True, dtype="float32", name = "noise"))
        noise = noise / (tf.reduce_max(noise) + FLAGS.eps)
        scale = tf.nn.sigmoid(tf.Variable(-2, dtype="float32", name = "noise_scale")) * FLAGS.noise_scale
        self.noise = noise * scale
        #####
        x = tf.concat([self.normalized_input_xrd, self.input_feature], axis = 1)
        x = MLP(x, 1, "noise_b", weights_regularizer)
        # per-sample noise offset; multiplied by 0.0, i.e. currently disabled
        self.noise_b = tf.abs(tf.tile(x, [1, FLAGS.compressed_xrd_dim])) * 0.0
        #####
        self.xrd_prime = max_normalize(tf.transpose(tmp2)) + self.noise + self.noise_b
        #self.xrd_prime = tf.transpose(tmp2) + self.noise
        ###### composition Loss ##############
        max_I_XRD = tf.reduce_max(self.input_xrd, axis = 1)
        max_I_x_prime = tf.reduce_max(tmp2, axis = 0) + FLAGS.eps
        ratio = max_I_XRD / max_I_x_prime
        self.activation = tf.transpose(tf.transpose(self.weights) * ratio) # bs, n_bases
        self.rescale_factor = tf.nn.sigmoid(tf.Variable(np.zeros(FLAGS.n_bases + FLAGS.n_new_bases), trainable = True, dtype="float32", name = "rescale_factor"))
        self.rescaled_activation = self.activation * self.rescale_factor
        self.comp = self.input_feature
        # compositions of the extra bases are learned (softmax over 3 elements)
        new_bases_comp = tf.nn.softmax(tf.Variable(np.zeros((FLAGS.n_new_bases, 3)), trainable = True, dtype="float32"), axis = 1)
        self.bases_comp = tf.concat([tf.constant(np.load(FLAGS.bases_comp_dir)[:FLAGS.n_bases], dtype = "float32", name = "bases_comp"), new_bases_comp], axis = 0)
        raw_comp_prime = tf.matmul(self.rescaled_activation, self.bases_comp)
        self.comp_prime = sum_normalize(raw_comp_prime)
        #tf.transpose(tf.transpose(raw_comp_prime) / tf.reduce_sum(raw_comp_prime, axis = 1))
        #self.comp_loss = tf.reduce_mean(tf.reduce_sum(tf.square(self.comp - self.comp_prime), axis = 1))
        self.comp_loss_batch = KL_dis(self.comp_prime, self.comp, FLAGS.eps, 1.0)
        self.comp_loss = tf.reduce_mean(self.comp_loss_batch)
        if (is_training):
            tf.summary.scalar('train/comp_loss * %.6f'%FLAGS.comp_decay, self.comp_loss)
        cond = tf.cast(tf.logical_and(tf.equal(tf.reduce_mean(self.degree_of_freedom), 2.0), \
                tf.greater(tf.reduce_sum(self.input_feature * tf.constant([0.0, 0.0, 1.0], dtype = "float32")), 1e-6)), tf.float32)
        fac = 1.0 * (1 - cond) + 100.0 * cond
        tf.summary.scalar('train/fac ', fac)
        self.JS_dis_batch = JS_dis(self.normalized_input_xrd, self.xrd_prime, fac)
        self.L2_dis_batch = L2_dis(self.normalized_input_xrd, self.xrd_prime)
        self.L1_dis_batch = L1_dis(self.normalized_input_xrd, self.xrd_prime)
        self.recon_loss_batch = self.JS_dis_batch + self.L2_dis_batch + self.L1_dis_batch
        self.recon_loss = tf.reduce_mean(self.recon_loss_batch)
        self.sqr_recon_loss = tf.reduce_mean(tf.square(self.recon_loss_batch))
        if (is_training):
            tf.summary.scalar('train/recon_loss', self.recon_loss)
            tf.summary.scalar('train/JS_dis', tf.reduce_mean(self.JS_dis_batch))
            tf.summary.scalar('train/L2_dis', tf.reduce_mean(self.L2_dis_batch))
            tf.summary.scalar('train/L1_dis', tf.reduce_mean(self.L1_dis_batch))
            tf.summary.scalar('train/sqr_recon_loss', self.sqr_recon_loss)
        self.vae_loss = self.recon_loss #+ self.tot_kl_loss * FLAGS.beta
        if (is_training):
            tf.summary.scalar('train/vae_loss', self.vae_loss)
        ###### gibbs-alloy loss #################
        act = self.weights
        # sharpened weight distribution (entropy of P drives the gibbs loss)
        P = tf.pow(self.weights + FLAGS.eps, FLAGS.beta)
        P = tf.transpose(tf.transpose(P) / (tf.reduce_sum(P, axis = 1))) #bs, n_bases
        mean_loss = FLAGS.mean_loss #self.recon_loss
        self.gibbs_loss_batch = tf.reduce_sum(- P * tf.log(P + FLAGS.eps), axis = 1) # bs
        self.prev_shift = tf.slice(self.mu_shift, [0, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.n_bases + FLAGS.n_new_bases])
        self.next_shift = tf.slice(self.mu_shift, [1, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.n_bases + FLAGS.n_new_bases])
        self.prev_act = tf.slice(act, [0, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.n_bases + FLAGS.n_new_bases])
        self.next_act = tf.slice(act, [1, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.n_bases + FLAGS.n_new_bases])
        delta = - 0.05 * FLAGS.shift_unit
        if ("refine" in sys.argv):
            delta = 0.05 * FLAGS.shift_unit
            print("refine mode")
        # bases active in both neighbors (above FLAGS.min_activation)
        share_act_bases = tf.stop_gradient(tf.cast(tf.greater(tf.minimum(self.prev_act, self.next_act), FLAGS.min_activation), tf.float32)) #bs-1, n_bases
        shift_between = tf.reduce_max(tf.abs(self.prev_shift - self.next_shift) * share_act_bases, axis = 1)
        max_shift_between = tf.reduce_max(tf.abs(self.prev_shift - self.next_shift) * tf.cast(tf.greater(tf.minimum(self.prev_act, self.next_act), FLAGS.min_activation), tf.float32), axis = 1) #bs-1
        self.max_shift = tf.concat([max_shift_between, tf.constant([0.0])], axis = 0)
        shift_penalty = tf.nn.leaky_relu(tf.tanh((shift_between - (FLAGS.shift_unit + delta)) / FLAGS.shift_amplify), alpha = 0.1)
        diff = tf.maximum(tf.concat([shift_penalty, tf.constant([0.0])], axis = 0), tf.concat([tf.constant([0.0]), shift_penalty], axis = 0)) #d_1,2, d_1,2 + d2,3, ..., d_8,9 + d_9,10, d_9,10: penalties
        #n_diff = tf.stop_gradient(tf.concat([shift_penalty*0.0 + 1, tf.constant([0.0])], axis = 0) + tf.concat([tf.constant([0.0]), shift_penalty*0.0 + 1], axis = 0)) # 1,2,...,2,1
        avg_diff = diff #/ n_diff #bs
        self.alloy_loss_batch = avg_diff * (tf.log(self.degree_of_freedom) - tf.log(self.degree_of_freedom - 1 + FLAGS.eps))
        tf.summary.histogram("gibbs_loss", tf.reshape(self.gibbs_loss_batch, [-1]))
        tf.summary.histogram("alloy_loss", tf.reshape(self.alloy_loss_batch, [-1]))
        self.condition1 = tf.greater(tf.reduce_sum(tf.cast(tf.greater(act, FLAGS.min_activation), tf.float32), axis = 1), 0.5 + self.degree_of_freedom) #bs n_bases > d_free
        self.condition2 = tf.greater(tf.reduce_sum(tf.cast(tf.greater(act, FLAGS.min_activation), tf.float32), axis = 1), -0.5 + self.degree_of_freedom) #bs n_bases >= d_free
        is_violated = tf.cast(tf.logical_and(tf.greater(tf.abs(self.prev_shift - self.next_shift), FLAGS.shift_unit + delta), tf.greater(tf.minimum(self.prev_act, self.next_act), FLAGS.min_activation)), tf.float32) #bs - 1
        is_violated = tf.reduce_sum(is_violated, axis = 1)
        num_violate = tf.concat([is_violated, tf.constant([0.0])], axis = 0) + tf.concat([tf.constant([0.0]), is_violated], axis = 0)
        #condition3 = tf.greater_equal(self.alloy_loss_batch, 0.04) #bs shift = true
        self.condition3 = tf.greater(num_violate, 0.5) #bs shift = true
        condition = tf.logical_or(self.condition1, tf.logical_and(self.condition2, self.condition3))
        penalty_ratio = tf.maximum(tf.stop_gradient(tf.cast(condition, tf.float32)), FLAGS.gibbs_penalty)
        self.gibbs_penalty_ratio = penalty_ratio
        #if (is_training):
        #    tf.summary.scalar('train/percent_of_unsatisfied', tf.reduce_mean(tf.cast(condition, tf.float32)))
        #only decrease the entropy over some threshold
        #tf.stop_gradient(tf.nn.sigmoid((mean_loss - self.recon_loss_batch))) # bs
        self.gibbs_loss = tf.reduce_mean((self.gibbs_loss_batch + FLAGS.alloy_decay * self.alloy_loss_batch) * penalty_ratio) # / (FLAGS.eps + tf.reduce_sum(penalty_ratio))
        self.gibbs_decay = 0.0 + tf.minimum(self.epoch, 10.0) * FLAGS.gibbs_decay / 10.0
        if (is_training):
            tf.summary.scalar('train/gibbs_loss', self.gibbs_loss)
            tf.summary.scalar('train/gibbs_decay', self.gibbs_decay)
        ##### smooth_weights_loss ######
        self.prev_weights = tf.slice(self.weights, [0, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.n_bases + FLAGS.n_new_bases])
        self.next_weights = tf.slice(self.weights, [1, 0], [tf.shape(self.input_feature)[0] - 1, FLAGS.n_bases + FLAGS.n_new_bases])
        almost_zero = 1.0 #tf.stop_gradient(tf.cast(tf.logical_or(tf.greater(0.05, self.prev_weights), tf.greater(0.05, self.next_weights)), tf.float32))
        self.smooth_weights_loss_batch = tf.reduce_sum(tf.maximum(tf.abs(self.prev_weights - self.next_weights) - FLAGS.smooth_weights_th, 0.0), axis = 1) #/ self.distance * tf.reduce_min(self.distance)
        #tf.sqrt(tf.reduce_sum(tf.square(self.prev_weights - self.next_weights) * almost_zero, axis = 1) + FLAGS.eps)
        #tf.norm(self.prev_weights - self.next_weights, axis=1) #/ self.distance
        self.smooth_weights_loss = tf.reduce_mean(self.smooth_weights_loss_batch)
        self.smoothness_decay = FLAGS.smoothness_decay
        #self.smoothness_decay = tf.minimum(self.epoch, 10.0) * FLAGS.smoothness_decay / 10.0
        if (is_training):
            tf.summary.scalar('train/smooth_weights_loss * %.6f'%FLAGS.smoothness_decay, self.smooth_weights_loss)
        ####### l2 loss ##########
        self.l2_loss = tf.add_n(tf.losses.get_regularization_losses()) #+FLAGS.weight_decay*tf.nn.l2_loss(self.r_sqrt_sigma)
        if (is_training):
            tf.summary.scalar('train/l2_loss', self.l2_loss)
        ####### total loss ##########
        if (is_training):
            self.total_loss = self.l2_loss + self.vae_loss \
                    + self.gibbs_loss * self.gibbs_decay \
                    + self.smooth_weights_loss * self.smoothness_decay\
                    + self.comp_loss * FLAGS.comp_decay \
                    + self.sqr_recon_loss * FLAGS.sqr_recon_loss_decay\
                    + self.intensity_shift_loss * FLAGS.intensity_shift_loss_decay
        else:
            self.total_loss = self.vae_loss
        if (is_training):
            tf.summary.scalar('train/total_loss', self.total_loss)
        self.optimizer_loss = self.total_loss
| [
"numpy.load",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.reduce_max",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.greater",
"tensorflow.abs",
"tensorflow.logical_and",
"numpy.random.randn",
"tensorflow.variable_scope",
"te... | [((871, 891), 'numpy.load', 'np.load', (['FLAGS.Q_dir'], {}), '(FLAGS.Q_dir)\n', (878, 891), True, 'import numpy as np\n'), ((910, 940), 'numpy.load', 'np.load', (['FLAGS.stick_bases_dir'], {}), '(FLAGS.stick_bases_dir)\n', (917, 940), True, 'import numpy as np\n'), ((956, 996), 'numpy.zeros', 'np.zeros', (['[FLAGS.n_bases, FLAGS.n_spike]'], {}), '([FLAGS.n_bases, FLAGS.n_spike])\n', (964, 996), True, 'import numpy as np\n'), ((1010, 1050), 'numpy.zeros', 'np.zeros', (['[FLAGS.n_bases, FLAGS.n_spike]'], {}), '([FLAGS.n_bases, FLAGS.n_spike])\n', (1018, 1050), True, 'import numpy as np\n'), ((2276, 2315), 'tensorflow.reshape', 'tf.reshape', (['shift_indicator', '[-1, 1, 1]'], {}), '(shift_indicator, [-1, 1, 1])\n', (2286, 2315), True, 'import tensorflow as tf\n'), ((2338, 2417), 'tensorflow.tile', 'tf.tile', (['shift_indicator', '[1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike]'], {}), '(shift_indicator, [1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike])\n', (2345, 2417), True, 'import tensorflow as tf\n'), ((221, 244), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (238, 244), True, 'import tensorflow as tf\n'), ((259, 353), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['input_x', '(1024)'], {'weights_regularizer': 'weights_regularizer', 'scope': '"""fc_1"""'}), "(input_x, 1024, weights_regularizer=weights_regularizer,\n scope='fc_1')\n", (279, 353), True, 'import tensorflow.contrib.slim as slim\n'), ((366, 454), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['x', '(1024)'], {'weights_regularizer': 'weights_regularizer', 'scope': '"""fc_2"""'}), "(x, 1024, weights_regularizer=weights_regularizer,\n scope='fc_2')\n", (386, 454), True, 'import tensorflow.contrib.slim as slim\n'), ((467, 555), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['x', '(512)'], {'weights_regularizer': 'weights_regularizer', 'scope': '"""fc_3"""'}), "(x, 512, 
weights_regularizer=weights_regularizer, scope\n ='fc_3')\n", (487, 555), True, 'import tensorflow.contrib.slim as slim\n'), ((567, 684), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['x', 'output_size'], {'activation_fn': 'None', 'weights_regularizer': 'weights_regularizer', 'scope': '"""logits"""'}), "(x, output_size, activation_fn=None,\n weights_regularizer=weights_regularizer, scope='logits')\n", (587, 684), True, 'import tensorflow.contrib.slim as slim\n'), ((1421, 1452), 'tensorflow.constant', 'tf.constant', (['X'], {'dtype': '"""float32"""'}), "(X, dtype='float32')\n", (1432, 1452), True, 'import tensorflow as tf\n'), ((2428, 2456), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {}), "('decoder')\n", (2445, 2456), True, 'import tensorflow as tf\n'), ((2903, 2967), 'tensorflow.reshape', 'tf.reshape', (['mu_shift', '[-1, FLAGS.n_bases + FLAGS.n_new_bases, 1]'], {}), '(mu_shift, [-1, FLAGS.n_bases + FLAGS.n_new_bases, 1])\n', (2913, 2967), True, 'import tensorflow as tf\n'), ((2987, 3027), 'tensorflow.tile', 'tf.tile', (['mu_shift', '[1, 1, FLAGS.n_spike]'], {}), '(mu_shift, [1, 1, FLAGS.n_spike])\n', (2994, 3027), True, 'import tensorflow as tf\n'), ((3964, 4026), 'tensorflow.reshape', 'tf.reshape', (['logvar', '[-1, FLAGS.n_bases + FLAGS.n_new_bases, 1]'], {}), '(logvar, [-1, FLAGS.n_bases + FLAGS.n_new_bases, 1])\n', (3974, 4026), True, 'import tensorflow as tf\n'), ((4560, 4633), 'tensorflow.reshape', 'tf.reshape', (['c_init', '[1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike]'], {}), '(c_init, [1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike])\n', (4570, 4633), True, 'import tensorflow as tf\n'), ((4847, 4876), 'numpy.log', 'np.log', (['FLAGS.intensity_shift'], {}), '(FLAGS.intensity_shift)\n', (4853, 4876), True, 'import numpy as np\n'), ((5874, 5928), 'tensorflow.exp', 'tf.exp', (['(local_intensity_shift + global_intensity_shift)'], {}), '(local_intensity_shift + global_intensity_shift)\n', (5880, 
5928), True, 'import tensorflow as tf\n'), ((6662, 6686), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {'axis': '(3)'}), '(x, axis=3)\n', (6675, 6686), True, 'import tensorflow as tf\n'), ((7285, 7304), 'tensorflow.stack', 'tf.stack', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (7293, 7304), True, 'import tensorflow as tf\n'), ((8793, 8819), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (8806, 8819), True, 'import tensorflow as tf\n'), ((9062, 9071), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (9068, 9071), True, 'import tensorflow as tf\n'), ((9470, 9498), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(19950420)'], {}), '(19950420)\n', (9488, 9498), True, 'import tensorflow as tf\n'), ((9643, 9735), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, FLAGS.feature_dim]', 'name': '"""input_feature"""'}), "(dtype=tf.float32, shape=[None, FLAGS.feature_dim], name=\n 'input_feature')\n", (9657, 9735), True, 'import tensorflow as tf\n'), ((9765, 9859), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, FLAGS.compressed_xrd_dim]', 'name': '"""input_xrd"""'}), "(dtype=tf.float32, shape=[None, FLAGS.compressed_xrd_dim],\n name='input_xrd')\n", (9779, 9859), True, 'import tensorflow as tf\n'), ((9903, 10013), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, FLAGS.n_bases + FLAGS.n_new_bases]', 'name': '"""input_indicator"""'}), "(dtype=tf.float32, shape=[None, FLAGS.n_bases + FLAGS.\n n_new_bases], name='input_indicator')\n", (9917, 10013), True, 'import tensorflow as tf\n'), ((10042, 10115), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, 1]', 'name': '"""shift_indicator"""'}), "(dtype=tf.float32, shape=[None, 1], name='shift_indicator')\n", (10056, 10115), True, 'import tensorflow as tf\n'), ((10151, 10223), 'tensorflow.placeholder', 'tf.placeholder', 
([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""degree_of_freedom"""'}), "(dtype=tf.float32, shape=[None], name='degree_of_freedom')\n", (10165, 10223), True, 'import tensorflow as tf\n'), ((10251, 10277), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (10265, 10277), True, 'import tensorflow as tf\n'), ((10334, 10360), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (10348, 10360), True, 'import tensorflow as tf\n'), ((10392, 10431), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['FLAGS.weight_decay'], {}), '(FLAGS.weight_decay)\n', (10411, 10431), True, 'import tensorflow.contrib.slim as slim\n'), ((11397, 11438), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.VAE.mu_shift'], {'axis': '(2)'}), '(self.VAE.mu_shift, axis=2)\n', (11411, 11438), True, 'import tensorflow as tf\n'), ((11477, 11516), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.VAE.logvar'], {'axis': '(2)'}), '(self.VAE.logvar, axis=2)\n', (11491, 11516), True, 'import tensorflow as tf\n'), ((11895, 11961), 'tensorflow.concat', 'tf.concat', (['[self.normalized_input_xrd, self.input_feature]'], {'axis': '(1)'}), '([self.normalized_input_xrd, self.input_feature], axis=1)\n', (11904, 11961), True, 'import tensorflow as tf\n'), ((12995, 13028), 'tensorflow.transpose', 'tf.transpose', (['tmp'], {'perm': '[1, 2, 0]'}), '(tmp, perm=[1, 2, 0])\n', (13007, 13028), True, 'import tensorflow as tf\n'), ((13066, 13092), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['tmp'], {'axis': '(2)'}), '(tmp, axis=2)\n', (13079, 13092), True, 'import tensorflow as tf\n'), ((13470, 13536), 'tensorflow.concat', 'tf.concat', (['[self.normalized_input_xrd, self.input_feature]'], {'axis': '(1)'}), '([self.normalized_input_xrd, self.input_feature], axis=1)\n', (13479, 13536), True, 'import tensorflow as tf\n'), ((13901, 13938), 'tensorflow.reduce_max', 'tf.reduce_max', (['self.input_xrd'], {'axis': '(1)'}), 
'(self.input_xrd, axis=1)\n', (13914, 13938), True, 'import tensorflow as tf\n'), ((14738, 14790), 'tensorflow.matmul', 'tf.matmul', (['self.rescaled_activation', 'self.bases_comp'], {}), '(self.rescaled_activation, self.bases_comp)\n', (14747, 14790), True, 'import tensorflow as tf\n'), ((15158, 15194), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.comp_loss_batch'], {}), '(self.comp_loss_batch)\n', (15172, 15194), True, 'import tensorflow as tf\n'), ((15592, 15628), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/fac """', 'fac'], {}), "('train/fac ', fac)\n", (15609, 15628), True, 'import tensorflow as tf\n'), ((15993, 16030), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.recon_loss_batch'], {}), '(self.recon_loss_batch)\n', (16007, 16030), True, 'import tensorflow as tf\n'), ((16782, 16826), 'tensorflow.pow', 'tf.pow', (['(self.weights + FLAGS.eps)', 'FLAGS.beta'], {}), '(self.weights + FLAGS.eps, FLAGS.beta)\n', (16788, 16826), True, 'import tensorflow as tf\n'), ((19737, 19771), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['is_violated'], {'axis': '(1)'}), '(is_violated, axis=1)\n', (19750, 19771), True, 'import tensorflow as tf\n'), ((20021, 20049), 'tensorflow.greater', 'tf.greater', (['num_violate', '(0.5)'], {}), '(num_violate, 0.5)\n', (20031, 20049), True, 'import tensorflow as tf\n'), ((20663, 20767), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((self.gibbs_loss_batch + FLAGS.alloy_decay * self.alloy_loss_batch) *\n penalty_ratio)'], {}), '((self.gibbs_loss_batch + FLAGS.alloy_decay * self.\n alloy_loss_batch) * penalty_ratio)\n', (20677, 20767), True, 'import tensorflow as tf\n'), ((22009, 22055), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.smooth_weights_loss_batch'], {}), '(self.smooth_weights_loss_batch)\n', (22023, 22055), True, 'import tensorflow as tf\n'), ((1353, 1372), 'numpy.max', 'np.max', (['c_init[num]'], {}), '(c_init[num])\n', (1359, 1372), True, 'import numpy as np\n'), ((1886, 1934), 
'numpy.random.rand', 'np.random.rand', (['FLAGS.n_new_bases', 'FLAGS.n_spike'], {}), '(FLAGS.n_new_bases, FLAGS.n_spike)\n', (1900, 1934), True, 'import numpy as np\n'), ((2000, 2054), 'tensorflow.Variable', 'tf.Variable', (['mu_init'], {'trainable': '(False)', 'dtype': '"""float32"""'}), "(mu_init, trainable=False, dtype='float32')\n", (2011, 2054), True, 'import tensorflow as tf\n'), ((2140, 2193), 'tensorflow.Variable', 'tf.Variable', (['c_init'], {'trainable': '(False)', 'dtype': '"""float32"""'}), "(c_init, trainable=False, dtype='float32')\n", (2151, 2193), True, 'import tensorflow as tf\n'), ((2605, 2628), 'tensorflow.minimum', 'tf.minimum', (['epoch', '(10.0)'], {}), '(epoch, 10.0)\n', (2615, 2628), True, 'import tensorflow as tf\n'), ((3624, 3645), 'tensorflow.tanh', 'tf.tanh', (['logvar_shift'], {}), '(logvar_shift)\n', (3631, 3645), True, 'import tensorflow as tf\n'), ((4343, 4390), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""extra_logvar"""', 'extra_logvar'], {}), "('extra_logvar', extra_logvar)\n", (4360, 4390), True, 'import tensorflow as tf\n'), ((4417, 4455), 'tensorflow.tile', 'tf.tile', (['logvar', '[1, 1, FLAGS.n_spike]'], {}), '(logvar, [1, 1, FLAGS.n_spike])\n', (4424, 4455), True, 'import tensorflow as tf\n'), ((4930, 4965), 'numpy.log', 'np.log', (['(FLAGS.intensity_shift * 2.0)'], {}), '(FLAGS.intensity_shift * 2.0)\n', (4936, 4965), True, 'import numpy as np\n'), ((4980, 5020), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""intensity_shift_net"""'], {}), "('intensity_shift_net')\n", (4997, 5020), True, 'import tensorflow as tf\n'), ((5039, 5132), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['input_x', '(512)'], {'weights_regularizer': 'weights_regularizer', 'scope': '"""fc_1"""'}), "(input_x, 512, weights_regularizer=weights_regularizer,\n scope='fc_1')\n", (5059, 5132), True, 'import tensorflow.contrib.slim as slim\n'), ((5149, 5237), 'tensorflow.contrib.slim.fully_connected', 
'slim.fully_connected', (['x', '(512)'], {'weights_regularizer': 'weights_regularizer', 'scope': '"""fc_2"""'}), "(x, 512, weights_regularizer=weights_regularizer, scope\n ='fc_2')\n", (5169, 5237), True, 'import tensorflow.contrib.slim as slim\n'), ((5253, 5340), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['x', '(32)'], {'weights_regularizer': 'weights_regularizer', 'scope': '"""fc_3"""'}), "(x, 32, weights_regularizer=weights_regularizer, scope=\n 'fc_3')\n", (5273, 5340), True, 'import tensorflow.contrib.slim as slim\n'), ((5356, 5518), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['x', '((FLAGS.n_bases + FLAGS.n_new_bases) * FLAGS.n_spike)'], {'activation_fn': 'None', 'weights_regularizer': 'weights_regularizer', 'scope': '"""logits"""'}), "(x, (FLAGS.n_bases + FLAGS.n_new_bases) * FLAGS.n_spike,\n activation_fn=None, weights_regularizer=weights_regularizer, scope='logits'\n )\n", (5376, 5518), True, 'import tensorflow.contrib.slim as slim\n'), ((5810, 5846), 'numpy.log', 'np.log', (['FLAGS.global_intensity_shift'], {}), '(FLAGS.global_intensity_shift)\n', (5816, 5846), True, 'import numpy as np\n'), ((7361, 7376), 'tensorflow.transpose', 'tf.transpose', (['x'], {}), '(x)\n', (7373, 7376), True, 'import tensorflow as tf\n'), ((7468, 7483), 'tensorflow.transpose', 'tf.transpose', (['x'], {}), '(x)\n', (7480, 7483), True, 'import tensorflow as tf\n'), ((7809, 7832), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (7826, 7832), True, 'import tensorflow as tf\n'), ((7902, 7942), 'tensorflow.concat', 'tf.concat', (['[input_feature, input_xrd]', '(1)'], {}), '([input_feature, input_xrd], 1)\n', (7911, 7942), True, 'import tensorflow as tf\n'), ((8382, 8405), 'tensorflow.transpose', 'tf.transpose', (['input_xrd'], {}), '(input_xrd)\n', (8394, 8405), True, 'import tensorflow as tf\n'), ((8526, 8549), 'tensorflow.transpose', 'tf.transpose', (['xrd_prime'], {}), '(xrd_prime)\n', (8538, 8549), 
True, 'import tensorflow as tf\n'), ((8740, 8769), 'tensorflow.log', 'tf.log', (['((P + eps) / (Q + eps))'], {}), '((P + eps) / (Q + eps))\n', (8746, 8769), True, 'import tensorflow as tf\n'), ((11707, 11745), 'tensorflow.reshape', 'tf.reshape', (['self.intensity_shift', '[-1]'], {}), '(self.intensity_shift, [-1])\n', (11717, 11745), True, 'import tensorflow as tf\n'), ((11788, 11819), 'tensorflow.reshape', 'tf.reshape', (['self.mu_shift', '[-1]'], {}), '(self.mu_shift, [-1])\n', (11798, 11819), True, 'import tensorflow as tf\n'), ((12261, 12270), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (12267, 12270), True, 'import tensorflow as tf\n'), ((12415, 12439), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['s'], {'axis': '(1)'}), '(s, axis=1)\n', (12428, 12439), True, 'import tensorflow as tf\n'), ((12785, 12859), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/intensity_shift_loss"""', 'self.intensity_shift_loss'], {}), "('train/intensity_shift_loss', self.intensity_shift_loss)\n", (12802, 12859), True, 'import tensorflow as tf\n'), ((13965, 13992), 'tensorflow.reduce_max', 'tf.reduce_max', (['tmp2'], {'axis': '(0)'}), '(tmp2, axis=0)\n', (13978, 13992), True, 'import tensorflow as tf\n'), ((15233, 15311), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('train/comp_loss * %.6f' % FLAGS.comp_decay)", 'self.comp_loss'], {}), "('train/comp_loss * %.6f' % FLAGS.comp_decay, self.comp_loss)\n", (15250, 15311), True, 'import tensorflow as tf\n'), ((16076, 16108), 'tensorflow.square', 'tf.square', (['self.recon_loss_batch'], {}), '(self.recon_loss_batch)\n', (16085, 16108), True, 'import tensorflow as tf\n'), ((16149, 16203), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/recon_loss"""', 'self.recon_loss'], {}), "('train/recon_loss', self.recon_loss)\n", (16166, 16203), True, 'import tensorflow as tf\n'), ((16459, 16521), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/sqr_recon_loss"""', 'self.sqr_recon_loss'], {}), 
"('train/sqr_recon_loss', self.sqr_recon_loss)\n", (16476, 16521), True, 'import tensorflow as tf\n'), ((16638, 16688), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/vae_loss"""', 'self.vae_loss'], {}), "('train/vae_loss', self.vae_loss)\n", (16655, 16688), True, 'import tensorflow as tf\n'), ((18332, 18407), 'tensorflow.tanh', 'tf.tanh', (['((shift_between - (FLAGS.shift_unit + delta)) / FLAGS.shift_amplify)'], {}), '((shift_between - (FLAGS.shift_unit + delta)) / FLAGS.shift_amplify)\n', (18339, 18407), True, 'import tensorflow as tf\n'), ((19017, 19056), 'tensorflow.reshape', 'tf.reshape', (['self.gibbs_loss_batch', '[-1]'], {}), '(self.gibbs_loss_batch, [-1])\n', (19027, 19056), True, 'import tensorflow as tf\n'), ((19101, 19140), 'tensorflow.reshape', 'tf.reshape', (['self.alloy_loss_batch', '[-1]'], {}), '(self.alloy_loss_batch, [-1])\n', (19111, 19140), True, 'import tensorflow as tf\n'), ((20118, 20166), 'tensorflow.logical_and', 'tf.logical_and', (['self.condition2', 'self.condition3'], {}), '(self.condition2, self.condition3)\n', (20132, 20166), True, 'import tensorflow as tf\n'), ((20939, 20993), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/gibbs_loss"""', 'self.gibbs_loss'], {}), "('train/gibbs_loss', self.gibbs_loss)\n", (20956, 20993), True, 'import tensorflow as tf\n'), ((21006, 21062), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/gibbs_decay"""', 'self.gibbs_decay'], {}), "('train/gibbs_decay', self.gibbs_decay)\n", (21023, 21062), True, 'import tensorflow as tf\n'), ((22245, 22354), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('train/smooth_weights_loss * %.6f' % FLAGS.smoothness_decay)", 'self.smooth_weights_loss'], {}), "('train/smooth_weights_loss * %.6f' % FLAGS.\n smoothness_decay, self.smooth_weights_loss)\n", (22262, 22354), True, 'import tensorflow as tf\n'), ((22418, 22455), 'tensorflow.losses.get_regularization_losses', 'tf.losses.get_regularization_losses', ([], {}), '()\n', 
(22453, 22455), True, 'import tensorflow as tf\n'), ((22549, 22597), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/l2_loss"""', 'self.l2_loss'], {}), "('train/l2_loss', self.l2_loss)\n", (22566, 22597), True, 'import tensorflow as tf\n'), ((23198, 23252), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""train/total_loss"""', 'self.total_loss'], {}), "('train/total_loss', self.total_loss)\n", (23215, 23252), True, 'import tensorflow as tf\n'), ((1503, 1520), 'tensorflow.shape', 'tf.shape', (['input_x'], {}), '(input_x)\n', (1511, 1520), True, 'import tensorflow as tf\n'), ((5557, 5626), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike]'], {}), '(x, [-1, FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike])\n', (5567, 5626), True, 'import tensorflow as tf\n'), ((6724, 6748), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (6737, 6748), True, 'import tensorflow as tf\n'), ((7380, 7404), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (7393, 7404), True, 'import tensorflow as tf\n'), ((7487, 7511), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (7500, 7511), True, 'import tensorflow as tf\n'), ((8409, 8441), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['input_xrd'], {'axis': '(1)'}), '(input_xrd, axis=1)\n', (8422, 8441), True, 'import tensorflow as tf\n'), ((8553, 8585), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['xrd_prime'], {'axis': '(1)'}), '(xrd_prime, axis=1)\n', (8566, 8585), True, 'import tensorflow as tf\n'), ((8825, 8857), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['input_xrd'], {'axis': '(1)'}), '(input_xrd, axis=1)\n', (8838, 8857), True, 'import tensorflow as tf\n'), ((8862, 8894), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['xrd_prime'], {'axis': '(1)'}), '(xrd_prime, axis=1)\n', (8875, 8894), True, 'import tensorflow as tf\n'), ((8997, 9009), 'tensorflow.square', 
'tf.square', (['x'], {}), '(x)\n', (9006, 9009), True, 'import tensorflow as tf\n'), ((12078, 12093), 'tensorflow.transpose', 'tf.transpose', (['x'], {}), '(x)\n', (12090, 12093), True, 'import tensorflow as tf\n'), ((12096, 12120), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (12109, 12120), True, 'import tensorflow as tf\n'), ((12529, 12544), 'tensorflow.transpose', 'tf.transpose', (['s'], {}), '(s)\n', (12541, 12544), True, 'import tensorflow as tf\n'), ((13266, 13286), 'tensorflow.reduce_max', 'tf.reduce_max', (['noise'], {}), '(noise)\n', (13279, 13286), True, 'import tensorflow as tf\n'), ((13331, 13383), 'tensorflow.Variable', 'tf.Variable', (['(-2)'], {'dtype': '"""float32"""', 'name': '"""noise_scale"""'}), "(-2, dtype='float32', name='noise_scale')\n", (13342, 13383), True, 'import tensorflow as tf\n'), ((13623, 13664), 'tensorflow.tile', 'tf.tile', (['x', '[1, FLAGS.compressed_xrd_dim]'], {}), '(x, [1, FLAGS.compressed_xrd_dim])\n', (13630, 13664), True, 'import tensorflow as tf\n'), ((14089, 14115), 'tensorflow.transpose', 'tf.transpose', (['self.weights'], {}), '(self.weights)\n', (14101, 14115), True, 'import tensorflow as tf\n'), ((14195, 14238), 'numpy.zeros', 'np.zeros', (['(FLAGS.n_bases + FLAGS.n_new_bases)'], {}), '(FLAGS.n_bases + FLAGS.n_new_bases)\n', (14203, 14238), True, 'import numpy as np\n'), ((14467, 14499), 'numpy.zeros', 'np.zeros', (['(FLAGS.n_new_bases, 3)'], {}), '((FLAGS.n_new_bases, 3))\n', (14475, 14499), True, 'import numpy as np\n'), ((16250, 16283), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.JS_dis_batch'], {}), '(self.JS_dis_batch)\n', (16264, 16283), True, 'import tensorflow as tf\n'), ((16331, 16364), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.L2_dis_batch'], {}), '(self.L2_dis_batch)\n', (16345, 16364), True, 'import tensorflow as tf\n'), ((16412, 16445), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.L1_dis_batch'], {}), '(self.L1_dis_batch)\n', (16426, 
16445), True, 'import tensorflow as tf\n'), ((16852, 16867), 'tensorflow.transpose', 'tf.transpose', (['P'], {}), '(P)\n', (16864, 16867), True, 'import tensorflow as tf\n'), ((16871, 16895), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['P'], {'axis': '(1)'}), '(P, axis=1)\n', (16884, 16895), True, 'import tensorflow as tf\n'), ((17018, 17039), 'tensorflow.log', 'tf.log', (['(P + FLAGS.eps)'], {}), '(P + FLAGS.eps)\n', (17024, 17039), True, 'import tensorflow as tf\n'), ((17923, 17964), 'tensorflow.abs', 'tf.abs', (['(self.prev_shift - self.next_shift)'], {}), '(self.prev_shift - self.next_shift)\n', (17929, 17964), True, 'import tensorflow as tf\n'), ((18046, 18087), 'tensorflow.abs', 'tf.abs', (['(self.prev_shift - self.next_shift)'], {}), '(self.prev_shift - self.next_shift)\n', (18052, 18087), True, 'import tensorflow as tf\n'), ((18258, 18276), 'tensorflow.constant', 'tf.constant', (['[0.0]'], {}), '([0.0])\n', (18269, 18276), True, 'import tensorflow as tf\n'), ((18892, 18922), 'tensorflow.log', 'tf.log', (['self.degree_of_freedom'], {}), '(self.degree_of_freedom)\n', (18898, 18922), True, 'import tensorflow as tf\n'), ((18925, 18971), 'tensorflow.log', 'tf.log', (['(self.degree_of_freedom - 1 + FLAGS.eps)'], {}), '(self.degree_of_freedom - 1 + FLAGS.eps)\n', (18931, 18971), True, 'import tensorflow as tf\n'), ((20229, 20259), 'tensorflow.cast', 'tf.cast', (['condition', 'tf.float32'], {}), '(condition, tf.float32)\n', (20236, 20259), True, 'import tensorflow as tf\n'), ((2852, 2869), 'tensorflow.tanh', 'tf.tanh', (['mu_shift'], {}), '(mu_shift)\n', (2859, 2869), True, 'import tensorflow as tf\n'), ((3804, 3847), 'numpy.zeros', 'np.zeros', (['(FLAGS.n_bases + FLAGS.n_new_bases)'], {}), '(FLAGS.n_bases + FLAGS.n_new_bases)\n', (3812, 3847), True, 'import numpy as np\n'), ((4091, 4178), 'tensorflow.Variable', 'tf.Variable', (['(0.1)'], {'dtype': 'tf.float32', 'trainable': '(True)', 'name': '"""gradual_fattening_ratio"""'}), "(0.1, dtype=tf.float32, 
trainable=True, name=\n 'gradual_fattening_ratio')\n", (4102, 4178), True, 'import tensorflow as tf\n'), ((5687, 5747), 'numpy.zeros', 'np.zeros', (['(FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike)'], {}), '((FLAGS.n_bases + FLAGS.n_new_bases, FLAGS.n_spike))\n', (5695, 5747), True, 'import numpy as np\n'), ((6147, 6164), 'tensorflow.shape', 'tf.shape', (['input_x'], {}), '(input_x)\n', (6155, 6164), True, 'import tensorflow as tf\n'), ((6234, 6257), 'tensorflow.log', 'tf.log', (['intensity_shift'], {}), '(intensity_shift)\n', (6240, 6257), True, 'import tensorflow as tf\n'), ((7161, 7181), 'tensorflow.exp', 'tf.exp', (['recog_logvar'], {}), '(recog_logvar)\n', (7167, 7181), True, 'import tensorflow as tf\n'), ((12695, 12725), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['self.weights'], {}), '(self.weights)\n', (12711, 12725), True, 'import tensorflow as tf\n'), ((13139, 13180), 'numpy.random.randn', 'np.random.randn', (['FLAGS.compressed_xrd_dim'], {}), '(FLAGS.compressed_xrd_dim)\n', (13154, 13180), True, 'import numpy as np\n'), ((13726, 13744), 'tensorflow.transpose', 'tf.transpose', (['tmp2'], {}), '(tmp2)\n', (13738, 13744), True, 'import tensorflow as tf\n'), ((15366, 15404), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.degree_of_freedom'], {}), '(self.degree_of_freedom)\n', (15380, 15404), True, 'import tensorflow as tf\n'), ((17791, 17831), 'tensorflow.minimum', 'tf.minimum', (['self.prev_act', 'self.next_act'], {}), '(self.prev_act, self.next_act)\n', (17801, 17831), True, 'import tensorflow as tf\n'), ((18476, 18494), 'tensorflow.constant', 'tf.constant', (['[0.0]'], {}), '([0.0])\n', (18487, 18494), True, 'import tensorflow as tf\n'), ((18519, 18537), 'tensorflow.constant', 'tf.constant', (['[0.0]'], {}), '([0.0])\n', (18530, 18537), True, 'import tensorflow as tf\n'), ((19202, 19239), 'tensorflow.greater', 'tf.greater', (['act', 'FLAGS.min_activation'], {}), '(act, FLAGS.min_activation)\n', (19212, 19239), True, 'import tensorflow as 
tf\n'), ((19375, 19412), 'tensorflow.greater', 'tf.greater', (['act', 'FLAGS.min_activation'], {}), '(act, FLAGS.min_activation)\n', (19385, 19412), True, 'import tensorflow as tf\n'), ((19548, 19589), 'tensorflow.abs', 'tf.abs', (['(self.prev_shift - self.next_shift)'], {}), '(self.prev_shift - self.next_shift)\n', (19554, 19589), True, 'import tensorflow as tf\n'), ((19629, 19669), 'tensorflow.minimum', 'tf.minimum', (['self.prev_act', 'self.next_act'], {}), '(self.prev_act, self.next_act)\n', (19639, 19669), True, 'import tensorflow as tf\n'), ((19821, 19839), 'tensorflow.constant', 'tf.constant', (['[0.0]'], {}), '([0.0])\n', (19832, 19839), True, 'import tensorflow as tf\n'), ((19865, 19883), 'tensorflow.constant', 'tf.constant', (['[0.0]'], {}), '([0.0])\n', (19876, 19883), True, 'import tensorflow as tf\n'), ((20843, 20871), 'tensorflow.minimum', 'tf.minimum', (['self.epoch', '(10.0)'], {}), '(self.epoch, 10.0)\n', (20853, 20871), True, 'import tensorflow as tf\n'), ((21637, 21682), 'tensorflow.abs', 'tf.abs', (['(self.prev_weights - self.next_weights)'], {}), '(self.prev_weights - self.next_weights)\n', (21643, 21682), True, 'import tensorflow as tf\n'), ((1738, 1786), 'numpy.random.rand', 'np.random.rand', (['FLAGS.n_new_bases', 'FLAGS.n_spike'], {}), '(FLAGS.n_new_bases, FLAGS.n_spike)\n', (1752, 1786), True, 'import numpy as np\n'), ((2783, 2800), 'tensorflow.tanh', 'tf.tanh', (['mu_shift'], {}), '(mu_shift)\n', (2790, 2800), True, 'import tensorflow as tf\n'), ((7060, 7090), 'tensorflow.pow', 'tf.pow', (['(prior_mu - recog_mu)', '(2)'], {}), '(prior_mu - recog_mu, 2)\n', (7066, 7090), True, 'import tensorflow as tf\n'), ((7185, 7205), 'tensorflow.exp', 'tf.exp', (['prior_logvar'], {}), '(prior_logvar)\n', (7191, 7205), True, 'import tensorflow as tf\n'), ((10556, 10584), 'tensorflow.shape', 'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (10564, 10584), True, 'import tensorflow as tf\n'), ((10679, 10707), 'tensorflow.shape', 
'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (10687, 10707), True, 'import tensorflow as tf\n'), ((14596, 14625), 'numpy.load', 'np.load', (['FLAGS.bases_comp_dir'], {}), '(FLAGS.bases_comp_dir)\n', (14603, 14625), True, 'import numpy as np\n'), ((17118, 17146), 'tensorflow.shape', 'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (17126, 17146), True, 'import tensorflow as tf\n'), ((17250, 17278), 'tensorflow.shape', 'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (17258, 17278), True, 'import tensorflow as tf\n'), ((17371, 17399), 'tensorflow.shape', 'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (17379, 17399), True, 'import tensorflow as tf\n'), ((17491, 17519), 'tensorflow.shape', 'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (17499, 17519), True, 'import tensorflow as tf\n'), ((18109, 18149), 'tensorflow.minimum', 'tf.minimum', (['self.prev_act', 'self.next_act'], {}), '(self.prev_act, self.next_act)\n', (18119, 18149), True, 'import tensorflow as tf\n'), ((21177, 21205), 'tensorflow.shape', 'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (21185, 21205), True, 'import tensorflow as tf\n'), ((21310, 21338), 'tensorflow.shape', 'tf.shape', (['self.input_feature'], {}), '(self.input_feature)\n', (21318, 21338), True, 'import tensorflow as tf\n'), ((6438, 6452), 'tensorflow.exp', 'tf.exp', (['logvar'], {}), '(logvar)\n', (6444, 6452), True, 'import tensorflow as tf\n'), ((7094, 7114), 'tensorflow.exp', 'tf.exp', (['prior_logvar'], {}), '(prior_logvar)\n', (7100, 7114), True, 'import tensorflow as tf\n'), ((15468, 15513), 'tensorflow.constant', 'tf.constant', (['[0.0, 0.0, 1.0]'], {'dtype': '"""float32"""'}), "([0.0, 0.0, 1.0], dtype='float32')\n", (15479, 15513), True, 'import tensorflow as tf\n')] |
from __future__ import print_function
import re
import dbconnect
import logging
import multiclasssql_legacy as multiclasssql # Legacy code for scoring cells
import numpy as np
import matplotlib.pyplot as plt
from sys import stdin, stdout, argv, exit
from time import time
class FastGentleBoosting(object):
def __init__(self, classifier = None):
logging.info('Initialized New Classifier: FastGentleBoosting')
self.name = self.name()
self.model = None
self.classBins = []
self.classifier = classifier
self.features = []
# Set features
def _set_features(self, features):
self.features = features
def name(self):
return self.__class__.__name__
    def CheckProgress(self):
        # wx is imported lazily so the module can be used without a GUI.
        import wx
        ''' Called when the Cross Validation Button is pressed. '''
        # get wells if available, otherwise use imagenumbers
        try:
            nRules = int(self.classifier.nRulesTxt.GetValue())
        except:
            logging.error('Unable to parse number of rules')
            return
        if not self.classifier.UpdateTrainingSet():
            self.PostMessage('Cross-validation canceled.')
            return
        # One group label (plate/well) per training object, so folds keep
        # objects from the same well together.
        db = dbconnect.DBConnect.getInstance()
        groups = [db.get_platewell_for_object(key) for key in self.classifier.trainingSet.get_object_keys()]
        t1 = time()
        dlg = wx.ProgressDialog('Computing cross validation accuracy...', '0% Complete', 100, self.classifier, wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
        # base/scale map each round's local [0,1] progress into the bar.
        base = 0.0
        scale = 1.0
        # Raised from the progress callback when the user hits Cancel.
        class StopXValidation(Exception):
            pass
        def progress_callback(amount):
            pct = min(int(100 * (amount * scale + base)), 100)
            cont, skip = dlg.Update(pct, '%d%% Complete'%(pct))
            self.classifier.PostMessage('Computing cross validation accuracy... %s%% Complete'%(pct))
            if not cont:
                raise StopXValidation
        # each round of xvalidation takes about (numfolds * (1 - (1 / num_folds))) time
        step_time_1 = (2.0 * (1.0 - 1.0 / 2.0))
        step_time_2 = (20.0 * (1.0 - 1.0 / 20.0))
        scale = step_time_1 / (10 * step_time_1 + step_time_2)
        xvalid_50 = []
        try:
            n_iter = 1
            # Ten rounds of 2-fold (50% holdout) cross-validation, averaged.
            for i in range(10):
                xval = self.XValidate(
                    self.classifier.trainingSet.colnames, nRules, self.classifier.trainingSet.label_matrix,
                    self.classifier.trainingSet.values, 2, groups, progress_callback)
                if xval is not None:
                    xvalid_50 += xval
                    n_iter += 1
                # each round makes one "scale" size step in progress
                base += scale
            # NOTE(review): n_iter starts at 1 and is incremented once per
            # successful round, so a full run divides by 11 rather than 10 --
            # confirm this averaging is intended.
            xvalid_50 = sum(xvalid_50) / float(n_iter)
            # only one more step
            scale = 1.0 - base
            # One round of 20-fold (95% training data) cross-validation.
            xvalid_95 = self.XValidate(
                self.classifier.trainingSet.colnames, nRules, self.classifier.trainingSet.label_matrix,
                self.classifier.trainingSet.values, 20, groups, progress_callback)
            dlg.Destroy()
            # Plot accuracy (1 - error rate) vs. number of rules.
            figure = plt.figure()
            plt.clf()
            plt.hold(True)
            plt.plot(range(1, nRules + 1), 1.0 - xvalid_50 / float(len(groups)), 'r', label='50% cross-validation accuracy')
            plt.plot(range(1, nRules + 1), 1.0 - xvalid_95[0] / float(len(groups)), 'b', label='95% cross-validation accuracy')
            chance_level = 1.0 / len(self.classifier.trainingSet.labels)
            plt.plot([1, nRules + 1], [chance_level, chance_level], 'k--', label='accuracy of random classifier')
            plt.legend(loc='lower right')
            plt.xlabel('Rule #')
            plt.ylabel('Accuracy')
            plt.xlim(1, max(nRules,2))
            plt.ylim(-0.05, 1.05)
            plt.title('Cross-validation accuracy')
            plt.show()
            self.classifier.PostMessage('Cross-validation complete in %.1fs.'%(time()-t1))
        except StopXValidation:
            # User canceled; just tear down the progress dialog.
            dlg.Destroy()
def ClearModel(self):
self.classBins = []
self.model = None
# Adjust text for the classifier rules panel
def panelTxt(self):
return 'with'
def panelTxt2(self):
return 'max rules'
def CreatePerObjectClassTable(self, labels):
multiclasssql.create_perobject_class_table(labels, self.model)
def FilterObjectsFromClassN(self, obClass, obKeysToTry):
return multiclasssql.FilterObjectsFromClassN(obClass, self.model, obKeysToTry)
def IsTrained(self):
return self.model is not None
def LoadModel(self, model_filename):
# For loading scikit learn library
from sklearn.externals import joblib
try:
self.model, self.bin_labels, self.name = joblib.load(model_filename)
except:
self.model = None
self.bin_labels = None
logging.error('Loading trained model failed')
raise TypeError
def ParseModel(self, string):
self.model = []
string = string.replace('\r\n', '\n')
for line in string.split('\n'):
if line.strip() == '':
continue
m = re.match('^IF \((\w+) > (-{0,1}\d+\.\d+), \[(-{0,1}\d+\.\d+(?:, -{0,1}\d+\.\d+)*)\], \[(-{0,1}\d+\.\d+(?:, -{0,1}\d+\.\d+)*)\]\)',
line, flags=re.IGNORECASE)
if m is None:
raise ValueError
colname, thresh, a, b = m.groups()
thresh = float(thresh)
a = map(float, a.split(','))
b = map(float, b.split(','))
if len(a) != len(b):
raise ValueError('Alpha and beta must have the same cardinality in "IF (column > threshold, alpha, beta)"')
self.model.append((colname, thresh, a, b, None))
n_classes = len(self.model[0][2])
for wl in self.model:
if len(wl[2]) != n_classes:
raise ValueError('Number of classes must remain the same between rules.')
return self.model
def PerImageCounts(self, filter_name=None, cb=None):
return multiclasssql.PerImageCounts(self.model, filter_name=filter_name, cb=cb)
def SaveModel(self, model_filename, bin_labels):
# For loading scikit learn library
from sklearn.externals import joblib
joblib.dump((self.model, bin_labels, self.name), model_filename, compress=1)
def ShowModel(self):
'''
Transforms the weak learners of the algorithm into a human readable
representation
'''
if self.model is not None and self.model is not []:
return '\n'.join("IF (%s > %s, %s, %s)" %(colname, repr(thresh),
"[" + ", ".join([repr(v) for v in a]) + "]",
"[" + ", ".join([repr(v) for v in b]) + "]")
for colname, thresh, a, b, e_m in self.model)
else:
return ''
def Train(self, colnames, num_learners, label_matrix, values, fout=None, do_prof=False, test_values=None, callback=None):
'''
label_matrix is an n by k numpy array containing values of either +1 or -1
values is the n by j numpy array of cell measurements
n = #example cells, k = #classes, j = #measurements
Return a list of learners. Each learner is a tuple (column, thresh, a,
b, average_margin), where column is an integer index into colnames
'''
if 0 in values.shape:
# Nothing to train
return None
assert label_matrix.shape[0] == values.shape[0] # Number of training examples.
computed_labels = np.zeros(label_matrix.shape, np.float32)
num_examples, num_classes = label_matrix.shape
do_tests = (test_values is not None)
if do_tests:
num_tests = test_values.shape[0]
computed_test_labels = np.zeros((num_tests, num_classes), np.float32)
test_labels_by_iteration = []
# Set weights, normalize by number of examples
weights = np.ones(label_matrix.shape, np.float32)
margin_correct = np.zeros((num_examples, num_classes-1), np.float32)
margin_incorrect = np.zeros((num_examples, num_classes-1), np.float32)
for idx in range(num_classes):
classmask = (label_matrix[:, idx] == 1).reshape((num_examples, 1))
num_examples_class = sum(classmask)
weights[np.tile(classmask, (1, num_classes))] /= num_examples_class
balancing = weights.copy()
def GetOneWeakLearner(ctl=None, tlbi=None):
best_error = float(np.Infinity)
for feature_idx in range(values.shape[1]):
thresh, err, a, b = self.TrainWeakLearner(label_matrix, weights, values[:, feature_idx])
if err < best_error:
best_error = err
bestvals = (err, feature_idx, thresh, a, b)
err, column, thresh, a, b = bestvals
# recompute weights
delta = np.reshape(values[:, column] > thresh, (num_examples, 1))
feature_thresh_mask = np.tile(delta, (1, num_classes))
adjustment = feature_thresh_mask * np.tile(a, (num_examples, 1)) + (1 - feature_thresh_mask) * np.tile(b, (num_examples, 1))
recomputed_labels = computed_labels + adjustment
reweights = balancing * np.exp(- recomputed_labels * label_matrix)
reweights = reweights / sum(reweights)
# if we have test values, update their computed labels
if ctl is not None:
test_delta = np.reshape(test_values[:, column] > thresh, (num_tests, 1))
test_feature_thresh_mask = np.tile(test_delta, (1, num_classes))
test_adjustment = test_feature_thresh_mask * np.tile(a, (num_tests, 1)) + (1 - test_feature_thresh_mask) * np.tile(b, (num_tests, 1))
ctl += test_adjustment
tlbi += [ctl.argmax(axis=1)]
return (err, colnames[int(column)], thresh, a, b, reweights, recomputed_labels, adjustment)
self.model = []
for weak_count in range(num_learners):
if do_tests:
err, colname, thresh, a, b, reweight, recomputed_labels, adjustment = GetOneWeakLearner(ctl=computed_test_labels, tlbi=test_labels_by_iteration)
else:
err, colname, thresh, a, b, reweight, recomputed_labels, adjustment = GetOneWeakLearner()
# compute margins
step_correct_class = adjustment[label_matrix > 0].reshape((num_examples, 1))
step_relative = step_correct_class - (adjustment[label_matrix < 0].reshape((num_examples, num_classes - 1)))
mask = (step_relative > 0)
margin_correct += step_relative * mask
margin_incorrect += (- step_relative) * (~ mask)
expected_worst_margin = sum(balancing[:,0] * (margin_correct / (margin_correct + margin_incorrect)).min(axis=1)) / sum(balancing[:,0])
computed_labels = recomputed_labels
self.model += [(colname, thresh, a, b, expected_worst_margin)]
if callback is not None:
callback(weak_count / float(num_learners))
if fout:
colname, thresh, a, b, e_m = self.model[-1]
fout.write("IF (%s > %s, %s, %s)\n" %
(colname, repr(thresh),
"[" + ", ".join([repr(v) for v in a]) + "]",
"[" + ", ".join([repr(v) for v in b]) + "]"))
if err == 0.0:
break
weights = reweight
if do_tests:
return test_labels_by_iteration
def TrainWeakLearner(self, labels, weights, values):
''' For a multiclass training set, with C classes and N examples,
finds the optimal weak learner in O(M * N logN) time.
Optimality is defined by Eq. 7 of Torralba et al., 'Sharing visual
features...', 2007, IEEE PAMI.
We differ from Torralba et al. in two ways:
- we do not share a's and b's between classes
- we always solve for the complete set of examples, regardless of label
Labels should be 1 and -1, only.
label_matrix and weights are NxC.
values is N
'''
global order, s_values, s_labels, s_weights, s_weights_times_labels, num_a, den_a, a, b, sless0, sgrtr0, w_below_neg, w_below_pos, w_above_neg, w_above_pos, J
# Sort labels and weights by values (AKA possible thresholds). By
# default, argsort is not stable, so the results will vary
# slightly with the number of workers. Add kind="mergesort" to
# get a stable sort, which avoids this.
order = np.argsort(values)
s_values = values[order]
s_labels = labels[order, :]
s_weights = weights[order, :]
# useful subfunction
num_examples = labels.shape[0]
def tilesum(a):
return np.tile(np.sum(a, axis=0), (num_examples, 1))
# Equations 9 and 10 of Torralba et al.
s_weights_times_labels = s_weights * s_labels
num_a = (tilesum(s_weights_times_labels) - np.cumsum(s_weights_times_labels, axis=0))
den_a = (tilesum(s_weights) - np.cumsum(s_weights, axis=0))
den_a[den_a <= 0.0] = 1.0 # avoid div by zero
a = num_a / den_a
b = np.cumsum(s_weights_times_labels, axis=0) / np.cumsum(s_weights, axis=0)
# We need, at each index, the total weights below and above,
# separated by positive and negative label. Below includes the
# current index
sless0 = (s_labels < 0)
sgrtr0 = (s_labels > 0)
w_below_neg = np.cumsum(s_weights * sless0, axis=0)
w_below_pos = np.cumsum(s_weights * sgrtr0, axis=0)
w_above_neg = tilesum(s_weights * sless0) - w_below_neg
w_above_pos = tilesum(s_weights * sgrtr0) - w_below_pos
# Now evaluate the error at each threshold.
# (see Equation 7, and note that we're assuming -1 and +1 for entries in the label matrix.
J = w_below_neg * ((-1 - b)**2) + w_below_pos * ((1 - b)**2) + w_above_neg * ((-1 - a)**2) + w_above_pos * ((1 - a)**2)
J = J.sum(axis=1)
# Find index of least error
idx = np.argmin(J)
# make sure we're at the top of this thresh
while (idx+1 < len(s_values)) and (s_values[idx] == s_values[idx + 1]):
idx += 1
# return the threshold at that index
return s_values[idx], J[idx], a[idx, :].copy(), b[idx, :].copy()
def UpdateBins(self, classBins):
self.classBins = classBins
def Usage(self, name):
print("usage %s:" % (name))
print("%s num_learners - read from stdin, write to stdout" % (name))
print("%s num_learners file - read from file, write to stdout" % (name))
print("%s num_learners file1 file2 - read from file1, write to file2" % (name))
print("")
print("Input files should be tab delimited.")
print("Example:")
print("ClassLabel Value1_name Value2_name Value3_name")
print("2 0.1 0.3 1.5")
print("1 0.5 -0.3 0.5")
print("3 0.1 1.0 0.5")
print("")
print("Labels should be positive integers.")
print("Note that if one learner is sufficient, only one will be written.")
exit(1)
    def XValidate(self, colnames, num_learners, label_matrix, values, folds, group_labels, progress_callback, confusion=False):
        """Grouped cross-validation of the boosting model.

        Trains on each fold's hold-in rows and counts misclassifications on
        the holdout rows, keeping all rows that share a group label in the
        same fold.  Returns a single-element list containing the per-rule
        misclassification counts, or (flattened predictions, flattened true
        labels) when confusion is True, or None if training ever fails.
        """
        # if everything's in the same group, ignore the labels
        if all([g == group_labels[0] for g in group_labels]):
            group_labels = range(len(group_labels))
        # randomize the order of labels
        unique_labels = list(set(group_labels))
        np.random.shuffle(unique_labels)
        fold_min_size = len(group_labels) / float(folds)
        num_misclassifications = np.zeros(num_learners, int)
        np_holdout_results = np.array([])
        np_holdout_labels = np.array([])
        # break into folds, randomly, but with all identical group_labels together
        for f in range(folds):
            current_holdout = [False] * len(group_labels)
            # Grow the holdout one whole group at a time until it reaches
            # the target fold size (or the groups run out).
            while unique_labels and (sum(current_holdout) < fold_min_size):
                to_add = unique_labels.pop()
                current_holdout = [(a or b) for a, b in zip(current_holdout, [g == to_add for g in group_labels])]
            if sum(current_holdout) == 0:
                logging.error("no holdout")
                break
            holdout_idx = np.nonzero(current_holdout)[0]
            current_holdin = ~ np.array(current_holdout)
            holdin_idx = np.nonzero(current_holdin)[0]
            holdin_labels = label_matrix[holdin_idx, :]
            holdin_values = values[holdin_idx, :]
            holdout_values = values[holdout_idx, :]
            # Train returns per-iteration holdout predictions via test_values.
            holdout_results = self.Train(colnames, num_learners, holdin_labels, holdin_values, test_values=holdout_values)
            if holdout_results is None:
                return None
            # pad the end of the holdout set with the last element
            if len(holdout_results) < num_learners:
                holdout_results += [holdout_results[-1]] * (num_learners - len(holdout_results))
            holdout_labels = label_matrix[holdout_idx, :].argmax(axis=1)
            if confusion:
                # Accumulate flattened (prediction, truth) pairs across folds.
                np_holdout_results = np.concatenate((np_holdout_results,np.array(holdout_results).flatten()))
                np_holdout_labels = np.concatenate((np_holdout_labels,np.tile(holdout_labels,(num_learners,1)).flatten()))
            num_misclassifications += [sum(hr != holdout_labels) for hr in holdout_results]
            if progress_callback:
                progress_callback(f / float(folds))
        if confusion:
            return np_holdout_results, np_holdout_labels
        else:
            return [num_misclassifications]
def XValidatePredict(self, colnames, num_learners, label_matrix, values, folds, group_labels, progress_callback):
# if everything's in the same group, ignore the labels
if all([g == group_labels[0] for g in group_labels]):
group_labels = range(len(group_labels))
# randomize the order of labels
unique_labels = list(set(group_labels))
np.random.shuffle(unique_labels)
fold_min_size = len(group_labels) / float(folds)
num_misclassifications = np.zeros(num_learners, int)
# break into folds, randomly, but with all identical group_labels together
for f in range(folds):
current_holdout = [False] * len(group_labels)
while unique_labels and (sum(current_holdout) < fold_min_size):
to_add = unique_labels.pop()
current_holdout = [(a or b) for a, b in zip(current_holdout, [g == to_add for g in group_labels])]
if sum(current_holdout) == 0:
logging.error("no holdout")
break
holdout_idx = np.nonzero(current_holdout)[0]
current_holdin = ~ np.array(current_holdout)
holdin_idx = np.nonzero(current_holdin)[0]
holdin_labels = label_matrix[holdin_idx, :]
holdin_values = values[holdin_idx, :]
holdout_values = values[holdout_idx, :]
holdout_results = self.Train(colnames, num_learners, holdin_labels, holdin_values, test_values=holdout_values)
if holdout_results is None:
return None
# pad the end of the holdout set with the last element
if len(holdout_results) < num_learners:
holdout_results += [holdout_results[-1]] * (num_learners - len(holdout_results))
holdout_labels = label_matrix[holdout_idx, :].argmax(axis=1)
num_misclassifications += [sum(hr != holdout_labels) for hr in holdout_results]
if progress_callback:
progress_callback(f / float(folds))
return [num_misclassifications]
# Confusion Matrix
def plot_confusion_matrix(self, conf_arr, title='Confusion matrix', cmap=plt.cm.Blues):
import seaborn as sns
sns.set_style("whitegrid", {'axes.grid' : False})
#plt.imshow(cm, interpolation='nearest', cmap=cmap)
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j)/float(a))
norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=cmap,
interpolation='nearest')
width = len(conf_arr)
height = len(conf_arr[0])
for x in xrange(width):
for y in xrange(height):
if conf_arr[x][y] != 0:
ax.annotate("%.2f" % conf_arr[x][y], xy=(y, x),
horizontalalignment='center',
verticalalignment='center')
plt.title(title)
plt.colorbar(res)
tick_marks = np.arange(len(self.classifier.trainingSet.labels))
plt.xticks(tick_marks, self.classifier.trainingSet.labels, rotation=45)
plt.yticks(tick_marks, self.classifier.trainingSet.labels)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def ConfusionMatrix(self, folds):
from sklearn.metrics import confusion_matrix
import wx
# get wells if available, otherwise use imagenumbers
try:
nRules = int(self.classifier.nRulesTxt.GetValue())
except:
logging.error('Unable to parse number of rules')
return
if not self.classifier.UpdateTrainingSet():
self.PostMessage('Cross-validation canceled.')
return
db = dbconnect.DBConnect.getInstance()
groups = [db.get_platewell_for_object(key) for key in self.classifier.trainingSet.get_object_keys()]
#t1 = time()
#dlg = wx.ProgressDialog('Computing cross validation accuracy...', '0% Complete', 100, self.classifier, wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME | wx.PD_CAN_ABORT)
#base = 0.0
#scale = 1.0
if(folds):
folds = folds
else:
folds = 5
class StopXValidation(Exception):
pass
# def progress_callback(amount):
# pct = min(int(100 * (amount * scale + base)), 100)
# cont, skip = dlg.Update(pct, '%d%% Complete'%(pct))
# self.classifier.PostMessage('Computing cross validation accuracy... %s%% Complete'%(pct))
# if not cont:
# raise StopXValidation
y_pred, y_test = self.XValidate(self.classifier.trainingSet.colnames, nRules, self.classifier.trainingSet.label_matrix,
self.classifier.trainingSet.values, folds, groups, None, confusion=True)
cm = confusion_matrix(y_test, y_pred)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
self.plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
if __name__ == '__main__':
    fgb = FastGentleBoosting()
    # Command line: argv[1] = number of weak learners,
    # argv[2] (optional) = input CSV, argv[3] (optional) = output file.
    if len(argv) == 2:
        fin = stdin
        fout = stdout
    elif len(argv) == 3:
        fin = open(argv[2])
        fout = stdout
    elif len(argv) == 4:
        fin = open(argv[2])
        fout = open(argv[3], 'w')
    else:
        fgb.usage(argv[0])
    num_learners = int(argv[1])
    assert num_learners > 0
    import csv
    reader = csv.reader(fin, delimiter=' ')
    # Fix: reader.next() is Python 2 only; next(reader) works on both.
    header = next(reader)
    label_to_labelidx = {}
    curlabel = 1
    def getNumlabel(strlabel):
        # Map a label string to a stable 1-based integer id, assigning new
        # ids on first sight.
        if strlabel in label_to_labelidx:
            return label_to_labelidx[strlabel]
        global curlabel
        print("LABEL: ", curlabel, strlabel)
        label_to_labelidx[strlabel] = curlabel
        curlabel += 1
        return label_to_labelidx[strlabel]
    colnames = header[1:]
    labels = []
    values = []
    for vals in reader:
        # 'None' entries become 0; everything else is parsed as a float feature.
        values.append([0 if v == 'None' else float(v) for v in vals[1:]])
        numlabel = getNumlabel(vals[0])
        labels.append(numlabel)
    labels = np.array(labels).astype(np.int32)
    values = np.array(values).astype(np.float32)
    # convert labels to a matrix with +1/-1 values only (+1 in the column matching the label, 1-indexed)
    num_classes = max(labels)
    label_matrix = -np.ones((len(labels), num_classes), np.int32)
    for i, j in zip(range(len(labels)), np.array(labels) - 1):
        label_matrix[i, j] = 1
    wl = fgb.Train(colnames, num_learners, label_matrix, values, fout)
    for w in wl:
        print(w)
    print(label_matrix.shape, "groups")
    # NOTE(review): 'xvalidate' (lower-case) does not match the XValidate
    # method visible on this class -- confirm the intended method name.
    print(fgb.xvalidate(colnames, num_learners, label_matrix, values, 20, range(1, label_matrix.shape[0] + 1), None))
#def train_classifier(labels, values, iterations):
# # make sure these are arrays (not matrices)
# labels = array(labels)
# values = array(values)
#
# num_examples = labels.shape[0]
#
# learners = []
# weights = ones(labels.shape)
# output = zeros(labels.shape)
# for n in range(iterations):
# best_error = float(Infinity)
#
# for feature_idx in range(values.shape[1]):
# val, err, a, b = trainWeakLearner(labels, weights, values[:, feature_idx])
# if err < best_error:
# best_error = err
# best_idx = feature_idx
# best_val = val
# best_a = a
# best_b = b
#
# delta = values[:, best_idx] > best_val
# delta.shape = (len(delta), 1)
# feature_thresh_mask = tile(delta, (1, labels.shape[1]))
# output = output + feature_thresh_mask * tile(best_a, (num_examples, 1)) + (1 - feature_thresh_mask) * tile(best_b, (num_examples, 1))
# weights = exp(- output * labels)
# weights = weights / sum(weights)
# err = sum((output * labels) <= 0)
# return
#
#def myfromfile(stream, type, sh):
# if len(sh) == 2:
# tot = sh[0] * sh[1]
# else:
# tot = sh[0]
# result = fromfile(stream, type, tot)
# result.shape = sh
# return result
#
#def doit():
# testing = False
# n, ncols = myfromfile(stdin, int32, (2,))
# num_classes = myfromfile(stdin, int32, (1,))[0]
# values = myfromfile(stdin, float32, (n, ncols))
# label_matrix = myfromfile(stdin, int32, (n, num_classes))
#
# while True:
# # It would be cleaner to tell the worker we're done by just
# # closing the stream, but numpy does strange things (prints
# # error message, signals MemoryError) when myfromfile cannot
# # read as many bytes as expected.
# if stdin.readline() == "done\n":
# return
# weights = myfromfile(stdin, float32, (n, num_classes))
#
# best = float(Infinity)
# for column in range(ncols):
# colvals = values[:, column]
# # print >>stderr, "WORK", column, label_matrix, weights, colvals
# thresh, err, a, b = trainWeakLearner(label_matrix, weights, colvals)
# if err < best:
# best = err
# bestvals = (err, column, thresh, a, b)
#
# err, column, thresh, a, b = bestvals
# array([err, column, thresh], float32).tofile(stdout)
# a.astype(float32).tofile(stdout)
# b.astype(float32).tofile(stdout)
# stdout.flush()
#if __name__ == '__main__':
# try:
# import dl
# h = dl.open('change_malloc_zone.dylib')
# h.call('setup')
# except:
# pass
# if len(argv) != 1:
# import cProfile
# cProfile.runctx("doit()", globals(), locals(), "worker.cprof")
# else:
# try: # Use binary I/O on Windows
# import msvcrt, os
# try:
# msvcrt.setmode(stdin.fileno(), os.O_BINARY)
# except:
# stderr.write("Couldn't deal with stdin\n")
# pass
# try:
# msvcrt.setmode(stdout.fileno(), os.O_BINARY)
# stderr.write("Couldn't deal with stdout\n")
# except:
# pass
# except ImportError:
# pass
# doit()
# try:
# h.call('teardown')
# except:
# pass
| [
"matplotlib.pyplot.title",
"sklearn.externals.joblib.dump",
"csv.reader",
"numpy.sum",
"multiclasssql_legacy.create_perobject_class_table",
"matplotlib.pyplot.clf",
"numpy.ones",
"numpy.argmin",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.tile",
"multiclasssql_legacy.FilterObjectsFromC... | [((23963, 23994), 'csv.reader', 'csv.reader', (['fin'], {'delimiter': '"""\t"""'}), "(fin, delimiter='\\t')\n", (23973, 23994), False, 'import csv\n'), ((359, 421), 'logging.info', 'logging.info', (['"""Initialized New Classifier: FastGentleBoosting"""'], {}), "('Initialized New Classifier: FastGentleBoosting')\n", (371, 421), False, 'import logging\n'), ((1227, 1260), 'dbconnect.DBConnect.getInstance', 'dbconnect.DBConnect.getInstance', ([], {}), '()\n', (1258, 1260), False, 'import dbconnect\n'), ((1384, 1390), 'time.time', 'time', ([], {}), '()\n', (1388, 1390), False, 'from time import time\n'), ((1405, 1595), 'wx.ProgressDialog', 'wx.ProgressDialog', (['"""Computing cross validation accuracy..."""', '"""0% Complete"""', '(100)', 'self.classifier', '(wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.PD_REMAINING_TIME | wx.\n PD_CAN_ABORT)'], {}), "('Computing cross validation accuracy...', '0% Complete', \n 100, self.classifier, wx.PD_ELAPSED_TIME | wx.PD_ESTIMATED_TIME | wx.\n PD_REMAINING_TIME | wx.PD_CAN_ABORT)\n", (1422, 1595), False, 'import wx\n'), ((4399, 4461), 'multiclasssql_legacy.create_perobject_class_table', 'multiclasssql.create_perobject_class_table', (['labels', 'self.model'], {}), '(labels, self.model)\n', (4441, 4461), True, 'import multiclasssql_legacy as multiclasssql\n'), ((4539, 4610), 'multiclasssql_legacy.FilterObjectsFromClassN', 'multiclasssql.FilterObjectsFromClassN', (['obClass', 'self.model', 'obKeysToTry'], {}), '(obClass, self.model, obKeysToTry)\n', (4576, 4610), True, 'import multiclasssql_legacy as multiclasssql\n'), ((6221, 6293), 'multiclasssql_legacy.PerImageCounts', 'multiclasssql.PerImageCounts', (['self.model'], {'filter_name': 'filter_name', 'cb': 'cb'}), '(self.model, filter_name=filter_name, cb=cb)\n', (6249, 6293), True, 'import multiclasssql_legacy as multiclasssql\n'), ((6445, 6521), 'sklearn.externals.joblib.dump', 'joblib.dump', (['(self.model, bin_labels, self.name)', 
'model_filename'], {'compress': '(1)'}), '((self.model, bin_labels, self.name), model_filename, compress=1)\n', (6456, 6521), False, 'from sklearn.externals import joblib\n'), ((7771, 7811), 'numpy.zeros', 'np.zeros', (['label_matrix.shape', 'np.float32'], {}), '(label_matrix.shape, np.float32)\n', (7779, 7811), True, 'import numpy as np\n'), ((8175, 8214), 'numpy.ones', 'np.ones', (['label_matrix.shape', 'np.float32'], {}), '(label_matrix.shape, np.float32)\n', (8182, 8214), True, 'import numpy as np\n'), ((8240, 8293), 'numpy.zeros', 'np.zeros', (['(num_examples, num_classes - 1)', 'np.float32'], {}), '((num_examples, num_classes - 1), np.float32)\n', (8248, 8293), True, 'import numpy as np\n'), ((8319, 8372), 'numpy.zeros', 'np.zeros', (['(num_examples, num_classes - 1)', 'np.float32'], {}), '((num_examples, num_classes - 1), np.float32)\n', (8327, 8372), True, 'import numpy as np\n'), ((12889, 12907), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (12899, 12907), True, 'import numpy as np\n'), ((13855, 13892), 'numpy.cumsum', 'np.cumsum', (['(s_weights * sless0)'], {'axis': '(0)'}), '(s_weights * sless0, axis=0)\n', (13864, 13892), True, 'import numpy as np\n'), ((13915, 13952), 'numpy.cumsum', 'np.cumsum', (['(s_weights * sgrtr0)'], {'axis': '(0)'}), '(s_weights * sgrtr0, axis=0)\n', (13924, 13952), True, 'import numpy as np\n'), ((14438, 14450), 'numpy.argmin', 'np.argmin', (['J'], {}), '(J)\n', (14447, 14450), True, 'import numpy as np\n'), ((15547, 15554), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (15551, 15554), False, 'from sys import stdin, stdout, argv, exit\n'), ((15958, 15990), 'numpy.random.shuffle', 'np.random.shuffle', (['unique_labels'], {}), '(unique_labels)\n', (15975, 15990), True, 'import numpy as np\n'), ((16082, 16109), 'numpy.zeros', 'np.zeros', (['num_learners', 'int'], {}), '(num_learners, int)\n', (16090, 16109), True, 'import numpy as np\n'), ((16140, 16152), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (16148, 
16152), True, 'import numpy as np\n'), ((16181, 16193), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (16189, 16193), True, 'import numpy as np\n'), ((18506, 18538), 'numpy.random.shuffle', 'np.random.shuffle', (['unique_labels'], {}), '(unique_labels)\n', (18523, 18538), True, 'import numpy as np\n'), ((18630, 18657), 'numpy.zeros', 'np.zeros', (['num_learners', 'int'], {}), '(num_learners, int)\n', (18638, 18657), True, 'import numpy as np\n'), ((20359, 20407), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': False}"], {}), "('whitegrid', {'axes.grid': False})\n", (20372, 20407), True, 'import seaborn as sns\n'), ((20716, 20728), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20726, 20728), True, 'import matplotlib.pyplot as plt\n'), ((20737, 20746), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (20744, 20746), True, 'import matplotlib.pyplot as plt\n'), ((21286, 21302), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (21295, 21302), True, 'import matplotlib.pyplot as plt\n'), ((21311, 21328), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['res'], {}), '(res)\n', (21323, 21328), True, 'import matplotlib.pyplot as plt\n'), ((21409, 21480), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'self.classifier.trainingSet.labels'], {'rotation': '(45)'}), '(tick_marks, self.classifier.trainingSet.labels, rotation=45)\n', (21419, 21480), True, 'import matplotlib.pyplot as plt\n'), ((21489, 21547), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'self.classifier.trainingSet.labels'], {}), '(tick_marks, self.classifier.trainingSet.labels)\n', (21499, 21547), True, 'import matplotlib.pyplot as plt\n'), ((21556, 21574), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21572, 21574), True, 'import matplotlib.pyplot as plt\n'), ((21583, 21607), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (21593, 21607), True, 'import 
matplotlib.pyplot as plt\n'), ((21616, 21645), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (21626, 21645), True, 'import matplotlib.pyplot as plt\n'), ((22144, 22177), 'dbconnect.DBConnect.getInstance', 'dbconnect.DBConnect.getInstance', ([], {}), '()\n', (22175, 22177), False, 'import dbconnect\n'), ((23284, 23316), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (23300, 23316), False, 'from sklearn.metrics import confusion_matrix\n'), ((23401, 23433), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (23420, 23433), True, 'import numpy as np\n'), ((23538, 23548), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23546, 23548), True, 'import matplotlib.pyplot as plt\n'), ((3203, 3215), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3213, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3228, 3237), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3235, 3237), True, 'import matplotlib.pyplot as plt\n'), ((3250, 3264), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (3258, 3264), True, 'import matplotlib.pyplot as plt\n'), ((3603, 3709), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, nRules + 1]', '[chance_level, chance_level]', '"""k--"""'], {'label': '"""accuracy of random classifier"""'}), "([1, nRules + 1], [chance_level, chance_level], 'k--', label=\n 'accuracy of random classifier')\n", (3611, 3709), True, 'import matplotlib.pyplot as plt\n'), ((3717, 3746), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3727, 3746), True, 'import matplotlib.pyplot as plt\n'), ((3759, 3779), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rule #"""'], {}), "('Rule #')\n", (3769, 3779), True, 'import matplotlib.pyplot as plt\n'), ((3792, 3814), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), 
"('Accuracy')\n", (3802, 3814), True, 'import matplotlib.pyplot as plt\n'), ((3866, 3887), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.05)', '(1.05)'], {}), '(-0.05, 1.05)\n', (3874, 3887), True, 'import matplotlib.pyplot as plt\n'), ((3900, 3938), 'matplotlib.pyplot.title', 'plt.title', (['"""Cross-validation accuracy"""'], {}), "('Cross-validation accuracy')\n", (3909, 3938), True, 'import matplotlib.pyplot as plt\n'), ((3951, 3961), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3959, 3961), True, 'import matplotlib.pyplot as plt\n'), ((4880, 4907), 'sklearn.externals.joblib.load', 'joblib.load', (['model_filename'], {}), '(model_filename)\n', (4891, 4907), False, 'from sklearn.externals import joblib\n'), ((5296, 5485), 're.match', 're.match', (['"""^IF \\\\((\\\\w+) > (-{0,1}\\\\d+\\\\.\\\\d+), \\\\[(-{0,1}\\\\d+\\\\.\\\\d+(?:, -{0,1}\\\\d+\\\\.\\\\d+)*)\\\\], \\\\[(-{0,1}\\\\d+\\\\.\\\\d+(?:, -{0,1}\\\\d+\\\\.\\\\d+)*)\\\\]\\\\)"""', 'line'], {'flags': 're.IGNORECASE'}), "(\n '^IF \\\\((\\\\w+) > (-{0,1}\\\\d+\\\\.\\\\d+), \\\\[(-{0,1}\\\\d+\\\\.\\\\d+(?:, -{0,1}\\\\d+\\\\.\\\\d+)*)\\\\], \\\\[(-{0,1}\\\\d+\\\\.\\\\d+(?:, -{0,1}\\\\d+\\\\.\\\\d+)*)\\\\]\\\\)'\n , line, flags=re.IGNORECASE)\n", (5304, 5485), False, 'import re\n'), ((8013, 8059), 'numpy.zeros', 'np.zeros', (['(num_tests, num_classes)', 'np.float32'], {}), '((num_tests, num_classes), np.float32)\n', (8021, 8059), True, 'import numpy as np\n'), ((9148, 9205), 'numpy.reshape', 'np.reshape', (['(values[:, column] > thresh)', '(num_examples, 1)'], {}), '(values[:, column] > thresh, (num_examples, 1))\n', (9158, 9205), True, 'import numpy as np\n'), ((9240, 9272), 'numpy.tile', 'np.tile', (['delta', '(1, num_classes)'], {}), '(delta, (1, num_classes))\n', (9247, 9272), True, 'import numpy as np\n'), ((13327, 13368), 'numpy.cumsum', 'np.cumsum', (['s_weights_times_labels'], {'axis': '(0)'}), '(s_weights_times_labels, axis=0)\n', (13336, 13368), True, 'import numpy as np\n'), ((13408, 
13436), 'numpy.cumsum', 'np.cumsum', (['s_weights'], {'axis': '(0)'}), '(s_weights, axis=0)\n', (13417, 13436), True, 'import numpy as np\n'), ((13530, 13571), 'numpy.cumsum', 'np.cumsum', (['s_weights_times_labels'], {'axis': '(0)'}), '(s_weights_times_labels, axis=0)\n', (13539, 13571), True, 'import numpy as np\n'), ((13574, 13602), 'numpy.cumsum', 'np.cumsum', (['s_weights'], {'axis': '(0)'}), '(s_weights, axis=0)\n', (13583, 13602), True, 'import numpy as np\n'), ((20830, 20849), 'numpy.array', 'np.array', (['norm_conf'], {}), '(norm_conf)\n', (20838, 20849), True, 'import numpy as np\n'), ((24610, 24626), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (24618, 24626), True, 'import numpy as np\n'), ((24657, 24673), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (24665, 24673), True, 'import numpy as np\n'), ((24935, 24951), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (24943, 24951), True, 'import numpy as np\n'), ((1005, 1053), 'logging.error', 'logging.error', (['"""Unable to parse number of rules"""'], {}), "('Unable to parse number of rules')\n", (1018, 1053), False, 'import logging\n'), ((5001, 5046), 'logging.error', 'logging.error', (['"""Loading trained model failed"""'], {}), "('Loading trained model failed')\n", (5014, 5046), False, 'import logging\n'), ((8557, 8593), 'numpy.tile', 'np.tile', (['classmask', '(1, num_classes)'], {}), '(classmask, (1, num_classes))\n', (8564, 8593), True, 'import numpy as np\n'), ((9507, 9548), 'numpy.exp', 'np.exp', (['(-recomputed_labels * label_matrix)'], {}), '(-recomputed_labels * label_matrix)\n', (9513, 9548), True, 'import numpy as np\n'), ((9730, 9789), 'numpy.reshape', 'np.reshape', (['(test_values[:, column] > thresh)', '(num_tests, 1)'], {}), '(test_values[:, column] > thresh, (num_tests, 1))\n', (9740, 9789), True, 'import numpy as np\n'), ((9833, 9870), 'numpy.tile', 'np.tile', (['test_delta', '(1, num_classes)'], {}), '(test_delta, (1, num_classes))\n', (9840, 9870), 
True, 'import numpy as np\n'), ((13135, 13152), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (13141, 13152), True, 'import numpy as np\n'), ((16662, 16689), 'logging.error', 'logging.error', (['"""no holdout"""'], {}), "('no holdout')\n", (16675, 16689), False, 'import logging\n'), ((16739, 16766), 'numpy.nonzero', 'np.nonzero', (['current_holdout'], {}), '(current_holdout)\n', (16749, 16766), True, 'import numpy as np\n'), ((16801, 16826), 'numpy.array', 'np.array', (['current_holdout'], {}), '(current_holdout)\n', (16809, 16826), True, 'import numpy as np\n'), ((16852, 16878), 'numpy.nonzero', 'np.nonzero', (['current_holdin'], {}), '(current_holdin)\n', (16862, 16878), True, 'import numpy as np\n'), ((19126, 19153), 'logging.error', 'logging.error', (['"""no holdout"""'], {}), "('no holdout')\n", (19139, 19153), False, 'import logging\n'), ((19203, 19230), 'numpy.nonzero', 'np.nonzero', (['current_holdout'], {}), '(current_holdout)\n', (19213, 19230), True, 'import numpy as np\n'), ((19265, 19290), 'numpy.array', 'np.array', (['current_holdout'], {}), '(current_holdout)\n', (19273, 19290), True, 'import numpy as np\n'), ((19316, 19342), 'numpy.nonzero', 'np.nonzero', (['current_holdin'], {}), '(current_holdin)\n', (19326, 19342), True, 'import numpy as np\n'), ((21922, 21970), 'logging.error', 'logging.error', (['"""Unable to parse number of rules"""'], {}), "('Unable to parse number of rules')\n", (21935, 21970), False, 'import logging\n'), ((9320, 9349), 'numpy.tile', 'np.tile', (['a', '(num_examples, 1)'], {}), '(a, (num_examples, 1))\n', (9327, 9349), True, 'import numpy as np\n'), ((9380, 9409), 'numpy.tile', 'np.tile', (['b', '(num_examples, 1)'], {}), '(b, (num_examples, 1))\n', (9387, 9409), True, 'import numpy as np\n'), ((4041, 4047), 'time.time', 'time', ([], {}), '()\n', (4045, 4047), False, 'from time import time\n'), ((9932, 9958), 'numpy.tile', 'np.tile', (['a', '(num_tests, 1)'], {}), '(a, (num_tests, 1))\n', (9939, 9958), 
True, 'import numpy as np\n'), ((9994, 10020), 'numpy.tile', 'np.tile', (['b', '(num_tests, 1)'], {}), '(b, (num_tests, 1))\n', (10001, 10020), True, 'import numpy as np\n'), ((17619, 17644), 'numpy.array', 'np.array', (['holdout_results'], {}), '(holdout_results)\n', (17627, 17644), True, 'import numpy as np\n'), ((17727, 17769), 'numpy.tile', 'np.tile', (['holdout_labels', '(num_learners, 1)'], {}), '(holdout_labels, (num_learners, 1))\n', (17734, 17769), True, 'import numpy as np\n')] |
from os import path
import os
import numpy as np
import nibabel as nib
from torch.utils.data import DataLoader
import argparse
import matplotlib.pyplot as plt
import warnings
from clinicadl.tools.deep_learning.iotools import read_json, commandline_to_json, translate_parameters, return_logger
from clinicadl.tools.deep_learning.cnn_utils import get_criterion, sort_predicted
from clinicadl.tools.deep_learning.models import create_model, load_model
from clinicadl.tools.deep_learning.data import load_data_test, return_dataset, get_transforms
from .gradients import VanillaBackProp
def group_backprop(options):
    """Compute and save mean vanilla-backprop saliency maps per fold/selection.

    For every ``fold-*`` directory under ``options.model_path`` and each model
    selection requested, the best model is reloaded, subjects are filtered with
    ``sort_predicted``, gradients w.r.t. the target diagnosis are averaged over
    the dataset, and the mean map is written under
    ``<model_path>/<fold>/gradients/<selection>/<name>`` (NIfTI for 3D data,
    JPEG + npy otherwise).
    """
    main_logger = return_logger(options.verbose, "main process")
    options = translate_parameters(options)
    fold_list = [fold for fold in os.listdir(options.model_path) if fold[:5:] == "fold-"]
    if len(fold_list) == 0:
        raise ValueError("No folds were found at path %s" % options.model_path)

    # Reload the options the model was trained with.
    model_options = argparse.Namespace()
    model_options = read_json(model_options, path.join(options.model_path, 'commandline.json'))
    model_options = translate_parameters(model_options)
    model_options.gpu = options.gpu

    if model_options.network_type == "multicnn":
        raise NotImplementedError("The interpretation of multi-CNN is not implemented.")

    # Fall back on the training configuration when data options were not given.
    if options.tsv_path is None and options.input_dir is None:
        options.multi_cohort = model_options.multi_cohort
    if options.tsv_path is None:
        options.tsv_path = model_options.tsv_path
    if options.input_dir is None:
        options.input_dir = model_options.input_dir
    if options.target_diagnosis is None:
        options.target_diagnosis = options.diagnosis

    for fold in fold_list:
        main_logger.info(fold)
        for selection in options.selection:
            results_path = path.join(options.model_path, fold, 'gradients',
                                    selection, options.name)

            criterion = get_criterion(model_options.loss)

            # Data management (remove data not well predicted by the CNN)
            training_df = load_data_test(options.tsv_path, [options.diagnosis], baseline=options.baseline,
                                          multi_cohort=options.multi_cohort)
            training_df.reset_index(drop=True, inplace=True)

            # Model creation
            _, all_transforms = get_transforms(model_options.mode,
                                                minmaxnormalization=model_options.minmaxnormalization)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                data_example = return_dataset(model_options.mode, options.input_dir,
                                              training_df, model_options.preprocessing,
                                              train_transformations=None, all_transformations=all_transforms,
                                              prepare_dl=options.prepare_dl, multi_cohort=options.multi_cohort,
                                              params=model_options)
            model = create_model(model_options, data_example.size)
            model_dir = os.path.join(options.model_path, fold, 'models', selection)
            model, best_epoch = load_model(model, model_dir, gpu=options.gpu, filename='model_best.pth.tar')
            options.output_dir = results_path
            commandline_to_json(options, logger=main_logger)

            # Keep only subjects who were correctly / wrongly predicted by the network
            training_df = sort_predicted(model, training_df, options.input_dir, model_options,
                                          criterion, options.keep_true,
                                          batch_size=options.batch_size, num_workers=options.num_workers,
                                          gpu=options.gpu)

            if len(training_df) > 0:
                # Save the tsv files used for the saliency maps.
                # Fix: path.join('data.tsv') resolved to the current working
                # directory and was clobbered on every fold/selection; save it
                # alongside the maps instead.
                training_df.to_csv(path.join(results_path, 'data.tsv'), sep='\t', index=False)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    data_train = return_dataset(model_options.mode, options.input_dir,
                                                training_df, model_options.preprocessing,
                                                train_transformations=None, all_transformations=all_transforms,
                                                prepare_dl=options.prepare_dl, multi_cohort=options.multi_cohort,
                                                params=model_options)
                train_loader = DataLoader(data_train,
                                          batch_size=options.batch_size,
                                          shuffle=True,
                                          num_workers=options.num_workers,
                                          pin_memory=True)

                interpreter = VanillaBackProp(model, gpu=options.gpu)

                # Accumulate gradients over the whole loader, then average.
                cum_map = 0
                for data in train_loader:
                    if options.gpu:
                        input_batch = data['image'].cuda()
                    else:
                        input_batch = data['image']
                    maps = interpreter.generate_gradients(input_batch,
                                                          data_train.diagnosis_code[options.target_diagnosis])
                    cum_map += maps.sum(axis=0)
                mean_map = cum_map / len(data_train)

                if len(data_train.size) == 4:
                    # 3D image: save as NIfTI, reusing the template affine when given.
                    if options.nifti_template_path is not None:
                        image_nii = nib.load(options.nifti_template_path)
                        affine = image_nii.affine
                    else:
                        affine = np.eye(4)
                    mean_map_nii = nib.Nifti1Image(mean_map[0], affine)
                    nib.save(mean_map_nii, path.join(results_path, "map.nii.gz"))
                else:
                    jpg_path = path.join(results_path, "map.jpg")
                    plt.imshow(mean_map[0], cmap="coolwarm", vmin=-options.vmax, vmax=options.vmax)
                    plt.colorbar()
                    plt.savefig(jpg_path)
                    plt.close()
                    np.save(path.join(results_path, "map.npy"), mean_map[0])
            else:
                # Fix: Logger.warn is deprecated; warning() is the documented API.
                main_logger.warning("There are no subjects for the given options")
| [
"argparse.Namespace",
"clinicadl.tools.deep_learning.iotools.commandline_to_json",
"clinicadl.tools.deep_learning.data.load_data_test",
"os.path.join",
"warnings.simplefilter",
"torch.utils.data.DataLoader",
"clinicadl.tools.deep_learning.data.get_transforms",
"matplotlib.pyplot.imshow",
"matplotlib... | [((633, 679), 'clinicadl.tools.deep_learning.iotools.return_logger', 'return_logger', (['options.verbose', '"""main process"""'], {}), "(options.verbose, 'main process')\n", (646, 679), False, 'from clinicadl.tools.deep_learning.iotools import read_json, commandline_to_json, translate_parameters, return_logger\n'), ((694, 723), 'clinicadl.tools.deep_learning.iotools.translate_parameters', 'translate_parameters', (['options'], {}), '(options)\n', (714, 723), False, 'from clinicadl.tools.deep_learning.iotools import read_json, commandline_to_json, translate_parameters, return_logger\n'), ((944, 964), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '()\n', (962, 964), False, 'import argparse\n'), ((1081, 1116), 'clinicadl.tools.deep_learning.iotools.translate_parameters', 'translate_parameters', (['model_options'], {}), '(model_options)\n', (1101, 1116), False, 'from clinicadl.tools.deep_learning.iotools import read_json, commandline_to_json, translate_parameters, return_logger\n'), ((1010, 1059), 'os.path.join', 'path.join', (['options.model_path', '"""commandline.json"""'], {}), "(options.model_path, 'commandline.json')\n", (1019, 1059), False, 'from os import path\n'), ((759, 789), 'os.listdir', 'os.listdir', (['options.model_path'], {}), '(options.model_path)\n', (769, 789), False, 'import os\n'), ((1807, 1880), 'os.path.join', 'path.join', (['options.model_path', 'fold', '"""gradients"""', 'selection', 'options.name'], {}), "(options.model_path, fold, 'gradients', selection, options.name)\n", (1816, 1880), False, 'from os import path\n'), ((1943, 1976), 'clinicadl.tools.deep_learning.cnn_utils.get_criterion', 'get_criterion', (['model_options.loss'], {}), '(model_options.loss)\n', (1956, 1976), False, 'from clinicadl.tools.deep_learning.cnn_utils import get_criterion, sort_predicted\n'), ((2078, 2198), 'clinicadl.tools.deep_learning.data.load_data_test', 'load_data_test', (['options.tsv_path', '[options.diagnosis]'], {'baseline': 
'options.baseline', 'multi_cohort': 'options.multi_cohort'}), '(options.tsv_path, [options.diagnosis], baseline=options.\n baseline, multi_cohort=options.multi_cohort)\n', (2092, 2198), False, 'from clinicadl.tools.deep_learning.data import load_data_test, return_dataset, get_transforms\n'), ((2358, 2452), 'clinicadl.tools.deep_learning.data.get_transforms', 'get_transforms', (['model_options.mode'], {'minmaxnormalization': 'model_options.minmaxnormalization'}), '(model_options.mode, minmaxnormalization=model_options.\n minmaxnormalization)\n', (2372, 2452), False, 'from clinicadl.tools.deep_learning.data import load_data_test, return_dataset, get_transforms\n'), ((3071, 3117), 'clinicadl.tools.deep_learning.models.create_model', 'create_model', (['model_options', 'data_example.size'], {}), '(model_options, data_example.size)\n', (3083, 3117), False, 'from clinicadl.tools.deep_learning.models import create_model, load_model\n'), ((3142, 3201), 'os.path.join', 'os.path.join', (['options.model_path', 'fold', '"""models"""', 'selection'], {}), "(options.model_path, fold, 'models', selection)\n", (3154, 3201), False, 'import os\n'), ((3234, 3310), 'clinicadl.tools.deep_learning.models.load_model', 'load_model', (['model', 'model_dir'], {'gpu': 'options.gpu', 'filename': '"""model_best.pth.tar"""'}), "(model, model_dir, gpu=options.gpu, filename='model_best.pth.tar')\n", (3244, 3310), False, 'from clinicadl.tools.deep_learning.models import create_model, load_model\n'), ((3369, 3417), 'clinicadl.tools.deep_learning.iotools.commandline_to_json', 'commandline_to_json', (['options'], {'logger': 'main_logger'}), '(options, logger=main_logger)\n', (3388, 3417), False, 'from clinicadl.tools.deep_learning.iotools import read_json, commandline_to_json, translate_parameters, return_logger\n'), ((3532, 3719), 'clinicadl.tools.deep_learning.cnn_utils.sort_predicted', 'sort_predicted', (['model', 'training_df', 'options.input_dir', 'model_options', 'criterion', 
'options.keep_true'], {'batch_size': 'options.batch_size', 'num_workers': 'options.num_workers', 'gpu': 'options.gpu'}), '(model, training_df, options.input_dir, model_options,\n criterion, options.keep_true, batch_size=options.batch_size,\n num_workers=options.num_workers, gpu=options.gpu)\n', (3546, 3719), False, 'from clinicadl.tools.deep_learning.cnn_utils import get_criterion, sort_predicted\n'), ((2512, 2537), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2535, 2537), False, 'import warnings\n'), ((2555, 2586), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2576, 2586), False, 'import warnings\n'), ((2618, 2877), 'clinicadl.tools.deep_learning.data.return_dataset', 'return_dataset', (['model_options.mode', 'options.input_dir', 'training_df', 'model_options.preprocessing'], {'train_transformations': 'None', 'all_transformations': 'all_transforms', 'prepare_dl': 'options.prepare_dl', 'multi_cohort': 'options.multi_cohort', 'params': 'model_options'}), '(model_options.mode, options.input_dir, training_df,\n model_options.preprocessing, train_transformations=None,\n all_transformations=all_transforms, prepare_dl=options.prepare_dl,\n multi_cohort=options.multi_cohort, params=model_options)\n', (2632, 2877), False, 'from clinicadl.tools.deep_learning.data import load_data_test, return_dataset, get_transforms\n'), ((4625, 4746), 'torch.utils.data.DataLoader', 'DataLoader', (['data_train'], {'batch_size': 'options.batch_size', 'shuffle': '(True)', 'num_workers': 'options.num_workers', 'pin_memory': '(True)'}), '(data_train, batch_size=options.batch_size, shuffle=True,\n num_workers=options.num_workers, pin_memory=True)\n', (4635, 4746), False, 'from torch.utils.data import DataLoader\n'), ((3973, 3994), 'os.path.join', 'path.join', (['"""data.tsv"""'], {}), "('data.tsv')\n", (3982, 3994), False, 'from os import path\n'), ((4041, 4066), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], 
{}), '()\n', (4064, 4066), False, 'import warnings\n'), ((4088, 4119), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (4109, 4119), False, 'import warnings\n'), ((4153, 4412), 'clinicadl.tools.deep_learning.data.return_dataset', 'return_dataset', (['model_options.mode', 'options.input_dir', 'training_df', 'model_options.preprocessing'], {'train_transformations': 'None', 'all_transformations': 'all_transforms', 'prepare_dl': 'options.prepare_dl', 'multi_cohort': 'options.multi_cohort', 'params': 'model_options'}), '(model_options.mode, options.input_dir, training_df,\n model_options.preprocessing, train_transformations=None,\n all_transformations=all_transforms, prepare_dl=options.prepare_dl,\n multi_cohort=options.multi_cohort, params=model_options)\n', (4167, 4412), False, 'from clinicadl.tools.deep_learning.data import load_data_test, return_dataset, get_transforms\n'), ((5851, 5887), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['mean_map[0]', 'affine'], {}), '(mean_map[0], affine)\n', (5866, 5887), True, 'import nibabel as nib\n'), ((6023, 6057), 'os.path.join', 'path.join', (['results_path', '"""map.jpg"""'], {}), "(results_path, 'map.jpg')\n", (6032, 6057), False, 'from os import path\n'), ((6078, 6157), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mean_map[0]'], {'cmap': '"""coolwarm"""', 'vmin': '(-options.vmax)', 'vmax': 'options.vmax'}), "(mean_map[0], cmap='coolwarm', vmin=-options.vmax, vmax=options.vmax)\n", (6088, 6157), True, 'import matplotlib.pyplot as plt\n'), ((6178, 6192), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6190, 6192), True, 'import matplotlib.pyplot as plt\n'), ((6213, 6234), 'matplotlib.pyplot.savefig', 'plt.savefig', (['jpg_path'], {}), '(jpg_path)\n', (6224, 6234), True, 'import matplotlib.pyplot as plt\n'), ((6255, 6266), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6264, 6266), True, 'import matplotlib.pyplot as plt\n'), ((6291, 6325), 'os.path.join', 
'path.join', (['results_path', '"""map.npy"""'], {}), "(results_path, 'map.npy')\n", (6300, 6325), False, 'from os import path\n'), ((5658, 5695), 'nibabel.load', 'nib.load', (['options.nifti_template_path'], {}), '(options.nifti_template_path)\n', (5666, 5695), True, 'import nibabel as nib\n'), ((5805, 5814), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5811, 5814), True, 'import numpy as np\n'), ((5931, 5968), 'os.path.join', 'path.join', (['results_path', '"""map.nii.gz"""'], {}), "(results_path, 'map.nii.gz')\n", (5940, 5968), False, 'from os import path\n')] |
import os
import shutil
import gym
import numpy as np
import pytest
from stable_baselines import (A2C, ACER, ACKTR, GAIL, DDPG, DQN, PPO1, PPO2,
TD3, TRPO, SAC)
from stable_baselines.common.cmd_util import make_atari_env
from stable_baselines.common.vec_env import VecFrameStack, DummyVecEnv
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.callbacks import CheckpointCallback
from stable_baselines.gail import ExpertDataset, generate_expert_traj
EXPERT_PATH_PENDULUM = "stable_baselines/gail/dataset/expert_pendulum.npz"
EXPERT_PATH_DISCRETE = "stable_baselines/gail/dataset/expert_cartpole.npz"
@pytest.mark.parametrize("expert_env", [('Pendulum-v0', EXPERT_PATH_PENDULUM, True),
('CartPole-v1', EXPERT_PATH_DISCRETE, False)])
def test_gail(tmp_path, expert_env):
env_id, expert_path, load_from_memory = expert_env
env = gym.make(env_id)
traj_data = None
if load_from_memory:
traj_data = np.load(expert_path)
expert_path = None
dataset = ExpertDataset(traj_data=traj_data, expert_path=expert_path, traj_limitation=10,
sequential_preprocessing=True)
# Note: train for 1M steps to have a working policy
model = GAIL('MlpPolicy', env, adversary_entcoeff=0.0, lam=0.92, max_kl=0.001,
expert_dataset=dataset, hidden_size_adversary=64, verbose=0)
model.learn(300)
model.save(str(tmp_path / "GAIL-{}".format(env_id)))
model = model.load(str(tmp_path / "GAIL-{}".format(env_id)), env=env)
model.learn(300)
evaluate_policy(model, env, n_eval_episodes=5)
del dataset, model
@pytest.mark.parametrize("generate_env", [
(SAC, 'MlpPolicy', 'Pendulum-v0', 1, 10),
(DQN, 'MlpPolicy', 'CartPole-v1', 1, 10),
(A2C, 'MlpLstmPolicy', 'Pendulum-v0', 1, 10),
(A2C, 'MlpLstmPolicy', 'CartPole-v1', 1, 10),
(A2C, 'CnnPolicy', 'BreakoutNoFrameskip-v4', 8, 1),
])
def test_generate(tmp_path, generate_env):
model, policy, env_name, n_env, n_episodes = generate_env
if n_env > 1:
env = make_atari_env(env_name, num_env=n_env, seed=0)
model = model(policy, env, verbose=0)
else:
model = model(policy, env_name, verbose=0)
dataset = generate_expert_traj(model, str(tmp_path / 'expert'), n_timesteps=300, n_episodes=n_episodes,
image_folder=str(tmp_path / 'test_recorded_images'))
assert set(dataset.keys()).issuperset(['actions', 'obs', 'rewards', 'episode_returns', 'episode_starts'])
assert sum(dataset['episode_starts']) == n_episodes
assert len(dataset['episode_returns']) == n_episodes
n_timesteps = len(dataset['episode_starts'])
for key, val in dataset.items():
if key != 'episode_returns':
assert val.shape[0] == n_timesteps, "inconsistent number of timesteps at '{}'".format(key)
dataset_loaded = np.load(str(tmp_path / 'expert.npz'), allow_pickle=True)
assert dataset.keys() == dataset_loaded.keys()
for key in dataset.keys():
assert (dataset[key] == dataset_loaded[key]).all(), "different data at '{}'".format(key)
# Cleanup folder
if os.path.isdir(str(tmp_path / 'test_recorded_images')):
shutil.rmtree(str(tmp_path / 'test_recorded_images'))
def test_generate_callable(tmp_path):
"""
Test generating expert trajectories with a callable.
"""
env = gym.make("CartPole-v1")
# Here the expert is a random agent
def dummy_expert(_obs):
return env.action_space.sample()
generate_expert_traj(dummy_expert, tmp_path / 'dummy_expert_cartpole', env, n_timesteps=0, n_episodes=10)
@pytest.mark.xfail(reason="Not Enough Memory", strict=False)
def test_pretrain_images(tmp_path):
env = make_atari_env("PongNoFrameskip-v4", num_env=1, seed=0)
env = VecFrameStack(env, n_stack=3)
model = PPO2('CnnPolicy', env)
generate_expert_traj(model, str(tmp_path / 'expert_pong'), n_timesteps=0, n_episodes=1,
image_folder=str(tmp_path / 'pretrain_recorded_images'))
expert_path = str(tmp_path / 'expert_pong.npz')
dataset = ExpertDataset(expert_path=expert_path, traj_limitation=1, batch_size=32,
sequential_preprocessing=True)
model.pretrain(dataset, n_epochs=2)
shutil.rmtree(str(tmp_path / 'pretrain_recorded_images'))
env.close()
del dataset, model, env
def test_gail_callback(tmp_path):
dataset = ExpertDataset(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,
sequential_preprocessing=True, verbose=0)
model = GAIL("MlpPolicy", "Pendulum-v0", dataset)
checkpoint_callback = CheckpointCallback(save_freq=150, save_path=str(tmp_path / 'logs/gail/'), name_prefix='gail')
model.learn(total_timesteps=301, callback=checkpoint_callback)
shutil.rmtree(str(tmp_path / 'logs/gail/'))
del dataset, model
@pytest.mark.parametrize("model_class", [A2C, ACKTR, GAIL, DDPG, PPO1, PPO2, SAC, TD3, TRPO])
def test_behavior_cloning_box(tmp_path, model_class):
"""
Behavior cloning with continuous actions.
"""
dataset = ExpertDataset(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,
sequential_preprocessing=True, verbose=0)
model = model_class("MlpPolicy", "Pendulum-v0")
model.pretrain(dataset, n_epochs=5)
model.save(str(tmp_path / "test-pretrain"))
del dataset, model
@pytest.mark.parametrize("model_class", [A2C, ACER, ACKTR, DQN, GAIL, PPO1, PPO2, TRPO])
def test_behavior_cloning_discrete(tmp_path, model_class):
dataset = ExpertDataset(expert_path=EXPERT_PATH_DISCRETE, traj_limitation=10,
sequential_preprocessing=True, verbose=0)
model = model_class("MlpPolicy", "CartPole-v1")
model.pretrain(dataset, n_epochs=5)
model.save(str(tmp_path / "test-pretrain"))
del dataset, model
def test_dataset_param_validation():
with pytest.raises(ValueError):
ExpertDataset()
traj_data = np.load(EXPERT_PATH_PENDULUM)
with pytest.raises(ValueError):
ExpertDataset(traj_data=traj_data, expert_path=EXPERT_PATH_PENDULUM)
def test_generate_vec_env_non_image_observation():
env = DummyVecEnv([lambda: gym.make('CartPole-v1')] * 2)
model = PPO2('MlpPolicy', env)
model.learn(total_timesteps=300)
generate_expert_traj(model, save_path='.', n_timesteps=0, n_episodes=5)
| [
"numpy.load",
"stable_baselines.PPO2",
"gym.make",
"stable_baselines.GAIL",
"stable_baselines.common.cmd_util.make_atari_env",
"stable_baselines.gail.generate_expert_traj",
"pytest.raises",
"stable_baselines.gail.ExpertDataset",
"stable_baselines.common.vec_env.VecFrameStack",
"pytest.mark.paramet... | [((677, 811), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expert_env"""', "[('Pendulum-v0', EXPERT_PATH_PENDULUM, True), ('CartPole-v1',\n EXPERT_PATH_DISCRETE, False)]"], {}), "('expert_env', [('Pendulum-v0', EXPERT_PATH_PENDULUM,\n True), ('CartPole-v1', EXPERT_PATH_DISCRETE, False)])\n", (700, 811), False, 'import pytest\n'), ((1705, 1988), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""generate_env"""', "[(SAC, 'MlpPolicy', 'Pendulum-v0', 1, 10), (DQN, 'MlpPolicy', 'CartPole-v1',\n 1, 10), (A2C, 'MlpLstmPolicy', 'Pendulum-v0', 1, 10), (A2C,\n 'MlpLstmPolicy', 'CartPole-v1', 1, 10), (A2C, 'CnnPolicy',\n 'BreakoutNoFrameskip-v4', 8, 1)]"], {}), "('generate_env', [(SAC, 'MlpPolicy', 'Pendulum-v0', \n 1, 10), (DQN, 'MlpPolicy', 'CartPole-v1', 1, 10), (A2C, 'MlpLstmPolicy',\n 'Pendulum-v0', 1, 10), (A2C, 'MlpLstmPolicy', 'CartPole-v1', 1, 10), (\n A2C, 'CnnPolicy', 'BreakoutNoFrameskip-v4', 8, 1)])\n", (1728, 1988), False, 'import pytest\n'), ((3952, 4011), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Not Enough Memory"""', 'strict': '(False)'}), "(reason='Not Enough Memory', strict=False)\n", (3969, 4011), False, 'import pytest\n'), ((5212, 5308), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_class"""', '[A2C, ACKTR, GAIL, DDPG, PPO1, PPO2, SAC, TD3, TRPO]'], {}), "('model_class', [A2C, ACKTR, GAIL, DDPG, PPO1, PPO2,\n SAC, TD3, TRPO])\n", (5235, 5308), False, 'import pytest\n'), ((5739, 5830), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_class"""', '[A2C, ACER, ACKTR, DQN, GAIL, PPO1, PPO2, TRPO]'], {}), "('model_class', [A2C, ACER, ACKTR, DQN, GAIL, PPO1,\n PPO2, TRPO])\n", (5762, 5830), False, 'import pytest\n'), ((950, 966), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (958, 966), False, 'import gym\n'), ((1096, 1211), 'stable_baselines.gail.ExpertDataset', 'ExpertDataset', ([], {'traj_data': 'traj_data', 'expert_path': 'expert_path', 
'traj_limitation': '(10)', 'sequential_preprocessing': '(True)'}), '(traj_data=traj_data, expert_path=expert_path, traj_limitation\n =10, sequential_preprocessing=True)\n', (1109, 1211), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((1304, 1439), 'stable_baselines.GAIL', 'GAIL', (['"""MlpPolicy"""', 'env'], {'adversary_entcoeff': '(0.0)', 'lam': '(0.92)', 'max_kl': '(0.001)', 'expert_dataset': 'dataset', 'hidden_size_adversary': '(64)', 'verbose': '(0)'}), "('MlpPolicy', env, adversary_entcoeff=0.0, lam=0.92, max_kl=0.001,\n expert_dataset=dataset, hidden_size_adversary=64, verbose=0)\n", (1308, 1439), False, 'from stable_baselines import A2C, ACER, ACKTR, GAIL, DDPG, DQN, PPO1, PPO2, TD3, TRPO, SAC\n'), ((1632, 1678), 'stable_baselines.common.evaluation.evaluate_policy', 'evaluate_policy', (['model', 'env'], {'n_eval_episodes': '(5)'}), '(model, env, n_eval_episodes=5)\n', (1647, 1678), False, 'from stable_baselines.common.evaluation import evaluate_policy\n'), ((3706, 3729), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (3714, 3729), False, 'import gym\n'), ((3843, 3952), 'stable_baselines.gail.generate_expert_traj', 'generate_expert_traj', (['dummy_expert', "(tmp_path / 'dummy_expert_cartpole')", 'env'], {'n_timesteps': '(0)', 'n_episodes': '(10)'}), "(dummy_expert, tmp_path / 'dummy_expert_cartpole', env,\n n_timesteps=0, n_episodes=10)\n", (3863, 3952), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((4058, 4113), 'stable_baselines.common.cmd_util.make_atari_env', 'make_atari_env', (['"""PongNoFrameskip-v4"""'], {'num_env': '(1)', 'seed': '(0)'}), "('PongNoFrameskip-v4', num_env=1, seed=0)\n", (4072, 4113), False, 'from stable_baselines.common.cmd_util import make_atari_env\n'), ((4124, 4153), 'stable_baselines.common.vec_env.VecFrameStack', 'VecFrameStack', (['env'], {'n_stack': '(3)'}), '(env, n_stack=3)\n', (4137, 4153), False, 'from 
stable_baselines.common.vec_env import VecFrameStack, DummyVecEnv\n'), ((4166, 4188), 'stable_baselines.PPO2', 'PPO2', (['"""CnnPolicy"""', 'env'], {}), "('CnnPolicy', env)\n", (4170, 4188), False, 'from stable_baselines import A2C, ACER, ACKTR, GAIL, DDPG, DQN, PPO1, PPO2, TD3, TRPO, SAC\n'), ((4430, 4537), 'stable_baselines.gail.ExpertDataset', 'ExpertDataset', ([], {'expert_path': 'expert_path', 'traj_limitation': '(1)', 'batch_size': '(32)', 'sequential_preprocessing': '(True)'}), '(expert_path=expert_path, traj_limitation=1, batch_size=32,\n sequential_preprocessing=True)\n', (4443, 4537), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((4759, 4872), 'stable_baselines.gail.ExpertDataset', 'ExpertDataset', ([], {'expert_path': 'EXPERT_PATH_PENDULUM', 'traj_limitation': '(10)', 'sequential_preprocessing': '(True)', 'verbose': '(0)'}), '(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,\n sequential_preprocessing=True, verbose=0)\n', (4772, 4872), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((4909, 4950), 'stable_baselines.GAIL', 'GAIL', (['"""MlpPolicy"""', '"""Pendulum-v0"""', 'dataset'], {}), "('MlpPolicy', 'Pendulum-v0', dataset)\n", (4913, 4950), False, 'from stable_baselines import A2C, ACER, ACKTR, GAIL, DDPG, DQN, PPO1, PPO2, TD3, TRPO, SAC\n'), ((5435, 5548), 'stable_baselines.gail.ExpertDataset', 'ExpertDataset', ([], {'expert_path': 'EXPERT_PATH_PENDULUM', 'traj_limitation': '(10)', 'sequential_preprocessing': '(True)', 'verbose': '(0)'}), '(expert_path=EXPERT_PATH_PENDULUM, traj_limitation=10,\n sequential_preprocessing=True, verbose=0)\n', (5448, 5548), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((5900, 6013), 'stable_baselines.gail.ExpertDataset', 'ExpertDataset', ([], {'expert_path': 'EXPERT_PATH_DISCRETE', 'traj_limitation': '(10)', 'sequential_preprocessing': '(True)', 'verbose': '(0)'}), '(expert_path=EXPERT_PATH_DISCRETE, 
traj_limitation=10,\n sequential_preprocessing=True, verbose=0)\n', (5913, 6013), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((6317, 6346), 'numpy.load', 'np.load', (['EXPERT_PATH_PENDULUM'], {}), '(EXPERT_PATH_PENDULUM)\n', (6324, 6346), True, 'import numpy as np\n'), ((6587, 6609), 'stable_baselines.PPO2', 'PPO2', (['"""MlpPolicy"""', 'env'], {}), "('MlpPolicy', env)\n", (6591, 6609), False, 'from stable_baselines import A2C, ACER, ACKTR, GAIL, DDPG, DQN, PPO1, PPO2, TD3, TRPO, SAC\n'), ((6652, 6723), 'stable_baselines.gail.generate_expert_traj', 'generate_expert_traj', (['model'], {'save_path': '"""."""', 'n_timesteps': '(0)', 'n_episodes': '(5)'}), "(model, save_path='.', n_timesteps=0, n_episodes=5)\n", (6672, 6723), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((1034, 1054), 'numpy.load', 'np.load', (['expert_path'], {}), '(expert_path)\n', (1041, 1054), True, 'import numpy as np\n'), ((2378, 2425), 'stable_baselines.common.cmd_util.make_atari_env', 'make_atari_env', (['env_name'], {'num_env': 'n_env', 'seed': '(0)'}), '(env_name, num_env=n_env, seed=0)\n', (2392, 2425), False, 'from stable_baselines.common.cmd_util import make_atari_env\n'), ((6249, 6274), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6262, 6274), False, 'import pytest\n'), ((6284, 6299), 'stable_baselines.gail.ExpertDataset', 'ExpertDataset', ([], {}), '()\n', (6297, 6299), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), ((6356, 6381), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6369, 6381), False, 'import pytest\n'), ((6391, 6459), 'stable_baselines.gail.ExpertDataset', 'ExpertDataset', ([], {'traj_data': 'traj_data', 'expert_path': 'EXPERT_PATH_PENDULUM'}), '(traj_data=traj_data, expert_path=EXPERT_PATH_PENDULUM)\n', (6404, 6459), False, 'from stable_baselines.gail import ExpertDataset, generate_expert_traj\n'), 
((6544, 6567), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (6552, 6567), False, 'import gym\n')] |
import numpy as np
import pickle
import datetime
with open("../data/dicOfMatrix.pickle",'rb') as f:
data = pickle.load(f)
with open("../data/factor_in_2020.pickle",'rb') as f:
factor_in = pickle.load(f)
with open("../data/factor_out_2020.pickle",'rb') as f:
factor_out = pickle.load(f)
coef = 9.3 # 迁徙规模修正系数
def L_iw():
"""
international to WuHan
:return: # of people fly to WuHan from international.
"""
return 0 #just my toy number
def L_cw(t):
"""
China to WuHan
:param t: time
:return: # of people come to WuHan from China.
"""
if t <= 31:
time = datetime.timedelta(int(t)) + datetime.date(2020,1,1)
mat = data[time]
fvector = factor_out[:,(time-datetime.date(2020,1,1)).days]*coef
result = 0.01*mat[:,0] #取武汉这一列
# print(result)
result = fvector.dot(result)
return result
elif t >= 32:
return np.mean([L_cw(i) for i in range(25,31)])*np.exp(-1*(t-31))
def L_wi():
"""
WuHan to international
:return: # of people fly from WuHan to international.
"""
return 0 # just my toy number
def L_wc(t):
"""
WuHan to China
:param t: time
:return:# of people from WuHan to other China cities.
"""
begin = datetime.date(2020,1,1)
if t <= 31:
time = datetime.timedelta(int(t)) + datetime.date(2020, 1, 1)
mat = data[time]
fvector = factor_out[0,(time-begin).days] * coef
result = 0.01 * mat[0,:] # 取武汉这一行
# print(result)
result = np.sum(result) * fvector
return result
elif t >= 32:
return np.mean([L_wc(i) for i in range(25, 31)])*np.exp(-1*(t-32))
def z(t):
"""
zoonotic force of infection
:param t: time
:return: # of people got sick because of zoonotic.
"""
if t < 1:
return 82
else:
return 0
def R0(t):
"""
基本感染数 : 平均一个患者感染几个人
:param t:
:return:
"""
if t < 60:
return 2.5
else:
return 0.5
Di = 7.5 # mean infectious period 感染期 (平均一个病人感染多久就会死亡或是痊愈)
De = 6.5 # mean latent period 潜伏期 (平均一个感染者需要多久才会表现出症状)
| [
"numpy.exp",
"pickle.load",
"datetime.date",
"numpy.sum"
] | [((115, 129), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (126, 129), False, 'import pickle\n'), ((202, 216), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (213, 216), False, 'import pickle\n'), ((291, 305), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (302, 305), False, 'import pickle\n'), ((1322, 1347), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (1335, 1347), False, 'import datetime\n'), ((671, 696), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (684, 696), False, 'import datetime\n'), ((1408, 1433), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (1421, 1433), False, 'import datetime\n'), ((1605, 1619), 'numpy.sum', 'np.sum', (['result'], {}), '(result)\n', (1611, 1619), True, 'import numpy as np\n'), ((997, 1018), 'numpy.exp', 'np.exp', (['(-1 * (t - 31))'], {}), '(-1 * (t - 31))\n', (1003, 1018), True, 'import numpy as np\n'), ((1730, 1751), 'numpy.exp', 'np.exp', (['(-1 * (t - 32))'], {}), '(-1 * (t - 32))\n', (1736, 1751), True, 'import numpy as np\n'), ((759, 784), 'datetime.date', 'datetime.date', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (772, 784), False, 'import datetime\n')] |
import cv2
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.layers import Activation, Dropout, Convolution2D, GlobalAveragePooling2D
from keras.models import Sequential
import tensorflow as tf
import os
from random import shuffle
img_path='res'
CLASS_MAP = { #any new gesture has to be added here
"none":0,
"one":1,
"two":2,
"three":3,
"four":4,
"five":5,
"six":6
}
NUM_CLASSES = len(CLASS_MAP)
def mapper(val):
return CLASS_MAP[val]
def get_model():
model = Sequential([
SqueezeNet(input_shape=(227, 227, 3), include_top=False),
Dropout(0.6),
Convolution2D(NUM_CLASSES, (1, 1), padding='valid'),
Activation('relu'),
GlobalAveragePooling2D(),
Activation('softmax')
])
return model
dataset = []
for directory in os.listdir(img_path):
path = os.path.join(img_path, directory)
if not os.path.isdir(path):
continue
for item in os.listdir(path):
# to make sure no hidden files get in our way
if item.startswith("."):
continue
img = cv2.imread(os.path.join(path, item))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (227, 227))
dataset.append([img, directory])
shuffle(dataset)
data, labels = zip(*dataset)
labels = list(map(mapper, labels))
labels = np_utils.to_categorical(labels) #encoding now
model = get_model()
model.compile(
optimizer=Adam(lr=0.0001),
loss='categorical_crossentropy',
metrics=['accuracy']
)
X=np.array(data)
Y=np.array(labels)
model.fit(X, Y, validation_split=0.25, epochs=5, batch_size=50)
model.save("gesturecheck.h5")
| [
"keras.layers.Convolution2D",
"keras.layers.Activation",
"os.path.isdir",
"random.shuffle",
"cv2.cvtColor",
"keras.layers.Dropout",
"keras.optimizers.Adam",
"keras_squeezenet.SqueezeNet",
"keras.layers.GlobalAveragePooling2D",
"keras.utils.np_utils.to_categorical",
"numpy.array",
"os.path.join... | [((898, 918), 'os.listdir', 'os.listdir', (['img_path'], {}), '(img_path)\n', (908, 918), False, 'import os\n'), ((1342, 1358), 'random.shuffle', 'shuffle', (['dataset'], {}), '(dataset)\n', (1349, 1358), False, 'from random import shuffle\n'), ((1432, 1463), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['labels'], {}), '(labels)\n', (1455, 1463), False, 'from keras.utils import np_utils\n'), ((1610, 1624), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1618, 1624), True, 'import numpy as np\n'), ((1627, 1643), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1635, 1643), True, 'import numpy as np\n'), ((931, 964), 'os.path.join', 'os.path.join', (['img_path', 'directory'], {}), '(img_path, directory)\n', (943, 964), False, 'import os\n'), ((1030, 1046), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1040, 1046), False, 'import os\n'), ((976, 995), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (989, 995), False, 'import os\n'), ((1221, 1257), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1233, 1257), False, 'import cv2\n'), ((1272, 1299), 'cv2.resize', 'cv2.resize', (['img', '(227, 227)'], {}), '(img, (227, 227))\n', (1282, 1299), False, 'import cv2\n'), ((1527, 1542), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (1531, 1542), False, 'from keras.optimizers import Adam\n'), ((610, 666), 'keras_squeezenet.SqueezeNet', 'SqueezeNet', ([], {'input_shape': '(227, 227, 3)', 'include_top': '(False)'}), '(input_shape=(227, 227, 3), include_top=False)\n', (620, 666), False, 'from keras_squeezenet import SqueezeNet\n'), ((676, 688), 'keras.layers.Dropout', 'Dropout', (['(0.6)'], {}), '(0.6)\n', (683, 688), False, 'from keras.layers import Activation, Dropout, Convolution2D, GlobalAveragePooling2D\n'), ((698, 749), 'keras.layers.Convolution2D', 'Convolution2D', (['NUM_CLASSES', '(1, 1)'], {'padding': 
'"""valid"""'}), "(NUM_CLASSES, (1, 1), padding='valid')\n", (711, 749), False, 'from keras.layers import Activation, Dropout, Convolution2D, GlobalAveragePooling2D\n'), ((759, 777), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (769, 777), False, 'from keras.layers import Activation, Dropout, Convolution2D, GlobalAveragePooling2D\n'), ((787, 811), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (809, 811), False, 'from keras.layers import Activation, Dropout, Convolution2D, GlobalAveragePooling2D\n'), ((821, 842), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (831, 842), False, 'from keras.layers import Activation, Dropout, Convolution2D, GlobalAveragePooling2D\n'), ((1181, 1205), 'os.path.join', 'os.path.join', (['path', 'item'], {}), '(path, item)\n', (1193, 1205), False, 'import os\n')] |
import cv2
from PIL import Image, ImageOps
from tqdm import tqdm
from functions.folder_check import FOLDER_LIST, OUTPUT_FOLDERS
import os
from functions import folder_check as fldr_chk
import random
import math
import numpy as np
OUTPUT_EXTENSION = '.jpg'
RED = 128
BLUE = 200
class ImgComposer:
def __init__(self,
resolution: tuple,
classes_to_include: list,
num_of_images:int,
max_objects_in_image:int,
output_directory:str,
efo_directory:str,
include_negative_examples=[],
datafolder = './Data'):
self.classes_to_include = classes_to_include
self.include_negative_examples = include_negative_examples
self.num_of_images = num_of_images
self.max_objects_in_images = max_objects_in_image
self.output_directory = output_directory
self.efo_directory = efo_directory
self.resolution = resolution # (width, height)
self.datafolder = datafolder
def compose(self):
for imgno in tqdm(range(self.num_of_images)):
img_number = str(fldr_chk.get_num(self.output_directory,
OUTPUT_EXTENSION))
final_path = os.path.join(self.output_directory,
img_number)
img_name = f"{final_path}.jpg"
# Coloured annotation mask
output_dir = os.path.dirname(self.output_directory)
cm_path = os.path.join(output_dir,
OUTPUT_FOLDERS[2],
img_number)
cm_name = f"{cm_path}.png"
# Number of objects to add in the image
NumObjects = random.randrange(1,
self.max_objects_in_images+1)
# list of integer values for green
# for objects in the range of
# 0 to 255
greenlist = list(range(1, 255,
int(255/NumObjects)))
Num = 0
greenval_and_obj = {}
while Num != NumObjects:
classinstance = random.choice(self.classes_to_include)
greenval_and_obj[greenlist[Num]] = classinstance
Num += 1
# Background image
bg_folder = os.path.join(self.datafolder,(FOLDER_LIST[0]))
random_bg = random.choice(os.listdir(bg_folder))
bg_path = os.path.join(bg_folder, random_bg)
background = Image.open(bg_path)
background = background.resize(self.resolution,
Image.ANTIALIAS)
# Black image for binary mask
bin_mask = Image.new('RGBA',
background.size,
color='black')
# TODO: Adding negative examples
for green, obj in greenval_and_obj.items():
fg_point = os.path.join(self.efo_directory, obj)
if len(os.listdir(fg_point)) != 0:
fg = random.choice(os.listdir(fg_point))
fg_path = os.path.join(fg_point, fg)
msk_fldr = os.path.join(os.path.dirname(self.efo_directory),
OUTPUT_FOLDERS[4])
msk_path = os.path.join(msk_fldr, obj, fg)
mask = Image.open(msk_path).convert('L')
foreground = Image.open(fg_path)
ColourRGB = (RED, green, BLUE)
foreground, mask = scaled(foreground,
background, mask)
foreground, mask = rotated(foreground, mask)
foreground, mask = flipped(foreground, mask)
background, bin_mask = placed(foreground,
background,
mask,
bin_mask,
ColourRGB)
background.save(img_name)
bin_mask.save(cm_name)
# Saving the annotation masks
save_annotation_masks(bin_mask,
img_number,
greenval_and_obj,
output_dir)
def scaled(foreground, background, mask,
scaled_to=None):
"""
Function for scaling the foreground object
with respect to the background
Inputs:
foreground = PIL Image of the foreground
background = PIL Image of the background
mask = PIL Image of foreground mask
scaled_to = float; scaling factor of foreground,
if None then uses random values
with respect to the background
"""
if scaled_to is None:
bg_width, _ = background.size
fg_width, fg_height = foreground.size
new_width = random.randrange((math.floor(0.005*bg_width)),
math.ceil(0.6*bg_width))
new_height = int(fg_height * (new_width/fg_width))
new_size = (new_width, new_height)
resized_fg = foreground.resize(new_size,
Image.BICUBIC)
resized_mask = mask.resize(new_size,
Image.BICUBIC)
return resized_fg, resized_mask
def rotated(foreground, mask,
angle=random.randrange(0,359)):
"""
Function for rotating the foreground object
Also performs the same operation for the mask
Inputs:
foreground = PIL Image of foreground
mask = PIL Image of mask
angle = angle in degrees to rotate the foreground
Outputs:
foreground and mask with rotation performed
"""
foreground = foreground.rotate(angle,
resample=Image.BICUBIC)
mask = mask.rotate(angle,
resample=Image.BICUBIC)
return foreground, mask
def flipped(foreground, mask,
flip_foreground=random.choice((True, False))):
"""
Performs Mirror operation on the foreground
object and the mask of the foreground
Inputs:
foreground = PIL Image of foreground
mask = PIL Image of mask
flip_foreground = Bool to decide whether
or not to mirror the foreground object
Outputs:
foreground and mask with the operation
performed
"""
if flip_foreground:
foreground = ImageOps.mirror(foreground)
mask = ImageOps.mirror(mask)
return foreground, mask
def placed(foreground, background, mask,
bin_mask=None,
mask_colour=None,
placed_at=None):
"""
Function for placing the foreground object
on the background.
Inputs:
foreground = PIL Image of the foreground
background = PIL Image of the background
mask = PIL Image of the background
placed_at = Tuple containing x and
y co-ordinates
bin_mask = PIL Image of the final
annotation mask
mask_colour = Tuple of RGB values for mask
Outputs:
returns composed image and the final
coloured annotation mask
"""
if placed_at == None:
bg_wide, bg_high = background.size
# Location on X axis
x_low_limit = -0.1*bg_wide
x_up_limit = 0.8*bg_wide
x = random.randrange(x_low_limit,
x_up_limit)
# Location on Y axis
y_low_limit = -0.1*bg_high
y_up_limit = 0.8*bg_high
y = random.randrange(y_low_limit,
y_up_limit)
placed_at = (x,y)
# Compositing the foreground on the background
background.paste(foreground,
placed_at,
mask)
if bin_mask != None:
mask_coloured = mask_maker(mask,
mask_colour)
bin_mask.paste(mask_coloured, placed_at,
mask)
return background, bin_mask
def mask_maker(mask, mask_colour):
"""
Function to create the coloured mask for
compositing on the annotation mask
Inputs:
mask = PIL Image of the foreground mask
mask_coloured = Tuple of RGB values for mask
"""
w, h = mask.size
mask_coloured = Image.new("RGB", (w,h),
mask_colour)
mask_coloured.putalpha(mask)
return mask_coloured
def save_annotation_masks(bin_mask,
img_number:str,
dict_color_object:dict,
output_dir:str):
"""
Function for saving the annotation masks
of the different objects in the format
required for PyCocoCreatorTools
img_number= str; image number to save the
binary mask
dict_color_object= dict; dictionary of
green value and
object name
output_dir = str; Path to directory where
annotation masks will be saved
"""
instance_num = 0
for green, object in dict_color_object.items():
object = object.lower()
annotationmask_name = f"{img_number}_{object}_{instance_num}.png"
ColourBGR = (BLUE, green, RED)
anno_mask = annotation_mask_maker(bin_mask,
ColourBGR)
anno_mask_path = os.path.join(output_dir,
OUTPUT_FOLDERS[0],
annotationmask_name)
instance_num +=1
cv2.imwrite(anno_mask_path, anno_mask)
def annotation_mask_maker(bin_mask, ColourBGR):
"""
Makes a black and white annotation masks for each object instance
Input: The binary mask with each object instance in a different colour, Colour is the tuple in BGR corresponding to object instance.
Output: A binary mask showing the object instance in white while the rest of the image is black.
"""
kernel = np.ones((3,3),np.uint8)
thresh_l = ColourBGR
thresh_h = ColourBGR
bin_mask = cv2.cvtColor(np.array(bin_mask), cv2.COLOR_RGB2BGR)
anno_mask = cv2.inRange(bin_mask, thresh_l, thresh_h)
anno_mask = cv2.morphologyEx(anno_mask, cv2.MORPH_OPEN, kernel)
return anno_mask | [
"functions.folder_check.get_num",
"PIL.Image.new",
"os.path.join",
"math.ceil",
"cv2.morphologyEx",
"cv2.imwrite",
"os.path.dirname",
"math.floor",
"numpy.ones",
"random.choice",
"PIL.Image.open",
"PIL.ImageOps.mirror",
"random.randrange",
"numpy.array",
"cv2.inRange",
"os.listdir"
] | [((5480, 5504), 'random.randrange', 'random.randrange', (['(0)', '(359)'], {}), '(0, 359)\n', (5496, 5504), False, 'import random\n'), ((6056, 6084), 'random.choice', 'random.choice', (['(True, False)'], {}), '((True, False))\n', (6069, 6084), False, 'import random\n'), ((8304, 8341), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(w, h)', 'mask_colour'], {}), "('RGB', (w, h), mask_colour)\n", (8313, 8341), False, 'from PIL import Image, ImageOps\n'), ((9940, 9965), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (9947, 9965), True, 'import numpy as np\n'), ((10097, 10138), 'cv2.inRange', 'cv2.inRange', (['bin_mask', 'thresh_l', 'thresh_h'], {}), '(bin_mask, thresh_l, thresh_h)\n', (10108, 10138), False, 'import cv2\n'), ((10155, 10206), 'cv2.morphologyEx', 'cv2.morphologyEx', (['anno_mask', 'cv2.MORPH_OPEN', 'kernel'], {}), '(anno_mask, cv2.MORPH_OPEN, kernel)\n', (10171, 10206), False, 'import cv2\n'), ((6478, 6505), 'PIL.ImageOps.mirror', 'ImageOps.mirror', (['foreground'], {}), '(foreground)\n', (6493, 6505), False, 'from PIL import Image, ImageOps\n'), ((6521, 6542), 'PIL.ImageOps.mirror', 'ImageOps.mirror', (['mask'], {}), '(mask)\n', (6536, 6542), False, 'from PIL import Image, ImageOps\n'), ((7382, 7423), 'random.randrange', 'random.randrange', (['x_low_limit', 'x_up_limit'], {}), '(x_low_limit, x_up_limit)\n', (7398, 7423), False, 'import random\n'), ((7561, 7602), 'random.randrange', 'random.randrange', (['y_low_limit', 'y_up_limit'], {}), '(y_low_limit, y_up_limit)\n', (7577, 7602), False, 'import random\n'), ((9368, 9432), 'os.path.join', 'os.path.join', (['output_dir', 'OUTPUT_FOLDERS[0]', 'annotationmask_name'], {}), '(output_dir, OUTPUT_FOLDERS[0], annotationmask_name)\n', (9380, 9432), False, 'import os\n'), ((9514, 9552), 'cv2.imwrite', 'cv2.imwrite', (['anno_mask_path', 'anno_mask'], {}), '(anno_mask_path, anno_mask)\n', (9525, 9552), False, 'import cv2\n'), ((10042, 10060), 'numpy.array', 'np.array', 
(['bin_mask'], {}), '(bin_mask)\n', (10050, 10060), True, 'import numpy as np\n'), ((1288, 1335), 'os.path.join', 'os.path.join', (['self.output_directory', 'img_number'], {}), '(self.output_directory, img_number)\n', (1300, 1335), False, 'import os\n'), ((1488, 1526), 'os.path.dirname', 'os.path.dirname', (['self.output_directory'], {}), '(self.output_directory)\n', (1503, 1526), False, 'import os\n'), ((1549, 1604), 'os.path.join', 'os.path.join', (['output_dir', 'OUTPUT_FOLDERS[2]', 'img_number'], {}), '(output_dir, OUTPUT_FOLDERS[2], img_number)\n', (1561, 1604), False, 'import os\n'), ((1794, 1845), 'random.randrange', 'random.randrange', (['(1)', '(self.max_objects_in_images + 1)'], {}), '(1, self.max_objects_in_images + 1)\n', (1810, 1845), False, 'import random\n'), ((2406, 2451), 'os.path.join', 'os.path.join', (['self.datafolder', 'FOLDER_LIST[0]'], {}), '(self.datafolder, FOLDER_LIST[0])\n', (2418, 2451), False, 'import os\n'), ((2536, 2570), 'os.path.join', 'os.path.join', (['bg_folder', 'random_bg'], {}), '(bg_folder, random_bg)\n', (2548, 2570), False, 'import os\n'), ((2596, 2615), 'PIL.Image.open', 'Image.open', (['bg_path'], {}), '(bg_path)\n', (2606, 2615), False, 'from PIL import Image, ImageOps\n'), ((2799, 2848), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'background.size'], {'color': '"""black"""'}), "('RGBA', background.size, color='black')\n", (2808, 2848), False, 'from PIL import Image, ImageOps\n'), ((5043, 5071), 'math.floor', 'math.floor', (['(0.005 * bg_width)'], {}), '(0.005 * bg_width)\n', (5053, 5071), False, 'import math\n'), ((5096, 5121), 'math.ceil', 'math.ceil', (['(0.6 * bg_width)'], {}), '(0.6 * bg_width)\n', (5105, 5121), False, 'import math\n'), ((1164, 1221), 'functions.folder_check.get_num', 'fldr_chk.get_num', (['self.output_directory', 'OUTPUT_EXTENSION'], {}), '(self.output_directory, OUTPUT_EXTENSION)\n', (1180, 1221), True, 'from functions import folder_check as fldr_chk\n'), ((2209, 2247), 'random.choice', 
'random.choice', (['self.classes_to_include'], {}), '(self.classes_to_include)\n', (2222, 2247), False, 'import random\n'), ((2491, 2512), 'os.listdir', 'os.listdir', (['bg_folder'], {}), '(bg_folder)\n', (2501, 2512), False, 'import os\n'), ((3039, 3076), 'os.path.join', 'os.path.join', (['self.efo_directory', 'obj'], {}), '(self.efo_directory, obj)\n', (3051, 3076), False, 'import os\n'), ((3219, 3245), 'os.path.join', 'os.path.join', (['fg_point', 'fg'], {}), '(fg_point, fg)\n', (3231, 3245), False, 'import os\n'), ((3405, 3436), 'os.path.join', 'os.path.join', (['msk_fldr', 'obj', 'fg'], {}), '(msk_fldr, obj, fg)\n', (3417, 3436), False, 'import os\n'), ((3531, 3550), 'PIL.Image.open', 'Image.open', (['fg_path'], {}), '(fg_path)\n', (3541, 3550), False, 'from PIL import Image, ImageOps\n'), ((3100, 3120), 'os.listdir', 'os.listdir', (['fg_point'], {}), '(fg_point)\n', (3110, 3120), False, 'import os\n'), ((3167, 3187), 'os.listdir', 'os.listdir', (['fg_point'], {}), '(fg_point)\n', (3177, 3187), False, 'import os\n'), ((3290, 3325), 'os.path.dirname', 'os.path.dirname', (['self.efo_directory'], {}), '(self.efo_directory)\n', (3305, 3325), False, 'import os\n'), ((3464, 3484), 'PIL.Image.open', 'Image.open', (['msk_path'], {}), '(msk_path)\n', (3474, 3484), False, 'from PIL import Image, ImageOps\n')] |
# Author: <NAME>
# Copyright@CUHK
# Please do not discolse this code to others
import numpy as np
import cv2 as cv
import os
from fnmatch import fnmatch
import PolyUtil
import subprocess
from database import result_db
def show_img(img, title, wait_key=0, output_path=None):
    """Display *img* in a window named *title*, optionally saving it.

    Blocks until a key press (or for *wait_key* milliseconds when non-zero).
    When *output_path* is given the image is also written to that file.
    """
    cv.imshow(title, img)
    cv.waitKey(wait_key)
    if output_path is None:
        return
    cv.imwrite(output_path, img)
def find_external_contours(gray_img):
    """Return ``(contours, hierarchy)`` for the outermost contours of *gray_img*."""
    return cv.findContours(gray_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
def find_all_contours(gray_img, contour_approx=cv.CHAIN_APPROX_SIMPLE):
    """Return ``(contours, hierarchy)`` for the full contour tree of *gray_img*."""
    contours, hierarchy = cv.findContours(gray_img, cv.RETR_TREE, contour_approx)
    return contours, hierarchy
def report_all_polygons(gray_img, save_path=None):
    """Extract Manhattan polygons from a binary mask and fracture them.

    Re-fills the mask's contour tree, upsamples it 2x, extracts polygon
    coordinates via PolyUtil, writes them to a ``.txt`` file derived from
    *save_path* and hands that file to the external ``./mask_fracturing``
    binary.  Returns the re-filled (pre-enlargement) image array.
    """
    # Re-rasterize the contour tree so nested shapes are filled consistently.
    new_img_gray = np.zeros((2048, 2048), np.uint8)
    my_m_cnts,_ = find_all_contours(gray_img.copy())
    cv.fillPoly(new_img_gray, my_m_cnts, 255)
    print(save_path)
    app_cnts, _ = cv.findContours(new_img_gray, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    new_img_gray = np.zeros((2048, 2048), np.uint8)
    # Fill with 2 then subtract 1 so interiors become +1 and background -1.
    cv.fillPoly(new_img_gray, app_cnts, 2)
    new_img_gray = new_img_gray - np.ones((2048,2048))
    # NOTE(review): astype() returns a new array; this result is discarded, so
    # the array stays float64 with -1/+1 values.  Assigning it back would turn
    # -1 into 255 and change what enlarge_img/PolyUtil receive — confirm the
    # intended dtype before "fixing" this line.
    new_img_gray.astype(np.uint8)
    my_large_polys_img = enlarge_img(new_img_gray)
    print(np.max(my_large_polys_img), np.min(my_large_polys_img), "begin extraction.")
    polys = PolyUtil.bimg_to_poly_coord(my_large_polys_img)
    my_polys = []
    for poly in polys:
        # check_polygon raises if a polygon is not rectilinear.
        check_polygon(poly)
        if filter_out_singleton_pixel(poly):
            my_polys.append(poly)
    #poly_save_path = save_path.split('.')[0] + '.txt
    poly_save_path = save_path.replace('.png','.txt')
    print(save_path)
    print('------------------------poly save path is-------------------------- ')
    print(poly_save_path)
    # One "POLY x0 y0 x1 y1 ..." line per surviving polygon.
    with open(poly_save_path, 'w') as f:
        for poly in my_polys:
            f.write("POLY")
            for p in poly:
                f.write(" %i %i" % (p[0], p[1]))
            f.write("\n")
    # Run the external fracturing tool synchronously on the polygon file.
    process = subprocess.Popen(['./mask_fracturing',poly_save_path])
    process.communicate()
    process.kill()
    process.terminate()
    return new_img_gray
def enlarge_img(img, ori=2048, mul=2):
    """Upscale *img* by an integer factor using nearest-neighbour duplication.

    Each source pixel ``img[x][y]`` is replicated into a ``mul`` x ``mul``
    block of the output, which always has shape ``(ori * mul, ori * mul)``;
    when *img* is smaller than ``ori`` x ``ori`` the remaining area is zero.

    Fixes two defects of the previous implementation: ``np.int`` (removed in
    NumPy >= 1.24) is replaced with the builtin ``int``, and factors
    ``mul > 2`` now fill the whole block instead of only a 2x2 corner.
    """
    img = np.asarray(img)
    out = np.zeros((ori * mul, ori * mul), dtype=int)
    # Replicate every pixel `mul` times along both axes in one vectorized
    # pass, then paste the result into the top-left of the output canvas.
    scaled = np.repeat(np.repeat(img, mul, axis=0), mul, axis=1)
    h, w = scaled.shape[:2]
    out[:h, :w] = scaled
    return out
def check_polygon(cnt):
    """Verify that *cnt* is a rectilinear (Manhattan) polygon.

    Every pair of consecutive vertices — including the closing edge from the
    last vertex back to the first — must share either an x or a y coordinate.
    On violation the whole polygon and the offending index are printed and
    ``NotImplementedError`` is raised; otherwise returns ``True``.
    """
    for idx, point in enumerate(cnt):
        prev = cnt[idx - 1]
        if point[0] != prev[0] and point[1] != prev[1]:
            for ele in cnt:
                print(ele)
            print(idx)
            raise NotImplementedError("Error: Not valid polygon")
    print("Valid polygon")
    return True
def filter_out_singleton_pixel(cnt):
    """Return ``False`` for tiny polygons that are likely single-pixel noise.

    GAN-OPC family masks contain floating-point pixels; binarising them leaves
    isolated pixels that each cost a full fracturing shot.  A polygon with at
    most four vertices in which some consecutive vertex pair differs by exactly
    one pixel in x or y is treated as such noise; everything else passes.
    """
    if len(cnt) > 4:
        return True
    for idx in range(len(cnt)):
        prev, cur = cnt[idx - 1], cnt[idx]
        if abs(cur[0] - prev[0]) == 1 or abs(cur[1] - prev[1]) == 1:
            return False
    return True
def manhattanize_target_masks(masks_root, output_dir):
    """Convert every ``.jpg``/``.png`` mask under *masks_root* to Manhattan polygons.

    Each matching image is loaded, converted to grayscale and fed through
    ``report_all_polygons``; outputs are written under *output_dir* as
    ``<design_name>.png`` (plus the polygon text files produced downstream).
    The parent directory of *output_dir* is created if missing.
    """
    parent = os.path.dirname(output_dir)
    if not os.path.exists(parent):
        os.makedirs(parent)
    for dirpath, _, filenames in os.walk(masks_root):
        for fname in filenames:
            if not (fnmatch(fname, "*.jpg") or fnmatch(fname, "*.png")):
                continue
            image_path = os.path.join(dirpath, fname)
            design_name = fname.split('.')[0]
            gray = cv.cvtColor(cv.imread(image_path), cv.COLOR_BGR2GRAY)
            out_path = os.path.join(output_dir, design_name + '.png')
            report_all_polygons(gray, save_path=out_path)
def get_minshots(filepath):
    """Parse a ``minShots.txt`` file into a ``{case_name: min_shots}`` dict.

    Each non-empty line is expected to look like ``"<case_name> <min_shots>"``.

    Fixes three defects of the previous implementation: the file handle is now
    closed deterministically (``with`` block instead of a leaked ``open``),
    stored shot counts no longer carry the trailing newline, and blank lines
    no longer crash the unpacking.
    """
    shots_by_case = {}
    with open(filepath) as fh:
        for raw_line in fh:
            line = raw_line.strip()
            if not line:
                continue
            case_name, min_shots = line.split(' ')
            shots_by_case[case_name] = min_shots
    return shots_by_case
# if __name__ == "__main__":
# masks_root="/path/to/your/input/folder/"
# output_dir="/path/to/your/output/folder/"
# manhattanize_target_masks(masks_root, output_dir)
if __name__ == "__main__":
    # Collect fracturing results for one (ilt_weight, pvb_weight, curvature)
    # training configuration and store them in the local sqlite result db.
    my_db = result_db('./result.db')
    base_dir = '/home/hongduo/school/develset_opc/levelset_net/iccad13_outputs'
    ilt_weight = 1.0
    pvb_weight = 7.5
    add_curv = False
    base_name = 'ckpts_{}_{}_{}'.format(ilt_weight, pvb_weight, add_curv)
    work_dir = os.path.join(base_dir, base_name)
    masks_root = os.path.join(work_dir,'mask')
    # Manhattanize in place: polygon/min-shot files land next to the masks.
    manhattanize_target_masks(masks_root, masks_root)
    best_score_name = 'best_result_ilt_{}_pvb_{}_curv_{}.txt'.format(ilt_weight, pvb_weight, add_curv)
    best_score_path = os.path.join(work_dir, best_score_name)
    min_shots_path = os.path.join(masks_root, 'minShots.txt')
    dic = get_minshots(min_shots_path)
    lines = open(best_score_path, 'r')
    for line in lines:
        # Each line: "<case> <epoch> <l2> <pv_band>"; min shots joined via dic.
        CASE_NBME, EPOCH, L2, PV_BAND = line.split(' ')
        my_db.insert_record(CASE_NBME, ilt_weight, pvb_weight, add_curv, EPOCH, L2, PV_BAND, dic[CASE_NBME])
my_db.close() | [
"subprocess.Popen",
"cv2.waitKey",
"cv2.imwrite",
"os.path.dirname",
"os.walk",
"numpy.zeros",
"numpy.ones",
"cv2.cvtColor",
"cv2.fillPoly",
"database.result_db",
"cv2.imread",
"numpy.max",
"numpy.min",
"cv2.imshow",
"os.path.join",
"fnmatch.fnmatch",
"cv2.findContours",
"PolyUtil.... | [((293, 314), 'cv2.imshow', 'cv.imshow', (['title', 'img'], {}), '(title, img)\n', (302, 314), True, 'import cv2 as cv\n'), ((320, 340), 'cv2.waitKey', 'cv.waitKey', (['wait_key'], {}), '(wait_key)\n', (330, 340), True, 'import cv2 as cv\n'), ((487, 552), 'cv2.findContours', 'cv.findContours', (['gray_img', 'cv.RETR_EXTERNAL', 'cv.CHAIN_APPROX_NONE'], {}), '(gray_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n', (502, 552), True, 'import cv2 as cv\n'), ((759, 814), 'cv2.findContours', 'cv.findContours', (['gray_img', 'cv.RETR_TREE', 'contour_approx'], {}), '(gray_img, cv.RETR_TREE, contour_approx)\n', (774, 814), True, 'import cv2 as cv\n'), ((912, 944), 'numpy.zeros', 'np.zeros', (['(2048, 2048)', 'np.uint8'], {}), '((2048, 2048), np.uint8)\n', (920, 944), True, 'import numpy as np\n'), ((1008, 1049), 'cv2.fillPoly', 'cv.fillPoly', (['new_img_gray', 'my_m_cnts', '(255)'], {}), '(new_img_gray, my_m_cnts, 255)\n', (1019, 1049), True, 'import cv2 as cv\n'), ((1093, 1160), 'cv2.findContours', 'cv.findContours', (['new_img_gray', 'cv.RETR_TREE', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(new_img_gray, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n', (1108, 1160), True, 'import cv2 as cv\n'), ((1181, 1213), 'numpy.zeros', 'np.zeros', (['(2048, 2048)', 'np.uint8'], {}), '((2048, 2048), np.uint8)\n', (1189, 1213), True, 'import numpy as np\n'), ((1219, 1257), 'cv2.fillPoly', 'cv.fillPoly', (['new_img_gray', 'app_cnts', '(2)'], {}), '(new_img_gray, app_cnts, 2)\n', (1230, 1257), True, 'import cv2 as cv\n'), ((1504, 1551), 'PolyUtil.bimg_to_poly_coord', 'PolyUtil.bimg_to_poly_coord', (['my_large_polys_img'], {}), '(my_large_polys_img)\n', (1531, 1551), False, 'import PolyUtil\n'), ((2169, 2224), 'subprocess.Popen', 'subprocess.Popen', (["['./mask_fracturing', poly_save_path]"], {}), "(['./mask_fracturing', poly_save_path])\n", (2185, 2224), False, 'import subprocess\n'), ((2383, 2423), 'numpy.zeros', 'np.zeros', (['(ori * mul, ori * mul)', 'np.int'], {}), '((ori * mul, ori * 
mul), np.int)\n', (2391, 2423), True, 'import numpy as np\n'), ((3766, 3785), 'os.walk', 'os.walk', (['masks_root'], {}), '(masks_root)\n', (3773, 3785), False, 'import os\n'), ((4746, 4770), 'database.result_db', 'result_db', (['"""./result.db"""'], {}), "('./result.db')\n", (4755, 4770), False, 'from database import result_db\n'), ((5009, 5042), 'os.path.join', 'os.path.join', (['base_dir', 'base_name'], {}), '(base_dir, base_name)\n', (5021, 5042), False, 'import os\n'), ((5061, 5091), 'os.path.join', 'os.path.join', (['work_dir', '"""mask"""'], {}), "(work_dir, 'mask')\n", (5073, 5091), False, 'import os\n'), ((5273, 5312), 'os.path.join', 'os.path.join', (['work_dir', 'best_score_name'], {}), '(work_dir, best_score_name)\n', (5285, 5312), False, 'import os\n'), ((5335, 5375), 'os.path.join', 'os.path.join', (['masks_root', '"""minShots.txt"""'], {}), "(masks_root, 'minShots.txt')\n", (5347, 5375), False, 'import os\n'), ((383, 411), 'cv2.imwrite', 'cv.imwrite', (['output_path', 'img'], {}), '(output_path, img)\n', (393, 411), True, 'import cv2 as cv\n'), ((1293, 1314), 'numpy.ones', 'np.ones', (['(2048, 2048)'], {}), '((2048, 2048))\n', (1300, 1314), True, 'import numpy as np\n'), ((1414, 1440), 'numpy.max', 'np.max', (['my_large_polys_img'], {}), '(my_large_polys_img)\n', (1420, 1440), True, 'import numpy as np\n'), ((1442, 1468), 'numpy.min', 'np.min', (['my_large_polys_img'], {}), '(my_large_polys_img)\n', (1448, 1468), True, 'import numpy as np\n'), ((3659, 3686), 'os.path.dirname', 'os.path.dirname', (['output_dir'], {}), '(output_dir)\n', (3674, 3686), False, 'import os\n'), ((3710, 3737), 'os.path.dirname', 'os.path.dirname', (['output_dir'], {}), '(output_dir)\n', (3725, 3737), False, 'import os\n'), ((3831, 3853), 'fnmatch.fnmatch', 'fnmatch', (['name', '"""*.jpg"""'], {}), "(name, '*.jpg')\n", (3838, 3853), False, 'from fnmatch import fnmatch\n'), ((3857, 3879), 'fnmatch.fnmatch', 'fnmatch', (['name', '"""*.png"""'], {}), "(name, '*.png')\n", (3864, 
3879), False, 'from fnmatch import fnmatch\n'), ((3909, 3933), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (3921, 3933), False, 'import os\n'), ((4042, 4061), 'cv2.imread', 'cv.imread', (['png_file'], {}), '(png_file)\n', (4051, 4061), True, 'import cv2 as cv\n'), ((4090, 4126), 'cv2.cvtColor', 'cv.cvtColor', (['mask', 'cv.COLOR_BGR2GRAY'], {}), '(mask, cv.COLOR_BGR2GRAY)\n', (4101, 4126), True, 'import cv2 as cv\n'), ((4156, 4202), 'os.path.join', 'os.path.join', (['output_dir', "(design_name + '.png')"], {}), "(output_dir, design_name + '.png')\n", (4168, 4202), False, 'import os\n')] |
# SHAM galaxy catalog class
# Contact: <NAME> <<EMAIL>>
import os
import numpy as np
from astropy.cosmology import FlatLambdaCDM
#from GalaxyCatalogInterface import GalaxyCatalog
GalaxyCatalog = object
class SHAMGalaxyCatalog(GalaxyCatalog):
    """
    SHAM galaxy catalog class.

    Loads a pre-computed subhalo abundance matching (SHAM) catalog from a
    ``.npz`` file and exposes its columns (positions, velocities, masses,
    SDSS magnitudes, ...) through :meth:`get_quantities`.
    """
    def __init__(self, redshift=0.062496, match_to='LiWhite', **kwargs):
        """Open the SHAM catalog for *redshift*, matched to *match_to*.

        Parameters
        ----------
        redshift : float
            Snapshot redshift; used to derive the scale factor in the filename.
        match_to : str
            Reference catalog the SHAM was matched to: "LiWhite" or "MBII".
        **kwargs
            Must contain ``base_catalog_dir``, the directory holding the
            ``SHAM_<scale>_<match_to>.npz`` files.

        Raises
        ------
        ValueError
            If *match_to* is invalid or the catalog file does not exist.
        """
        if match_to not in ('LiWhite', 'MBII'):
            raise ValueError('`match_to` must be "LiWhite" or "MBII"')
        self.match_to = match_to
        self.redshift = redshift
        self.scale = 1.0/(1.0+self.redshift)
        self.base_catalog_dir = kwargs['base_catalog_dir']
        self.filename = os.path.join(self.base_catalog_dir, 'SHAM_{:.5f}_{}.npz'.format(self.scale, self.match_to))
        if not os.path.isfile(self.filename):
            raise ValueError('{} does not exist!'.format(self.filename))
        self.npz_file = np.load(self.filename)
        # Lazily filled per-quantity cache of (possibly converted) npz columns.
        self.data_cache = {}
        self.cosmology = FlatLambdaCDM(H0=70.2, Om0=0.275, Ob0=0.046)
        # Dimensionless Hubble parameter; lengths/masses below are h-scaled.
        self._h = self.cosmology.H0.value / 100.0
        self._distmod = self.cosmology.distmod(self.redshift).value
        self.box_size = (100.0/self._h)
        self.overdensity = 97.7
        self.lightcone = False
        self.SDSS_kcorrection_z = 0.0
        # Map of public quantity name -> (npz column name, optional conversion
        # callable).  Positions/masses are de-h-scaled; observed magnitudes
        # get the distance modulus applied; None means "use the raw column".
        self.quantities = { 'redshift': ('redshift', None),
                            'stellar_mass': ('sm', None),
                            'halo_id': ('id', None),
                            'parent_halo_id': ('upid', None),
                            'positionX': ('x', lambda x: x/self._h),
                            'positionY': ('y', lambda x: x/self._h),
                            'positionZ': ('z', lambda x: x/self._h),
                            'velocityX': ('vx', None),
                            'velocityY': ('vy', None),
                            'velocityZ': ('vz', None),
                            'mass': ('mvir', lambda x: x/self._h),
                            'SDSS_u:observed:': ('AMAG[0]', lambda x: x+self._distmod),
                            'SDSS_g:observed:': ('AMAG[1]', lambda x: x+self._distmod),
                            'SDSS_r:observed:': ('AMAG[2]', lambda x: x+self._distmod),
                            'SDSS_i:observed:': ('AMAG[3]', lambda x: x+self._distmod),
                            'SDSS_z:observed:': ('AMAG[4]', lambda x: x+self._distmod),
                            'SDSS_u:rest:': ('AMAG[0]', None),
                            'SDSS_g:rest:': ('AMAG[1]', None),
                            'SDSS_r:rest:': ('AMAG[2]', None),
                            'SDSS_i:rest:': ('AMAG[3]', None),
                            'SDSS_z:rest:': ('AMAG[4]', None),
                          }
    def get_quantities(self, quantities, filters=None):
        """Return the requested catalog columns, optionally redshift-filtered.

        Parameters
        ----------
        quantities : str or sequence of str
            Quantity name(s); must be keys of ``self.quantities``.
        filters : dict, optional
            May contain ``zlo``/``zhi``; when the catalog redshift falls
            outside that range, empty arrays are returned.

        Returns
        -------
        numpy.ndarray or list of numpy.ndarray
            A single array when one quantity was requested, else a list.

        Raises
        ------
        ValueError
            If *quantities* is empty or contains unknown names.
        """
        # Bug fix: `basestring` is Python-2-only and raised NameError under
        # Python 3; `str` covers the single-name case there.
        if isinstance(quantities, str):
            quantities = [quantities]
        if not quantities:
            raise ValueError('quantities cannot be empty')
        if not all(q in self.quantities for q in quantities):
            raise ValueError('Some quantities are not available in this catalog')
        # Avoid the shared-mutable-default pitfall: default was `filters={}`.
        filters = filters or {}
        if self.redshift < filters.get('zlo', -np.inf) or self.redshift > filters.get('zhi', np.inf):
            result = [np.array([]) for _ in quantities]
        else:
            result = []
            for q in quantities:
                if q in self.data_cache:
                    result.append(self.data_cache[q])
                else:
                    key, func = self.quantities[q]
                    d = func(self.npz_file[key]) if callable(func) else self.npz_file[key]
                    self.data_cache[q] = d
                    result.append(d)
        return result if len(result) > 1 else result[0]
| [
"astropy.cosmology.FlatLambdaCDM",
"numpy.load",
"os.path.isfile",
"numpy.array"
] | [((942, 964), 'numpy.load', 'np.load', (['self.filename'], {}), '(self.filename)\n', (949, 964), True, 'import numpy as np\n'), ((1023, 1067), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': '(70.2)', 'Om0': '(0.275)', 'Ob0': '(0.046)'}), '(H0=70.2, Om0=0.275, Ob0=0.046)\n', (1036, 1067), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((812, 841), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (826, 841), False, 'import os\n'), ((3475, 3487), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3483, 3487), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import time
import torch
try:
from external.nms import soft_nms_39
except:
print('NMS not imported! If you need it,'
' do \n cd $CenterNet_ROOT/src/lib/external \n make')
from models.decode import multi_pose_decode, _topk
from models.decode import car_pose_decode
from models.utils import flip_tensor, flip_lr_off, flip_lr
from utils.image import get_affine_transform
from utils.post_process import multi_pose_post_process
from utils.post_process import car_pose_post_process
from utils.debugger import Debugger
from .base_detector import BaseDetector
from torch.onnx import OperatorExportTypes
# import onnxruntime
# import onnx
class CarPoseDetector(BaseDetector):
  """Monocular 3D car-pose detector on a CenterNet-style backbone.

  Runs the network heads (heatmap, keypoints, rotation, dimensions,
  probability), decodes them with ``car_pose_decode`` and supplies the
  pre/post-processing glue (affine warps, depth normalisation, optional
  soft-NMS, KITTI-format output) used by the BaseDetector run loop.
  """
  def __init__(self, opt, onnx=False):
    # opt: parsed command-line options; onnx=True flips process() into a
    # one-shot ONNX export mode that exits the process after saving.
    super(CarPoseDetector, self).__init__(opt)
    self.flip_idx = opt.flip_idx
    self.not_depth_guide = opt.not_depth_guide
    self.backbonea_arch = opt.arch.split('_')[0]
    self.export_onnx = onnx
  def process(self, images, depths, meta, return_time=False):
    """Run inference on a batch and decode 3D detections.

    Returns (None, dets) or (None, dets, 0) when return_time is set; the
    first slot (raw output) and the timing value are placeholders here.
    When self.export_onnx is set, exports the model to ONNX and quits.
    """
    # NOTE export ONNX
    if self.export_onnx:
      with torch.no_grad():
        onnx_path = self.opt.load_model[:-4] + ".onnx"
        # hm, features = self.model(images) # remember the order of outputs
        hm, hps, rot, dim, prob = self.model(images)
        torch.onnx.export(self.model, images,
                  onnx_path,
                  operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
                  verbose=True,
                  input_names= ['input'],
                  output_names=["hm", "hps", "rot", "dim", "prob"])
        print("Export ONNX successful. Model is saved at", onnx_path)
        quit()
    with torch.no_grad():
      # if self.not_depth_guide or self.backbonea_arch == 'dla':
      #     output = self.model(images)[-1]
      # else:
      #     output = self.model(images, depths)[-1]
      # output = self.model(images)[-1]
      # output = self.model(images)[-1]
      outputs = self.model(images)
      # hm, hps, rot, dim, prob = self.model(images)
      hm, hps, rot, dim, prob = outputs['hm'], outputs['hps'], outputs['rot'], outputs['dim'], outputs['prob']
      # In-place sigmoid turns heatmap logits into per-class center scores.
      hm = hm.sigmoid_()
      dets = car_pose_decode(
          hm, hps, dim, rot, prob,
          reg=outputs['reg'], wh=outputs['wh'], K=self.opt.K, meta=meta, const=self.const,
          dynamic_dim=self.opt.dynamic_dim, axis_head_angle=self.opt.axis_head_angle, not_joint_task=self.opt.not_joint_task)
      # dets = car_pose_decode(
      #     output['hm'], output['hps'], output['dim'], output['rot'], output['prob'],
      #     reg=output['reg'], wh=output['wh'], K=self.opt.K, meta=meta, const=self.const,
      #     dynamic_dim=self.opt.dynamic_dim, axis_head_angle=self.opt.axis_head_angle, not_joint_task=self.opt.not_joint_task)
    if return_time:
      return None, dets, 0
    else:
      return None, dets
  def preprocess_depth(self, depth):
    """Remap raw depth values non-linearly into [0, 1].

    NOTE(review): currently unused (the call in pre_process is commented
    out); appears to invert a discretized-depth encoding — confirm the
    derivation before re-enabling.
    """
    n = 40
    delta = 2 * 80 / (n * (n + 1))
    depth = 1 + 8 * (depth) / delta
    depth = -0.5 + 0.5 * np.sqrt(depth) # 0 -> 40
    depth = depth / 40 # 0 -> 1
    return depth
  def pre_process(self, image, depth, meta=None):
    """Warp image and depth to the network input size and normalize them.

    Returns (images, depths, meta): 1x3xHxW and 1x1xHxW float tensors plus a
    meta dict with the center/scale and the inverse output-to-input affine
    transform used later by the decoder.
    """
    height, width = image.shape[0:2]
    c = np.array([width / 2., height / 2.], dtype=np.float32)
    s = np.array([width, height], dtype=np.float32)
    trans_input = get_affine_transform(
      c, s, 0, [self.opt.input_w, self.opt.input_h])
    inp_image = cv2.warpAffine(
      image, trans_input, (self.opt.input_w, self.opt.input_h), flags=cv2.INTER_LINEAR)
    inp_image = (inp_image / 255).astype(np.float32)
    inp_image = (inp_image - self.mean) / self.std
    images = inp_image.transpose(2, 0, 1).reshape(
      1, 3, self.opt.input_h, self.opt.input_w)
    images = torch.from_numpy(images)
    # FIXME test depth
    # print(resized_depth.shape)
    # dummy_depth = np.random.randint(0, 10000, size = (new_height, new_width)).astype(np.uint16)
    # resized_depth = dummy_depth
    # print(resized_depth)
    # dummy_depth = np.ones_like(resized_depth) * 10 * 256
    # s = resized_depth.shape
    # resized_depth = np.random.randn(new_width, new_height, 1)
    # resized_depth = dummy_depth
    # resized_depth = np.arange(new_width * new_height).reshape(new_height,new_width)
    # resized_depth = np.clip(resized_depth, 0, 255 * 100)
    # print(resized_depth.shape)
    # resized_depth = cv2.resize(depth, (new_width, new_height))
    inp_depth = cv2.warpAffine(
      depth, trans_input, (self.opt.input_w, self.opt.input_h),
      flags=cv2.INTER_LINEAR)
    # Divide by 256: presumably the depth map is stored as uint16 with a
    # 256x fixed-point scale (KITTI convention) — TODO confirm against loader.
    inp_depth = inp_depth.astype(np.float32) / 256.0
    # NOTE test new depth preproc
    # inp_depth = self.preprocess_depth(inp_depth)
    inp_depth = inp_depth[:, :, np.newaxis]
    inp_depth = (inp_depth - self.depth_mean) / self.depth_std
    # print(np.max(inp_depth), np.min(inp_depth))
    # inp_depth = inp_depth * 10000
    depths = inp_depth.transpose(2, 0, 1).reshape(
      1, 1, self.opt.input_h, self.opt.input_w)
    depths = torch.from_numpy(depths)
    meta = {'c': c, 's': s,
        'out_height': self.opt.input_h // self.opt.down_ratio,
        'out_width': self.opt.input_w // self.opt.down_ratio}
    trans_output_inv = get_affine_transform(
      c, s, 0, [meta['out_width'], meta['out_height']], inv=1)
    trans_output_inv = torch.from_numpy(
      trans_output_inv).unsqueeze(0).to(self.opt.device)
    meta['trans_output_inv'] = trans_output_inv
    return images, depths, meta
  def post_process(self, dets, meta):
    """Strip the batch dimension and move detections to a numpy array."""
    dets = dets.squeeze(0).detach().cpu().numpy() # for batch size 1
    return dets
  def merge_outputs(self, detections):
    """Merge per-scale detections, optionally soft-NMS them, return a dict.

    Keyed by class id 1; soft_nms_39 filters results in place.
    """
    results = {}
    results[1] = np.concatenate(
        [detection[1] for detection in detections], axis=0).astype(np.float32)
    if self.opt.nms or len(self.opt.test_scales) > 1:
      soft_nms_39(results[1], Nt=0.5, method=2)
    results[1] = results[1].tolist()
    return results
  def debug(self, debugger, images, dets, output, scale=1):
    """Visualize predicted heatmaps blended over the de-normalized input.

    NOTE(review): reads output['hm'] / output['hm_hp'], but process() returns
    None for the raw output slot — verify what the caller passes here.
    """
    dets = dets.detach().cpu().numpy().copy()
    # Scale boxes and keypoint columns back to input-image resolution.
    dets[:, :, :4] *= self.opt.down_ratio
    dets[:, :, 5:39] *= self.opt.down_ratio
    img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
    img = np.clip(((
      img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)
    pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
    debugger.add_blend_img(img, pred, 'pred_hm')
    if self.opt.hm_hp:
      pred = debugger.gen_colormap_hp(
        output['hm_hp'][0].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hmhp')
  def show_results(self, debugger, image, results, calib):
    """Save confident detections in KITTI format; optionally show windows."""
    debugger.add_img(image, img_id='car_pose')
    for bbox in results:
      if bbox[4] > self.opt.vis_thresh:
        # debugger.add_coco_bbox(bbox[:4], bbox[40], bbox[4], img_id='car_pose')
        # debugger.add_kitti_hp(bbox[5:23], img_id='car_pose')
        # debugger.add_bev(bbox, img_id='car_pose',is_faster=self.opt.faster)
        # debugger.add_3d_detection(bbox, calib, img_id='car_pose')
        debugger.save_kitti_format(
          bbox, self.image_path, self.opt, img_id='car_pose')
    if self.opt.vis:
      debugger.show_all_imgs(pause=self.pause)
| [
"models.decode.car_pose_decode",
"numpy.sqrt",
"numpy.clip",
"cv2.warpAffine",
"numpy.array",
"external.nms.soft_nms_39",
"utils.image.get_affine_transform",
"torch.no_grad",
"torch.onnx.export",
"numpy.concatenate",
"torch.from_numpy"
] | [((3679, 3734), 'numpy.array', 'np.array', (['[width / 2.0, height / 2.0]'], {'dtype': 'np.float32'}), '([width / 2.0, height / 2.0], dtype=np.float32)\n', (3687, 3734), True, 'import numpy as np\n'), ((3745, 3788), 'numpy.array', 'np.array', (['[width, height]'], {'dtype': 'np.float32'}), '([width, height], dtype=np.float32)\n', (3753, 3788), True, 'import numpy as np\n'), ((3812, 3879), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', '(0)', '[self.opt.input_w, self.opt.input_h]'], {}), '(c, s, 0, [self.opt.input_w, self.opt.input_h])\n', (3832, 3879), False, 'from utils.image import get_affine_transform\n'), ((3913, 4013), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'trans_input', '(self.opt.input_w, self.opt.input_h)'], {'flags': 'cv2.INTER_LINEAR'}), '(image, trans_input, (self.opt.input_w, self.opt.input_h),\n flags=cv2.INTER_LINEAR)\n', (3927, 4013), False, 'import cv2\n'), ((4263, 4287), 'torch.from_numpy', 'torch.from_numpy', (['images'], {}), '(images)\n', (4279, 4287), False, 'import torch\n'), ((5007, 5107), 'cv2.warpAffine', 'cv2.warpAffine', (['depth', 'trans_input', '(self.opt.input_w, self.opt.input_h)'], {'flags': 'cv2.INTER_LINEAR'}), '(depth, trans_input, (self.opt.input_w, self.opt.input_h),\n flags=cv2.INTER_LINEAR)\n', (5021, 5107), False, 'import cv2\n'), ((5615, 5639), 'torch.from_numpy', 'torch.from_numpy', (['depths'], {}), '(depths)\n', (5631, 5639), False, 'import torch\n'), ((5842, 5919), 'utils.image.get_affine_transform', 'get_affine_transform', (['c', 's', '(0)', "[meta['out_width'], meta['out_height']]"], {'inv': '(1)'}), "(c, s, 0, [meta['out_width'], meta['out_height']], inv=1)\n", (5862, 5919), False, 'from utils.image import get_affine_transform\n'), ((1998, 2013), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2011, 2013), False, 'import torch\n'), ((2574, 2826), 'models.decode.car_pose_decode', 'car_pose_decode', (['hm', 'hps', 'dim', 'rot', 'prob'], {'reg': "outputs['reg']", 'wh': 
"outputs['wh']", 'K': 'self.opt.K', 'meta': 'meta', 'const': 'self.const', 'dynamic_dim': 'self.opt.dynamic_dim', 'axis_head_angle': 'self.opt.axis_head_angle', 'not_joint_task': 'self.opt.not_joint_task'}), "(hm, hps, dim, rot, prob, reg=outputs['reg'], wh=outputs[\n 'wh'], K=self.opt.K, meta=meta, const=self.const, dynamic_dim=self.opt.\n dynamic_dim, axis_head_angle=self.opt.axis_head_angle, not_joint_task=\n self.opt.not_joint_task)\n", (2589, 2826), False, 'from models.decode import car_pose_decode\n'), ((6517, 6558), 'external.nms.soft_nms_39', 'soft_nms_39', (['results[1]'], {'Nt': '(0.5)', 'method': '(2)'}), '(results[1], Nt=0.5, method=2)\n', (6528, 6558), False, 'from external.nms import soft_nms_39\n'), ((1262, 1277), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1275, 1277), False, 'import torch\n'), ((1504, 1710), 'torch.onnx.export', 'torch.onnx.export', (['self.model', 'images', 'onnx_path'], {'operator_export_type': 'OperatorExportTypes.ONNX_ATEN_FALLBACK', 'verbose': '(True)', 'input_names': "['input']", 'output_names': "['hm', 'hps', 'rot', 'dim', 'prob']"}), "(self.model, images, onnx_path, operator_export_type=\n OperatorExportTypes.ONNX_ATEN_FALLBACK, verbose=True, input_names=[\n 'input'], output_names=['hm', 'hps', 'rot', 'dim', 'prob'])\n", (1521, 1710), False, 'import torch\n'), ((3489, 3503), 'numpy.sqrt', 'np.sqrt', (['depth'], {}), '(depth)\n', (3496, 3503), True, 'import numpy as np\n'), ((6348, 6414), 'numpy.concatenate', 'np.concatenate', (['[detection[1] for detection in detections]'], {'axis': '(0)'}), '([detection[1] for detection in detections], axis=0)\n', (6362, 6414), True, 'import numpy as np\n'), ((6910, 6963), 'numpy.clip', 'np.clip', (['((img * self.std + self.mean) * 255.0)', '(0)', '(255)'], {}), '((img * self.std + self.mean) * 255.0, 0, 255)\n', (6917, 6963), True, 'import numpy as np\n'), ((5960, 5994), 'torch.from_numpy', 'torch.from_numpy', (['trans_output_inv'], {}), '(trans_output_inv)\n', (5976, 5994), 
False, 'import torch\n')] |
"""
General Introduction:
* the objective of this script is that students could see evaluation process of their models against other RL models
* students could evaluate their best trained models with this script
* the evaluation is done againts uploaded default best trained models with sac with default parameters
* students should give path to their best models in LOAD_CUSTOM_MODEL
* opponent vehicle number could be changed in OPPONENT_NUM and their initial racing positions in AGENT_LOCATIONS
* by changing CONTROL_OTHER_AGENTS boolean students could evaluate their models against default RL trained model or IDM (autopilot) vehicles
* there are already designed bult-in tracks which could be changed from the list: [HungaryGrandPrix, DutchGrandPrix, CircularRoad, StraightRoad]
* its important to modify load_checkpoint() function if your model's network structure is not default Soft-Actor-Critic Network
* evaluation in each step is done until NUM_EVAL_STEPS iteration number is reached, however this could be changed
* in the competition, student will race their models against each others RL models
"""
import torch
import simstar
import numpy as np
from simstarEnv import SimstarEnv
from collections import namedtuple
from model import Model
# Default device for the evaluated agent's networks.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# user's own best model could be loaded from saved_models folder
# TODO: right now default model is loaded, however users should evaluate their own models
LOAD_CUSTOM_MODEL = 'default_model/best_253953.dat'
# default model is trained with given sac agent code and training is breaked at 320K steps
LOAD_DEFAULT_MODEL = 'default_model/best_253953.dat'
# Number of evaluation races and max environment steps per race.
NUM_EVAL_EPISODE = 5
NUM_EVAL_STEPS = 4000
ADD_OPPONENTS = True
OPPONENT_NUM = 5
# True: controls opponent vehicles with loaded default model weights
# False: opponent vehicles will be controled with IDM (Intelligent Driver Model)
CONTROL_OTHER_AGENTS = True
# initial locations of the opponents could be defined in meters with respect to the main agent
AGENT_LOCATIONS = [25, 50, 75, 100, 125]
# Model-controlled opponents start from rest; IDM opponents get rolling starts.
if CONTROL_OTHER_AGENTS:
    AGENT_INIT_SPEEDS = [0, 0, 0, 0, 0]
else:
    AGENT_INIT_SPEEDS = [45, 80, 55, 100, 40]
def evaluate(port=8080):
    """Evaluate the loaded SAC policy over ``NUM_EVAL_EPISODE`` races.

    Creates a SimStar environment on the HungaryGrandPrix track, loads the
    user's checkpoint into the agent (and, when ``CONTROL_OTHER_AGENTS`` is
    set, a second model that drives the opponents), rolls out up to
    ``NUM_EVAL_STEPS`` steps per episode and prints per-episode and average
    rewards.

    Fixes two defects of the previous version: the per-episode summary
    printed the inner step counter instead of the episode index, and the
    reset-time state vector used a different feature ordering than the
    per-step and opponent state vectors.
    """
    env = SimstarEnv(track=simstar.Environments.HungaryGrandPrix, port=port, add_opponents=ADD_OPPONENTS, num_opponents=OPPONENT_NUM, speed_up=1, synronized_mode=True)
    # update agent init configs
    env.agent_locations = AGENT_LOCATIONS
    env.agent_speeds = AGENT_INIT_SPEEDS
    # total length of chosen observation states
    insize = 4 + env.track_sensor_size + env.opponent_sensor_size
    outsize = env.action_space.shape[0]
    hyperparams = {
        "lrvalue": 0.0005,
        "lrpolicy": 0.0001,
        "gamma": 0.97,
        "episodes": 15000,
        "buffersize": 250000,
        "tau": 0.001,
        "batchsize": 64,
        "alpha": 0.2,
        "maxlength": 10000,
        "hidden": 256
    }
    HyperParams = namedtuple("HyperParams", hyperparams.keys())
    hyprm = HyperParams(**hyperparams)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # load actor network from checkpoint
    agent = Model(env=env, params=hyprm, n_insize=insize, n_outsize=outsize).to(device)
    load_checkpoint(agent)
    if CONTROL_OTHER_AGENTS:
        # NOTE(review): the opponent model stays on CPU while the agent is
        # moved to `device`; confirm Model.select_action copes before changing.
        opponent_agent = Model(env=env, params=hyprm, n_insize=insize, n_outsize=outsize)
        load_checkpoint(opponent_agent)
    total_reward = 0
    for eps in range(NUM_EVAL_EPISODE):
        obs = env.reset()
        # Bug fix: build the reset-time state with the same feature ordering
        # as `next_state` and the opponent states below (previously
        # track/trackPos preceded the speeds).
        state = np.hstack((obs.angle, obs.speedX, obs.speedY, obs.opponents, obs.track, obs.trackPos))
        agent_observations = env.get_agent_observations()
        if CONTROL_OTHER_AGENTS:
            env.change_opponent_control_to_api()
        agent_actions = []
        episode_reward = 0
        for step in range(NUM_EVAL_STEPS):
            action = np.array(agent.select_action(state=state))
            if CONTROL_OTHER_AGENTS:
                # set other agent actions
                env.set_agent_actions(agent_actions)
            obs, reward, done, summary = env.step(action)
            next_state = np.hstack((obs.angle, obs.speedX, obs.speedY, obs.opponents, obs.track, obs.trackPos))
            if CONTROL_OTHER_AGENTS:
                agent_actions = []
                for agent_obs in agent_observations:
                    agent_state = np.hstack((agent_obs.angle, agent_obs.speedX, agent_obs.speedY, agent_obs.opponents, agent_obs.track, agent_obs.trackPos))
                    agent_action = np.array(opponent_agent.select_action(state=agent_state))
                    agent_actions.append(agent_action)
                # get other agent observation
                agent_observations = env.get_agent_observations()
            episode_reward += reward
            if done:
                # keep going after accidents; stop on any other end reason
                if "accident" != summary['end_reason']:
                    break
            state = next_state
        total_reward += episode_reward
        # Bug fix: report the episode index, not the inner step counter.
        print("Episode: %d, Reward: %.1f" % (eps, episode_reward))
    print("Average reward over %d episodes: %.1f" % (NUM_EVAL_EPISODE, total_reward / NUM_EVAL_EPISODE))
def load_checkpoint(agent, path=None):
    """Load actor/critic weights from a checkpoint file into *agent* in place.

    Parameters
    ----------
    agent : Model
        SAC model whose ``actor``, ``critic_1`` and ``critic_2`` networks are
        populated with the stored state dicts.
    path : str, optional
        Checkpoint file to load.  Defaults to ``LOAD_CUSTOM_MODEL`` (the
        previous hard-coded behavior).  Callers that want the reference
        opponent weights can now pass ``LOAD_DEFAULT_MODEL`` explicitly —
        previously that constant was defined but never used.

    Raises
    ------
    FileNotFoundError
        If the checkpoint file does not exist.
    """
    if path is None:
        path = LOAD_CUSTOM_MODEL
    try:
        checkpoint = torch.load(path)
        print("keys are: ", checkpoint.keys())
        agent.actor.load_state_dict(checkpoint['actor_state_dict'])
        agent.critic_1.load_state_dict(checkpoint['critic_1_state_dict'])
        agent.critic_2.load_state_dict(checkpoint['critic_2_state_dict'])
    except FileNotFoundError as err:
        # Chain the original error so the missing path stays visible.
        raise FileNotFoundError("custom model weights are not found") from err
if __name__ == "__main__":
    # Entry point: run the evaluation races against the default SimStar port.
    evaluate()
"simstarEnv.SimstarEnv",
"torch.load",
"model.Model",
"numpy.hstack",
"torch.cuda.is_available"
] | [((2272, 2437), 'simstarEnv.SimstarEnv', 'SimstarEnv', ([], {'track': 'simstar.Environments.HungaryGrandPrix', 'port': 'port', 'add_opponents': 'ADD_OPPONENTS', 'num_opponents': 'OPPONENT_NUM', 'speed_up': '(1)', 'synronized_mode': '(True)'}), '(track=simstar.Environments.HungaryGrandPrix, port=port,\n add_opponents=ADD_OPPONENTS, num_opponents=OPPONENT_NUM, speed_up=1,\n synronized_mode=True)\n', (2282, 2437), False, 'from simstarEnv import SimstarEnv\n'), ((1327, 1352), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1350, 1352), False, 'import torch\n'), ((3384, 3448), 'model.Model', 'Model', ([], {'env': 'env', 'params': 'hyprm', 'n_insize': 'insize', 'n_outsize': 'outsize'}), '(env=env, params=hyprm, n_insize=insize, n_outsize=outsize)\n', (3389, 3448), False, 'from model import Model\n'), ((3594, 3685), 'numpy.hstack', 'np.hstack', (['(obs.angle, obs.track, obs.trackPos, obs.speedX, obs.speedY, obs.opponents)'], {}), '((obs.angle, obs.track, obs.trackPos, obs.speedX, obs.speedY, obs.\n opponents))\n', (3603, 3685), True, 'import numpy as np\n'), ((5345, 5374), 'torch.load', 'torch.load', (['LOAD_CUSTOM_MODEL'], {}), '(LOAD_CUSTOM_MODEL)\n', (5355, 5374), False, 'import torch\n'), ((3131, 3156), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3154, 3156), False, 'import torch\n'), ((3222, 3286), 'model.Model', 'Model', ([], {'env': 'env', 'params': 'hyprm', 'n_insize': 'insize', 'n_outsize': 'outsize'}), '(env=env, params=hyprm, n_insize=insize, n_outsize=outsize)\n', (3227, 3286), False, 'from model import Model\n'), ((4201, 4292), 'numpy.hstack', 'np.hstack', (['(obs.angle, obs.speedX, obs.speedY, obs.opponents, obs.track, obs.trackPos)'], {}), '((obs.angle, obs.speedX, obs.speedY, obs.opponents, obs.track, obs\n .trackPos))\n', (4210, 4292), True, 'import numpy as np\n'), ((4448, 4575), 'numpy.hstack', 'np.hstack', (['(agent_obs.angle, agent_obs.speedX, agent_obs.speedY, agent_obs.opponents,\n 
agent_obs.track, agent_obs.trackPos)'], {}), '((agent_obs.angle, agent_obs.speedX, agent_obs.speedY, agent_obs.\n opponents, agent_obs.track, agent_obs.trackPos))\n', (4457, 4575), True, 'import numpy as np\n')] |
from __future__ import print_function, division, absolute_import
import os
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
from openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse
import dymos as dm
from dymos.examples.hyper_sensitive.hyper_sensitive_ode import HyperSensitiveODE
from dymos.examples.brachistochrone.brachistochrone_ode import BrachistochroneODE
from dymos.examples.brachistochrone.brachistochrone_vector_states_ode import BrachistochroneVectorStatesODE
from openmdao.utils.general_utils import set_pyoptsparse_opt
# Resolve the pyoptsparse optimizer name: prefer IPOPT, fall back to whatever
# is installed (e.g. SLSQP); tests below skip themselves when IPOPT is absent.
_, optimizer = set_pyoptsparse_opt('IPOPT', fallback=True)
@use_tempdirs
class TestRunProblem(unittest.TestCase):
    """Integration tests of ``dymos.run_problem`` on benchmark optimal-control problems.

    Each test builds a problem, solves it (optionally with hp grid refinement
    and/or explicit simulation), and checks the optimum against a known
    analytic or reference value.
    """

    @unittest.skipIf(optimizer != 'IPOPT', 'IPOPT not available')
    @require_pyoptsparse(optimizer='SLSQP')
    def test_run_HS_problem_radau(self):
        """Hyper-sensitive problem, Radau transcription, hp refinement.

        The converged control endpoints and objective are compared against
        the closed-form solution of this linear problem.
        """
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.declare_coloring()
        p.driver.options['optimizer'] = optimizer
        # Solver-specific settings; only one branch applies per test run.
        if optimizer == 'SNOPT':
            p.driver.opt_settings['Major iterations limit'] = 200
            p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-6
            p.driver.opt_settings['Major optimality tolerance'] = 1.0E-6
        elif optimizer == 'IPOPT':
            p.driver.opt_settings['hessian_approximation'] = 'limited-memory'
            # p.driver.opt_settings['nlp_scaling_method'] = 'user-scaling'
            p.driver.opt_settings['print_level'] = 4
            p.driver.opt_settings['max_iter'] = 200
            p.driver.opt_settings['linear_solver'] = 'mumps'
        traj = p.model.add_subsystem('traj', dm.Trajectory())
        phase0 = traj.add_phase('phase0', dm.Phase(ode_class=HyperSensitiveODE,
                                          transcription=dm.Radau(num_segments=10,
                                                                  order=3)))
        phase0.set_time_options(fix_initial=True, fix_duration=True)
        phase0.add_state('x', fix_initial=True, fix_final=False, rate_source='x_dot', targets=['x'])
        phase0.add_state('xL', fix_initial=True, fix_final=False, rate_source='L', targets=['xL'])
        phase0.add_control('u', opt=True, targets=['u'], rate_continuity=False)
        phase0.add_boundary_constraint('x', loc='final', equals=1)
        phase0.add_objective('xL', loc='final')
        phase0.set_refine_options(refine=True, tol=1e-6)
        p.setup(check=True)
        tf = np.float128(20)
        # Linear initial guesses for states/control over the phase.
        p.set_val('traj.phase0.states:x', phase0.interp('x', [1.5, 1]))
        p.set_val('traj.phase0.states:xL', phase0.interp('xL', [0, 1]))
        p.set_val('traj.phase0.t_initial', 0)
        p.set_val('traj.phase0.t_duration', tf)
        p.set_val('traj.phase0.controls:u', phase0.interp('u', [-0.6, 2.4]))
        dm.run_problem(p, refine_method='hp', refine_iteration_limit=10)
        # Closed-form solution of the hyper-sensitive problem: c1/c2 are the
        # integration constants; ui/uf the initial/final optimal control and
        # J the optimal objective value.
        sqrt_two = np.sqrt(2)
        val = sqrt_two * tf
        c1 = (1.5 * np.exp(-val) - 1) / (np.exp(-val) - np.exp(val))
        c2 = (1 - 1.5 * np.exp(val)) / (np.exp(-val) - np.exp(val))
        ui = c1 * (1 + sqrt_two) + c2 * (1 - sqrt_two)
        uf = c1 * (1 + sqrt_two) * np.exp(val) + c2 * (1 - sqrt_two) * np.exp(-val)
        J = 0.5 * (c1 ** 2 * (1 + sqrt_two) * np.exp(2 * val) + c2 ** 2 * (1 - sqrt_two) *
                   np.exp(-2 * val) - (1 + sqrt_two) * c1 ** 2 - (1 - sqrt_two) * c2 ** 2)
        assert_near_equal(p.get_val('traj.phase0.timeseries.controls:u')[0],
                          ui,
                          tolerance=5e-4)
        assert_near_equal(p.get_val('traj.phase0.timeseries.controls:u')[-1],
                          uf,
                          tolerance=5e-4)
        assert_near_equal(p.get_val('traj.phase0.timeseries.states:xL')[-1],
                          J,
                          tolerance=5e-4)

    @unittest.skipIf(optimizer != 'IPOPT', 'IPOPT not available')
    @require_pyoptsparse(optimizer='SLSQP')
    def test_run_HS_problem_gl(self):
        """Hyper-sensitive problem, Gauss-Lobatto transcription, hp refinement.

        Same analytic comparison as the Radau variant, with looser control
        tolerances (1e-2) appropriate to this transcription/grid.
        """
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.declare_coloring()
        p.driver.options['optimizer'] = optimizer
        # Solver-specific settings; only one branch applies per test run.
        if optimizer == 'SNOPT':
            p.driver.opt_settings['Major iterations limit'] = 100
            p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-6
            p.driver.opt_settings['Major optimality tolerance'] = 1.0E-6
        elif optimizer == 'IPOPT':
            p.driver.opt_settings['hessian_approximation'] = 'limited-memory'
            # p.driver.opt_settings['nlp_scaling_method'] = 'user-scaling'
            p.driver.opt_settings['print_level'] = 4
            p.driver.opt_settings['linear_solver'] = 'mumps'
        traj = p.model.add_subsystem('traj', dm.Trajectory())
        phase0 = traj.add_phase('phase0', dm.Phase(ode_class=HyperSensitiveODE,
                                          transcription=dm.GaussLobatto(num_segments=20,
                                                                         order=3)))
        phase0.set_time_options(fix_initial=True, fix_duration=True)
        phase0.add_state('x', fix_initial=True, fix_final=False, rate_source='x_dot', targets=['x'])
        phase0.add_state('xL', fix_initial=True, fix_final=False, rate_source='L', targets=['xL'])
        phase0.add_control('u', opt=True, targets=['u'], rate_continuity=False)
        phase0.add_boundary_constraint('x', loc='final', equals=1)
        phase0.add_objective('xL', loc='final')
        phase0.set_refine_options(refine=True, tol=1.0E-5)
        p.setup(check=True)
        tf = 20
        # Linear initial guesses for states/control over the phase.
        p.set_val('traj.phase0.states:x', phase0.interp('x', [1.5, 1]))
        p.set_val('traj.phase0.states:xL', phase0.interp('xL', [0, 1]))
        p.set_val('traj.phase0.t_initial', 0)
        p.set_val('traj.phase0.t_duration', tf)
        p.set_val('traj.phase0.controls:u', phase0.interp('u', [-0.6, 2.4]))
        dm.run_problem(p, refine_method='hp', refine_iteration_limit=5)
        # Closed-form solution of the hyper-sensitive problem (see Radau test).
        sqrt_two = np.sqrt(2)
        val = sqrt_two * tf
        c1 = (1.5 * np.exp(-val) - 1) / (np.exp(-val) - np.exp(val))
        c2 = (1 - 1.5 * np.exp(val)) / (np.exp(-val) - np.exp(val))
        ui = c1 * (1 + sqrt_two) + c2 * (1 - sqrt_two)
        uf = c1 * (1 + sqrt_two) * np.exp(val) + c2 * (1 - sqrt_two) * np.exp(-val)
        J = 0.5 * (c1 ** 2 * (1 + sqrt_two) * np.exp(2 * val) + c2 ** 2 * (1 - sqrt_two) *
                   np.exp(-2 * val) - (1 + sqrt_two) * c1 ** 2 - (1 - sqrt_two) * c2 ** 2)
        assert_near_equal(p.get_val('traj.phase0.timeseries.controls:u')[0],
                          ui,
                          tolerance=1e-2)
        assert_near_equal(p.get_val('traj.phase0.timeseries.controls:u')[-1],
                          uf,
                          tolerance=1e-2)
        assert_near_equal(p.get_val('traj.phase0.timeseries.states:xL')[-1],
                          J,
                          tolerance=5e-4)

    @require_pyoptsparse(optimizer='SLSQP')
    def test_run_brachistochrone_problem(self):
        """Brachistochrone with Radau transcription; checks the optimal time
        (1.8016 s) and that the default solution record file is written."""
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.declare_coloring()
        p.driver.options['optimizer'] = 'SLSQP'
        traj = p.model.add_subsystem('traj', dm.Trajectory())
        phase0 = traj.add_phase('phase0', dm.Phase(ode_class=BrachistochroneODE,
                                          transcription=dm.Radau(num_segments=10,
                                                                  order=3)))
        phase0.set_time_options(fix_initial=True, fix_duration=False)
        phase0.add_state('x', fix_initial=True, fix_final=False)
        phase0.add_state('y', fix_initial=True, fix_final=False)
        phase0.add_state('v', fix_initial=True, fix_final=False)
        phase0.add_control('theta', continuity=True, rate_continuity=True,
                           units='deg', lower=0.01, upper=179.9)
        phase0.add_parameter('g', units='m/s**2', val=9.80665)
        phase0.add_boundary_constraint('x', loc='final', equals=10)
        phase0.add_boundary_constraint('y', loc='final', equals=5)
        # Minimize time at the end of the phase
        phase0.add_objective('time_phase', loc='final', scaler=10)
        phase0.set_refine_options(refine=True)
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True)
        p.set_val('traj.phase0.t_initial', 0.0)
        p.set_val('traj.phase0.t_duration', 2.0)
        p.set_val('traj.phase0.states:x', phase0.interp('x', [0, 10]))
        p.set_val('traj.phase0.states:y', phase0.interp('y', [10, 5]))
        p.set_val('traj.phase0.states:v', phase0.interp('v', [0, 9.9]))
        p.set_val('traj.phase0.controls:theta', phase0.interp('theta', [5, 100]))
        p.set_val('traj.phase0.parameters:g', 9.80665)
        dm.run_problem(p)
        self.assertTrue(os.path.exists('dymos_solution.db'))
        # Assert the results are what we expect.
        cr = om.CaseReader('dymos_solution.db')
        case = cr.get_case('final')
        assert_almost_equal(case.outputs['traj.phase0.timeseries.time'].max(), 1.8016, decimal=4)

    @require_pyoptsparse(optimizer='SLSQP')
    def test_run_brachistochrone_vector_states_problem(self):
        """Brachistochrone with the (x, y) position as a single vector state;
        checks the optimal time after hp refinement."""
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.declare_coloring()
        p.driver.options['optimizer'] = 'SLSQP'
        phase = dm.Phase(ode_class=BrachistochroneVectorStatesODE,
                         transcription=dm.Radau(num_segments=1, order=3))
        p.model.add_subsystem('phase0', phase)
        phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
        # Only the first component of 'pos' (x) is fixed at the final time;
        # the y component is constrained below via indices=[1].
        phase.add_state('pos', fix_initial=True, fix_final=[True, False])
        phase.add_state('v', fix_initial=True, fix_final=False)
        phase.add_control('theta', units='deg', rate_continuity=False, lower=0.01, upper=179.9)
        phase.add_parameter('g', units='m/s**2', opt=False, val=9.80665)
        phase.add_boundary_constraint('pos', loc='final', equals=5, indices=[1])
        # Minimize time at the end of the phase
        phase.add_objective('time', loc='final', scaler=10)
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True, force_alloc_complex=True)
        p['phase0.t_initial'] = 0.0
        p['phase0.t_duration'] = 2.0
        pos0 = [0, 10]
        posf = [10, 5]
        p['phase0.states:pos'] = phase.interp('pos', [pos0, posf])
        p['phase0.states:v'] = phase.interp('v', [0, 9.9])
        p['phase0.controls:theta'] = phase.interp('theta', [5, 100])
        p['phase0.parameters:g'] = 9.80665
        dm.run_problem(p, refine_iteration_limit=5)
        assert_near_equal(p.get_val('phase0.time')[-1], 1.8016, tolerance=1.0E-3)

    @require_pyoptsparse(optimizer='SLSQP')
    def test_run_brachistochrone_problem_with_simulate(self):
        """Same as test_run_brachistochrone_problem but with simulate=True;
        verifies the solution record and the optimal time."""
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.declare_coloring()
        p.driver.options['optimizer'] = 'SLSQP'
        traj = p.model.add_subsystem('traj', dm.Trajectory())
        phase0 = traj.add_phase('phase0', dm.Phase(ode_class=BrachistochroneODE,
                                          transcription=dm.Radau(num_segments=10,
                                                                  order=3)))
        phase0.set_time_options(fix_initial=True, fix_duration=False)
        phase0.add_state('x', fix_initial=True, fix_final=False)
        phase0.add_state('y', fix_initial=True, fix_final=False)
        phase0.add_state('v', fix_initial=True, fix_final=False)
        phase0.add_control('theta', continuity=True, rate_continuity=True,
                           units='deg', lower=0.01, upper=179.9)
        phase0.add_parameter('g', units='m/s**2', val=9.80665)
        phase0.add_boundary_constraint('x', loc='final', equals=10)
        phase0.add_boundary_constraint('y', loc='final', equals=5)
        # Minimize time at the end of the phase
        phase0.add_objective('time_phase', loc='final', scaler=10)
        phase0.set_refine_options(refine=True)
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True)
        p.set_val('traj.phase0.t_initial', 0.0)
        p.set_val('traj.phase0.t_duration', 2.0)
        p.set_val('traj.phase0.states:x', phase0.interp('x', [0, 10]))
        p.set_val('traj.phase0.states:y', phase0.interp('y', [10, 5]))
        p.set_val('traj.phase0.states:v', phase0.interp('v', [0, 9.9]))
        p.set_val('traj.phase0.controls:theta', phase0.interp('theta', [5, 100]))
        p.set_val('traj.phase0.parameters:g', 9.80665)
        dm.run_problem(p, simulate=True)
        self.assertTrue(os.path.exists('dymos_solution.db'))
        # Assert the results are what we expect.
        cr = om.CaseReader('dymos_solution.db')
        case = cr.get_case('final')
        assert_almost_equal(case.outputs['traj.phase0.timeseries.time'].max(), 1.8016, decimal=4)

    def test_modify_problem(self):
        """Restart a van der Pol problem from a recorded simulation and verify
        that re-simulating it reproduces the recorded trajectories."""
        from dymos.examples.vanderpol.vanderpol_dymos import vanderpol
        from dymos.run_problem import run_problem
        from scipy.interpolate import interp1d
        from numpy.testing import assert_almost_equal
        # Create the Dymos problem instance
        p = vanderpol(transcription='gauss-lobatto', num_segments=75)
        # Run the problem (simulate only)
        p.run_model()
        # simulate and record
        p.model.traj.simulate(record_file='vanderpol_simulation.sql')
        # create a new problem for restart to simulate a different command line execution
        q = vanderpol(transcription='gauss-lobatto', num_segments=75)
        # # Run the model
        run_problem(q, restart='vanderpol_simulation.sql')
        s = q.model.traj.simulate(rtol=1.0E-9, atol=1.0E-9)
        # get_val returns data for duplicate time points; remove them before interpolating
        tq = q.get_val('traj.phase0.timeseries.time')[:, 0]
        nodup = np.insert(tq[1:] != tq[:-1], 0, True)
        tq = tq[nodup]
        x1q = q.get_val('traj.phase0.timeseries.states:x1')[:, 0][nodup]
        x0q = q.get_val('traj.phase0.timeseries.states:x0')[:, 0][nodup]
        uq = q.get_val('traj.phase0.timeseries.controls:u')[:, 0][nodup]
        ts = s.get_val('traj.phase0.timeseries.time')[:, 0]
        nodup = np.insert(ts[1:] != ts[:-1], 0, True)
        ts = ts[nodup]
        x1s = s.get_val('traj.phase0.timeseries.states:x1')[:, 0][nodup]
        x0s = s.get_val('traj.phase0.timeseries.states:x0')[:, 0][nodup]
        us = s.get_val('traj.phase0.timeseries.controls:u')[:, 0][nodup]
        # create interpolation functions so that values can be looked up at matching time points
        fx1s = interp1d(ts, x1s, kind='cubic')
        fx0s = interp1d(ts, x0s, kind='cubic')
        fus = interp1d(ts, us, kind='cubic')
        assert_almost_equal(x1q, fx1s(tq), decimal=2)
        assert_almost_equal(x0q, fx0s(tq), decimal=2)
        assert_almost_equal(uq, fus(tq), decimal=5)
@use_tempdirs
@require_pyoptsparse(optimizer='SLSQP')
class TestRunProblemPlotting(unittest.TestCase):
    """Tests of the plotting and record-file options of ``dymos.run_problem``.

    ``setUp`` builds (but does not run) a brachistochrone problem; each test
    runs it with different plot/record options and checks the files produced.
    """

    # Timeseries variables whose plot files the plotting tests check.
    # Previously this list (and the existence-check loop) was copy-pasted
    # into five tests; it is factored out here so the tests stay consistent.
    PLOT_VARS = ['time_phase', 'states:x', 'state_rates:x', 'states:y',
                 'state_rates:y', 'states:v',
                 'state_rates:v', 'controls:theta', 'control_rates:theta_rate',
                 'control_rates:theta_rate2', 'parameters:g']

    def setUp(self):
        """Build the brachistochrone problem shared by every test."""
        p = om.Problem(model=om.Group())
        p.driver = om.pyOptSparseDriver()
        p.driver.declare_coloring()
        p.driver.options['optimizer'] = 'SLSQP'
        traj = p.model.add_subsystem('traj', dm.Trajectory())
        phase0 = traj.add_phase('phase0', dm.Phase(ode_class=BrachistochroneODE,
                                          transcription=dm.Radau(num_segments=10,
                                                                  order=3)))
        phase0.set_time_options(fix_initial=True, fix_duration=True)
        phase0.add_state('x', fix_initial=True, fix_final=False)
        phase0.add_state('y', fix_initial=True, fix_final=False)
        phase0.add_state('v', fix_initial=True, fix_final=False)
        phase0.add_control('theta', continuity=True, rate_continuity=True,
                           units='deg', lower=0.01, upper=179.9)
        phase0.add_parameter('g', units='m/s**2', val=9.80665)
        phase0.add_boundary_constraint('x', loc='final', equals=10)
        phase0.add_boundary_constraint('y', loc='final', equals=5)
        # Minimize time at the end of the phase
        phase0.add_objective('time_phase', loc='final', scaler=10)
        phase0.set_refine_options(refine=True)
        p.model.linear_solver = om.DirectSolver()
        p.setup(check=True)
        p.set_val('traj.phase0.t_initial', 0.0)
        p.set_val('traj.phase0.t_duration', 2.0)
        p.set_val('traj.phase0.states:x', phase0.interp('x', [0, 10]))
        p.set_val('traj.phase0.states:y', phase0.interp('y', [10, 5]))
        p.set_val('traj.phase0.states:v', phase0.interp('v', [0, 9.9]))
        p.set_val('traj.phase0.controls:theta', phase0.interp('theta', [5, 100]))
        p.set_val('traj.phase0.parameters:g', 9.80665)
        self.p = p

    def _assert_plot_files(self, should_exist, plot_dir='plots'):
        """Assert that the plot file for every entry of PLOT_VARS does
        (or does not) exist under *plot_dir*."""
        for varname in self.PLOT_VARS:
            path = f'{plot_dir}/{varname.replace(":", "_")}.png'
            if should_exist:
                self.assertTrue(os.path.exists(path))
            else:
                self.assertFalse(os.path.exists(path))

    def test_run_brachistochrone_problem_make_plots(self):
        """make_plots=True writes one PNG per timeseries variable."""
        dm.run_problem(self.p, make_plots=True)
        self._assert_plot_files(should_exist=True)

    def test_run_brachistochrone_problem_make_plots_set_plot_dir(self):
        """plot_dir redirects the plot output directory."""
        plot_dir = "test_plot_dir"
        dm.run_problem(self.p, make_plots=True, plot_dir=plot_dir)
        self._assert_plot_files(should_exist=True, plot_dir=plot_dir)

    def test_run_brachistochrone_problem_do_not_make_plots(self):
        """make_plots=False produces no plot files."""
        dm.run_problem(self.p, make_plots=False)
        self._assert_plot_files(should_exist=False)

    def test_run_brachistochrone_problem_set_simulation_record_file(self):
        """simulation_record_file controls the simulation record path."""
        simulation_record_file = 'simulation_record_file.db'
        dm.run_problem(self.p, simulate=True, simulation_record_file=simulation_record_file)
        self.assertTrue(os.path.exists(simulation_record_file))

    def test_run_brachistochrone_problem_set_solution_record_file(self):
        """solution_record_file controls the solution record path."""
        solution_record_file = 'solution_record_file.db'
        dm.run_problem(self.p, solution_record_file=solution_record_file)
        self.assertTrue(os.path.exists(solution_record_file))

    def test_run_brachistochrone_problem_plot_simulation(self):
        """Plots are still produced when simulate=True."""
        dm.run_problem(self.p, make_plots=True, simulate=True)
        self._assert_plot_files(should_exist=True)

    def test_run_brachistochrone_problem_plot_no_simulation_record_file_given(self):
        """simulate=True without an explicit record file still yields plots."""
        dm.run_problem(self.p, make_plots=True, simulate=True)
        self._assert_plot_files(should_exist=True)
@use_tempdirs
class TestSimulateArrayParam(unittest.TestCase):
    """Verify that an array-valued parameter is recorded identically in the
    solution and simulation records written by ``dm.run_problem``."""

    def test_simulate_array_param(self):
        """Solve the brachistochrone with a dummy 10-element array parameter,
        simulate, and compare the parameter across the two record files."""
        #
        # Initialize the Problem and the optimization driver
        #
        p = om.Problem(model=om.Group())
        p.driver = om.ScipyOptimizeDriver()
        p.driver.declare_coloring()
        #
        # Create a trajectory and add a phase to it
        #
        traj = p.model.add_subsystem('traj', dm.Trajectory())
        phase = traj.add_phase('phase0',
                               dm.Phase(ode_class=BrachistochroneODE,
                                        transcription=dm.GaussLobatto(num_segments=10)))
        #
        # Set the variables
        #
        phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))
        phase.add_state('x', fix_initial=True, fix_final=True, rate_source='xdot')
        phase.add_state('y', fix_initial=True, fix_final=True, rate_source='ydot')
        phase.add_state('v', fix_initial=True, fix_final=False, rate_source='vdot')
        phase.add_control('theta', continuity=True, rate_continuity=True,
                          units='deg', lower=0.01, upper=179.9)
        phase.add_parameter('g', units='m/s**2', val=9.80665)
        phase.add_parameter('array', units=None, shape=(10,), static_target=True)
        # dummy array of data
        indeps = p.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_output('array', np.linspace(1, 10, 10), units=None)
        # add dummy array as a parameter and connect it
        p.model.connect('array', 'traj.phase0.parameters:array')
        #
        # Minimize time at the end of the phase
        #
        phase.add_objective('time', loc='final', scaler=10)
        p.model.linear_solver = om.DirectSolver()
        #
        # Setup the Problem
        #
        p.setup()
        #
        # Set the initial values
        #
        p['traj.phase0.t_initial'] = 0.0
        p['traj.phase0.t_duration'] = 2.0
        p['traj.phase0.states:x'] = phase.interp('x', [0, 10])
        p['traj.phase0.states:y'] = phase.interp('y', [10, 5])
        p['traj.phase0.states:v'] = phase.interp('v', [0, 9.9])
        p['traj.phase0.controls:theta'] = phase.interp('theta', [5, 100.5])
        #
        # Solve for the optimal trajectory
        #
        dm.run_problem(p, simulate=True)
        # Test the results
        sol_results = om.CaseReader('dymos_solution.db').get_case('final')
        # BUG FIX: the simulation case must be read from the simulation record
        # ('dymos_simulation.db', the default written when simulate=True), not
        # from the solution record again. The original code read
        # 'dymos_solution.db' twice, so the assertions below compared the
        # solution file with itself and passed vacuously.
        sim_results = om.CaseReader('dymos_simulation.db').get_case('final')
        sol = sol_results.get_val('traj.phase0.timeseries.parameters:array')
        sim = sim_results.get_val('traj.phase0.timeseries.parameters:array')
        assert_near_equal(sol - sim, np.zeros_like(sol))
        # Test that the parameter is available in the solution and simulation files
        sol = sol_results.get_val('traj.phase0.parameters:array')
        sim = sim_results.get_val('traj.phase0.parameters:array')
        assert_near_equal(sol - sim, np.zeros_like(sol))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| [
"dymos.examples.vanderpol.vanderpol_dymos.vanderpol",
"dymos.run_problem",
"numpy.exp",
"openmdao.api.Group",
"scipy.interpolate.interp1d",
"dymos.Radau",
"openmdao.api.ScipyOptimizeDriver",
"unittest.main",
"unittest.skipIf",
"openmdao.api.IndepVarComp",
"numpy.zeros_like",
"os.path.exists",
... | [((684, 727), 'openmdao.utils.general_utils.set_pyoptsparse_opt', 'set_pyoptsparse_opt', (['"""IPOPT"""'], {'fallback': '(True)'}), "('IPOPT', fallback=True)\n", (703, 727), False, 'from openmdao.utils.general_utils import set_pyoptsparse_opt\n'), ((15224, 15262), 'openmdao.utils.testing_utils.require_pyoptsparse', 'require_pyoptsparse', ([], {'optimizer': '"""SLSQP"""'}), "(optimizer='SLSQP')\n", (15243, 15262), False, 'from openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n'), ((791, 851), 'unittest.skipIf', 'unittest.skipIf', (["(optimizer != 'IPOPT')", '"""IPOPT not available"""'], {}), "(optimizer != 'IPOPT', 'IPOPT not available')\n", (806, 851), False, 'import unittest\n'), ((857, 895), 'openmdao.utils.testing_utils.require_pyoptsparse', 'require_pyoptsparse', ([], {'optimizer': '"""SLSQP"""'}), "(optimizer='SLSQP')\n", (876, 895), False, 'from openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n'), ((3972, 4032), 'unittest.skipIf', 'unittest.skipIf', (["(optimizer != 'IPOPT')", '"""IPOPT not available"""'], {}), "(optimizer != 'IPOPT', 'IPOPT not available')\n", (3987, 4032), False, 'import unittest\n'), ((4038, 4076), 'openmdao.utils.testing_utils.require_pyoptsparse', 'require_pyoptsparse', ([], {'optimizer': '"""SLSQP"""'}), "(optimizer='SLSQP')\n", (4057, 4076), False, 'from openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n'), ((7100, 7138), 'openmdao.utils.testing_utils.require_pyoptsparse', 'require_pyoptsparse', ([], {'optimizer': '"""SLSQP"""'}), "(optimizer='SLSQP')\n", (7119, 7138), False, 'from openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n'), ((9296, 9334), 'openmdao.utils.testing_utils.require_pyoptsparse', 'require_pyoptsparse', ([], {'optimizer': '"""SLSQP"""'}), "(optimizer='SLSQP')\n", (9315, 9334), False, 'from openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n'), ((10938, 10976), 
'openmdao.utils.testing_utils.require_pyoptsparse', 'require_pyoptsparse', ([], {'optimizer': '"""SLSQP"""'}), "(optimizer='SLSQP')\n", (10957, 10976), False, 'from openmdao.utils.testing_utils import use_tempdirs, require_pyoptsparse\n'), ((23346, 23361), 'unittest.main', 'unittest.main', ([], {}), '()\n', (23359, 23361), False, 'import unittest\n'), ((997, 1019), 'openmdao.api.pyOptSparseDriver', 'om.pyOptSparseDriver', ([], {}), '()\n', (1017, 1019), True, 'import openmdao.api as om\n'), ((2593, 2608), 'numpy.float128', 'np.float128', (['(20)'], {}), '(20)\n', (2604, 2608), True, 'import numpy as np\n'), ((2933, 2997), 'dymos.run_problem', 'dm.run_problem', (['p'], {'refine_method': '"""hp"""', 'refine_iteration_limit': '(10)'}), "(p, refine_method='hp', refine_iteration_limit=10)\n", (2947, 2997), True, 'import dymos as dm\n'), ((3018, 3028), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3025, 3028), True, 'import numpy as np\n'), ((4175, 4197), 'openmdao.api.pyOptSparseDriver', 'om.pyOptSparseDriver', ([], {}), '()\n', (4195, 4197), True, 'import openmdao.api as om\n'), ((6062, 6125), 'dymos.run_problem', 'dm.run_problem', (['p'], {'refine_method': '"""hp"""', 'refine_iteration_limit': '(5)'}), "(p, refine_method='hp', refine_iteration_limit=5)\n", (6076, 6125), True, 'import dymos as dm\n'), ((6146, 6156), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6153, 6156), True, 'import numpy as np\n'), ((7247, 7269), 'openmdao.api.pyOptSparseDriver', 'om.pyOptSparseDriver', ([], {}), '()\n', (7267, 7269), True, 'import openmdao.api as om\n'), ((8474, 8491), 'openmdao.api.DirectSolver', 'om.DirectSolver', ([], {}), '()\n', (8489, 8491), True, 'import openmdao.api as om\n'), ((8979, 8996), 'dymos.run_problem', 'dm.run_problem', (['p'], {}), '(p)\n', (8993, 8996), True, 'import dymos as dm\n'), ((9121, 9155), 'openmdao.api.CaseReader', 'om.CaseReader', (['"""dymos_solution.db"""'], {}), "('dymos_solution.db')\n", (9134, 9155), True, 'import openmdao.api as 
om\n'), ((9457, 9479), 'openmdao.api.pyOptSparseDriver', 'om.pyOptSparseDriver', ([], {}), '()\n', (9477, 9479), True, 'import openmdao.api as om\n'), ((10364, 10381), 'openmdao.api.DirectSolver', 'om.DirectSolver', ([], {}), '()\n', (10379, 10381), True, 'import openmdao.api as om\n'), ((10805, 10848), 'dymos.run_problem', 'dm.run_problem', (['p'], {'refine_iteration_limit': '(5)'}), '(p, refine_iteration_limit=5)\n', (10819, 10848), True, 'import dymos as dm\n'), ((11099, 11121), 'openmdao.api.pyOptSparseDriver', 'om.pyOptSparseDriver', ([], {}), '()\n', (11119, 11121), True, 'import openmdao.api as om\n'), ((12326, 12343), 'openmdao.api.DirectSolver', 'om.DirectSolver', ([], {}), '()\n', (12341, 12343), True, 'import openmdao.api as om\n'), ((12831, 12863), 'dymos.run_problem', 'dm.run_problem', (['p'], {'simulate': '(True)'}), '(p, simulate=True)\n', (12845, 12863), True, 'import dymos as dm\n'), ((12988, 13022), 'openmdao.api.CaseReader', 'om.CaseReader', (['"""dymos_solution.db"""'], {}), "('dymos_solution.db')\n", (13001, 13022), True, 'import openmdao.api as om\n'), ((13472, 13529), 'dymos.examples.vanderpol.vanderpol_dymos.vanderpol', 'vanderpol', ([], {'transcription': '"""gauss-lobatto"""', 'num_segments': '(75)'}), "(transcription='gauss-lobatto', num_segments=75)\n", (13481, 13529), False, 'from dymos.examples.vanderpol.vanderpol_dymos import vanderpol\n'), ((13799, 13856), 'dymos.examples.vanderpol.vanderpol_dymos.vanderpol', 'vanderpol', ([], {'transcription': '"""gauss-lobatto"""', 'num_segments': '(75)'}), "(transcription='gauss-lobatto', num_segments=75)\n", (13808, 13856), False, 'from dymos.examples.vanderpol.vanderpol_dymos import vanderpol\n'), ((13892, 13942), 'dymos.run_problem.run_problem', 'run_problem', (['q'], {'restart': '"""vanderpol_simulation.sql"""'}), "(q, restart='vanderpol_simulation.sql')\n", (13903, 13942), False, 'from dymos.run_problem import run_problem\n'), ((14172, 14209), 'numpy.insert', 'np.insert', (['(tq[1:] != 
tq[:-1])', '(0)', '(True)'], {}), '(tq[1:] != tq[:-1], 0, True)\n', (14181, 14209), True, 'import numpy as np\n'), ((14529, 14566), 'numpy.insert', 'np.insert', (['(ts[1:] != ts[:-1])', '(0)', '(True)'], {}), '(ts[1:] != ts[:-1], 0, True)\n', (14538, 14566), True, 'import numpy as np\n'), ((14922, 14953), 'scipy.interpolate.interp1d', 'interp1d', (['ts', 'x1s'], {'kind': '"""cubic"""'}), "(ts, x1s, kind='cubic')\n", (14930, 14953), False, 'from scipy.interpolate import interp1d\n'), ((14969, 15000), 'scipy.interpolate.interp1d', 'interp1d', (['ts', 'x0s'], {'kind': '"""cubic"""'}), "(ts, x0s, kind='cubic')\n", (14977, 15000), False, 'from scipy.interpolate import interp1d\n'), ((15015, 15045), 'scipy.interpolate.interp1d', 'interp1d', (['ts', 'us'], {'kind': '"""cubic"""'}), "(ts, us, kind='cubic')\n", (15023, 15045), False, 'from scipy.interpolate import interp1d\n'), ((15393, 15415), 'openmdao.api.pyOptSparseDriver', 'om.pyOptSparseDriver', ([], {}), '()\n', (15413, 15415), True, 'import openmdao.api as om\n'), ((16619, 16636), 'openmdao.api.DirectSolver', 'om.DirectSolver', ([], {}), '()\n', (16634, 16636), True, 'import openmdao.api as om\n'), ((17203, 17242), 'dymos.run_problem', 'dm.run_problem', (['self.p'], {'make_plots': '(True)'}), '(self.p, make_plots=True)\n', (17217, 17242), True, 'import dymos as dm\n'), ((17734, 17792), 'dymos.run_problem', 'dm.run_problem', (['self.p'], {'make_plots': '(True)', 'plot_dir': 'plot_dir'}), '(self.p, make_plots=True, plot_dir=plot_dir)\n', (17748, 17792), True, 'import dymos as dm\n'), ((18251, 18291), 'dymos.run_problem', 'dm.run_problem', (['self.p'], {'make_plots': '(False)'}), '(self.p, make_plots=False)\n', (18265, 18291), True, 'import dymos as dm\n'), ((18813, 18902), 'dymos.run_problem', 'dm.run_problem', (['self.p'], {'simulate': '(True)', 'simulation_record_file': 'simulation_record_file'}), '(self.p, simulate=True, simulation_record_file=\n simulation_record_file)\n', (18827, 18902), True, 'import dymos as 
dm\n'), ((19102, 19167), 'dymos.run_problem', 'dm.run_problem', (['self.p'], {'solution_record_file': 'solution_record_file'}), '(self.p, solution_record_file=solution_record_file)\n', (19116, 19167), True, 'import dymos as dm\n'), ((19304, 19358), 'dymos.run_problem', 'dm.run_problem', (['self.p'], {'make_plots': '(True)', 'simulate': '(True)'}), '(self.p, make_plots=True, simulate=True)\n', (19318, 19358), True, 'import dymos as dm\n'), ((19828, 19882), 'dymos.run_problem', 'dm.run_problem', (['self.p'], {'make_plots': '(True)', 'simulate': '(True)'}), '(self.p, make_plots=True, simulate=True)\n', (19842, 19882), True, 'import dymos as dm\n'), ((20506, 20530), 'openmdao.api.ScipyOptimizeDriver', 'om.ScipyOptimizeDriver', ([], {}), '()\n', (20528, 20530), True, 'import openmdao.api as om\n'), ((22033, 22050), 'openmdao.api.DirectSolver', 'om.DirectSolver', ([], {}), '()\n', (22048, 22050), True, 'import openmdao.api as om\n'), ((22594, 22626), 'dymos.run_problem', 'dm.run_problem', (['p'], {'simulate': '(True)'}), '(p, simulate=True)\n', (22608, 22626), True, 'import dymos as dm\n'), ((1753, 1768), 'dymos.Trajectory', 'dm.Trajectory', ([], {}), '()\n', (1766, 1768), True, 'import dymos as dm\n'), ((4879, 4894), 'dymos.Trajectory', 'dm.Trajectory', ([], {}), '()\n', (4892, 4894), True, 'import dymos as dm\n'), ((7400, 7415), 'dymos.Trajectory', 'dm.Trajectory', ([], {}), '()\n', (7413, 7415), True, 'import dymos as dm\n'), ((9022, 9057), 'os.path.exists', 'os.path.exists', (['"""dymos_solution.db"""'], {}), "('dymos_solution.db')\n", (9036, 9057), False, 'import os\n'), ((11252, 11267), 'dymos.Trajectory', 'dm.Trajectory', ([], {}), '()\n', (11265, 11267), True, 'import dymos as dm\n'), ((12889, 12924), 'os.path.exists', 'os.path.exists', (['"""dymos_solution.db"""'], {}), "('dymos_solution.db')\n", (12903, 12924), False, 'import os\n'), ((15546, 15561), 'dymos.Trajectory', 'dm.Trajectory', ([], {}), '()\n', (15559, 15561), True, 'import dymos as dm\n'), ((18923, 
18961), 'os.path.exists', 'os.path.exists', (['simulation_record_file'], {}), '(simulation_record_file)\n', (18937, 18961), False, 'import os\n'), ((19193, 19229), 'os.path.exists', 'os.path.exists', (['solution_record_file'], {}), '(solution_record_file)\n', (19207, 19229), False, 'import os\n'), ((20685, 20700), 'dymos.Trajectory', 'dm.Trajectory', ([], {}), '()\n', (20698, 20700), True, 'import dymos as dm\n'), ((21644, 21661), 'openmdao.api.IndepVarComp', 'om.IndepVarComp', ([], {}), '()\n', (21659, 21661), True, 'import openmdao.api as om\n'), ((21714, 21736), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (21725, 21736), True, 'import numpy as np\n'), ((22998, 23016), 'numpy.zeros_like', 'np.zeros_like', (['sol'], {}), '(sol)\n', (23011, 23016), True, 'import numpy as np\n'), ((23273, 23291), 'numpy.zeros_like', 'np.zeros_like', (['sol'], {}), '(sol)\n', (23286, 23291), True, 'import numpy as np\n'), ((966, 976), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (974, 976), True, 'import openmdao.api as om\n'), ((3098, 3110), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (3104, 3110), True, 'import numpy as np\n'), ((3113, 3124), 'numpy.exp', 'np.exp', (['val'], {}), '(val)\n', (3119, 3124), True, 'import numpy as np\n'), ((3166, 3178), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (3172, 3178), True, 'import numpy as np\n'), ((3181, 3192), 'numpy.exp', 'np.exp', (['val'], {}), '(val)\n', (3187, 3192), True, 'import numpy as np\n'), ((3285, 3296), 'numpy.exp', 'np.exp', (['val'], {}), '(val)\n', (3291, 3296), True, 'import numpy as np\n'), ((3321, 3333), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (3327, 3333), True, 'import numpy as np\n'), ((4144, 4154), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (4152, 4154), True, 'import openmdao.api as om\n'), ((6226, 6238), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (6232, 6238), True, 'import numpy as np\n'), ((6241, 6252), 'numpy.exp', 
'np.exp', (['val'], {}), '(val)\n', (6247, 6252), True, 'import numpy as np\n'), ((6294, 6306), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (6300, 6306), True, 'import numpy as np\n'), ((6309, 6320), 'numpy.exp', 'np.exp', (['val'], {}), '(val)\n', (6315, 6320), True, 'import numpy as np\n'), ((6413, 6424), 'numpy.exp', 'np.exp', (['val'], {}), '(val)\n', (6419, 6424), True, 'import numpy as np\n'), ((6449, 6461), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (6455, 6461), True, 'import numpy as np\n'), ((7216, 7226), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (7224, 7226), True, 'import openmdao.api as om\n'), ((9426, 9436), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (9434, 9436), True, 'import openmdao.api as om\n'), ((9671, 9704), 'dymos.Radau', 'dm.Radau', ([], {'num_segments': '(1)', 'order': '(3)'}), '(num_segments=1, order=3)\n', (9679, 9704), True, 'import dymos as dm\n'), ((11068, 11078), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (11076, 11078), True, 'import openmdao.api as om\n'), ((15362, 15372), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (15370, 15372), True, 'import openmdao.api as om\n'), ((20475, 20485), 'openmdao.api.Group', 'om.Group', ([], {}), '()\n', (20483, 20485), True, 'import openmdao.api as om\n'), ((22677, 22711), 'openmdao.api.CaseReader', 'om.CaseReader', (['"""dymos_solution.db"""'], {}), "('dymos_solution.db')\n", (22690, 22711), True, 'import openmdao.api as om\n'), ((22752, 22786), 'openmdao.api.CaseReader', 'om.CaseReader', (['"""dymos_solution.db"""'], {}), "('dymos_solution.db')\n", (22765, 22786), True, 'import openmdao.api as om\n'), ((1915, 1949), 'dymos.Radau', 'dm.Radau', ([], {'num_segments': '(10)', 'order': '(3)'}), '(num_segments=10, order=3)\n', (1923, 1949), True, 'import dymos as dm\n'), ((3077, 3089), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (3083, 3089), True, 'import numpy as np\n'), ((3150, 3161), 'numpy.exp', 'np.exp', (['val'], {}), '(val)\n', 
(3156, 3161), True, 'import numpy as np\n'), ((5041, 5082), 'dymos.GaussLobatto', 'dm.GaussLobatto', ([], {'num_segments': '(20)', 'order': '(3)'}), '(num_segments=20, order=3)\n', (5056, 5082), True, 'import dymos as dm\n'), ((6205, 6217), 'numpy.exp', 'np.exp', (['(-val)'], {}), '(-val)\n', (6211, 6217), True, 'import numpy as np\n'), ((6278, 6289), 'numpy.exp', 'np.exp', (['val'], {}), '(val)\n', (6284, 6289), True, 'import numpy as np\n'), ((7563, 7597), 'dymos.Radau', 'dm.Radau', ([], {'num_segments': '(10)', 'order': '(3)'}), '(num_segments=10, order=3)\n', (7571, 7597), True, 'import dymos as dm\n'), ((11415, 11449), 'dymos.Radau', 'dm.Radau', ([], {'num_segments': '(10)', 'order': '(3)'}), '(num_segments=10, order=3)\n', (11423, 11449), True, 'import dymos as dm\n'), ((15709, 15743), 'dymos.Radau', 'dm.Radau', ([], {'num_segments': '(10)', 'order': '(3)'}), '(num_segments=10, order=3)\n', (15717, 15743), True, 'import dymos as dm\n'), ((20868, 20900), 'dymos.GaussLobatto', 'dm.GaussLobatto', ([], {'num_segments': '(10)'}), '(num_segments=10)\n', (20883, 20900), True, 'import dymos as dm\n'), ((3380, 3395), 'numpy.exp', 'np.exp', (['(2 * val)'], {}), '(2 * val)\n', (3386, 3395), True, 'import numpy as np\n'), ((3444, 3460), 'numpy.exp', 'np.exp', (['(-2 * val)'], {}), '(-2 * val)\n', (3450, 3460), True, 'import numpy as np\n'), ((6508, 6523), 'numpy.exp', 'np.exp', (['(2 * val)'], {}), '(2 * val)\n', (6514, 6523), True, 'import numpy as np\n'), ((6572, 6588), 'numpy.exp', 'np.exp', (['(-2 * val)'], {}), '(-2 * val)\n', (6578, 6588), True, 'import numpy as np\n')] |
from datetime import datetime
import numpy as np
from numpy.random import gamma, exponential, lognormal,normal
import pandas as pd
from pathlib import Path
import sys
import niddk_covid_sicr as ncs
def get_stan_dataV(full_data_path, args):
    """Assemble the Stan data dictionary for one region's case time series.

    Args:
        full_data_path: Path to the region's CSV file (expects 'dates2',
            'new_cases', 'new_recover', 'new_deaths', 'new_hosp', 'new_icu').
        args: Namespace with last_date, data_path, roi and fixed_t attributes.

    Returns:
        (stan_data, start_date) tuple, or [None, None] when the region never
        reaches 5 new cases in a single day.
    """
    df = pd.read_csv(full_data_path)
    last_date = getattr(args, 'last_date', None)
    if last_date:
        try:
            datetime.strptime(last_date, '%m/%d/%y')
        except ValueError:
            raise ValueError("Incorrect --last-date format, should be MM/DD/YY")
        # Truncate the series at the requested cutoff date.
        df = df[df['dates2'] <= last_date]
    # t0 := where to start time series, index space (first day with >= 5 cases)
    case_days = np.where(df["new_cases"].values >= 5)[0]
    if case_days.size == 0:
        return [None, None]
    t0 = case_days[0]
    # tm := start of mitigation, index space; fall back to t0 + 10 on failure.
    try:
        dfm = pd.read_csv(args.data_path / 'mitigationprior.csv')
        tmdate = dfm.loc[dfm.region == args.roi, 'date'].values[0]
        tm = np.where(df["dates2"] == tmdate)[0][0]
    except Exception:
        print("Could not use mitigation prior data; setting mitigation prior to default.")
        tm = t0 + 10
    n_proj = 120
    n_days = len(df['dates2'])
    stan_data = {}
    stan_data['n_ostates'] = 5
    stan_data['tm'] = tm
    stan_data['ts'] = np.arange(t0, n_days + n_proj)
    stan_data['y'] = df[['new_cases', 'new_recover', 'new_deaths', 'new_hosp', 'new_icu']].to_numpy()\
        .astype(int)[t0:, :]
    stan_data['n_obs'] = n_days - t0
    stan_data['n_total'] = n_days - t0 + n_proj
    if args.fixed_t:
        # Shift indices so every region shares the 01/22/20 time origin.
        global_start = datetime.strptime('01/22/20', '%m/%d/%y')
        frame_start = datetime.strptime(df['dates2'][0], '%m/%d/%y')
        offset = (frame_start - global_start).days
        stan_data['tm'] += offset
        stan_data['ts'] += offset
    return stan_data, df['dates2'][t0]
def get_n_data(stan_data):
    """Return the number of strictly positive observations in stan_data['y'] (0 when empty/None)."""
    if not stan_data:
        return 0
    return (stan_data['y'] > 0).ravel().sum()
# functions used to initialize parameters
def get_init_funV(args, stan_data, force_fresh=False):
    """Build a zero-argument initialization function for the Stan sampler.

    If args.init names a previous fit (and force_fresh is False), the last
    sample of that fit is reused as the initial point; otherwise a default
    dictionary of parameter values (some drawn from weak priors) is used.

    Args:
        args: Namespace with init, fits_path, models_path, model_name attributes.
        stan_data: Stan data dictionary (unused here; kept for interface parity).
        force_fresh (bool): When True, ignore args.init and use defaults.

    Returns:
        Callable taking no arguments that returns the init dict.
    """
    result = None
    if args.init and not force_fresh:
        try:
            init_path = Path(args.fits_path) / args.init
            model_path = Path(args.models_path) / args.model_name
            result = ncs.last_sample_as_dict(init_path, model_path)
        except Exception:
            print("Couldn't use last sample from previous fit to initialize")
            # BUG FIX: the original called init_fun(force_fresh=True) here,
            # but init_fun is defined later in this function and takes no
            # arguments (NameError at runtime); recurse into this builder
            # to fall back to the defaults instead.
            return get_init_funV(args, stan_data, force_fresh=True)
        else:
            print("Using last sample from previous fit to initialize")
    else:
        print("Using default values to initialize fit")
    if result is None:
        # BUG FIX: the original assigned these defaults unconditionally,
        # clobbering a successfully loaded previous-fit sample.
        result = {'f1': 1.75,
                  'f2': .25,
                  'sigmaVS': exponential(1/10.),
                  'sigmaSR': exponential(1/400.),
                  'sigmaSRI': exponential(1/400.),
                  'sigmaIV': exponential(1/100.),
                  'sigmaRI': .01,
                  'sigmaDI': .03,
                  'sigmaRC': .08,
                  'sigmaDC': .003,
                  'sigmaRH1': 1.,
                  'sigmaRH2': .1,
                  'sigmaRV': exponential(1/10.),
                  'sigmaH1C': .01,
                  'sigmaH1V': exponential(1/10.),
                  'sigmaH2H1': .4,
                  'sigmaDH1': .003,
                  'sigmaDH2': .001,
                  'sigmaM': exponential(1/10.),
                  'mbase': exponential(1/10.),
                  'q': exponential(.5),
                  'n_pop': normal(4e6, 1e4)
                  }

    def init_fun():
        # Closure over the (fixed) init dict; Stan calls this per chain.
        return result
    return init_fun
| [
"niddk_covid_sicr.last_sample_as_dict",
"pandas.read_csv",
"numpy.random.exponential",
"datetime.datetime.strptime",
"numpy.where",
"pathlib.Path",
"numpy.random.normal"
] | [((252, 279), 'pandas.read_csv', 'pd.read_csv', (['full_data_path'], {}), '(full_data_path)\n', (263, 279), True, 'import pandas as pd\n'), ((827, 878), 'pandas.read_csv', 'pd.read_csv', (["(args.data_path / 'mitigationprior.csv')"], {}), "(args.data_path / 'mitigationprior.csv')\n", (838, 878), True, 'import pandas as pd\n'), ((1572, 1613), 'datetime.datetime.strptime', 'datetime.strptime', (['"""01/22/20"""', '"""%m/%d/%y"""'], {}), "('01/22/20', '%m/%d/%y')\n", (1589, 1613), False, 'from datetime import datetime\n'), ((1636, 1682), 'datetime.datetime.strptime', 'datetime.strptime', (["df['dates2'][0]", '"""%m/%d/%y"""'], {}), "(df['dates2'][0], '%m/%d/%y')\n", (1653, 1682), False, 'from datetime import datetime\n'), ((346, 391), 'datetime.datetime.strptime', 'datetime.strptime', (['args.last_date', '"""%m/%d/%y"""'], {}), "(args.last_date, '%m/%d/%y')\n", (363, 391), False, 'from datetime import datetime\n'), ((2259, 2305), 'niddk_covid_sicr.last_sample_as_dict', 'ncs.last_sample_as_dict', (['init_path', 'model_path'], {}), '(init_path, model_path)\n', (2282, 2305), True, 'import niddk_covid_sicr as ncs\n'), ((2696, 2717), 'numpy.random.exponential', 'exponential', (['(1 / 10.0)'], {}), '(1 / 10.0)\n', (2707, 2717), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((2746, 2768), 'numpy.random.exponential', 'exponential', (['(1 / 400.0)'], {}), '(1 / 400.0)\n', (2757, 2768), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((2798, 2820), 'numpy.random.exponential', 'exponential', (['(1 / 400.0)'], {}), '(1 / 400.0)\n', (2809, 2820), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((2849, 2871), 'numpy.random.exponential', 'exponential', (['(1 / 100.0)'], {}), '(1 / 100.0)\n', (2860, 2871), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((3111, 3132), 'numpy.random.exponential', 'exponential', (['(1 / 10.0)'], {}), '(1 / 10.0)\n', (3122, 3132), False, 
'from numpy.random import gamma, exponential, lognormal, normal\n'), ((3198, 3219), 'numpy.random.exponential', 'exponential', (['(1 / 10.0)'], {}), '(1 / 10.0)\n', (3209, 3219), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((3357, 3378), 'numpy.random.exponential', 'exponential', (['(1 / 10.0)'], {}), '(1 / 10.0)\n', (3368, 3378), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((3405, 3426), 'numpy.random.exponential', 'exponential', (['(1 / 10.0)'], {}), '(1 / 10.0)\n', (3416, 3426), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((3448, 3464), 'numpy.random.exponential', 'exponential', (['(0.5)'], {}), '(0.5)\n', (3459, 3464), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((3492, 3518), 'numpy.random.normal', 'normal', (['(4000000.0)', '(10000.0)'], {}), '(4000000.0, 10000.0)\n', (3498, 3518), False, 'from numpy.random import gamma, exponential, lognormal, normal\n'), ((663, 700), 'numpy.where', 'np.where', (["(df['new_cases'].values >= 5)"], {}), "(df['new_cases'].values >= 5)\n", (671, 700), True, 'import numpy as np\n'), ((959, 991), 'numpy.where', 'np.where', (["(df['dates2'] == tmdate)"], {}), "(df['dates2'] == tmdate)\n", (967, 991), True, 'import numpy as np\n'), ((2139, 2159), 'pathlib.Path', 'Path', (['args.fits_path'], {}), '(args.fits_path)\n', (2143, 2159), False, 'from pathlib import Path\n'), ((2197, 2219), 'pathlib.Path', 'Path', (['args.models_path'], {}), '(args.models_path)\n', (2201, 2219), False, 'from pathlib import Path\n')] |
'''
Summary
-------
Defines a maximum a-posterior estimator for unigrams
MAPEstimator supports a common API for unigram probability estimators:
* fit
* predict_proba
* score
Resources
---------
See COMP 136 course website for the complete problem description and all math details
'''
import numpy as np
from Vocabulary import Vocabulary
class MAPEstimator():
    """
    Maximum A-Posteriori Estimator for unigram probabilities

    Uses a symmetric Dirichlet(alpha) prior over the vocabulary, giving the
    MAP estimate p(w) = (count(w) + alpha - 1) / (N + V * (alpha - 1)),
    which is only well defined for alpha >= 1.

    Attributes
    ----------
    vocab : Vocabulary object
    alpha : float, must be greater than or equal to one
        Defines concentration parameter of the symmetric Dirichlet prior

    Examples
    --------
    >>> word_list = ['dinosaur', 'trex', 'dinosaur', 'stegosaurus']
    >>> mapEst = MAPEstimator(Vocabulary(word_list), alpha=2.0)
    >>> mapEst.fit(word_list)
    >>> np.allclose(mapEst.predict_proba('dinosaur'), 3.0/7.0)
    True

    >>> mapEst.predict_proba('never_seen-before')
    Traceback (most recent call last):
    ...
    KeyError: 'Word never_seen-before not in the vocabulary'
    """

    def __init__(self, vocab, alpha=2.0):
        self.vocab = vocab
        self.alpha = float(alpha)
        # State that is adjusted by calls to 'fit'
        self.total_count = 0
        self.count_V = None

    def fit(self, word_list):
        ''' Fit this estimator to provided training data

        Args
        ----
        word_list : list of str
            Each entry is a word that can be looked up in the vocabulary

        Returns
        -------
        None. Internal attributes updated.

        Post Condition
        --------------
        Attributes will updated based on provided word list
        * The 1D array count_V is set to the count of each vocabulary word
        * The integer total_count is set to the total length of the word list
        '''
        self.count_V = np.zeros(self.vocab.size)
        self.total_count = len(word_list)
        for word in word_list:
            # NOTE(review): assumes Vocabulary exposes get_word_id(word);
            # confirm against the Vocabulary class definition.
            self.count_V[self.vocab.get_word_id(word)] += 1

    def predict_proba(self, word):
        ''' Predict probability of a given unigram under this model

        Uses the MAP estimate under a symmetric Dirichlet(alpha) prior:
        (count(word) + alpha - 1) / (total_count + V * (alpha - 1))

        Args
        ----
        word : string
            Known word that can be looked up in the vocabulary

        Returns
        -------
        proba : float between 0 and 1

        Throws
        ------
        ValueError if hyperparameters do not allow MAP estimation
        KeyError if the provided word is not in the vocabulary
        '''
        # The mode of a Dirichlet distribution is only defined for alpha >= 1.
        if self.alpha < 1.0:
            raise ValueError(
                "Hyperparameter alpha does not yield valid MAP estimate")
        word_id = self.vocab.get_word_id(word)
        numer = self.count_V[word_id] + self.alpha - 1.0
        denom = self.total_count + self.vocab.size * (self.alpha - 1.0)
        return numer / denom

    def score(self, word_list):
        ''' Compute the average log probability of words in provided list

        Args
        ----
        word_list : list of str
            Each entry is a word that can be looked up in the vocabulary

        Returns
        -------
        avg_log_proba : float between (-np.inf, 0.0)
        '''
        total_log_proba = 0.0
        for word in word_list:
            total_log_proba += np.log(self.predict_proba(word))
        return total_log_proba / len(word_list)
| [
"numpy.zeros"
] | [((1862, 1887), 'numpy.zeros', 'np.zeros', (['self.vocab.size'], {}), '(self.vocab.size)\n', (1870, 1887), True, 'import numpy as np\n')] |
import os.path as osp
import numpy as np
from typing import List, Tuple, Union
from dslpy.utils.raster2uint8 import rasterToUint8
try:
from osgeo import gdal
except:
import gdal
class Raster:
    # Thin wrapper around a GDAL dataset: band selection plus full-image or
    # zero-padded block reads, optionally converted to uint8 for display.
    def __init__(self,
                 path: str,
                 band_list: Union[List[int], Tuple[int], None]=None,
                 is_sar: bool=False, # TODO: Remove this param
                 is_src: bool=False) -> None:
        """ Class of read raster.
        Args:
            path (str): The path of raster.
            band_list (Union[List[int], Tuple[int], None], optional):
                band list (start with 1) or None (all of bands). Defaults to None.
            is_sar (bool, optional): The raster is SAR or not. Defaults to False.
            is_src (bool, optional):
                Return raw data or not (convert uint8/float32). Defaults to False.
        """
        super(Raster, self).__init__()
        if osp.exists(path):
            self.path = path
            self.__src_data = gdal.Open(path)
            # Cache band count / width / height from the dataset.
            self.__getInfo()
            self.is_sar = is_sar
            self.is_src = is_src
            self.setBands(band_list)
        else:
            raise ValueError("The path {0} not exists.".format(path))
    def setBands(self,
                 band_list: Union[List[int], Tuple[int], None]) -> None:
        """ Set band of data.
        Args:
            band_list (Union[List[int], Tuple[int], None]):
                band list (start with 1) or None (all of bands).
        """
        # Validate length and range before storing; GDAL bands are 1-indexed.
        if band_list is not None:
            if len(band_list) > self.bands:
                raise ValueError("The lenght of band_list must be less than {0}.".format(str(self.bands)))
            if max(band_list) > self.bands or min(band_list) < 1:
                raise ValueError("The range of band_list must within [1, {0}].".format(str(self.bands)))
        self.band_list = band_list
    def getArray(self,
                 start_loc: Union[List[int], Tuple[int], None]=None,
                 block_size: Union[List[int], Tuple[int]]=[512, 512]) -> np.ndarray:
        """ Get ndarray data
        Args:
            start_loc (Union[List[int], Tuple[int], None], optional):
                Coordinates of the upper left corner of the block, if None means return full image.
            block_size (Union[List[int], Tuple[int]], optional):
                Block size. Defaults to [512, 512].
        Returns:
            np.ndarray: data's ndarray.
        """
        # Full image when no offset is given, otherwise a padded block read.
        if start_loc is None:
            return self.__getAarray()
        else:
            return self.__getBlock(start_loc, block_size)
    def __getInfo(self) -> None:
        # Cache basic dataset metadata (band count and raster dimensions).
        self.bands = self.__src_data.RasterCount
        self.width = self.__src_data.RasterXSize
        self.height = self.__src_data.RasterYSize
    def __getAarray(self, window: Union[None, List[int], Tuple[int]]=None) -> np.ndarray:
        # window, when given, is (xoff, yoff, xsize, ysize) in pixel space.
        if window is not None:
            xoff, yoff, xsize, ysize = window
        if self.band_list is None:
            # No band selection: read every band in one call.
            if window is None:
                ima = self.__src_data.ReadAsArray()
            else:
                ima = self.__src_data.ReadAsArray(xoff, yoff, xsize, ysize)
        else:
            # Selected bands only: read each band and stack as (bands, H, W).
            band_array = []
            for b in self.band_list:
                if window is None:
                    band_i = self.__src_data.GetRasterBand(b).ReadAsArray()
                else:
                    band_i = self.__src_data.GetRasterBand(b).ReadAsArray(xoff, yoff, xsize, ysize)
                band_array.append(band_i)
            ima = np.stack(band_array, axis=0)
        if self.bands == 1:
            if self.is_sar:
                # SAR data may be complex; keep only the magnitude.
                ima = abs(ima)
        else:
            # Reorder multi-band data from (bands, H, W) to (H, W, bands).
            ima = ima.transpose((1, 2, 0))
        if self.is_src is False:
            # Convert raw values to uint8 for display unless raw data requested.
            ima = rasterToUint8(ima)
        return ima
    def __getBlock(self,
                   start_loc: Union[List[int], Tuple[int]],
                   block_size: Union[List[int], Tuple[int]]=[512, 512]) -> np.ndarray:
        # Clamp the requested window to the raster extent, read it, then
        # zero-pad the result so the returned block is always block_size.
        if len(start_loc) != 2 or len(block_size) != 2:
            raise ValueError("The length start_loc/block_size must be 2.")
        xoff, yoff = start_loc
        xsize, ysize = block_size
        if (xoff < 0 or xoff > self.width) or (yoff < 0 or yoff > self.height):
            raise ValueError(
                "start_loc must be within [0-{0}, 0-{1}].".format(str(self.width), str(self.height)))
        if xoff + xsize > self.width:
            xsize = self.width - xoff
        if yoff + ysize > self.height:
            ysize = self.height - yoff
        ima = self.__getAarray([int(xoff), int(yoff), int(xsize), int(ysize)])
        h, w = ima.shape[:2] if len(ima.shape) == 3 else ima.shape
        if self.bands != 1:
            tmp = np.zeros((block_size[0], block_size[1], self.bands), dtype=ima.dtype)
            tmp[:h, :w, :] = ima
        else:
            tmp = np.zeros((block_size[0], block_size[1]), dtype=ima.dtype)
            tmp[:h, :w] = ima
        return tmp | [
"numpy.stack",
"numpy.zeros",
"os.path.exists",
"gdal.Open",
"dslpy.utils.raster2uint8.rasterToUint8"
] | [((975, 991), 'os.path.exists', 'osp.exists', (['path'], {}), '(path)\n', (985, 991), True, 'import os.path as osp\n'), ((1054, 1069), 'gdal.Open', 'gdal.Open', (['path'], {}), '(path)\n', (1063, 1069), False, 'import gdal\n'), ((3677, 3705), 'numpy.stack', 'np.stack', (['band_array'], {'axis': '(0)'}), '(band_array, axis=0)\n', (3685, 3705), True, 'import numpy as np\n'), ((3908, 3926), 'dslpy.utils.raster2uint8.rasterToUint8', 'rasterToUint8', (['ima'], {}), '(ima)\n', (3921, 3926), False, 'from dslpy.utils.raster2uint8 import rasterToUint8\n'), ((4894, 4963), 'numpy.zeros', 'np.zeros', (['(block_size[0], block_size[1], self.bands)'], {'dtype': 'ima.dtype'}), '((block_size[0], block_size[1], self.bands), dtype=ima.dtype)\n', (4902, 4963), True, 'import numpy as np\n'), ((5032, 5089), 'numpy.zeros', 'np.zeros', (['(block_size[0], block_size[1])'], {'dtype': 'ima.dtype'}), '((block_size[0], block_size[1]), dtype=ima.dtype)\n', (5040, 5089), True, 'import numpy as np\n')] |
# Should be run with pytest:
# > python3 -m pytest
import numpy
import pytest
import ship
import utility
from game_engine import handleAttack
from simple_agent import (SimpleAgent)
from world_state import (WorldState)
# Initialize ships from the test ship list
# NOTE(review): parseShips appears to return the CSV column keys and a
# name -> template mapping -- confirm against utility.parseShips.
keys, ship_templates = utility.parseShips('data/test_ships.csv')
# Test the defense tokens by comparing the results of the test ships with and without those tokens
def a_vs_b(ship_a, ship_b, trials, attack_range):
    """This function calculates the average time to destruction when a shoots at b.

    Args:
        ship_a (Ship): Attacking ship (its front hull zone is used).
        ship_b (Ship): Defending ship (its front hull zone is used).
        trials (int): Number of trials in average calculation.
        attack_range (str): Attack range ('short', 'medium' or 'long').
    """
    agent = SimpleAgent()
    attack_counts = []
    for _ in range(trials):
        # Fresh defender for every simulated engagement.
        ship_b.reset()
        world_state = WorldState()
        world_state.addShip(ship_a, 0)
        world_state.addShip(ship_b, 1)
        attacks = 0
        # Keep attacking until the defender's damage reaches its hull value.
        while ship_b.damage_cards() < ship_b.hull():
            attacks += 1
            # Handle the attack and receive the updated world state
            world_state = handleAttack(world_state=world_state, attacker=(ship_a, "front"),
                                       defender=(ship_b, "front"), attack_range=attack_range,
                                       offensive_agent=agent, defensive_agent=agent)
        attack_counts.append(attacks)
    return numpy.array(attack_counts).mean()
def test_brace():
    """Test that brace increases the number of attacks required to destroy a ship."""
    attacker = ship.Ship(name="Attacker", template=ship_templates["Attacker"], upgrades=[], player_number=1)

    no_brace = ship.Ship(name="No Defense Tokens", template=ship_templates["No Defense Tokens"], upgrades=[], player_number=2)
    one_brace = ship.Ship(name="Single Brace", template=ship_templates["Single Brace"], upgrades=[], player_number=2)
    two_brace = ship.Ship(name="Double Brace", template=ship_templates["Double Brace"], upgrades=[], player_number=2)

    # Test with 1000 trials to compensate for the natural variability in rolls.
    # BUG FIX: the original only asserted once after both loops, so the
    # long/medium results were never checked and the final assertions mixed
    # values computed at different ranges. Assert inside each loop instead.
    for attack_range in ['long', 'medium']:
        no_brace_attacks = a_vs_b(attacker, no_brace, 1000, attack_range)
        one_brace_attacks = a_vs_b(attacker, one_brace, 1000, attack_range)
        two_brace_attacks = a_vs_b(attacker, two_brace, 1000, attack_range)
        assert(no_brace_attacks < one_brace_attacks)
        assert(one_brace_attacks < two_brace_attacks)

    # Only test brace vs no brace at short range since with the test setup the ships reaches 0 hull
    # before spending all of the brace tokens.
    for attack_range in ['short']:
        no_brace_attacks = a_vs_b(attacker, no_brace, 1000, attack_range)
        one_brace_attacks = a_vs_b(attacker, one_brace, 1000, attack_range)
        assert(no_brace_attacks < one_brace_attacks)
#def test_scatter():
# """Test that scatter increases the number of attacks required to destroy a ship."""
#
# attacker = ship.Ship(name="Attacker", template=ship_templates["Attacker"], upgrades=[], player_number=1)
#
# no_scatter = ship.Ship(name="No Defense Tokens", template=ship_templates["No Defense Tokens"], upgrades=[], player_number=2)
# two_scatter = ship.Ship(name="Double Scatter", template=ship_templates["Double Scatter"], upgrades=[], player_number=2)
#
# # Test with 1000 trials to compensate for the natural variability in rolls
# for attack_range in ['long', 'medium', 'short']:
# no_scatter_attacks = a_vs_b(attacker, no_scatter, 1000, attack_range)
# two_scatter_attacks = a_vs_b(attacker, two_scatter, 1000, attack_range)
#
# assert(no_scatter_attacks < two_scatter_attacks)
#
#
#def test_evade():
# """Test that evade increases the number of attacks required to destroy a ship at long or medium range."""
#
# attacker = ship.Ship(name="Attacker", template=ship_templates["Attacker"], upgrades=[], player_number=1)
#
# no_evade = ship.Ship(name="No Defense Tokens", template=ship_templates["No Defense Tokens"], upgrades=[], player_number=2)
# two_evade = ship.Ship(name="Double Evade", template=ship_templates["Double Evade"], upgrades=[], player_number=2)
#
# # Test with 1000 trials to compensate for the natural variability in rolls
# no_evade_attacks_long = a_vs_b(attacker, no_evade, 1000, "long")
# no_evade_attacks_medium = a_vs_b(attacker, no_evade, 1000, "medium")
# no_evade_attacks_short = a_vs_b(attacker, no_evade, 1000, "short")
# two_evade_attacks_long = a_vs_b(attacker, two_evade, 1000, "long")
# two_evade_attacks_medium = a_vs_b(attacker, two_evade, 1000, "medium")
# two_evade_attacks_short = a_vs_b(attacker, two_evade, 1000, "short")
#
# # Evades should increase the time to destruction
# assert(no_evade_attacks_long < two_evade_attacks_long)
# assert(no_evade_attacks_medium < two_evade_attacks_medium)
# # Evades do not work at short range
# assert(pytest.approx(two_evade_attacks_short, 0.1) == no_evade_attacks_short)
| [
"ship.Ship",
"game_engine.handleAttack",
"utility.parseShips",
"numpy.array",
"simple_agent.SimpleAgent",
"world_state.WorldState"
] | [((287, 328), 'utility.parseShips', 'utility.parseShips', (['"""data/test_ships.csv"""'], {}), "('data/test_ships.csv')\n", (305, 328), False, 'import utility\n'), ((831, 844), 'simple_agent.SimpleAgent', 'SimpleAgent', ([], {}), '()\n', (842, 844), False, 'from simple_agent import SimpleAgent\n'), ((1546, 1570), 'numpy.array', 'numpy.array', (['roll_counts'], {}), '(roll_counts)\n', (1557, 1570), False, 'import numpy\n'), ((1721, 1818), 'ship.Ship', 'ship.Ship', ([], {'name': '"""Attacker"""', 'template': "ship_templates['Attacker']", 'upgrades': '[]', 'player_number': '(1)'}), "(name='Attacker', template=ship_templates['Attacker'], upgrades=[],\n player_number=1)\n", (1730, 1818), False, 'import ship\n'), ((1831, 1947), 'ship.Ship', 'ship.Ship', ([], {'name': '"""No Defense Tokens"""', 'template': "ship_templates['No Defense Tokens']", 'upgrades': '[]', 'player_number': '(2)'}), "(name='No Defense Tokens', template=ship_templates[\n 'No Defense Tokens'], upgrades=[], player_number=2)\n", (1840, 1947), False, 'import ship\n'), ((1959, 2064), 'ship.Ship', 'ship.Ship', ([], {'name': '"""Single Brace"""', 'template': "ship_templates['Single Brace']", 'upgrades': '[]', 'player_number': '(2)'}), "(name='Single Brace', template=ship_templates['Single Brace'],\n upgrades=[], player_number=2)\n", (1968, 2064), False, 'import ship\n'), ((2077, 2182), 'ship.Ship', 'ship.Ship', ([], {'name': '"""Double Brace"""', 'template': "ship_templates['Double Brace']", 'upgrades': '[]', 'player_number': '(2)'}), "(name='Double Brace', template=ship_templates['Double Brace'],\n upgrades=[], player_number=2)\n", (2086, 2182), False, 'import ship\n'), ((960, 972), 'world_state.WorldState', 'WorldState', ([], {}), '()\n', (970, 972), False, 'from world_state import WorldState\n'), ((1247, 1422), 'game_engine.handleAttack', 'handleAttack', ([], {'world_state': 'world_state', 'attacker': "(ship_a, 'front')", 'defender': "(ship_b, 'front')", 'attack_range': 'attack_range', 
'offensive_agent': 'agent', 'defensive_agent': 'agent'}), "(world_state=world_state, attacker=(ship_a, 'front'), defender=\n (ship_b, 'front'), attack_range=attack_range, offensive_agent=agent,\n defensive_agent=agent)\n", (1259, 1422), False, 'from game_engine import handleAttack\n')] |
"""
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://opensource.org/licenses/MIT
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
This code follows the work of -- <NAME>., <NAME>., <NAME>., Calvet,
<NAME>., <NAME>., <NAME>., ... & <NAME>. (2008). From near-surface to
root-zone soil moisture using an exponential filter: an assessment of the method
based on in-situ observations and model simulations. Hydrology and Earth System
Sciences, 12(6), 1323-1337.
Authors: <NAME>, <NAME>, <NAME>
Contact: <EMAIL>
Copyright (c) 2019, Authors
"""
import os
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pylab as plt
import matplotlib.dates as mdates
import seaborn as sns
def nse(df):
    """
    Nash-Sutcliffe model efficiency coefficient
    Args: df (pandas.DataFrame): dataframe with 'simulation' and 'observation' columns
    Returns: out (float): calculated NSE value (1.0 is a perfect match)
    """
    clean = df.dropna()
    sim = clean['simulation']
    obs = clean['observation']
    residual_ss = np.nansum((sim - obs) ** 2)
    variance_ss = np.nansum((obs - np.nanmean(obs)) ** 2)
    return 1 - residual_ss / variance_ss
def rmse(df):
    """
    Root mean square error function
    Args: df (pandas.DataFrame): dataframe with 'simulation' and 'observation' columns
    Returns: out (float): calculated RMSE value
    """
    clean = df.dropna()
    squared_error = (clean['simulation'] - clean['observation']) ** 2
    return np.sqrt(np.nanmean(squared_error))
def r(df):
    """
    Pearson correlation coefficient function
    Args: df (pandas.DataFrame): dataframe with 'simulation' and 'observation' columns
    Returns: out (float): calculated correlation coefficient value
    """
    clean = df.dropna()
    # pearsonr returns (coefficient, p-value); only the coefficient is needed.
    coefficient, _ = stats.pearsonr(clean['simulation'], clean['observation'])
    return coefficient
def bias(df):
    """
    Bias function
    Args: df (pandas.DataFrame): dataframe with 'simulation' and 'observation' columns
    Returns: out (float): mean of (simulation - observation)
    """
    clean = df.dropna()
    return np.nanmean(clean['simulation'] - clean['observation'])
def ubRmse(df):
    """
    Un-biased RMSE function
    Args: df (pandas.DataFrame): dataframe with 'simulation' and 'observation' columns
    Returns: out (float): sqrt(RMSE^2 - bias^2), rounded to 3 decimals
    """
    total_rmse = rmse(df)
    mean_bias = bias(df)
    return np.round(np.sqrt(total_rmse ** 2 - mean_bias ** 2), 3)
def calc_Topt(sur, obs, Tvals, objfunc='nse'):
    """
    Function to calibrate the T parameter using a brute-force method
    Args: sur (pandas.Series): pandas series of the surface soil moisture
          obs (pandas.Series): pandas series of the soil moisture at layer x to calibrate
          Tvals (list,tuple,set,np.array): sequence of values to test for optimal value
    Kwargs: objfunc (string): objective function used to search for optimal value;
                options: "nse","rmse","bias","r",and "ubrmse"; default: "nse"
    Returns: out (dict): dictionary with the optimal T value keyed at 'T' and the
                objective function value keyed at 'objval'
    """
    objOpts = dict(nse=nse, rmse=rmse, bias=bias, r=r, ubrmse=ubRmse)
    objectiveFunc = objOpts[objfunc]
    # BUG FIX: materialize Tvals so sets/generators (allowed per docstring)
    # can be indexed by position when picking the best candidate below.
    Tvals = list(Tvals)
    df = pd.concat([sur, obs], axis=1)
    df.columns = ('surface', 'depth')
    df.dropna(inplace=True)
    results = []
    for T in Tvals:
        # Simulate the deeper layer from the surface series for this T.
        simulated = expFilter(df['surface'], T=T)
        tempDf = pd.concat([simulated, df['depth']], axis=1)
        tempDf.columns = ('simulation', 'observation')
        results.append(objectiveFunc(tempDf))
    # NSE and correlation are maximized; error metrics are minimized.
    if objfunc in ('nse', 'r'):
        best = np.array(results).argmax()
        objVal = np.nanmax(results)
    else:
        best = np.array(results).argmin()
        objVal = np.nanmin(results)
    return dict(T=Tvals[best], objval=objVal)
def expFilter(series, T=1):
    """
    Function to calculate the exponential filter function for a time series
    Expects the soil moisture to be effective soil moisture
    Args: series (pandas.Series): surface observations with a datetime index
    Kwargs: T (int): integer value for T parameter used in the exponential filter
    Returns: out (pandas.Series): series of simulated soil moisture named 'simulation'
    """
    # BUG FIX: removed the dead 'sim = series.copy()' assignment (it was
    # immediately overwritten) and replaced the back-to-back if tests on
    # 'gap' with a single if/else.
    K = 1
    SWIn = series.iloc[0]
    sim = []
    for i in range(series.size):
        if i == 0:
            gap = 1
        else:
            # Gap between consecutive observations, in whole days.
            dd = series.index[i] - series.index[i-1]
            gap = dd // np.timedelta64(1, 'D')
        p = series.iloc[i]
        if gap <= 12:
            recurSWI = SWIn + K * (p - SWIn)
            K = K / (K + np.exp(-1 * gap / T))
        else:
            # Data gap longer than 12 days: restart the filter from the
            # previous observation.
            K = 1
            SWIn = series.iloc[i-1]
            recurSWI = SWIn + K * (p - SWIn)
        SWIn = recurSWI
        sim.append(recurSWI)
    return pd.Series(sim, index=series.index, name='simulation')
def normalize(series, lower=None, upper=None):
    """
    Function to normalize values between a lower and upper bounds
    Args: series (np.array | pd.Series | pd.DataFrame): continuous values to normalize
    Kwargs: lower (float): lower bound to normalize to; default: None and will use
                the series minimum value
            upper (float): upper bound to normalize to; default: None and will use
                the series maximum value
    Returns: normalized series with values 0-1
    """
    # BUG FIX: use identity comparison with None -- '== None' is both
    # unidiomatic and ambiguous/broken when an array is passed as a bound.
    if lower is None:
        lower = np.nanmin(series)
    if upper is None:
        upper = np.nanmax(series)
    return (series - lower) / (upper - lower)
def make_plot(df,plotType='scatter',outFile=None,**kwargs):
    """
    Function to create a plot of observed and simulated time series
    Args: df (pandas.DataFrame): pandas dataframe with 'simulation' and 'observation' columns
    Kwargs: plotType (string): type of plot to create; options: "scatter", "series";
                default: scatter
            outFile (string): output file path to save figure to; if outFile is
                specified then the plot will not show; default: None
            **kwargs (dict): keyword arguments for saving matplotlib figure
    Returns: None
    Raises: NotImplementedError for any other plotType value
    """
    # Compute Time series statistics
    ns = np.round(nse(df),2) # NS efficiency
    rms = np.round(rmse(df),3) # RMSE
    r2 = np.round(r(df)**2,3) # R^2
    b = np.round(bias(df),3) # Bias
    ub = np.round(np.sqrt(rms**2-b**2),3) # un-biased RMSE from the rounded stats
    x,y = df['observation'].values,df['simulation'].values
    if plotType == "scatter":
        # Scatter of observed vs simulated with a regression line.
        sns.regplot(x,y, ci = None, truncate = False)
        plt.ylim(0,1)
        plt.xlim(0,1)
        plt.xlabel('SCAN')
        plt.ylabel('Exponential Filter')
        # Anchor for the statistics text block (axes data coordinates).
        xTxt = 0.82
        yTxt = 0.05
    elif plotType == 'series':
        # Overlaid time series of simulation and observation.
        plt.plot(y,ls = '--', color = 'b',label = 'simulation')
        plt.plot(x, color = 'r',label = 'observation')
        plt.ylim(0,1)
        plt.ylabel('SM (Effective)')
        plt.xlabel('Year')
        legend = plt.legend()
        xTxt = 0.82
        yTxt = 0.80
    else:
        raise NotImplementedError()
    # NOTE(review): np.sqrt(r2) reports |r| -- the sign of the correlation
    # is lost in the label; confirm that is acceptable.
    plt.text(xTxt,yTxt+(0.05*3),'R: '+str(np.sqrt(r2)))
    plt.text(xTxt,yTxt+(0.05*2),'RMSE: '+str(rms))
    plt.text(xTxt,yTxt+(0.05),'ubRMSE: '+str(ub))
    plt.text(xTxt,yTxt,'Bias: '+str(b))
    # plt.title('Layer ' + str(lyr+2))
    # Save to file when a path is given, otherwise display interactively.
    if outFile != None:
        plt.savefig(outFile,**kwargs)
        plt.close()
    else:
        plt.show()
    return
| [
"seaborn.regplot",
"numpy.exp",
"matplotlib.pylab.close",
"matplotlib.pylab.show",
"numpy.nanmean",
"matplotlib.pylab.legend",
"pandas.concat",
"matplotlib.pylab.savefig",
"numpy.nansum",
"scipy.stats.pearsonr",
"matplotlib.pylab.plot",
"matplotlib.pylab.xlim",
"pandas.Series",
"matplotlib... | [((2608, 2629), 'numpy.nanmean', 'np.nanmean', (['(sim - obs)'], {}), '(sim - obs)\n', (2618, 2629), True, 'import numpy as np\n'), ((3769, 3798), 'pandas.concat', 'pd.concat', (['[sur, obs]'], {'axis': '(1)'}), '([sur, obs], axis=1)\n', (3778, 3798), True, 'import pandas as pd\n'), ((5529, 5582), 'pandas.Series', 'pd.Series', (['sim'], {'index': 'series.index', 'name': '"""simulation"""'}), "(sim, index=series.index, name='simulation')\n", (5538, 5582), True, 'import pandas as pd\n'), ((1934, 1962), 'numpy.nanmean', 'np.nanmean', (['((sim - obs) ** 2)'], {}), '((sim - obs) ** 2)\n', (1944, 1962), True, 'import numpy as np\n'), ((2288, 2312), 'scipy.stats.pearsonr', 'stats.pearsonr', (['sim', 'obs'], {}), '(sim, obs)\n', (2302, 2312), False, 'from scipy import stats\n'), ((2912, 2938), 'numpy.sqrt', 'np.sqrt', (['(rms ** 2 - b ** 2)'], {}), '(rms ** 2 - b ** 2)\n', (2919, 2938), True, 'import numpy as np\n'), ((4026, 4065), 'pandas.concat', 'pd.concat', (["[Ttest, df['depth']]"], {'axis': '(1)'}), "([Ttest, df['depth']], axis=1)\n", (4035, 4065), True, 'import pandas as pd\n'), ((4337, 4355), 'numpy.nanmax', 'np.nanmax', (['results'], {}), '(results)\n', (4346, 4355), True, 'import numpy as np\n'), ((4425, 4443), 'numpy.nanmin', 'np.nanmin', (['results'], {}), '(results)\n', (4434, 4443), True, 'import numpy as np\n'), ((6170, 6187), 'numpy.nanmin', 'np.nanmin', (['series'], {}), '(series)\n', (6179, 6187), True, 'import numpy as np\n'), ((6227, 6244), 'numpy.nanmax', 'np.nanmax', (['series'], {}), '(series)\n', (6236, 6244), True, 'import numpy as np\n'), ((7136, 7162), 'numpy.sqrt', 'np.sqrt', (['(rms ** 2 - b ** 2)'], {}), '(rms ** 2 - b ** 2)\n', (7143, 7162), True, 'import numpy as np\n'), ((7259, 7301), 'seaborn.regplot', 'sns.regplot', (['x', 'y'], {'ci': 'None', 'truncate': '(False)'}), '(x, y, ci=None, truncate=False)\n', (7270, 7301), True, 'import seaborn as sns\n'), ((7313, 7327), 'matplotlib.pylab.ylim', 'plt.ylim', (['(0)', '(1)'], 
{}), '(0, 1)\n', (7321, 7327), True, 'import matplotlib.pylab as plt\n'), ((7335, 7349), 'matplotlib.pylab.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (7343, 7349), True, 'import matplotlib.pylab as plt\n'), ((7357, 7375), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""SCAN"""'], {}), "('SCAN')\n", (7367, 7375), True, 'import matplotlib.pylab as plt\n'), ((7384, 7416), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Exponential Filter"""'], {}), "('Exponential Filter')\n", (7394, 7416), True, 'import matplotlib.pylab as plt\n'), ((8082, 8112), 'matplotlib.pylab.savefig', 'plt.savefig', (['outFile'], {}), '(outFile, **kwargs)\n', (8093, 8112), True, 'import matplotlib.pylab as plt\n'), ((8120, 8131), 'matplotlib.pylab.close', 'plt.close', ([], {}), '()\n', (8129, 8131), True, 'import matplotlib.pylab as plt\n'), ((8150, 8160), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (8158, 8160), True, 'import matplotlib.pylab as plt\n'), ((1553, 1580), 'numpy.nansum', 'np.nansum', (['((sim - obs) ** 2)'], {}), '((sim - obs) ** 2)\n', (1562, 1580), True, 'import numpy as np\n'), ((7497, 7548), 'matplotlib.pylab.plot', 'plt.plot', (['y'], {'ls': '"""--"""', 'color': '"""b"""', 'label': '"""simulation"""'}), "(y, ls='--', color='b', label='simulation')\n", (7505, 7548), True, 'import matplotlib.pylab as plt\n'), ((7561, 7604), 'matplotlib.pylab.plot', 'plt.plot', (['x'], {'color': '"""r"""', 'label': '"""observation"""'}), "(x, color='r', label='observation')\n", (7569, 7604), True, 'import matplotlib.pylab as plt\n'), ((7616, 7630), 'matplotlib.pylab.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (7624, 7630), True, 'import matplotlib.pylab as plt\n'), ((7639, 7667), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""SM (Effective)"""'], {}), "('SM (Effective)')\n", (7649, 7667), True, 'import matplotlib.pylab as plt\n'), ((7676, 7694), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (7686, 7694), True, 'import matplotlib.pylab as 
plt\n'), ((7712, 7724), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (7722, 7724), True, 'import matplotlib.pylab as plt\n'), ((4293, 4310), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (4301, 4310), True, 'import numpy as np\n'), ((4381, 4398), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (4389, 4398), True, 'import numpy as np\n'), ((5152, 5174), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (5166, 5174), True, 'import numpy as np\n'), ((7855, 7866), 'numpy.sqrt', 'np.sqrt', (['r2'], {}), '(r2)\n', (7862, 7866), True, 'import numpy as np\n'), ((5296, 5316), 'numpy.exp', 'np.exp', (['(-1 * gap / T)'], {}), '(-1 * gap / T)\n', (5302, 5316), True, 'import numpy as np\n'), ((1592, 1607), 'numpy.nanmean', 'np.nanmean', (['obs'], {}), '(obs)\n', (1602, 1607), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class Conv2DBatchNorm(nn.Module):
    """Conv2d optionally followed by BatchNorm2d (no activation).

    When ``is_batchnorm`` is False the unit degenerates to a bare convolution.
    """

    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        is_batchnorm=True,
    ):
        super(Conv2DBatchNorm, self).__init__()
        layers = [
            nn.Conv2d(
                int(in_channels),
                int(n_filters),
                kernel_size=k_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                bias=bias,
            )
        ]
        if is_batchnorm:
            layers.append(nn.BatchNorm2d(int(n_filters)))
        self.cb_unit = nn.Sequential(*layers)

    def forward(self, inputs):
        # Run the conv (+ optional batch-norm) stack.
        return self.cb_unit(inputs)
class Conv2DGroupNorm(nn.Module):
    """Conv2d followed by GroupNorm (no activation)."""

    def __init__(
        self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16
    ):
        super(Conv2DGroupNorm, self).__init__()
        conv = nn.Conv2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        norm = nn.GroupNorm(n_groups, int(n_filters))
        self.cg_unit = nn.Sequential(conv, norm)

    def forward(self, inputs):
        # conv -> group-norm
        return self.cg_unit(inputs)
class Deconv2DBatchNorm(nn.Module):
    """ConvTranspose2d followed by BatchNorm2d (no activation)."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(Deconv2DBatchNorm, self).__init__()
        deconv = nn.ConvTranspose2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            stride=stride,
            padding=padding,
            bias=bias,
        )
        self.dcb_unit = nn.Sequential(deconv, nn.BatchNorm2d(int(n_filters)))

    def forward(self, inputs):
        # transposed conv -> batch-norm
        return self.dcb_unit(inputs)
class Conv2DBatchNormRelu(nn.Module):
    """Conv2d -> (optional BatchNorm2d) -> ReLU."""

    def __init__(
        self,
        in_channels,
        n_filters,
        k_size,
        stride,
        padding,
        bias=True,
        dilation=1,
        is_batchnorm=True,
    ):
        super(Conv2DBatchNormRelu, self).__init__()
        layers = [
            nn.Conv2d(
                int(in_channels),
                int(n_filters),
                kernel_size=k_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                bias=bias,
            )
        ]
        if is_batchnorm:
            layers.append(nn.BatchNorm2d(int(n_filters)))
        layers.append(nn.ReLU(inplace=True))
        self.cbr_unit = nn.Sequential(*layers)

    def forward(self, inputs):
        # conv (+ optional bn) -> relu
        return self.cbr_unit(inputs)
class Conv2DGroupNormRelu(nn.Module):
    """Conv2d -> GroupNorm -> ReLU."""

    def __init__(
        self, in_channels, n_filters, k_size, stride, padding, bias=True, dilation=1, n_groups=16
    ):
        super(Conv2DGroupNormRelu, self).__init__()
        conv = nn.Conv2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
        )
        self.cgr_unit = nn.Sequential(
            conv, nn.GroupNorm(n_groups, int(n_filters)), nn.ReLU(inplace=True)
        )

    def forward(self, inputs):
        # conv -> group-norm -> relu
        return self.cgr_unit(inputs)
class Deconv2DBatchNormRelu(nn.Module):
    """ConvTranspose2d -> BatchNorm2d -> ReLU."""

    def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
        super(Deconv2DBatchNormRelu, self).__init__()
        deconv = nn.ConvTranspose2d(
            int(in_channels),
            int(n_filters),
            kernel_size=k_size,
            stride=stride,
            padding=padding,
            bias=bias,
        )
        self.dcbr_unit = nn.Sequential(
            deconv, nn.BatchNorm2d(int(n_filters)), nn.ReLU(inplace=True)
        )

    def forward(self, inputs):
        # transposed conv -> batch-norm -> relu
        return self.dcbr_unit(inputs)
class SegnetDown2(nn.Module):
    """SegNet encoder stage: two conv-bn-relu layers + 2x2 max-pool with indices.

    Returns the pooled features, the pooling indices, and the pre-pool shape
    (both needed later by the matching unpooling decoder stage).
    """

    def __init__(self, in_size, out_size):
        super(SegnetDown2, self).__init__()
        self.conv1 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
        self.conv2 = Conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)
        self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)

    def forward(self, inputs):
        features = self.conv2(self.conv1(inputs))
        unpooled_shape = features.size()
        pooled, indices = self.maxpool_with_argmax(features)
        return pooled, indices, unpooled_shape
class SegnetDown3(nn.Module):
    """SegNet encoder stage: three conv-bn-relu layers + 2x2 max-pool with indices.

    Returns the pooled features, the pooling indices, and the pre-pool shape
    (both needed later by the matching unpooling decoder stage).
    """

    def __init__(self, in_size, out_size):
        super(SegnetDown3, self).__init__()
        self.conv1 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)
        self.conv2 = Conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)
        self.conv3 = Conv2DBatchNormRelu(out_size, out_size, 3, 1, 1)
        self.maxpool_with_argmax = nn.MaxPool2d(2, 2, return_indices=True)

    def forward(self, inputs):
        features = self.conv3(self.conv2(self.conv1(inputs)))
        unpooled_shape = features.size()
        pooled, indices = self.maxpool_with_argmax(features)
        return pooled, indices, unpooled_shape
class SegnetUp2(nn.Module):
    """SegNet decoder stage: unpool with stored indices, then two conv-bn-relu layers."""

    def __init__(self, in_size, out_size):
        super(SegnetUp2, self).__init__()
        self.unpool = nn.MaxUnpool2d(2, 2)
        self.conv1 = Conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
        self.conv2 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)

    def forward(self, inputs, indices, output_shape):
        # Reverse the matching encoder max-pool using its saved indices/shape.
        unpooled = self.unpool(input=inputs, indices=indices, output_size=output_shape)
        return self.conv2(self.conv1(unpooled))
class SegnetUp3(nn.Module):
    """SegNet decoder stage: unpool with stored indices, then three conv-bn-relu layers."""

    def __init__(self, in_size, out_size):
        super(SegnetUp3, self).__init__()
        self.unpool = nn.MaxUnpool2d(2, 2)
        self.conv1 = Conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
        self.conv2 = Conv2DBatchNormRelu(in_size, in_size, 3, 1, 1)
        self.conv3 = Conv2DBatchNormRelu(in_size, out_size, 3, 1, 1)

    def forward(self, inputs, indices, output_shape):
        # Reverse the matching encoder max-pool using its saved indices/shape.
        unpooled = self.unpool(input=inputs, indices=indices, output_size=output_shape)
        return self.conv3(self.conv2(self.conv1(unpooled)))
class ResidualBlock(nn.Module):
    """Basic residual block: conv-bn-relu -> conv-bn, plus an optional
    downsampled shortcut, with a final ReLU."""

    expansion = 1

    def __init__(self, in_channels, n_filters, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.convbnrelu1 = Conv2DBatchNormRelu(in_channels, n_filters, 3, stride, 1, bias=False)
        self.convbn2 = Conv2DBatchNorm(n_filters, n_filters, 3, 1, 1, bias=False)
        self.downsample = downsample
        self.stride = stride
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return relu(main_branch(x) + shortcut(x))."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.convbn2(self.convbnrelu1(x))
        return self.relu(out + shortcut)
class ResidualBottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    Bug fix: the original referenced ``nn.Conv2DBatchNorm``, which does not
    exist in ``torch.nn``, and also omitted the required positional
    ``stride``/``padding`` arguments of the local :class:`Conv2DBatchNorm`
    helper. The helper is now used with explicit ``stride``/``padding``.
    """

    expansion = 4

    def __init__(self, in_channels, n_filters, stride=1, downsample=None):
        super(ResidualBottleneck, self).__init__()
        # 1x1 channel reduction.
        self.convbn1 = Conv2DBatchNorm(
            in_channels, n_filters, k_size=1, stride=1, padding=0, bias=False
        )
        # 3x3 spatial conv; carries the block's stride.
        self.convbn2 = Conv2DBatchNorm(
            n_filters, n_filters, k_size=3, padding=1, stride=stride, bias=False
        )
        # 1x1 channel expansion by the bottleneck factor (4).
        self.convbn3 = Conv2DBatchNorm(
            n_filters, n_filters * 4, k_size=1, stride=1, padding=0, bias=False
        )
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(bottleneck(x) + shortcut(x))."""
        residual = x
        out = self.convbn1(x)
        out = self.convbn2(out)
        out = self.convbn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class FRRU(nn.Module):
    """
    Full Resolution Residual Unit for FRRN.

    Fuses the full-resolution residual stream ``z`` (32 channels) into the
    pooled stream ``y`` via two conv units, then projects the result back
    onto the residual stream with a 1x1 conv.

    Fix: the deprecated ``F.upsample`` call is replaced by ``F.interpolate``
    (same behaviour; the rest of this file already uses ``F.interpolate``).
    """

    def __init__(self, prev_channels, out_channels, scale, group_norm=False, n_groups=None):
        super(FRRU, self).__init__()
        self.scale = scale
        self.prev_channels = prev_channels
        self.out_channels = out_channels
        self.group_norm = group_norm
        self.n_groups = n_groups

        # Choose the normalisation flavour; the residual stream always has 32 channels.
        if self.group_norm:
            conv_unit = Conv2DGroupNormRelu
            self.conv1 = conv_unit(
                prev_channels + 32,
                out_channels,
                k_size=3,
                stride=1,
                padding=1,
                bias=False,
                n_groups=self.n_groups,
            )
            self.conv2 = conv_unit(
                out_channels,
                out_channels,
                k_size=3,
                stride=1,
                padding=1,
                bias=False,
                n_groups=self.n_groups,
            )
        else:
            conv_unit = Conv2DBatchNormRelu
            self.conv1 = conv_unit(
                prev_channels + 32, out_channels, k_size=3, stride=1, padding=1, bias=False
            )
            self.conv2 = conv_unit(
                out_channels, out_channels, k_size=3, stride=1, padding=1, bias=False
            )
        # 1x1 projection from the pooled stream back to the 32-channel residual stream.
        self.conv_res = nn.Conv2d(out_channels, 32, kernel_size=1, stride=1, padding=0)

    def forward(self, y, z):
        # Downsample z to y's resolution and concatenate along channels.
        x = torch.cat([y, nn.MaxPool2d(self.scale, self.scale)(z)], dim=1)
        y_prime = self.conv1(x)
        y_prime = self.conv2(y_prime)

        x = self.conv_res(y_prime)
        upsample_size = torch.Size([_s * self.scale for _s in y_prime.shape[-2:]])
        # F.interpolate replaces the deprecated F.upsample (identical result).
        x = F.interpolate(x, size=upsample_size, mode="nearest")
        z_prime = z + x

        return y_prime, z_prime
class RU(nn.Module):
    """
    Residual Unit for FRRN: two conv units whose output is added back to the input.
    """

    def __init__(self, channels, kernel_size=3, strides=1, group_norm=False, n_groups=None):
        super(RU, self).__init__()
        self.group_norm = group_norm
        self.n_groups = n_groups

        if self.group_norm:
            # Group-norm flavour: conv-gn-relu followed by conv-gn.
            self.conv1 = Conv2DGroupNormRelu(
                channels,
                channels,
                k_size=kernel_size,
                stride=strides,
                padding=1,
                bias=False,
                n_groups=self.n_groups,
            )
            self.conv2 = Conv2DGroupNorm(
                channels,
                channels,
                k_size=kernel_size,
                stride=strides,
                padding=1,
                bias=False,
                n_groups=self.n_groups,
            )
        else:
            # Batch-norm flavour: conv-bn-relu followed by conv-bn.
            self.conv1 = Conv2DBatchNormRelu(
                channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False
            )
            self.conv2 = Conv2DBatchNorm(
                channels, channels, k_size=kernel_size, stride=strides, padding=1, bias=False
            )

    def forward(self, x):
        """Return conv2(conv1(x)) + x."""
        return self.conv2(self.conv1(x)) + x
class ResidualConvUnit(nn.Module):
    """Residual conv unit (RefineNet): relu-conv-relu-conv plus identity shortcut.

    Bug fix: the original 3x3 convolutions used no padding, so each conv
    shrank the feature map by 2 px and the residual addition ``x + input``
    always failed with a shape mismatch. ``padding=kernel_size // 2`` keeps
    the spatial size so the addition is valid.

    NOTE(review): the first layer is an in-place ReLU, so the caller's input
    tensor is mutated and the shortcut is effectively relu(x) — this matches
    the original structure; confirm it is intended.
    """

    def __init__(self, channels, kernel_size=3):
        super(ResidualConvUnit, self).__init__()
        pad = kernel_size // 2  # "same" padding for odd kernel sizes
        self.residual_conv_unit = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=kernel_size, padding=pad),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=kernel_size, padding=pad),
        )

    def forward(self, x):
        """Return residual_conv_unit(x) + x."""
        input = x
        x = self.residual_conv_unit(x)
        return x + input
class MultiResolutionFusion(nn.Module):
    """RefineNet multi-resolution fusion: project both inputs to ``channels``
    with 3x3 convs, upsample each branch to a common resolution and sum them.

    Bug fixes: the 3x3 projection convs now use ``padding=1`` so the two
    upsampled branches have matching spatial sizes for the addition (without
    padding each conv shrank its map by 2 px and the sum shape-mismatched),
    and the deprecated ``F.upsample`` is replaced by ``F.interpolate``.
    """

    def __init__(self, channels, up_scale_high, up_scale_low, high_shape, low_shape):
        super(MultiResolutionFusion, self).__init__()
        self.up_scale_high = up_scale_high
        self.up_scale_low = up_scale_low

        # high_shape/low_shape are (N, C, H, W); index 1 is the channel count.
        self.conv_high = nn.Conv2d(high_shape[1], channels, kernel_size=3, padding=1)
        if low_shape is not None:
            self.conv_low = nn.Conv2d(low_shape[1], channels, kernel_size=3, padding=1)

    def forward(self, x_high, x_low):
        """Fuse the two branches; if ``x_low`` is None only the high branch is used."""
        high_upsampled = F.interpolate(
            self.conv_high(x_high), scale_factor=self.up_scale_high, mode="bilinear"
        )

        if x_low is None:
            return high_upsampled

        low_upsampled = F.interpolate(
            self.conv_low(x_low), scale_factor=self.up_scale_low, mode="bilinear"
        )

        return low_upsampled + high_upsampled
class ChainedResidualPooling(nn.Module):
    """RefineNet chained residual pooling: relu -> 5x5 max-pool (stride 1,
    pad 2, size-preserving) -> 3x3 conv, added back to the input.

    Bug fix: the 3x3 conv originally had no padding, shrinking the feature
    map by 2 px and making the residual addition ``x + input`` fail;
    ``padding=1`` keeps the spatial size.

    NOTE(review): the first layer is an in-place ReLU, so the shortcut is
    effectively relu(x) — this matches the original structure.
    """

    def __init__(self, channels, input_shape):
        super(ChainedResidualPooling, self).__init__()
        # input_shape is (N, C, H, W); index 1 is the incoming channel count.
        self.chained_residual_pooling = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.MaxPool2d(5, 1, 2),
            nn.Conv2d(input_shape[1], channels, kernel_size=3, padding=1),
        )

    def forward(self, x):
        """Return chained_residual_pooling(x) + x."""
        input = x
        x = self.chained_residual_pooling(x)
        return x + input
class PyramidPooling(nn.Module):
    """Pyramid Pooling Module used by PSPNet and ICNet.

    Pools the input at several grid resolutions (``pool_sizes``), optionally
    passes each pooled map through a per-level 1x1 conv-bn-relu, upsamples
    each back to the input resolution, and fuses the scales either by channel
    concatenation (``fusion_mode="cat"``, PSPNet style, input included) or by
    element-wise sum (ICNet style).
    """
    def __init__(
        self, in_channels, pool_sizes, model_name="pspnet", fusion_mode="cat", is_batchnorm=True
    ):
        super(PyramidPooling, self).__init__()
        bias = not is_batchnorm
        # One 1x1 conv per pyramid level; channels are divided by the number of
        # levels so the concatenated output roughly doubles the input channels.
        self.paths = []
        for i in range(len(pool_sizes)):
            self.paths.append(
                Conv2DBatchNormRelu(
                    in_channels,
                    int(in_channels / len(pool_sizes)),
                    1,
                    1,
                    0,
                    bias=bias,
                    is_batchnorm=is_batchnorm,
                )
            )
        self.path_module_list = nn.ModuleList(self.paths)
        self.pool_sizes = pool_sizes
        self.model_name = model_name
        self.fusion_mode = fusion_mode
    def forward(self, x):
        """Fuse multi-scale average-pooled features of ``x`` at its own resolution."""
        h, w = x.shape[2:]
        # Pooling kernel/stride per level: derived from the input size in
        # training (and always for pspnet); hard-coded for eval-mode icnet.
        if self.training or self.model_name != "icnet":  # general settings or pspnet
            k_sizes = []
            strides = []
            for pool_size in self.pool_sizes:
                k_sizes.append((int(h / pool_size), int(w / pool_size)))
                strides.append((int(h / pool_size), int(w / pool_size)))
        else:  # eval mode and icnet: pre-trained for 1025 x 2049
            k_sizes = [(8, 15), (13, 25), (17, 33), (33, 65)]
            strides = [(5, 10), (10, 20), (16, 32), (33, 65)]
        if self.fusion_mode == "cat":  # pspnet: concat (including x)
            output_slices = [x]
            for i, (module, pool_size) in enumerate(zip(self.path_module_list, self.pool_sizes)):
                out = F.avg_pool2d(x, k_sizes[i], stride=strides[i], padding=0)
                # out = F.adaptive_avg_pool2d(x, output_size=(pool_size, pool_size))
                # icnet skips the per-level 1x1 conv entirely.
                if self.model_name != "icnet":
                    out = module(out)
                out = F.interpolate(out, size=(h, w), mode="bilinear", align_corners=True)
                output_slices.append(out)
            return torch.cat(output_slices, dim=1)
        else:  # icnet: element-wise sum (including x)
            pp_sum = x
            for i, (module, pool_size) in enumerate(zip(self.path_module_list, self.pool_sizes)):
                out = F.avg_pool2d(x, k_sizes[i], stride=strides[i], padding=0)
                # out = F.adaptive_avg_pool2d(x, output_size=(pool_size, pool_size))
                # icnet skips the per-level 1x1 conv entirely.
                if self.model_name != "icnet":
                    out = module(out)
                out = F.interpolate(out, size=(h, w), mode="bilinear", align_corners=True)
                pp_sum = pp_sum + out
            return pp_sum
class BottleNeckPSP(nn.Module):
    """PSPNet bottleneck block with a 1x1 projection shortcut (``cb4``)."""

    def __init__(
        self, in_channels, mid_channels, out_channels, stride, dilation=1, is_batchnorm=True
    ):
        super(BottleNeckPSP, self).__init__()
        bias = not is_batchnorm
        self.cbr1 = Conv2DBatchNormRelu(
            in_channels, mid_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm
        )
        # Dilated variant pads by the dilation rate so the spatial size is kept.
        pad = dil = dilation if dilation > 1 else 1
        self.cbr2 = Conv2DBatchNormRelu(
            mid_channels,
            mid_channels,
            3,
            stride=stride,
            padding=pad,
            bias=bias,
            dilation=dil,
            is_batchnorm=is_batchnorm,
        )
        self.cb3 = Conv2DBatchNorm(
            mid_channels, out_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm
        )
        self.cb4 = Conv2DBatchNorm(
            in_channels,
            out_channels,
            1,
            stride=stride,
            padding=0,
            bias=bias,
            is_batchnorm=is_batchnorm,
        )

    def forward(self, x):
        """Return relu(main_branch(x) + projected_shortcut(x))."""
        main = self.cb3(self.cbr2(self.cbr1(x)))
        shortcut = self.cb4(x)
        return F.relu(main + shortcut, inplace=True)
class BottleNeckIdentifyPSP(nn.Module):
    """PSPNet bottleneck block with an identity shortcut (no projection)."""

    def __init__(self, in_channels, mid_channels, stride, dilation=1, is_batchnorm=True):
        super(BottleNeckIdentifyPSP, self).__init__()
        bias = not is_batchnorm
        self.cbr1 = Conv2DBatchNormRelu(
            in_channels, mid_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm
        )
        # Dilated variant pads by the dilation rate so the spatial size is kept.
        pad = dil = dilation if dilation > 1 else 1
        self.cbr2 = Conv2DBatchNormRelu(
            mid_channels,
            mid_channels,
            3,
            stride=1,
            padding=pad,
            bias=bias,
            dilation=dil,
            is_batchnorm=is_batchnorm,
        )
        self.cb3 = Conv2DBatchNorm(
            mid_channels, in_channels, 1, stride=1, padding=0, bias=bias, is_batchnorm=is_batchnorm
        )

    def forward(self, x):
        """Return relu(main_branch(x) + x)."""
        main = self.cb3(self.cbr2(self.cbr1(x)))
        return F.relu(main + x, inplace=True)
class ResidualBlockPSP(nn.Module):
    """Stack of PSP bottleneck blocks: one projection block followed by
    ``n_blocks - 1`` identity blocks; ``include_range`` selects which part."""

    def __init__(
        self,
        n_blocks,
        in_channels,
        mid_channels,
        out_channels,
        stride,
        dilation=1,
        include_range="all",
        is_batchnorm=True,
    ):
        super(ResidualBlockPSP, self).__init__()

        # Dilated blocks never stride (spatial size is kept via dilation).
        if dilation > 1:
            stride = 1

        # residualBlockPSP = convBlockPSP + identityBlockPSPs
        blocks = []
        if include_range in ("all", "conv"):
            blocks.append(
                BottleNeckPSP(
                    in_channels,
                    mid_channels,
                    out_channels,
                    stride,
                    dilation,
                    is_batchnorm=is_batchnorm,
                )
            )
        if include_range in ("all", "identity"):
            blocks.extend(
                BottleNeckIdentifyPSP(
                    out_channels, mid_channels, stride, dilation, is_batchnorm=is_batchnorm
                )
                for _ in range(n_blocks - 1)
            )
        self.layers = nn.Sequential(*blocks)

    def forward(self, x):
        return self.layers(x)
class CascadeFeatureFusion(nn.Module):
    """ICNet cascade feature fusion.

    Upsamples the low-resolution branch 2x, passes it through a dilated 3x3
    conv-bn, projects the high-resolution branch with a 1x1 conv-bn, and sums
    the two with a ReLU. Additionally emits an auxiliary classification map
    from the upsampled low-resolution features (training-time loss only).
    """
    def __init__(
        self, n_classes, low_in_channels, high_in_channels, out_channels, is_batchnorm=True
    ):
        super(CascadeFeatureFusion, self).__init__()
        bias = not is_batchnorm
        # Dilated 3x3 conv on the upsampled low-res features (padding=2 with
        # dilation=2 preserves the spatial size).
        self.low_dilated_conv_bn = Conv2DBatchNorm(
            low_in_channels,
            out_channels,
            3,
            stride=1,
            padding=2,
            bias=bias,
            dilation=2,
            is_batchnorm=is_batchnorm,
        )
        # Auxiliary per-pixel classifier on the low-res branch.
        self.low_classifier_conv = nn.Conv2d(
            int(low_in_channels),
            int(n_classes),
            kernel_size=1,
            padding=0,
            stride=1,
            bias=True,
            dilation=1,
        )  # Train only
        # 1x1 projection of the high-res branch to the common channel count.
        self.high_proj_conv_bn = Conv2DBatchNorm(
            high_in_channels,
            out_channels,
            1,
            stride=1,
            padding=0,
            bias=bias,
            is_batchnorm=is_batchnorm,
        )
    def forward(self, x_low, x_high):
        """Return (fused_features, aux_low_res_class_scores)."""
        # Upsample the low-res branch 2x to match the high-res branch.
        x_low_upsampled = F.interpolate(
            x_low, size=get_interp_size(x_low, z_factor=2), mode="bilinear", align_corners=True
        )
        low_cls = self.low_classifier_conv(x_low_upsampled)
        low_fm = self.low_dilated_conv_bn(x_low_upsampled)
        high_fm = self.high_proj_conv_bn(x_high)
        high_fused_fm = F.relu(low_fm + high_fm, inplace=True)
        return high_fused_fm, low_cls
def get_interp_size(input, s_factor=1, z_factor=1):  # for caffe
    """Compute a caffe-style interpolation target size for ``input``.

    The spatial size (``input.shape[2:]``) is first shrunk by ``s_factor``
    (>= 1) and then zoomed by ``z_factor`` (>= 1); the result is truncated
    to an ``(h, w)`` tuple of ints.
    """
    out_h, out_w = input.shape[2:]

    # shrink (s_factor >= 1)
    out_h = (out_h - 1) / s_factor + 1
    out_w = (out_w - 1) / s_factor + 1

    # zoom (z_factor >= 1)
    out_h = out_h + out_h * (z_factor - 1)
    out_w = out_w + out_w * (z_factor - 1)

    return (int(out_h), int(out_w))
def interp(input, output_size, mode="bilinear"):
n, c, ih, iw = input.shape
oh, ow = output_size
# normalize to [-1, 1]
h = torch.arange(0, oh, dtype=torch.float, device=input.device) / (oh - 1) * 2 - 1
w = torch.arange(0, ow, dtype=torch.float, device=input.device) / (ow - 1) * 2 - 1
grid = torch.zeros(oh, ow, 2, dtype=torch.float, device=input.device)
grid[:, :, 0] = w.unsqueeze(0).repeat(oh, 1)
grid[:, :, 1] = h.unsqueeze(0).repeat(ow, 1).transpose(0, 1)
grid = grid.unsqueeze(0).repeat(n, 1, 1, 1) # grid.shape: [n, oh, ow, 2]
if input.is_cuda:
grid = grid.cuda()
return F.grid_sample(input, grid, mode=mode)
def get_upsampling_weight(in_channels, out_channels, kernel_size):
    """Build an (in, out, k, k) bilinear-interpolation kernel for upsampling layers."""
    factor = (kernel_size + 1) // 2
    # Odd kernels centre on a sample; even kernels centre between two samples.
    center = factor - 1 if kernel_size % 2 == 1 else factor - 0.5
    rows, cols = np.ogrid[:kernel_size, :kernel_size]
    kernel = (1 - abs(rows - center) / factor) * (1 - abs(cols - center) / factor)
    weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)
    # Only the matching (i, i) in/out channel pairs carry the bilinear kernel;
    # all cross-channel slices stay zero.
    weight[range(in_channels), range(out_channels), :, :] = kernel
    return torch.from_numpy(weight).float()
| [
"torch.nn.ReLU",
"torch.nn.functional.grid_sample",
"torch.nn.MaxPool2d",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"numpy.zeros",
"torch.cat",
"torch.nn.functional.upsample",
"torch.arange",
"torch.Size",
"torch.nn.Conv2DBatchNorm",
... | [((22155, 22217), 'torch.zeros', 'torch.zeros', (['oh', 'ow', '(2)'], {'dtype': 'torch.float', 'device': 'input.device'}), '(oh, ow, 2, dtype=torch.float, device=input.device)\n', (22166, 22217), False, 'import torch\n'), ((22471, 22508), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['input', 'grid'], {'mode': 'mode'}), '(input, grid, mode=mode)\n', (22484, 22508), True, 'import torch.nn.functional as F\n'), ((22913, 22999), 'numpy.zeros', 'np.zeros', (['(in_channels, out_channels, kernel_size, kernel_size)'], {'dtype': 'np.float64'}), '((in_channels, out_channels, kernel_size, kernel_size), dtype=np.\n float64)\n', (22921, 22999), True, 'import numpy as np\n'), ((4602, 4641), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {'return_indices': '(True)'}), '(2, 2, return_indices=True)\n', (4614, 4641), True, 'import torch.nn as nn\n'), ((5261, 5300), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {'return_indices': '(True)'}), '(2, 2, return_indices=True)\n', (5273, 5300), True, 'import torch.nn as nn\n'), ((5732, 5752), 'torch.nn.MaxUnpool2d', 'nn.MaxUnpool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (5746, 5752), True, 'import torch.nn as nn\n'), ((6268, 6288), 'torch.nn.MaxUnpool2d', 'nn.MaxUnpool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (6282, 6288), True, 'import torch.nn as nn\n'), ((7213, 7234), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (7220, 7234), True, 'import torch.nn as nn\n'), ((7713, 7777), 'torch.nn.Conv2DBatchNorm', 'nn.Conv2DBatchNorm', (['in_channels', 'n_filters'], {'k_size': '(1)', 'bias': '(False)'}), '(in_channels, n_filters, k_size=1, bias=False)\n', (7731, 7777), True, 'import torch.nn as nn\n'), ((7801, 7893), 'torch.nn.Conv2DBatchNorm', 'nn.Conv2DBatchNorm', (['n_filters', 'n_filters'], {'k_size': '(3)', 'padding': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(n_filters, n_filters, k_size=3, padding=1, stride=stride,\n bias=False)\n', (7819, 7893), True, 'import torch.nn as nn\n'), 
((7935, 8001), 'torch.nn.Conv2DBatchNorm', 'nn.Conv2DBatchNorm', (['n_filters', '(n_filters * 4)'], {'k_size': '(1)', 'bias': '(False)'}), '(n_filters, n_filters * 4, k_size=1, bias=False)\n', (7953, 8001), True, 'import torch.nn as nn\n'), ((8022, 8043), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (8029, 8043), True, 'import torch.nn as nn\n'), ((9761, 9824), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', '(32)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(out_channels, 32, kernel_size=1, stride=1, padding=0)\n', (9770, 9824), True, 'import torch.nn as nn\n'), ((10060, 10120), 'torch.Size', 'torch.Size', (['[(_s * self.scale) for _s in y_prime.shape[-2:]]'], {}), '([(_s * self.scale) for _s in y_prime.shape[-2:]])\n', (10070, 10120), False, 'import torch\n'), ((10131, 10180), 'torch.nn.functional.upsample', 'F.upsample', (['x'], {'size': 'upsample_size', 'mode': '"""nearest"""'}), "(x, size=upsample_size, mode='nearest')\n", (10141, 10180), True, 'import torch.nn.functional as F\n'), ((12328, 12377), 'torch.nn.Conv2d', 'nn.Conv2d', (['high_shape[1]', 'channels'], {'kernel_size': '(3)'}), '(high_shape[1], channels, kernel_size=3)\n', (12337, 12377), True, 'import torch.nn as nn\n'), ((14030, 14055), 'torch.nn.ModuleList', 'nn.ModuleList', (['self.paths'], {}), '(self.paths)\n', (14043, 14055), True, 'import torch.nn as nn\n'), ((17479, 17516), 'torch.nn.functional.relu', 'F.relu', (['(conv + residual)'], {'inplace': '(True)'}), '(conv + residual, inplace=True)\n', (17485, 17516), True, 'import torch.nn.functional as F\n'), ((18774, 18808), 'torch.nn.functional.relu', 'F.relu', (['(x + residual)'], {'inplace': '(True)'}), '(x + residual, inplace=True)\n', (18780, 18808), True, 'import torch.nn.functional as F\n'), ((19906, 19928), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (19919, 19928), True, 'import torch.nn as nn\n'), ((21363, 21401), 'torch.nn.functional.relu', 'F.relu', 
(['(low_fm + high_fm)'], {'inplace': '(True)'}), '(low_fm + high_fm, inplace=True)\n', (21369, 21401), True, 'import torch.nn.functional as F\n'), ((755, 778), 'torch.nn.Sequential', 'nn.Sequential', (['conv_mod'], {}), '(conv_mod)\n', (768, 778), True, 'import torch.nn as nn\n'), ((3536, 3557), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3543, 3557), True, 'import torch.nn as nn\n'), ((4180, 4201), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4187, 4201), True, 'import torch.nn as nn\n'), ((11722, 11743), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (11729, 11743), True, 'import torch.nn as nn\n'), ((11757, 11811), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': 'kernel_size'}), '(channels, channels, kernel_size=kernel_size)\n', (11766, 11811), True, 'import torch.nn as nn\n'), ((11825, 11846), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (11832, 11846), True, 'import torch.nn as nn\n'), ((11860, 11914), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': 'kernel_size'}), '(channels, channels, kernel_size=kernel_size)\n', (11869, 11914), True, 'import torch.nn as nn\n'), ((12441, 12489), 'torch.nn.Conv2d', 'nn.Conv2d', (['low_shape[1]', 'channels'], {'kernel_size': '(3)'}), '(low_shape[1], channels, kernel_size=3)\n', (12450, 12489), True, 'import torch.nn as nn\n'), ((13111, 13132), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (13118, 13132), True, 'import torch.nn as nn\n'), ((13146, 13167), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(5)', '(1)', '(2)'], {}), '(5, 1, 2)\n', (13158, 13167), True, 'import torch.nn as nn\n'), ((13181, 13231), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_shape[1]', 'channels'], {'kernel_size': '(3)'}), '(input_shape[1], channels, kernel_size=3)\n', (13190, 13231), True, 'import torch.nn as nn\n'), ((15347, 15378), 'torch.cat', 
'torch.cat', (['output_slices'], {'dim': '(1)'}), '(output_slices, dim=1)\n', (15356, 15378), False, 'import torch\n'), ((23071, 23095), 'torch.from_numpy', 'torch.from_numpy', (['weight'], {}), '(weight)\n', (23087, 23095), False, 'import torch\n'), ((2758, 2779), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2765, 2779), True, 'import torch.nn as nn\n'), ((2860, 2881), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2867, 2881), True, 'import torch.nn as nn\n'), ((14966, 15023), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', 'k_sizes[i]'], {'stride': 'strides[i]', 'padding': '(0)'}), '(x, k_sizes[i], stride=strides[i], padding=0)\n', (14978, 15023), True, 'import torch.nn.functional as F\n'), ((15216, 15284), 'torch.nn.functional.interpolate', 'F.interpolate', (['out'], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(out, size=(h, w), mode='bilinear', align_corners=True)\n", (15229, 15284), True, 'import torch.nn.functional as F\n'), ((15578, 15635), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', 'k_sizes[i]'], {'stride': 'strides[i]', 'padding': '(0)'}), '(x, k_sizes[i], stride=strides[i], padding=0)\n', (15590, 15635), True, 'import torch.nn.functional as F\n'), ((15828, 15896), 'torch.nn.functional.interpolate', 'F.interpolate', (['out'], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(out, size=(h, w), mode='bilinear', align_corners=True)\n", (15841, 15896), True, 'import torch.nn.functional as F\n'), ((21977, 22036), 'torch.arange', 'torch.arange', (['(0)', 'oh'], {'dtype': 'torch.float', 'device': 'input.device'}), '(0, oh, dtype=torch.float, device=input.device)\n', (21989, 22036), False, 'import torch\n'), ((22064, 22123), 'torch.arange', 'torch.arange', (['(0)', 'ow'], {'dtype': 'torch.float', 'device': 'input.device'}), '(0, ow, dtype=torch.float, device=input.device)\n', (22076, 22123), False, 'import torch\n'), 
((9881, 9917), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['self.scale', 'self.scale'], {}), '(self.scale, self.scale)\n', (9893, 9917), True, 'import torch.nn as nn\n')] |
from typing import NoReturn
import cv2
import numpy as np
from generic_dataset.data_pipeline import DataPipeline
from generic_dataset.generic_sample import synchronize_on_fields
from generic_dataset.sample_generator import SampleGenerator
from generic_dataset.utilities.save_load_methods import save_cv2_image_bgr, load_cv2_image_bgr, \
save_compressed_numpy_array, load_compressed_numpy_array, load_cv2_image_grayscale
COLORS = {0: (0, 0, 255), 1: (0, 255, 0)}
pipeline_fix_gbr_image = DataPipeline().add_operation(lambda d, e: (d[..., [2, 1, 0]], e))
@synchronize_on_fields(field_names={'bgr_image', 'depth_image', 'bounding_boxes'}, check_pipeline=True)
def visualize(self) -> NoReturn:
    """
    This method visualizes the sample, showing all its fields.

    The BGR image, the depth image (converted from grayscale to BGR so it
    can be tiled next to the colour image) and a copy of the BGR image with
    the bounding boxes drawn on top are concatenated horizontally into one
    row and shown in an OpenCV window; blocks until a key is pressed.
    :return:
    """
    bgr_image = self.get_bgr_image()
    depth_image = self.get_depth_image()
    img_bounding_boxes = bgr_image.copy()
    # Each bounding box row unpacks as [label, x1, y1, width, height];
    # the label selects the rectangle colour from COLORS.
    for label, *box in self.get_bounding_boxes():
        cv2.rectangle(img_bounding_boxes, box, color=COLORS[label], thickness=1)
    # Tile [bgr | depth-as-BGR | bgr-with-boxes] side by side.
    row_1 = np.concatenate((bgr_image, cv2.cvtColor(depth_image, cv2.COLOR_GRAY2BGR)), axis=1)
    row_1 = np.concatenate((row_1, img_bounding_boxes), axis=1)
    cv2.imshow('Sample', row_1)
    cv2.waitKey()
# The bounding_boxes field is a numpy array of list [[label, x1, y1, width, height]],
# where label is the bounding box label and (x1, y1) are the coordinates of the top left point and width height the bbox dimension
DOOR_LABELS = {0: 'Closed door', 1: 'Open door'}
DoorSample = SampleGenerator(name='DoorSample', label_set={0, 1}) \
.add_dataset_field(field_name='bgr_image', field_type=np.ndarray, save_function=save_cv2_image_bgr, load_function=load_cv2_image_bgr) \
.add_dataset_field(field_name='depth_image', field_type=np.ndarray, save_function=save_cv2_image_bgr, load_function=load_cv2_image_grayscale) \
.add_dataset_field(field_name='bounding_boxes', field_type=np.ndarray, default_value=np.array([]), load_function=load_compressed_numpy_array, save_function=save_compressed_numpy_array) \
.add_custom_method(method_name='visualize', function=visualize) \
.generate_sample_class() | [
"cv2.waitKey",
"cv2.cvtColor",
"generic_dataset.sample_generator.SampleGenerator",
"numpy.array",
"cv2.rectangle",
"generic_dataset.data_pipeline.DataPipeline",
"generic_dataset.generic_sample.synchronize_on_fields",
"cv2.imshow",
"numpy.concatenate"
] | [((563, 669), 'generic_dataset.generic_sample.synchronize_on_fields', 'synchronize_on_fields', ([], {'field_names': "{'bgr_image', 'depth_image', 'bounding_boxes'}", 'check_pipeline': '(True)'}), "(field_names={'bgr_image', 'depth_image',\n 'bounding_boxes'}, check_pipeline=True)\n", (584, 669), False, 'from generic_dataset.generic_sample import synchronize_on_fields\n'), ((1151, 1202), 'numpy.concatenate', 'np.concatenate', (['(row_1, img_bounding_boxes)'], {'axis': '(1)'}), '((row_1, img_bounding_boxes), axis=1)\n', (1165, 1202), True, 'import numpy as np\n'), ((1208, 1235), 'cv2.imshow', 'cv2.imshow', (['"""Sample"""', 'row_1'], {}), "('Sample', row_1)\n", (1218, 1235), False, 'import cv2\n'), ((1240, 1253), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1251, 1253), False, 'import cv2\n'), ((494, 508), 'generic_dataset.data_pipeline.DataPipeline', 'DataPipeline', ([], {}), '()\n', (506, 508), False, 'from generic_dataset.data_pipeline import DataPipeline\n'), ((970, 1042), 'cv2.rectangle', 'cv2.rectangle', (['img_bounding_boxes', 'box'], {'color': 'COLORS[label]', 'thickness': '(1)'}), '(img_bounding_boxes, box, color=COLORS[label], thickness=1)\n', (983, 1042), False, 'import cv2\n'), ((1083, 1128), 'cv2.cvtColor', 'cv2.cvtColor', (['depth_image', 'cv2.COLOR_GRAY2BGR'], {}), '(depth_image, cv2.COLOR_GRAY2BGR)\n', (1095, 1128), False, 'import cv2\n'), ((1969, 1981), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1977, 1981), True, 'import numpy as np\n'), ((1537, 1589), 'generic_dataset.sample_generator.SampleGenerator', 'SampleGenerator', ([], {'name': '"""DoorSample"""', 'label_set': '{0, 1}'}), "(name='DoorSample', label_set={0, 1})\n", (1552, 1589), False, 'from generic_dataset.sample_generator import SampleGenerator\n')] |
import numpy as np
import time
import torch
from torch.autograd import Variable
import src.utils.utils as utils
from src.utils.procrustes import get_transformation
import src.utils.viz as viz
def test_human(test_loader, misc, stat_2d, stat_3d,
standardize_input_data, standardize_output_data,
use_rel_loss, subtract_2d_root, keep_root,
model, mse_loss, save_ims=False, epoch=None,
op_dir=None):
# list with all the predicted poses
target_poses = []
out_poses = []
scaled_poses = []
proc_poses = []
# dictionary for storing a few images for visualization purposes
output_for_viz = {'pts_2d':[], 'gt_3d':[], 'pred_3d':[], 'pred_3d_proc':[]}
# at test time keep track only of the full 3d supervised mean squared error loss
losses_sup = utils.AverageMeter()
outputs_mean = Variable(torch.from_numpy(stat_3d['mean'][np.newaxis, ...]).cuda(),requires_grad=False)
# outputs_mean = Variable(torch.from_numpy(stat_3d['mean'][np.newaxis, ...]),requires_grad=False)
outputs_std = Variable(torch.from_numpy(stat_3d['std'][np.newaxis, ...]).cuda(),requires_grad=False)
# outputs_std = Variable(torch.from_numpy(stat_3d['std'][np.newaxis, ...]),requires_grad=False)
model.eval()
tic = time.time()
for i, test_data in enumerate(test_loader):
########################################################################
# load data
inps, norm_inps, inps_root, tars, norm_tars, tars_root, _, _, _, _ = test_data
num_keypoints = int(inps.shape[1] / 2) # inps are the 2d coordinates
batch_size = inps.shape[0]
inputs = Variable(inps.cuda(), requires_grad=False)
# inputs = Variable(inps, requires_grad=False)
targets = Variable(tars.cuda(), requires_grad=False)
# targets = Variable(tars, requires_grad=False)
tars_root = Variable(tars_root.repeat(1, num_keypoints).cuda(),requires_grad=False)
# tars_root = Variable(tars_root.repeat(1, num_keypoints),requires_grad=False)
########################################################################
# standardize data based on flags
if standardize_input_data:
# uses standardized 2d inputs
model_inputs = Variable(norm_inps.cuda())
# model_inputs = Variable(norm_inps)
else:
model_inputs = inputs
# if standardize_output_data:
# # uses standardized 3d outputs. NOTE: this is using 3d data
# model_targets = Variable(norm_tars.cuda())
#
# # note that 3d outputs are not standardized based on the training data
# # for the relative loss since it cannot use any 3d information
# # (not even mean and std), relies on un-norm_op to unstandardize the data
# # assert use_rel_loss == False, "Cannot use 3d data for relative_loss!"
#
# else:
# model_targets = targets
model_outputs, _ = model(model_inputs)
if np.isnan(model_outputs.mean().data[0]):
print('nans in prediction')
import ipdb;ipdb.set_trace()
########################################################################
# un-standardize data based on flags
if standardize_output_data:
# can use the 3d info from training set to unstandardize
outputs = outputs_mean + model_outputs * outputs_std
assert use_rel_loss == False, "Cannot use 3d data for relative_loss!"
else:
# the network relies on the un-norm_op to unstandardize the data so the
# model outputs should already be un-standardized
outputs = model_outputs
# add the root back to the outputs and to the targets
# outputs = outputs + tars_root
# targets = targets + tars_root
########################################################################
# supervised loss
loss = mse_loss(outputs, targets)
# loss = mse_loss(model_outputs, model_targets)
losses_sup.update(loss.data[0], batch_size)
########################################################################
# do plotting and compute errors using numpy
# use unnormalized version of all the data
targets = targets.data.cpu().numpy()
# targets = tars.numpy()
outputs = outputs.data.cpu().numpy()
inputs = inps.numpy()
inps_root = inps_root.numpy()
########################################################################
# add the root to the inputs if specified by the flags
if subtract_2d_root:
inputs += np.tile(inps_root, num_keypoints)
########################################################################
# NOTE: MUST insert the root back to the targets if keep_root is False
# otherwise the reconstruction fails
if not keep_root: raise NotImplementedError("Must add the root in prediction.")
# compute the error with procrustes alignment
outputs_proc = np.zeros(outputs.shape)
outputs_scaled = np.zeros(outputs.shape)
for ba in range(batch_size):
gt = targets[ba, :].reshape(-1, 3)
out = outputs[ba, :].reshape(-1, 3)
_, Z, T, b, c = get_transformation(gt, out, True)
proc = (b * out.dot(T)) + c
scaled = b * out
outputs_proc[ba, :] = proc.reshape(1, num_keypoints * 3)
outputs_scaled[ba, :] = scaled.reshape(1, num_keypoints * 3)
target_poses.append(np.vstack(targets[np.newaxis,...]))
out_poses.append(np.vstack(outputs[np.newaxis,...]))
scaled_poses.append(np.vstack(outputs_scaled[np.newaxis,...]))
proc_poses.append(np.vstack(outputs_proc[np.newaxis,...]))
########################################################################
# save poses for visualization - select diverse data by spacing out selection
if save_ims and (i + 1) % (len(test_loader)//15) == 0:
output_for_viz['pts_2d'].append(inputs[0,:])
output_for_viz['gt_3d'].append(targets[0,:])
output_for_viz['pred_3d'].append(outputs[0,:])
output_for_viz['pred_3d_proc'].append(outputs_proc[0,:])
########################################################################
# update summary
if (i + 1) % 1000 == 0:
its_time = time.time() - tic
print(' ({batch}/{size}) \t| sup loss {loss:.4f} | time {its_time:.3f}s' \
.format(batch=i+1, size=len(test_loader), loss=losses_sup.avg, its_time=its_time))
tic = time.time()
############################################################################
# save image
if save_ims:
op_file_name = op_dir + '/' + str(epoch+1).zfill(3) + '.jpg'
viz.save_output_image(op_file_name, output_for_viz, misc)
return losses_sup.avg, target_poses, out_poses, proc_poses, scaled_poses
| [
"src.utils.procrustes.get_transformation",
"ipdb.set_trace",
"src.utils.utils.AverageMeter",
"numpy.zeros",
"time.time",
"numpy.tile",
"src.utils.viz.save_output_image",
"numpy.vstack",
"torch.from_numpy"
] | [((842, 862), 'src.utils.utils.AverageMeter', 'utils.AverageMeter', ([], {}), '()\n', (860, 862), True, 'import src.utils.utils as utils\n'), ((1309, 1320), 'time.time', 'time.time', ([], {}), '()\n', (1318, 1320), False, 'import time\n'), ((5176, 5199), 'numpy.zeros', 'np.zeros', (['outputs.shape'], {}), '(outputs.shape)\n', (5184, 5199), True, 'import numpy as np\n'), ((5225, 5248), 'numpy.zeros', 'np.zeros', (['outputs.shape'], {}), '(outputs.shape)\n', (5233, 5248), True, 'import numpy as np\n'), ((6987, 7044), 'src.utils.viz.save_output_image', 'viz.save_output_image', (['op_file_name', 'output_for_viz', 'misc'], {}), '(op_file_name, output_for_viz, misc)\n', (7008, 7044), True, 'import src.utils.viz as viz\n'), ((3194, 3210), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (3208, 3210), False, 'import ipdb\n'), ((4768, 4801), 'numpy.tile', 'np.tile', (['inps_root', 'num_keypoints'], {}), '(inps_root, num_keypoints)\n', (4775, 4801), True, 'import numpy as np\n'), ((5410, 5443), 'src.utils.procrustes.get_transformation', 'get_transformation', (['gt', 'out', '(True)'], {}), '(gt, out, True)\n', (5428, 5443), False, 'from src.utils.procrustes import get_transformation\n'), ((5688, 5723), 'numpy.vstack', 'np.vstack', (['targets[np.newaxis, ...]'], {}), '(targets[np.newaxis, ...])\n', (5697, 5723), True, 'import numpy as np\n'), ((5749, 5784), 'numpy.vstack', 'np.vstack', (['outputs[np.newaxis, ...]'], {}), '(outputs[np.newaxis, ...])\n', (5758, 5784), True, 'import numpy as np\n'), ((5813, 5855), 'numpy.vstack', 'np.vstack', (['outputs_scaled[np.newaxis, ...]'], {}), '(outputs_scaled[np.newaxis, ...])\n', (5822, 5855), True, 'import numpy as np\n'), ((5882, 5922), 'numpy.vstack', 'np.vstack', (['outputs_proc[np.newaxis, ...]'], {}), '(outputs_proc[np.newaxis, ...])\n', (5891, 5922), True, 'import numpy as np\n'), ((6782, 6793), 'time.time', 'time.time', ([], {}), '()\n', (6791, 6793), False, 'import time\n'), ((892, 942), 'torch.from_numpy', 
'torch.from_numpy', (["stat_3d['mean'][np.newaxis, ...]"], {}), "(stat_3d['mean'][np.newaxis, ...])\n", (908, 942), False, 'import torch\n'), ((1101, 1150), 'torch.from_numpy', 'torch.from_numpy', (["stat_3d['std'][np.newaxis, ...]"], {}), "(stat_3d['std'][np.newaxis, ...])\n", (1117, 1150), False, 'import torch\n'), ((6558, 6569), 'time.time', 'time.time', ([], {}), '()\n', (6567, 6569), False, 'import time\n')] |
#!/usr/bin/env python3
import argparse
import os
from contextlib import nullcontext
from tqdm import tqdm
import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda.amp import GradScaler, autocast
from torch.utils.data import DataLoader
from util.io import store_json
from vpd_dataset.single_frame import GenericDataset, TennisDataset, PennDataset
from vpd_dataset.common import RGB_MEAN_STD
from action_dataset.eval import get_test_prefixes
from models.rgb import RGBF_EmbeddingModel
from models.util import step
import video_dataset_paths as dataset_paths
DATASETS = ['tennis', 'fs', 'fx', 'penn', 'diving48']
def get_args():
    """Build and parse the command-line arguments for distillation training."""
    p = argparse.ArgumentParser()
    # Which dataset to distill on; must be one of the supported names.
    p.add_argument('dataset', type=str, choices=DATASETS)
    p.add_argument('--save_dir', type=str, required=True)
    p.add_argument('--checkpoint_frequency', type=int)
    p.add_argument('--num_epochs', type=int, default=1000)
    p.add_argument('--batch_size', type=int, default=100)
    p.add_argument('--learning_rate', type=float, default=0.0005)
    p.add_argument('--img_dim', type=int, default=128)
    p.add_argument('--flow_img', type=str)
    p.add_argument('--motion', action='store_true')
    p.add_argument('--encoder_arch', type=str, default='resnet34')
    p.add_argument('--model_select_window', type=int, default=5)
    p.add_argument('--pretrained', action='store_true')
    p.add_argument('--no_test_video', action='store_true')
    p.add_argument('--min_pose_score', type=float)
    # The data source is either a teacher-embedding dir or the Penn dir,
    # never both.
    source_group = p.add_mutually_exclusive_group()
    # Teacher embedding directory
    source_group.add_argument('--emb_dir', type=str)
    # Only for Penn dataset
    source_group.add_argument('--penn_dir', type=str)
    return p.parse_args()
class ModelTrainer:
    """Class for training the encoder. Discarded after training.

    Wraps the image encoder (and, when ``motion`` is set, an extra MLP head
    ``fcn_time``) and provides a one-epoch train/eval loop, optimizer
    construction, and checkpoint saving.
    """
    def __init__(self, encoder, motion):
        super().__init__()
        device = encoder.device
        self.encoder = encoder.to(device)
        if motion:
            from models.module import FCNet
            # MLP head mapping a frame embedding to a 2x-sized target; only
            # created when distilling motion embeddings (its existence is
            # later probed with hasattr).
            self.fcn_time = FCNet(
                encoder.emb_dim, [128, 128], 2 * encoder.emb_dim,
                dropout=0).to(device)

    def epoch(self, data_loader, optimizer=None, scaler=None, progress_cb=None):
        """Run one epoch over ``data_loader``.

        Trains when ``optimizer`` is given, otherwise evaluates with
        gradients disabled. ``scaler`` enables mixed precision (autocast);
        ``progress_cb`` is invoked with each batch size. Returns the mean
        per-sample embedding MSE over the epoch.
        """
        device = self.encoder.device
        # Expression-form mode switch: eval for validation, train otherwise.
        self.encoder.eval() if optimizer is None else self.encoder.train()
        if hasattr(self, 'fcn_time'):
            self.fcn_time.eval() if optimizer is None else self.fcn_time.train()
        epoch_emb_loss = 0.
        epoch_emb_n = 0
        # Disable autograd entirely in evaluation mode.
        with torch.no_grad() if optimizer is None else nullcontext():
            for batch in data_loader:
                # Autocast only when a GradScaler was provided (AMP path).
                with nullcontext() if scaler is None else autocast():
                    img = batch['img'].to(device)
                    n = img.shape[0]
                    emb = self.encoder(img)
                    gt_emb = batch['emb'].to(device)
                    if hasattr(self, 'fcn_time'):
                        emb = self.fcn_time(emb)
                    # Sum-reduced MSE against the teacher embedding.
                    emb_loss = F.mse_loss(emb, gt_emb, reduction='sum')
                    loss = emb_loss
                if optimizer is not None:
                    step(optimizer, scaler, loss)
                epoch_emb_loss += emb_loss.item()
                epoch_emb_n += n
                if progress_cb is not None:
                    progress_cb(n)
        return epoch_emb_loss / epoch_emb_n

    def get_optimizer(self, learning_rate):
        """Return (AdamW optimizer over all trainable params, GradScaler or None).

        The scaler is only created on CUDA, where AMP applies.
        """
        params = list(self.encoder.parameters())
        if hasattr(self, 'fcn_time'):
            params.extend(self.fcn_time.parameters())
        return torch.optim.AdamW(params, lr=learning_rate), \
            GradScaler() if self.encoder.device == 'cuda' else None

    def save_model(self, save_dir, name):
        """Save encoder (and fcn_time head, if any) state dicts under ``save_dir``."""
        torch.save(self.encoder.state_dict(),
                   os.path.join(save_dir, '{}.encoder.pt'.format(name)))
        if hasattr(self, 'fcn_time'):
            torch.save(self.fcn_time.state_dict(),
                       os.path.join(save_dir, '{}.decoder.pt'.format(name)))
def get_moving_avg_loss(losses, n, key):
    """Mean of ``key`` over the last ``n`` entries of the loss history."""
    recent = losses[-n:]
    return np.mean([entry[key] for entry in recent])
def load_dataset(
    dataset, dataset_kwargs, emb_dir, penn_dir, no_test_video
):
    """Load train/val datasets and the teacher embedding dimension.

    Dispatches on ``dataset`` name; when ``emb_dir`` is None a per-dataset
    default under the dataset root is used. When ``no_test_video`` is set,
    the test-video prefixes are excluded from training/validation.

    NOTE: mutates ``dataset_kwargs`` in place (adds 'exclude_prefixes' /
    does not copy) — callers should not reuse the dict across calls.

    Returns (train_dataset, val_dataset, emb_dim).
    """
    if dataset == 'tennis':
        if emb_dir is None:
            emb_dir = os.path.join(dataset_paths.TENNIS_ROOT_DIR, 'embs')
        if no_test_video:
            dataset_kwargs['exclude_prefixes'] = get_test_prefixes(dataset)
        train_dataset, val_dataset, emb_dim = TennisDataset.load_default(
            emb_dir, dataset_paths.TENNIS_CROP_DIR, **dataset_kwargs)
    elif dataset == 'fs':
        if emb_dir is None:
            emb_dir = os.path.join(dataset_paths.FS_ROOT_DIR, 'embs')
        if no_test_video:
            dataset_kwargs['exclude_prefixes'] = get_test_prefixes(dataset)
        train_dataset, val_dataset, emb_dim = GenericDataset.load_default(
            emb_dir, dataset_paths.FS_CROP_DIR, **dataset_kwargs)
    elif dataset == 'fx':
        if emb_dir is None:
            emb_dir = os.path.join(dataset_paths.FX_ROOT_DIR, 'embs')
        if no_test_video:
            # FineGym test prefixes come from the GYM99 validation file
            # (video id is everything before the '_A_' action marker).
            import finegym.util as fg_util
            fg_test_prefixes = tuple([
                l.split('_A_')[0] for l in fg_util.load_labels(
                    fg_util.GYM99_VAL_FILE)
            ])
            dataset_kwargs['exclude_prefixes'] = fg_test_prefixes
        train_dataset, val_dataset, emb_dim = GenericDataset.load_default(
            emb_dir, dataset_paths.FX_CROP_DIR, **dataset_kwargs)
    elif dataset == 'diving48':
        if no_test_video:
            import diving48.util as diving48_util
            # Exclude the keys (video ids) of the v2 test split.
            dataset_kwargs['exclude_prefixes'] = tuple(
                diving48_util.load_labels_and_embeddings(
                    diving48_util.DIVING48_V2_TEST_FILE)[0].keys())
        if emb_dir is None:
            emb_dir = os.path.join(dataset_paths.DIVING48_ROOT_DIR, 'embs')
        train_dataset, val_dataset, emb_dim = GenericDataset.load_default(
            emb_dir, dataset_paths.DIVING48_CROP_DIR, **dataset_kwargs)
    elif dataset == 'penn':
        # Penn has no teacher-embedding dir; the data dir must be given.
        assert penn_dir is not None
        train_dataset, val_dataset, emb_dim = PennDataset.load_default(
            penn_dir, **dataset_kwargs)
    else:
        raise NotImplementedError()
    return train_dataset, val_dataset, emb_dim
def main(
        dataset, num_epochs, batch_size, learning_rate, img_dim, flow_img,
        motion, encoder_arch, save_dir, model_select_window,
        checkpoint_frequency, pretrained, emb_dir, penn_dir,
        no_test_video, min_pose_score
):
    """Distill teacher embeddings into a single-frame RGB(+flow) encoder.

    Loads train/val splits for ``dataset``, builds the encoder and trainer,
    then runs ``num_epochs`` epochs. A moving average of the validation loss
    (window ``model_select_window``) drives best-model selection. Writes
    config.json, loss.json and checkpoints under ``save_dir``.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    rgb_mean_std = RGB_MEAN_STD['resnet' if pretrained else dataset]

    dataset_kwargs = {
        'img_dim': img_dim, 'flow_img_name': flow_img,
        'embed_time': motion, 'rgb_mean_std': rgb_mean_std,
        'target_len': 20000
    }
    if min_pose_score is not None:
        dataset_kwargs['min_pose_score'] = min_pose_score
    (
        train_dataset, val_dataset, emb_dim
    ) = load_dataset(dataset, dataset_kwargs, emb_dir, penn_dir, no_test_video)

    print('Device:', device)
    print('Num epochs:', num_epochs)
    print('Batch size:', batch_size)
    print('Image dim:', img_dim)
    print('Use flow:', flow_img is not None)
    print('Embed time:', motion)
    print('Encoder arch:', encoder_arch)
    print('Dataset:')
    print('', 'Train images:', len(train_dataset))
    print('', 'Val images:', len(val_dataset))
    print('', 'Embedding dim:', emb_dim)
    print('', 'Min pose score:', min_pose_score)

    num_load_workers = min(os.cpu_count(), 8)
    train_loader = DataLoader(
        train_dataset, batch_size, shuffle=True, num_workers=num_load_workers,
        persistent_workers=False)
    # BUG FIX: val_loader must be bound even when there is no validation
    # split, otherwise the `if val_loader is not None` check below raises
    # NameError on the first epoch.
    val_loader = None
    if val_dataset is not None:
        val_loader = DataLoader(
            val_dataset, batch_size, num_workers=num_load_workers,
            persistent_workers=False)

    encoder = RGBF_EmbeddingModel(encoder_arch, emb_dim, flow_img is not None,
                                  device, pretrained=pretrained)
    trainer = ModelTrainer(encoder, motion)
    optimizer, scaler = trainer.get_optimizer(learning_rate)

    # Save the model settings. makedirs intentionally fails if save_dir
    # already exists, guarding against overwriting a previous run.
    os.makedirs(save_dir)
    store_json(os.path.join(save_dir, 'config.json'), {
        'num_epochs': num_epochs, 'batch_size': batch_size,
        'learning_rate': learning_rate, 'img_dim': img_dim,
        'use_flow': flow_img is not None,
        'motion': motion, 'emb_dim': emb_dim,
        'encoder_arch': encoder_arch, 'rgb_mean_std': rgb_mean_std
    })

    # Initialize the loss history
    loss_file = os.path.join(save_dir, 'loss.json')
    losses = []

    best_val_loss = float('inf')
    for epoch in range(1, num_epochs + 1):
        with tqdm(
                desc='Epoch {} - train'.format(epoch), total=len(train_dataset)
        ) as pbar:
            train_loss = trainer.epoch(
                train_loader, optimizer=optimizer, scaler=scaler,
                progress_cb=lambda n: pbar.update(n))

        # NOTE(review): without a validation split val_loss stays NaN, so
        # the moving average is NaN and no "best epoch" checkpoint is ever
        # written (NaN comparisons are False) — confirm this is intended.
        val_loss = float('nan')
        if val_loader is not None:
            with tqdm(
                    desc='Epoch {} - val'.format(epoch), total=len(val_dataset)
            ) as pbar:
                val_loss = trainer.epoch(
                    val_loader, progress_cb=lambda n: pbar.update(n))

        losses.append({
            'epoch': epoch, 'train': train_loss, 'val': val_loss,
            'dataset_train': [(dataset, train_loss)],
            'dataset_val': [(dataset, val_loss)]
        })
        moving_avg_val_loss = get_moving_avg_loss(
            losses, model_select_window, 'val')
        print('Epoch {} - train loss: {:0.4f} [avg: {:0.4f}] val loss: {:0.4f} [avg: {:0.4f}]'.format(
            epoch, train_loss,
            get_moving_avg_loss(losses, model_select_window, 'train'),
            val_loss, moving_avg_val_loss))

        if loss_file is not None:
            store_json(loss_file, losses)
        if save_dir is not None:
            # Best model selection is on the smoothed validation loss.
            if moving_avg_val_loss < best_val_loss:
                print('New best epoch!')
                trainer.save_model(save_dir, 'best_epoch')
            if (
                    checkpoint_frequency is not None
                    and epoch % checkpoint_frequency == 0
            ):
                print('Saving checkpoint: {}'.format(epoch))
                trainer.save_model(save_dir, 'epoch{:04d}'.format(epoch))
        best_val_loss = min(moving_avg_val_loss, best_val_loss)

    if save_dir is not None:
        print('Saving last epoch: {}'.format(epoch))
        trainer.save_model(save_dir, 'epoch{:04d}'.format(epoch))
    print('Done!')


if __name__ == '__main__':
    main(**vars(get_args()))
| [
"argparse.ArgumentParser",
"vpd_dataset.single_frame.GenericDataset.load_default",
"diving48.util.load_labels_and_embeddings",
"torch.optim.AdamW",
"models.module.FCNet",
"numpy.mean",
"torch.no_grad",
"os.path.join",
"torch.cuda.amp.autocast",
"torch.utils.data.DataLoader",
"finegym.util.load_l... | [((670, 695), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (693, 695), False, 'import argparse\n'), ((4202, 4240), 'numpy.mean', 'np.mean', (['[l[key] for l in losses[-n:]]'], {}), '([l[key] for l in losses[-n:]])\n', (4209, 4240), True, 'import numpy as np\n'), ((7757, 7869), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', 'batch_size'], {'shuffle': '(True)', 'num_workers': 'num_load_workers', 'persistent_workers': '(False)'}), '(train_dataset, batch_size, shuffle=True, num_workers=\n num_load_workers, persistent_workers=False)\n', (7767, 7869), False, 'from torch.utils.data import DataLoader\n'), ((8067, 8166), 'models.rgb.RGBF_EmbeddingModel', 'RGBF_EmbeddingModel', (['encoder_arch', 'emb_dim', '(flow_img is not None)', 'device'], {'pretrained': 'pretrained'}), '(encoder_arch, emb_dim, flow_img is not None, device,\n pretrained=pretrained)\n', (8086, 8166), False, 'from models.rgb import RGBF_EmbeddingModel\n'), ((8338, 8359), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (8349, 8359), False, 'import os\n'), ((8749, 8784), 'os.path.join', 'os.path.join', (['save_dir', '"""loss.json"""'], {}), "(save_dir, 'loss.json')\n", (8761, 8784), False, 'import os\n'), ((4608, 4697), 'vpd_dataset.single_frame.TennisDataset.load_default', 'TennisDataset.load_default', (['emb_dir', 'dataset_paths.TENNIS_CROP_DIR'], {}), '(emb_dir, dataset_paths.TENNIS_CROP_DIR, **\n dataset_kwargs)\n', (4634, 4697), False, 'from vpd_dataset.single_frame import GenericDataset, TennisDataset, PennDataset\n'), ((6722, 6747), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6745, 6747), False, 'import torch\n'), ((7719, 7733), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (7731, 7733), False, 'import os\n'), ((7935, 8030), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset', 'batch_size'], {'num_workers': 'num_load_workers', 'persistent_workers': '(False)'}), 
'(val_dataset, batch_size, num_workers=num_load_workers,\n persistent_workers=False)\n', (7945, 8030), False, 'from torch.utils.data import DataLoader\n'), ((8375, 8412), 'os.path.join', 'os.path.join', (['save_dir', '"""config.json"""'], {}), "(save_dir, 'config.json')\n", (8387, 8412), False, 'import os\n'), ((3705, 3748), 'torch.optim.AdamW', 'torch.optim.AdamW', (['params'], {'lr': 'learning_rate'}), '(params, lr=learning_rate)\n', (3722, 3748), False, 'import torch\n'), ((4408, 4459), 'os.path.join', 'os.path.join', (['dataset_paths.TENNIS_ROOT_DIR', '"""embs"""'], {}), "(dataset_paths.TENNIS_ROOT_DIR, 'embs')\n", (4420, 4459), False, 'import os\n'), ((4535, 4561), 'action_dataset.eval.get_test_prefixes', 'get_test_prefixes', (['dataset'], {}), '(dataset)\n', (4552, 4561), False, 'from action_dataset.eval import get_test_prefixes\n'), ((4979, 5065), 'vpd_dataset.single_frame.GenericDataset.load_default', 'GenericDataset.load_default', (['emb_dir', 'dataset_paths.FS_CROP_DIR'], {}), '(emb_dir, dataset_paths.FS_CROP_DIR, **\n dataset_kwargs)\n', (5006, 5065), False, 'from vpd_dataset.single_frame import GenericDataset, TennisDataset, PennDataset\n'), ((10062, 10091), 'util.io.store_json', 'store_json', (['loss_file', 'losses'], {}), '(loss_file, losses)\n', (10072, 10091), False, 'from util.io import store_json\n'), ((2645, 2660), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2658, 2660), False, 'import torch\n'), ((2687, 2700), 'contextlib.nullcontext', 'nullcontext', ([], {}), '()\n', (2698, 2700), False, 'from contextlib import nullcontext\n'), ((3764, 3776), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {}), '()\n', (3774, 3776), False, 'from torch.cuda.amp import GradScaler, autocast\n'), ((4783, 4830), 'os.path.join', 'os.path.join', (['dataset_paths.FS_ROOT_DIR', '"""embs"""'], {}), "(dataset_paths.FS_ROOT_DIR, 'embs')\n", (4795, 4830), False, 'import os\n'), ((4906, 4932), 'action_dataset.eval.get_test_prefixes', 'get_test_prefixes', 
(['dataset'], {}), '(dataset)\n', (4923, 4932), False, 'from action_dataset.eval import get_test_prefixes\n'), ((5542, 5628), 'vpd_dataset.single_frame.GenericDataset.load_default', 'GenericDataset.load_default', (['emb_dir', 'dataset_paths.FX_CROP_DIR'], {}), '(emb_dir, dataset_paths.FX_CROP_DIR, **\n dataset_kwargs)\n', (5569, 5628), False, 'from vpd_dataset.single_frame import GenericDataset, TennisDataset, PennDataset\n'), ((2154, 2220), 'models.module.FCNet', 'FCNet', (['encoder.emb_dim', '[128, 128]', '(2 * encoder.emb_dim)'], {'dropout': '(0)'}), '(encoder.emb_dim, [128, 128], 2 * encoder.emb_dim, dropout=0)\n', (2159, 2220), False, 'from models.module import FCNet\n'), ((3126, 3166), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['emb', 'gt_emb'], {'reduction': '"""sum"""'}), "(emb, gt_emb, reduction='sum')\n", (3136, 3166), True, 'import torch.nn.functional as F\n'), ((3266, 3295), 'models.util.step', 'step', (['optimizer', 'scaler', 'loss'], {}), '(optimizer, scaler, loss)\n', (3270, 3295), False, 'from models.util import step\n'), ((5151, 5198), 'os.path.join', 'os.path.join', (['dataset_paths.FX_ROOT_DIR', '"""embs"""'], {}), "(dataset_paths.FX_ROOT_DIR, 'embs')\n", (5163, 5198), False, 'import os\n'), ((6078, 6170), 'vpd_dataset.single_frame.GenericDataset.load_default', 'GenericDataset.load_default', (['emb_dir', 'dataset_paths.DIVING48_CROP_DIR'], {}), '(emb_dir, dataset_paths.DIVING48_CROP_DIR, **\n dataset_kwargs)\n', (6105, 6170), False, 'from vpd_dataset.single_frame import GenericDataset, TennisDataset, PennDataset\n'), ((2762, 2775), 'contextlib.nullcontext', 'nullcontext', ([], {}), '()\n', (2773, 2775), False, 'from contextlib import nullcontext\n'), ((2799, 2809), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (2807, 2809), False, 'from torch.cuda.amp import GradScaler, autocast\n'), ((5978, 6031), 'os.path.join', 'os.path.join', (['dataset_paths.DIVING48_ROOT_DIR', '"""embs"""'], {}), "(dataset_paths.DIVING48_ROOT_DIR, 
'embs')\n", (5990, 6031), False, 'import os\n'), ((6290, 6342), 'vpd_dataset.single_frame.PennDataset.load_default', 'PennDataset.load_default', (['penn_dir'], {}), '(penn_dir, **dataset_kwargs)\n', (6314, 6342), False, 'from vpd_dataset.single_frame import GenericDataset, TennisDataset, PennDataset\n'), ((5350, 5393), 'finegym.util.load_labels', 'fg_util.load_labels', (['fg_util.GYM99_VAL_FILE'], {}), '(fg_util.GYM99_VAL_FILE)\n', (5369, 5393), True, 'import finegym.util as fg_util\n'), ((5818, 5895), 'diving48.util.load_labels_and_embeddings', 'diving48_util.load_labels_and_embeddings', (['diving48_util.DIVING48_V2_TEST_FILE'], {}), '(diving48_util.DIVING48_V2_TEST_FILE)\n', (5858, 5895), True, 'import diving48.util as diving48_util\n')] |
import numpy as np
import json
import gzip as gz

# Number of gzipped feed files to produce for each engine.
chunks = 3

# Document id is global across chunks so ids never repeat between files.
docid = 0
for chunk_id in range(0, chunks):
    # BUG FIX: the original opened new handles every chunk but only closed
    # the pair from the *last* chunk after the loop, leaking the earlier
    # handles and relying on GC to flush the gzip trailers. Context
    # managers guarantee every file is flushed and closed.
    with gz.open("elastic/feed-%i.json.gz" % chunk_id, "wb") as es_feed, \
            gz.open("vespa/feed-%i.json.gz" % chunk_id, "wb") as vespa_feed:
        for i in range(0, 20000):
            # Random 512-d embedding, normalized to unit length.
            doc_vector = np.random.rand(512)
            doc_vector = (doc_vector / np.linalg.norm(doc_vector)).tolist()
            vespa_body = {
                "fields": {
                    "text_embedding": {
                        "values": doc_vector
                    },
                    "id": docid
                }
            }
            es_body = {
                "id": docid,
                "text_embedding": doc_vector
            }
            # One JSON document per line (newline-delimited JSON feed).
            es_feed.write((json.dumps(es_body) + "\n").encode("utf-8"))
            vespa_feed.write((json.dumps(vespa_body) + "\n").encode("utf-8"))
            docid += 1
| [
"numpy.random.rand",
"numpy.linalg.norm",
"gzip.open",
"json.dumps"
] | [((114, 165), 'gzip.open', 'gz.open', (["('elastic/feed-%i.json.gz' % chunk_id)", '"""wb"""'], {}), "('elastic/feed-%i.json.gz' % chunk_id, 'wb')\n", (121, 165), True, 'import gzip as gz\n'), ((181, 230), 'gzip.open', 'gz.open', (["('vespa/feed-%i.json.gz' % chunk_id)", '"""wb"""'], {}), "('vespa/feed-%i.json.gz' % chunk_id, 'wb')\n", (188, 230), True, 'import gzip as gz\n'), ((312, 338), 'numpy.linalg.norm', 'np.linalg.norm', (['doc_vector'], {}), '(doc_vector)\n', (326, 338), True, 'import numpy as np\n'), ((276, 298), 'numpy.random.rand', 'np.random.rand', (['(1)', '(512)'], {}), '(1, 512)\n', (290, 298), True, 'import numpy as np\n'), ((648, 667), 'json.dumps', 'json.dumps', (['es_body'], {}), '(es_body)\n', (658, 667), False, 'import json\n'), ((715, 737), 'json.dumps', 'json.dumps', (['vespa_body'], {}), '(vespa_body)\n', (725, 737), False, 'import json\n')] |
# coding=utf-8
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from .base import BaseStaticEnsemble
from deslib.util.aggregation import majority_voting
from sklearn.utils.validation import check_is_fitted, check_X_y, check_array
class StaticSelection(BaseStaticEnsemble):
    """Static ensemble that keeps the N most accurate base classifiers.

    During ``fit`` every classifier in the pool is scored on the training
    data and only the top ``pct_classifiers`` fraction is retained;
    ``predict`` then combines the retained classifiers by majority voting.

    Parameters
    ----------
    pool_classifiers : list of classifiers (Default = None)
        Pool of classifiers trained for the classification problem. Each
        base classifier should support the method "predict". If None, the
        pool of classifiers is a bagging classifier.

    pct_classifiers : float (Default = 0.5)
        Fraction of the pool retained by the selection scheme.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    References
    ----------
    Britto et al., "Dynamic selection of classifiers - a comprehensive
    review." Pattern Recognition 47.11 (2014): 3665-3680.

    Kuncheva, "Combining pattern classifiers: methods and algorithms."
    John Wiley & Sons, 2004.

    Cruz et al., "Dynamic classifier selection: Recent advances and
    perspectives." Information Fusion, vol. 41, pp. 195-216, 2018.
    """

    def __init__(self, pool_classifiers=None,
                 pct_classifiers=0.5,
                 random_state=None):
        super(StaticSelection, self).__init__(
            pool_classifiers=pool_classifiers, random_state=random_state)
        self.pct_classifiers = pct_classifiers

    def fit(self, X, y):
        """Select the ensemble of highest-accuracy base classifiers.

        Parameters
        ----------
        X : array of shape = [n_samples, n_features]
            Data used to fit the model.

        y : array of shape = [n_samples]
            class labels of each example in X.

        Returns
        -------
        self : object
            Returns self.
        """
        self._validate_parameters()
        X, y = check_X_y(X, y)
        super(StaticSelection, self).fit(X, y)

        self.n_classifiers_ensemble_ = int(
            self.n_classifiers_ * self.pct_classifiers)

        # Accuracy of every base classifier on the encoded training labels.
        scores = np.zeros(self.n_classifiers_)
        for idx, base_clf in enumerate(self.pool_classifiers_):
            scores[idx] = base_clf.score(X, self.y_enc_)

        # Best-first ranking; keep the first n_classifiers_ensemble_.
        ranked = np.argsort(scores)[::-1]
        self.clf_indices_ = ranked[0:self.n_classifiers_ensemble_]
        self.ensemble_ = [self.pool_classifiers_[idx]
                          for idx in self.clf_indices_]
        return self

    def predict(self, X):
        """Predict class labels for X via majority voting of the ensemble.

        Parameters
        ----------
        X : array of shape = [n_samples, n_features]
            The data to be classified

        Returns
        -------
        predicted_labels : array of shape = [n_samples]
            Predicted class for each sample in X.
        """
        X = check_array(X)
        self._check_is_fitted()
        votes = majority_voting(self.ensemble_, X).astype(int)
        return self.classes_.take(votes)

    def _check_is_fitted(self):
        """Raise an error if the estimator has not been fitted yet."""
        check_is_fitted(self, "ensemble_")

    def _validate_parameters(self):
        # Type check first so a non-float never reaches the range test.
        if not isinstance(self.pct_classifiers, float):
            raise TypeError('pct_classifiers should be a float.')

        if self.pct_classifiers > 1 or self.pct_classifiers < 0:
            raise ValueError(
                'The parameter pct_classifiers should be a number '
                'between 0 and 1.')
| [
"sklearn.utils.validation.check_X_y",
"numpy.zeros",
"sklearn.utils.validation.check_is_fitted",
"numpy.argsort",
"deslib.util.aggregation.majority_voting",
"sklearn.utils.validation.check_array"
] | [((2512, 2527), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (2521, 2527), False, 'from sklearn.utils.validation import check_is_fitted, check_X_y, check_array\n'), ((2701, 2730), 'numpy.zeros', 'np.zeros', (['self.n_classifiers_'], {}), '(self.n_classifiers_)\n', (2709, 2730), True, 'import numpy as np\n'), ((3542, 3556), 'sklearn.utils.validation.check_array', 'check_array', (['X'], {}), '(X)\n', (3553, 3556), False, 'from sklearn.utils.validation import check_is_fitted, check_X_y, check_array\n'), ((3871, 3905), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', '"""ensemble_"""'], {}), "(self, 'ensemble_')\n", (3886, 3905), False, 'from sklearn.utils.validation import check_is_fitted, check_X_y, check_array\n'), ((2886, 2910), 'numpy.argsort', 'np.argsort', (['performances'], {}), '(performances)\n', (2896, 2910), True, 'import numpy as np\n'), ((3616, 3650), 'deslib.util.aggregation.majority_voting', 'majority_voting', (['self.ensemble_', 'X'], {}), '(self.ensemble_, X)\n', (3631, 3650), False, 'from deslib.util.aggregation import majority_voting\n')] |
"""
To create library.so file:
python setup.py build_ext --inplace
"""
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
setup(
ext_modules=[
Extension("MultiClassTsetlinMachine", ["MultiClassTsetlinMachine.c"],
include_dirs=[numpy.get_include()]),
],
)
# Or, if you use cythonize() to make the ext_modules list,
# include_dirs can be passed to setup()
setup(
ext_modules=cythonize("MultiClassTsetlinMachine.pyx"),
include_dirs=[numpy.get_include()]
) | [
"Cython.Build.cythonize",
"numpy.get_include"
] | [((460, 501), 'Cython.Build.cythonize', 'cythonize', (['"""MultiClassTsetlinMachine.pyx"""'], {}), "('MultiClassTsetlinMachine.pyx')\n", (469, 501), False, 'from Cython.Build import cythonize\n'), ((521, 540), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (538, 540), False, 'import numpy\n'), ((304, 323), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (321, 323), False, 'import numpy\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
# In[3]:
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
# In[4]:
import scipy.io as scp
# Inline plotting for notebook use; large default figure size for spy plots.
get_ipython().run_line_magic('matplotlib', 'inline')
plt.rcParams['figure.figsize'] = [14, 14]
# In[28]:
# Collect the "*A_mat*" matrix files from the sequence directory.
path1="seq1"
files = [f for f in listdir(path1) if isfile(join(path1, f) )]
files = [f for f in files if "A_mat" in f]
print(files)
# In[29]:
# Load the first matrix (Matrix Market format; later cells use A.row/A.col,
# i.e. COO layout) and save a sparsity ("spy") plot of it.
A=scp.mmread(path1+"/"+files[0])
plt.spy(A,markersize=1)
plt.savefig("spy.png")
plt.show()
# In[6]:
class diagonal:
    """A single run of consecutive entries along one matrix diagonal.

    (x0, y0) is the position of the first entry; each appended value
    extends the run one step down-right.
    """
    def __init__(self, x0, y0):
        self.x0 = x0
        self.y0 = y0
        self.entries = []

    def append(self, value):
        """Extend the run by one entry."""
        self.entries.append(value)

    def getX(self):
        """Row index of the last entry of the run."""
        return self.x0 + len(self.entries) - 1

    def getY(self):
        """Column index of the last entry of the run."""
        return self.y0 + len(self.entries) - 1

    def size(self):
        """Number of entries in the run."""
        return len(self.entries)


class diagonalmatrix:
    """Collection of diagonal runs built from a COO-style sparse matrix."""
    def __init__(self):
        self.diagonals = []

    def AppendToDiagonal(self, i, j, value):
        """Attach (i, j, value) to the run it extends, or start a new run.

        An existing run is extended when (i, j) lies exactly one step
        down-right of that run's current end.
        """
        for diag in self.diagonals:
            if diag.getX() + 1 == i and diag.getY() + 1 == j:
                diag.append(value)
                break
        else:
            # No run ends immediately above-left of (i, j): start a new one.
            diag = diagonal(i, j)
            diag.append(value)
            self.diagonals.append(diag)

    def fill(self, matrix):
        """Decompose a COO matrix (``.row``/``.col``/``.data``) into runs.

        BUG FIX: the original iterated the global ``A`` instead of the
        ``matrix`` argument, so any other matrix passed in was ignored.
        """
        for i, j, v in zip(matrix.row, matrix.col, matrix.data):
            self.AppendToDiagonal(i, j, v)

    def info(self):
        """Print the number of runs and the total entries they contain."""
        print("Diags {}:".format(len(self.diagonals)))
        elements = 0
        for diag in self.diagonals:
            elements += diag.size()
        print("Contains {} elements".format(elements))
# In[7]:
def getMaxBand(matrix):
    """Maximum |row - col| offset over all stored entries (matrix bandwidth)."""
    offsets = (abs(row - col) for row, col in zip(matrix.row, matrix.col))
    return max(offsets, default=0)
def getSparsity(matrix):
    """Fraction of stored (nonzero) entries relative to the full matrix size."""
    n_rows, n_cols = matrix.shape[0], matrix.shape[1]
    return matrix.nnz / float(n_rows * n_cols)
# In[22]:
# Histogram of row indices: how many stored entries fall in each row.
values1,edges1=np.histogram(A.row,bins=A.shape[0])
# In[28]:
# Bin centers from bin edges; sort counts to view the row-occupancy profile.
edges1a=0.5*(edges1[:-1]+edges1[1:])
print(values1.shape)
print(edges1a.shape)
values1.sort()
plt.plot(edges1a,values1)
# In[10]:
#print(getMaxBand(A))
#print(getSparsity(A))
# Signed diagonal offset (i - j) of every stored entry.
dist=[]
for i,j in zip(A.row, A.col):
    dist.append((i-j))
values,edges=np.histogram(dist,bins=A.shape[0])
edges=0.5*(edges[:-1]+edges[1:])
# In[11]:
# Keep only occupied offsets and normalize to a density over diagonals.
edges=edges[values>0]
values=values[values>0]
print(values.shape)
density=values/values.sum()
plt.xlim(-200,200)
plt.bar(edges,density)
plt.xlabel("i-j")
plt.savefig("hist.png")
plt.show()
# Cumulative mass of the most-populated diagonals, sorted descending.
sorted_density=-np.sort(-density)
print(np.cumsum(sorted_density)[0:50])
#plt.plot(sorted_density)
plt.plot(np.cumsum(sorted_density),marker="o")
#plt.plot(np.ones(sorted_density.shape))
plt.xlim(0,20)
plt.ylim(0,1)
# NOTE(review): the second ylabel call overwrites the first -- presumably
# one of these was meant to be xlabel; confirm intended axis labels.
plt.ylabel("fillfactor of subdiagonal")
plt.ylabel("subdiagonal sorted")
plt.savefig("hist2.png")
plt.show()
# In[12]:
# Spy plot of another matrix file.
# NOTE(review): unlike A above, files[2] is not joined with path1 here, so
# this only works if the files are also present in the CWD -- confirm.
C=scp.mmread(files[2])
plt.spy(C,markersize=1)
plt.show()
# In[13]:
# Nonzero count, total number of entries, and fill ratio of A.
print(A.nnz)
print(A.shape[0]*A.shape[1])
print(A.nnz/float(A.shape[0]*A.shape[1]))
# In[14]:
# Zoomed spy view of a 250x250 window of A (y-axis inverted for image-style
# orientation).
plt.spy(A,markersize=1)
size=250
offset=114500
plt.xlim(offset, offset+size)
plt.ylim(offset+size,offset)
plt.show()
# In[ ]:
# In[ ]:
# In[15]:
""
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[16]:
""
# In[ ]:
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.spy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.bar",
"scipy.io.mmread",
"numpy.histogram",
"numpy.sort",
"numpy.cumsum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join"... | [((465, 499), 'scipy.io.mmread', 'scp.mmread', (["(path1 + '/' + files[0])"], {}), "(path1 + '/' + files[0])\n", (475, 499), True, 'import scipy.io as scp\n'), ((496, 520), 'matplotlib.pyplot.spy', 'plt.spy', (['A'], {'markersize': '(1)'}), '(A, markersize=1)\n', (503, 520), True, 'import matplotlib.pyplot as plt\n'), ((520, 542), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""spy.png"""'], {}), "('spy.png')\n", (531, 542), True, 'import matplotlib.pyplot as plt\n'), ((543, 553), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (551, 553), True, 'import matplotlib.pyplot as plt\n'), ((1995, 2031), 'numpy.histogram', 'np.histogram', (['A.row'], {'bins': 'A.shape[0]'}), '(A.row, bins=A.shape[0])\n', (2007, 2031), True, 'import numpy as np\n'), ((2139, 2165), 'matplotlib.pyplot.plot', 'plt.plot', (['edges1a', 'values1'], {}), '(edges1a, values1)\n', (2147, 2165), True, 'import matplotlib.pyplot as plt\n'), ((2312, 2347), 'numpy.histogram', 'np.histogram', (['dist'], {'bins': 'A.shape[0]'}), '(dist, bins=A.shape[0])\n', (2324, 2347), True, 'import numpy as np\n'), ((2488, 2507), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-200)', '(200)'], {}), '(-200, 200)\n', (2496, 2507), True, 'import matplotlib.pyplot as plt\n'), ((2507, 2530), 'matplotlib.pyplot.bar', 'plt.bar', (['edges', 'density'], {}), '(edges, density)\n', (2514, 2530), True, 'import matplotlib.pyplot as plt\n'), ((2530, 2547), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""i-j"""'], {}), "('i-j')\n", (2540, 2547), True, 'import matplotlib.pyplot as plt\n'), ((2548, 2571), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hist.png"""'], {}), "('hist.png')\n", (2559, 2571), True, 'import matplotlib.pyplot as plt\n'), ((2572, 2582), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2580, 2582), True, 'import matplotlib.pyplot as plt\n'), ((2771, 2786), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(20)'], {}), '(0, 20)\n', (2779, 2786), True, 'import matplotlib.pyplot as 
plt\n'), ((2786, 2800), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (2794, 2800), True, 'import matplotlib.pyplot as plt\n'), ((2800, 2839), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fillfactor of subdiagonal"""'], {}), "('fillfactor of subdiagonal')\n", (2810, 2839), True, 'import matplotlib.pyplot as plt\n'), ((2840, 2872), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""subdiagonal sorted"""'], {}), "('subdiagonal sorted')\n", (2850, 2872), True, 'import matplotlib.pyplot as plt\n'), ((2873, 2897), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hist2.png"""'], {}), "('hist2.png')\n", (2884, 2897), True, 'import matplotlib.pyplot as plt\n'), ((2898, 2908), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2906, 2908), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2945), 'scipy.io.mmread', 'scp.mmread', (['files[2]'], {}), '(files[2])\n', (2935, 2945), True, 'import scipy.io as scp\n'), ((2946, 2970), 'matplotlib.pyplot.spy', 'plt.spy', (['C'], {'markersize': '(1)'}), '(C, markersize=1)\n', (2953, 2970), True, 'import matplotlib.pyplot as plt\n'), ((2970, 2980), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2978, 2980), True, 'import matplotlib.pyplot as plt\n'), ((3093, 3117), 'matplotlib.pyplot.spy', 'plt.spy', (['A'], {'markersize': '(1)'}), '(A, markersize=1)\n', (3100, 3117), True, 'import matplotlib.pyplot as plt\n'), ((3140, 3171), 'matplotlib.pyplot.xlim', 'plt.xlim', (['offset', '(offset + size)'], {}), '(offset, offset + size)\n', (3148, 3171), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3201), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(offset + size)', 'offset'], {}), '(offset + size, offset)\n', (3178, 3201), True, 'import matplotlib.pyplot as plt\n'), ((3199, 3209), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3207, 3209), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2617), 'numpy.sort', 'np.sort', (['(-density)'], {}), '(-density)\n', (2607, 2617), True, 'import 
numpy as np\n'), ((2692, 2717), 'numpy.cumsum', 'np.cumsum', (['sorted_density'], {}), '(sorted_density)\n', (2701, 2717), True, 'import numpy as np\n'), ((349, 363), 'os.listdir', 'listdir', (['path1'], {}), '(path1)\n', (356, 363), False, 'from os import listdir\n'), ((2624, 2649), 'numpy.cumsum', 'np.cumsum', (['sorted_density'], {}), '(sorted_density)\n', (2633, 2649), True, 'import numpy as np\n'), ((375, 389), 'os.path.join', 'join', (['path1', 'f'], {}), '(path1, f)\n', (379, 389), False, 'from os.path import isfile, join\n')] |
# Surveillance System Controller.
# <NAME>
# 2016
# Copyright 2016, <NAME>, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code used in this project included opensource software (Openface)
# developed by <NAME>
# Copyright 2015-2016 Carnegie Mellon University
import time
import argparse
import cv2
import os
import numpy as np
import dlib
from subprocess import Popen, PIPE
import os.path
import sys
import logging
from logging.handlers import RotatingFileHandler
import threading
import time
from datetime import datetime, timedelta
#import smtplib
#from email.mime.multipart import MIMEMultipart
#from email.mime.text import MIMEText
#from email.mime.base import MIMEBase
#from email import encoders
import requests
import json
import Camera
import FaceRecogniser
import ImageUtils
import random
#
from websocket import create_connection
import apprise
# Get paths for models
# //////////////////////////////////////////////////////////////////////////////////////////////
# Resolve model directories relative to this file's own location.
fileDir = os.path.dirname(os.path.realpath(__file__))
luaDir = os.path.join(fileDir, '..', 'batch-represent')
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
# Command-line options for model paths, image size and CUDA usage.
parser = argparse.ArgumentParser()
parser.add_argument('--dlibFacePredictor',
                    type=str, help="Path to dlib's face predictor.",
                    default=os.path.join(dlibModelDir , "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel',
                    type=str, help="Path to Torch network model.",
                    default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--unknown', type=bool, default=False, help='Try to predict unknown people')
args = parser.parse_args()
# NOTE(review): this unconditionally overrides the --cuda flag — confirm intent.
args.cuda = True
start = time.time()
np.set_printoptions(precision=2)
# Enable dlib's CUDA support when at least one CUDA device is present.
if args.cuda and dlib.cuda.get_num_devices()>0:
    print("Surveillance System Controller DLIB using CUDA")
    dlib.DLIB_USE_CUDA = True
# Create the log directory; the TypeError fallback covers interpreters
# whose os.makedirs lacks the exist_ok keyword (pre-Python 3.2).
try:
    os.makedirs('logs', exist_ok=True) # Python>3.2
except TypeError:
    try:
        os.makedirs('logs')
    except OSError as exc: # Python >2.5
        print("logging directory already exist")
# Root logger writing INFO+ records to a 10 MB rotating file, 10 backups kept.
logger = logging.getLogger()
formatter = logging.Formatter("(%(threadName)-10s) %(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = RotatingFileHandler("logs/surveillance.log", maxBytes=10000000, backupCount=10)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
#logging.basicConfig(level=logging.DEBUG,
#                    format='(%(threadName)-10s) %(message)s',
#                    )
class SurveillanceSystem(object):
""" The SurveillanceSystem object is the heart of this application.
It provides all the central proccessing and ties everything
together. It generates camera frame proccessing threads as
well as an alert monitoring thread. A camera frame proccessing
thread can process a camera using 5 different processing methods.
These methods aim to allow the user to adapt the system to their
needs and can be found in the process_frame() function. The alert
monitoring thread continually checks the system state and takes
action if a particular event occurs. """
def __init__(self):
    """Set up recognition, shared state, the alert-monitoring thread and any
    cameras/alerts persisted in config.json (one processing thread per camera)."""
    self.recogniser = FaceRecogniser.FaceRecogniser()
    self.trainingEvent = threading.Event() # Used to holt processing while training the classifier
    self.trainingEvent.set()
    self.drawing = True  # when True, processed frames are annotated with boxes/labels
    self.alarmState = 'Disarmed' # Alarm states - Disarmed, Armed, Triggered
    self.alarmTriggerd = False
    self.alerts = [] # Holds all system alerts
    self.cameras = [] # Holds all system cameras
    self.camerasLock = threading.Lock() # Used to block concurrent access of cameras []
    self.cameraProcessingThreads = []
    self.peopleDB = []
    self.confidenceThreshold = 50 # Used as a threshold to classify a person as unknown
    # Initialization of alert processing thread
    self.alertsLock = threading.Lock()
    self.alertThread = threading.Thread(name='alerts_process_thread_',target=self.alert_engine,args=())
    self.alertThread.daemon = False
    self.alertThread.start()
    # Used for testing purposes
    ###################################
    self.testingResultsLock = threading.Lock()
    self.detetectionsCount = 0
    self.trueDetections = 0
    self.counter = 0
    ####################################
    self.get_face_database_names() # Gets people in database for web client
    # NOTE(review): presumably an apprise notification object assigned elsewhere — confirm.
    self.apobj = None
    self._read_config()
    #//////////////////////////////////////////////////// Camera Examples ////////////////////////////////////////////////////
    #self.cameras.append(Camera.IPCamera("testing/iphoneVideos/singleTest.m4v","detect_recognise_track",False)) # Video Example - uncomment and run code
    # self.cameras.append(Camera.IPCamera("http://192.168.1.33/video.mjpg","detect_recognise_track",False))
    # processing frame threads: one per camera loaded from config
    for i, cam in enumerate(self.cameras):
        thread = threading.Thread(name='frame_process_thread_' + str(i),target=self.process_frame,args=(cam,))
        thread.daemon = False
        self.cameraProcessingThreads.append(thread)
        thread.start()
def _read_config(self):
if not os.path.isfile('config.json'):
return
with open('config.json') as json_file:
config = json.load(json_file)
for cam in config["cameras"]:
print("cam", cam)
dlibDetection = False
if cam["dlibDetection"].lower() == "true":
dlibDetection = True
fpsTweak = False
if cam["fpsTweak"].lower() == "true":
fpsTweak = True
self.cameras.append(Camera.IPCamera(cam["url"], cam["cameraFunction"], dlibDetection, fpsTweak))
for al in config["alerts"]:
print("alert", al)
self.alerts.append(Alert(al["alarmState"],
al["camera"],
al["event"],
al["person"],
al["actions"],
al["emailAddress"],
int(al["confidence"])))
def write_config(self):
config = {}
config["cameras"] = []
config["alerts"] = []
# Camera: url, cameraFunction, dlibDetection, fpsTweak
for cam in self.cameras:
config["cameras"].append({"url": cam.url,
"cameraFunction": cam.cameraFunction,
"dlibDetection": cam.dlibDetection,
"fpsTweak": cam.fpsTweak})
# Alert: alarmState, camera, event, person, actions, emailAddress, confidence
for al in self.alerts:
config["alerts"].append({"alarmState": al.alarmState,
"camera": al.camera,
"event": al.event,
"person": al.person,
"actions": al.actions,
"emailAddress": al.emailAddress,
"confidence": al.confidence})
with open('config.json', 'w') as outfile:
json.dump(config, outfile)
def add_camera(self, camera):
"""Adds new camera to the System and generates a
frame processing thread"""
print("add_camerea - {}".format(camera))
self.cameras.append(camera)
thread = threading.Thread(name='frame_process_thread_' +
str(len(self.cameras)),
target=self.process_frame,
args=(self.cameras[-1],))
thread.daemon = False
self.cameraProcessingThreads.append(thread)
thread.start()
def remove_camera(self, camID):
"""remove a camera to the System and kill its processing thread"""
print("remove_camera - camID {}".format(camID))
if "_" in camID:
cid = camID.split("_")[1]
else:
cid = camID
cam = self.cameras[int(cid)]
cam.captureThread.stop = False
self.cameras.pop(int(cid))
self.cameraProcessingThreads.pop(int(cid))
#self.captureThread.stop = False
def process_frame(self, camera):
    """This function performs all the frame proccessing.
    It reads frames captured by the IPCamera instance,
    resizes them, and performs 1 of 5 functions selected by
    camera.cameraFunction:

    - "detect_motion":            motion detection only
    - "detect_recognise":         face detection + recognition on every frame
    - "motion_detect_recognise":  motion gate, then detection + recognition
    - "segment_detect_recognise": background-subtraction segmentation, then
                                  detection + recognition on cropped regions
    - "detect_recognise_track":   segmentation plus per-person Tracker objects

    The loop runs until camera.captureThread.stop becomes True.
    """
    logger.debug('Processing Frames')
    state = 1
    frame_count = 0
    FPScount = 0 # Used to calculate frame rate at which frames are being processed
    FPSstart = time.time()
    start = time.time()
    # BUG FIX: the original read the stop flag once ("stop = camera.captureThread.stop")
    # before the loop, so the thread could never be told to terminate (e.g. by
    # remove_camera). Re-evaluate the flag on every iteration instead.
    while not camera.captureThread.stop:
        frame_count += 1
        logger.debug("Reading Frame")
        frame = camera.read_frame()
        # Checks to see if the new frame is the same as the previous frame
        if (frame is None) or np.array_equal(frame, camera.tempFrame):
            continue
        frame = ImageUtils.resize(frame)
        height, width, channels = frame.shape
        grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # frame in gray scale
        # Frame rate calculation (over blocks of 6 processed frames)
        if FPScount == 6:
            camera.processingFPS = 6/(time.time() - FPSstart)
            FPSstart = time.time()
            FPScount = 0
        FPScount += 1
        camera.tempFrame = frame
        ####################
        # MOTION DETECTION #
        ####################
        if camera.cameraFunction == "detect_motion":
            camera.motion, mframe = camera.motionDetector.detect_movement(grayFrame, get_rects = False, grayFrame=True)
            camera.processing_frame = frame #mframe
            if camera.motion == False:
                logger.debug('//// NO MOTION DETECTED /////')
                continue
            else:
                logger.debug('/// MOTION DETECTED ///')
                #print("- MOTION DETECTED -")
        ##################################
        # FACE DETECTION AND RECOGNTIION #
        ##################################
        elif camera.cameraFunction == "detect_recognise":
            # This approach performs basic face detection and
            # recognition using OpenCV, Dlib and Openface
            training_blocker = self.trainingEvent.wait()
            #rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            camera.faceBoxes = camera.faceDetector.detect_faces(frame, camera.dlibDetection)
            if self.drawing == True:
                frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
                #frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, True) # OpenCV DNN returns dlib.rectangle
            camera.processing_frame = frame
            if len(camera.faceBoxes) > 0:
                print('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' //')
                logger.info('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' //')
                for face_bb in camera.faceBoxes:
                    # Used to reduce false positives from opencv haar cascade detector.
                    # If face isn't detected using more rigorous paramters in the detectMultiscale()
                    # function read the next frame
                    if camera.dlibDetection == False:
                        x, y, w, h = face_bb
                        face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                        faceimg = ImageUtils.crop(frame, face_bb, dlibRect = camera.dlibDetection)
                        if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
                            continue
                    # returns a dictionary that contains name, confidence and representation and an alignedFace (numpy array)
                    predictions, alignedFace = self.recogniser.make_prediction(frame, face_bb)
                    with camera.peopleDictLock:
                        # If the person has already been detected and his new confidence is greater
                        # update persons details, otherwise create a new person
                        if predictions['name'] in camera.people:
                            if camera.people[predictions['name']].confidence < predictions['confidence']:
                                camera.people[predictions['name']].confidence = predictions['confidence']
                                if camera.people[predictions['name']].confidence > self.confidenceThreshold:
                                    camera.people[predictions['name']].identity = predictions['name']
                                camera.people[predictions['name']].set_thumbnail(alignedFace)
                                camera.people[predictions['name']].add_to_thumbnails(alignedFace)
                                camera.people[predictions['name']].set_time()
                        else:
                            if predictions['confidence'] > self.confidenceThreshold:
                                camera.people[predictions['name']] = Person(predictions['rep'], predictions['confidence'], alignedFace, predictions['name'])
                            else:
                                camera.people[predictions['name']] = Person(predictions['rep'], predictions['confidence'], alignedFace, "unknown")
                # Used for streaming proccesed frames to client and email alerts, but mainly used for testing purposes
                camera.processing_frame = frame
        #####################################################################
        # MOTION DETECTION EVENT FOLLOWED BY FACE DETECTION AND RECOGNITION #
        #####################################################################
        elif camera.cameraFunction == "motion_detect_recognise":
            # When motion is detected, consecutive frames are proccessed for faces.
            # If no faces are detected for longer than 30 seconds the thread goes back to
            # looking for motion
            training_blocker = self.trainingEvent.wait()
            if state == 1: # If no faces have been found or there has been no movement
                camera.motion, mframe = camera.motionDetector.detect_movement(frame, get_rects = False)
                if camera.motion == True:
                    logger.debug('////////////////////// MOTION DETECTED //////////////////////')
                    state = 2
                    camera.processing_frame = mframe
                else:
                    logger.debug('////////////////////// NO MOTION DETECTED //////////////////////')
                    continue
            elif state == 2: # If motion has been detected
                if frame_count == 0:
                    start = time.time()
                    frame_count += 1
                #frame = cv2.flip(frame, 1)
                camera.faceBoxes = camera.faceDetector.detect_faces(frame,camera.dlibDetection)
                if self.drawing == True:
                    frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
                camera.processing_frame = frame
                if len(camera.faceBoxes) == 0:
                    if (time.time() - start) > 30.0:
                        logger.info('// No faces found for ' + str(time.time() - start) + ' seconds - Going back to Motion Detection Mode')
                        state = 1
                        frame_count = 0
                else:
                    logger.info('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' ////')
                    # frame = cv2.flip(frame, 1)
                    for face_bb in camera.faceBoxes:
                        if camera.dlibDetection == False:
                            x, y, w, h = face_bb
                            face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                            faceimg = ImageUtils.crop(frame, face_bb, dlibRect = True)
                            if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
                                continue
                        predictions, alignedFace = self.recogniser.make_prediction(frame,face_bb)
                        with camera.peopleDictLock:
                            if predictions['name'] in camera.people:
                                if camera.people[predictions['name']].confidence < predictions['confidence']:
                                    camera.people[predictions['name']].confidence = predictions['confidence']
                                    if camera.people[predictions['name']].confidence > self.confidenceThreshold:
                                        camera.people[predictions['name']].identity = predictions['name']
                                    camera.people[predictions['name']].set_thumbnail(alignedFace)
                                    camera.people[predictions['name']].add_to_thumbnails(alignedFace)
                                    camera.people[predictions['name']].set_time()
                            else:
                                if predictions['confidence'] > self.confidenceThreshold:
                                    camera.people[predictions['name']] = Person(predictions['rep'],
                                                                                predictions['confidence'],
                                                                                alignedFace, predictions['name'])
                                else:
                                    camera.people[predictions['name']] = Person(predictions['rep'],
                                                                                predictions['confidence'],
                                                                                alignedFace, "unknown")
                    start = time.time() # Used to go back to motion detection state of 30s of not finding a face
                    camera.processing_frame = frame
        ##################################################################################
        # MOTION DETECTION OBJECT SEGMENTAION FOLLOWED BY FACE DETECTION AND RECOGNITION #
        ##################################################################################
        elif camera.cameraFunction == "segment_detect_recognise":
            # This approach uses background subtraction to segement a region of
            # interest that is likely to contain a person. The region is cropped from
            # the frame and face detection is performed on a much smaller image. This
            # improves proccessing performance but is highly dependent upon the accuracy of
            # the background model generated by the MotionDetector object.
            training_blocker = self.trainingEvent.wait()
            camera.motion, peopleRects = camera.motionDetector.detect_movement(frame, get_rects = True)
            if camera.motion == False:
                camera.processing_frame = frame
                logger.debug('////-- NO MOTION DETECTED --////')
                continue
            logger.debug('///// MOTION DETECTED /////')
            if self.drawing == True:
                frame = ImageUtils.draw_boxes(frame, peopleRects, False)
            for x, y, w, h in peopleRects:
                logger.debug('//// Proccessing People Segmented Areas ///')
                bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                personimg = ImageUtils.crop(frame, bb, dlibRect = True)
                personimg = cv2.flip(personimg, 1)
                camera.faceBoxes = camera.faceDetector.detect_faces(personimg,camera.dlibDetection)
                if self.drawing == True:
                    camera.processing_frame = ImageUtils.draw_boxes(frame, peopleRects, False)
                for face_bb in camera.faceBoxes:
                    if camera.dlibDetection == False:
                        x, y, w, h = face_bb
                        face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                        faceimg = ImageUtils.crop(personimg, face_bb, dlibRect = True)
                        if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
                            continue
                    logger.info('/// Proccessing Detected faces ///')
                    predictions, alignedFace = self.recogniser.make_prediction(personimg,face_bb)
                    with camera.peopleDictLock:
                        if predictions['name'] in camera.people:
                            if camera.people[predictions['name']].confidence < predictions['confidence']:
                                camera.people[predictions['name']].confidence = predictions['confidence']
                                camera.people[predictions['name']].set_thumbnail(alignedFace)
                                camera.people[predictions['name']].add_to_thumbnails(alignedFace)
                                camera.people[predictions['name']].set_time()
                        else:
                            if predictions['confidence'] > self.confidenceThreshold:
                                camera.people[predictions['name']] = Person(predictions['rep'],
                                                                            predictions['confidence'],
                                                                            alignedFace,
                                                                            predictions['name'])
                            else:
                                camera.people[predictions['name']] = Person(predictions['rep'],
                                                                            predictions['confidence'],
                                                                            alignedFace, "unknown")
        #############################################################################################
        # MOTION DETECTION OBJECT SEGMENTAION FOLLOWED BY FACE DETECTION, RECOGNITION AND TRACKING  #
        #############################################################################################
        elif camera.cameraFunction == "detect_recognise_track":
            # This approach incorporates background subtraction to perform person tracking
            # and is the most efficient out of the all proccesing funcions above. When
            # a face is detected in a region a Tracker object it generated, and is updated
            # every frame by comparing the last known region of the person, to new regions
            # produced by the motionDetector object. Every update of the tracker a detected
            # face is compared to the person's face of whom is being tracked to ensure the tracker
            # is still tracking the correct person. This is acheived by comparing the prediction
            # and the the l2 distance between their embeddings (128 measurements that represent the face).
            # If a tracker does not overlap with any of the regions produced by the motionDetector object
            # for some time the Tracker is deleted.
            training_blocker = self.trainingEvent.wait() # Wait if classifier is being trained
            logger.debug('//// detect_recognise_track 1 ////')
            peopleFound = False
            camera.motion, peopleRects = camera.motionDetector.detect_movement(grayFrame, get_rects = True, grayFrame=True)
            logger.debug('//// detect_recognise_track 2 /////')
            if camera.motion == False:
                camera.processing_frame = frame
                logger.debug('///// NO MOTION DETECTED /////')
                continue
            if self.drawing == True:
                camera.processing_frame = ImageUtils.draw_boxes(frame, peopleRects, False)
            logger.debug('//// MOTION DETECTED //////')
            for x, y, w, h in peopleRects:
                peopleFound = True
                # BUG FIX: this rectangle construction was commented out in the
                # original while person_bb was still used below -> NameError.
                person_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                personimg = ImageUtils.crop(frame, person_bb) # Crop regions of interest
                #personimg = cv2.flip(personimg, 1)
                tracked = False
                # Iterate through each tracker and compare there current psotiion
                for i in range(len(camera.trackers) - 1, -1, -1):
                    if camera.trackers[i].overlap(person_bb):
                        logger.debug("=> Updating Tracker <=")
                        camera.trackers[i].update_tracker(person_bb)
                        # personimg = cv2.flip(personimg, 1)
                        camera.faceBoxes = camera.faceDetector.detect_faces(personimg, camera.dlibDetection)
                        logger.debug('////// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' /////')
                        if len(camera.faceBoxes) > 0:
                            logger.info("Found " + str(len(camera.faceBoxes)) + " faces.")
                        for face_bb in camera.faceBoxes:
                            if camera.dlibDetection == False:
                                x, y, w, h = face_bb
                                face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                                faceimg = ImageUtils.crop(personimg, face_bb)
                                if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
                                    continue
                            predictions, alignedFace = self.recogniser.make_prediction(personimg, face_bb)
                            if predictions['confidence'] > self.confidenceThreshold:
                                predictedName = predictions['name']
                            else:
                                predictedName = "unknown"
                            # If only one face is detected
                            if len(camera.faceBoxes) == 1:
                                # if not the same person check to see if tracked person is unknown
                                # and update or change tracker accordingly
                                # l2Distance is between 0-4
                                # Openface found that 0.99 was the average cutoff between the same and different faces
                                # the same face having a distance less than 0.99
                                if self.recogniser.getSquaredl2Distance(camera.trackers[i].person.rep,
                                                                        predictions['rep']) > 0.99 and \
                                        (camera.trackers[i].person.identity != predictedName):
                                    alreadyBeenDetected = False
                                    with camera.peopleDictLock:
                                        for ID, person in camera.people.items():
                                            # iterate through all detected people in camera
                                            # if the person has already been detected continue to track that person
                                            # - use same person ID
                                            if person.identity == predictedName or \
                                                    self.recogniser.getSquaredl2Distance(person.rep, predictions['rep']) < 0.8:
                                                person = Person(predictions['rep'],
                                                                predictions['confidence'],
                                                                alignedFace,
                                                                predictedName)
                                                logger.info( "====> New Tracker for " +person.identity + " <===")
                                                # Remove current tracker and create new one with the ID of the original person
                                                del camera.trackers[i]
                                                camera.trackers.append(Tracker(frame, person_bb, person,ID))
                                                alreadyBeenDetected = True
                                                break
                                    if not alreadyBeenDetected:
                                        num = random.randrange(1, 1000, 1)
                                        # Create a new person ID
                                        strID = "person" + datetime.now().strftime("%Y%m%d%H%M%S") + str(num)
                                        # Is the new person detected with a low confidence? If yes, classify them as unknown
                                        if predictions['confidence'] > self.confidenceThreshold:
                                            person = Person(predictions['rep'],
                                                            predictions['confidence'],
                                                            alignedFace,
                                                            predictions['name'])
                                        else:
                                            person = Person(predictions['rep'],
                                                            predictions['confidence'],
                                                            alignedFace,
                                                            "unknown")
                                        #add person to detected people
                                        with camera.peopleDictLock:
                                            camera.people[strID] = person
                                        logger.info( "=====> New Tracker for new person <====")
                                        del camera.trackers[i]
                                        camera.trackers.append(Tracker(frame, person_bb, person,strID))
                                # if it is the same person update confidence
                                # if it is higher and change prediction from unknown to identified person
                                # if the new detected face has a lower confidence and can be classified as unknown,
                                # when the person being tracked isn't unknown - change tracker
                                else:
                                    logger.info( "====> update person name and confidence <==")
                                    if camera.trackers[i].person.confidence < predictions['confidence']:
                                        camera.trackers[i].person.confidence = predictions['confidence']
                                        if camera.trackers[i].person.confidence > self.confidenceThreshold:
                                            camera.trackers[i].person.identity = predictions['name']
                            # If more than one face is detected in the region compare faces to the people being tracked
                            # and update tracker accordingly
                            else:
                                logger.info( "==> More Than One Face Detected <==")
                                # if tracker is already tracking the identified face make an update
                                if self.recogniser.getSquaredl2Distance(camera.trackers[i].person.rep,
                                                                        predictions['rep']) < 0.99 and \
                                        camera.trackers[i].person.identity == predictions['name']:
                                    if camera.trackers[i].person.confidence < predictions['confidence']:
                                        camera.trackers[i].person.confidence = predictions['confidence']
                                        if camera.trackers[i].person.confidence > self.confidenceThreshold:
                                            camera.trackers[i].person.identity = predictions['name']
                                else:
                                    # if tracker isn't tracking this face check the next tracker
                                    break
                            camera.trackers[i].person.set_thumbnail(alignedFace)
                            camera.trackers[i].person.add_to_thumbnails(alignedFace)
                            camera.trackers[i].person.set_rep(predictions['rep'])
                            camera.trackers[i].person.set_time()
                            camera.trackers[i].reset_face_pinger()
                        with camera.peopleDictLock:
                            camera.people[camera.trackers[i].id] = camera.trackers[i].person
                        camera.trackers[i].reset_pinger()
                        tracked = True
                        break
                # If the region is not being tracked
                if not tracked:
                    # Look for faces in the cropped image of the region
                    camera.faceBoxes = camera.faceDetector.detect_faces(personimg,camera.dlibDetection)
                    for face_bb in camera.faceBoxes:
                        if camera.dlibDetection == False:
                            if not isinstance(face_bb, dlib.rectangle):
                                x, y, w, h = face_bb
                                face_bb = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                            faceimg = ImageUtils.crop(personimg, face_bb, dlibRect = True)
                            if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
                                continue
                        predictions, alignedFace = self.recogniser.make_prediction(personimg,face_bb)
                        alreadyBeenDetected = False
                        with camera.peopleDictLock:
                            # iterate through all detected people in camera, to see if the person has already been detected
                            for ID, person in camera.people.items():
                                if person.identity == predictions['name'] or \
                                        self.recogniser.getSquaredl2Distance(person.rep ,predictions['rep']) < 0.8:
                                    if predictions['confidence'] > self.confidenceThreshold and \
                                            person.confidence > self.confidenceThreshold:
                                        person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
                                    else:
                                        person = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
                                    logger.info( "==> New Tracker for " + person.identity + " <====")
                                    camera.trackers.append(Tracker(frame, person_bb, person,ID))
                                    alreadyBeenDetected = True
                                    break
                        if not alreadyBeenDetected:
                            num = random.randrange(1, 1000, 1) # Create new person ID if they have not been detected
                            strID = "person" + datetime.now().strftime("%Y%m%d%H%M%S") + str(num)
                            if predictions['confidence'] > self.confidenceThreshold:
                                person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
                            else:
                                person = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
                            #add person to detected people
                            with camera.peopleDictLock:
                                camera.people[strID] = person
                            logger.info( "====> New Tracker for new person <=")
                            camera.trackers.append(Tracker(frame, person_bb, person,strID))
            for i in range(len(camera.trackers) - 1, -1, -1): # starts with the most recently initiated tracker
                if self.drawing == True:
                    bl = (camera.trackers[i].bb.left(), camera.trackers[i].bb.bottom()) # (x, y)
                    tr = (camera.trackers[i].bb.right(), camera.trackers[i].bb.top()) # (x+w,y+h)
                    cv2.rectangle(frame, bl, tr, color=(0, 255, 255), thickness=2)
                    text = camera.trackers[i].person.identity + " " + str(camera.trackers[i].person.confidence)+ "%"
                    org = (camera.trackers[i].bb.left(), camera.trackers[i].bb.top() - 10)
                    cv2.putText(frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.3, color=(0, 255, 255), thickness=1)
                    camera.processing_frame = frame
                # Used to check if tracker hasn't been updated
                camera.trackers[i].ping()
                camera.trackers[i].faceping()
                # If the tracker hasn't been updated for more than 10 pings delete it
                if camera.trackers[i].pings > 10:
                    del camera.trackers[i]
                    continue
def alert_engine(self):
    """Background loop that polls every alert and tests whether its event occurred.

    check alarm state -> check camera -> check event ->
    either look for motion or look for detected faces -> take action.
    An alert that already fired re-arms itself 5 minutes after the event.
    """
    logger.debug('Alert engine starting')
    while True:
        with self.alertsLock:
            for alert in self.alerts:
                logger.debug('checking alert')
                if not alert.action_taken:  # no action taken yet for this event
                    # 'All' matches any alarm state; otherwise require an exact match
                    if alert.alarmState == 'All':
                        alert.event_occurred = self.check_camera_events(alert)
                    elif alert.alarmState == self.alarmState:
                        logger.debug('checking alarm state')
                        alert.event_occurred = self.check_camera_events(alert)
                elif (time.time() - alert.eventTime) > 300:
                    # Reinitialize the alert 5 minutes after the event occurred
                    logger.info("reinitialising alert: " + alert.id)
                    alert.reinitialise()
        time.sleep(2)  # yield so the websocket thread can add/delete alerts
def check_camera_events(self, alert):
    """Check the state of the camera(s) to determine whether an event occurred.

    For 'Recognition' events, looks for a detected person matching the
    alert's identity and confidence threshold; otherwise looks for motion.
    On a hit, saves a snapshot to notification/image.png, fires the alert's
    actions via take_action, and returns True.  Returns False when nothing
    matched.
    """
    if alert.camera != 'All':
        # Single-camera alert: hoist the repeated index lookup.
        camera = self.cameras[int(alert.camera)]
        logger.info("alertTest" + alert.camera)
        if alert.event == 'Recognition':
            logger.info("checkingalertconf " + str(alert.confidence) + " : " + alert.person)
            for person in camera.people.values():
                logger.info("checkingalertconf " + str(alert.confidence) + " : " + alert.person + " : " + person.identity)
                if alert.person == person.identity:  # Has person been detected
                    # "unknown" alerts compare against the *inverted* confidence.
                    if alert.person == "unknown" and (100 - person.confidence) >= alert.confidence:
                        logger.info("alertTest2" + alert.camera)
                        cv2.imwrite("notification/image.png", camera.processing_frame)
                        self.take_action(alert)
                        return True
                    elif person.confidence >= alert.confidence:
                        logger.info("alertTest3" + alert.camera)
                        cv2.imwrite("notification/image.png", camera.processing_frame)
                        self.take_action(alert)
                        return True
            return False  # Person has not been detected; check next alert
        logger.info("alertTest4" + alert.camera)
        if camera.motion == True:  # Has motion been detected
            logger.info("alertTest5" + alert.camera)
            cv2.imwrite("notification/image.png", camera.processing_frame)
            self.take_action(alert)
            return True
        return False  # Motion was not detected; check next alert
    if alert.event == 'Recognition':
        with self.camerasLock:
            cameras = self.cameras
        for camera in cameras:  # Look through all cameras
            for person in camera.people.values():
                if alert.person == person.identity:  # Has person been detected
                    if alert.person == "unknown" and (100 - person.confidence) >= alert.confidence:
                        cv2.imwrite("notification/image.png", camera.processing_frame)
                        self.take_action(alert)
                        return True
                    elif person.confidence >= alert.confidence:
                        cv2.imwrite("notification/image.png", camera.processing_frame)
                        self.take_action(alert)
                        return True
        return False  # Person has not been detected; check next alert
    with self.camerasLock:
        for camera in self.cameras:  # Look through all cameras
            if camera.motion == True:  # Has motion been detected
                cv2.imwrite("notification/image.png", camera.processing_frame)
                self.take_action(alert)
                return True
    return False  # Motion was not detected; check next camera
def take_action(self, alert):
    """Fire the alert's configured notifications, at most once per event.

    Records the event time so alert_engine can re-arm the alert later.
    """
    logger.info("Taking action: ==" + json.dumps(alert.actions))
    if not alert.action_taken:  # alerts reinitialise every 5 min for now
        alert.eventTime = time.time()
        if alert.actions['mycroft_message'] == 'true':
            logger.info("mycroft notification being sent")
            self.send_mycroft_notification_alert(alert)
        if alert.actions['apprise_message'] == 'true':
            logger.info("apprise notification being sent")
            self.send_apprise_notification_alert(alert)
        alert.action_taken = True
def send_apprise_notification_alert(self, alert):
    """Send a push message with Apprise, attaching camera snapshot URLs.

    See https://github.com/caronc/apprise for supported services.
    """
    print(">>>Apprise<<<", alert.alertString)
    if not self.apobj:
        # Build the Apprise client lazily on first use.
        self.apobj = apprise.Apprise()
        service = ""  # set an Apprise url here, e.g. Pushbullet "pbul://xyz"
        if service:
            self.apobj.add(service)
    print("alert.camera", alert.camera)
    attachment = apprise.AppriseAttachment()
    if alert.camera.endswith("All"):
        # Attach a snapshot URL for every configured camera.
        for index, _ in enumerate(self.cameras):
            attachment.add('http://127.0.0.1:5000/camera_snapshot/{}'.format(index))
    else:
        # The trailing character of the camera name is its index.
        attachment.add('http://127.0.0.1:5000/camera_snapshot/{}'.format(alert.camera[-1]))
    print("attachment", attachment)
    self.apobj.notify(body=alert.alertString, title='Home Surveilance', attach=attachment)
def send_mycroft_notification_alert(self, alert):
    """Speak the alert string on a Mycroft device via its websocket API."""
    print(">>>Mycroft<<<", alert.alertString)
    host = ''  # set hostname or IP of your Mycroft device here
    if not host:
        # No device configured: nothing to do.
        return
    uri = 'ws://' + host + ':8181/core'
    ws = create_connection(uri)
    payload = '{"type": "speak", "data": {"utterance": "' + alert.alertString + '"}, "context":{}}'
    result = ws.send(payload)
    print("Received '%s'" % result)
    ws.close()
def add_face(self, name, image, upload):
    """Add a face image to the directory used for training the classifier.

    Uploaded images go to training-images/ (still need alignment);
    captured images go straight to aligned-images/.  Returns True on
    success, False if the person's directory could not be created.
    """
    if not upload:
        path = fileDir + "/aligned-images/"
    else:
        path = fileDir + "/training-images/"
    num = 0
    if not os.path.exists(path + name):
        try:
            logger.info("Creating New Face Directory: " + name)
            os.makedirs(path + name)
        except OSError as err:
            # Log the actual exception instance (the original logged the
            # OSError *class*, losing the error details).
            logger.info(err)
            return False
    else:
        # Continue numbering after the images already stored for this person.
        num = len([nam for nam in os.listdir(path + name)
                   if os.path.isfile(os.path.join(path + name, nam))])
    logger.info("Writing Image To Directory: " + name)
    cv2.imwrite(path + name + "/" + name + "_" + str(num) + ".png", image)
    self.get_face_database_names()
    return True
def get_face_database_names(self):
    """Refresh self.peopleDB with the identities most recently used
    to train the classifier (plus a catch-all 'unknown' entry)."""
    path = fileDir + "/aligned-images/"
    self.peopleDB = []
    for entry in os.listdir(path):
        # Skip the classifier cache, hidden files and unknown-N folders.
        is_excluded = (entry == 'cache.t7' or entry.startswith('.')
                       or entry[0:7] == 'unknown')
        if is_excluded:
            continue
        self.peopleDB.append(entry)
        logger.info("Known faces in our db for: " + entry + " ")
    self.peopleDB.append('unknown')
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
class Person(object):
    """Holds a detected person's identity, confidence, face image and
    thumbnails for other processes to consume.
    """
    # Monotonically increasing counter shared by all instances.
    person_count = 0

    def __init__(self, rep, confidence=0, face=None, name="unknown"):
        print(">Person: confidence", confidence, "face", face is not None, "name", name)
        if "unknown" not in name:  # "unknown-N" names from the DB stay anonymous
            self.identity = name
        else:
            self.identity = "unknown"
        self.count = Person.person_count
        self.confidence = confidence
        self.thumbnails = []
        self.face = face
        self.rep = rep  # face representation (embedding) vector
        if face is not None:
            ret, jpeg = cv2.imencode('.jpg', face)  # jpg so the client can view it
            # tobytes() replaces tostring(), a deprecated alias removed in NumPy 2.0.
            self.thumbnail = jpeg.tobytes()
            self.thumbnails.append(self.thumbnail)
        Person.person_count += 1
        # NOTE(review): fixed UTC+2 offset hard-coded here -- confirm timezone handling.
        now = datetime.now() + timedelta(hours=2)
        self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")
        self.istracked = False

    def set_rep(self, rep):
        self.rep = rep

    def set_identity(self, identity):
        self.identity = identity

    def set_time(self):
        """Update the timestamp to the current detection time."""
        now = datetime.now() + timedelta(hours=2)
        self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")

    def set_thumbnail(self, face):
        """Replace the stored face and regenerate its jpg thumbnail."""
        self.face = face
        ret, jpeg = cv2.imencode('.jpg', face)
        self.thumbnail = jpeg.tobytes()

    def add_to_thumbnails(self, face):
        """Append a jpg-encoded thumbnail of *face* to the history."""
        ret, jpeg = cv2.imencode('.jpg', face)
        self.thumbnails.append(jpeg.tobytes())
class Tracker:
    """Keeps track of one person's position (bounding box) and two
    staleness counters used to expire inactive trackers."""
    tracker_count = 0

    def __init__(self, img, bb, person, id):
        self.id = id
        self.person = person
        self.bb = bb
        self.pings = 0
        self.facepings = 0

    def reset_pinger(self):
        """Clear the general staleness counter."""
        self.pings = 0

    def reset_face_pinger(self):
        """Clear the face-detection staleness counter."""
        self.facepings = 0

    def update_tracker(self, bb):
        """Record the person's latest bounding box."""
        self.bb = bb

    def overlap(self, bb):
        """Return True when *bb* covers more than 20% of this tracker's box."""
        overlap_ratio = float(self.bb.intersect(bb).area()) / float(self.bb.area())
        return overlap_ratio > 0.2

    def ping(self):
        self.pings = self.pings + 1

    def faceping(self):
        self.facepings = self.facepings + 1
class Alert(object):
    """Holds one alert's configuration and firing state; continually
    polled by the alert-monitoring thread."""
    # Monotonically increasing id counter shared by all instances.
    alert_count = 1

    def __init__(self, alarmState, camera, event, person, actions, emailAddress, confidence):
        logger.info("alert_" + str(Alert.alert_count) + " created")
        if event == 'Motion':
            self.alertString = "Motion detected in camera " + camera
        else:
            self.alertString = person + " was recognised in camera " + camera + " with a confidence greater than " + str(confidence)
        self.id = "alert_" + str(Alert.alert_count)
        self.event_occurred = False
        self.action_taken = False
        self.camera = camera
        self.alarmState = alarmState
        self.event = event
        self.person = person
        self.confidence = confidence
        self.actions = actions
        # Idiomatic identity check for None (was `== None`).
        if emailAddress is None:
            self.emailAddress = "<EMAIL>"
        else:
            self.emailAddress = emailAddress
        self.eventTime = 0
        Alert.alert_count += 1

    def reinitialise(self):
        """Re-arm the alert so it can fire again."""
        self.event_occurred = False
        self.action_taken = False

    def set_custom_alertmessage(self, message):
        self.alertString = message
| [
"argparse.ArgumentParser",
"apprise.AppriseAttachment",
"Camera.IPCamera",
"json.dumps",
"logging.Formatter",
"os.path.isfile",
"cv2.imencode",
"cv2.rectangle",
"os.path.join",
"numpy.set_printoptions",
"cv2.cvtColor",
"cv2.imwrite",
"FaceRecogniser.FaceRecogniser",
"os.path.exists",
"ap... | [((1568, 1614), 'os.path.join', 'os.path.join', (['fileDir', '""".."""', '"""batch-represent"""'], {}), "(fileDir, '..', 'batch-represent')\n", (1580, 1614), False, 'import os\n'), ((1626, 1663), 'os.path.join', 'os.path.join', (['fileDir', '""".."""', '"""models"""'], {}), "(fileDir, '..', 'models')\n", (1638, 1663), False, 'import os\n'), ((1679, 1709), 'os.path.join', 'os.path.join', (['modelDir', '"""dlib"""'], {}), "(modelDir, 'dlib')\n", (1691, 1709), False, 'import os\n'), ((1729, 1763), 'os.path.join', 'os.path.join', (['modelDir', '"""openface"""'], {}), "(modelDir, 'openface')\n", (1741, 1763), False, 'import os\n'), ((1773, 1798), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1796, 1798), False, 'import argparse\n'), ((2559, 2570), 'time.time', 'time.time', ([], {}), '()\n', (2568, 2570), False, 'import time\n'), ((2571, 2603), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (2590, 2603), True, 'import numpy as np\n'), ((2958, 2977), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2975, 2977), False, 'import logging\n'), ((2990, 3088), 'logging.Formatter', 'logging.Formatter', (['"""(%(threadName)-10s) %(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "(\n '(%(threadName)-10s) %(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (3007, 3088), False, 'import logging\n'), ((3094, 3173), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['"""logs/surveillance.log"""'], {'maxBytes': '(10000000)', 'backupCount': '(10)'}), "('logs/surveillance.log', maxBytes=10000000, backupCount=10)\n", (3113, 3173), False, 'from logging.handlers import RotatingFileHandler\n'), ((1531, 1557), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1547, 1557), False, 'import os\n'), ((2753, 2787), 'os.makedirs', 'os.makedirs', (['"""logs"""'], {'exist_ok': '(True)'}), "('logs', exist_ok=True)\n", (2764, 2787), False, 'import 
os\n'), ((1941, 2008), 'os.path.join', 'os.path.join', (['dlibModelDir', '"""shape_predictor_68_face_landmarks.dat"""'], {}), "(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')\n", (1953, 2008), False, 'import os\n'), ((2162, 2212), 'os.path.join', 'os.path.join', (['openfaceModelDir', '"""nn4.small2.v1.t7"""'], {}), "(openfaceModelDir, 'nn4.small2.v1.t7')\n", (2174, 2212), False, 'import os\n'), ((2622, 2649), 'dlib.cuda.get_num_devices', 'dlib.cuda.get_num_devices', ([], {}), '()\n', (2647, 2649), False, 'import dlib\n'), ((4122, 4153), 'FaceRecogniser.FaceRecogniser', 'FaceRecogniser.FaceRecogniser', ([], {}), '()\n', (4151, 4153), False, 'import FaceRecogniser\n'), ((4183, 4200), 'threading.Event', 'threading.Event', ([], {}), '()\n', (4198, 4200), False, 'import threading\n'), ((4569, 4585), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4583, 4585), False, 'import threading\n'), ((4875, 4891), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (4889, 4891), False, 'import threading\n'), ((4919, 5005), 'threading.Thread', 'threading.Thread', ([], {'name': '"""alerts_process_thread_"""', 'target': 'self.alert_engine', 'args': '()'}), "(name='alerts_process_thread_', target=self.alert_engine,\n args=())\n", (4935, 5005), False, 'import threading\n'), ((5188, 5204), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5202, 5204), False, 'import threading\n'), ((9925, 9936), 'time.time', 'time.time', ([], {}), '()\n', (9934, 9936), False, 'import time\n'), ((9953, 9964), 'time.time', 'time.time', ([], {}), '()\n', (9962, 9964), False, 'import time\n'), ((47411, 47438), 'apprise.AppriseAttachment', 'apprise.AppriseAttachment', ([], {}), '()\n', (47436, 47438), False, 'import apprise\n'), ((49538, 49554), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (49548, 49554), False, 'import os\n'), ((51309, 51335), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'face'], {}), "('.jpg', face)\n", (51321, 51335), False, 'import cv2\n'), ((51477, 
51503), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'face'], {}), "('.jpg', face)\n", (51489, 51503), False, 'import cv2\n'), ((2837, 2856), 'os.makedirs', 'os.makedirs', (['"""logs"""'], {}), "('logs')\n", (2848, 2856), False, 'import os\n'), ((6263, 6292), 'os.path.isfile', 'os.path.isfile', (['"""config.json"""'], {}), "('config.json')\n", (6277, 6292), False, 'import os\n'), ((6391, 6411), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (6400, 6411), False, 'import json\n'), ((8444, 8470), 'json.dump', 'json.dump', (['config', 'outfile'], {}), '(config, outfile)\n', (8453, 8470), False, 'import json\n'), ((10349, 10373), 'ImageUtils.resize', 'ImageUtils.resize', (['frame'], {}), '(frame)\n', (10366, 10373), False, 'import ImageUtils\n'), ((10448, 10487), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (10460, 10487), False, 'import cv2\n'), ((42514, 42527), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (42524, 42527), False, 'import time\n'), ((46496, 46507), 'time.time', 'time.time', ([], {}), '()\n', (46505, 46507), False, 'import time\n'), ((47186, 47203), 'apprise.Apprise', 'apprise.Apprise', ([], {}), '()\n', (47201, 47203), False, 'import apprise\n'), ((48187, 48209), 'websocket.create_connection', 'create_connection', (['uri'], {}), '(uri)\n', (48204, 48209), False, 'from websocket import create_connection\n'), ((48724, 48751), 'os.path.exists', 'os.path.exists', (['(path + name)'], {}), '(path + name)\n', (48738, 48751), False, 'import os\n'), ((50594, 50620), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'face'], {}), "('.jpg', face)\n", (50606, 50620), False, 'import cv2\n'), ((50802, 50816), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (50814, 50816), False, 'from datetime import datetime, timedelta\n'), ((50819, 50837), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (50828, 50837), False, 'from datetime import datetime, 
timedelta\n'), ((51133, 51147), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (51145, 51147), False, 'from datetime import datetime, timedelta\n'), ((51150, 51168), 'datetime.timedelta', 'timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (51159, 51168), False, 'from datetime import datetime, timedelta\n'), ((10263, 10302), 'numpy.array_equal', 'np.array_equal', (['frame', 'camera.tempFrame'], {}), '(frame, camera.tempFrame)\n', (10277, 10302), True, 'import numpy as np\n'), ((10671, 10682), 'time.time', 'time.time', ([], {}), '()\n', (10680, 10682), False, 'import time\n'), ((46314, 46339), 'json.dumps', 'json.dumps', (['alert.actions'], {}), '(alert.actions)\n', (46324, 46339), False, 'import json\n'), ((48856, 48880), 'os.makedirs', 'os.makedirs', (['(path + name)'], {}), '(path + name)\n', (48867, 48880), False, 'import os\n'), ((6785, 6860), 'Camera.IPCamera', 'Camera.IPCamera', (["cam['url']", "cam['cameraFunction']", 'dlibDetection', 'fpsTweak'], {}), "(cam['url'], cam['cameraFunction'], dlibDetection, fpsTweak)\n", (6800, 6860), False, 'import Camera\n'), ((10620, 10631), 'time.time', 'time.time', ([], {}), '()\n', (10629, 10631), False, 'import time\n'), ((12047, 12115), 'ImageUtils.draw_boxes', 'ImageUtils.draw_boxes', (['frame', 'camera.faceBoxes', 'camera.dlibDetection'], {}), '(frame, camera.faceBoxes, camera.dlibDetection)\n', (12068, 12115), False, 'import ImageUtils\n'), ((49043, 49066), 'os.listdir', 'os.listdir', (['(path + name)'], {}), '(path + name)\n', (49053, 49066), False, 'import os\n'), ((13061, 13123), 'ImageUtils.crop', 'ImageUtils.crop', (['frame', 'face_bb'], {'dlibRect': 'camera.dlibDetection'}), '(frame, face_bb, dlibRect=camera.dlibDetection)\n', (13076, 13123), False, 'import ImageUtils\n'), ((45948, 46010), 'cv2.imwrite', 'cv2.imwrite', (['"""notification/image.png"""', 'camera.processing_frame'], {}), "('notification/image.png', camera.processing_frame)\n", (45959, 46010), False, 'import cv2\n'), ((49084, 49114), 
'os.path.join', 'os.path.join', (['(path + name)', 'nam'], {}), '(path + name, nam)\n', (49096, 49114), False, 'import os\n'), ((21356, 21404), 'ImageUtils.draw_boxes', 'ImageUtils.draw_boxes', (['frame', 'peopleRects', '(False)'], {}), '(frame, peopleRects, False)\n', (21377, 21404), False, 'import ImageUtils\n'), ((21681, 21722), 'ImageUtils.crop', 'ImageUtils.crop', (['frame', 'bb'], {'dlibRect': '(True)'}), '(frame, bb, dlibRect=True)\n', (21696, 21722), False, 'import ImageUtils\n'), ((21785, 21807), 'cv2.flip', 'cv2.flip', (['personimg', '(1)'], {}), '(personimg, 1)\n', (21793, 21807), False, 'import cv2\n'), ((42254, 42265), 'time.time', 'time.time', ([], {}), '()\n', (42263, 42265), False, 'import time\n'), ((45180, 45242), 'cv2.imwrite', 'cv2.imwrite', (['"""notification/image.png"""', 'camera.processing_frame'], {}), "('notification/image.png', camera.processing_frame)\n", (45191, 45242), False, 'import cv2\n'), ((16536, 16547), 'time.time', 'time.time', ([], {}), '()\n', (16545, 16547), False, 'import time\n'), ((16815, 16883), 'ImageUtils.draw_boxes', 'ImageUtils.draw_boxes', (['frame', 'camera.faceBoxes', 'camera.dlibDetection'], {}), '(frame, camera.faceBoxes, camera.dlibDetection)\n', (16836, 16883), False, 'import ImageUtils\n'), ((19844, 19855), 'time.time', 'time.time', ([], {}), '()\n', (19853, 19855), False, 'import time\n'), ((22019, 22067), 'ImageUtils.draw_boxes', 'ImageUtils.draw_boxes', (['frame', 'peopleRects', '(False)'], {}), '(frame, peopleRects, False)\n', (22040, 22067), False, 'import ImageUtils\n'), ((26435, 26483), 'ImageUtils.draw_boxes', 'ImageUtils.draw_boxes', (['frame', 'peopleRects', '(False)'], {}), '(frame, peopleRects, False)\n', (26456, 26483), False, 'import ImageUtils\n'), ((26762, 26795), 'ImageUtils.crop', 'ImageUtils.crop', (['frame', 'person_bb'], {}), '(frame, person_bb)\n', (26777, 26795), False, 'import ImageUtils\n'), ((45448, 45510), 'cv2.imwrite', 'cv2.imwrite', (['"""notification/image.png"""', 
'camera.processing_frame'], {}), "('notification/image.png', camera.processing_frame)\n", (45459, 45510), False, 'import cv2\n'), ((22392, 22442), 'ImageUtils.crop', 'ImageUtils.crop', (['personimg', 'face_bb'], {'dlibRect': '(True)'}), '(personimg, face_bb, dlibRect=True)\n', (22407, 22442), False, 'import ImageUtils\n'), ((40251, 40313), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'bl', 'tr'], {'color': '(0, 255, 255)', 'thickness': '(2)'}), '(frame, bl, tr, color=(0, 255, 255), thickness=2)\n', (40264, 40313), False, 'import cv2\n'), ((40642, 40750), 'cv2.putText', 'cv2.putText', (['frame', 'text', 'org', 'cv2.FONT_HERSHEY_SIMPLEX'], {'fontScale': '(0.3)', 'color': '(0, 255, 255)', 'thickness': '(1)'}), '(frame, text, org, cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.3,\n color=(0, 255, 255), thickness=1)\n', (40653, 40750), False, 'import cv2\n'), ((17034, 17045), 'time.time', 'time.time', ([], {}), '()\n', (17043, 17045), False, 'import time\n'), ((17800, 17846), 'ImageUtils.crop', 'ImageUtils.crop', (['frame', 'face_bb'], {'dlibRect': '(True)'}), '(frame, face_bb, dlibRect=True)\n', (17815, 17846), False, 'import ImageUtils\n'), ((28091, 28126), 'ImageUtils.crop', 'ImageUtils.crop', (['personimg', 'face_bb'], {}), '(personimg, face_bb)\n', (28106, 28126), False, 'import ImageUtils\n'), ((37047, 37097), 'ImageUtils.crop', 'ImageUtils.crop', (['personimg', 'face_bb'], {'dlibRect': '(True)'}), '(personimg, face_bb, dlibRect=True)\n', (37062, 37097), False, 'import ImageUtils\n'), ((38905, 38933), 'random.randrange', 'random.randrange', (['(1)', '(1000)', '(1)'], {}), '(1, 1000, 1)\n', (38921, 38933), False, 'import random\n'), ((17135, 17146), 'time.time', 'time.time', ([], {}), '()\n', (17144, 17146), False, 'import time\n'), ((31449, 31477), 'random.randrange', 'random.randrange', (['(1)', '(1000)', '(1)'], {}), '(1, 1000, 1)\n', (31465, 31477), False, 'import random\n'), ((39043, 39057), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (39055, 39057), 
False, 'from datetime import datetime, timedelta\n'), ((31623, 31637), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (31635, 31637), False, 'from datetime import datetime, timedelta\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def _lcs(string, sub):
"""
Computes longest common subsequence (LCS) for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (list of int): length of the LCS between the two strings
"""
if len(string) < len(sub):
sub, string = string, sub
str_len, sub_len = len(string), len(sub)
lengths = [[0 for _ in range(sub_len + 1)] for _ in range(str_len + 1)]
for j in range(1, sub_len + 1):
for i in range(1, str_len + 1):
if string[i - 1] == sub[j - 1]:
lengths[i][j] = lengths[i - 1][j - 1] + 1
else:
lengths[i][j] = max(lengths[i - 1][j], lengths[i][j - 1])
return lengths[str_len][sub_len]
class Rouge(object):
    """
    Class for computing ROUGE-L score for a set of
    candidate sentences for the MS COCO test set.
    """

    def __init__(self):
        # vrama91: updated the value below based on discussion with Hovey
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """
        Compute ROUGE-L score given one candidate and references for an image.

        :param candidate: list with a single str : candidate sentence
        :param refs: list of str : reference sentences for the image
        :returns: float : ROUGE-L score of the candidate against the references
        """
        assert (len(candidate) == 1)
        assert (len(refs) > 0)
        token_c = candidate[0].split()
        prec, rec = [], []
        for reference in refs:
            token_r = reference.split()
            # Longest common subsequence between candidate and this reference.
            lcs = _lcs(token_r, token_c)
            prec.append(lcs / float(len(token_c)))
            rec.append(lcs / float(len(token_r)))
        prec_max, rec_max = max(prec), max(rec)
        if prec_max == 0 or rec_max == 0:
            return 0.0
        # Weighted harmonic mean of best precision and best recall.
        beta_sq = self.beta ** 2
        return ((1 + beta_sq) * prec_max * rec_max) / float(rec_max + beta_sq * prec_max)

    def compute_score(self, gts, res):
        """
        Computes the Rouge-L score for a dataset of reference and
        candidate sentences.

        :param gts: dict : ground truth sentences per image id
        :param res: dict : predicted sentences per image id
        :returns: tuple of (mean ROUGE-L score as a percentage, per-image scores)
        """
        scores = []
        for idx in sorted(gts.keys()):
            hypo, ref = res[idx], gts[idx]
            scores.append(self.calc_score(hypo, ref))
            # Sanity check
            assert (isinstance(hypo, list))
            assert (isinstance(ref, list))
            assert (len(hypo) == 1)
            assert (len(ref) > 0)
        scores = np.array(scores)
        # convert to percentage
        return 100 * np.mean(scores), scores

    @staticmethod
    def method():
        return "ROUGE-L"
| [
"numpy.array"
] | [((3125, 3140), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (3133, 3140), True, 'import numpy as np\n'), ((3211, 3226), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (3219, 3226), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
def load_img(path, grayscale=False, target_size=None, crop_size=None):
    """
    Load an image, optionally grayscaling, resizing and center-cropping it.

    # Arguments
        path: Path to image file.
        grayscale: Boolean, whether to load the image as grayscale.
        target_size: Either `None` (default to original size)
            or tuple of ints `(img_width, img_height)`.
        crop_size: Either `None` (default to original size)
            or tuple of ints `(img_width, img_height)`.

    # Returns
        Image as a float32 numpy array.

    # Raises
        IOError: if the image cannot be read from `path`.
    """
    img = cv2.imread(path)
    if img is None:
        # cv2.imread silently returns None on a missing/corrupt file,
        # which would otherwise surface as a cryptic AttributeError below.
        raise IOError("Could not load image: {}".format(path))
    if grayscale and len(img.shape) != 2:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # NOTE(review): (img.shape[0], img.shape[1]) is (rows, cols) = (height, width)
    # while target_size is documented as (width, height) -- confirm intended order.
    if target_size and (img.shape[0], img.shape[1]) != target_size:
        img = cv2.resize(img, target_size)
    if crop_size:
        img = central_image_crop(img, crop_size[0], crop_size[1])
    if grayscale:
        # Restore an explicit single channel axis after grayscale conversion.
        img = img.reshape((img.shape[0], img.shape[1], 1))
    return np.asarray(img, dtype=np.float32)
def central_image_crop(img, crop_width=150, crop_heigth=150):
    """
    Crop the input image centered in width and starting from the top
    in height.

    # Arguments:
        crop_width: Width of the crop.
        crop_heigth: Height of the crop.

    # Returns:
        Cropped image.
    """
    rows = img.shape[0]
    mid_col = int(img.shape[1] / 2)
    # Horizontal window centred on the middle column.
    left = mid_col - int(crop_width / 2)
    right = mid_col + int(crop_width / 2)
    # Vertical window anchored to the bottom of the image.
    return img[rows - crop_heigth:rows, left:right]
"cv2.cvtColor",
"cv2.imread",
"numpy.asarray",
"cv2.resize"
] | [((607, 623), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (617, 623), False, 'import cv2\n'), ((1029, 1062), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (1039, 1062), True, 'import numpy as np\n'), ((692, 729), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (704, 729), False, 'import cv2\n'), ((825, 853), 'cv2.resize', 'cv2.resize', (['img', 'target_size'], {}), '(img, target_size)\n', (835, 853), False, 'import cv2\n')] |
from effective_dimension import Model, EffectiveDimension, ClassicalNeuralNetwork
import numpy as np
# this code generates the data for the classical model's fisher information eigenvalue distribution plot \
# in the main figure
# Layer widths of the classical feed-forward network: 4 inputs -> 4 -> 4 -> 2 outputs.
nnsize = [4, 4, 4, 2]
cnet = ClassicalNeuralNetwork(nnsize)
# Sampling resolution passed to EffectiveDimension: 100 inputs x 100 parameter
# sets (presumably averaged over internally -- confirm in the library's docs).
num_inputs = 100
num_thetas = 100
ed = EffectiveDimension(cnet, num_thetas=num_thetas, num_inputs=num_inputs)
# f holds the "fhat" Fisher-information matrices; trace is unused here.
f, trace = ed.get_fhat()
np.save("fhat4_[4 4 4 2]_fisher.npy", f) | [
"effective_dimension.ClassicalNeuralNetwork",
"numpy.save",
"effective_dimension.EffectiveDimension"
] | [((260, 290), 'effective_dimension.ClassicalNeuralNetwork', 'ClassicalNeuralNetwork', (['nnsize'], {}), '(nnsize)\n', (282, 290), False, 'from effective_dimension import Model, EffectiveDimension, ClassicalNeuralNetwork\n'), ((330, 400), 'effective_dimension.EffectiveDimension', 'EffectiveDimension', (['cnet'], {'num_thetas': 'num_thetas', 'num_inputs': 'num_inputs'}), '(cnet, num_thetas=num_thetas, num_inputs=num_inputs)\n', (348, 400), False, 'from effective_dimension import Model, EffectiveDimension, ClassicalNeuralNetwork\n'), ((426, 466), 'numpy.save', 'np.save', (['"""fhat4_[4 4 4 2]_fisher.npy"""', 'f'], {}), "('fhat4_[4 4 4 2]_fisher.npy', f)\n", (433, 466), True, 'import numpy as np\n')] |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import copy
import os
import time
import json
import logging
import torchvision
class Generator(nn.Module):
    """MLP generator: maps a latent vector to an image of `output_size`,
    interpreted as (height, width, channels), with Tanh-bounded output."""

    def __init__(self, latent_dim, output_size):
        super(Generator, self).__init__()
        self.output_size = output_size
        # Build the hidden stack programmatically; module order (and hence
        # the state-dict layout) matches the hand-written Sequential exactly.
        hidden_widths = [128, 256, 512, 1024]
        modules = []
        in_features = latent_dim
        for width in hidden_widths:
            modules.append(nn.Linear(in_features, width))
            modules.append(nn.BatchNorm1d(width))
            modules.append(nn.LeakyReLU(0.2, inplace=True))
            in_features = width
        modules.append(nn.Linear(in_features, int(np.prod(self.output_size))))
        modules.append(nn.Tanh())
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        flat = self.layers(x)
        # Reshape the flat output to NCHW: (batch, channels, height, width).
        return flat.reshape(-1, self.output_size[2], self.output_size[0], self.output_size[1])
class Discriminator(nn.Module):
    """MLP discriminator: flattens an image of `img_size` and outputs a
    sigmoid validity score in [0, 1]."""

    def __init__(self, img_size):
        super(Discriminator, self).__init__()
        in_features = int(np.prod(img_size))
        self.layers = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Flatten everything but the batch dimension before the MLP.
        flat = x.reshape(x.shape[0], -1)
        return self.layers(flat)
"torch.nn.Tanh",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"numpy.prod",
"torch.nn.Sigmoid"
] | [((476, 502), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', '(128)'], {}), '(latent_dim, 128)\n', (485, 502), True, 'import torch.nn as nn\n'), ((516, 535), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (530, 535), True, 'import torch.nn as nn\n'), ((549, 580), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (561, 580), True, 'import torch.nn as nn\n'), ((594, 613), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(256)'], {}), '(128, 256)\n', (603, 613), True, 'import torch.nn as nn\n'), ((627, 646), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (641, 646), True, 'import torch.nn as nn\n'), ((660, 691), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (672, 691), True, 'import torch.nn as nn\n'), ((705, 724), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(512)'], {}), '(256, 512)\n', (714, 724), True, 'import torch.nn as nn\n'), ((738, 757), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (752, 757), True, 'import torch.nn as nn\n'), ((771, 802), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (783, 802), True, 'import torch.nn as nn\n'), ((816, 836), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1024)'], {}), '(512, 1024)\n', (825, 836), True, 'import torch.nn as nn\n'), ((850, 870), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024)'], {}), '(1024)\n', (864, 870), True, 'import torch.nn as nn\n'), ((884, 915), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (896, 915), True, 'import torch.nn as nn\n'), ((990, 999), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (997, 999), True, 'import torch.nn as nn\n'), ((1373, 1404), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1385, 1404), True, 'import torch.nn as nn\n'), ((1418, 1437), 
'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (1427, 1437), True, 'import torch.nn as nn\n'), ((1451, 1482), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1463, 1482), True, 'import torch.nn as nn\n'), ((1496, 1513), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (1505, 1513), True, 'import torch.nn as nn\n'), ((1527, 1539), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1537, 1539), True, 'import torch.nn as nn\n'), ((949, 974), 'numpy.prod', 'np.prod', (['self.output_size'], {}), '(self.output_size)\n', (956, 974), True, 'import numpy as np\n'), ((1335, 1352), 'numpy.prod', 'np.prod', (['img_size'], {}), '(img_size)\n', (1342, 1352), True, 'import numpy as np\n')] |
import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate a dataset in R^2 of specified size

    Samples are drawn uniformly from [-1, 1]^2. Points strictly inside the
    circle of radius 0.5 centered at the origin are labeled -1, all other
    points +1. Finally, a `noise_ratio` fraction of the labels is inverted
    at randomly chosen indices.

    Parameters
    ----------
    n: int
        Number of samples to generate

    noise_ratio: float
        Ratio of labels to invert

    Returns
    -------
    X: np.ndarray of shape (n_samples,2)
        Design matrix of samples

    y: np.ndarray of shape (n_samples,)
        Labels of samples
    """
    X = np.random.rand(n, 2) * 2 - 1
    y = np.ones(n)
    inside_circle = np.sum(X ** 2, axis=1) < 0.5 ** 2
    y[inside_circle] = -1
    flipped_idx = np.random.choice(n, int(noise_ratio * n))
    y[flipped_idx] *= -1
    return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000,
                              test_size=500):
    """Fit an AdaBoost ensemble of decision stumps and plot its behavior.

    Generates train/test sets with `generate_data`, fits AdaBoost, and
    writes four plots under ./ex4Plots/ (the directory must exist):
    train/test error as a function of ensemble size, decision surfaces for
    several ensemble sizes, the best-performing ensemble's surface, and the
    training set drawn with marker sizes proportional to the final sample
    weights.

    Parameters
    ----------
    noise: float
        Ratio of labels to invert in the generated data
    n_learners: int
        Maximum ensemble size (number of boosting iterations) to fit
    train_size: int
        Number of training samples to generate
    test_size: int
        Number of test samples to generate
    """
    (train_X, train_y), (test_X, test_y) = \
        generate_data(train_size, noise), generate_data(test_size, noise)

    # Question 1: Train- and test errors of AdaBoost in noiseless case
    adaboost = AdaBoost(DecisionStump, n_learners)
    adaboost.fit(train_X, train_y)
    iterations = range(1, n_learners + 1)
    train_err = [adaboost.partial_loss(train_X, train_y, t) for t in
                 iterations]
    test_err = [adaboost.partial_loss(test_X, test_y, t) for t in
                iterations]
    fig = go.Figure(
        [go.Scatter(x=list(iterations), y=train_err, name="train"),
         go.Scatter(x=list(iterations), y=test_err, name="test")],
        layout=go.Layout(
            title=r"$\text{Training and test errors as a function of the number of fitted learners}$",
            xaxis=dict(title=r"$\text{Number of fitted learners}$"),
            yaxis=dict(title=r"$\text{Error rate}$")))
    fig.show()
    fig.write_image(f"./ex4Plots/Training and test errors by learners.png")

    # Question 2: Plotting decision surfaces
    T = [5, 50, 100, 250]
    # bounding box of all samples, padded by 0.1 on each side
    lims = np.array([np.r_[train_X, test_X].min(axis=0),
                     np.r_[train_X, test_X].max(axis=0)]).T + np.array(
        [-.1, .1])
    fig = make_subplots(rows=2, cols=2,
                        subplot_titles=[f"ensemble size = {t}"
                                        for t in T],
                        horizontal_spacing=0.01, vertical_spacing=.03)
    for i, t in enumerate(T):
        fig.add_traces(
            [decision_surface(lambda X: adaboost.partial_predict(X, t),
                              lims[0], lims[1],
                              showscale=False),
             go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
                        showlegend=False,
                        marker=dict(color=test_y,
                                    colorscale=[custom[0],
                                                custom[-1]],
                                    line=dict(color="black",
                                              width=1)))
             ],
            rows=(i // 2) + 1, cols=(i % 2) + 1)
    fig.update_layout(
        title=rf"$\textbf{{Decision Boundaries Of different ensemble sizes}}$",
        margin=dict(t=100)) \
        .update_xaxes(visible=False).update_yaxes(visible=False)
    fig.show()
    fig.write_image(
        f"./ex4Plots/Decision Boundaries Of different ensemble sizes.png")

    # Question 3: Decision surface of best performing ensemble
    # np.argmin returns the first index of the minimum, matching the
    # original strict "<" scan; this also avoids shadowing the builtin min()
    optimal_size = int(np.argmin(test_err)) + 1
    min_err = test_err[optimal_size - 1]
    fig = go.Figure(
        [decision_surface(lambda X: adaboost.partial_predict(X, optimal_size),
                          lims[0], lims[1],
                          showscale=False),
         go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
                    showlegend=False,
                    marker=dict(color=test_y,
                                colorscale=[custom[0],
                                            custom[-1]],
                                line=dict(color="black",
                                          width=1)))
         ],
        layout=go.Layout(
            title=rf"$\textbf{{Ensemble of size {optimal_size} achieved the accuracy of {1 - min_err}}}$"))
    fig.show()
    fig.write_image(
        f"./ex4Plots/Best ensemble size and error-wise.png")

    # Question 4: Decision surface with weighted samples
    # scale the final AdaBoost sample weights into visible marker sizes
    sizes = adaboost.D_ / np.max(adaboost.D_) * 20
    fig = go.Figure(
        [decision_surface(adaboost.predict,
                          lims[0], lims[1],
                          showscale=False),
         go.Scatter(x=train_X[:, 0], y=train_X[:, 1], mode="markers",
                    showlegend=False,
                    marker=dict(color=train_y, size=sizes,
                                colorscale=[custom[0],
                                            custom[-1]],
                                line=dict(color="black",
                                          width=1)))
         ],
        layout=go.Layout(
            title=rf"$\textbf{{Training set with a point size proportional to it’s weight in the last iteration}}$"))
    fig.show()
    fig.write_image(
        f"./ex4Plots/final training with sizes.png")
if __name__ == '__main__':
    # Fix the RNG seed so generated datasets (and therefore plots) are
    # reproducible between runs.
    np.random.seed(0)
    fit_and_evaluate_adaboost(noise=0)
    # fit_and_evaluate_adaboost(noise=0.4)
| [
"numpy.random.seed",
"numpy.sum",
"numpy.random.rand",
"numpy.ones",
"IMLearn.metalearners.adaboost.AdaBoost",
"numpy.max",
"numpy.array",
"plotly.graph_objects.Layout",
"plotly.subplots.make_subplots"
] | [((1411, 1446), 'IMLearn.metalearners.adaboost.AdaBoost', 'AdaBoost', (['DecisionStump', 'n_learners'], {}), '(DecisionStump, n_learners)\n', (1419, 1446), False, 'from IMLearn.metalearners.adaboost import AdaBoost\n'), ((2449, 2582), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(2)', 'subplot_titles': "[f'ensemble size = {t}' for t in T]", 'horizontal_spacing': '(0.01)', 'vertical_spacing': '(0.03)'}), "(rows=2, cols=2, subplot_titles=[f'ensemble size = {t}' for t in\n T], horizontal_spacing=0.01, vertical_spacing=0.03)\n", (2462, 2582), False, 'from plotly.subplots import make_subplots\n'), ((5623, 5640), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5637, 5640), True, 'import numpy as np\n'), ((959, 969), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (966, 969), True, 'import numpy as np\n'), ((2409, 2430), 'numpy.array', 'np.array', (['[-0.1, 0.1]'], {}), '([-0.1, 0.1])\n', (2417, 2430), True, 'import numpy as np\n'), ((976, 998), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(1)'}), '(X ** 2, axis=1)\n', (982, 998), True, 'import numpy as np\n'), ((4480, 4590), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': 'f"""$\\\\textbf{{Ensemble of size {optimal_size} achieved the accuracy of {1 - min}}}$"""'}), "(title=\n f'$\\\\textbf{{Ensemble of size {optimal_size} achieved the accuracy of {1 - min}}}$'\n )\n", (4489, 4590), True, 'import plotly.graph_objects as go\n'), ((4777, 4796), 'numpy.max', 'np.max', (['adaboost.D_'], {}), '(adaboost.D_)\n', (4783, 4796), True, 'import numpy as np\n'), ((5371, 5495), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': 'f"""$\\\\textbf{{Training set with a point size proportional to it’s weight in the last iteration}}$"""'}), "(title=\n f'$\\\\textbf{{Training set with a point size proportional to it’s weight in the last iteration}}$'\n )\n", (5380, 5495), True, 'import plotly.graph_objects as go\n'), ((929, 949), 'numpy.random.rand', 
'np.random.rand', (['n', '(2)'], {}), '(n, 2)\n', (943, 949), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''recovery - <NAME> (<EMAIL>) - Oct 2017
License: MIT. See the LICENSE file for more details.
This is a companion module for fakelcgen.py. It runs LCs generated using
functions in that module through variable star detection and classification to
see how well they are recovered.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False


def set_logger_parent(parent_name):
    """Attach this module's logger under `parent_name` in the logging tree."""
    globals()['LOGGER'] = logging.getLogger('%s.%s' % (parent_name, LOGMOD))


def _stamped(level, message):
    """Format a fall-back console line used when no LOGGER is configured."""
    return '[%s - %s] %s' % (
        datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'), level, message
    )


def LOGDEBUG(message):
    """Log at DEBUG level; prints only if the module DEBUG flag is set."""
    if LOGGER:
        LOGGER.debug(message)
    elif DEBUG:
        print(_stamped('DBUG', message))


def LOGINFO(message):
    """Log at INFO level, falling back to a timestamped print."""
    if LOGGER:
        LOGGER.info(message)
    else:
        print(_stamped('INFO', message))


def LOGERROR(message):
    """Log at ERROR level, falling back to a timestamped print."""
    if LOGGER:
        LOGGER.error(message)
    else:
        print(_stamped('ERR!', message))


def LOGWARNING(message):
    """Log at WARNING level, falling back to a timestamped print."""
    if LOGGER:
        LOGGER.warning(message)
    else:
        print(_stamped('WRN!', message))


def LOGEXCEPTION(message):
    """Log the message plus the current traceback (call inside `except`)."""
    if LOGGER:
        LOGGER.exception(message)
    else:
        print(_stamped(
            'EXC!', '%s\nexception was: %s' % (message, format_exc())
        ))
#############
## IMPORTS ##
#############
import os
import os.path
import pickle
import gzip
import glob
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from hashlib import md5
from math import sqrt as msqrt
# to turn a list of keys into a dict address
# from https://stackoverflow.com/a/14692747
from functools import reduce
from operator import getitem
def dict_get(datadict, keylist):
    """Address a nested container with a list of successive keys/indices.

    Equivalent to datadict[keylist[0]][keylist[1]]...[keylist[-1]].
    """
    node = datadict
    for key in keylist:
        node = node[key]
    return node
import numpy as np
import numpy.random as npr
# seed the numpy random generator deterministically so fake-LC recovery
# runs are reproducible across invocations
npr.seed(0xdecaff)
import scipy.stats as sps
import scipy.interpolate as spi
import matplotlib
# use the non-interactive Agg backend; this module only writes plot files
# and may run on headless machines or in worker processes
matplotlib.use('Agg')
# raise the path chunk size so plots with very many points don't hit
# Agg's default path-length limit
matplotlib.rcParams['agg.path.chunksize'] = 10000
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
from tqdm import tqdm
###################
## LOCAL IMPORTS ##
###################
from .. import lcproc
# route lcproc's log output under this module's logger namespace
lcproc.set_logger_parent(__name__)
#######################
## LC FORMATS SET UP ##
#######################
def read_fakelc(fakelcfile):
    '''
    This just reads a pickled fake LC.

    Parameters
    ----------
    fakelcfile : str
        Path to the pickled fake light curve file.

    Returns
    -------
    dict
        The unpickled light curve dict.
    '''

    # FIX: the body previously opened an undefined name `lcfile`, which
    # raised NameError on every call; use the `fakelcfile` parameter.
    try:
        with open(fakelcfile,'rb') as infd:
            lcdict = pickle.load(infd)
    except UnicodeDecodeError:
        # pickles written by Python 2 need latin1 to decode str payloads
        with open(fakelcfile,'rb') as infd:
            lcdict = pickle.load(infd, encoding='latin1')

    return lcdict
#######################
## UTILITY FUNCTIONS ##
#######################
def get_varfeatures(simbasedir,
                    mindet=1000,
                    nworkers=None):
    '''
    This runs lcproc.parallel_varfeatures on light curves in simbasedir.

    Parameters
    ----------
    simbasedir : str
        Base directory of the fake LC simulation; must contain
        fakelcs-info.pkl as written by the fake LC generator.
    mindet : int
        Minimum number of detections an LC needs to have features computed.
    nworkers : int or None
        Number of parallel workers (None lets lcproc pick a default).

    Returns
    -------
    str
        Path to the fakelc-varfeatures.pkl file written in simbasedir.
    '''

    # get the info from the simbasedir
    with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
        siminfo = pickle.load(infd)

    lcfpaths = siminfo['lcfpath']
    varfeaturedir = os.path.join(simbasedir,'varfeatures')

    # get the column defs for the fakelcs
    timecols = siminfo['timecols']
    magcols = siminfo['magcols']
    errcols = siminfo['errcols']

    # register the fakelc pklc as a custom lcproc format
    # now we should be able to use all lcproc functions correctly
    if 'fakelc' not in lcproc.LCFORM:
        lcproc.register_custom_lcformat(
            'fakelc',
            '*-fakelc.pkl',
            lcproc.read_pklc,
            timecols,
            magcols,
            errcols,
            magsarefluxes=False,
            specialnormfunc=None
        )

    # now we can use lcproc.parallel_varfeatures directly
    varinfo = lcproc.parallel_varfeatures(lcfpaths,
                                          varfeaturedir,
                                          lcformat='fakelc',
                                          mindet=mindet,
                                          nworkers=nworkers)

    # persist the feature results next to the simulation info
    with open(os.path.join(simbasedir,'fakelc-varfeatures.pkl'),'wb') as outfd:
        pickle.dump(varinfo, outfd, pickle.HIGHEST_PROTOCOL)

    return os.path.join(simbasedir,'fakelc-varfeatures.pkl')
def precision(ntp, nfp):
    '''
    This calculates the precision.

    precision = TP / (TP + FP). Returns nan when there were no positive
    predictions at all (TP + FP == 0).
    '''
    predicted_positives = ntp + nfp
    return ntp / predicted_positives if predicted_positives > 0 else np.nan
def recall(ntp, nfn):
    '''
    This calculates the recall.

    recall = TP / (TP + FN). Returns nan when there were no actual
    positives at all (TP + FN == 0).
    '''
    actual_positives = ntp + nfn
    return ntp / actual_positives if actual_positives > 0 else np.nan
def matthews_correl_coeff(ntp, ntn, nfp, nfn):
    '''
    This calculates the Matthews correlation coefficent.

    https://en.wikipedia.org/wiki/Matthews_correlation_coefficient

    Returns nan when any row/column of the confusion matrix is empty
    (the denominator is zero).
    '''
    numerator = ntp * ntn - nfp * nfn
    denominator = msqrt(
        (ntp + nfp) * (ntp + nfn) * (ntn + nfp) * (ntn + nfn)
    )
    if denominator > 0:
        return numerator / denominator
    return np.nan
#######################################
## VARIABILITY RECOVERY (PER MAGBIN) ##
#######################################
def get_recovered_variables_for_magbin(simbasedir,
                                       magbinmedian,
                                       stetson_stdev_min=2.0,
                                       inveta_stdev_min=2.0,
                                       iqr_stdev_min=2.0,
                                       statsonly=True):
    '''This runs variability selection for the given magbinmedian.

    magbinmedian is an item from the fakelcs-info.pkl's
    fakelcinfo['magrms'][magcol] list for each magcol and designates which
    magbin to get the recovery stats for.

    To generate a full recovery matrix, run this function for each magbin over
    the specified stetson_stdev_min and inveta_stdev_min grid.

    Parameters
    ----------
    simbasedir : str
        Base directory of the fake LC simulation; must contain
        fakelcs-info.pkl and a varfeatures subdirectory.
    magbinmedian : float
        Median SDSS r magnitude of the bin to compute recovery stats for.
    stetson_stdev_min,inveta_stdev_min,iqr_stdev_min : float
        Stdev-multiplier thresholds for the Stetson J, 1/eta, and IQR
        variability indices respectively.
    statsonly : bool
        If True, only counts (array sizes) are stored in the returned dict;
        if False, the full object ID arrays are stored as well.

    Returns
    -------
    dict
        Per-magcol recovery stats (TP/FP/TN/FN, precision, recall, MCC,
        and cross-method missed/found comparisons).
    '''

    # get the info from the simbasedir
    with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
        siminfo = pickle.load(infd)

    objectids = siminfo['objectid']
    varflags = siminfo['isvariable']
    sdssr = siminfo['sdssr']

    # get the column defs for the fakelcs
    timecols = siminfo['timecols']
    magcols = siminfo['magcols']
    errcols = siminfo['errcols']

    # get the actual variables and non-variables
    actualvars = objectids[varflags]
    actualnotvars = objectids[~varflags]

    # register the fakelc pklc as a custom lcproc format
    # now we should be able to use all lcproc functions correctly
    if 'fakelc' not in lcproc.LCFORM:
        lcproc.register_custom_lcformat(
            'fakelc',
            '*-fakelc.pkl',
            lcproc.read_pklc,
            timecols,
            magcols,
            errcols,
            magsarefluxes=False,
            specialnormfunc=None
        )

    # make the output directory if it doesn't exit
    outdir = os.path.join(simbasedir, 'recvar-threshold-pkls')
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    # run the variability search
    varfeaturedir = os.path.join(simbasedir, 'varfeatures')
    varthreshinfof = os.path.join(
        outdir,
        'varthresh-magbinmed%.2f-stet%.2f-inveta%.2f.pkl' % (magbinmedian,
                                                             stetson_stdev_min,
                                                             inveta_stdev_min)
    )
    varthresh = lcproc.variability_threshold(varfeaturedir,
                                             varthreshinfof,
                                             lcformat='fakelc',
                                             min_stetj_stdev=stetson_stdev_min,
                                             min_inveta_stdev=inveta_stdev_min,
                                             min_iqr_stdev=iqr_stdev_min,
                                             verbose=False)

    # get the magbins from the varthresh info
    magbins = varthresh['magbins']

    # get the magbininds
    magbininds = np.digitize(sdssr, magbins)

    # bin the objects according to these magbins
    binned_objectids = []
    binned_actualvars = []
    binned_actualnotvars = []

    # go through all the mag bins and bin up the objectids, actual variables,
    # and actual not-variables
    # NOTE(review): zipping np.unique(magbininds) with range(len(magbins)-1)
    # assumes every magbin is populated; an empty bin would misalign the two
    # sequences -- TODO confirm against the simulation setup
    for mbinind, magi in zip(np.unique(magbininds),
                             range(len(magbins)-1)):

        thisbinind = np.where(magbininds == mbinind)
        thisbin_objectids = objectids[thisbinind]
        thisbin_varflags = varflags[thisbinind]

        thisbin_actualvars = thisbin_objectids[thisbin_varflags]
        thisbin_actualnotvars = thisbin_objectids[~thisbin_varflags]

        binned_objectids.append(thisbin_objectids)
        binned_actualvars.append(thisbin_actualvars)
        binned_actualnotvars.append(thisbin_actualnotvars)

    # this is the output dict
    recdict = {
        'simbasedir':simbasedir,
        'timecols':timecols,
        'magcols':magcols,
        'errcols':errcols,
        'stetj_min_stdev':stetson_stdev_min,
        'inveta_min_stdev':inveta_stdev_min,
        'iqr_min_stdev':iqr_stdev_min,
        'magbinmedian':magbinmedian,
    }

    # now, for each magcol, find the magbin corresponding to magbinmedian, and
    # get its stats
    for magcol in magcols:

        # this is the index of the matching magnitude bin for the magbinmedian
        # provided
        magbinind = np.where(
            np.array(varthresh[magcol]['binned_sdssr_median']) == magbinmedian
        )
        # FIX: np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
        # ndarray.item() extracts the single matching bin index identically
        magbinind = magbinind[0].item()

        # get the objectids, actual vars and actual notvars in this magbin
        thisbin_objectids = binned_objectids[magbinind]
        thisbin_actualvars = binned_actualvars[magbinind]
        thisbin_actualnotvars = binned_actualnotvars[magbinind]

        # stetson recovered variables in this magbin
        stet_recoveredvars = varthresh[magcol][
            'binned_objectids_thresh_stetsonj'
        ][magbinind]

        # calculate TP, FP, TN, FN
        stet_recoverednotvars = np.setdiff1d(thisbin_objectids,
                                             stet_recoveredvars)

        stet_truepositives = np.intersect1d(stet_recoveredvars,
                                            thisbin_actualvars)
        stet_falsepositives = np.intersect1d(stet_recoveredvars,
                                             thisbin_actualnotvars)
        stet_truenegatives = np.intersect1d(stet_recoverednotvars,
                                            thisbin_actualnotvars)
        stet_falsenegatives = np.intersect1d(stet_recoverednotvars,
                                             thisbin_actualvars)

        # calculate stetson recall, precision, Matthews correl coeff
        stet_recall = recall(stet_truepositives.size,
                             stet_falsenegatives.size)
        stet_precision = precision(stet_truepositives.size,
                                   stet_falsepositives.size)
        stet_mcc = matthews_correl_coeff(stet_truepositives.size,
                                         stet_truenegatives.size,
                                         stet_falsepositives.size,
                                         stet_falsenegatives.size)

        # inveta recovered variables in this magbin
        inveta_recoveredvars = varthresh[magcol][
            'binned_objectids_thresh_inveta'
        ][magbinind]
        inveta_recoverednotvars = np.setdiff1d(thisbin_objectids,
                                               inveta_recoveredvars)

        inveta_truepositives = np.intersect1d(inveta_recoveredvars,
                                              thisbin_actualvars)
        inveta_falsepositives = np.intersect1d(inveta_recoveredvars,
                                               thisbin_actualnotvars)
        inveta_truenegatives = np.intersect1d(inveta_recoverednotvars,
                                              thisbin_actualnotvars)
        inveta_falsenegatives = np.intersect1d(inveta_recoverednotvars,
                                               thisbin_actualvars)

        # calculate inveta recall, precision, Matthews correl coeff
        inveta_recall = recall(inveta_truepositives.size,
                               inveta_falsenegatives.size)
        inveta_precision = precision(inveta_truepositives.size,
                                     inveta_falsepositives.size)
        inveta_mcc = matthews_correl_coeff(inveta_truepositives.size,
                                           inveta_truenegatives.size,
                                           inveta_falsepositives.size,
                                           inveta_falsenegatives.size)

        # iqr recovered variables in this magbin
        iqr_recoveredvars = varthresh[magcol][
            'binned_objectids_thresh_iqr'
        ][magbinind]
        iqr_recoverednotvars = np.setdiff1d(thisbin_objectids,
                                            iqr_recoveredvars)

        iqr_truepositives = np.intersect1d(iqr_recoveredvars,
                                           thisbin_actualvars)
        iqr_falsepositives = np.intersect1d(iqr_recoveredvars,
                                            thisbin_actualnotvars)
        iqr_truenegatives = np.intersect1d(iqr_recoverednotvars,
                                           thisbin_actualnotvars)
        iqr_falsenegatives = np.intersect1d(iqr_recoverednotvars,
                                            thisbin_actualvars)

        # calculate iqr recall, precision, Matthews correl coeff
        iqr_recall = recall(iqr_truepositives.size,
                            iqr_falsenegatives.size)
        iqr_precision = precision(iqr_truepositives.size,
                                  iqr_falsepositives.size)
        iqr_mcc = matthews_correl_coeff(iqr_truepositives.size,
                                        iqr_truenegatives.size,
                                        iqr_falsepositives.size,
                                        iqr_falsenegatives.size)

        # calculate the items missed by one method but found by the other
        # methods
        stet_missed_inveta_found = np.setdiff1d(inveta_truepositives,
                                                stet_truepositives)
        stet_missed_iqr_found = np.setdiff1d(iqr_truepositives,
                                             stet_truepositives)

        inveta_missed_stet_found = np.setdiff1d(stet_truepositives,
                                                inveta_truepositives)
        inveta_missed_iqr_found = np.setdiff1d(iqr_truepositives,
                                               inveta_truepositives)

        iqr_missed_stet_found = np.setdiff1d(stet_truepositives,
                                             iqr_truepositives)
        iqr_missed_inveta_found = np.setdiff1d(inveta_truepositives,
                                               iqr_truepositives)

        if not statsonly:

            recdict[magcol] = {
                # stetson J alone
                'stet_recoveredvars':stet_recoveredvars,
                'stet_truepositives':stet_truepositives,
                'stet_falsepositives':stet_falsepositives,
                'stet_truenegatives':stet_truenegatives,
                'stet_falsenegatives':stet_falsenegatives,
                'stet_precision':stet_precision,
                'stet_recall':stet_recall,
                'stet_mcc':stet_mcc,
                # inveta alone
                'inveta_recoveredvars':inveta_recoveredvars,
                'inveta_truepositives':inveta_truepositives,
                'inveta_falsepositives':inveta_falsepositives,
                'inveta_truenegatives':inveta_truenegatives,
                'inveta_falsenegatives':inveta_falsenegatives,
                'inveta_precision':inveta_precision,
                'inveta_recall':inveta_recall,
                'inveta_mcc':inveta_mcc,
                # iqr alone
                'iqr_recoveredvars':iqr_recoveredvars,
                'iqr_truepositives':iqr_truepositives,
                'iqr_falsepositives':iqr_falsepositives,
                'iqr_truenegatives':iqr_truenegatives,
                'iqr_falsenegatives':iqr_falsenegatives,
                'iqr_precision':iqr_precision,
                'iqr_recall':iqr_recall,
                'iqr_mcc':iqr_mcc,
                # true positive variables missed by one method but picked up by
                # the others
                'stet_missed_inveta_found':stet_missed_inveta_found,
                'stet_missed_iqr_found':stet_missed_iqr_found,
                'inveta_missed_stet_found':inveta_missed_stet_found,
                'inveta_missed_iqr_found':inveta_missed_iqr_found,
                'iqr_missed_stet_found':iqr_missed_stet_found,
                'iqr_missed_inveta_found':iqr_missed_inveta_found,
                # bin info
                'actual_variables':thisbin_actualvars,
                'actual_nonvariables':thisbin_actualnotvars,
                'all_objectids':thisbin_objectids,
                'magbinind':magbinind,
            }

        # if statsonly is set, then we only return the numbers but not the
        # arrays themselves
        else:

            recdict[magcol] = {
                # stetson J alone
                'stet_recoveredvars':stet_recoveredvars.size,
                'stet_truepositives':stet_truepositives.size,
                'stet_falsepositives':stet_falsepositives.size,
                'stet_truenegatives':stet_truenegatives.size,
                'stet_falsenegatives':stet_falsenegatives.size,
                'stet_precision':stet_precision,
                'stet_recall':stet_recall,
                'stet_mcc':stet_mcc,
                # inveta alone
                'inveta_recoveredvars':inveta_recoveredvars.size,
                'inveta_truepositives':inveta_truepositives.size,
                'inveta_falsepositives':inveta_falsepositives.size,
                'inveta_truenegatives':inveta_truenegatives.size,
                'inveta_falsenegatives':inveta_falsenegatives.size,
                'inveta_precision':inveta_precision,
                'inveta_recall':inveta_recall,
                'inveta_mcc':inveta_mcc,
                # iqr alone
                'iqr_recoveredvars':iqr_recoveredvars.size,
                'iqr_truepositives':iqr_truepositives.size,
                'iqr_falsepositives':iqr_falsepositives.size,
                'iqr_truenegatives':iqr_truenegatives.size,
                'iqr_falsenegatives':iqr_falsenegatives.size,
                'iqr_precision':iqr_precision,
                'iqr_recall':iqr_recall,
                'iqr_mcc':iqr_mcc,
                # true positive variables missed by one method but picked up by
                # the others
                'stet_missed_inveta_found':stet_missed_inveta_found.size,
                'stet_missed_iqr_found':stet_missed_iqr_found.size,
                'inveta_missed_stet_found':inveta_missed_stet_found.size,
                'inveta_missed_iqr_found':inveta_missed_iqr_found.size,
                'iqr_missed_stet_found':iqr_missed_stet_found.size,
                'iqr_missed_inveta_found':iqr_missed_inveta_found.size,
                # bin info
                'actual_variables':thisbin_actualvars.size,
                'actual_nonvariables':thisbin_actualnotvars.size,
                'all_objectids':thisbin_objectids.size,
                'magbinind':magbinind,
            }

    #
    # done with per magcol
    #

    return recdict
def magbin_varind_gridsearch_worker(task):
    '''
    This is a parallel grid search worker for the function below.

    task is a tuple: (simbasedir, (stet_min, inveta_min, iqr_min),
    magbinmedian). Returns the stats dict from
    get_recovered_variables_for_magbin, or None if the grid point failed.
    '''

    simbasedir, gridpoint, magbinmedian = task

    try:
        res = get_recovered_variables_for_magbin(simbasedir,
                                                 magbinmedian,
                                                 stetson_stdev_min=gridpoint[0],
                                                 inveta_stdev_min=gridpoint[1],
                                                 iqr_stdev_min=gridpoint[2],
                                                 statsonly=True)
        return res
    # FIX: a bare except: also swallows KeyboardInterrupt/SystemExit, which
    # can hang the multiprocessing pool; catch Exception instead
    except Exception:
        LOGEXCEPTION('failed to get info for %s' % gridpoint)
        return None
def variable_index_gridsearch_magbin(simbasedir,
                                     stetson_stdev_range=(1.0,20.0),
                                     inveta_stdev_range=(1.0,20.0),
                                     iqr_stdev_range=(1.0,20.0),
                                     ngridpoints=32,
                                     ngridworkers=None):
    '''This runs a variable index grid search per magbin.

    Similar to variable_index_gridsearch above.

    NOTE: the range defaults are now tuples instead of lists; mutable default
    arguments are a Python footgun even when (as here) they are not mutated.

    Gets the magbin medians from the fakelcinfo.pkl's
    dict['magrms'][magcols[0]['binned_sdssr_median'] value.

    Reads the fakelcs-info.pkl in simbasedir to get:

    - the variable objects, their types, periods, epochs, and params
    - the nonvariable objects

    For each magbin, this does a grid search using the stetson and inveta ranges
    and tries to optimize the Matthews Correlation Coefficient (best value is
    +1.0), indicating the best possible separation of variables
    vs. nonvariables. The thresholds on these two variable indexes that produce
    the largest coeff for the collection of fake LCs will probably be the ones
    that work best for actual variable classification on the real LCs.

    https://en.wikipedia.org/wiki/Matthews_correlation_coefficient

    For each grid-point, calculates the true positives, false positives, true
    negatives, false negatives. Then gets the precision and recall, confusion
    matrix, and the ROC curve for variable vs. nonvariable.

    Once we've identified the best thresholds to use, we can then calculate
    variable object numbers:

    - as a function of magnitude
    - as a function of period
    - as a function of number of detections
    - as a function of amplitude of variability

    Writes everything back to simbasedir/fakevar-recovery-per-magbin.pkl. Use
    the plotting function below to make plots for the results.

    For the default number of grid-points and 25000 simulated light curves, this
    takes about 3 days to run on a 40 (effective) core machine with 2 x Xeon
    E5-2650v3 CPUs.
    '''

    # make the output directory where all the pkls from the variability
    # threshold runs will go
    outdir = os.path.join(simbasedir,'recvar-threshold-pkls')
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    # get the info from the simbasedir
    with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
        siminfo = pickle.load(infd)

    # get the column defs for the fakelcs
    timecols = siminfo['timecols']
    magcols = siminfo['magcols']
    errcols = siminfo['errcols']

    # get the magbinmedians to use for the recovery processing
    magbinmedians = siminfo['magrms'][magcols[0]]['binned_sdssr_median']

    # generate the grids for stetson and inveta
    stetson_grid = np.linspace(stetson_stdev_range[0],
                               stetson_stdev_range[1],
                               num=ngridpoints)
    inveta_grid = np.linspace(inveta_stdev_range[0],
                              inveta_stdev_range[1],
                              num=ngridpoints)
    iqr_grid = np.linspace(iqr_stdev_range[0],
                           iqr_stdev_range[1],
                           num=ngridpoints)

    # generate the grid as the cartesian product of the three axes
    stet_inveta_iqr_grid = []
    for stet in stetson_grid:
        for inveta in inveta_grid:
            for iqr in iqr_grid:
                grid_point = [stet, inveta, iqr]
                stet_inveta_iqr_grid.append(grid_point)

    # the output dict
    grid_results = {'stetson_grid':stetson_grid,
                    'inveta_grid':inveta_grid,
                    'iqr_grid':iqr_grid,
                    'stet_inveta_iqr_grid':stet_inveta_iqr_grid,
                    'magbinmedians':magbinmedians,
                    'timecols':timecols,
                    'magcols':magcols,
                    'errcols':errcols,
                    'simbasedir':os.path.abspath(simbasedir),
                    'recovery':[]}

    # set up the pool
    pool = mp.Pool(ngridworkers)

    # run the grid search per magbinmedian
    for magbinmedian in magbinmedians:

        LOGINFO('running stetson J-inveta grid-search '
                'for magbinmedian = %.3f...' % magbinmedian)

        tasks = [(simbasedir, gp, magbinmedian) for gp in stet_inveta_iqr_grid]
        thisbin_results = pool.map(magbin_varind_gridsearch_worker, tasks)
        grid_results['recovery'].append(thisbin_results)

    pool.close()
    pool.join()

    LOGINFO('done.')

    with open(os.path.join(simbasedir,
                           'fakevar-recovery-per-magbin.pkl'),'wb') as outfd:
        pickle.dump(grid_results,outfd,pickle.HIGHEST_PROTOCOL)

    return grid_results
def plot_varind_gridsearch_magbin_results(gridsearch_results):
'''
This plots the gridsearch results from variable_index_gridsearch_magbin.
'''
# get the result pickle/dict
if (isinstance(gridsearch_results, str) and
os.path.exists(gridsearch_results)):
with open(gridsearch_results,'rb') as infd:
gridresults = pickle.load(infd)
elif isinstance(gridsearch_results, dict):
gridresults = gridsearch_results
else:
LOGERROR('could not understand the input '
'variable index grid-search result dict/pickle')
return None
plotres = {'simbasedir':gridresults['simbasedir']}
recgrid = gridresults['recovery']
simbasedir = gridresults['simbasedir']
for magcol in gridresults['magcols']:
plotres[magcol] = {'best_stetsonj':[],
'best_inveta':[],
'best_iqr':[],
'magbinmedians':gridresults['magbinmedians']}
# go through all the magbins
for magbinind, magbinmedian in enumerate(gridresults['magbinmedians']):
LOGINFO('plotting results for %s: magbin: %.3f' %
(magcol, magbinmedian))
stet_mcc = np.array(
[x[magcol]['stet_mcc']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_precision = np.array(
[x[magcol]['stet_precision']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_recall = np.array(
[x[magcol]['stet_recall']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_missed_inveta_found = np.array(
[x[magcol]['stet_missed_inveta_found']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_missed_iqr_found = np.array(
[x[magcol]['stet_missed_iqr_found']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
inveta_mcc = np.array(
[x[magcol]['inveta_mcc']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_precision = np.array(
[x[magcol]['inveta_precision']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_recall = np.array(
[x[magcol]['inveta_recall']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_missed_stet_found = np.array(
[x[magcol]['inveta_missed_stet_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_missed_iqr_found = np.array(
[x[magcol]['inveta_missed_iqr_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
iqr_mcc = np.array(
[x[magcol]['iqr_mcc']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_precision = np.array(
[x[magcol]['iqr_precision']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_recall = np.array(
[x[magcol]['iqr_recall']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_missed_stet_found = np.array(
[x[magcol]['iqr_missed_stet_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_missed_inveta_found = np.array(
[x[magcol]['iqr_missed_inveta_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
fig = plt.figure(figsize=(6.4*5, 4.8*3))
# FIRST ROW: stetson J plot
plt.subplot(3,5,1)
if np.any(np.isfinite(stet_mcc)):
plt.plot(gridresults['stetson_grid'],
stet_mcc)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('MCC')
plt.title('MCC for stetson J')
else:
plt.text(0.5,0.5,
'stet MCC values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,2)
if np.any(np.isfinite(stet_precision)):
plt.plot(gridresults['stetson_grid'],
stet_precision)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('precision')
plt.title('precision for stetson J')
else:
plt.text(0.5,0.5,
'stet precision values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,3)
if np.any(np.isfinite(stet_recall)):
plt.plot(gridresults['stetson_grid'],
stet_recall)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('recall')
plt.title('recall for stetson J')
else:
plt.text(0.5,0.5,
'stet recall values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,4)
if np.any(np.isfinite(stet_missed_inveta_found)):
plt.plot(gridresults['stetson_grid'],
stet_missed_inveta_found)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('# objects stetson missed but inveta found')
plt.title('stetson J missed, inveta found')
else:
plt.text(0.5,0.5,
'stet-missed/inveta-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,5)
if np.any(np.isfinite(stet_missed_iqr_found)):
plt.plot(gridresults['stetson_grid'],
stet_missed_iqr_found)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('# objects stetson missed but IQR found')
plt.title('stetson J missed, IQR found')
else:
plt.text(0.5,0.5,
'stet-missed/IQR-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
# SECOND ROW: inveta plots
plt.subplot(3,5,6)
if np.any(np.isfinite(inveta_mcc)):
plt.plot(gridresults['inveta_grid'],
inveta_mcc)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('MCC')
plt.title('MCC for inveta')
else:
plt.text(0.5,0.5,
'inveta MCC values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,7)
if np.any(np.isfinite(inveta_precision)):
plt.plot(gridresults['inveta_grid'],
inveta_precision)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('precision')
plt.title('precision for inveta')
else:
plt.text(0.5,0.5,
'inveta precision values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,8)
if np.any(np.isfinite(inveta_recall)):
plt.plot(gridresults['inveta_grid'],
inveta_recall)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('recall')
plt.title('recall for inveta')
else:
plt.text(0.5,0.5,
'inveta recall values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,9)
if np.any(np.isfinite(inveta_missed_stet_found)):
plt.plot(gridresults['inveta_grid'],
inveta_missed_stet_found)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('# objects inveta missed but stetson found')
plt.title('inveta missed, stetson J found')
else:
plt.text(0.5,0.5,
'inveta-missed-stet-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,10)
if np.any(np.isfinite(inveta_missed_iqr_found)):
plt.plot(gridresults['inveta_grid'],
inveta_missed_iqr_found)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('# objects inveta missed but IQR found')
plt.title('inveta missed, IQR found')
else:
plt.text(0.5,0.5,
'inveta-missed-iqr-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
# THIRD ROW: inveta plots
plt.subplot(3,5,11)
if np.any(np.isfinite(iqr_mcc)):
plt.plot(gridresults['iqr_grid'],
iqr_mcc)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('MCC')
plt.title('MCC for IQR')
else:
plt.text(0.5,0.5,
'IQR MCC values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,12)
if np.any(np.isfinite(iqr_precision)):
plt.plot(gridresults['iqr_grid'],
iqr_precision)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('precision')
plt.title('precision for IQR')
else:
plt.text(0.5,0.5,
'IQR precision values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,13)
if np.any(np.isfinite(iqr_recall)):
plt.plot(gridresults['iqr_grid'],
iqr_recall)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('recall')
plt.title('recall for IQR')
else:
plt.text(0.5,0.5,
'IQR recall values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,14)
if np.any(np.isfinite(iqr_missed_stet_found)):
plt.plot(gridresults['iqr_grid'],
iqr_missed_stet_found)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('# objects IQR missed but stetson found')
plt.title('IQR missed, stetson J found')
else:
plt.text(0.5,0.5,
'iqr-missed-stet-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,15)
if np.any(np.isfinite(iqr_missed_inveta_found)):
plt.plot(gridresults['iqr_grid'],
iqr_missed_inveta_found)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('# objects IQR missed but inveta found')
plt.title('IQR missed, inveta found')
else:
plt.text(0.5,0.5,
'iqr-missed-inveta-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplots_adjust(hspace=0.25,wspace=0.25)
plt.suptitle('magcol: %s, magbin: %.3f' % (magcol, magbinmedian))
plotdir = os.path.join(gridresults['simbasedir'],
'varindex-gridsearch-plots')
if not os.path.exists(plotdir):
os.mkdir(plotdir)
gridplotf = os.path.join(
plotdir,
'%s-magbin-%.3f-var-recoverygrid-permagbin.png' %
(magcol, magbinmedian)
)
plt.savefig(gridplotf,dpi=100,bbox_inches='tight')
plt.close('all')
# get the best values of MCC, recall, precision and their associated
# stet, inveta
stet_mcc_maxind = np.where(stet_mcc == np.max(stet_mcc))
stet_precision_maxind = np.where(
stet_precision == np.max(stet_precision)
)
stet_recall_maxind = np.where(stet_recall == np.max(stet_recall))
best_stet_mcc = stet_mcc[stet_mcc_maxind]
best_stet_precision = stet_mcc[stet_precision_maxind]
best_stet_recall = stet_mcc[stet_recall_maxind]
stet_with_best_mcc = gridresults['stetson_grid'][stet_mcc_maxind]
stet_with_best_precision = gridresults['stetson_grid'][
stet_precision_maxind
]
stet_with_best_recall = (
gridresults['stetson_grid'][stet_recall_maxind]
)
inveta_mcc_maxind = np.where(inveta_mcc == np.max(inveta_mcc))
inveta_precision_maxind = np.where(
inveta_precision == np.max(inveta_precision)
)
inveta_recall_maxind = (
np.where(inveta_recall == np.max(inveta_recall))
)
best_inveta_mcc = inveta_mcc[inveta_mcc_maxind]
best_inveta_precision = inveta_mcc[inveta_precision_maxind]
best_inveta_recall = inveta_mcc[inveta_recall_maxind]
inveta_with_best_mcc = gridresults['inveta_grid'][inveta_mcc_maxind]
inveta_with_best_precision = gridresults['inveta_grid'][
inveta_precision_maxind
]
inveta_with_best_recall = gridresults['inveta_grid'][
inveta_recall_maxind
]
iqr_mcc_maxind = np.where(iqr_mcc == np.max(iqr_mcc))
iqr_precision_maxind = np.where(
iqr_precision == np.max(iqr_precision)
)
iqr_recall_maxind = (
np.where(iqr_recall == np.max(iqr_recall))
)
best_iqr_mcc = iqr_mcc[iqr_mcc_maxind]
best_iqr_precision = iqr_mcc[iqr_precision_maxind]
best_iqr_recall = iqr_mcc[iqr_recall_maxind]
iqr_with_best_mcc = gridresults['iqr_grid'][iqr_mcc_maxind]
iqr_with_best_precision = gridresults['iqr_grid'][
iqr_precision_maxind
]
iqr_with_best_recall = gridresults['iqr_grid'][
iqr_recall_maxind
]
plotres[magcol][magbinmedian] = {
# stetson
'stet_grid':gridresults['stetson_grid'],
'stet_mcc':stet_mcc,
'stet_precision':stet_precision,
'stet_recall':stet_recall,
'stet_missed_inveta_found':stet_missed_inveta_found,
'best_stet_mcc':best_stet_mcc,
'stet_with_best_mcc':stet_with_best_mcc,
'best_stet_precision':best_stet_precision,
'stet_with_best_precision':stet_with_best_precision,
'best_stet_recall':best_stet_recall,
'stet_with_best_recall':stet_with_best_recall,
# inveta
'inveta_grid':gridresults['inveta_grid'],
'inveta_mcc':inveta_mcc,
'inveta_precision':inveta_precision,
'inveta_recall':inveta_recall,
'inveta_missed_stet_found':inveta_missed_stet_found,
'best_inveta_mcc':best_inveta_mcc,
'inveta_with_best_mcc':inveta_with_best_mcc,
'best_inveta_precision':best_inveta_precision,
'inveta_with_best_precision':inveta_with_best_precision,
'best_inveta_recall':best_inveta_recall,
'inveta_with_best_recall':inveta_with_best_recall,
# iqr
'iqr_grid':gridresults['iqr_grid'],
'iqr_mcc':iqr_mcc,
'iqr_precision':iqr_precision,
'iqr_recall':iqr_recall,
'iqr_missed_stet_found':iqr_missed_stet_found,
'best_iqr_mcc':best_iqr_mcc,
'iqr_with_best_mcc':iqr_with_best_mcc,
'best_iqr_precision':best_iqr_precision,
'iqr_with_best_precision':iqr_with_best_precision,
'best_iqr_recall':best_iqr_recall,
'iqr_with_best_recall':iqr_with_best_recall,
# plot info
'recoveryplot':gridplotf
}
# recommend inveta, stetson index, and iqr for this magbin
# if there are multiple stets, choose the smallest one
if stet_with_best_mcc.size > 1:
plotres[magcol]['best_stetsonj'].append(stet_with_best_mcc[0])
elif stet_with_best_mcc.size > 0:
plotres[magcol]['best_stetsonj'].append(stet_with_best_mcc[0])
else:
plotres[magcol]['best_stetsonj'].append(np.nan)
# if there are multiple best invetas, choose the smallest one
if inveta_with_best_mcc.size > 1:
plotres[magcol]['best_inveta'].append(inveta_with_best_mcc[0])
elif inveta_with_best_mcc.size > 0:
plotres[magcol]['best_inveta'].append(inveta_with_best_mcc[0])
else:
plotres[magcol]['best_inveta'].append(np.nan)
# if there are multiple best iqrs, choose the smallest one
if iqr_with_best_mcc.size > 1:
plotres[magcol]['best_iqr'].append(iqr_with_best_mcc[0])
elif iqr_with_best_mcc.size > 0:
plotres[magcol]['best_iqr'].append(iqr_with_best_mcc[0])
else:
plotres[magcol]['best_iqr'].append(np.nan)
# write the plotresults to a pickle
plotrespicklef = os.path.join(simbasedir,
'varindex-gridsearch-magbin-results.pkl')
with open(plotrespicklef, 'wb') as outfd:
pickle.dump(plotres, outfd, pickle.HIGHEST_PROTOCOL)
# recommend the values of stetson J and inveta to use
for magcol in gridresults['magcols']:
LOGINFO('best stdev multipliers for each %s magbin:' % magcol)
LOGINFO('magbin inveta stetson J IQR')
for magbin, inveta, stet, iqr in zip(
plotres[magcol]['magbinmedians'],
plotres[magcol]['best_inveta'],
plotres[magcol]['best_stetsonj'],
plotres[magcol]['best_iqr']):
LOGINFO('%.3f %.3f %.3f %.3f' % (magbin,
inveta,
stet,
iqr))
return plotres
################################
## PERIODIC VARIABLE RECOVERY ##
################################
# variable types (as stored in a fakelc's 'actual_vartype' key) that are
# treated as periodic variables by the recovery functions in this module
PERIODIC_VARTYPES = ['EB','RRab','RRc','rotator',
                     'HADS','planet','LPV','cepheid']

# labels for the kinds of period aliasing recognized by
# check_periodrec_alias below; the order of this list must match the order
# of the candidate alias periods constructed in that function
ALIAS_TYPES = ['actual',
               'twice',
               'half',
               'ratio_over_1plus',
               'ratio_over_1minus',
               'ratio_over_1plus_twice',
               'ratio_over_1minus_twice',
               'ratio_over_1plus_thrice',
               'ratio_over_1minus_thrice',
               'ratio_over_minus1',
               'ratio_over_twice_minus1']
def run_periodfinding(simbasedir,
                      pfmethods=None,
                      pfkwargs=None,
                      getblssnr=False,
                      sigclip=5.0,
                      nperiodworkers=10,
                      ncontrolworkers=4,
                      liststartindex=None,
                      listmaxobjects=None):
    '''This runs periodfinding using several periodfinders on a collection of
    fakelcs.

    Use pfmethods to specify which periodfinders to run. These must be in
    lcproc.PFMETHODS. The default is ['gls','pdm','bls'].

    Use pfkwargs to provide optional kwargs to the periodfinders, one dict per
    entry in pfmethods.

    If getblssnr is True, will run BLS SNR calculations for each object and
    magcol. This takes a while to run, so it's disabled (False) by default.

    sigclip sets the sigma-clip to use for the light curves before putting
    them through each of the periodfinders.

    nperiodworkers is the number of period-finder workers to launch.

    ncontrolworkers is the number of controlling processes to launch.

    liststartindex sets the index from where to start in the list of
    fakelcs. listmaxobjects sets the maximum number of objects in the fakelc
    list to run periodfinding for in this invocation. Together, these can be
    used to distribute processing over several independent machines if the
    number of light curves is very large.

    As a rough benchmark, 25000 fakelcs with up to 50000 points per lc take
    about 26 days in total to run on an invocation of this function using
    GLS+PDM+BLS and 10 periodworkers and 4 controlworkers (so all 40 'cores')
    on a 2 x Xeon E5-2660v3 machine.

    Returns the path of the 'fakelc-periodfinding.pkl' results pickle written
    to simbasedir.
    '''

    # fill in the real defaults here instead of using mutable default
    # arguments, which would be shared across calls
    if pfmethods is None:
        pfmethods = ['gls','pdm','bls']
    if pfkwargs is None:
        pfkwargs = [{},{},{'startp':1.0,'maxtransitduration':0.3}]

    # get the info from the simbasedir
    with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
        siminfo = pickle.load(infd)

    lcfpaths = siminfo['lcfpath']
    pfdir = os.path.join(simbasedir,'periodfinding')

    # get the column defs for the fakelcs
    timecols = siminfo['timecols']
    magcols = siminfo['magcols']
    errcols = siminfo['errcols']

    # register the fakelc pklc as a custom lcproc format
    # now we should be able to use all lcproc functions correctly
    if 'fakelc' not in lcproc.LCFORM:

        lcproc.register_custom_lcformat(
            'fakelc',
            '*-fakelc.pkl',
            lcproc.read_pklc,
            timecols,
            magcols,
            errcols,
            magsarefluxes=False,
            specialnormfunc=None
        )

    if liststartindex:
        lcfpaths = lcfpaths[liststartindex:]

    if listmaxobjects:
        lcfpaths = lcfpaths[:listmaxobjects]

    pfinfo = lcproc.parallel_pf(lcfpaths,
                                pfdir,
                                lcformat='fakelc',
                                pfmethods=pfmethods,
                                pfkwargs=pfkwargs,
                                getblssnr=getblssnr,
                                sigclip=sigclip,
                                nperiodworkers=nperiodworkers,
                                # FIX: this previously referenced the
                                # undefined name 'ncontrolworker'
                                ncontrolworkers=ncontrolworkers)

    with open(os.path.join(simbasedir,
                           'fakelc-periodfinding.pkl'),'wb') as outfd:
        # FIX: this previously pickled the undefined name 'varinfo'; write
        # the period-finding results returned by parallel_pf instead
        pickle.dump(pfinfo, outfd, pickle.HIGHEST_PROTOCOL)

    return os.path.join(simbasedir,'fakelc-periodfinding.pkl')
def check_periodrec_alias(actualperiod, recoveredperiod, tolerance=1.0e-3):
    '''This determines what kind of aliasing (if any) exists between
    recoveredperiod and actualperiod.

    Compares recoveredperiod against the actual period, its double, its half,
    and the standard window-function alias ratios, using np.isclose with the
    given relative tolerance. Returns a comma-joined string of matching
    labels from ALIAS_TYPES, 'other' if nothing matches, or 'unknown' if
    either period is not finite.
    '''

    # can't classify anything if either period is nan/inf
    if not (np.isfinite(actualperiod) and np.isfinite(recoveredperiod)):
        LOGERROR("can't compare nan values for actual/recovered periods")
        return 'unknown'

    p = actualperiod

    # candidate alias periods; the order here must match ALIAS_TYPES
    candidates = np.ravel(np.array([
        p,                      # actual
        2.0*p,                  # twice
        0.5*p,                  # half
        p/(1.0 + p),            # ratio_over_1plus
        p/(1.0 - p),            # ratio_over_1minus
        p/(1.0 + 2.0*p),        # ratio_over_1plus_twice
        p/(1.0 - 2.0*p),        # ratio_over_1minus_twice
        p/(1.0 + 3.0*p),        # ratio_over_1plus_thrice
        p/(1.0 - 3.0*p),        # ratio_over_1minus_thrice
        p/(p - 1.0),            # ratio_over_minus1
        p/(2.0*p - 1.0),        # ratio_over_twice_minus1
    ]))

    # flag every candidate the recovered period is close to
    matched = np.isclose(recoveredperiod, candidates, rtol=tolerance)

    if not np.any(matched):
        return 'other'

    # several candidates can coincide, so join all matching labels
    return ','.join(np.array(ALIAS_TYPES)[matched].tolist())
def periodicvar_recovery(fakepfpkl,
                         simbasedir,
                         period_tolerance=1.0e-3):
    '''Recovers the periodic variable status/info for the simulated pf pickle.

    fakepfpkl is a single periodfinding-<objectid>.pkl[.gz] file produced in
    the <simbasedir>/periodfinding subdirectory after run_periodfinding above
    is done.

    This function:

    - uses simbasedir and the lcfbasename stored in fakepfpkl to figure out
      where the LC for this object is
    - gets the actual_varparams, actual_varperiod, actual_vartype,
      actual_varamplitude elements from the LC
    - figures out if the current objectid is a periodic variable (using
      actual_vartype)
    - if it is a periodic variable, gets the canonical period assigned to it
    - checks if the period was recovered in any of the five best periods
      reported by any of the periodfinders, checks if the period recovered
      was a harmonic of the period

    Returns a dict with the objectid, actual period and vartype, recovered
    periods, and recovery status, or None if the LC for this object does not
    exist.
    '''

    # the period-finding result pickle may be gzipped
    if fakepfpkl.endswith('.gz'):
        infd = gzip.open(fakepfpkl,'rb')
    else:
        infd = open(fakepfpkl,'rb')

    fakepf = pickle.load(infd)
    infd.close()

    # get info from the fakepf dict
    objectid, lcfbasename = fakepf['objectid'], fakepf['lcfbasename']
    lcfpath = os.path.join(simbasedir,'lightcurves',lcfbasename)

    # if the LC doesn't exist, bail out
    if not os.path.exists(lcfpath):
        LOGERROR('light curve for %s does not exist at: %s' % (objectid,
                                                               lcfpath))
        return None

    # now, open the fakelc
    fakelc = lcproc.read_pklc(lcfpath)

    # get the actual_varparams, actual_varperiod, actual_varamplitude
    actual_varparams, actual_varperiod, actual_varamplitude, actual_vartype = (
        fakelc['actual_varparams'],
        fakelc['actual_varperiod'],
        fakelc['actual_varamplitude'],
        fakelc['actual_vartype']
    )
    # get the moments too so we can track LC noise, etc.
    actual_moments = fakelc['moments']

    # get the magcols for this LC
    magcols = fakelc['magcols']

    # get the recovered info from each of the available methods
    pfres = {
        'objectid':objectid,
        'simbasedir':simbasedir,
        'magcols':magcols,
        'fakelc':os.path.abspath(lcfpath),
        'fakepf':os.path.abspath(fakepfpkl),
        'actual_vartype':actual_vartype,
        'actual_varperiod':actual_varperiod,
        'actual_varamplitude':actual_varamplitude,
        'actual_varparams':actual_varparams,
        'actual_moments':actual_moments,
        'recovery_periods':[],
        'recovery_lspvals':[],
        'recovery_pfmethods':[],
        'recovery_magcols':[],
        'recovery_status':[],
        'recovery_pdiff':[],
    }

    # populate the pfres dict with the periods, pfmethods, and magcols
    for magcol in magcols:

        for pfm in lcproc.PFMETHODS:

            if pfm in fakepf[magcol]:

                # only get the unique recovered periods by using
                # period_tolerance
                for rpi, rp in enumerate(
                        fakepf[magcol][pfm]['nbestperiods']
                ):

                    if ((not np.any(np.isclose(
                            rp,
                            np.array(pfres['recovery_periods']),
                            rtol=period_tolerance
                    ))) and np.isfinite(rp)):

                        # populate the recovery periods, pfmethods, and magcols
                        pfres['recovery_periods'].append(rp)
                        pfres['recovery_pfmethods'].append(pfm)
                        pfres['recovery_magcols'].append(magcol)

                        # normalize the periodogram peak value to between
                        # 0 and 1 so we can put in the results of multiple
                        # periodfinders on one scale
                        if pfm == 'pdm':
                            # PDM peaks are minima, so invert the sense
                            this_lspval = (
                                np.max(fakepf[magcol][pfm]['lspvals']) -
                                fakepf[magcol][pfm]['nbestlspvals'][rpi]
                            )
                        else:
                            this_lspval = (
                                fakepf[magcol][pfm]['nbestlspvals'][rpi] /
                                np.max(fakepf[magcol][pfm]['lspvals'])
                            )

                        # add the normalized lspval to the outdict for
                        # this object as well. later, we'll use this to
                        # construct a periodogram for objects that were
                        # actually not variables
                        pfres['recovery_lspvals'].append(this_lspval)

    # convert the recovery_* lists to arrays
    pfres['recovery_periods'] = np.array(pfres['recovery_periods'])
    pfres['recovery_lspvals'] = np.array(pfres['recovery_lspvals'])
    pfres['recovery_pfmethods'] = np.array(pfres['recovery_pfmethods'])
    pfres['recovery_magcols'] = np.array(pfres['recovery_magcols'])

    #
    # now figure out recovery status
    #

    # if this is an actual periodic variable, characterize the recovery
    if (actual_vartype and
        actual_vartype in PERIODIC_VARTYPES and
        np.isfinite(actual_varperiod)):

        if pfres['recovery_periods'].size > 0:

            for ri in range(pfres['recovery_periods'].size):

                # FIX: np.asscalar was removed in modern numpy;
                # np.asarray(...).item() is the equivalent scalar extraction
                pfres['recovery_pdiff'].append(
                    pfres['recovery_periods'][ri] -
                    np.asarray(actual_varperiod).item()
                )

                # get the alias types
                pfres['recovery_status'].append(
                    check_periodrec_alias(actual_varperiod,
                                          pfres['recovery_periods'][ri],
                                          tolerance=period_tolerance)
                )

            # turn the recovery_pdiff/status lists into arrays
            pfres['recovery_status'] = np.array(pfres['recovery_status'])
            pfres['recovery_pdiff'] = np.array(pfres['recovery_pdiff'])

            # find the best recovered period and its status
            rec_absdiff = np.abs(pfres['recovery_pdiff'])
            best_recp_ind = rec_absdiff == rec_absdiff.min()

            pfres['best_recovered_period'] = (
                pfres['recovery_periods'][best_recp_ind]
            )
            pfres['best_recovered_pfmethod'] = (
                pfres['recovery_pfmethods'][best_recp_ind]
            )
            pfres['best_recovered_magcol'] = (
                pfres['recovery_magcols'][best_recp_ind]
            )
            pfres['best_recovered_status'] = (
                pfres['recovery_status'][best_recp_ind]
            )
            pfres['best_recovered_pdiff'] = (
                pfres['recovery_pdiff'][best_recp_ind]
            )

        else:

            LOGWARNING(
                'no finite periods recovered from period-finding for %s' %
                fakepfpkl
            )

            # FIX: np.unicode_ was removed in numpy 2.0; the builtin str
            # dtype is equivalent
            pfres['recovery_status'] = np.array(['no_finite_periods_recovered'])
            pfres['recovery_pdiff'] = np.array([np.nan])
            pfres['best_recovered_period'] = np.array([np.nan])
            pfres['best_recovered_pfmethod'] = np.array([],dtype=str)
            pfres['best_recovered_magcol'] = np.array([],dtype=str)
            pfres['best_recovered_status'] = np.array([],dtype=str)
            pfres['best_recovered_pdiff'] = np.array([np.nan])

    # if this is not actually a variable, get the recovered period,
    # etc. anyway. this way, we can see what we need to look out for and avoid
    # when getting these values for actual objects
    else:

        pfres['recovery_status'] = np.array(
            ['not_variable']*pfres['recovery_periods'].size
        )
        pfres['recovery_pdiff'] = np.zeros(pfres['recovery_periods'].size)

        pfres['best_recovered_period'] = np.array([np.nan])
        pfres['best_recovered_pfmethod'] = np.array([],dtype=str)
        pfres['best_recovered_magcol'] = np.array([],dtype=str)
        pfres['best_recovered_status'] = np.array(['not_variable'])
        pfres['best_recovered_pdiff'] = np.array([np.nan])

    return pfres
def periodrec_worker(task):
    '''Parallel worker for parallel_periodicvar_recovery below.

    task is a tuple: (pfpkl, simbasedir, period_tolerance). Returns the
    periodicvar_recovery result dict, or None if recovery raised.
    '''

    pfpkl, simbasedir, period_tolerance = task

    try:
        return periodicvar_recovery(
            pfpkl,
            simbasedir,
            period_tolerance=period_tolerance
        )
    except Exception:
        LOGEXCEPTION('periodic var recovery failed for %s' % repr(task))
        return None
def parallel_periodicvar_recovery(simbasedir,
                                  period_tolerance=1.0e-3,
                                  liststartind=None,
                                  listmaxobjects=None,
                                  nworkers=None):
    '''This is a parallel driver for periodicvar_recovery.

    Runs periodicvar_recovery over every periodfinding pickle found in
    <simbasedir>/periodfinding, using an nworkers-sized process pool
    (nworkers=None lets multiprocessing choose). liststartind and
    listmaxobjects slice the pickle list so work can be split across
    machines.

    Writes the combined results to <simbasedir>/periodicvar-recovery.pkl and
    returns the results dict, or None if no periodfinding pickles were found.
    '''

    # figure out the periodfinding pickles directory
    pfpkldir = os.path.join(simbasedir,'periodfinding')

    if not os.path.exists(pfpkldir):
        LOGERROR('no "periodfinding" subdirectory in %s, can\'t continue' %
                 simbasedir)
        return None

    # find all the periodfinding pickles
    pfpkl_list = glob.glob(os.path.join(pfpkldir,'*periodfinding*pkl*'))

    if len(pfpkl_list) > 0:

        if liststartind:
            pfpkl_list = pfpkl_list[liststartind:]

        if listmaxobjects:
            pfpkl_list = pfpkl_list[:listmaxobjects]

        tasks = [(x, simbasedir, period_tolerance) for x in pfpkl_list]

        pool = mp.Pool(nworkers)
        results = pool.map(periodrec_worker, tasks)
        pool.close()
        pool.join()

        # index the per-object results by objectid, dropping failed workers
        resdict = {x['objectid']:x for x in results if x is not None}

        # FIX: np.unicode_ was removed in numpy 2.0; the builtin str dtype
        # is equivalent
        actual_periodicvars = np.array(
            [x['objectid'] for x in results
             if (x is not None and x['actual_vartype'] in PERIODIC_VARTYPES)],
            dtype=str
        )

        recovered_periodicvars = np.array(
            [x['objectid'] for x in results
             if (x is not None and 'actual' in x['best_recovered_status'])],
            dtype=str
        )
        alias_twice_periodicvars = np.array(
            [x['objectid'] for x in results
             if (x is not None and 'twice' in x['best_recovered_status'])],
            dtype=str
        )
        alias_half_periodicvars = np.array(
            [x['objectid'] for x in results
             if (x is not None and 'half' in x['best_recovered_status'])],
            dtype=str
        )

        all_objectids = [x['objectid'] for x in results]

        outdict = {'simbasedir':os.path.abspath(simbasedir),
                   'objectids':all_objectids,
                   'period_tolerance':period_tolerance,
                   'actual_periodicvars':actual_periodicvars,
                   'recovered_periodicvars':recovered_periodicvars,
                   'alias_twice_periodicvars':alias_twice_periodicvars,
                   'alias_half_periodicvars':alias_half_periodicvars,
                   'details':resdict}

        outfile = os.path.join(simbasedir,'periodicvar-recovery.pkl')
        with open(outfile, 'wb') as outfd:
            pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)

        return outdict

    else:

        LOGERROR(
            'no periodfinding result pickles found in %s, can\'t continue' %
            pfpkldir
        )
        return None
def plot_periodicvar_recovery_results(
precvar_results,
aliases_count_as_recovered=None,
magbins=np.arange(8.0,16.25,0.25),
periodbins=np.arange(0.0,500.0,0.5),
amplitudebins=np.arange(0.0,2.0,0.05),
ndetbins=np.arange(0.0,60000.0,1000.0),
minbinsize=1,
plotfile_ext='png',
):
'''This plots the results of periodic var recovery.
precvar_results is either a dict returned by parallel_periodicvar_recovery
or the pickle created by that function.
aliases_count_as recovered is used to set which kinds of aliases this
function considers as 'recovered' objects. Normally, we require that
recovered objects have a recovery status of 'actual' to indicate the actual
period was recovered. To change this default behavior,
aliases_count_as_recovered can be set to a list of alias status strings that
should be considered as 'recovered' objects as well. Choose from the
following alias types:
'twice' recovered_p = 2.0*actual_p
'half' recovered_p = 0.5*actual_p
'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p)
'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p)
'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p)
'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p)
'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p)
'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)
'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0)
'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0)
or set aliases_count_as_recovered='all' to include all of the above in the
'recovered' periodic var list.
This function makes plots for periodicvar recovered fraction as a function
of:
- magbin
- periodbin
- amplitude of variability
- ndet
with plot lines broken down by:
- magcol
- periodfinder
- vartype
- recovery status
The kwargs magbins, periodbins, amplitudebins, and ndetbins can be used to
set the bin lists as needed. The kwarg minbinsize controls how many elements
per bin are required to accept a bin in processing its recovery
characteristics for mags, periods, amplitudes, and ndets.
'''
# get the result pickle/dict
if isinstance(precvar_results, str) and os.path.exists(precvar_results):
with open(precvar_results,'rb') as infd:
precvar = pickle.load(infd)
elif isinstance(precvar_results, dict):
precvar = precvar_results
else:
LOGERROR('could not understand the input '
'periodic var recovery dict/pickle')
return None
# get the simbasedir and open the fakelc-info.pkl. we'll need the magbins
# definition from here.
simbasedir = precvar['simbasedir']
lcinfof = os.path.join(simbasedir,'fakelcs-info.pkl')
if not os.path.exists(lcinfof):
LOGERROR('fakelcs-info.pkl does not exist in %s, can\'t continue' %
simbasedir)
return None
with open(lcinfof,'rb') as infd:
lcinfo = pickle.load(infd)
# get the magcols, vartypes, sdssr, isvariable flags
magcols = lcinfo['magcols']
objectid = lcinfo['objectid']
ndet = lcinfo['ndet']
sdssr = lcinfo['sdssr']
# get the actual periodic vars
actual_periodicvars = precvar['actual_periodicvars']
# generate lists of objects binned by magbins and periodbins
LOGINFO('getting sdssr and ndet for actual periodic vars...')
# get the sdssr and ndet for all periodic vars
periodicvar_sdssr = []
periodicvar_ndet = []
periodicvar_objectids = []
for pobj in actual_periodicvars:
pobjind = objectid == pobj
periodicvar_objectids.append(pobj)
periodicvar_sdssr.append(sdssr[pobjind])
periodicvar_ndet.append(ndet[pobjind])
periodicvar_sdssr = np.array(periodicvar_sdssr)
periodicvar_objectids = np.array(periodicvar_objectids)
periodicvar_ndet = np.array(periodicvar_ndet)
LOGINFO('getting periods, vartypes, '
'amplitudes, ndet for actual periodic vars...')
# get the periods, vartypes, amplitudes for the actual periodic vars
periodicvar_periods = [
np.asscalar(precvar['details'][x]['actual_varperiod'])
for x in periodicvar_objectids
]
periodicvar_amplitudes = [
np.asscalar(precvar['details'][x]['actual_varamplitude'])
for x in periodicvar_objectids
]
periodicvar_vartypes = [
precvar['details'][x]['actual_vartype'] for x in periodicvar_objectids
]
#
# do the binning
#
# bin by mag
LOGINFO('binning actual periodic vars by magnitude...')
magbinned_sdssr = []
magbinned_periodicvars = []
magbininds = np.digitize(np.ravel(periodicvar_sdssr), magbins)
for mbinind, magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbin_periodicvars = periodicvar_objectids[magbininds == mbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
magbinned_sdssr.append((magbins[magi] + magbins[magi+1])/2.0)
magbinned_periodicvars.append(thisbin_periodicvars)
# bin by period
LOGINFO('binning actual periodic vars by period...')
periodbinned_periods = []
periodbinned_periodicvars = []
periodbininds = np.digitize(np.ravel(periodicvar_periods), periodbins)
for pbinind, peri in zip(np.unique(periodbininds),
range(len(periodbins)-1)):
thisbin_periodicvars = periodicvar_objectids[periodbininds == pbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
periodbinned_periods.append((periodbins[peri] +
periodbins[peri+1])/2.0)
periodbinned_periodicvars.append(thisbin_periodicvars)
# bin by amplitude of variability
LOGINFO('binning actual periodic vars by variability amplitude...')
amplitudebinned_amplitudes = []
amplitudebinned_periodicvars = []
amplitudebininds = np.digitize(np.ravel(np.abs(periodicvar_amplitudes)),
amplitudebins)
for abinind, ampi in zip(np.unique(amplitudebininds),
range(len(amplitudebins)-1)):
thisbin_periodicvars = periodicvar_objectids[
amplitudebininds == abinind
]
if (thisbin_periodicvars.size > (minbinsize-1)):
amplitudebinned_amplitudes.append(
(amplitudebins[ampi] +
amplitudebins[ampi+1])/2.0
)
amplitudebinned_periodicvars.append(thisbin_periodicvars)
# bin by ndet
LOGINFO('binning actual periodic vars by ndet...')
ndetbinned_ndets = []
ndetbinned_periodicvars = []
ndetbininds = np.digitize(np.ravel(periodicvar_ndet), ndetbins)
for nbinind, ndeti in zip(np.unique(ndetbininds),
range(len(ndetbins)-1)):
thisbin_periodicvars = periodicvar_objectids[ndetbininds == nbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
ndetbinned_ndets.append(
(ndetbins[ndeti] +
ndetbins[ndeti+1])/2.0
)
ndetbinned_periodicvars.append(thisbin_periodicvars)
# now figure out what 'recovered' means using the provided
# aliases_count_as_recovered kwarg
recovered_status = ['actual']
if aliases_count_as_recovered and aliases_count_as_recovered != 'all':
for atype in aliases_count_as_recovered:
if atype in ALIAS_TYPES:
recovered_status.append(atype)
else:
LOGWARNING('unknown alias type: %s, skipping' % atype)
elif aliases_count_as_recovered and aliases_count_as_recovered == 'all':
for atype in ALIAS_TYPES[1:]:
recovered_status.append(atype)
# find all the matching objects for these recovered statuses
recovered_periodicvars = np.array(
[precvar['details'][x]['objectid'] for x in precvar['details']
if (precvar['details'][x] is not None and
precvar['details'][x]['best_recovered_status']
in recovered_status)],
dtype=np.unicode_
)
LOGINFO('recovered %s/%s periodic variables (frac: %.3f) with '
'period recovery status: %s' %
(recovered_periodicvars.size,
actual_periodicvars.size,
float(recovered_periodicvars.size/actual_periodicvars.size),
', '.join(recovered_status)))
# get the objects recovered per bin and overall recovery fractions per bin
magbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in magbinned_periodicvars
]
magbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(magbinned_recovered_objects,
magbinned_periodicvars)])
periodbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in periodbinned_periodicvars
]
periodbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(periodbinned_recovered_objects,
periodbinned_periodicvars)])
amplitudebinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in amplitudebinned_periodicvars
]
amplitudebinned_recfrac = np.array(
[float(x.size/y.size) for x,y
in zip(amplitudebinned_recovered_objects,
amplitudebinned_periodicvars)]
)
ndetbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in ndetbinned_periodicvars
]
ndetbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(ndetbinned_recovered_objects,
ndetbinned_periodicvars)])
# convert the bin medians to arrays
magbinned_sdssr = np.array(magbinned_sdssr)
periodbinned_periods = np.array(periodbinned_periods)
amplitudebinned_amplitudes = np.array(amplitudebinned_amplitudes)
ndetbinned_ndets = np.array(ndetbinned_ndets)
# this is the initial output dict
outdict = {
'simbasedir':simbasedir,
'precvar_results':precvar,
'magcols':magcols,
'objectids':objectid,
'ndet':ndet,
'sdssr':sdssr,
'actual_periodicvars':actual_periodicvars,
'recovered_periodicvars':recovered_periodicvars,
'recovery_definition':recovered_status,
# mag binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'magbins':magbins,
'magbinned_mags':magbinned_sdssr,
'magbinned_periodicvars':magbinned_periodicvars,
'magbinned_recoveredvars':magbinned_recovered_objects,
'magbinned_recfrac':magbinned_recfrac,
# period binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'periodbins':periodbins,
'periodbinned_periods':periodbinned_periods,
'periodbinned_periodicvars':periodbinned_periodicvars,
'periodbinned_recoveredvars':periodbinned_recovered_objects,
'periodbinned_recfrac':periodbinned_recfrac,
# amplitude binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'amplitudebins':amplitudebins,
'amplitudebinned_amplitudes':amplitudebinned_amplitudes,
'amplitudebinned_periodicvars':amplitudebinned_periodicvars,
'amplitudebinned_recoveredvars':amplitudebinned_recovered_objects,
'amplitudebinned_recfrac':amplitudebinned_recfrac,
# ndet binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'ndetbins':ndetbins,
'ndetbinned_ndets':ndetbinned_ndets,
'ndetbinned_periodicvars':ndetbinned_periodicvars,
'ndetbinned_recoveredvars':ndetbinned_recovered_objects,
'ndetbinned_recfrac':ndetbinned_recfrac,
}
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
# figure out all alias types
all_aliastypes = recovered_status
# add these to the outdict
outdict['aliastypes'] = all_aliastypes
outdict['pfmethods'] = all_pfmethods
outdict['vartypes'] = all_vartypes
# these are recfracs per-magcol, -vartype, -periodfinder, -aliastype
# binned appropriately by mags, periods, amplitudes, and ndet
# all of these have the shape as the magcols, aliastypes, pfmethods, and
# vartypes lists above.
magbinned_per_magcol_recfracs = []
magbinned_per_vartype_recfracs = []
magbinned_per_pfmethod_recfracs = []
magbinned_per_aliastype_recfracs = []
periodbinned_per_magcol_recfracs = []
periodbinned_per_vartype_recfracs = []
periodbinned_per_pfmethod_recfracs = []
periodbinned_per_aliastype_recfracs = []
amplitudebinned_per_magcol_recfracs = []
amplitudebinned_per_vartype_recfracs = []
amplitudebinned_per_pfmethod_recfracs = []
amplitudebinned_per_aliastype_recfracs = []
ndetbinned_per_magcol_recfracs = []
ndetbinned_per_vartype_recfracs = []
ndetbinned_per_pfmethod_recfracs = []
ndetbinned_per_aliastype_recfracs = []
#
# finally, we do stuff for the plots!
#
recplotdir = os.path.join(simbasedir, 'periodic-variable-recovery-plots')
if not os.path.exists(recplotdir):
os.mkdir(recplotdir)
# 1. recovery-rate by magbin
# 1a. plot of overall recovery rate per magbin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(magbinned_sdssr, magbinned_recfrac,marker='.',ms=0.0)
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1b. plot of recovery rate per magbin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
magbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
magbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1c. plot of recovery rate per magbin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
magbin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
magbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1d. plot of recovery rate per magbin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
magbin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
magbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1e. plot of recovery rate per magbin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thisat_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
magbin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
magbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2. recovery-rate by periodbin
# 2a. plot of overall recovery rate per periodbin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable period [days]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2b. plot of recovery rate per periodbin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
periodbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
periodbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2c. plot of recovery rate per periodbin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
periodbin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
periodbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2d. plot of recovery rate per periodbin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
periodbin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
periodbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2e. plot of recovery rate per periodbin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
    # figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for periodbin_pv, periodbin_rv in zip(
periodbinned_periodicvars,
periodbinned_recovered_objects
):
thisbin_thisat_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
periodbin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
periodbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3. recovery-rate by amplitude bin
# 3a. plot of overall recovery rate per amplitude bin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3b. plot of recovery rate per amplitude bin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thismagcol_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
amplitudebin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_magcol_recfracs.append(
np.array(thismagcol_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3c. plot of recovery rate per amplitude bin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thispf_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
amplitudebin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
amplitudebinned_per_pfmethod_recfracs.append(
np.array(thispf_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3d. plot of recovery rate per amplitude bin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thisvt_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
amplitudebin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_vartype_recfracs.append(
np.array(thisvt_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3e. plot of recovery rate per amplitude bin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
    # figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thisat_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
amplitudebin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_aliastype_recfracs.append(
np.array(thisat_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4. recovery-rate by ndet bin
# 4a. plot of overall recovery rate per ndet bin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var ndet')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4b. plot of recovery rate per ndet bin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
ndetbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
ndetbinned_per_magcol_recfracs.append(
np.array(thismagcol_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4c. plot of recovery rate per ndet bin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thispf_recvars).size /
ndetbin_pv.size
)
thispf_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
ndetbinned_per_pfmethod_recfracs.append(
np.array(thispf_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4d. plot of recovery rate per ndet bin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] in PERIODIC_VARTYPES)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisvt_recvars).size /
ndetbin_pv.size
)
thisvt_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
ndetbinned_per_vartype_recfracs.append(
np.array(thisvt_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4e. plot of recovery rate per ndet bin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
    # figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thisat_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thisat_recvars).size /
ndetbin_pv.size
)
thisat_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
ndetbinned_per_aliastype_recfracs.append(
np.array(thisat_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# update the lists in the outdict
outdict['magbinned_per_magcol_recfracs'] = (
magbinned_per_magcol_recfracs
)
outdict['magbinned_per_pfmethod_recfracs'] = (
magbinned_per_pfmethod_recfracs
)
outdict['magbinned_per_vartype_recfracs'] = (
magbinned_per_vartype_recfracs
)
outdict['magbinned_per_aliastype_recfracs'] = (
magbinned_per_aliastype_recfracs
)
outdict['periodbinned_per_magcol_recfracs'] = (
periodbinned_per_magcol_recfracs
)
outdict['periodbinned_per_pfmethod_recfracs'] = (
periodbinned_per_pfmethod_recfracs
)
outdict['periodbinned_per_vartype_recfracs'] = (
periodbinned_per_vartype_recfracs
)
outdict['periodbinned_per_aliastype_recfracs'] = (
periodbinned_per_aliastype_recfracs
)
outdict['amplitudebinned_per_magcol_recfracs'] = (
amplitudebinned_per_magcol_recfracs
)
outdict['amplitudebinned_per_pfmethod_recfracs'] = (
amplitudebinned_per_pfmethod_recfracs
)
outdict['amplitudebinned_per_vartype_recfracs'] = (
amplitudebinned_per_vartype_recfracs
)
outdict['amplitudebinned_per_aliastype_recfracs'] = (
amplitudebinned_per_aliastype_recfracs
)
outdict['ndetbinned_per_magcol_recfracs'] = (
ndetbinned_per_magcol_recfracs
)
outdict['ndetbinned_per_pfmethod_recfracs'] = (
ndetbinned_per_pfmethod_recfracs
)
outdict['ndetbinned_per_vartype_recfracs'] = (
ndetbinned_per_vartype_recfracs
)
outdict['ndetbinned_per_aliastype_recfracs'] = (
ndetbinned_per_aliastype_recfracs
)
# get the overall recovered vars per pfmethod
overall_recvars_per_pfmethod = []
for pfm in all_pfmethods:
thispfm_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_pfmethod'] == pfm))
])
overall_recvars_per_pfmethod.append(thispfm_recvars)
# get the overall recovered vars per vartype
overall_recvars_per_vartype = []
for vt in all_vartypes:
thisvt_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['actual_vartype'] == vt))
])
overall_recvars_per_vartype.append(thisvt_recvars)
# get the overall recovered vars per magcol
overall_recvars_per_magcol = []
for mc in magcols:
thismc_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_magcol'] == mc))
])
overall_recvars_per_magcol.append(thismc_recvars)
# get the overall recovered vars per aliastype
overall_recvars_per_aliastype = []
for at in all_aliastypes:
thisat_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_status'] == at))
])
overall_recvars_per_aliastype.append(thisat_recvars)
# update the outdict with these
outdict['overall_recfrac_per_pfmethod'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_pfmethod
])
outdict['overall_recfrac_per_vartype'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_vartype
])
outdict['overall_recfrac_per_magcol'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_magcol
])
outdict['overall_recfrac_per_aliastype'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_aliastype
])
# 5. bar plot of overall recovery rate per pfmethod
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_pfmethods))
xl = all_pfmethods
plt.barh(xt, outdict['overall_recfrac_per_pfmethod'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('period-finding method')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per period-finding method')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 6. bar plot of overall recovery rate per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(magcols))
xl = magcols
plt.barh(xt, outdict['overall_recfrac_per_magcol'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('light curve magnitude column')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per light curve magcol')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-magcol.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 7. bar plot of overall recovery rate per aliastype
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_aliastypes))
xl = all_aliastypes
plt.barh(xt, outdict['overall_recfrac_per_aliastype'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('period recovery status')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per period recovery status')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 8. bar plot of overall recovery rate per vartype
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_vartypes))
xl = all_vartypes
plt.barh(xt, outdict['overall_recfrac_per_vartype'], 0.50)
plt.yticks(xt, xl)
plt.xlabel('periodic variable type')
plt.ylabel('overall recovery rate')
plt.title('overall recovery rate per periodic variable type')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 9. overall recovered period periodogram for objects that aren't actual
# periodic variables. this effectively should give us the window function of
# the observations
notvariable_recovered_periods = np.concatenate([
precvar['details'][x]['recovery_periods']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
notvariable_recovered_lspvals = np.concatenate([
precvar['details'][x]['recovery_lspvals']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
sortind = np.argsort(notvariable_recovered_periods)
notvariable_recovered_periods = notvariable_recovered_periods[sortind]
notvariable_recovered_lspvals = notvariable_recovered_lspvals[sortind]
outdict['notvariable_recovered_periods'] = notvariable_recovered_periods
outdict['notvariable_recovered_lspvals'] = notvariable_recovered_lspvals
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(notvariable_recovered_periods,
notvariable_recovered_lspvals,
ms=1.0,linestyle='none',marker='.')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('recovered normalized periodogram power')
plt.title('periodogram for actual not-variable objects')
plt.savefig(
os.path.join(recplotdir,
'recovered-periodogram-nonvariables.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 10. overall recovered period histogram for objects marked
# not-variable. this gives us the most common periods
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.hist(notvariable_recovered_periods,bins=np.arange(0.02,300.0,1.0e-3),
histtype='step')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('number of times periods recovered')
plt.title('recovered period histogram for non-variable objects')
plt.savefig(
os.path.join(recplotdir,
'recovered-period-hist-nonvariables.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# at the end, write the outdict to a pickle and return it
outfile = os.path.join(simbasedir, 'periodicvar-recovery-plotresults.pkl')
with open(outfile,'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict
| [
"matplotlib.pyplot.title",
"os.mkdir",
"pickle.dump",
"matplotlib.pyplot.savefig",
"numpy.random.seed",
"numpy.abs",
"numpy.ravel",
"matplotlib.pyplot.suptitle",
"numpy.argsort",
"datetime.datetime.utcnow",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.arange",
"numpy.isclose",
"matp... | [((2312, 2330), 'numpy.random.seed', 'npr.seed', (['(14600959)'], {}), '(14600959)\n', (2320, 2330), True, 'import numpy.random as npr\n'), ((2409, 2430), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (2423, 2430), False, 'import matplotlib\n'), ((582, 632), 'logging.getLogger', 'logging.getLogger', (["('%s.%s' % (parent_name, LOGMOD))"], {}), "('%s.%s' % (parent_name, LOGMOD))\n", (599, 632), False, 'import logging\n'), ((2196, 2230), 'functools.reduce', 'reduce', (['getitem', 'keylist', 'datadict'], {}), '(getitem, keylist, datadict)\n', (2202, 2230), False, 'from functools import reduce\n'), ((3558, 3597), 'os.path.join', 'os.path.join', (['simbasedir', '"""varfeatures"""'], {}), "(simbasedir, 'varfeatures')\n", (3570, 3597), False, 'import os\n'), ((4666, 4716), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelc-varfeatures.pkl"""'], {}), "(simbasedir, 'fakelc-varfeatures.pkl')\n", (4678, 4716), False, 'import os\n'), ((5279, 5339), 'math.sqrt', 'msqrt', (['((ntp + nfp) * (ntp + nfn) * (ntn + nfp) * (ntn + nfn))'], {}), '((ntp + nfp) * (ntp + nfn) * (ntn + nfp) * (ntn + nfn))\n', (5284, 5339), True, 'from math import sqrt as msqrt\n'), ((7352, 7401), 'os.path.join', 'os.path.join', (['simbasedir', '"""recvar-threshold-pkls"""'], {}), "(simbasedir, 'recvar-threshold-pkls')\n", (7364, 7401), False, 'import os\n'), ((7517, 7556), 'os.path.join', 'os.path.join', (['simbasedir', '"""varfeatures"""'], {}), "(simbasedir, 'varfeatures')\n", (7529, 7556), False, 'import os\n'), ((7578, 7708), 'os.path.join', 'os.path.join', (['outdir', "('varthresh-magbinmed%.2f-stet%.2f-inveta%.2f.pkl' % (magbinmedian,\n stetson_stdev_min, inveta_stdev_min))"], {}), "(outdir, 'varthresh-magbinmed%.2f-stet%.2f-inveta%.2f.pkl' % (\n magbinmedian, stetson_stdev_min, inveta_stdev_min))\n", (7590, 7708), False, 'import os\n'), ((8452, 8479), 'numpy.digitize', 'np.digitize', (['sdssr', 'magbins'], {}), '(sdssr, magbins)\n', (8463, 8479), True, 
'import numpy as np\n'), ((22934, 22983), 'os.path.join', 'os.path.join', (['simbasedir', '"""recvar-threshold-pkls"""'], {}), "(simbasedir, 'recvar-threshold-pkls')\n", (22946, 22983), False, 'import os\n'), ((23542, 23618), 'numpy.linspace', 'np.linspace', (['stetson_stdev_range[0]', 'stetson_stdev_range[1]'], {'num': 'ngridpoints'}), '(stetson_stdev_range[0], stetson_stdev_range[1], num=ngridpoints)\n', (23553, 23618), True, 'import numpy as np\n'), ((23699, 23773), 'numpy.linspace', 'np.linspace', (['inveta_stdev_range[0]', 'inveta_stdev_range[1]'], {'num': 'ngridpoints'}), '(inveta_stdev_range[0], inveta_stdev_range[1], num=ngridpoints)\n', (23710, 23773), True, 'import numpy as np\n'), ((23849, 23917), 'numpy.linspace', 'np.linspace', (['iqr_stdev_range[0]', 'iqr_stdev_range[1]'], {'num': 'ngridpoints'}), '(iqr_stdev_range[0], iqr_stdev_range[1], num=ngridpoints)\n', (23860, 23917), True, 'import numpy as np\n'), ((24767, 24788), 'multiprocessing.Pool', 'mp.Pool', (['ngridworkers'], {}), '(ngridworkers)\n', (24774, 24788), True, 'import multiprocessing as mp\n'), ((48521, 48587), 'os.path.join', 'os.path.join', (['simbasedir', '"""varindex-gridsearch-magbin-results.pkl"""'], {}), "(simbasedir, 'varindex-gridsearch-magbin-results.pkl')\n", (48533, 48587), False, 'import os\n'), ((51956, 51997), 'os.path.join', 'os.path.join', (['simbasedir', '"""periodfinding"""'], {}), "(simbasedir, 'periodfinding')\n", (51968, 51997), False, 'import os\n'), ((53353, 53405), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelc-periodfinding.pkl"""'], {}), "(simbasedir, 'fakelc-periodfinding.pkl')\n", (53365, 53405), False, 'import os\n'), ((56373, 56390), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (56384, 56390), False, 'import pickle\n'), ((56529, 56581), 'os.path.join', 'os.path.join', (['simbasedir', '"""lightcurves"""', 'lcfbasename'], {}), "(simbasedir, 'lightcurves', lcfbasename)\n", (56541, 56581), False, 'import os\n'), ((60067, 60102), 
'numpy.array', 'np.array', (["pfres['recovery_periods']"], {}), "(pfres['recovery_periods'])\n", (60075, 60102), True, 'import numpy as np\n'), ((60135, 60170), 'numpy.array', 'np.array', (["pfres['recovery_lspvals']"], {}), "(pfres['recovery_lspvals'])\n", (60143, 60170), True, 'import numpy as np\n'), ((60205, 60242), 'numpy.array', 'np.array', (["pfres['recovery_pfmethods']"], {}), "(pfres['recovery_pfmethods'])\n", (60213, 60242), True, 'import numpy as np\n'), ((60275, 60310), 'numpy.array', 'np.array', (["pfres['recovery_magcols']"], {}), "(pfres['recovery_magcols'])\n", (60283, 60310), True, 'import numpy as np\n'), ((64362, 64403), 'os.path.join', 'os.path.join', (['simbasedir', '"""periodfinding"""'], {}), "(simbasedir, 'periodfinding')\n", (64374, 64403), False, 'import os\n'), ((66973, 67000), 'numpy.arange', 'np.arange', (['(8.0)', '(16.25)', '(0.25)'], {}), '(8.0, 16.25, 0.25)\n', (66982, 67000), True, 'import numpy as np\n'), ((67019, 67045), 'numpy.arange', 'np.arange', (['(0.0)', '(500.0)', '(0.5)'], {}), '(0.0, 500.0, 0.5)\n', (67028, 67045), True, 'import numpy as np\n'), ((67067, 67092), 'numpy.arange', 'np.arange', (['(0.0)', '(2.0)', '(0.05)'], {}), '(0.0, 2.0, 0.05)\n', (67076, 67092), True, 'import numpy as np\n'), ((67109, 67140), 'numpy.arange', 'np.arange', (['(0.0)', '(60000.0)', '(1000.0)'], {}), '(0.0, 60000.0, 1000.0)\n', (67118, 67140), True, 'import numpy as np\n'), ((69819, 69863), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelcs-info.pkl"""'], {}), "(simbasedir, 'fakelcs-info.pkl')\n", (69831, 69863), False, 'import os\n'), ((70876, 70903), 'numpy.array', 'np.array', (['periodicvar_sdssr'], {}), '(periodicvar_sdssr)\n', (70884, 70903), True, 'import numpy as np\n'), ((70932, 70963), 'numpy.array', 'np.array', (['periodicvar_objectids'], {}), '(periodicvar_objectids)\n', (70940, 70963), True, 'import numpy as np\n'), ((70987, 71013), 'numpy.array', 'np.array', (['periodicvar_ndet'], {}), '(periodicvar_ndet)\n', (70995, 
71013), True, 'import numpy as np\n'), ((75006, 75216), 'numpy.array', 'np.array', (["[precvar['details'][x]['objectid'] for x in precvar['details'] if precvar[\n 'details'][x] is not None and precvar['details'][x][\n 'best_recovered_status'] in recovered_status]"], {'dtype': 'np.unicode_'}), "([precvar['details'][x]['objectid'] for x in precvar['details'] if \n precvar['details'][x] is not None and precvar['details'][x][\n 'best_recovered_status'] in recovered_status], dtype=np.unicode_)\n", (75014, 75216), True, 'import numpy as np\n'), ((77094, 77119), 'numpy.array', 'np.array', (['magbinned_sdssr'], {}), '(magbinned_sdssr)\n', (77102, 77119), True, 'import numpy as np\n'), ((77147, 77177), 'numpy.array', 'np.array', (['periodbinned_periods'], {}), '(periodbinned_periods)\n', (77155, 77177), True, 'import numpy as np\n'), ((77211, 77247), 'numpy.array', 'np.array', (['amplitudebinned_amplitudes'], {}), '(amplitudebinned_amplitudes)\n', (77219, 77247), True, 'import numpy as np\n'), ((77271, 77297), 'numpy.array', 'np.array', (['ndetbinned_ndets'], {}), '(ndetbinned_ndets)\n', (77279, 77297), True, 'import numpy as np\n'), ((79475, 79615), 'numpy.unique', 'np.unique', (["[precvar['details'][x]['actual_vartype'] for x in precvar['details'] if \n precvar['details'][x]['actual_vartype'] is not None]"], {}), "([precvar['details'][x]['actual_vartype'] for x in precvar[\n 'details'] if precvar['details'][x]['actual_vartype'] is not None])\n", (79484, 79615), True, 'import numpy as np\n'), ((80897, 80957), 'os.path.join', 'os.path.join', (['simbasedir', '"""periodic-variable-recovery-plots"""'], {}), "(simbasedir, 'periodic-variable-recovery-plots')\n", (80909, 80957), False, 'import os\n'), ((81122, 81164), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (81132, 81164), True, 'import matplotlib.pyplot as plt\n'), ((81165, 81229), 'matplotlib.pyplot.plot', 'plt.plot', (['magbinned_sdssr', 
'magbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)'}), "(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0)\n", (81173, 81229), True, 'import matplotlib.pyplot as plt\n'), ((81232, 81264), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (81242, 81264), True, 'import matplotlib.pyplot as plt\n'), ((81270, 81324), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (81280, 81324), True, 'import matplotlib.pyplot as plt\n'), ((81329, 81394), 'matplotlib.pyplot.title', 'plt.title', (['"""overall recovery fraction by periodic var magnitudes"""'], {}), "('overall recovery fraction by periodic var magnitudes')\n", (81338, 81394), True, 'import matplotlib.pyplot as plt\n'), ((81399, 81415), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (81407, 81415), True, 'import matplotlib.pyplot as plt\n'), ((81597, 81613), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (81606, 81613), True, 'import matplotlib.pyplot as plt\n'), ((81680, 81722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (81690, 81722), True, 'import matplotlib.pyplot as plt\n'), ((82691, 82788), 'matplotlib.pyplot.plot', 'plt.plot', (['magbinned_sdssr', 'magbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (82699, 82788), True, 'import matplotlib.pyplot as plt\n'), ((82800, 82832), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (82810, 82832), True, 'import matplotlib.pyplot as plt\n'), ((82838, 82892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction 
of periodic variables')\n", (82848, 82892), True, 'import matplotlib.pyplot as plt\n'), ((82897, 82965), 'matplotlib.pyplot.title', 'plt.title', (['"""per magcol recovery fraction by periodic var magnitudes"""'], {}), "('per magcol recovery fraction by periodic var magnitudes')\n", (82906, 82965), True, 'import matplotlib.pyplot as plt\n'), ((82970, 82986), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (82978, 82986), True, 'import matplotlib.pyplot as plt\n'), ((82990, 83018), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (83000, 83018), True, 'import matplotlib.pyplot as plt\n'), ((83201, 83217), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (83210, 83217), True, 'import matplotlib.pyplot as plt\n'), ((83290, 83332), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (83300, 83332), True, 'import matplotlib.pyplot as plt\n'), ((84503, 84600), 'matplotlib.pyplot.plot', 'plt.plot', (['magbinned_sdssr', 'magbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (84511, 84600), True, 'import matplotlib.pyplot as plt\n'), ((84612, 84644), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (84622, 84644), True, 'import matplotlib.pyplot as plt\n'), ((84650, 84704), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (84660, 84704), True, 'import matplotlib.pyplot as plt\n'), ((84709, 84784), 'matplotlib.pyplot.title', 'plt.title', (['"""per period-finder recovery fraction by periodic var magnitudes"""'], {}), "('per period-finder recovery fraction by periodic var magnitudes')\n", (84718, 84784), True, 
'import matplotlib.pyplot as plt\n'), ((84789, 84805), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (84797, 84805), True, 'import matplotlib.pyplot as plt\n'), ((84809, 84837), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (84819, 84837), True, 'import matplotlib.pyplot as plt\n'), ((85021, 85037), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (85030, 85037), True, 'import matplotlib.pyplot as plt\n'), ((85111, 85153), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (85121, 85153), True, 'import matplotlib.pyplot as plt\n'), ((85199, 85339), 'numpy.unique', 'np.unique', (["[precvar['details'][x]['actual_vartype'] for x in precvar['details'] if \n precvar['details'][x]['actual_vartype'] is not None]"], {}), "([precvar['details'][x]['actual_vartype'] for x in precvar[\n 'details'] if precvar['details'][x]['actual_vartype'] is not None])\n", (85208, 85339), True, 'import numpy as np\n'), ((86311, 86408), 'matplotlib.pyplot.plot', 'plt.plot', (['magbinned_sdssr', 'magbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (86319, 86408), True, 'import matplotlib.pyplot as plt\n'), ((86420, 86452), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (86430, 86452), True, 'import matplotlib.pyplot as plt\n'), ((86458, 86512), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (86468, 86512), True, 'import matplotlib.pyplot as plt\n'), ((86517, 86586), 'matplotlib.pyplot.title', 'plt.title', (['"""per vartype recovery fraction by periodic var magnitudes"""'], {}), "('per vartype recovery fraction by 
periodic var magnitudes')\n", (86526, 86586), True, 'import matplotlib.pyplot as plt\n'), ((86591, 86607), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (86599, 86607), True, 'import matplotlib.pyplot as plt\n'), ((86611, 86639), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (86621, 86639), True, 'import matplotlib.pyplot as plt\n'), ((86822, 86838), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (86831, 86838), True, 'import matplotlib.pyplot as plt\n'), ((86909, 86951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (86919, 86951), True, 'import matplotlib.pyplot as plt\n'), ((87961, 88058), 'matplotlib.pyplot.plot', 'plt.plot', (['magbinned_sdssr', 'magbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(magbinned_sdssr, magbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (87969, 88058), True, 'import matplotlib.pyplot as plt\n'), ((88070, 88102), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (88080, 88102), True, 'import matplotlib.pyplot as plt\n'), ((88108, 88162), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (88118, 88162), True, 'import matplotlib.pyplot as plt\n'), ((88167, 88239), 'matplotlib.pyplot.title', 'plt.title', (['"""per alias-type recovery fraction by periodic var magnitudes"""'], {}), "('per alias-type recovery fraction by periodic var magnitudes')\n", (88176, 88239), True, 'import matplotlib.pyplot as plt\n'), ((88244, 88260), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (88252, 88260), True, 'import matplotlib.pyplot as plt\n'), ((88264, 88292), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (88274, 88292), True, 'import matplotlib.pyplot as plt\n'), ((88477, 88493), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (88486, 88493), True, 'import matplotlib.pyplot as plt\n'), ((88597, 88639), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (88607, 88639), True, 'import matplotlib.pyplot as plt\n'), ((88640, 88712), 'matplotlib.pyplot.plot', 'plt.plot', (['periodbinned_periods', 'periodbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)'}), "(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0)\n", (88648, 88712), True, 'import matplotlib.pyplot as plt\n'), ((88729, 88774), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""periodic variable period [days]"""'], {}), "('periodic variable period [days]')\n", (88739, 88774), True, 'import matplotlib.pyplot as plt\n'), ((88779, 88833), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (88789, 88833), True, 'import matplotlib.pyplot as plt\n'), ((88838, 88900), 'matplotlib.pyplot.title', 'plt.title', (['"""overall recovery fraction by periodic var periods"""'], {}), "('overall recovery fraction by periodic var periods')\n", (88847, 88900), True, 'import matplotlib.pyplot as plt\n'), ((88905, 88921), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (88913, 88921), True, 'import matplotlib.pyplot as plt\n'), ((89100, 89116), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (89109, 89116), True, 'import matplotlib.pyplot as plt\n'), ((89185, 89227), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (89195, 89227), True, 'import matplotlib.pyplot as plt\n'), ((90228, 90332), 'matplotlib.pyplot.plot', 'plt.plot', (['periodbinned_periods', 
'periodbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0,\n label='overall', color='k')\n", (90236, 90332), True, 'import matplotlib.pyplot as plt\n'), ((90345, 90377), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (90355, 90377), True, 'import matplotlib.pyplot as plt\n'), ((90383, 90437), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (90393, 90437), True, 'import matplotlib.pyplot as plt\n'), ((90442, 90507), 'matplotlib.pyplot.title', 'plt.title', (['"""per magcol recovery fraction by periodic var periods"""'], {}), "('per magcol recovery fraction by periodic var periods')\n", (90451, 90507), True, 'import matplotlib.pyplot as plt\n'), ((90512, 90528), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (90520, 90528), True, 'import matplotlib.pyplot as plt\n'), ((90532, 90560), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (90542, 90560), True, 'import matplotlib.pyplot as plt\n'), ((90740, 90756), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (90749, 90756), True, 'import matplotlib.pyplot as plt\n'), ((90832, 90874), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (90842, 90874), True, 'import matplotlib.pyplot as plt\n'), ((92077, 92181), 'matplotlib.pyplot.plot', 'plt.plot', (['periodbinned_periods', 'periodbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0,\n label='overall', color='k')\n", (92085, 92181), True, 'import matplotlib.pyplot as plt\n'), ((92194, 92226), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (92204, 92226), True, 'import matplotlib.pyplot as plt\n'), ((92232, 92286), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (92242, 92286), True, 'import matplotlib.pyplot as plt\n'), ((92291, 92363), 'matplotlib.pyplot.title', 'plt.title', (['"""per period-finder recovery fraction by periodic var periods"""'], {}), "('per period-finder recovery fraction by periodic var periods')\n", (92300, 92363), True, 'import matplotlib.pyplot as plt\n'), ((92368, 92384), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (92376, 92384), True, 'import matplotlib.pyplot as plt\n'), ((92388, 92416), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (92398, 92416), True, 'import matplotlib.pyplot as plt\n'), ((92597, 92613), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (92606, 92613), True, 'import matplotlib.pyplot as plt\n'), ((92690, 92732), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (92700, 92732), True, 'import matplotlib.pyplot as plt\n'), ((92778, 92918), 'numpy.unique', 'np.unique', (["[precvar['details'][x]['actual_vartype'] for x in precvar['details'] if \n precvar['details'][x]['actual_vartype'] is not None]"], {}), "([precvar['details'][x]['actual_vartype'] for x in precvar[\n 'details'] if precvar['details'][x]['actual_vartype'] is not None])\n", (92787, 92918), True, 'import numpy as np\n'), ((93916, 94020), 'matplotlib.pyplot.plot', 'plt.plot', (['periodbinned_periods', 'periodbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0,\n label='overall', color='k')\n", (93924, 
94020), True, 'import matplotlib.pyplot as plt\n'), ((94033, 94065), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (94043, 94065), True, 'import matplotlib.pyplot as plt\n'), ((94071, 94125), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (94081, 94125), True, 'import matplotlib.pyplot as plt\n'), ((94130, 94199), 'matplotlib.pyplot.title', 'plt.title', (['"""per vartype recovery fraction by periodic var magnitudes"""'], {}), "('per vartype recovery fraction by periodic var magnitudes')\n", (94139, 94199), True, 'import matplotlib.pyplot as plt\n'), ((94204, 94220), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (94212, 94220), True, 'import matplotlib.pyplot as plt\n'), ((94224, 94252), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (94234, 94252), True, 'import matplotlib.pyplot as plt\n'), ((94432, 94448), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (94441, 94448), True, 'import matplotlib.pyplot as plt\n'), ((94522, 94564), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (94532, 94564), True, 'import matplotlib.pyplot as plt\n'), ((95599, 95703), 'matplotlib.pyplot.plot', 'plt.plot', (['periodbinned_periods', 'periodbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(periodbinned_periods, periodbinned_recfrac, marker='.', ms=0.0,\n label='overall', color='k')\n", (95607, 95703), True, 'import matplotlib.pyplot as plt\n'), ((95716, 95748), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (95726, 95748), True, 'import matplotlib.pyplot as plt\n'), ((95754, 95808), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (95764, 95808), True, 'import matplotlib.pyplot as plt\n'), ((95813, 95885), 'matplotlib.pyplot.title', 'plt.title', (['"""per alias-type recovery fraction by periodic var magnitudes"""'], {}), "('per alias-type recovery fraction by periodic var magnitudes')\n", (95822, 95885), True, 'import matplotlib.pyplot as plt\n'), ((95890, 95906), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (95898, 95906), True, 'import matplotlib.pyplot as plt\n'), ((95910, 95938), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (95920, 95938), True, 'import matplotlib.pyplot as plt\n'), ((96120, 96136), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (96129, 96136), True, 'import matplotlib.pyplot as plt\n'), ((96248, 96290), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (96258, 96290), True, 'import matplotlib.pyplot as plt\n'), ((96291, 96376), 'matplotlib.pyplot.plot', 'plt.plot', (['amplitudebinned_amplitudes', 'amplitudebinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)'}), "(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',\n ms=0.0)\n", (96299, 96376), True, 'import matplotlib.pyplot as plt\n'), ((96389, 96436), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""periodic variable amplitude [mag]"""'], {}), "('periodic variable amplitude [mag]')\n", (96399, 96436), True, 'import matplotlib.pyplot as plt\n'), ((96441, 96495), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (96451, 96495), True, 'import matplotlib.pyplot as plt\n'), ((96500, 96565), 'matplotlib.pyplot.title', 'plt.title', (['"""overall recovery fraction by periodic var amplitudes"""'], {}), "('overall recovery fraction 
by periodic var amplitudes')\n", (96509, 96565), True, 'import matplotlib.pyplot as plt\n'), ((96570, 96586), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (96578, 96586), True, 'import matplotlib.pyplot as plt\n'), ((96768, 96784), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (96777, 96784), True, 'import matplotlib.pyplot as plt\n'), ((96857, 96899), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (96867, 96899), True, 'import matplotlib.pyplot as plt\n'), ((97945, 98058), 'matplotlib.pyplot.plot', 'plt.plot', (['amplitudebinned_amplitudes', 'amplitudebinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',\n ms=0.0, label='overall', color='k')\n", (97953, 98058), True, 'import matplotlib.pyplot as plt\n'), ((98071, 98103), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (98081, 98103), True, 'import matplotlib.pyplot as plt\n'), ((98109, 98163), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (98119, 98163), True, 'import matplotlib.pyplot as plt\n'), ((98168, 98236), 'matplotlib.pyplot.title', 'plt.title', (['"""per magcol recovery fraction by periodic var amplitudes"""'], {}), "('per magcol recovery fraction by periodic var amplitudes')\n", (98177, 98236), True, 'import matplotlib.pyplot as plt\n'), ((98241, 98257), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (98249, 98257), True, 'import matplotlib.pyplot as plt\n'), ((98261, 98289), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (98271, 98289), True, 'import matplotlib.pyplot as plt\n'), ((98472, 98488), 'matplotlib.pyplot.close', 
'plt.close', (['"""all"""'], {}), "('all')\n", (98481, 98488), True, 'import matplotlib.pyplot as plt\n'), ((98568, 98610), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (98578, 98610), True, 'import matplotlib.pyplot as plt\n'), ((99858, 99971), 'matplotlib.pyplot.plot', 'plt.plot', (['amplitudebinned_amplitudes', 'amplitudebinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',\n ms=0.0, label='overall', color='k')\n", (99866, 99971), True, 'import matplotlib.pyplot as plt\n'), ((99984, 100016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (99994, 100016), True, 'import matplotlib.pyplot as plt\n'), ((100022, 100076), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (100032, 100076), True, 'import matplotlib.pyplot as plt\n'), ((100081, 100156), 'matplotlib.pyplot.title', 'plt.title', (['"""per period-finder recovery fraction by periodic var amplitudes"""'], {}), "('per period-finder recovery fraction by periodic var amplitudes')\n", (100090, 100156), True, 'import matplotlib.pyplot as plt\n'), ((100161, 100177), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (100169, 100177), True, 'import matplotlib.pyplot as plt\n'), ((100181, 100209), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (100191, 100209), True, 'import matplotlib.pyplot as plt\n'), ((100393, 100409), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (100402, 100409), True, 'import matplotlib.pyplot as plt\n'), ((100490, 100532), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', 
(100500, 100532), True, 'import matplotlib.pyplot as plt\n'), ((100578, 100718), 'numpy.unique', 'np.unique', (["[precvar['details'][x]['actual_vartype'] for x in precvar['details'] if \n precvar['details'][x]['actual_vartype'] is not None]"], {}), "([precvar['details'][x]['actual_vartype'] for x in precvar[\n 'details'] if precvar['details'][x]['actual_vartype'] is not None])\n", (100587, 100718), True, 'import numpy as np\n'), ((101768, 101881), 'matplotlib.pyplot.plot', 'plt.plot', (['amplitudebinned_amplitudes', 'amplitudebinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',\n ms=0.0, label='overall', color='k')\n", (101776, 101881), True, 'import matplotlib.pyplot as plt\n'), ((101894, 101926), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (101904, 101926), True, 'import matplotlib.pyplot as plt\n'), ((101932, 101986), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (101942, 101986), True, 'import matplotlib.pyplot as plt\n'), ((101991, 102060), 'matplotlib.pyplot.title', 'plt.title', (['"""per vartype recovery fraction by periodic var amplitudes"""'], {}), "('per vartype recovery fraction by periodic var amplitudes')\n", (102000, 102060), True, 'import matplotlib.pyplot as plt\n'), ((102065, 102081), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (102073, 102081), True, 'import matplotlib.pyplot as plt\n'), ((102085, 102113), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (102095, 102113), True, 'import matplotlib.pyplot as plt\n'), ((102296, 102312), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (102305, 102312), True, 'import matplotlib.pyplot as plt\n'), ((102390, 102432), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (102400, 102432), True, 'import matplotlib.pyplot as plt\n'), ((103517, 103630), 'matplotlib.pyplot.plot', 'plt.plot', (['amplitudebinned_amplitudes', 'amplitudebinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(amplitudebinned_amplitudes, amplitudebinned_recfrac, marker='.',\n ms=0.0, label='overall', color='k')\n", (103525, 103630), True, 'import matplotlib.pyplot as plt\n'), ((103643, 103675), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (103653, 103675), True, 'import matplotlib.pyplot as plt\n'), ((103681, 103735), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (103691, 103735), True, 'import matplotlib.pyplot as plt\n'), ((103740, 103812), 'matplotlib.pyplot.title', 'plt.title', (['"""per alias-type recovery fraction by periodic var amplitudes"""'], {}), "('per alias-type recovery fraction by periodic var amplitudes')\n", (103749, 103812), True, 'import matplotlib.pyplot as plt\n'), ((103817, 103833), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (103825, 103833), True, 'import matplotlib.pyplot as plt\n'), ((103837, 103865), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (103847, 103865), True, 'import matplotlib.pyplot as plt\n'), ((104050, 104066), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (104059, 104066), True, 'import matplotlib.pyplot as plt\n'), ((104168, 104210), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (104178, 104210), True, 'import matplotlib.pyplot as plt\n'), ((104211, 104277), 'matplotlib.pyplot.plot', 'plt.plot', 
(['ndetbinned_ndets', 'ndetbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)'}), "(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0)\n", (104219, 104277), True, 'import matplotlib.pyplot as plt\n'), ((104294, 104344), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""periodic variable light curve points"""'], {}), "('periodic variable light curve points')\n", (104304, 104344), True, 'import matplotlib.pyplot as plt\n'), ((104349, 104403), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (104359, 104403), True, 'import matplotlib.pyplot as plt\n'), ((104408, 104467), 'matplotlib.pyplot.title', 'plt.title', (['"""overall recovery fraction by periodic var ndet"""'], {}), "('overall recovery fraction by periodic var ndet')\n", (104417, 104467), True, 'import matplotlib.pyplot as plt\n'), ((104472, 104488), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (104480, 104488), True, 'import matplotlib.pyplot as plt\n'), ((104664, 104680), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (104673, 104680), True, 'import matplotlib.pyplot as plt\n'), ((104748, 104790), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (104758, 104790), True, 'import matplotlib.pyplot as plt\n'), ((105791, 105890), 'matplotlib.pyplot.plot', 'plt.plot', (['ndetbinned_ndets', 'ndetbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (105799, 105890), True, 'import matplotlib.pyplot as plt\n'), ((105902, 105934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (105912, 105934), True, 'import matplotlib.pyplot as plt\n'), ((105940, 105994), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (105950, 105994), True, 'import matplotlib.pyplot as plt\n'), ((105999, 106062), 'matplotlib.pyplot.title', 'plt.title', (['"""per magcol recovery fraction by periodic var ndets"""'], {}), "('per magcol recovery fraction by periodic var ndets')\n", (106008, 106062), True, 'import matplotlib.pyplot as plt\n'), ((106067, 106083), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (106075, 106083), True, 'import matplotlib.pyplot as plt\n'), ((106087, 106115), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (106097, 106115), True, 'import matplotlib.pyplot as plt\n'), ((106292, 106308), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (106301, 106308), True, 'import matplotlib.pyplot as plt\n'), ((106383, 106425), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (106393, 106425), True, 'import matplotlib.pyplot as plt\n'), ((107626, 107725), 'matplotlib.pyplot.plot', 'plt.plot', (['ndetbinned_ndets', 'ndetbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (107634, 107725), True, 'import matplotlib.pyplot as plt\n'), ((107737, 107769), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (107747, 107769), True, 'import matplotlib.pyplot as plt\n'), ((107775, 107829), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (107785, 107829), True, 'import matplotlib.pyplot as plt\n'), ((107834, 107904), 'matplotlib.pyplot.title', 'plt.title', (['"""per period-finder recovery fraction by periodic 
var ndets"""'], {}), "('per period-finder recovery fraction by periodic var ndets')\n", (107843, 107904), True, 'import matplotlib.pyplot as plt\n'), ((107909, 107925), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (107917, 107925), True, 'import matplotlib.pyplot as plt\n'), ((107929, 107957), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (107939, 107957), True, 'import matplotlib.pyplot as plt\n'), ((108135, 108151), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (108144, 108151), True, 'import matplotlib.pyplot as plt\n'), ((108227, 108269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (108237, 108269), True, 'import matplotlib.pyplot as plt\n'), ((108315, 108469), 'numpy.unique', 'np.unique', (["[precvar['details'][x]['actual_vartype'] for x in precvar['details'] if \n precvar['details'][x]['actual_vartype'] in PERIODIC_VARTYPES]"], {}), "([precvar['details'][x]['actual_vartype'] for x in precvar[\n 'details'] if precvar['details'][x]['actual_vartype'] in PERIODIC_VARTYPES]\n )\n", (108324, 108469), True, 'import numpy as np\n'), ((109468, 109567), 'matplotlib.pyplot.plot', 'plt.plot', (['ndetbinned_ndets', 'ndetbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (109476, 109567), True, 'import matplotlib.pyplot as plt\n'), ((109579, 109611), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (109589, 109611), True, 'import matplotlib.pyplot as plt\n'), ((109617, 109671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (109627, 109671), True, 'import matplotlib.pyplot as plt\n'), 
((109676, 109740), 'matplotlib.pyplot.title', 'plt.title', (['"""per vartype recovery fraction by periodic var ndets"""'], {}), "('per vartype recovery fraction by periodic var ndets')\n", (109685, 109740), True, 'import matplotlib.pyplot as plt\n'), ((109745, 109761), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (109753, 109761), True, 'import matplotlib.pyplot as plt\n'), ((109765, 109793), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (109775, 109793), True, 'import matplotlib.pyplot as plt\n'), ((109970, 109986), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (109979, 109986), True, 'import matplotlib.pyplot as plt\n'), ((110058, 110100), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (110068, 110100), True, 'import matplotlib.pyplot as plt\n'), ((111139, 111238), 'matplotlib.pyplot.plot', 'plt.plot', (['ndetbinned_ndets', 'ndetbinned_recfrac'], {'marker': '"""."""', 'ms': '(0.0)', 'label': '"""overall"""', 'color': '"""k"""'}), "(ndetbinned_ndets, ndetbinned_recfrac, marker='.', ms=0.0, label=\n 'overall', color='k')\n", (111147, 111238), True, 'import matplotlib.pyplot as plt\n'), ((111250, 111282), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SDSS $r$ magnitude"""'], {}), "('SDSS $r$ magnitude')\n", (111260, 111282), True, 'import matplotlib.pyplot as plt\n'), ((111288, 111342), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered fraction of periodic variables"""'], {}), "('recovered fraction of periodic variables')\n", (111298, 111342), True, 'import matplotlib.pyplot as plt\n'), ((111347, 111414), 'matplotlib.pyplot.title', 'plt.title', (['"""per alias-type recovery fraction by periodic var ndets"""'], {}), "('per alias-type recovery fraction by periodic var ndets')\n", (111356, 111414), True, 'import matplotlib.pyplot as plt\n'), ((111419, 111435), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (111427, 111435), True, 'import matplotlib.pyplot as plt\n'), ((111439, 111467), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'markerscale': '(10.0)'}), '(markerscale=10.0)\n', (111449, 111467), True, 'import matplotlib.pyplot as plt\n'), ((111646, 111662), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (111655, 111662), True, 'import matplotlib.pyplot as plt\n'), ((114947, 115036), 'numpy.array', 'np.array', (['[(x.size / actual_periodicvars.size) for x in overall_recvars_per_pfmethod]'], {}), '([(x.size / actual_periodicvars.size) for x in\n overall_recvars_per_pfmethod])\n', (114955, 115036), True, 'import numpy as np\n'), ((115088, 115176), 'numpy.array', 'np.array', (['[(x.size / actual_periodicvars.size) for x in overall_recvars_per_vartype]'], {}), '([(x.size / actual_periodicvars.size) for x in\n overall_recvars_per_vartype])\n', (115096, 115176), True, 'import numpy as np\n'), ((115227, 115314), 'numpy.array', 'np.array', (['[(x.size / actual_periodicvars.size) for x in overall_recvars_per_magcol]'], {}), '([(x.size / actual_periodicvars.size) for x in\n overall_recvars_per_magcol])\n', (115235, 115314), True, 'import numpy as np\n'), ((115368, 115458), 'numpy.array', 'np.array', (['[(x.size / actual_periodicvars.size) for x in overall_recvars_per_aliastype]'], {}), '([(x.size / actual_periodicvars.size) for x in\n overall_recvars_per_aliastype])\n', (115376, 115458), True, 'import numpy as np\n'), ((115533, 115575), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (115543, 115575), True, 'import matplotlib.pyplot as plt\n'), ((115639, 115697), 'matplotlib.pyplot.barh', 'plt.barh', (['xt', "outdict['overall_recfrac_per_pfmethod']", '(0.5)'], {}), "(xt, outdict['overall_recfrac_per_pfmethod'], 0.5)\n", (115647, 115697), True, 'import matplotlib.pyplot as plt\n'), ((115703, 115721), 
'matplotlib.pyplot.yticks', 'plt.yticks', (['xt', 'xl'], {}), '(xt, xl)\n', (115713, 115721), True, 'import matplotlib.pyplot as plt\n'), ((115726, 115761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""period-finding method"""'], {}), "('period-finding method')\n", (115736, 115761), True, 'import matplotlib.pyplot as plt\n'), ((115766, 115801), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""overall recovery rate"""'], {}), "('overall recovery rate')\n", (115776, 115801), True, 'import matplotlib.pyplot as plt\n'), ((115806, 115866), 'matplotlib.pyplot.title', 'plt.title', (['"""overall recovery rate per period-finding method"""'], {}), "('overall recovery rate per period-finding method')\n", (115815, 115866), True, 'import matplotlib.pyplot as plt\n'), ((116040, 116056), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (116049, 116056), True, 'import matplotlib.pyplot as plt\n'), ((116123, 116165), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (116133, 116165), True, 'import matplotlib.pyplot as plt\n'), ((116217, 116273), 'matplotlib.pyplot.barh', 'plt.barh', (['xt', "outdict['overall_recfrac_per_magcol']", '(0.5)'], {}), "(xt, outdict['overall_recfrac_per_magcol'], 0.5)\n", (116225, 116273), True, 'import matplotlib.pyplot as plt\n'), ((116279, 116297), 'matplotlib.pyplot.yticks', 'plt.yticks', (['xt', 'xl'], {}), '(xt, xl)\n', (116289, 116297), True, 'import matplotlib.pyplot as plt\n'), ((116302, 116344), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""light curve magnitude column"""'], {}), "('light curve magnitude column')\n", (116312, 116344), True, 'import matplotlib.pyplot as plt\n'), ((116349, 116384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""overall recovery rate"""'], {}), "('overall recovery rate')\n", (116359, 116384), True, 'import matplotlib.pyplot as plt\n'), ((116389, 116446), 'matplotlib.pyplot.title', 'plt.title', (['"""overall 
recovery rate per light curve magcol"""'], {}), "('overall recovery rate per light curve magcol')\n", (116398, 116446), True, 'import matplotlib.pyplot as plt\n'), ((116618, 116634), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (116627, 116634), True, 'import matplotlib.pyplot as plt\n'), ((116704, 116746), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (116714, 116746), True, 'import matplotlib.pyplot as plt\n'), ((116812, 116871), 'matplotlib.pyplot.barh', 'plt.barh', (['xt', "outdict['overall_recfrac_per_aliastype']", '(0.5)'], {}), "(xt, outdict['overall_recfrac_per_aliastype'], 0.5)\n", (116820, 116871), True, 'import matplotlib.pyplot as plt\n'), ((116877, 116895), 'matplotlib.pyplot.yticks', 'plt.yticks', (['xt', 'xl'], {}), '(xt, xl)\n', (116887, 116895), True, 'import matplotlib.pyplot as plt\n'), ((116900, 116936), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""period recovery status"""'], {}), "('period recovery status')\n", (116910, 116936), True, 'import matplotlib.pyplot as plt\n'), ((116941, 116976), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""overall recovery rate"""'], {}), "('overall recovery rate')\n", (116951, 116976), True, 'import matplotlib.pyplot as plt\n'), ((116981, 117042), 'matplotlib.pyplot.title', 'plt.title', (['"""overall recovery rate per period recovery status"""'], {}), "('overall recovery rate per period recovery status')\n", (116990, 117042), True, 'import matplotlib.pyplot as plt\n'), ((117217, 117233), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (117226, 117233), True, 'import matplotlib.pyplot as plt\n'), ((117301, 117343), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (117311, 117343), True, 'import matplotlib.pyplot as plt\n'), ((117405, 117462), 'matplotlib.pyplot.barh', 'plt.barh', (['xt', 
"outdict['overall_recfrac_per_vartype']", '(0.5)'], {}), "(xt, outdict['overall_recfrac_per_vartype'], 0.5)\n", (117413, 117462), True, 'import matplotlib.pyplot as plt\n'), ((117468, 117486), 'matplotlib.pyplot.yticks', 'plt.yticks', (['xt', 'xl'], {}), '(xt, xl)\n', (117478, 117486), True, 'import matplotlib.pyplot as plt\n'), ((117491, 117527), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""periodic variable type"""'], {}), "('periodic variable type')\n", (117501, 117527), True, 'import matplotlib.pyplot as plt\n'), ((117532, 117567), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""overall recovery rate"""'], {}), "('overall recovery rate')\n", (117542, 117567), True, 'import matplotlib.pyplot as plt\n'), ((117572, 117633), 'matplotlib.pyplot.title', 'plt.title', (['"""overall recovery rate per periodic variable type"""'], {}), "('overall recovery rate per periodic variable type')\n", (117581, 117633), True, 'import matplotlib.pyplot as plt\n'), ((117806, 117822), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (117815, 117822), True, 'import matplotlib.pyplot as plt\n'), ((118043, 118186), 'numpy.concatenate', 'np.concatenate', (["[precvar['details'][x]['recovery_periods'] for x in precvar['details'] if \n precvar['details'][x]['actual_vartype'] is None]"], {}), "([precvar['details'][x]['recovery_periods'] for x in precvar[\n 'details'] if precvar['details'][x]['actual_vartype'] is None])\n", (118057, 118186), True, 'import numpy as np\n'), ((118250, 118393), 'numpy.concatenate', 'np.concatenate', (["[precvar['details'][x]['recovery_lspvals'] for x in precvar['details'] if \n precvar['details'][x]['actual_vartype'] is None]"], {}), "([precvar['details'][x]['recovery_lspvals'] for x in precvar[\n 'details'] if precvar['details'][x]['actual_vartype'] is None])\n", (118264, 118393), True, 'import numpy as np\n'), ((118436, 118477), 'numpy.argsort', 'np.argsort', (['notvariable_recovered_periods'], {}), 
'(notvariable_recovered_periods)\n', (118446, 118477), True, 'import numpy as np\n'), ((118794, 118836), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (118804, 118836), True, 'import matplotlib.pyplot as plt\n'), ((118836, 118949), 'matplotlib.pyplot.plot', 'plt.plot', (['notvariable_recovered_periods', 'notvariable_recovered_lspvals'], {'ms': '(1.0)', 'linestyle': '"""none"""', 'marker': '"""."""'}), "(notvariable_recovered_periods, notvariable_recovered_lspvals, ms=\n 1.0, linestyle='none', marker='.')\n", (118844, 118949), True, 'import matplotlib.pyplot as plt\n'), ((118973, 118990), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (118983, 118990), True, 'import matplotlib.pyplot as plt\n'), ((118995, 119033), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recovered periods [days]"""'], {}), "('recovered periods [days]')\n", (119005, 119033), True, 'import matplotlib.pyplot as plt\n'), ((119038, 119090), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recovered normalized periodogram power"""'], {}), "('recovered normalized periodogram power')\n", (119048, 119090), True, 'import matplotlib.pyplot as plt\n'), ((119095, 119151), 'matplotlib.pyplot.title', 'plt.title', (['"""periodogram for actual not-variable objects"""'], {}), "('periodogram for actual not-variable objects')\n", (119104, 119151), True, 'import matplotlib.pyplot as plt\n'), ((119335, 119351), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (119344, 119351), True, 'import matplotlib.pyplot as plt\n'), ((119485, 119527), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 1.5, 4.8 * 1.5)'}), '(figsize=(6.4 * 1.5, 4.8 * 1.5))\n', (119495, 119527), True, 'import matplotlib.pyplot as plt\n'), ((119635, 119652), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (119645, 119652), True, 'import matplotlib.pyplot as plt\n'), 
((119657, 119695), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""recovered periods [days]"""'], {}), "('recovered periods [days]')\n", (119667, 119695), True, 'import matplotlib.pyplot as plt\n'), ((119700, 119747), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of times periods recovered"""'], {}), "('number of times periods recovered')\n", (119710, 119747), True, 'import matplotlib.pyplot as plt\n'), ((119752, 119816), 'matplotlib.pyplot.title', 'plt.title', (['"""recovered period histogram for non-variable objects"""'], {}), "('recovered period histogram for non-variable objects')\n", (119761, 119816), True, 'import matplotlib.pyplot as plt\n'), ((120000, 120016), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (120009, 120016), True, 'import matplotlib.pyplot as plt\n'), ((120095, 120159), 'os.path.join', 'os.path.join', (['simbasedir', '"""periodicvar-recovery-plotresults.pkl"""'], {}), "(simbasedir, 'periodicvar-recovery-plotresults.pkl')\n", (120107, 120159), False, 'import os\n'), ((3485, 3502), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (3496, 3502), False, 'import pickle\n'), ((4601, 4653), 'pickle.dump', 'pickle.dump', (['varinfo', 'outfd', 'pickle.HIGHEST_PROTOCOL'], {}), '(varinfo, outfd, pickle.HIGHEST_PROTOCOL)\n', (4612, 4653), False, 'import pickle\n'), ((6409, 6426), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (6420, 6426), False, 'import pickle\n'), ((7413, 7435), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (7427, 7435), False, 'import os\n'), ((7445, 7461), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (7453, 7461), False, 'import os\n'), ((8752, 8773), 'numpy.unique', 'np.unique', (['magbininds'], {}), '(magbininds)\n', (8761, 8773), True, 'import numpy as np\n'), ((8850, 8881), 'numpy.where', 'np.where', (['(magbininds == mbinind)'], {}), '(magbininds == mbinind)\n', (8858, 8881), True, 'import numpy as np\n'), ((9983, 10008), 'numpy.asscalar', 
'np.asscalar', (['magbinind[0]'], {}), '(magbinind[0])\n', (9994, 10008), True, 'import numpy as np\n'), ((10501, 10552), 'numpy.setdiff1d', 'np.setdiff1d', (['thisbin_objectids', 'stet_recoveredvars'], {}), '(thisbin_objectids, stet_recoveredvars)\n', (10513, 10552), True, 'import numpy as np\n'), ((10628, 10682), 'numpy.intersect1d', 'np.intersect1d', (['stet_recoveredvars', 'thisbin_actualvars'], {}), '(stet_recoveredvars, thisbin_actualvars)\n', (10642, 10682), True, 'import numpy as np\n'), ((10757, 10814), 'numpy.intersect1d', 'np.intersect1d', (['stet_recoveredvars', 'thisbin_actualnotvars'], {}), '(stet_recoveredvars, thisbin_actualnotvars)\n', (10771, 10814), True, 'import numpy as np\n'), ((10889, 10949), 'numpy.intersect1d', 'np.intersect1d', (['stet_recoverednotvars', 'thisbin_actualnotvars'], {}), '(stet_recoverednotvars, thisbin_actualnotvars)\n', (10903, 10949), True, 'import numpy as np\n'), ((11024, 11081), 'numpy.intersect1d', 'np.intersect1d', (['stet_recoverednotvars', 'thisbin_actualvars'], {}), '(stet_recoverednotvars, thisbin_actualvars)\n', (11038, 11081), True, 'import numpy as np\n'), ((11899, 11952), 'numpy.setdiff1d', 'np.setdiff1d', (['thisbin_objectids', 'inveta_recoveredvars'], {}), '(thisbin_objectids, inveta_recoveredvars)\n', (11911, 11952), True, 'import numpy as np\n'), ((12032, 12088), 'numpy.intersect1d', 'np.intersect1d', (['inveta_recoveredvars', 'thisbin_actualvars'], {}), '(inveta_recoveredvars, thisbin_actualvars)\n', (12046, 12088), True, 'import numpy as np\n'), ((12167, 12226), 'numpy.intersect1d', 'np.intersect1d', (['inveta_recoveredvars', 'thisbin_actualnotvars'], {}), '(inveta_recoveredvars, thisbin_actualnotvars)\n', (12181, 12226), True, 'import numpy as np\n'), ((12305, 12367), 'numpy.intersect1d', 'np.intersect1d', (['inveta_recoverednotvars', 'thisbin_actualnotvars'], {}), '(inveta_recoverednotvars, thisbin_actualnotvars)\n', (12319, 12367), True, 'import numpy as np\n'), ((12446, 12505), 'numpy.intersect1d', 
'np.intersect1d', (['inveta_recoverednotvars', 'thisbin_actualvars'], {}), '(inveta_recoverednotvars, thisbin_actualvars)\n', (12460, 12505), True, 'import numpy as np\n'), ((13344, 13394), 'numpy.setdiff1d', 'np.setdiff1d', (['thisbin_objectids', 'iqr_recoveredvars'], {}), '(thisbin_objectids, iqr_recoveredvars)\n', (13356, 13394), True, 'import numpy as np\n'), ((13468, 13521), 'numpy.intersect1d', 'np.intersect1d', (['iqr_recoveredvars', 'thisbin_actualvars'], {}), '(iqr_recoveredvars, thisbin_actualvars)\n', (13482, 13521), True, 'import numpy as np\n'), ((13594, 13650), 'numpy.intersect1d', 'np.intersect1d', (['iqr_recoveredvars', 'thisbin_actualnotvars'], {}), '(iqr_recoveredvars, thisbin_actualnotvars)\n', (13608, 13650), True, 'import numpy as np\n'), ((13723, 13782), 'numpy.intersect1d', 'np.intersect1d', (['iqr_recoverednotvars', 'thisbin_actualnotvars'], {}), '(iqr_recoverednotvars, thisbin_actualnotvars)\n', (13737, 13782), True, 'import numpy as np\n'), ((13855, 13911), 'numpy.intersect1d', 'np.intersect1d', (['iqr_recoverednotvars', 'thisbin_actualvars'], {}), '(iqr_recoverednotvars, thisbin_actualvars)\n', (13869, 13911), True, 'import numpy as np\n'), ((14633, 14687), 'numpy.setdiff1d', 'np.setdiff1d', (['inveta_truepositives', 'stet_truepositives'], {}), '(inveta_truepositives, stet_truepositives)\n', (14645, 14687), True, 'import numpy as np\n'), ((14768, 14819), 'numpy.setdiff1d', 'np.setdiff1d', (['iqr_truepositives', 'stet_truepositives'], {}), '(iqr_truepositives, stet_truepositives)\n', (14780, 14819), True, 'import numpy as np\n'), ((14901, 14955), 'numpy.setdiff1d', 'np.setdiff1d', (['stet_truepositives', 'inveta_truepositives'], {}), '(stet_truepositives, inveta_truepositives)\n', (14913, 14955), True, 'import numpy as np\n'), ((15038, 15091), 'numpy.setdiff1d', 'np.setdiff1d', (['iqr_truepositives', 'inveta_truepositives'], {}), '(iqr_truepositives, inveta_truepositives)\n', (15050, 15091), True, 'import numpy as np\n'), ((15172, 15223), 
'numpy.setdiff1d', 'np.setdiff1d', (['stet_truepositives', 'iqr_truepositives'], {}), '(stet_truepositives, iqr_truepositives)\n', (15184, 15223), True, 'import numpy as np\n'), ((15303, 15356), 'numpy.setdiff1d', 'np.setdiff1d', (['inveta_truepositives', 'iqr_truepositives'], {}), '(inveta_truepositives, iqr_truepositives)\n', (15315, 15356), True, 'import numpy as np\n'), ((22994, 23016), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (23008, 23016), False, 'import os\n'), ((23026, 23042), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (23034, 23042), False, 'import os\n'), ((23175, 23192), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (23186, 23192), False, 'import pickle\n'), ((24667, 24694), 'os.path.abspath', 'os.path.abspath', (['simbasedir'], {}), '(simbasedir)\n', (24682, 24694), False, 'import os\n'), ((25385, 25442), 'pickle.dump', 'pickle.dump', (['grid_results', 'outfd', 'pickle.HIGHEST_PROTOCOL'], {}), '(grid_results, outfd, pickle.HIGHEST_PROTOCOL)\n', (25396, 25442), False, 'import pickle\n'), ((25716, 25750), 'os.path.exists', 'os.path.exists', (['gridsearch_results'], {}), '(gridsearch_results)\n', (25730, 25750), False, 'import os\n'), ((48676, 48728), 'pickle.dump', 'pickle.dump', (['plotres', 'outfd', 'pickle.HIGHEST_PROTOCOL'], {}), '(plotres, outfd, pickle.HIGHEST_PROTOCOL)\n', (48687, 48728), False, 'import pickle\n'), ((51891, 51908), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (51902, 51908), False, 'import pickle\n'), ((53288, 53340), 'pickle.dump', 'pickle.dump', (['varinfo', 'outfd', 'pickle.HIGHEST_PROTOCOL'], {}), '(varinfo, outfd, pickle.HIGHEST_PROTOCOL)\n', (53299, 53340), False, 'import pickle\n'), ((54838, 54859), 'numpy.array', 'np.array', (['ALIAS_TYPES'], {}), '(ALIAS_TYPES)\n', (54846, 54859), True, 'import numpy as np\n'), ((54915, 54967), 'numpy.isclose', 'np.isclose', (['recoveredperiod', 'aliases'], {'rtol': 'tolerance'}), '(recoveredperiod, aliases, 
rtol=tolerance)\n', (54925, 54967), True, 'import numpy as np\n'), ((54980, 55001), 'numpy.any', 'np.any', (['closest_alias'], {}), '(closest_alias)\n', (54986, 55001), True, 'import numpy as np\n'), ((56287, 56313), 'gzip.open', 'gzip.open', (['fakepfpkl', '"""rb"""'], {}), "(fakepfpkl, 'rb')\n", (56296, 56313), False, 'import gzip\n'), ((56632, 56655), 'os.path.exists', 'os.path.exists', (['lcfpath'], {}), '(lcfpath)\n', (56646, 56655), False, 'import os\n'), ((57540, 57564), 'os.path.abspath', 'os.path.abspath', (['lcfpath'], {}), '(lcfpath)\n', (57555, 57564), False, 'import os\n'), ((57583, 57609), 'os.path.abspath', 'os.path.abspath', (['fakepfpkl'], {}), '(fakepfpkl)\n', (57598, 57609), False, 'import os\n'), ((60517, 60546), 'numpy.isfinite', 'np.isfinite', (['actual_varperiod'], {}), '(actual_varperiod)\n', (60528, 60546), True, 'import numpy as np\n'), ((63003, 63062), 'numpy.array', 'np.array', (["(['not_variable'] * pfres['recovery_periods'].size)"], {}), "(['not_variable'] * pfres['recovery_periods'].size)\n", (63011, 63062), True, 'import numpy as np\n'), ((63117, 63157), 'numpy.zeros', 'np.zeros', (["pfres['recovery_periods'].size"], {}), "(pfres['recovery_periods'].size)\n", (63125, 63157), True, 'import numpy as np\n'), ((63200, 63218), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (63208, 63218), True, 'import numpy as np\n'), ((63262, 63293), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.unicode_'}), '([], dtype=np.unicode_)\n', (63270, 63293), True, 'import numpy as np\n'), ((63334, 63365), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.unicode_'}), '([], dtype=np.unicode_)\n', (63342, 63365), True, 'import numpy as np\n'), ((63406, 63432), 'numpy.array', 'np.array', (["['not_variable']"], {}), "(['not_variable'])\n", (63414, 63432), True, 'import numpy as np\n'), ((63473, 63491), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (63481, 63491), True, 'import numpy as np\n'), ((64415, 64439), 
'os.path.exists', 'os.path.exists', (['pfpkldir'], {}), '(pfpkldir)\n', (64429, 64439), False, 'import os\n'), ((64635, 64680), 'os.path.join', 'os.path.join', (['pfpkldir', '"""*periodfinding*pkl*"""'], {}), "(pfpkldir, '*periodfinding*pkl*')\n", (64647, 64680), False, 'import os\n'), ((64957, 64974), 'multiprocessing.Pool', 'mp.Pool', (['nworkers'], {}), '(nworkers)\n', (64964, 64974), True, 'import multiprocessing as mp\n'), ((65170, 65298), 'numpy.array', 'np.array', (["[x['objectid'] for x in results if x is not None and x['actual_vartype'] in\n PERIODIC_VARTYPES]"], {'dtype': 'np.unicode_'}), "([x['objectid'] for x in results if x is not None and x[\n 'actual_vartype'] in PERIODIC_VARTYPES], dtype=np.unicode_)\n", (65178, 65298), True, 'import numpy as np\n'), ((65377, 65503), 'numpy.array', 'np.array', (["[x['objectid'] for x in results if x is not None and 'actual' in x[\n 'best_recovered_status']]"], {'dtype': 'np.unicode_'}), "([x['objectid'] for x in results if x is not None and 'actual' in x\n ['best_recovered_status']], dtype=np.unicode_)\n", (65385, 65503), True, 'import numpy as np\n'), ((65583, 65708), 'numpy.array', 'np.array', (["[x['objectid'] for x in results if x is not None and 'twice' in x[\n 'best_recovered_status']]"], {'dtype': 'np.unicode_'}), "([x['objectid'] for x in results if x is not None and 'twice' in x[\n 'best_recovered_status']], dtype=np.unicode_)\n", (65591, 65708), True, 'import numpy as np\n'), ((65787, 65911), 'numpy.array', 'np.array', (["[x['objectid'] for x in results if x is not None and 'half' in x[\n 'best_recovered_status']]"], {'dtype': 'np.unicode_'}), "([x['objectid'] for x in results if x is not None and 'half' in x[\n 'best_recovered_status']], dtype=np.unicode_)\n", (65795, 65911), True, 'import numpy as np\n'), ((66507, 66559), 'os.path.join', 'os.path.join', (['simbasedir', '"""periodicvar-recovery.pkl"""'], {}), "(simbasedir, 'periodicvar-recovery.pkl')\n", (66519, 66559), False, 'import os\n'), ((69319, 
69350), 'os.path.exists', 'os.path.exists', (['precvar_results'], {}), '(precvar_results)\n', (69333, 69350), False, 'import os\n'), ((69875, 69898), 'os.path.exists', 'os.path.exists', (['lcinfof'], {}), '(lcinfof)\n', (69889, 69898), False, 'import os\n'), ((70080, 70097), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (70091, 70097), False, 'import pickle\n'), ((71227, 71281), 'numpy.asscalar', 'np.asscalar', (["precvar['details'][x]['actual_varperiod']"], {}), "(precvar['details'][x]['actual_varperiod'])\n", (71238, 71281), True, 'import numpy as np\n'), ((71366, 71423), 'numpy.asscalar', 'np.asscalar', (["precvar['details'][x]['actual_varamplitude']"], {}), "(precvar['details'][x]['actual_varamplitude'])\n", (71377, 71423), True, 'import numpy as np\n'), ((71782, 71809), 'numpy.ravel', 'np.ravel', (['periodicvar_sdssr'], {}), '(periodicvar_sdssr)\n', (71790, 71809), True, 'import numpy as np\n'), ((71850, 71871), 'numpy.unique', 'np.unique', (['magbininds'], {}), '(magbininds)\n', (71859, 71871), True, 'import numpy as np\n'), ((72377, 72406), 'numpy.ravel', 'np.ravel', (['periodicvar_periods'], {}), '(periodicvar_periods)\n', (72385, 72406), True, 'import numpy as np\n'), ((72450, 72474), 'numpy.unique', 'np.unique', (['periodbininds'], {}), '(periodbininds)\n', (72459, 72474), True, 'import numpy as np\n'), ((73208, 73235), 'numpy.unique', 'np.unique', (['amplitudebininds'], {}), '(amplitudebininds)\n', (73217, 73235), True, 'import numpy as np\n'), ((73839, 73865), 'numpy.ravel', 'np.ravel', (['periodicvar_ndet'], {}), '(periodicvar_ndet)\n', (73847, 73865), True, 'import numpy as np\n'), ((73908, 73930), 'numpy.unique', 'np.unique', (['ndetbininds'], {}), '(ndetbininds)\n', (73917, 73930), True, 'import numpy as np\n'), ((75725, 75766), 'numpy.intersect1d', 'np.intersect1d', (['x', 'recovered_periodicvars'], {}), '(x, recovered_periodicvars)\n', (75739, 75766), True, 'import numpy as np\n'), ((76060, 76101), 'numpy.intersect1d', 
'np.intersect1d', (['x', 'recovered_periodicvars'], {}), '(x, recovered_periodicvars)\n', (76074, 76101), True, 'import numpy as np\n'), ((76416, 76457), 'numpy.intersect1d', 'np.intersect1d', (['x', 'recovered_periodicvars'], {}), '(x, recovered_periodicvars)\n', (76430, 76457), True, 'import numpy as np\n'), ((76737, 76778), 'numpy.intersect1d', 'np.intersect1d', (['x', 'recovered_periodicvars'], {}), '(x, recovered_periodicvars)\n', (76751, 76778), True, 'import numpy as np\n'), ((80969, 80995), 'os.path.exists', 'os.path.exists', (['recplotdir'], {}), '(recplotdir)\n', (80983, 80995), False, 'import os\n'), ((81005, 81025), 'os.mkdir', 'os.mkdir', (['recplotdir'], {}), '(recplotdir)\n', (81013, 81025), False, 'import os\n'), ((81440, 81519), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-magnitudes-overall.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-magnitudes-overall.%s' % plotfile_ext)\n", (81452, 81519), False, 'import os\n'), ((83044, 83123), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-magnitudes-magcols.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-magnitudes-magcols.%s' % plotfile_ext)\n", (83056, 83123), False, 'import os\n'), ((84863, 84948), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-magnitudes-pfmethod.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-magnitudes-pfmethod.%s' % plotfile_ext\n )\n", (84875, 84948), False, 'import os\n'), ((86665, 86744), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-magnitudes-vartype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-magnitudes-vartype.%s' % plotfile_ext)\n", (86677, 86744), False, 'import os\n'), ((88318, 88403), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-magnitudes-aliastype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-magnitudes-aliastype.%s' %\n plotfile_ext)\n", (88330, 88403), False, 'import os\n'), ((88946, 89022), 'os.path.join', 
'os.path.join', (['recplotdir', "('recfrac-binned-periods-overall.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-periods-overall.%s' % plotfile_ext)\n", (88958, 89022), False, 'import os\n'), ((90586, 90662), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-periods-magcols.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-periods-magcols.%s' % plotfile_ext)\n", (90598, 90662), False, 'import os\n'), ((92442, 92519), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-periods-pfmethod.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-periods-pfmethod.%s' % plotfile_ext)\n", (92454, 92519), False, 'import os\n'), ((94278, 94354), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-periods-vartype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-periods-vartype.%s' % plotfile_ext)\n", (94290, 94354), False, 'import os\n'), ((95964, 96042), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-periods-aliastype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-periods-aliastype.%s' % plotfile_ext)\n", (95976, 96042), False, 'import os\n'), ((96611, 96690), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-amplitudes-overall.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-amplitudes-overall.%s' % plotfile_ext)\n", (96623, 96690), False, 'import os\n'), ((98315, 98394), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-amplitudes-magcols.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-amplitudes-magcols.%s' % plotfile_ext)\n", (98327, 98394), False, 'import os\n'), ((100235, 100320), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-amplitudes-pfmethod.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-amplitudes-pfmethod.%s' % plotfile_ext\n )\n", (100247, 100320), False, 'import os\n'), ((102139, 102218), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-amplitudes-vartype.%s' % 
plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-amplitudes-vartype.%s' % plotfile_ext)\n", (102151, 102218), False, 'import os\n'), ((103891, 103976), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-amplitudes-aliastype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-amplitudes-aliastype.%s' %\n plotfile_ext)\n", (103903, 103976), False, 'import os\n'), ((104513, 104586), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-ndet-overall.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-ndet-overall.%s' % plotfile_ext)\n", (104525, 104586), False, 'import os\n'), ((106141, 106214), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-ndet-magcols.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-ndet-magcols.%s' % plotfile_ext)\n", (106153, 106214), False, 'import os\n'), ((107983, 108057), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-ndet-pfmethod.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-ndet-pfmethod.%s' % plotfile_ext)\n", (107995, 108057), False, 'import os\n'), ((109819, 109892), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-ndet-vartype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-ndet-vartype.%s' % plotfile_ext)\n", (109831, 109892), False, 'import os\n'), ((111493, 111568), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-binned-ndet-aliastype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-binned-ndet-aliastype.%s' % plotfile_ext)\n", (111505, 111568), False, 'import os\n'), ((113469, 113606), 'numpy.array', 'np.array', (["[x for x in precvar['details'] if x in recovered_periodicvars and precvar[\n 'details'][x]['best_recovered_pfmethod'] == pfm]"], {}), "([x for x in precvar['details'] if x in recovered_periodicvars and \n precvar['details'][x]['best_recovered_pfmethod'] == pfm])\n", (113477, 113606), True, 'import numpy as np\n'), ((113859, 113986), 'numpy.array', 'np.array', (["[x for x in 
precvar['details'] if x in recovered_periodicvars and precvar[\n 'details'][x]['actual_vartype'] == vt]"], {}), "([x for x in precvar['details'] if x in recovered_periodicvars and \n precvar['details'][x]['actual_vartype'] == vt])\n", (113867, 113986), True, 'import numpy as np\n'), ((114230, 114364), 'numpy.array', 'np.array', (["[x for x in precvar['details'] if x in recovered_periodicvars and precvar[\n 'details'][x]['best_recovered_magcol'] == mc]"], {}), "([x for x in precvar['details'] if x in recovered_periodicvars and \n precvar['details'][x]['best_recovered_magcol'] == mc])\n", (114238, 114364), True, 'import numpy as np\n'), ((114620, 114754), 'numpy.array', 'np.array', (["[x for x in precvar['details'] if x in recovered_periodicvars and precvar[\n 'details'][x]['best_recovered_status'] == at]"], {}), "([x for x in precvar['details'] if x in recovered_periodicvars and \n precvar['details'][x]['best_recovered_status'] == at])\n", (114628, 114754), True, 'import numpy as np\n'), ((115892, 115962), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-overall-pfmethod.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-overall-pfmethod.%s' % plotfile_ext)\n", (115904, 115962), False, 'import os\n'), ((116472, 116540), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-overall-magcol.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-overall-magcol.%s' % plotfile_ext)\n", (116484, 116540), False, 'import os\n'), ((117068, 117139), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-overall-aliastype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-overall-aliastype.%s' % plotfile_ext)\n", (117080, 117139), False, 'import os\n'), ((117659, 117728), 'os.path.join', 'os.path.join', (['recplotdir', "('recfrac-overall-vartype.%s' % plotfile_ext)"], {}), "(recplotdir, 'recfrac-overall-vartype.%s' % plotfile_ext)\n", (117671, 117728), False, 'import os\n'), ((119177, 119262), 'os.path.join', 'os.path.join', (['recplotdir', 
"('recovered-periodogram-nonvariables.%s' % plotfile_ext)"], {}), "(recplotdir, 'recovered-periodogram-nonvariables.%s' % plotfile_ext\n )\n", (119189, 119262), False, 'import os\n'), ((119842, 119927), 'os.path.join', 'os.path.join', (['recplotdir', "('recovered-period-hist-nonvariables.%s' % plotfile_ext)"], {}), "(recplotdir, 'recovered-period-hist-nonvariables.%s' % plotfile_ext\n )\n", (119854, 119927), False, 'import os\n'), ((120206, 120258), 'pickle.dump', 'pickle.dump', (['outdict', 'outfd', 'pickle.HIGHEST_PROTOCOL'], {}), '(outdict, outfd, pickle.HIGHEST_PROTOCOL)\n', (120217, 120258), False, 'import pickle\n'), ((2920, 2937), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (2931, 2937), False, 'import pickle\n'), ((3407, 3451), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelcs-info.pkl"""'], {}), "(simbasedir, 'fakelcs-info.pkl')\n", (3419, 3451), False, 'import os\n'), ((4527, 4577), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelc-varfeatures.pkl"""'], {}), "(simbasedir, 'fakelc-varfeatures.pkl')\n", (4539, 4577), False, 'import os\n'), ((6331, 6375), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelcs-info.pkl"""'], {}), "(simbasedir, 'fakelcs-info.pkl')\n", (6343, 6375), False, 'import os\n'), ((23097, 23141), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelcs-info.pkl"""'], {}), "(simbasedir, 'fakelcs-info.pkl')\n", (23109, 23141), False, 'import os\n'), ((25274, 25333), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakevar-recovery-per-magbin.pkl"""'], {}), "(simbasedir, 'fakevar-recovery-per-magbin.pkl')\n", (25286, 25333), False, 'import os\n'), ((25832, 25849), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (25843, 25849), False, 'import pickle\n'), ((30953, 30991), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4 * 5, 4.8 * 3)'}), '(figsize=(6.4 * 5, 4.8 * 3))\n', (30963, 30991), True, 'import matplotlib.pyplot as plt\n'), ((31042, 31062), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(1)'], {}), '(3, 5, 1)\n', (31053, 31062), True, 'import matplotlib.pyplot as plt\n'), ((31735, 31755), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(2)'], {}), '(3, 5, 2)\n', (31746, 31755), True, 'import matplotlib.pyplot as plt\n'), ((32458, 32478), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(3)'], {}), '(3, 5, 3)\n', (32469, 32478), True, 'import matplotlib.pyplot as plt\n'), ((33166, 33186), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(4)'], {}), '(3, 5, 4)\n', (33177, 33186), True, 'import matplotlib.pyplot as plt\n'), ((33958, 33978), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(5)'], {}), '(3, 5, 5)\n', (33969, 33978), True, 'import matplotlib.pyplot as plt\n'), ((34775, 34795), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(6)'], {}), '(3, 5, 6)\n', (34786, 34795), True, 'import matplotlib.pyplot as plt\n'), ((35467, 35487), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(7)'], {}), '(3, 5, 7)\n', (35478, 35487), True, 'import matplotlib.pyplot as plt\n'), ((36189, 36209), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(8)'], {}), '(3, 5, 8)\n', (36200, 36209), True, 'import matplotlib.pyplot as plt\n'), ((36896, 36916), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(9)'], {}), '(3, 5, 9)\n', (36907, 36916), True, 'import matplotlib.pyplot as plt\n'), ((37684, 37705), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(10)'], {}), '(3, 5, 10)\n', (37695, 37705), True, 'import matplotlib.pyplot as plt\n'), ((38500, 38521), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(11)'], {}), '(3, 5, 11)\n', (38511, 38521), True, 'import matplotlib.pyplot as plt\n'), ((39175, 39196), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(12)'], {}), '(3, 5, 12)\n', (39186, 39196), True, 'import matplotlib.pyplot as plt\n'), ((39880, 39901), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(13)'], {}), '(3, 5, 13)\n', (39891, 39901), True, 'import matplotlib.pyplot as plt\n'), ((40570, 40591), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(14)'], {}), '(3, 5, 14)\n', (40581, 40591), True, 'import matplotlib.pyplot as plt\n'), ((41338, 41359), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(5)', '(15)'], {}), '(3, 5, 15)\n', (41349, 41359), True, 'import matplotlib.pyplot as plt\n'), ((42109, 42154), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.25)', 'wspace': '(0.25)'}), '(hspace=0.25, wspace=0.25)\n', (42128, 42154), True, 'import matplotlib.pyplot as plt\n'), ((42167, 42232), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('magcol: %s, magbin: %.3f' % (magcol, magbinmedian))"], {}), "('magcol: %s, magbin: %.3f' % (magcol, magbinmedian))\n", (42179, 42232), True, 'import matplotlib.pyplot as plt\n'), ((42256, 42324), 'os.path.join', 'os.path.join', (["gridresults['simbasedir']", '"""varindex-gridsearch-plots"""'], {}), "(gridresults['simbasedir'], 'varindex-gridsearch-plots')\n", (42268, 42324), False, 'import os\n'), ((42463, 42563), 'os.path.join', 'os.path.join', (['plotdir', "('%s-magbin-%.3f-var-recoverygrid-permagbin.png' % (magcol, magbinmedian))"], {}), "(plotdir, '%s-magbin-%.3f-var-recoverygrid-permagbin.png' % (\n magcol, magbinmedian))\n", (42475, 42563), False, 'import os\n'), ((42634, 42686), 'matplotlib.pyplot.savefig', 'plt.savefig', (['gridplotf'], {'dpi': '(100)', 'bbox_inches': '"""tight"""'}), "(gridplotf, dpi=100, bbox_inches='tight')\n", (42645, 42686), True, 'import matplotlib.pyplot as plt\n'), ((42697, 42713), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (42706, 42713), True, 'import matplotlib.pyplot as plt\n'), ((51813, 51857), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelcs-info.pkl"""'], {}), "(simbasedir, 'fakelcs-info.pkl')\n", (51825, 51857), False, 'import 
os\n'), ((53184, 53236), 'os.path.join', 'os.path.join', (['simbasedir', '"""fakelc-periodfinding.pkl"""'], {}), "(simbasedir, 'fakelc-periodfinding.pkl')\n", (53196, 53236), False, 'import os\n'), ((53613, 53638), 'numpy.isfinite', 'np.isfinite', (['actualperiod'], {}), '(actualperiod)\n', (53624, 53638), True, 'import numpy as np\n'), ((53643, 53671), 'numpy.isfinite', 'np.isfinite', (['recoveredperiod'], {}), '(recoveredperiod)\n', (53654, 53671), True, 'import numpy as np\n'), ((54547, 54675), 'numpy.array', 'np.array', (['[actualperiod, twotimes_p, half_p, alias_1a, alias_1b, alias_2a, alias_2b,\n alias_3a, alias_3b, alias_4a, alias_4b]'], {}), '([actualperiod, twotimes_p, half_p, alias_1a, alias_1b, alias_2a,\n alias_2b, alias_3a, alias_3b, alias_4a, alias_4b])\n', (54555, 54675), True, 'import numpy as np\n'), ((61229, 61263), 'numpy.array', 'np.array', (["pfres['recovery_status']"], {}), "(pfres['recovery_status'])\n", (61237, 61263), True, 'import numpy as np\n'), ((61302, 61335), 'numpy.array', 'np.array', (["pfres['recovery_pdiff']"], {}), "(pfres['recovery_pdiff'])\n", (61310, 61335), True, 'import numpy as np\n'), ((61423, 61454), 'numpy.abs', 'np.abs', (["pfres['recovery_pdiff']"], {}), "(pfres['recovery_pdiff'])\n", (61429, 61454), True, 'import numpy as np\n'), ((62302, 62343), 'numpy.array', 'np.array', (["['no_finite_periods_recovered']"], {}), "(['no_finite_periods_recovered'])\n", (62310, 62343), True, 'import numpy as np\n'), ((62382, 62400), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (62390, 62400), True, 'import numpy as np\n'), ((62446, 62464), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (62454, 62464), True, 'import numpy as np\n'), ((62512, 62543), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.unicode_'}), '([], dtype=np.unicode_)\n', (62520, 62543), True, 'import numpy as np\n'), ((62588, 62619), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.unicode_'}), '([], dtype=np.unicode_)\n', (62596, 
62619), True, 'import numpy as np\n'), ((62664, 62695), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.unicode_'}), '([], dtype=np.unicode_)\n', (62672, 62695), True, 'import numpy as np\n'), ((62739, 62757), 'numpy.array', 'np.array', (['[np.nan]'], {}), '([np.nan])\n', (62747, 62757), True, 'import numpy as np\n'), ((66047, 66074), 'os.path.abspath', 'os.path.abspath', (['simbasedir'], {}), '(simbasedir)\n', (66062, 66074), False, 'import os\n'), ((66614, 66666), 'pickle.dump', 'pickle.dump', (['outdict', 'outfd', 'pickle.HIGHEST_PROTOCOL'], {}), '(outdict, outfd, pickle.HIGHEST_PROTOCOL)\n', (66625, 66666), False, 'import pickle\n'), ((69424, 69441), 'pickle.load', 'pickle.load', (['infd'], {}), '(infd)\n', (69435, 69441), False, 'import pickle\n'), ((73095, 73125), 'numpy.abs', 'np.abs', (['periodicvar_amplitudes'], {}), '(periodicvar_amplitudes)\n', (73101, 73125), True, 'import numpy as np\n'), ((82413, 82442), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (82421, 82442), True, 'import numpy as np\n'), ((82630, 82659), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (82638, 82659), True, 'import numpy as np\n'), ((84234, 84259), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (84242, 84259), True, 'import numpy as np\n'), ((84446, 84471), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (84454, 84471), True, 'import numpy as np\n'), ((86052, 86077), 'numpy.array', 'np.array', (['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (86060, 86077), True, 'import numpy as np\n'), ((86254, 86279), 'numpy.array', 'np.array', (['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (86262, 86279), True, 'import numpy as np\n'), ((87700, 87725), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), '(thisat_recfracs)\n', (87708, 87725), True, 'import numpy as np\n'), ((87904, 87929), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), 
'(thisat_recfracs)\n', (87912, 87929), True, 'import numpy as np\n'), ((89947, 89976), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (89955, 89976), True, 'import numpy as np\n'), ((90167, 90196), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (90175, 90196), True, 'import numpy as np\n'), ((91805, 91830), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (91813, 91830), True, 'import numpy as np\n'), ((92020, 92045), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (92028, 92045), True, 'import numpy as np\n'), ((93654, 93679), 'numpy.array', 'np.array', (['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (93662, 93679), True, 'import numpy as np\n'), ((93859, 93884), 'numpy.array', 'np.array', (['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (93867, 93884), True, 'import numpy as np\n'), ((95335, 95360), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), '(thisat_recfracs)\n', (95343, 95360), True, 'import numpy as np\n'), ((95542, 95567), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), '(thisat_recfracs)\n', (95550, 95567), True, 'import numpy as np\n'), ((97639, 97668), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (97647, 97668), True, 'import numpy as np\n'), ((97875, 97904), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (97883, 97904), True, 'import numpy as np\n'), ((99561, 99586), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (99569, 99586), True, 'import numpy as np\n'), ((99792, 99817), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (99800, 99817), True, 'import numpy as np\n'), ((101480, 101505), 'numpy.array', 'np.array', (['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (101488, 101505), True, 'import numpy as np\n'), ((101701, 101726), 'numpy.array', 'np.array', 
(['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (101709, 101726), True, 'import numpy as np\n'), ((103227, 103252), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), '(thisat_recfracs)\n', (103235, 103252), True, 'import numpy as np\n'), ((103450, 103475), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), '(thisat_recfracs)\n', (103458, 103475), True, 'import numpy as np\n'), ((105490, 105519), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (105498, 105519), True, 'import numpy as np\n'), ((105721, 105750), 'numpy.array', 'np.array', (['thismagcol_recfracs'], {}), '(thismagcol_recfracs)\n', (105729, 105750), True, 'import numpy as np\n'), ((107334, 107359), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (107342, 107359), True, 'import numpy as np\n'), ((107560, 107585), 'numpy.array', 'np.array', (['thispf_recfracs'], {}), '(thispf_recfracs)\n', (107568, 107585), True, 'import numpy as np\n'), ((109186, 109211), 'numpy.array', 'np.array', (['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (109194, 109211), True, 'import numpy as np\n'), ((109402, 109427), 'numpy.array', 'np.array', (['thisvt_recfracs'], {}), '(thisvt_recfracs)\n', (109410, 109427), True, 'import numpy as np\n'), ((110855, 110880), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), '(thisat_recfracs)\n', (110863, 110880), True, 'import numpy as np\n'), ((111073, 111098), 'numpy.array', 'np.array', (['thisat_recfracs'], {}), '(thisat_recfracs)\n', (111081, 111098), True, 'import numpy as np\n'), ((119571, 119600), 'numpy.arange', 'np.arange', (['(0.02)', '(300.0)', '(0.001)'], {}), '(0.02, 300.0, 0.001)\n', (119580, 119600), True, 'import numpy as np\n'), ((3030, 3066), 'pickle.load', 'pickle.load', (['infd'], {'encoding': '"""latin1"""'}), "(infd, encoding='latin1')\n", (3041, 3066), False, 'import pickle\n'), ((9885, 9935), 'numpy.array', 'np.array', (["varthresh[magcol]['binned_sdssr_median']"], {}), 
"(varthresh[magcol]['binned_sdssr_median'])\n", (9893, 9935), True, 'import numpy as np\n'), ((26728, 26789), 'numpy.array', 'np.array', (["[x[magcol]['stet_mcc'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['stet_mcc'] for x in recgrid[magbinind]])\n", (26736, 26789), True, 'import numpy as np\n'), ((26955, 27022), 'numpy.array', 'np.array', (["[x[magcol]['stet_precision'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['stet_precision'] for x in recgrid[magbinind]])\n", (26963, 27022), True, 'import numpy as np\n'), ((27185, 27249), 'numpy.array', 'np.array', (["[x[magcol]['stet_recall'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['stet_recall'] for x in recgrid[magbinind]])\n", (27193, 27249), True, 'import numpy as np\n'), ((27425, 27502), 'numpy.array', 'np.array', (["[x[magcol]['stet_missed_inveta_found'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['stet_missed_inveta_found'] for x in recgrid[magbinind]])\n", (27433, 27502), True, 'import numpy as np\n'), ((27675, 27749), 'numpy.array', 'np.array', (["[x[magcol]['stet_missed_iqr_found'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['stet_missed_iqr_found'] for x in recgrid[magbinind]])\n", (27683, 27749), True, 'import numpy as np\n'), ((31083, 31104), 'numpy.isfinite', 'np.isfinite', (['stet_mcc'], {}), '(stet_mcc)\n', (31094, 31104), True, 'import numpy as np\n'), ((31123, 31170), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['stetson_grid']", 'stet_mcc'], {}), "(gridresults['stetson_grid'], stet_mcc)\n", (31131, 31170), True, 'import matplotlib.pyplot as plt\n'), ((31212, 31262), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""stetson J stdev multiplier threshold"""'], {}), "('stetson J stdev multiplier threshold')\n", (31222, 31262), True, 'import matplotlib.pyplot as plt\n'), ((31279, 31296), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MCC"""'], {}), "('MCC')\n", (31289, 31296), True, 'import matplotlib.pyplot as plt\n'), ((31313, 31343), 'matplotlib.pyplot.title', 
'plt.title', (['"""MCC for stetson J"""'], {}), "('MCC for stetson J')\n", (31322, 31343), True, 'import matplotlib.pyplot as plt\n'), ((31676, 31690), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (31686, 31690), True, 'import matplotlib.pyplot as plt\n'), ((31707, 31721), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (31717, 31721), True, 'import matplotlib.pyplot as plt\n'), ((31776, 31803), 'numpy.isfinite', 'np.isfinite', (['stet_precision'], {}), '(stet_precision)\n', (31787, 31803), True, 'import numpy as np\n'), ((31822, 31875), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['stetson_grid']", 'stet_precision'], {}), "(gridresults['stetson_grid'], stet_precision)\n", (31830, 31875), True, 'import matplotlib.pyplot as plt\n'), ((31917, 31967), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""stetson J stdev multiplier threshold"""'], {}), "('stetson J stdev multiplier threshold')\n", (31927, 31967), True, 'import matplotlib.pyplot as plt\n'), ((31984, 32007), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (31994, 32007), True, 'import matplotlib.pyplot as plt\n'), ((32024, 32060), 'matplotlib.pyplot.title', 'plt.title', (['"""precision for stetson J"""'], {}), "('precision for stetson J')\n", (32033, 32060), True, 'import matplotlib.pyplot as plt\n'), ((32399, 32413), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (32409, 32413), True, 'import matplotlib.pyplot as plt\n'), ((32430, 32444), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (32440, 32444), True, 'import matplotlib.pyplot as plt\n'), ((32499, 32523), 'numpy.isfinite', 'np.isfinite', (['stet_recall'], {}), '(stet_recall)\n', (32510, 32523), True, 'import numpy as np\n'), ((32542, 32592), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['stetson_grid']", 'stet_recall'], {}), "(gridresults['stetson_grid'], stet_recall)\n", (32550, 32592), True, 'import matplotlib.pyplot as 
plt\n'), ((32634, 32684), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""stetson J stdev multiplier threshold"""'], {}), "('stetson J stdev multiplier threshold')\n", (32644, 32684), True, 'import matplotlib.pyplot as plt\n'), ((32701, 32721), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recall"""'], {}), "('recall')\n", (32711, 32721), True, 'import matplotlib.pyplot as plt\n'), ((32738, 32771), 'matplotlib.pyplot.title', 'plt.title', (['"""recall for stetson J"""'], {}), "('recall for stetson J')\n", (32747, 32771), True, 'import matplotlib.pyplot as plt\n'), ((33107, 33121), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (33117, 33121), True, 'import matplotlib.pyplot as plt\n'), ((33138, 33152), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (33148, 33152), True, 'import matplotlib.pyplot as plt\n'), ((33207, 33244), 'numpy.isfinite', 'np.isfinite', (['stet_missed_inveta_found'], {}), '(stet_missed_inveta_found)\n', (33218, 33244), True, 'import numpy as np\n'), ((33263, 33326), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['stetson_grid']", 'stet_missed_inveta_found'], {}), "(gridresults['stetson_grid'], stet_missed_inveta_found)\n", (33271, 33326), True, 'import matplotlib.pyplot as plt\n'), ((33368, 33418), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""stetson J stdev multiplier threshold"""'], {}), "('stetson J stdev multiplier threshold')\n", (33378, 33418), True, 'import matplotlib.pyplot as plt\n'), ((33435, 33490), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# objects stetson missed but inveta found"""'], {}), "('# objects stetson missed but inveta found')\n", (33445, 33490), True, 'import matplotlib.pyplot as plt\n'), ((33507, 33550), 'matplotlib.pyplot.title', 'plt.title', (['"""stetson J missed, inveta found"""'], {}), "('stetson J missed, inveta found')\n", (33516, 33550), True, 'import matplotlib.pyplot as plt\n'), ((33899, 33913), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), 
'([])\n', (33909, 33913), True, 'import matplotlib.pyplot as plt\n'), ((33930, 33944), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (33940, 33944), True, 'import matplotlib.pyplot as plt\n'), ((33999, 34033), 'numpy.isfinite', 'np.isfinite', (['stet_missed_iqr_found'], {}), '(stet_missed_iqr_found)\n', (34010, 34033), True, 'import numpy as np\n'), ((34052, 34112), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['stetson_grid']", 'stet_missed_iqr_found'], {}), "(gridresults['stetson_grid'], stet_missed_iqr_found)\n", (34060, 34112), True, 'import matplotlib.pyplot as plt\n'), ((34154, 34204), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""stetson J stdev multiplier threshold"""'], {}), "('stetson J stdev multiplier threshold')\n", (34164, 34204), True, 'import matplotlib.pyplot as plt\n'), ((34221, 34273), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# objects stetson missed but IQR found"""'], {}), "('# objects stetson missed but IQR found')\n", (34231, 34273), True, 'import matplotlib.pyplot as plt\n'), ((34290, 34330), 'matplotlib.pyplot.title', 'plt.title', (['"""stetson J missed, IQR found"""'], {}), "('stetson J missed, IQR found')\n", (34299, 34330), True, 'import matplotlib.pyplot as plt\n'), ((34676, 34690), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (34686, 34690), True, 'import matplotlib.pyplot as plt\n'), ((34707, 34721), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (34717, 34721), True, 'import matplotlib.pyplot as plt\n'), ((34816, 34839), 'numpy.isfinite', 'np.isfinite', (['inveta_mcc'], {}), '(inveta_mcc)\n', (34827, 34839), True, 'import numpy as np\n'), ((34858, 34906), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['inveta_grid']", 'inveta_mcc'], {}), "(gridresults['inveta_grid'], inveta_mcc)\n", (34866, 34906), True, 'import matplotlib.pyplot as plt\n'), ((34948, 34995), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""inveta stdev multiplier threshold"""'], {}), 
"('inveta stdev multiplier threshold')\n", (34958, 34995), True, 'import matplotlib.pyplot as plt\n'), ((35012, 35029), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MCC"""'], {}), "('MCC')\n", (35022, 35029), True, 'import matplotlib.pyplot as plt\n'), ((35046, 35073), 'matplotlib.pyplot.title', 'plt.title', (['"""MCC for inveta"""'], {}), "('MCC for inveta')\n", (35055, 35073), True, 'import matplotlib.pyplot as plt\n'), ((35408, 35422), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (35418, 35422), True, 'import matplotlib.pyplot as plt\n'), ((35439, 35453), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (35449, 35453), True, 'import matplotlib.pyplot as plt\n'), ((35508, 35537), 'numpy.isfinite', 'np.isfinite', (['inveta_precision'], {}), '(inveta_precision)\n', (35519, 35537), True, 'import numpy as np\n'), ((35556, 35610), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['inveta_grid']", 'inveta_precision'], {}), "(gridresults['inveta_grid'], inveta_precision)\n", (35564, 35610), True, 'import matplotlib.pyplot as plt\n'), ((35652, 35699), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""inveta stdev multiplier threshold"""'], {}), "('inveta stdev multiplier threshold')\n", (35662, 35699), True, 'import matplotlib.pyplot as plt\n'), ((35716, 35739), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (35726, 35739), True, 'import matplotlib.pyplot as plt\n'), ((35756, 35789), 'matplotlib.pyplot.title', 'plt.title', (['"""precision for inveta"""'], {}), "('precision for inveta')\n", (35765, 35789), True, 'import matplotlib.pyplot as plt\n'), ((36130, 36144), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (36140, 36144), True, 'import matplotlib.pyplot as plt\n'), ((36161, 36175), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (36171, 36175), True, 'import matplotlib.pyplot as plt\n'), ((36230, 36256), 'numpy.isfinite', 'np.isfinite', 
(['inveta_recall'], {}), '(inveta_recall)\n', (36241, 36256), True, 'import numpy as np\n'), ((36275, 36326), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['inveta_grid']", 'inveta_recall'], {}), "(gridresults['inveta_grid'], inveta_recall)\n", (36283, 36326), True, 'import matplotlib.pyplot as plt\n'), ((36368, 36415), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""inveta stdev multiplier threshold"""'], {}), "('inveta stdev multiplier threshold')\n", (36378, 36415), True, 'import matplotlib.pyplot as plt\n'), ((36432, 36452), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recall"""'], {}), "('recall')\n", (36442, 36452), True, 'import matplotlib.pyplot as plt\n'), ((36469, 36499), 'matplotlib.pyplot.title', 'plt.title', (['"""recall for inveta"""'], {}), "('recall for inveta')\n", (36478, 36499), True, 'import matplotlib.pyplot as plt\n'), ((36837, 36851), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (36847, 36851), True, 'import matplotlib.pyplot as plt\n'), ((36868, 36882), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (36878, 36882), True, 'import matplotlib.pyplot as plt\n'), ((36937, 36974), 'numpy.isfinite', 'np.isfinite', (['inveta_missed_stet_found'], {}), '(inveta_missed_stet_found)\n', (36948, 36974), True, 'import numpy as np\n'), ((36993, 37055), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['inveta_grid']", 'inveta_missed_stet_found'], {}), "(gridresults['inveta_grid'], inveta_missed_stet_found)\n", (37001, 37055), True, 'import matplotlib.pyplot as plt\n'), ((37097, 37144), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""inveta stdev multiplier threshold"""'], {}), "('inveta stdev multiplier threshold')\n", (37107, 37144), True, 'import matplotlib.pyplot as plt\n'), ((37161, 37216), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# objects inveta missed but stetson found"""'], {}), "('# objects inveta missed but stetson found')\n", (37171, 37216), True, 'import matplotlib.pyplot as plt\n'), 
((37233, 37276), 'matplotlib.pyplot.title', 'plt.title', (['"""inveta missed, stetson J found"""'], {}), "('inveta missed, stetson J found')\n", (37242, 37276), True, 'import matplotlib.pyplot as plt\n'), ((37625, 37639), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (37635, 37639), True, 'import matplotlib.pyplot as plt\n'), ((37656, 37670), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (37666, 37670), True, 'import matplotlib.pyplot as plt\n'), ((37726, 37762), 'numpy.isfinite', 'np.isfinite', (['inveta_missed_iqr_found'], {}), '(inveta_missed_iqr_found)\n', (37737, 37762), True, 'import numpy as np\n'), ((37781, 37842), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['inveta_grid']", 'inveta_missed_iqr_found'], {}), "(gridresults['inveta_grid'], inveta_missed_iqr_found)\n", (37789, 37842), True, 'import matplotlib.pyplot as plt\n'), ((37884, 37931), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""inveta stdev multiplier threshold"""'], {}), "('inveta stdev multiplier threshold')\n", (37894, 37931), True, 'import matplotlib.pyplot as plt\n'), ((37948, 37999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# objects inveta missed but IQR found"""'], {}), "('# objects inveta missed but IQR found')\n", (37958, 37999), True, 'import matplotlib.pyplot as plt\n'), ((38016, 38053), 'matplotlib.pyplot.title', 'plt.title', (['"""inveta missed, IQR found"""'], {}), "('inveta missed, IQR found')\n", (38025, 38053), True, 'import matplotlib.pyplot as plt\n'), ((38401, 38415), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (38411, 38415), True, 'import matplotlib.pyplot as plt\n'), ((38432, 38446), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (38442, 38446), True, 'import matplotlib.pyplot as plt\n'), ((38542, 38562), 'numpy.isfinite', 'np.isfinite', (['iqr_mcc'], {}), '(iqr_mcc)\n', (38553, 38562), True, 'import numpy as np\n'), ((38581, 38623), 'matplotlib.pyplot.plot', 'plt.plot', 
(["gridresults['iqr_grid']", 'iqr_mcc'], {}), "(gridresults['iqr_grid'], iqr_mcc)\n", (38589, 38623), True, 'import matplotlib.pyplot as plt\n'), ((38665, 38709), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IQR stdev multiplier threshold"""'], {}), "('IQR stdev multiplier threshold')\n", (38675, 38709), True, 'import matplotlib.pyplot as plt\n'), ((38726, 38743), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MCC"""'], {}), "('MCC')\n", (38736, 38743), True, 'import matplotlib.pyplot as plt\n'), ((38760, 38784), 'matplotlib.pyplot.title', 'plt.title', (['"""MCC for IQR"""'], {}), "('MCC for IQR')\n", (38769, 38784), True, 'import matplotlib.pyplot as plt\n'), ((39116, 39130), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (39126, 39130), True, 'import matplotlib.pyplot as plt\n'), ((39147, 39161), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (39157, 39161), True, 'import matplotlib.pyplot as plt\n'), ((39217, 39243), 'numpy.isfinite', 'np.isfinite', (['iqr_precision'], {}), '(iqr_precision)\n', (39228, 39243), True, 'import numpy as np\n'), ((39262, 39310), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['iqr_grid']", 'iqr_precision'], {}), "(gridresults['iqr_grid'], iqr_precision)\n", (39270, 39310), True, 'import matplotlib.pyplot as plt\n'), ((39352, 39396), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IQR stdev multiplier threshold"""'], {}), "('IQR stdev multiplier threshold')\n", (39362, 39396), True, 'import matplotlib.pyplot as plt\n'), ((39413, 39436), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""precision"""'], {}), "('precision')\n", (39423, 39436), True, 'import matplotlib.pyplot as plt\n'), ((39453, 39483), 'matplotlib.pyplot.title', 'plt.title', (['"""precision for IQR"""'], {}), "('precision for IQR')\n", (39462, 39483), True, 'import matplotlib.pyplot as plt\n'), ((39821, 39835), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (39831, 39835), True, 'import matplotlib.pyplot as 
plt\n'), ((39852, 39866), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (39862, 39866), True, 'import matplotlib.pyplot as plt\n'), ((39922, 39945), 'numpy.isfinite', 'np.isfinite', (['iqr_recall'], {}), '(iqr_recall)\n', (39933, 39945), True, 'import numpy as np\n'), ((39964, 40009), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['iqr_grid']", 'iqr_recall'], {}), "(gridresults['iqr_grid'], iqr_recall)\n", (39972, 40009), True, 'import matplotlib.pyplot as plt\n'), ((40051, 40095), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IQR stdev multiplier threshold"""'], {}), "('IQR stdev multiplier threshold')\n", (40061, 40095), True, 'import matplotlib.pyplot as plt\n'), ((40112, 40132), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""recall"""'], {}), "('recall')\n", (40122, 40132), True, 'import matplotlib.pyplot as plt\n'), ((40149, 40176), 'matplotlib.pyplot.title', 'plt.title', (['"""recall for IQR"""'], {}), "('recall for IQR')\n", (40158, 40176), True, 'import matplotlib.pyplot as plt\n'), ((40511, 40525), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (40521, 40525), True, 'import matplotlib.pyplot as plt\n'), ((40542, 40556), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (40552, 40556), True, 'import matplotlib.pyplot as plt\n'), ((40612, 40646), 'numpy.isfinite', 'np.isfinite', (['iqr_missed_stet_found'], {}), '(iqr_missed_stet_found)\n', (40623, 40646), True, 'import numpy as np\n'), ((40665, 40721), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['iqr_grid']", 'iqr_missed_stet_found'], {}), "(gridresults['iqr_grid'], iqr_missed_stet_found)\n", (40673, 40721), True, 'import matplotlib.pyplot as plt\n'), ((40763, 40807), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IQR stdev multiplier threshold"""'], {}), "('IQR stdev multiplier threshold')\n", (40773, 40807), True, 'import matplotlib.pyplot as plt\n'), ((40824, 40876), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# objects IQR 
missed but stetson found"""'], {}), "('# objects IQR missed but stetson found')\n", (40834, 40876), True, 'import matplotlib.pyplot as plt\n'), ((40893, 40933), 'matplotlib.pyplot.title', 'plt.title', (['"""IQR missed, stetson J found"""'], {}), "('IQR missed, stetson J found')\n", (40902, 40933), True, 'import matplotlib.pyplot as plt\n'), ((41279, 41293), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (41289, 41293), True, 'import matplotlib.pyplot as plt\n'), ((41310, 41324), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (41320, 41324), True, 'import matplotlib.pyplot as plt\n'), ((41380, 41416), 'numpy.isfinite', 'np.isfinite', (['iqr_missed_inveta_found'], {}), '(iqr_missed_inveta_found)\n', (41391, 41416), True, 'import numpy as np\n'), ((41435, 41493), 'matplotlib.pyplot.plot', 'plt.plot', (["gridresults['iqr_grid']", 'iqr_missed_inveta_found'], {}), "(gridresults['iqr_grid'], iqr_missed_inveta_found)\n", (41443, 41493), True, 'import matplotlib.pyplot as plt\n'), ((41535, 41579), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IQR stdev multiplier threshold"""'], {}), "('IQR stdev multiplier threshold')\n", (41545, 41579), True, 'import matplotlib.pyplot as plt\n'), ((41596, 41647), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# objects IQR missed but inveta found"""'], {}), "('# objects IQR missed but inveta found')\n", (41606, 41647), True, 'import matplotlib.pyplot as plt\n'), ((41664, 41701), 'matplotlib.pyplot.title', 'plt.title', (['"""IQR missed, inveta found"""'], {}), "('IQR missed, inveta found')\n", (41673, 41701), True, 'import matplotlib.pyplot as plt\n'), ((42049, 42063), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (42059, 42063), True, 'import matplotlib.pyplot as plt\n'), ((42080, 42094), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (42090, 42094), True, 'import matplotlib.pyplot as plt\n'), ((42379, 42402), 'os.path.exists', 'os.path.exists', (['plotdir'], 
{}), '(plotdir)\n', (42393, 42402), False, 'import os\n'), ((42420, 42437), 'os.mkdir', 'os.mkdir', (['plotdir'], {}), '(plotdir)\n', (42428, 42437), False, 'import os\n'), ((79312, 79366), 'numpy.unique', 'np.unique', (["precvar['details'][x]['recovery_pfmethods']"], {}), "(precvar['details'][x]['recovery_pfmethods'])\n", (79321, 79366), True, 'import numpy as np\n'), ((83440, 83494), 'numpy.unique', 'np.unique', (["precvar['details'][x]['recovery_pfmethods']"], {}), "(precvar['details'][x]['recovery_pfmethods'])\n", (83449, 83494), True, 'import numpy as np\n'), ((90982, 91036), 'numpy.unique', 'np.unique', (["precvar['details'][x]['recovery_pfmethods']"], {}), "(precvar['details'][x]['recovery_pfmethods'])\n", (90991, 91036), True, 'import numpy as np\n'), ((98718, 98772), 'numpy.unique', 'np.unique', (["precvar['details'][x]['recovery_pfmethods']"], {}), "(precvar['details'][x]['recovery_pfmethods'])\n", (98727, 98772), True, 'import numpy as np\n'), ((106533, 106587), 'numpy.unique', 'np.unique', (["precvar['details'][x]['recovery_pfmethods']"], {}), "(precvar['details'][x]['recovery_pfmethods'])\n", (106542, 106587), True, 'import numpy as np\n'), ((1714, 1726), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (1724, 1726), False, 'from traceback import format_exc\n'), ((27912, 27975), 'numpy.array', 'np.array', (["[x[magcol]['inveta_mcc'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['inveta_mcc'] for x in recgrid[magbinind]])\n", (27920, 27975), True, 'import numpy as np\n'), ((28211, 28280), 'numpy.array', 'np.array', (["[x[magcol]['inveta_precision'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['inveta_precision'] for x in recgrid[magbinind]])\n", (28219, 28280), True, 'import numpy as np\n'), ((28513, 28579), 'numpy.array', 'np.array', (["[x[magcol]['inveta_recall'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['inveta_recall'] for x in recgrid[magbinind]])\n", (28521, 28579), True, 'import numpy as np\n'), ((28823, 28900), 
'numpy.array', 'np.array', (["[x[magcol]['inveta_missed_stet_found'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['inveta_missed_stet_found'] for x in recgrid[magbinind]])\n", (28831, 28900), True, 'import numpy as np\n'), ((29143, 29219), 'numpy.array', 'np.array', (["[x[magcol]['inveta_missed_iqr_found'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['inveta_missed_iqr_found'] for x in recgrid[magbinind]])\n", (29151, 29219), True, 'import numpy as np\n'), ((29447, 29507), 'numpy.array', 'np.array', (["[x[magcol]['iqr_mcc'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['iqr_mcc'] for x in recgrid[magbinind]])\n", (29455, 29507), True, 'import numpy as np\n'), ((29739, 29805), 'numpy.array', 'np.array', (["[x[magcol]['iqr_precision'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['iqr_precision'] for x in recgrid[magbinind]])\n", (29747, 29805), True, 'import numpy as np\n'), ((30034, 30097), 'numpy.array', 'np.array', (["[x[magcol]['iqr_recall'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['iqr_recall'] for x in recgrid[magbinind]])\n", (30042, 30097), True, 'import numpy as np\n'), ((30337, 30411), 'numpy.array', 'np.array', (["[x[magcol]['iqr_missed_stet_found'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['iqr_missed_stet_found'] for x in recgrid[magbinind]])\n", (30345, 30411), True, 'import numpy as np\n'), ((30653, 30729), 'numpy.array', 'np.array', (["[x[magcol]['iqr_missed_inveta_found'] for x in recgrid[magbinind]]"], {}), "([x[magcol]['iqr_missed_inveta_found'] for x in recgrid[magbinind]])\n", (30661, 30729), True, 'import numpy as np\n'), ((42875, 42891), 'numpy.max', 'np.max', (['stet_mcc'], {}), '(stet_mcc)\n', (42881, 42891), True, 'import numpy as np\n'), ((42973, 42995), 'numpy.max', 'np.max', (['stet_precision'], {}), '(stet_precision)\n', (42979, 42995), True, 'import numpy as np\n'), ((43067, 43086), 'numpy.max', 'np.max', (['stet_recall'], {}), '(stet_recall)\n', (43073, 43086), True, 'import numpy as np\n'), ((43641, 
43659), 'numpy.max', 'np.max', (['inveta_mcc'], {}), '(inveta_mcc)\n', (43647, 43659), True, 'import numpy as np\n'), ((43745, 43769), 'numpy.max', 'np.max', (['inveta_precision'], {}), '(inveta_precision)\n', (43751, 43769), True, 'import numpy as np\n'), ((43863, 43884), 'numpy.max', 'np.max', (['inveta_recall'], {}), '(inveta_recall)\n', (43869, 43884), True, 'import numpy as np\n'), ((44472, 44487), 'numpy.max', 'np.max', (['iqr_mcc'], {}), '(iqr_mcc)\n', (44478, 44487), True, 'import numpy as np\n'), ((44567, 44588), 'numpy.max', 'np.max', (['iqr_precision'], {}), '(iqr_precision)\n', (44573, 44588), True, 'import numpy as np\n'), ((44676, 44694), 'numpy.max', 'np.max', (['iqr_recall'], {}), '(iqr_recall)\n', (44682, 44694), True, 'import numpy as np\n'), ((82147, 82183), 'numpy.array', 'np.array', (['thisbin_thismagcol_recvars'], {}), '(thisbin_thismagcol_recvars)\n', (82155, 82183), True, 'import numpy as np\n'), ((83976, 84008), 'numpy.array', 'np.array', (['thisbin_thispf_recvars'], {}), '(thisbin_thispf_recvars)\n', (83984, 84008), True, 'import numpy as np\n'), ((85794, 85826), 'numpy.array', 'np.array', (['thisbin_thisvt_recvars'], {}), '(thisbin_thisvt_recvars)\n', (85802, 85826), True, 'import numpy as np\n'), ((87442, 87474), 'numpy.array', 'np.array', (['thisbin_thisat_recvars'], {}), '(thisbin_thisat_recvars)\n', (87450, 87474), True, 'import numpy as np\n'), ((89673, 89709), 'numpy.array', 'np.array', (['thisbin_thismagcol_recvars'], {}), '(thisbin_thismagcol_recvars)\n', (89681, 89709), True, 'import numpy as np\n'), ((91539, 91571), 'numpy.array', 'np.array', (['thisbin_thispf_recvars'], {}), '(thisbin_thispf_recvars)\n', (91547, 91571), True, 'import numpy as np\n'), ((93388, 93420), 'numpy.array', 'np.array', (['thisbin_thisvt_recvars'], {}), '(thisbin_thisvt_recvars)\n', (93396, 93420), True, 'import numpy as np\n'), ((95069, 95101), 'numpy.array', 'np.array', (['thisbin_thisat_recvars'], {}), '(thisbin_thisat_recvars)\n', (95077, 95101), 
True, 'import numpy as np\n'), ((97356, 97392), 'numpy.array', 'np.array', (['thisbin_thismagcol_recvars'], {}), '(thisbin_thismagcol_recvars)\n', (97364, 97392), True, 'import numpy as np\n'), ((99286, 99318), 'numpy.array', 'np.array', (['thisbin_thispf_recvars'], {}), '(thisbin_thispf_recvars)\n', (99294, 99318), True, 'import numpy as np\n'), ((101205, 101237), 'numpy.array', 'np.array', (['thisbin_thisvt_recvars'], {}), '(thisbin_thisvt_recvars)\n', (101213, 101237), True, 'import numpy as np\n'), ((102952, 102984), 'numpy.array', 'np.array', (['thisbin_thisat_recvars'], {}), '(thisbin_thisat_recvars)\n', (102960, 102984), True, 'import numpy as np\n'), ((105222, 105258), 'numpy.array', 'np.array', (['thisbin_thismagcol_recvars'], {}), '(thisbin_thismagcol_recvars)\n', (105230, 105258), True, 'import numpy as np\n'), ((107074, 107106), 'numpy.array', 'np.array', (['thisbin_thispf_recvars'], {}), '(thisbin_thispf_recvars)\n', (107082, 107106), True, 'import numpy as np\n'), ((108926, 108958), 'numpy.array', 'np.array', (['thisbin_thisvt_recvars'], {}), '(thisbin_thisvt_recvars)\n', (108934, 108958), True, 'import numpy as np\n'), ((110595, 110627), 'numpy.array', 'np.array', (['thisbin_thisat_recvars'], {}), '(thisbin_thisat_recvars)\n', (110603, 110627), True, 'import numpy as np\n'), ((58647, 58662), 'numpy.isfinite', 'np.isfinite', (['rp'], {}), '(rp)\n', (58658, 58662), True, 'import numpy as np\n'), ((60786, 60815), 'numpy.asscalar', 'np.asscalar', (['actual_varperiod'], {}), '(actual_varperiod)\n', (60797, 60815), True, 'import numpy as np\n'), ((970, 987), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (985, 987), False, 'from datetime import datetime\n'), ((1177, 1194), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1192, 1194), False, 'from datetime import datetime\n'), ((1388, 1405), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1403, 1405), False, 'from datetime import datetime\n'), ((1639, 
1656), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1654, 1656), False, 'from datetime import datetime\n'), ((31531, 31540), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (31538, 31540), True, 'import matplotlib.pyplot as plt\n'), ((32254, 32263), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32261, 32263), True, 'import matplotlib.pyplot as plt\n'), ((32962, 32971), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (32969, 32971), True, 'import matplotlib.pyplot as plt\n'), ((33754, 33763), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (33761, 33763), True, 'import matplotlib.pyplot as plt\n'), ((34531, 34540), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34538, 34540), True, 'import matplotlib.pyplot as plt\n'), ((35263, 35272), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35270, 35272), True, 'import matplotlib.pyplot as plt\n'), ((35985, 35994), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35992, 35994), True, 'import matplotlib.pyplot as plt\n'), ((36692, 36701), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (36699, 36701), True, 'import matplotlib.pyplot as plt\n'), ((37480, 37489), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (37487, 37489), True, 'import matplotlib.pyplot as plt\n'), ((38256, 38265), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38263, 38265), True, 'import matplotlib.pyplot as plt\n'), ((38971, 38980), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38978, 38980), True, 'import matplotlib.pyplot as plt\n'), ((39676, 39685), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (39683, 39685), True, 'import matplotlib.pyplot as plt\n'), ((40366, 40375), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (40373, 40375), True, 'import matplotlib.pyplot as plt\n'), ((41134, 41143), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (41141, 41143), True, 'import matplotlib.pyplot as plt\n'), ((41904, 41913), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (41911, 41913), True, 'import matplotlib.pyplot as plt\n'), ((765, 782), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (780, 782), False, 'from datetime import datetime\n'), ((59257, 59295), 'numpy.max', 'np.max', (["fakepf[magcol][pfm]['lspvals']"], {}), "(fakepf[magcol][pfm]['lspvals'])\n", (59263, 59295), True, 'import numpy as np\n'), ((59584, 59622), 'numpy.max', 'np.max', (["fakepf[magcol][pfm]['lspvals']"], {}), "(fakepf[magcol][pfm]['lspvals'])\n", (59590, 59622), True, 'import numpy as np\n'), ((58532, 58567), 'numpy.array', 'np.array', (["pfres['recovery_periods']"], {}), "(pfres['recovery_periods'])\n", (58540, 58567), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os, sys
import numpy as np
from skimage import io
import random, uuid
import matplotlib.pyplot as plt
from pydaily import filesystem
def simulate_overlap(dataset_dir, cur_id, overlap_size=160, simulate_num = 50,
overlap_ratio_low=0.5, overlap_ratio_high=1.0):
overlap_dir = os.path.join(dataset_dir, "simu_train")
if not os.path.exists(overlap_dir):
os.makedirs(overlap_dir)
img_list = os.listdir(os.path.join(dataset_dir, cur_id+"use"))
mask_list = os.listdir(os.path.join(dataset_dir, cur_id+"mask"))
img_list.sort()
mask_list.sort()
success_num, try_num = 0, 0
while success_num < simulate_num:
try_num += 1
if try_num > 5000:
print("Try more than {} times, quit!".format(try_num))
break
cur_selection = random.sample(img_list, 2)
img1_path = os.path.join(dataset_dir, cur_id+"use", cur_selection[0])
img2_path = os.path.join(dataset_dir, cur_id+"use", cur_selection[1])
mask1_path = os.path.join(dataset_dir, cur_id+"mask", cur_selection[0])
mask2_path = os.path.join(dataset_dir, cur_id+"mask", cur_selection[1])
overlap_img = np.zeros((overlap_size, overlap_size), np.float32)
overlap_mask = np.zeros((overlap_size, overlap_size), np.uint8)
img1 = io.imread(img1_path)
h1, w1 = img1.shape[0], img1.shape[1]
img2 = io.imread(img2_path)
h2, w2 = img2.shape[0], img2.shape[1]
max_size = overlap_size - 3
if h1 > max_size or w1 > max_size or h2 > max_size or w2 > max_size:
continue
rand_h1 = random.choice(np.arange(overlap_size-h1))
rand_w1 = random.choice(np.arange(overlap_size-w1))
rand_h2 = random.choice(np.arange(overlap_size-h2))
rand_w2 = random.choice(np.arange(overlap_size-w2))
mask1 = io.imread(mask1_path) / 2
mask1 = mask1.astype(np.uint8)
mask2 = io.imread(mask2_path) / 2
mask2 = mask2.astype(np.uint8)
overlap_mask[rand_h1:rand_h1+h1, rand_w1:rand_w1+w1] += mask1
overlap_mask[rand_h2:rand_h2+h2, rand_w2:rand_w2+w2] += mask2
num_overlap = np.count_nonzero(overlap_mask == 254)
num_single = np.count_nonzero(overlap_mask == 127)
overlap_ratio = num_overlap * 2.0 / (num_single + num_overlap * 2.0)
if overlap_ratio > overlap_ratio_low and overlap_ratio < overlap_ratio_high:
extend_img1 = np.zeros((overlap_size, overlap_size), np.uint8)
extend_img1[rand_h1:rand_h1+h1, rand_w1:rand_w1+w1] = img1
overlap_img += extend_img1
extend_img2 = np.zeros((overlap_size, overlap_size), np.uint8)
extend_img2[rand_h2:rand_h2+h2, rand_w2:rand_w2+w2] = img2
overlap_img += extend_img2
overlap_r = overlap_mask == 254
overlap_img[overlap_r] = 0
inv_overlap_r = np.invert(overlap_r)
extend_img1[inv_overlap_r] = 0
extend_img2[inv_overlap_r] = 0
overlap_ratio1 = np.random.uniform(0.6, 1.0)
overlap_ratio2 = np.random.uniform(0.6, 1.0)
overlap_img += overlap_ratio1 * extend_img1
overlap_img += overlap_ratio2 * extend_img2
overlap_img[overlap_img > 255] = 255
overlap_img = overlap_img.astype(np.uint8)
cur_overlap_name = str(uuid.uuid4())[:8]
overlap_img_path = os.path.join(overlap_dir, cur_overlap_name + "_img.bmp")
overlap_mask_path = os.path.join(overlap_dir, cur_overlap_name + "_mask.bmp")
io.imsave(overlap_img_path, overlap_img)
io.imsave(overlap_mask_path, overlap_mask)
success_num += 1
else:
continue
if __name__ == "__main__":
dataset_dir = "../data/OverlapSeg/single_chromosomes"
img_ids = [str(id) for id in np.arange(1, 80)]
for cur_id in img_ids:
print("Processing {}".format(cur_id))
simulate_overlap(dataset_dir, cur_id, overlap_size=160, simulate_num = 10, overlap_ratio_low=0.5, overlap_ratio_high=1.0)
simulate_overlap(dataset_dir, cur_id, overlap_size=160, simulate_num = 10, overlap_ratio_low=0.3, overlap_ratio_high=0.5)
simulate_overlap(dataset_dir, cur_id, overlap_size=160, simulate_num = 10, overlap_ratio_low=0.1, overlap_ratio_high=0.3)
simulate_overlap(dataset_dir, cur_id, overlap_size=160, simulate_num = 30, overlap_ratio_low=-1.0, overlap_ratio_high=0.1)
| [
"numpy.random.uniform",
"uuid.uuid4",
"numpy.count_nonzero",
"os.makedirs",
"numpy.invert",
"skimage.io.imsave",
"random.sample",
"numpy.zeros",
"os.path.exists",
"numpy.arange",
"os.path.join",
"skimage.io.imread"
] | [((333, 372), 'os.path.join', 'os.path.join', (['dataset_dir', '"""simu_train"""'], {}), "(dataset_dir, 'simu_train')\n", (345, 372), False, 'import os, sys\n'), ((384, 411), 'os.path.exists', 'os.path.exists', (['overlap_dir'], {}), '(overlap_dir)\n', (398, 411), False, 'import os, sys\n'), ((421, 445), 'os.makedirs', 'os.makedirs', (['overlap_dir'], {}), '(overlap_dir)\n', (432, 445), False, 'import os, sys\n'), ((473, 514), 'os.path.join', 'os.path.join', (['dataset_dir', "(cur_id + 'use')"], {}), "(dataset_dir, cur_id + 'use')\n", (485, 514), False, 'import os, sys\n'), ((541, 583), 'os.path.join', 'os.path.join', (['dataset_dir', "(cur_id + 'mask')"], {}), "(dataset_dir, cur_id + 'mask')\n", (553, 583), False, 'import os, sys\n'), ((854, 880), 'random.sample', 'random.sample', (['img_list', '(2)'], {}), '(img_list, 2)\n', (867, 880), False, 'import random, uuid\n'), ((901, 960), 'os.path.join', 'os.path.join', (['dataset_dir', "(cur_id + 'use')", 'cur_selection[0]'], {}), "(dataset_dir, cur_id + 'use', cur_selection[0])\n", (913, 960), False, 'import os, sys\n'), ((979, 1038), 'os.path.join', 'os.path.join', (['dataset_dir', "(cur_id + 'use')", 'cur_selection[1]'], {}), "(dataset_dir, cur_id + 'use', cur_selection[1])\n", (991, 1038), False, 'import os, sys\n'), ((1058, 1118), 'os.path.join', 'os.path.join', (['dataset_dir', "(cur_id + 'mask')", 'cur_selection[0]'], {}), "(dataset_dir, cur_id + 'mask', cur_selection[0])\n", (1070, 1118), False, 'import os, sys\n'), ((1138, 1198), 'os.path.join', 'os.path.join', (['dataset_dir', "(cur_id + 'mask')", 'cur_selection[1]'], {}), "(dataset_dir, cur_id + 'mask', cur_selection[1])\n", (1150, 1198), False, 'import os, sys\n'), ((1220, 1270), 'numpy.zeros', 'np.zeros', (['(overlap_size, overlap_size)', 'np.float32'], {}), '((overlap_size, overlap_size), np.float32)\n', (1228, 1270), True, 'import numpy as np\n'), ((1294, 1342), 'numpy.zeros', 'np.zeros', (['(overlap_size, overlap_size)', 'np.uint8'], {}), 
'((overlap_size, overlap_size), np.uint8)\n', (1302, 1342), True, 'import numpy as np\n'), ((1359, 1379), 'skimage.io.imread', 'io.imread', (['img1_path'], {}), '(img1_path)\n', (1368, 1379), False, 'from skimage import io\n'), ((1441, 1461), 'skimage.io.imread', 'io.imread', (['img2_path'], {}), '(img2_path)\n', (1450, 1461), False, 'from skimage import io\n'), ((2211, 2248), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap_mask == 254)'], {}), '(overlap_mask == 254)\n', (2227, 2248), True, 'import numpy as np\n'), ((2270, 2307), 'numpy.count_nonzero', 'np.count_nonzero', (['(overlap_mask == 127)'], {}), '(overlap_mask == 127)\n', (2286, 2307), True, 'import numpy as np\n'), ((1677, 1705), 'numpy.arange', 'np.arange', (['(overlap_size - h1)'], {}), '(overlap_size - h1)\n', (1686, 1705), True, 'import numpy as np\n'), ((1737, 1765), 'numpy.arange', 'np.arange', (['(overlap_size - w1)'], {}), '(overlap_size - w1)\n', (1746, 1765), True, 'import numpy as np\n'), ((1797, 1825), 'numpy.arange', 'np.arange', (['(overlap_size - h2)'], {}), '(overlap_size - h2)\n', (1806, 1825), True, 'import numpy as np\n'), ((1857, 1885), 'numpy.arange', 'np.arange', (['(overlap_size - w2)'], {}), '(overlap_size - w2)\n', (1866, 1885), True, 'import numpy as np\n'), ((1902, 1923), 'skimage.io.imread', 'io.imread', (['mask1_path'], {}), '(mask1_path)\n', (1911, 1923), False, 'from skimage import io\n'), ((1983, 2004), 'skimage.io.imread', 'io.imread', (['mask2_path'], {}), '(mask2_path)\n', (1992, 2004), False, 'from skimage import io\n'), ((2497, 2545), 'numpy.zeros', 'np.zeros', (['(overlap_size, overlap_size)', 'np.uint8'], {}), '((overlap_size, overlap_size), np.uint8)\n', (2505, 2545), True, 'import numpy as np\n'), ((2682, 2730), 'numpy.zeros', 'np.zeros', (['(overlap_size, overlap_size)', 'np.uint8'], {}), '((overlap_size, overlap_size), np.uint8)\n', (2690, 2730), True, 'import numpy as np\n'), ((2952, 2972), 'numpy.invert', 'np.invert', (['overlap_r'], {}), 
'(overlap_r)\n', (2961, 2972), True, 'import numpy as np\n'), ((3088, 3115), 'numpy.random.uniform', 'np.random.uniform', (['(0.6)', '(1.0)'], {}), '(0.6, 1.0)\n', (3105, 3115), True, 'import numpy as np\n'), ((3145, 3172), 'numpy.random.uniform', 'np.random.uniform', (['(0.6)', '(1.0)'], {}), '(0.6, 1.0)\n', (3162, 3172), True, 'import numpy as np\n'), ((3473, 3529), 'os.path.join', 'os.path.join', (['overlap_dir', "(cur_overlap_name + '_img.bmp')"], {}), "(overlap_dir, cur_overlap_name + '_img.bmp')\n", (3485, 3529), False, 'import os, sys\n'), ((3562, 3619), 'os.path.join', 'os.path.join', (['overlap_dir', "(cur_overlap_name + '_mask.bmp')"], {}), "(overlap_dir, cur_overlap_name + '_mask.bmp')\n", (3574, 3619), False, 'import os, sys\n'), ((3632, 3672), 'skimage.io.imsave', 'io.imsave', (['overlap_img_path', 'overlap_img'], {}), '(overlap_img_path, overlap_img)\n', (3641, 3672), False, 'from skimage import io\n'), ((3685, 3727), 'skimage.io.imsave', 'io.imsave', (['overlap_mask_path', 'overlap_mask'], {}), '(overlap_mask_path, overlap_mask)\n', (3694, 3727), False, 'from skimage import io\n'), ((3913, 3929), 'numpy.arange', 'np.arange', (['(1)', '(80)'], {}), '(1, 80)\n', (3922, 3929), True, 'import numpy as np\n'), ((3424, 3436), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3434, 3436), False, 'import random, uuid\n')] |
import os
import random
import numpy as np
from PIL.Image import ANTIALIAS
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop, Adam
class Model:
def __init__(self, training_data, model_path):
self.training_data = training_data
self.input_shape = self.training_data[0].shape[1:]
self.classes = self.training_data[1].shape[1]
self.checkpoint_cb = ModelCheckpoint(model_path, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=5)
self.X = self.training_data[0]
self.Y = self.training_data[1]
self.model = self.build_model()
def build_model(self):
model = Sequential()
model.add(Conv2D(32, (3, 3), data_format="channels_last", input_shape=self.input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), data_format="channels_last"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), data_format="channels_last"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), data_format="channels_last"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(self.classes))
model.add(Activation('softmax'))
optimizer = Adam(lr=0.00001, decay=8e-08)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
def train(self, shuffle=True):
if shuffle:
idxes = [x for x in range(len(self.X))]
random.shuffle(idxes)
X_NEW = []
Y_NEW = []
for i in idxes:
X_NEW.append(self.X[i])
Y_NEW.append(self.Y[i])
X = np.array(X_NEW)
Y = np.array(Y_NEW)
acc = self.model.fit(
X,
Y,
batch_size=8,
epochs=300,
verbose=1,
callbacks=[self.checkpoint_cb],
validation_split=0.4
)
return acc
def predict(self, X):
answer = self.model.predict(np.array([X]))
idx = np.argmax(answer)
return self.classes[idx]
| [
"numpy.argmax",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Activation",
"keras.layers.Dropout",
"random.shuffle",
"keras.optimizers.Adam",
"keras.layers.Flatten",
"keras.layers.Dense",
"keras.layers.Conv2D",
"numpy.array",
"keras.models.Sequential",
"keras.layers.MaxPooling2D"
] | [((532, 665), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['model_path'], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(False)', 'save_weights_only': '(False)', 'mode': '"""auto"""', 'period': '(5)'}), "(model_path, monitor='val_loss', verbose=0, save_best_only=\n False, save_weights_only=False, mode='auto', period=5)\n", (547, 665), False, 'from keras.callbacks import ModelCheckpoint\n'), ((824, 836), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (834, 836), False, 'from keras.models import Sequential\n'), ((1793, 1820), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(1e-05)', 'decay': '(8e-08)'}), '(lr=1e-05, decay=8e-08)\n', (1797, 1820), False, 'from keras.optimizers import RMSprop, Adam\n'), ((2683, 2700), 'numpy.argmax', 'np.argmax', (['answer'], {}), '(answer)\n', (2692, 2700), True, 'import numpy as np\n'), ((855, 932), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'data_format': '"""channels_last"""', 'input_shape': 'self.input_shape'}), "(32, (3, 3), data_format='channels_last', input_shape=self.input_shape)\n", (861, 932), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((952, 970), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (962, 970), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((990, 1020), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1002, 1020), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1041, 1088), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'data_format': '"""channels_last"""'}), "(32, (3, 3), data_format='channels_last')\n", (1047, 1088), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1109, 1127), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1119, 1127), 
False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1147, 1177), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1159, 1177), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1198, 1245), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'data_format': '"""channels_last"""'}), "(64, (3, 3), data_format='channels_last')\n", (1204, 1245), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1266, 1284), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1276, 1284), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1304, 1334), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1316, 1334), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1363, 1410), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'data_format': '"""channels_last"""'}), "(64, (3, 3), data_format='channels_last')\n", (1369, 1410), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1431, 1449), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1441, 1449), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1469, 1499), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1481, 1499), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1521, 1530), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1528, 1530), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1609, 1619), 'keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (1614, 1619), False, 'from 
keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1639, 1657), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1649, 1657), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1677, 1689), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1684, 1689), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1710, 1729), 'keras.layers.Dense', 'Dense', (['self.classes'], {}), '(self.classes)\n', (1715, 1729), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((1749, 1770), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1759, 1770), False, 'from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout\n'), ((2108, 2129), 'random.shuffle', 'random.shuffle', (['idxes'], {}), '(idxes)\n', (2122, 2129), False, 'import random\n'), ((2302, 2317), 'numpy.array', 'np.array', (['X_NEW'], {}), '(X_NEW)\n', (2310, 2317), True, 'import numpy as np\n'), ((2334, 2349), 'numpy.array', 'np.array', (['Y_NEW'], {}), '(Y_NEW)\n', (2342, 2349), True, 'import numpy as np\n'), ((2654, 2667), 'numpy.array', 'np.array', (['[X]'], {}), '([X])\n', (2662, 2667), True, 'import numpy as np\n')] |
"""The :mod:`variation` module defines classes for variation operators.
Variation operators (aka genetic operators) are used in evolutionary/genetic
algorithms to create "child" genomes from "parent" genomes.
"""
from abc import ABC, abstractmethod
from typing import Sequence, Union
import math
from copy import copy
from numpy.random import random, choice
from pyshgp.push.types import PushType
from pyshgp.push.atoms import Literal
from pyshgp.gp.genome import Genome, GeneSpawner
from pyshgp.utils import DiscreteProbDistrib, instantiate_using
class VariationOperator(ABC):
"""Base class of all VariationOperators.
Parameters
----------
num_parents : int
Number of parent Genomes the operator needs to produce a child
Individual.
Attributes
----------
num_parents : int
Number of parent Genomes the operator needs to produce a child
Individual.
"""
def __init__(self, num_parents: int):
self.num_parents = num_parents
def checknum_parents(self, parents: Sequence[Genome]):
"""Raise error if given too few parents.
Parameters
----------
parents
A list of parent Genomes given to the operator.
"""
if not len(parents) >= self.num_parents:
raise ValueError("Variation operator given {a} parents. Expected {e}.".format(
a=len(parents),
e=self.num_parents)
)
@abstractmethod
def produce(self, parents: Sequence[Genome], spawner: GeneSpawner = None) -> Genome:
"""Produce a child Genome from parent Genomes and optional GenomeSpawner.
Parameters
----------
parents
A list of parent Genomes given to the operator.
spawner
A GeneSpawner that can be used to produce new genes (aka Atoms).
"""
pass
class VariationStrategy(DiscreteProbDistrib):
"""A collection of VariationOperator and how frequently to use them."""
def add(self, op: VariationOperator, p: float):
"""Add an element with a relative probability.
Parameters
----------
op : VariationOperator
The VariationOperator to add to the variaiton strategy.
p : float
The probability of using the given operator relative to the other
operators that have been added to the VariationStrategy.
"""
super().add(op, p)
class VariationPipeline(VariationOperator):
    """Variation operator that applies several other variation operators in sequence.

    Parameters
    ----------
    operators : list of VariationOperators
        A list of operators to apply in order to produce the child Genome.

    Attributes
    ----------
    operators : list of VariationOperators
        A list of operators to apply in order to produce the child Genome.
    num_parents : int
        Number of parent Genomes the operator needs to produce a child
        Individual.
    """

    def __init__(self, operators: Sequence[VariationOperator]):
        # The pipeline needs as many parents as its hungriest operator.
        super().__init__(max(op.num_parents for op in operators))
        self.operators = operators

    def produce(self, parents: Sequence[Genome], spawner: GeneSpawner) -> Genome:
        """Produce a child Genome from parent Genomes and optional GenomeSpawner.

        Parameters
        ----------
        parents
            A list of parent Genomes given to the operator.
        spawner
            A GeneSpawner that can be used to produce new genes (aka Atoms).
        """
        self.checknum_parents(parents)
        child = parents[0].copy()
        extra_parents = parents[1:]
        # Feed the running child back in as the first parent of each stage.
        for operator in self.operators:
            child = operator.produce([child] + extra_parents, spawner)
        return child
# Utilities
def _gaussian_noise_factor():
"""Return Gaussian noise of mean 0, std dev 1.
Returns
--------
Float samples from Gaussian distribution.
Examples
--------
>>> gaussian_noise_factor()
1.43412557975
>>> gaussian_noise_factor()
-0.0410900866765
"""
return math.sqrt(-2.0 * math.log(random())) * math.cos(2.0 * math.pi * random())
# Mutations
# @TODO: Implement all the common literal mutations.
class LiteralMutation(VariationOperator, ABC):
    """Base class for mutations of literal Atoms.

    Parameters
    ----------
    push_type : pyshgp.push.types.PushType
        The PushType which the operator can mutate.
    rate : float
        The probability of applying the mutation to a given Literal.

    Attributes
    ----------
    push_type : pyshgp.push.types.PushType
        The PushType which the operator can mutate.
    rate : float
        The probability of applying the mutation to a given Literal.
    num_parents : int
        Number of parent Genomes the operator needs to produce a child
        Individual.
    """
    def __init__(self, push_type: PushType, rate: float = 0.01):
        super().__init__(1)
        self.rate = rate
        self.push_type = push_type
    @abstractmethod
    def _mutate_literal(self, literal: Literal) -> Literal:
        # Fix: the abstract signature was missing ``self``. ``produce`` invokes
        # it as a bound method (``self._mutate_literal(atom)``), so concrete
        # subclasses must implement it as an instance method.
        ...
    def produce(self, parents: Sequence[Genome], spawner: GeneSpawner = None) -> Genome:
        """Produce a child Genome from parent Genomes and optional GenomeSpawner.

        Parameters
        ----------
        parents
            A list of parent Genomes given to the operator.
        spawner
            A GeneSpawner that can be used to produce new genes (aka Atoms).
        """
        self.checknum_parents(parents)
        new_genome = Genome()
        for atom in parents[0]:
            # Mutate only Literals whose PushType matches, each with
            # probability ``self.rate``; everything else is copied through.
            if isinstance(atom, Literal) and self.push_type == atom.push_type and random() < self.rate:
                new_atom = self._mutate_literal(atom)
            else:
                new_atom = atom
            new_genome.append(new_atom)
        return new_genome
class DeletionMutation(VariationOperator):
    """Uniformly at random removes some Atoms from the parent.

    Parameters
    ----------
    rate : float
        The probability of removing any given Atom in the parent Genome.
        Default is 0.01.

    Attributes
    ----------
    rate : float
        The probability of removing any given Atom in the parent Genome.
        Default is 0.01.
    num_parents : int
        Number of parent Genomes the operator needs to produce a child
        Individual.
    """

    def __init__(self, deletion_rate: float = 0.01):
        super().__init__(1)
        self.rate = deletion_rate

    def produce(self, parents: Sequence[Genome], spawner: GeneSpawner) -> Genome:
        """Produce a child Genome from parent Genomes and optional GenomeSpawner.

        Parameters
        ----------
        parents
            A list of parent Genomes given to the operator.
        spawner
            A GeneSpawner that can be used to produce new genes (aka Atoms).
        """
        self.checknum_parents(parents)
        child = Genome()
        for gene in parents[0]:
            # Keep the gene unless the coin flip says to drop it.
            if random() >= self.rate:
                child.append(gene)
        return child
class AdditionMutation(VariationOperator):
    """Uniformly at random adds some Atoms to the parent.

    Parameters
    ----------
    rate : float
        The probability of adding a new Atom at any given point in the parent
        Genome. Default is 0.01.

    Attributes
    ----------
    rate : float
        The probability of adding a new Atom at any given point in the parent
        Genome. Default is 0.01.
    num_parents : int
        Number of parent Genomes the operator needs to produce a child
        Individual.
    """

    def __init__(self, addition_rate: float = 0.01):
        super().__init__(1)
        self.rate = addition_rate

    def produce(self, parents: Sequence[Genome], spawner: GeneSpawner) -> Genome:
        """Produce a child Genome from parent Genomes and optional GenomeSpawner.

        Parameters
        ----------
        parents
            A list of parent Genomes given to the operator.
        spawner
            A GeneSpawner that can be used to produce new genes (aka Atoms).
        """
        self.checknum_parents(parents)
        child = Genome()
        for gene in parents[0]:
            # With probability ``rate`` insert a fresh Atom before this gene.
            if random() < self.rate:
                child.append(spawner.spawn_atom())
            child.append(gene)
        return child
# Recombinations
class Alternation(VariationOperator):
    """Uniformly alternates between the two parent genomes.
    Parameters
    ----------
    rate : float, optional (default=0.01)
        The probability of switching which parent program elements are being
        copied from. Must be 0 <= rate <= 1. Defaults to 0.01.
    alignment_deviation : int, optional (default=10)
        The standard deviation of how far alternation may jump between indices
        when switching between parents.
    Attributes
    ----------
    rate : float, optional (default=0.01)
        The probability of switching which parent program elements are being
        copied from. Must be 0 <= rate <= 1. Defaults to 0.01.
    alignment_deviation : int, optional (default=10)
        The standard deviation of how far alternation may jump between indices
        when switching between parents.
    num_parents : int
        Number of parent Genomes the operator needs to produce a child
        Individual.
    """
    def __init__(self, alternation_rate=0.01, alignment_deviation=10):
        super().__init__(2)
        self.rate = alternation_rate
        self.alignment_deviation = alignment_deviation
    def produce(self, parents: Sequence[Genome], spawner: GeneSpawner = None) -> Genome:
        """Produce a child Genome from parent Genomes and optional GenomeSpawner.
        Parameters
        ----------
        parents
            A list of parent Genomes given to the operator.
        spawner
            A GeneSpawner that can be used to produce new genes (aka Atoms).
        """
        self.checknum_parents(parents)
        gn1 = parents[0].copy()
        gn2 = parents[1].copy()
        new_genome = Genome()
        # Random pick which parent to start from
        use_parent_1 = choice([True, False])
        loop_times = len(gn1)
        if not use_parent_1:
            loop_times = len(gn2)
        i = 0
        while (i < loop_times):
            if random() < self.rate:
                # Switch which parent we are pulling genes from.
                # The index may jump forward or backward by a Gaussian amount;
                # it is clamped at 0 below, and jumping past the end simply
                # terminates the loop (the while condition fails).
                i += round(self.alignment_deviation * _gaussian_noise_factor())
                i = int(max(0, i))
                use_parent_1 = not use_parent_1
            else:
                # Pull gene from parent
                if use_parent_1:
                    new_genome.append(gn1[i])
                else:
                    new_genome.append(gn2[i])
                i = int(i + 1)
            # Change loop stop condition to track the currently-active parent,
            # since the two parents may have different lengths.
            loop_times = len(gn1)
            if not use_parent_1:
                loop_times = len(gn2)
        return new_genome
# Other
class Genesis(VariationOperator):
    """Creates an entirely new (and random) genome; any parents are ignored.
    Parameters
    ----------
    size
        The child genome will contain this many Atoms if size is an integer.
        If size is a pair of integers, the genome will be of a random
        size in the range of the two integers.
    Attributes
    ----------
    size
        The child genome will contain this many Atoms if size is an integer.
        If size is a pair of integers, the genome will be of a random
        size in the range of the two integers.
    num_parents : int
        Number of parent Genomes the operator needs to produce a child
        Individual. Zero for this operator.
    """
    def __init__(self, *, size: Union[int, Sequence[int]]):
        super().__init__(0)
        self.size = size
    def produce(self, parents: Sequence[Genome], spawner: GeneSpawner) -> Genome:
        """Spawn a brand new random Genome of ``self.size``.
        Parameters
        ----------
        parents
            A list of parent Genomes given to the operator (unused here).
        spawner
            A GeneSpawner that can be used to produce new genes (aka Atoms).
        """
        return spawner.spawn_genome(self.size)
class Cloning(VariationOperator):
    """Clones the parent genome.

    Attributes
    ----------
    num_parents : int
        Number of parent Genomes the operator needs to produce a child
        Individual.
    """
    def __init__(self):
        super().__init__(1)
    def produce(self, parents: Sequence[Genome], spawner: GeneSpawner = None) -> Genome:
        """Return a shallow copy of the first parent Genome.

        Parameters
        ----------
        parents
            A list of parent Genomes given to the operator.
        spawner
            A GeneSpawner that can be used to produce new genes (aka Atoms).
        """
        # Bug fix: the module imports ``from copy import copy``, so ``copy`` is
        # the function itself. The previous ``copy.copy(parents[0])`` raised
        # AttributeError ('function' object has no attribute 'copy').
        return copy(parents[0])
def get_variation_operator(name: str, **kwargs) -> VariationOperator:
    """Get the variation operator with the given name.

    Parameters
    ----------
    name : str
        Name of the operator, e.g. ``"alternation"`` or ``"umad"``.
    **kwargs
        Keyword arguments forwarded to the operator's constructor when the
        name maps to a class (they are ignored for the preconfigured
        ``umad*`` pipelines, which are returned as-is).

    Raises
    ------
    ValueError
        If ``name`` does not correspond to a known operator.
    """
    # NOTE: the "umad*" pipeline instances are built eagerly on every call,
    # even when a different name is requested.
    name_to_cls = {
        "deletion": DeletionMutation,
        "addition": AdditionMutation,
        "alternation": Alternation,
        "genesis": Genesis,
        "cloning": Cloning,
        # UMAD citation: https://dl.acm.org/citation.cfm?id=3205455.3205603
        "umad": VariationPipeline([AdditionMutation(0.09), DeletionMutation(0.0826)]),
        "umad-shrink": VariationPipeline([AdditionMutation(0.09), DeletionMutation(0.1)]),
        "umad-grow": VariationPipeline([AdditionMutation(0.09), DeletionMutation(0.0652)])
    }
    op = name_to_cls.get(name, None)
    if op is None:
        # Typo fix: message previously read "No varition operator".
        raise ValueError("No variation operator '{nm}'. Supported names: {lst}.".format(
            nm=name,
            lst=list(name_to_cls.keys())
        ))
    # Entries mapping to a class are instantiated with the given kwargs;
    # preconfigured pipeline instances are returned directly.
    if isinstance(op, type):
        op = instantiate_using(op, kwargs)
    return op
| [
"pyshgp.gp.genome.Genome",
"copy.copy.copy",
"pyshgp.utils.instantiate_using",
"numpy.random.random",
"numpy.random.choice"
] | [((5615, 5623), 'pyshgp.gp.genome.Genome', 'Genome', ([], {}), '()\n', (5621, 5623), False, 'from pyshgp.gp.genome import Genome, GeneSpawner\n'), ((7011, 7019), 'pyshgp.gp.genome.Genome', 'Genome', ([], {}), '()\n', (7017, 7019), False, 'from pyshgp.gp.genome import Genome, GeneSpawner\n'), ((8278, 8286), 'pyshgp.gp.genome.Genome', 'Genome', ([], {}), '()\n', (8284, 8286), False, 'from pyshgp.gp.genome import Genome, GeneSpawner\n'), ((10193, 10201), 'pyshgp.gp.genome.Genome', 'Genome', ([], {}), '()\n', (10199, 10201), False, 'from pyshgp.gp.genome import Genome, GeneSpawner\n'), ((10274, 10295), 'numpy.random.choice', 'choice', (['[True, False]'], {}), '([True, False])\n', (10280, 10295), False, 'from numpy.random import random, choice\n'), ((13024, 13045), 'copy.copy.copy', 'copy.copy', (['parents[0]'], {}), '(parents[0])\n', (13033, 13045), False, 'from copy import copy\n'), ((13979, 14008), 'pyshgp.utils.instantiate_using', 'instantiate_using', (['op', 'kwargs'], {}), '(op, kwargs)\n', (13996, 14008), False, 'from pyshgp.utils import DiscreteProbDistrib, instantiate_using\n'), ((4198, 4206), 'numpy.random.random', 'random', ([], {}), '()\n', (4204, 4206), False, 'from numpy.random import random, choice\n'), ((7067, 7075), 'numpy.random.random', 'random', ([], {}), '()\n', (7073, 7075), False, 'from numpy.random import random, choice\n'), ((8334, 8342), 'numpy.random.random', 'random', ([], {}), '()\n', (8340, 8342), False, 'from numpy.random import random, choice\n'), ((10450, 10458), 'numpy.random.random', 'random', ([], {}), '()\n', (10456, 10458), False, 'from numpy.random import random, choice\n'), ((4160, 4168), 'numpy.random.random', 'random', ([], {}), '()\n', (4166, 4168), False, 'from numpy.random import random, choice\n'), ((5738, 5746), 'numpy.random.random', 'random', ([], {}), '()\n', (5744, 5746), False, 'from numpy.random import random, choice\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
ELEC-E5500 Speech Processing -- Autumn 2020 Python Exercise 1:
Basics of speech processing and analysis in Python.
Recommended to use a virtual environment to have a clear management of the libraries used in the exercises.
Python version: 3.5 or higher
To make sure all the packages are up-to-date for the exercise, run the script Update_Packages_ex1.py.
"""
import os.path as path
import scipy.io.wavfile as wav
import scipy.signal as sig
import numpy as np
import ex1_windowing as win
import matplotlib.pyplot as pl
# custom function for creating a spectrogram
def fm_to_spectrogram(frame_matrix, hop_size):
    """Turn a matrix of windowed frames into a dB-scaled magnitude spectrogram.

    Each column of ``frame_matrix`` is treated as one frame. The result has
    one row per rFFT bin and one column per frame. ``hop_size`` is accepted
    for interface compatibility but is not used in the computation.
    """
    magnitudes = [np.abs(np.fft.rfft(frame)) for frame in frame_matrix.T]
    # Stack frames as columns and convert amplitude to decibels.
    return 20 * np.log10(np.array(magnitudes).T)
# 1.1. Read the audio file bernard_speech.wav and sampling rate
file_path = path.join(".", "Sounds")
sound_file = path.join(file_path, "bernard_speech.wav")
Fs, in_sig = wav.read(sound_file)  # Read audio file
# 1.2. Make sure the sampling rate is 16kHz, resample if necessary
Fs_target = 16000
if Fs != Fs_target:
    # Resample so the total duration (in seconds) is preserved.
    in_sig = sig.resample(in_sig, int(np.round(Fs_target * (in_sig.shape[0] / Fs))))
    Fs = Fs_target
## 1.3. Split the data sequence into windows.
# Implement windowing function in ex1_windowing.py
frame_length = int(np.around(0.025 * Fs))  # 25ms in samples
hop_size = int(np.around(0.0125 * Fs))  # 12.5 ms in samples (50% overlap)
window_types = ("rect", "hann", "cosine", "hamming")
frame_matrix = win.ex1_windowing(
    in_sig, frame_length, hop_size, window_types[3]
)  # Windowing with a Hamming window
# 1.4. Visualization. Create a new figure with three subplots.
pl.figure(1)
## 1.4.1. Plot the whole signal into subplot 1. Denote x-axis as time in seconds and y-axis as Amplitude.
### Set appropriate strings to title, xlabel and ylabel
pl.subplot(3, 1, 1)
pl.plot(in_sig)
pl.xticks(
    np.arange(0, in_sig.shape[0] + 1, step=Fs),
    np.arange(0, in_sig.shape[0] / Fs + 1, step=1.0),
)
pl.yticks(
    np.arange(-10000, 10000 + 1, step=2500), np.arange(-10000, 10000 + 1, step=2500)
)
pl.title("Original signal")
pl.xlabel("Time in seconds")
pl.ylabel("Amplitude")
## 1.4.2. Plot a VOICED frame from frame_matrix into subplot 2. Denote x-axis as milliseconds.
### Set appropriate strings to title, xlabel and ylabel
pl.subplot(3, 1, 2)
## randomly pick a voiced frame (frame with average above the given threshold)
# NOTE(review): the plot below indexes frames as columns (frame_matrix.T[idx])
# while this voiced check indexes rows (frame_matrix[idx]) and randint uses
# shape[0] — these look inconsistent; moot in practice since frame_idx is
# overwritten with 35 below, but worth confirming against ex1_windowing's
# output orientation.
voiced = False
while not voiced:
    frame_idx = np.random.randint(0, frame_matrix.shape[0])
    if np.abs(np.mean(frame_matrix[frame_idx])) > 80:
        voiced = True
## test frame (overrides the randomly chosen index above)
frame_idx = 35
pl.plot(frame_matrix.T[frame_idx])
pl.xticks(np.arange(0, 401, 16 * 5), np.arange(0, 26, 5))
pl.yticks()
pl.title("25 ms segment of a voiced frame")
pl.xlabel("Time in miliseconds")
pl.ylabel("Amplitude")
## 1.4.3. Plot the magnitude spectrum of the same frame as in 1.4.2. into
## subplot 3. Denote x-axis as Hz, and y-axis as decibels.
### Set appropriate strings to title, xlabel and ylabel
pl.subplot(3, 1, 3)
magnitude_spectrum = 20 * np.log10(np.abs(np.fft.rfft(frame_matrix.T[frame_idx])))
pl.plot(magnitude_spectrum)
## builtin matplotlib function (same result)
# pl.magnitude_spectrum(frame_matrix[frame_idx], Fs=Fs, scale="dB")
pl.xticks(np.arange(0, 201, step=25), np.arange(0, 9, step=1))
pl.title("Magnitude spectrum of the same frame")
pl.xlabel("Frequency (kHz)")
pl.ylabel("Amplitude (dB)")
pl.show(block=False)
## 1.4.4. Compute and plot the spectrogram of the whole signal into a new
## figure. Denote x-axis as frame number and y-axis as Frequency in Hz
### Set appropriate strings to title, xlabel and ylabel
pl.figure(2)
spectrogram = fm_to_spectrogram(frame_matrix, hop_size)
# pl.specgram(in_sig, Fs=Fs, scale="dB")
pl.imshow(spectrogram, origin="lower")
pl.yticks(np.arange(0, 201, 50), np.arange(0, 8001, 2000))
pl.xlabel("Frame number")
pl.ylabel("Frequency (Hz)")
pl.show()
| [
"matplotlib.pyplot.title",
"numpy.fft.rfft",
"scipy.io.wavfile.read",
"numpy.around",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.arange",
"numpy.mean",
"numpy.round",
"os.path.join",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"numpy.log10",
"matplotlib.pyplot.s... | [((1607, 1631), 'os.path.join', 'path.join', (['"""."""', '"""Sounds"""'], {}), "('.', 'Sounds')\n", (1616, 1631), True, 'import os.path as path\n'), ((1645, 1687), 'os.path.join', 'path.join', (['file_path', '"""bernard_speech.wav"""'], {}), "(file_path, 'bernard_speech.wav')\n", (1654, 1687), True, 'import os.path as path\n'), ((1701, 1721), 'scipy.io.wavfile.read', 'wav.read', (['sound_file'], {}), '(sound_file)\n', (1709, 1721), True, 'import scipy.io.wavfile as wav\n'), ((2255, 2321), 'ex1_windowing.ex1_windowing', 'win.ex1_windowing', (['in_sig', 'frame_length', 'hop_size', 'window_types[3]'], {}), '(in_sig, frame_length, hop_size, window_types[3])\n', (2272, 2321), True, 'import ex1_windowing as win\n'), ((2406, 2418), 'matplotlib.pyplot.figure', 'pl.figure', (['(1)'], {}), '(1)\n', (2415, 2418), True, 'import matplotlib.pyplot as pl\n'), ((2581, 2600), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (2591, 2600), True, 'import matplotlib.pyplot as pl\n'), ((2601, 2616), 'matplotlib.pyplot.plot', 'pl.plot', (['in_sig'], {}), '(in_sig)\n', (2608, 2616), True, 'import matplotlib.pyplot as pl\n'), ((2830, 2857), 'matplotlib.pyplot.title', 'pl.title', (['"""Original signal"""'], {}), "('Original signal')\n", (2838, 2857), True, 'import matplotlib.pyplot as pl\n'), ((2858, 2886), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Time in seconds"""'], {}), "('Time in seconds')\n", (2867, 2886), True, 'import matplotlib.pyplot as pl\n'), ((2887, 2909), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (2896, 2909), True, 'import matplotlib.pyplot as pl\n'), ((3062, 3081), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (3072, 3081), True, 'import matplotlib.pyplot as pl\n'), ((3362, 3396), 'matplotlib.pyplot.plot', 'pl.plot', (['frame_matrix.T[frame_idx]'], {}), '(frame_matrix.T[frame_idx])\n', (3369, 3396), True, 'import 
matplotlib.pyplot as pl\n'), ((3455, 3466), 'matplotlib.pyplot.yticks', 'pl.yticks', ([], {}), '()\n', (3464, 3466), True, 'import matplotlib.pyplot as pl\n'), ((3467, 3510), 'matplotlib.pyplot.title', 'pl.title', (['"""25 ms segment of a voiced frame"""'], {}), "('25 ms segment of a voiced frame')\n", (3475, 3510), True, 'import matplotlib.pyplot as pl\n'), ((3511, 3543), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Time in miliseconds"""'], {}), "('Time in miliseconds')\n", (3520, 3543), True, 'import matplotlib.pyplot as pl\n'), ((3544, 3566), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (3553, 3566), True, 'import matplotlib.pyplot as pl\n'), ((3757, 3776), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (3767, 3776), True, 'import matplotlib.pyplot as pl\n'), ((3860, 3887), 'matplotlib.pyplot.plot', 'pl.plot', (['magnitude_spectrum'], {}), '(magnitude_spectrum)\n', (3867, 3887), True, 'import matplotlib.pyplot as pl\n'), ((4066, 4114), 'matplotlib.pyplot.title', 'pl.title', (['"""Magnitude spectrum of the same frame"""'], {}), "('Magnitude spectrum of the same frame')\n", (4074, 4114), True, 'import matplotlib.pyplot as pl\n'), ((4115, 4143), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Frequency (kHz)"""'], {}), "('Frequency (kHz)')\n", (4124, 4143), True, 'import matplotlib.pyplot as pl\n'), ((4144, 4171), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Amplitude (dB)"""'], {}), "('Amplitude (dB)')\n", (4153, 4171), True, 'import matplotlib.pyplot as pl\n'), ((4174, 4194), 'matplotlib.pyplot.show', 'pl.show', ([], {'block': '(False)'}), '(block=False)\n', (4181, 4194), True, 'import matplotlib.pyplot as pl\n'), ((4396, 4408), 'matplotlib.pyplot.figure', 'pl.figure', (['(2)'], {}), '(2)\n', (4405, 4408), True, 'import matplotlib.pyplot as pl\n'), ((4506, 4544), 'matplotlib.pyplot.imshow', 'pl.imshow', (['spectrogram'], {'origin': '"""lower"""'}), "(spectrogram, 
origin='lower')\n", (4515, 4544), True, 'import matplotlib.pyplot as pl\n'), ((4606, 4631), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Frame number"""'], {}), "('Frame number')\n", (4615, 4631), True, 'import matplotlib.pyplot as pl\n'), ((4632, 4659), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (4641, 4659), True, 'import matplotlib.pyplot as pl\n'), ((4661, 4670), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (4668, 4670), True, 'import matplotlib.pyplot as pl\n'), ((2070, 2091), 'numpy.around', 'np.around', (['(0.025 * Fs)'], {}), '(0.025 * Fs)\n', (2079, 2091), True, 'import numpy as np\n'), ((2127, 2149), 'numpy.around', 'np.around', (['(0.0125 * Fs)'], {}), '(0.0125 * Fs)\n', (2136, 2149), True, 'import numpy as np\n'), ((2632, 2674), 'numpy.arange', 'np.arange', (['(0)', '(in_sig.shape[0] + 1)'], {'step': 'Fs'}), '(0, in_sig.shape[0] + 1, step=Fs)\n', (2641, 2674), True, 'import numpy as np\n'), ((2680, 2728), 'numpy.arange', 'np.arange', (['(0)', '(in_sig.shape[0] / Fs + 1)'], {'step': '(1.0)'}), '(0, in_sig.shape[0] / Fs + 1, step=1.0)\n', (2689, 2728), True, 'import numpy as np\n'), ((2747, 2786), 'numpy.arange', 'np.arange', (['(-10000)', '(10000 + 1)'], {'step': '(2500)'}), '(-10000, 10000 + 1, step=2500)\n', (2756, 2786), True, 'import numpy as np\n'), ((2788, 2827), 'numpy.arange', 'np.arange', (['(-10000)', '(10000 + 1)'], {'step': '(2500)'}), '(-10000, 10000 + 1, step=2500)\n', (2797, 2827), True, 'import numpy as np\n'), ((3211, 3254), 'numpy.random.randint', 'np.random.randint', (['(0)', 'frame_matrix.shape[0]'], {}), '(0, frame_matrix.shape[0])\n', (3228, 3254), True, 'import numpy as np\n'), ((3407, 3432), 'numpy.arange', 'np.arange', (['(0)', '(401)', '(16 * 5)'], {}), '(0, 401, 16 * 5)\n', (3416, 3432), True, 'import numpy as np\n'), ((3434, 3453), 'numpy.arange', 'np.arange', (['(0)', '(26)', '(5)'], {}), '(0, 26, 5)\n', (3443, 3453), True, 'import numpy as np\n'), ((4013, 
4039), 'numpy.arange', 'np.arange', (['(0)', '(201)'], {'step': '(25)'}), '(0, 201, step=25)\n', (4022, 4039), True, 'import numpy as np\n'), ((4041, 4064), 'numpy.arange', 'np.arange', (['(0)', '(9)'], {'step': '(1)'}), '(0, 9, step=1)\n', (4050, 4064), True, 'import numpy as np\n'), ((4555, 4576), 'numpy.arange', 'np.arange', (['(0)', '(201)', '(50)'], {}), '(0, 201, 50)\n', (4564, 4576), True, 'import numpy as np\n'), ((4578, 4602), 'numpy.arange', 'np.arange', (['(0)', '(8001)', '(2000)'], {}), '(0, 8001, 2000)\n', (4587, 4602), True, 'import numpy as np\n'), ((1445, 1464), 'numpy.array', 'np.array', (['spectrums'], {}), '(spectrums)\n', (1453, 1464), True, 'import numpy as np\n'), ((1488, 1507), 'numpy.log10', 'np.log10', (['spectrums'], {}), '(spectrums)\n', (1496, 1507), True, 'import numpy as np\n'), ((1374, 1392), 'numpy.fft.rfft', 'np.fft.rfft', (['frame'], {}), '(frame)\n', (1385, 1392), True, 'import numpy as np\n'), ((1886, 1930), 'numpy.round', 'np.round', (['(Fs_target * (in_sig.shape[0] / Fs))'], {}), '(Fs_target * (in_sig.shape[0] / Fs))\n', (1894, 1930), True, 'import numpy as np\n'), ((3269, 3301), 'numpy.mean', 'np.mean', (['frame_matrix[frame_idx]'], {}), '(frame_matrix[frame_idx])\n', (3276, 3301), True, 'import numpy as np\n'), ((3819, 3857), 'numpy.fft.rfft', 'np.fft.rfft', (['frame_matrix.T[frame_idx]'], {}), '(frame_matrix.T[frame_idx])\n', (3830, 3857), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# ##### Copyright 2019 The TensorFlow Authors.
# In[ ]:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # tf.data API로 성능 향상하기
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/data_performance"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/data_performance.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/data_performance.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a>
# </td>
# </table>
# Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
# 불구하고 [공식 영문 문서](https://www.tensorflow.org/?hl=en)의 내용과 일치하지 않을 수 있습니다.
# 이 번역에 개선할 부분이 있다면
# [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n/) 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
# 문서 번역이나 리뷰에 참여하려면
# [<EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ko)로
# 메일을 보내주시기 바랍니다.
# ## 개요
#
# GPU와 TPU는 하나의 학습 단계를 실행하는데 필요한 시간을 급격하게 줄일 수 있습니다. 최대 성능을 위해서는 현재 단계가 종료되기 전에 다음 스텝의 데이터를 운반하는 효율적인 입력 파이프라인이 필요합니다.`tf.data` API는 유연하고 효율적인 입력 파이프라인을 만드는데 도움이 됩니다. 이 문서는 다양한 모델과 가속기에서 고성능의 텐서플로 입력 파이프라인을 만드는 방법과 `tf.data` API의 특정을 설명합니다.
#
# 진행하기 전에, `tf.data` API 사용법을 익히려면 "[텐서플로 입력 파이프라인 빌드하기](./data.ipynb)" 가이드를 읽으십시오.
# ## 참고 자료
#
# * [텐서플로 입력 파이프라인 빌드하기](./data.ipynb)
# * `tf.data.Dataset` API
# ## 설정
# In[ ]:
import tensorflow as tf
import time
# 전반적인 가이드에서는 데이터셋을 반복하고 성능을 측정합니다.
# 재현 가능한 성능 벤치마크를 만드는 것은 그것에 영향을 미치는 다른 요인들로 인해 어려울 수 있습니다. 그 요인들로는:
#
# - 현재 CPU 로드,
# - 네트워크 트래픽,
# - 캐시와 같은 복잡한 메커니즘 등이 있습니다.
#
# 따라서 재현 가능한 벤치마크를 제공하기 위해 인공 예제를 빌드합니다.
# ### 데이터셋
#
# `tf.data.Dataset`에서 상속하여 `ArtificialDataset`이라 불리는 클래스를 정의합니다.
# 이 데이터셋은:
#
# - `num_samples`(기본값은 3)개의 샘플을 생성하기
# - 첫 번째 항목이 파일 열기를 시뮬레이션하기 전에 일정 시간 동안 휴면
# - 파일에서 데이터 읽기를 시뮬레이션하기 위해 각 항목을 생성하기 전에 일정 시간 동안 휴면
# In[ ]:
class ArtificialDataset(tf.data.Dataset):
    # Synthetic dataset simulating slow file I/O: a 30 ms "file open" delay,
    # then a 15 ms "record read" delay per yielded sample.
    def _generator(num_samples):
        # Simulate opening the file (one-off cost).
        time.sleep(0.03)
        for sample_idx in range(num_samples):
            # Simulate reading one record (line) from the file.
            time.sleep(0.015)
            yield (sample_idx,)
    def __new__(cls, num_samples=3):
        # Build the actual tf.data.Dataset from the generator above;
        # each element is a length-1 int64 vector.
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=tf.dtypes.int64,
            output_shapes=(1,),
            args=(num_samples,)
        )
# 이 데이터셋은 `tf.data.Dataset.range`와 유사하며 각 샘플의 시작과 사이에 일정한 지연시간을 추가합니다.
# ### 훈련 루프
#
# 데이터셋을 반복하는 데 걸리는 시간을 측정하는 더미 훈련 루프를 작성합니다.
# 훈련 시간이 시뮬레이션됩니다.
# In[ ]:
def benchmark(dataset, num_epochs=2):
    """Iterate ``dataset`` for ``num_epochs`` epochs and print the wall time.

    A 10 ms sleep per sample stands in for the work of one training step.
    """
    started = time.perf_counter()
    for _ in range(num_epochs):
        for _ in dataset:
            time.sleep(0.01)  # simulated training step
    tf.print("실행 시간:", time.perf_counter() - started)
# ## 성능 최적화
#
# 성능을 최적화하는 방법을 보여주기 위해 `ArtificialDataset`의 성능을 향상시킵니다.
# ### 추상적 접근
#
# 트릭 없이 추상적 파이프라인으로 시작하여 데이터셋을 그대로 반복합니다.
# In[ ]:
# Baseline: synchronous iteration with no pipeline optimizations.
benchmark(ArtificialDataset())
# 실제로는 다음과 같이 실행 시간이 소비되었습니다:
#
# 
#
# 이를 포함한 훈련 스텝을 수행하는 것을 볼 수 있습니다:
#
# - 아직 열지 않은 경우 파일 열기,
# - 파일에서 데이터 항목을 가져오기,
# - 훈련할 데이터 사용하기.
#
# 그러나 여기와 같은 추상적 동기 구현에서는 파이프라인이 데이터를 가져 오는 동안 모델이 유휴 상태입니다.
# 반대로, 모델이 훈련하는 동안 입력 파이프라인이 유휴 상태입니다.
# 따라서 훈련 스텝 시간은 모두 열기, 읽기 및 훈련 시간의 합계입니다.
#
# 다음 섹션에서는 이 입력 파이프라인을 구축하여 성능이 뛰어난 텐서플로 입력 파이프라인 설계를 위한 모범 사례를 보여줍니다.
# 가져오기(Prefetching)
#
# 가져오기는 전처리와 훈련 스텝의 모델 실행을 오버랩합니다.
# 모델이 `s`스텝 훈련을 실행하는 동안 입력 파이프라인은 `s+1`스텝의 데이터를 읽습니다.
# 이렇게 하면 훈련을 하는 최대(합과 반대로) 스텝 시간과 데이터를 추출하는 데 걸리는 시간을 단축시킬 수 있습니다.
#
# `tf.data` API는 소프트웨어 파이프라이닝 방법을 `tf.data.Dataset.prefetch` 변환을 통해 제공합니다. 이것은
# 데이터가 소비되는 시간과 데이터가 생성되는 시간 간의 의존성을 줄일 수 있습니다. 특히, 이 변환은 백그라운드 스레드와 내부 버퍼를 사용하여
# 요청된 시간 전에 입력 데이터셋에서 요소를 가져옵니다. 가져올 요소의 수는 하나의 훈련 스텝에서 소비한 배치의 수와
# 같거나 커야 합니다. 이 값을 수동으로 조정하거나 `tf.data.experimental.AUTOTUNE`으로 설정하면 tf.data 런타임이
# 실행 시에 동적으로 값을 조정하도록 만듭니다.
#
# 프리페치 변환은 "프로듀서"의 작업과 "컨슈머"의 작업과 오버랩이 가능할 때마다 이점을 제공합니다.
# In[ ]:
# Prefetching: overlap data production with the (simulated) training step.
benchmark(
    ArtificialDataset()
    .prefetch(tf.data.experimental.AUTOTUNE)
)
# 
#
# 이번에는 훈련 스텝이 샘플 0에 대해 실행되는 동안 입력 파이프라인이 샘플 1에 대한 데이터를 읽고 등등 하는 방식을 볼 수 있습니다.
# ### 데이터 추출 병렬화
#
# 실제 환경에서는 입력 데이터가 로컬에 맞지 않거나 학습이 분산되어 있고 입력 데이터를 모든 컴퓨터에 복제하는 것은 적절하지 않기 때문에 입력
# 데이터를 원격으로(이를테면, GCS나 HDFS) 저장할 수 있습니다. 데이터를 로컬에서 읽는 데이터셋 파이프라인은 다음과 같은 로컬과 원격
# 저장소의 차이 때문에 원격으로 데이터를 읽을 때 입출력에 병목이 발생할 수 있습니다:
#
# * **첫 번째 바이트(Time-to-first-byte):** 원격 저장소에서 파일의 첫 번째 바이트를 읽는 것은 로컬 저장소에서 읽어
# 들이는 것보다 훨씬 오래 걸립니다.
# * **읽기 처리량(Read throughput):** 원격 저장소는 보통 큰 총 대역폭을 가지지만 하나의 파일을 읽을 때 이 대역폭의
# 일부만 활용할 수 있습니다.
#
# 게다가 바이트들이 메모리로 읽혀지면 데이터를 역직렬화 그리고/또는 해독할 필요가 있을 수 있습니다(예를 들면,
# [protobuf](https://developers.google.com/protocol-buffers/)). 이 작업은 추가적인 계산이
# 필요합니다. 이 오버헤드는 데이터가 로컬 또는 원격으로 저장되는지와는 관계없이 존재하지만 데이터가 효과적으로 프리페치되지 않으면 원격의 경우에
# 나빠질 수 있습니다.
#
# 다양한 데이터 추출 오버헤드의 영향을 줄이기 위해 `tf.data.Dataset.interleave` 변환은 (데이터 파일 판독기와 같은)다른
# 데이터셋의 내용을 인터리빙(interleaving)하여 데이터 추출 단계를 병렬화하는데 사용할 수 있습니다. 중첩할 데이터셋은
# `cycle_length` 매개변수에 의해 지정될 수 있는 반면, 병렬처리 수준은 `num_parallel_calls` 매개변수에 의해 지정될
# 수 있습니다. `prefetch`와 `map` 변환과 비슷하게 `interleave` 변환은
# `tf.data.experimental.AUTOTUNE`을 지원합니다. 이것은 어떤 수준의 병렬처리가 tf.data 런타임에 사용되는지에 대해
# 결정합니다.
# #### 순차적 인터리브
#
# `tf.data.Dataset.interleave` 변환의 기본 인수는 두 개의 데이터셋에서 단일 샘플을 순차적으로 인터리브합니다.
# In[ ]:
# Sequential interleave: two source datasets, read one sample at a time.
benchmark(
    tf.data.Dataset.range(2)
    .interleave(ArtificialDataset)
)
# 
#
# 이 그림을 사용하면 `interleave` 변환의 결과를 나타낼 수 있으며 사용가능한 두 데이터셋에서 샘플을 가져오는 것이 가능합니다.
# 그러나 여기에는 성능 향상이 포함되지 않습니다.
# #### 병렬 인터리브
#
# 이제 `interleave` 변환의 `num_parallel_calls` 인수를 사용합니다.
# 이는 여러 병렬 데이터셋을 불러오고, 파일을 여는 데 기다리는 시간을 단축할 수 있습니다.
# In[ ]:
# Parallel interleave: load the two source datasets concurrently.
benchmark(
    tf.data.Dataset.range(2)
    .interleave(
        ArtificialDataset,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
)
# 
#
# 이번에는 읽은 두 데이터셋이 병렬화되어 전역 데이터 처리 시간이 줄어듭니다.
# ### 데이터 변환 병렬화
#
# 데이터를 준비할 때, 입력 요소들은 전처리가 필요할 수 있습니다.
# 이것 때문에 `tf.data` API가 `tf.data.Dataset.map` 변환을 제공하고, 그것은 사용자 정의 함수(예를 들어, 예제의 `parse_fn`)를 입력 데이터셋의 각 요소에 적용합니다.
# 입력 요소가 서로 독립적이기 때문에 전처리는 여러 개의 CPU 코어에서 병렬로 실행될 수 있습니다.
#
# 이를 가능하게 하기 위해 `prefetch` 및 `interleave` 변환과 유사하게 `map` 변환은 병렬 처리 레벨을 지정하기 위해 `num_parallel_calls` 인수를 제공합니다.
#
# 가장 좋은 `num_parallel_calls` 값은 하드웨어, 훈련 데이터(사이즈와 모양), 맵 함수의 비용, 그리고 CPU에서 동시에 어떤
# 처리가 수행되는지에 따라 다릅니다.
# 단순한 방법으로 가용한 CPU 코어의 숫자로 설정할 수 있습니다.
# 반면에, `num_parallel_calls`를 가용한 CPU 코어 숫자보다 훨씬 더 많이 설정한다면 비효율적인 스케줄링으로 느려질 것입니다.
# `prefetch`와 `interleave` 변환과 비슷하게 `map` 변환은 tf.data 런타임에 가용되는 병렬화 수준을 결정하는
# `tf.data.experimental.AUTOTUNE`을 제공합니다.
# In[ ]:
def mapped_function(s):
    """Identity map that burns ~30 ms inside a tf.py_function to mimic heavy pre-processing."""
    simulate_work = lambda: time.sleep(0.03)
    tf.py_function(simulate_work, [], ())
    return s
# #### 순차적 매핑
#
# 병렬 처리 없이 `map` 변환을 기본 예제로 사용하여 시작하십시오.
# In[ ]:
# Sequential map: pre-processing applied one element at a time.
benchmark(
    ArtificialDataset()
    .map(mapped_function)
)
# 
#
# [추상적 접근](#The-naive-approach)의 경우 여기에서 열기, 읽기, 전처리(매핑) 및 단일 반복을 위해 훈련 스텝에 소요된 시간이 합산됩니다.
# #### 병렬 매핑
#
# 이제 동일한 전처리 함수를 사용하지만 여러 샘플에 병렬로 적용하십시오.
# In[ ]:
# Parallel map: pre-process several samples concurrently.
benchmark(
    ArtificialDataset()
    .map(
        mapped_function,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
)
# 
#
# 이제 그림(plot)에서 전처리 단계가 겹치므로 단일 반복의 전체 시간이 줄어 듭니다.
# ### 캐시하기
#
# `tf.data.Dataset.cache` 변환은 데이터셋을 메모리 또는 로컬 저장소에 캐시할 수 있습니다.
# 이렇게하면 각 에포크 동안 실행되는 일부 작업(파일 열기 및 데이터 읽기 등)이 저장됩니다.
# In[ ]:
# Caching: expensive map runs only in the first of the 5 epochs.
benchmark(
    ArtificialDataset()
    .map(  # Apply the time-consuming operation before caching
        mapped_function
    ).cache(
    ),
    5
)
# 
#
# 데이터셋을 캐시할 때, `cache` 이전의 변환(파일 열기 및 데이터 읽기와 같은)은 첫 번째 에포크 동안에만 실행됩니다.
# 다음 에포크에는 `cache` 변환에 의해 캐시된 데이터를 재사용 할 것입니다.
#
# `map` 변환에 전달된 사용자 정의 함수가 비싸면 결과 데이터셋이 여전히 메모리 또는 로컬 스토리지에 적합할 수 있는 한 `map` 변환 후 `cache` 변환을 적용합니다.사용자 정의 함수가 캐시 용량을 넘어서 데이터셋을 저장하는 데 필요한 공간을 늘리면 `cache` 변환 후 데이터셋을 적용하거나 훈련 작업 전에 데이터를 전처리하여 리소스 사용량을 줄입니다.
# ### 매핑 벡터화
#
# `map` 변환으로 전달된 사용자 정의 함수를 호출하면 사용자 정의 함수의 스케줄링 및 실행과 관련된 오버헤드가 있습니다.
# 사용자 정의 함수를 벡터화(즉, 한 번에 여러 입력에 대해 작동하도록)하고 `맵`을 변환하기 _전에_ `배치` 변환을 적용하는 것이 좋습니다.
#
# 이 모범 사례를 설명하는 데 인공 데이터셋은 적합하지 않습니다.
# 스케줄링 지연은 약 10 마이크로초(10e-6초)로, `ArtificialDataset`에 사용된 수십 밀리초보다 훨씬 짧으므로 그 영향을 보기가 어렵습니다.
#
# 이 예제에서는 기본 `tf.data.Dataset.range` 함수를 사용하고 훈련 루프를 가장 간단한 형태로 단순화하십시오.
# In[ ]:
# Source dataset for the vectorization benchmarks below: integers 0..9999.
fast_dataset = tf.data.Dataset.range(10000)
def fast_benchmark(dataset, num_epochs=2):
    """Iterate `dataset` for `num_epochs` epochs and print the elapsed time.

    The epoch loop itself runs over a ``tf.data.Dataset.range`` so the
    measurement focuses on dataset iteration overhead.
    """
    start_time = time.perf_counter()
    for _ in tf.data.Dataset.range(num_epochs):
        for _ in dataset:
            pass
    tf.print("실행 시간:", time.perf_counter() - start_time)
def increment(x):
    """Return ``x + 1``; also works per-batch since tensor `+` broadcasts."""
    return x+1
# #### 스칼라 매핑
# In[ ]:
# Scalar mapping: the function runs once per element, then results are batched.
fast_benchmark(
    fast_dataset
    # Apply the function one item at a time
    .map(increment)
    # Batch
    .batch(256)
)
# 
#
# 위의 그림은 (샘플이 적은) 진행 상황을 보여줍니다.
# 매핑된 함수가 각 샘플에 적용되어 있음을 알 수 있습니다.
# 이 기능은 매우 빠르지만 시간 성능에 영향을 주는 약간의 오버헤드가 있습니다.
# #### 매핑 벡터화됨
# In[ ]:
# Vectorized mapping: batch first, so the function runs once per batch.
fast_benchmark(
    fast_dataset
    .batch(256)
    # Apply the function to a batch of items;
    # the tf.Tensor.__add__ method already handles batches
    .map(increment)
)
# 
#
# 이번에는 매핑된 함수가 한 번 호출되어 샘플 배치에 적용됩니다.
# 이 함수를 실행하는 데 시간이 더 걸릴 수 있지만 오버헤드는 한 번만 나타나므로 전체 시간 성능이 향상됩니다.
# ### 메모리 사용량(footprint) 줄이기
#
# `interleave`, `prefetch`, `shuffle`을 포함한 많은 변환은 요소들의 내부 버퍼를 유지합니다.
# 사용자 정의 함수가 `map` 변환에 전달된 경우 요소의 크기가 변경되고 맵 변환의 순서와 버퍼 요소가 메모리 사용에 영향을 줍니다.
# 일반적으로 순서를 다르게 하는 것이 성능에 도움이 되는 경우 메모리 사용량이 낮아지는 순서를 선택하는 것이 좋습니다.
#
# #### 부분 계산 캐싱
#
# 이 변환으로 인해 데이터가 너무 커서 메모리에 맞지 않는 경우를 제외하고 `map` 변환 후 데이터셋을 캐시하는 것이 좋습니다.
# 매핑된 기능을 시간 소모적인 부분과 메모리 소모적인 부분의 두 부분으로 나눌 수 있다면 교환이 성사될 수 있습니다.
# 이 경우 아래와 같이 변환을 연결할 수 있습니다:
#
# ```python
# dataset.map(time_consuming_mapping).cache().map(memory_consuming_mapping)
# ```
#
# 이런 식으로 시간이 많이 걸리는 부분은 첫 번째 에포크(epoch) 동안에만 실행되며 너무 많은 캐시 공간을 사용하지 않습니다.
# ## 가장 좋은 예제 요약
#
# 다음은 성능이 좋은 텐서플로 입력 파이프라인을 설계하기 위한 가장 좋은 예제를 요약한 것입니다:
#
# * [`prefetch` 변환](#Pipelining)을 사용하여 프로듀서와 컨슈머의 작업을 오버랩하세요.
# * `interleave` 변환을 이용해 [데이터 읽기 변환을 병렬화하세요](#Parallelizing-data-extraction).
# * `num_parallel_calls` 매개변수를 설정하여 [`map` 변환을 병렬 처리하세요](#Parallelizing-data-transformation).
# * 데이터가 메모리에 저장될 수 있는 경우, [`cache` 변환을 사용](#Caching)하여 첫 번째 에포크동안 데이터를 메모리에 캐시하세요.
# * `map` 변환에 전달된 [사용자 정의 함수를 벡터화](#Map-and-batch)하세요.
# * `interleave`, `prefetch`, 그리고 `shuffle` 변환을 적용하여 [메모리 사용을 줄이세요](#Reducing-memory-footprint).
# ## 그림 재현
#
# 참고: 이 노트북의 나머지 부분은 위의 그림을 재현하는 방법에 대한 것이며, 이 코드로 자유롭게 놀아볼 수 있지만 이해하는 것은 이 자습서의 필수적인 부분이 아닙니다.
#
# `tf.data.Dataset` API에 대해 더 깊이 이해하기 위해 자신만의 파이프라인을 사용할 수 있습니다.
# 다음은 이 안내서의 이미지를 그리는 데 사용되는 코드입니다.
# 다음과 같은 일반적인 어려움에 대한 해결 방법을 보여주는 좋은 출발점이 될 수 있습니다:
#
# - 실행 시간 재현성;
# - 매핑 된 기능 즉시 실행;
# - `interleave` 변환 호출 가능.
# In[ ]:
import itertools
from collections import defaultdict
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# ### 데이터셋
#
# `ArtificialDataset`과 비슷하게 각 단계에서 소요된 시간을 리턴하는 데이터셋을 빌드할 수 있습니다.
# In[ ]:
class TimeMeasuredDataset(tf.data.Dataset):
    """tf.data dataset that records how long each simulated step takes.

    Every yielded sample bundles step names ("Open"/"Read"), their
    (start, duration) timings, and (instance, epoch, sample) counters so a
    timeline can be drawn afterwards.
    """
    # Output: (steps, timings, counters)
    OUTPUT_TYPES = (tf.dtypes.string, tf.dtypes.float32, tf.dtypes.int32)
    OUTPUT_SHAPES = ((2, 1), (2, 2), (2, 3))
    _INSTANCES_COUNTER = itertools.count() # number of datasets created so far
    _EPOCHS_COUNTER = defaultdict(itertools.count) # epochs run per dataset instance
    def _generator(instance_idx, num_samples):
        """Yield `num_samples` samples while timing the simulated open/read steps."""
        epoch_idx = next(TimeMeasuredDataset._EPOCHS_COUNTER[instance_idx])
        # Opening the file (simulated by a 30 ms sleep)
        open_enter = time.perf_counter()
        time.sleep(0.03)
        open_elapsed = time.perf_counter() - open_enter
        for sample_idx in range(num_samples):
            # Reading a record (line) from the file (simulated by a 15 ms sleep)
            read_enter = time.perf_counter()
            time.sleep(0.015)
            read_elapsed = time.perf_counter() - read_enter
            yield (
                [("Open",), ("Read",)],
                [(open_enter, open_elapsed), (read_enter, read_elapsed)],
                [(instance_idx, epoch_idx, -1), (instance_idx, epoch_idx, sample_idx)]
            )
            open_enter, open_elapsed = -1., -1. # negative timings get filtered out later
    def __new__(cls, num_samples=3):
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=cls.OUTPUT_TYPES,
            output_shapes=cls.OUTPUT_SHAPES,
            args=(next(cls._INSTANCES_COUNTER), num_samples)
        )
# 이 데이터셋은 `[[2, 1], [2, 2], [2, 3]]`의 크기와 `[tf.dtypes.string, tf.dtypes.float32, tf.dtypes.int32]`의 타입을 가진 샘플을 제공합니다.
# 각 샘플은:
# ```
# (
# [("Open"), ("Read")],
# [(t0, d), (t0, d)],
# [(i, e, -1), (i, e, s)]
# )
# ```
#
# 이며,
#
# - `Open`과 `Read`는 스텝 식별자
# - `t0`는 해당 스텝이 시작된 타임스탬프
# - `d`는 해당 스텝에서 소비된 시간
# - `i`는 인스턴스의 인덱스
# - `e`는 에포크 인덱스(데이터셋이 반복된 횟수)
# - `s`는 샘플 인덱스입니다.
# ### 반복 루프
#
# 반복 루프를 조금 더 복잡하게 하여 모든 타이밍을 집계하십시오.
# 위에서 설명한 대로 샘플을 생성하는 데이터셋에서만 작동합니다.
# In[ ]:
def timelined_benchmark(dataset, num_epochs=2):
    """Iterate `dataset` for `num_epochs` epochs, aggregating all step timings.

    Only works with datasets that yield ``(steps, times, values)`` samples as
    produced by ``TimeMeasuredDataset``.  A 10 ms "Train" step is simulated and
    timed per sample, and an "Epoch" entry is appended per epoch.

    Returns a dict with the "steps", "times" and "values" accumulator tensors.
    """
    # Initialize the accumulators
    steps_acc = tf.zeros([0, 1], dtype=tf.dtypes.string)
    times_acc = tf.zeros([0, 2], dtype=tf.dtypes.float32)
    values_acc = tf.zeros([0, 3], dtype=tf.dtypes.int32)
    start_time = time.perf_counter()
    for epoch_num in range(num_epochs):
        epoch_enter = time.perf_counter()
        for (steps, times, values) in dataset:
            # Record dataset preparation information
            steps_acc = tf.concat((steps_acc, steps), axis=0)
            times_acc = tf.concat((times_acc, times), axis=0)
            values_acc = tf.concat((values_acc, values), axis=0)
            # Simulate the training time
            train_enter = time.perf_counter()
            time.sleep(0.01)
            train_elapsed = time.perf_counter() - train_enter
            # Record training information
            steps_acc = tf.concat((steps_acc, [["Train"]]), axis=0)
            times_acc = tf.concat((times_acc, [(train_enter, train_elapsed)]), axis=0)
            values_acc = tf.concat((values_acc, [values[-1]]), axis=0)
        epoch_elapsed = time.perf_counter() - epoch_enter
        # Record epoch information
        steps_acc = tf.concat((steps_acc, [["Epoch"]]), axis=0)
        times_acc = tf.concat((times_acc, [(epoch_enter, epoch_elapsed)]), axis=0)
        values_acc = tf.concat((values_acc, [[-1, epoch_num, -1]]), axis=0)
        time.sleep(0.001)
    tf.print("실행 시간:", time.perf_counter() - start_time)
    return {"steps": steps_acc, "times": times_acc, "values": values_acc}
# ### 그리기(plotting) 메서드
#
# 마지막으로, `timelined_benchmark` 함수에 의해 리턴된 값이 주어지면 타임라인을 그릴 수 있는 함수를 정의하십시오.
# In[ ]:
def draw_timeline(timeline, title, width=0.5, annotate=False, save=False):
    """Plot a broken-bar timeline from a ``timelined_benchmark`` result.

    One horizontal track is drawn per distinct step name; `width` is the
    minimum x-axis extent, `annotate` adds per-bar (i, e, s) labels, and
    `save` writes the figure to "<title>.svg".
    """
    # Remove invalid entries (negative times or empty steps) from the timeline
    # NOTE(review): despite its name, `invalid_mask` is True for the VALID rows.
    invalid_mask = np.logical_and(timeline['times'] > 0, timeline['steps'] != b'')[:,0]
    steps = timeline['steps'][invalid_mask].numpy()
    times = timeline['times'][invalid_mask].numpy()
    values = timeline['values'][invalid_mask].numpy()
    # Get the distinct steps, ordered by their first occurrence
    step_ids, indices = np.stack(np.unique(steps, return_index=True))
    step_ids = step_ids[np.argsort(indices)]
    # Shift the start time to 0 and compute the maximum time value
    min_time = times[:,0].min()
    times[:,0] = (times[:,0] - min_time)
    end = max(width, (times[:,0]+times[:,1]).max() + 0.01)
    cmap = mpl.cm.get_cmap("plasma")
    plt.close()
    fig, axs = plt.subplots(len(step_ids), sharex=True, gridspec_kw={'hspace': 0})
    fig.suptitle(title)
    fig.set_size_inches(17.0, len(step_ids))
    plt.xlim(-0.01, end)
    for i, step in enumerate(step_ids):
        step_name = step.decode()
        ax = axs[i]
        ax.set_ylabel(step_name)
        ax.set_ylim(0, 1)
        ax.set_yticks([])
        ax.set_xlabel("time (s)")
        ax.set_xticklabels([])
        ax.grid(which="both", axis="x", color="k", linestyle=":")
        # Get the timings and annotations for the given step
        entries_mask = np.squeeze(steps==step)
        serie = np.unique(times[entries_mask], axis=0)
        annotations = values[entries_mask]
        ax.broken_barh(serie, (0, 1), color=cmap(i / len(step_ids)), linewidth=1, alpha=0.66)
        if annotate:
            for j, (start, width) in enumerate(serie):
                annotation = "\n".join([f"{l}: {v}" for l,v in zip(("i", "e", "s"), annotations[j])])
                ax.text(start + 0.001 + (0.001 * (j % 2)), 0.55 - (0.1 * (j % 2)), annotation,
                        horizontalalignment='left', verticalalignment='center')
    if save:
        plt.savefig(title.lower().translate(str.maketrans(" ", "_")) + ".svg")
# ### 매핑된 함수용 래퍼(wrappers) 사용
#
# eager 컨텍스트에서 매핑된 함수를 실행하려면 tf.py_function 호출 내에서 래핑해야 합니다.
# In[ ]:
def map_decorator(func):
    """Wrap `func` in ``tf.py_function`` so it runs eagerly inside `map`."""
    def wrapper(steps, times, values):
        # Use tf.py_function to prevent AutoGraph from compiling the method
        return tf.py_function(
            func,
            inp=(steps, times, values),
            Tout=(steps.dtype, times.dtype, values.dtype)
        )
    return wrapper
# ### 파이프라인 비교
# In[ ]:
# Number of items per batch/sample fed into the mapped functions below.
_batch_map_num_items = 50
def dataset_generator_fun(*args):
    """Build a fresh TimeMeasuredDataset; `*args` (from flat_map/interleave) are ignored."""
    return TimeMeasuredDataset(num_samples=_batch_map_num_items)
# #### Naive
# In[ ]:
@map_decorator
def naive_map(steps, times, values):
    """Per-sample map that appends its own "Map" timing entry to each sample."""
    map_enter = time.perf_counter()
    time.sleep(0.001) # time-consuming step
    time.sleep(0.0001) # memory-consuming step
    map_elapsed = time.perf_counter() - map_enter
    return (
        tf.concat((steps, [["Map"]]), axis=0),
        tf.concat((times, [[map_enter, map_elapsed]]), axis=0),
        tf.concat((values, [values[-1]]), axis=0)
    )
# Naive pipeline: sequential extraction (flat_map) and per-sample mapping,
# without caching or prefetching.
naive_timeline = timelined_benchmark(
    tf.data.Dataset.range(2)
    .flat_map(dataset_generator_fun)
    .map(naive_map)
    .batch(_batch_map_num_items, drop_remainder=True)
    .unbatch(),
    5
)
# ### Optimized
# In[ ]:
@map_decorator
def time_consumming_map(steps, times, values):
    """Batched map simulating time-consuming work (1 ms per sample).

    Appends a "1st map" timing entry to every sample in the batch.
    """
    map_enter = time.perf_counter()
    time.sleep(0.001 * values.shape[0]) # time-consuming step
    map_elapsed = time.perf_counter() - map_enter
    return (
        tf.concat((steps, tf.tile([[["1st map"]]], [steps.shape[0], 1, 1])), axis=1),
        tf.concat((times, tf.tile([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])), axis=1),
        tf.concat((values, tf.tile([[values[:][-1][0]]], [values.shape[0], 1, 1])), axis=1)
    )
@map_decorator
def memory_consumming_map(steps, times, values):
    """Batched map simulating memory-consuming work (0.1 ms per sample).

    Appends a "2nd map" timing entry to every sample in the batch.
    """
    map_enter = time.perf_counter()
    time.sleep(0.0001 * values.shape[0]) # memory-consuming step
    map_elapsed = time.perf_counter() - map_enter
    # Use tf.tile to handle the batch dimension
    return (
        tf.concat((steps, tf.tile([[["2nd map"]]], [steps.shape[0], 1, 1])), axis=1),
        tf.concat((times, tf.tile([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])), axis=1),
        tf.concat((values, tf.tile([[values[:][-1][0]]], [values.shape[0], 1, 1])), axis=1)
    )
# Optimized pipeline: applies every best practice from the guide at once.
optimized_timeline = timelined_benchmark(
    tf.data.Dataset.range(2)
    .interleave( # Parallelize data reading
        dataset_generator_fun,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    .batch( # Vectorize the mapped function
        _batch_map_num_items,
        drop_remainder=True)
    .map( # Parallelize the map transformation
        time_consumming_map,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    .cache() # Cache the data
    .map( # Reduce memory usage
        memory_consumming_map,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    .prefetch( # Overlap producer and consumer work
        tf.data.experimental.AUTOTUNE
    )
    .unbatch(),
    5
)
# In[ ]:
# Render both timelines with a 15-unit minimum x-axis width.
draw_timeline(naive_timeline, "Naive", 15)
# In[ ]:
draw_timeline(optimized_timeline, "Optimized", 15)
| [
"matplotlib.pyplot.xlim",
"tensorflow.py_function",
"matplotlib.cm.get_cmap",
"numpy.logical_and",
"matplotlib.pyplot.close",
"time.perf_counter",
"itertools.count",
"tensorflow.data.Dataset.range",
"collections.defaultdict",
"time.sleep",
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow... | [((9611, 9639), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['(10000)'], {}), '(10000)\n', (9632, 9639), True, 'import tensorflow as tf\n'), ((3456, 3475), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3473, 3475), False, 'import time\n'), ((9701, 9720), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9718, 9720), False, 'import time\n'), ((9734, 9767), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['num_epochs'], {}), '(num_epochs)\n', (9755, 9767), True, 'import tensorflow as tf\n'), ((12570, 12587), 'itertools.count', 'itertools.count', ([], {}), '()\n', (12585, 12587), False, 'import itertools\n'), ((12624, 12652), 'collections.defaultdict', 'defaultdict', (['itertools.count'], {}), '(itertools.count)\n', (12635, 12652), False, 'from collections import defaultdict\n'), ((14338, 14378), 'tensorflow.zeros', 'tf.zeros', (['[0, 1]'], {'dtype': 'tf.dtypes.string'}), '([0, 1], dtype=tf.dtypes.string)\n', (14346, 14378), True, 'import tensorflow as tf\n'), ((14395, 14436), 'tensorflow.zeros', 'tf.zeros', (['[0, 2]'], {'dtype': 'tf.dtypes.float32'}), '([0, 2], dtype=tf.dtypes.float32)\n', (14403, 14436), True, 'import tensorflow as tf\n'), ((14454, 14493), 'tensorflow.zeros', 'tf.zeros', (['[0, 3]'], {'dtype': 'tf.dtypes.int32'}), '([0, 3], dtype=tf.dtypes.int32)\n', (14462, 14493), True, 'import tensorflow as tf\n'), ((14516, 14535), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14533, 14535), False, 'import time\n'), ((16616, 16641), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (16631, 16641), True, 'import matplotlib as mpl\n'), ((16646, 16657), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16655, 16657), True, 'import matplotlib.pyplot as plt\n'), ((16814, 16834), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.01)', 'end'], {}), '(-0.01, end)\n', (16822, 16834), True, 'import matplotlib.pyplot as plt\n'), ((18535, 18554), 
'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18552, 18554), False, 'import time\n'), ((18559, 18576), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (18569, 18576), False, 'import time\n'), ((18593, 18611), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (18603, 18611), False, 'import time\n'), ((19167, 19186), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19184, 19186), False, 'import time\n'), ((19191, 19226), 'time.sleep', 'time.sleep', (['(0.001 * values.shape[0])'], {}), '(0.001 * values.shape[0])\n', (19201, 19226), False, 'import time\n'), ((19668, 19687), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19685, 19687), False, 'import time\n'), ((19692, 19728), 'time.sleep', 'time.sleep', (['(0.0001 * values.shape[0])'], {}), '(0.0001 * values.shape[0])\n', (19702, 19728), False, 'import time\n'), ((2823, 2839), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (2833, 2839), False, 'import time\n'), ((3060, 3181), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['cls._generator'], {'output_types': 'tf.dtypes.int64', 'output_shapes': '(1,)', 'args': '(num_samples,)'}), '(cls._generator, output_types=tf.dtypes.int64,\n output_shapes=(1,), args=(num_samples,))\n', (3090, 3181), True, 'import tensorflow as tf\n'), ((12847, 12866), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12864, 12866), False, 'import time\n'), ((12875, 12891), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (12885, 12891), False, 'import time\n'), ((14598, 14617), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14615, 14617), False, 'import time\n'), ((15433, 15476), 'tensorflow.concat', 'tf.concat', (["(steps_acc, [['Epoch']])"], {'axis': '(0)'}), "((steps_acc, [['Epoch']]), axis=0)\n", (15442, 15476), True, 'import tensorflow as tf\n'), ((15497, 15559), 'tensorflow.concat', 'tf.concat', (['(times_acc, [(epoch_enter, epoch_elapsed)])'], {'axis': 
'(0)'}), '((times_acc, [(epoch_enter, epoch_elapsed)]), axis=0)\n', (15506, 15559), True, 'import tensorflow as tf\n'), ((15581, 15635), 'tensorflow.concat', 'tf.concat', (['(values_acc, [[-1, epoch_num, -1]])'], {'axis': '(0)'}), '((values_acc, [[-1, epoch_num, -1]]), axis=0)\n', (15590, 15635), True, 'import tensorflow as tf\n'), ((15644, 15661), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (15654, 15661), False, 'import time\n'), ((16048, 16111), 'numpy.logical_and', 'np.logical_and', (["(timeline['times'] > 0)", "(timeline['steps'] != b'')"], {}), "(timeline['times'] > 0, timeline['steps'] != b'')\n", (16062, 16111), True, 'import numpy as np\n'), ((16347, 16382), 'numpy.unique', 'np.unique', (['steps'], {'return_index': '(True)'}), '(steps, return_index=True)\n', (16356, 16382), True, 'import numpy as np\n'), ((16408, 16427), 'numpy.argsort', 'np.argsort', (['indices'], {}), '(indices)\n', (16418, 16427), True, 'import numpy as np\n'), ((17214, 17239), 'numpy.squeeze', 'np.squeeze', (['(steps == step)'], {}), '(steps == step)\n', (17224, 17239), True, 'import numpy as np\n'), ((17254, 17292), 'numpy.unique', 'np.unique', (['times[entries_mask]'], {'axis': '(0)'}), '(times[entries_mask], axis=0)\n', (17263, 17292), True, 'import numpy as np\n'), ((18124, 18224), 'tensorflow.py_function', 'tf.py_function', (['func'], {'inp': '(steps, times, values)', 'Tout': '(steps.dtype, times.dtype, values.dtype)'}), '(func, inp=(steps, times, values), Tout=(steps.dtype, times.\n dtype, values.dtype))\n', (18138, 18224), True, 'import tensorflow as tf\n'), ((18643, 18662), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (18660, 18662), False, 'import time\n'), ((18697, 18734), 'tensorflow.concat', 'tf.concat', (["(steps, [['Map']])"], {'axis': '(0)'}), "((steps, [['Map']]), axis=0)\n", (18706, 18734), True, 'import tensorflow as tf\n'), ((18744, 18798), 'tensorflow.concat', 'tf.concat', (['(times, [[map_enter, map_elapsed]])'], {'axis': '(0)'}), 
'((times, [[map_enter, map_elapsed]]), axis=0)\n', (18753, 18798), True, 'import tensorflow as tf\n'), ((18808, 18849), 'tensorflow.concat', 'tf.concat', (['(values, [values[-1]])'], {'axis': '(0)'}), '((values, [values[-1]]), axis=0)\n', (18817, 18849), True, 'import tensorflow as tf\n'), ((19257, 19276), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19274, 19276), False, 'import time\n'), ((19760, 19779), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19777, 19779), False, 'import time\n'), ((2940, 2957), 'time.sleep', 'time.sleep', (['(0.015)'], {}), '(0.015)\n', (2950, 2957), False, 'import time\n'), ((3584, 3600), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (3594, 3600), False, 'import time\n'), ((3632, 3651), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3649, 3651), False, 'import time\n'), ((6312, 6336), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['(2)'], {}), '(2)\n', (6333, 6336), True, 'import tensorflow as tf\n'), ((6737, 6761), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['(2)'], {}), '(2)\n', (6758, 6761), True, 'import tensorflow as tf\n'), ((7808, 7824), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (7818, 7824), False, 'import time\n'), ((9843, 9862), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9860, 9862), False, 'import time\n'), ((12915, 12934), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (12932, 12934), False, 'import time\n'), ((13063, 13082), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13080, 13082), False, 'import time\n'), ((13095, 13112), 'time.sleep', 'time.sleep', (['(0.015)'], {}), '(0.015)\n', (13105, 13112), False, 'import time\n'), ((14719, 14756), 'tensorflow.concat', 'tf.concat', (['(steps_acc, steps)'], {'axis': '(0)'}), '((steps_acc, steps), axis=0)\n', (14728, 14756), True, 'import tensorflow as tf\n'), ((14781, 14818), 'tensorflow.concat', 'tf.concat', (['(times_acc, times)'], 
{'axis': '(0)'}), '((times_acc, times), axis=0)\n', (14790, 14818), True, 'import tensorflow as tf\n'), ((14844, 14883), 'tensorflow.concat', 'tf.concat', (['(values_acc, values)'], {'axis': '(0)'}), '((values_acc, values), axis=0)\n', (14853, 14883), True, 'import tensorflow as tf\n'), ((14949, 14968), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (14966, 14968), False, 'import time\n'), ((14981, 14997), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (14991, 14997), False, 'import time\n'), ((15122, 15165), 'tensorflow.concat', 'tf.concat', (["(steps_acc, [['Train']])"], {'axis': '(0)'}), "((steps_acc, [['Train']]), axis=0)\n", (15131, 15165), True, 'import tensorflow as tf\n'), ((15190, 15252), 'tensorflow.concat', 'tf.concat', (['(times_acc, [(train_enter, train_elapsed)])'], {'axis': '(0)'}), '((times_acc, [(train_enter, train_elapsed)]), axis=0)\n', (15199, 15252), True, 'import tensorflow as tf\n'), ((15278, 15323), 'tensorflow.concat', 'tf.concat', (['(values_acc, [values[-1]])'], {'axis': '(0)'}), '((values_acc, [values[-1]]), axis=0)\n', (15287, 15323), True, 'import tensorflow as tf\n'), ((15357, 15376), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (15374, 15376), False, 'import time\n'), ((15698, 15717), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (15715, 15717), False, 'import time\n'), ((13140, 13159), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13157, 13159), False, 'import time\n'), ((15026, 15045), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (15043, 15045), False, 'import time\n'), ((19329, 19377), 'tensorflow.tile', 'tf.tile', (["[[['1st map']]]", '[steps.shape[0], 1, 1]'], {}), "([[['1st map']]], [steps.shape[0], 1, 1])\n", (19336, 19377), True, 'import tensorflow as tf\n'), ((19415, 19476), 'tensorflow.tile', 'tf.tile', (['[[[map_enter, map_elapsed]]]', '[times.shape[0], 1, 1]'], {}), '([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])\n', (19422, 19476), 
True, 'import tensorflow as tf\n'), ((19515, 19569), 'tensorflow.tile', 'tf.tile', (['[[values[:][-1][0]]]', '[values.shape[0], 1, 1]'], {}), '([[values[:][-1][0]]], [values.shape[0], 1, 1])\n', (19522, 19569), True, 'import tensorflow as tf\n'), ((19862, 19910), 'tensorflow.tile', 'tf.tile', (["[[['2nd map']]]", '[steps.shape[0], 1, 1]'], {}), "([[['2nd map']]], [steps.shape[0], 1, 1])\n", (19869, 19910), True, 'import tensorflow as tf\n'), ((19948, 20009), 'tensorflow.tile', 'tf.tile', (['[[[map_enter, map_elapsed]]]', '[times.shape[0], 1, 1]'], {}), '([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])\n', (19955, 20009), True, 'import tensorflow as tf\n'), ((20048, 20102), 'tensorflow.tile', 'tf.tile', (['[[values[:][-1][0]]]', '[values.shape[0], 1, 1]'], {}), '([[values[:][-1][0]]], [values.shape[0], 1, 1])\n', (20055, 20102), True, 'import tensorflow as tf\n'), ((18899, 18923), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['(2)'], {}), '(2)\n', (18920, 18923), True, 'import tensorflow as tf\n'), ((20167, 20191), 'tensorflow.data.Dataset.range', 'tf.data.Dataset.range', (['(2)'], {}), '(2)\n', (20188, 20191), True, 'import tensorflow as tf\n')] |
from cmath import sqrt
import numpy as np
import networkx as nx
from math import sqrt
def undersegmentation_error(partition, groundtruth,
                            tolerance=0.05):
    """ Computes the undersegmentation error defined as:
            ue(G_i) = (sum_{Area(S_i)} - area(G_i)) / area(G_i)
        where G_i is the groundtruth and
        S_i is the obtained partition
        The total error is the average across regions
        (and across groundtruths when a list is given)
        Parameters
        ----------
        partition: (N,M) array
            array with obtained labels
        groundtruth: (N,M) array or list
            array/list with groundtruth labels
        tolerance: float, optional
            minimum fraction of a groundtruth region's area that a partition
            segment must overlap to be counted as covering it
        Returns
        -------
        The undersegmentation error
    """
    # Accept either a single groundtruth array or a list of them.
    gt_list = list(groundtruth) if isinstance(groundtruth, list) else [groundtruth]
    # Pre-compute the pixel area of every partition segment.
    seg_labels = np.unique(partition)
    areas = {s_i: np.count_nonzero(partition == s_i) for s_i in seg_labels}
    # Evaluate each groundtruth segmentation.
    err = 0.0
    for segmentation in gt_list:
        gt_labels = np.unique(segmentation)
        err_s = 0.0
        for g_i in gt_labels:
            region = segmentation == g_i
            area = np.count_nonzero(region)
            # Sum the areas of all partition segments whose overlap with this
            # groundtruth region exceeds the tolerance threshold.
            total_area = 0.0
            for s_i in seg_labels:
                overlap = np.count_nonzero(region * (partition == s_i))
                if overlap > tolerance * area:
                    total_area += areas[s_i]
            err_s += abs(total_area - area) / area
        err += err_s / len(gt_labels)
    return err / len(gt_list)
def segmentation_accuracy(partition, groundtruth):
    """ Computes the segmentation accuracy defined as:
            accu(G_i) = (sum_{Area(S_k) \in area(G_i)}) / area(G_i)
        where G_i is the groundtruth and
        S_k is the obtained partition where the majority of S_k is in G_i
        The total error is the average across regions
        Parameters
        ----------
        partition: (N,M) array
            array with obtained labels
        groundtruth: (N,M) array or list
            array/list with groundtruth labels
        Returns
        -------
        The segmentation accuracy
    """
    # Accept either a single groundtruth array or a list of them.
    gt_list = list(groundtruth) if isinstance(groundtruth, list) else [groundtruth]
    # Partition labels.
    seg_labels = np.unique(partition)
    # Evaluate each groundtruth segmentation.
    accu = 0.0
    for segmentation in gt_list:
        gt_labels = np.unique(segmentation)
        # Pixel area of each groundtruth segment, indexed by label.
        # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the documented replacement.
        area = np.bincount(segmentation.astype(int).flatten())
        accu_s = 0.0
        # Match each partition segment to a groundtruth segment.
        for s_k in seg_labels:
            coords = np.where(partition == s_k)
            # Histogram of groundtruth labels under this partition segment.
            intersection = np.bincount(segmentation[coords].flatten().astype(int))
            # Credit the segment to the groundtruth region it overlaps most.
            g_i = np.argmax(intersection)
            accu_s += intersection[g_i] / area[g_i]
        accu += accu_s / len(gt_labels)
    return accu / len(gt_list)
def boundary_detection(partition, groundtruth, tolerance = 0.04):
    """ Measures boundary detection precision/recall via bipartite matching
    Parameters
    ----------
    partition: (N,M) array
        array with obtained labels
    groundtruth: (N,M) array
        array with groundtruth labels
        NOTE: unlike the other metrics here, a single array is expected --
        the 2-D slicing below would fail on a list
    tolerance: float, optional
        maximum distance of considered boundaries relative
        to the diagonal
    Returns
    -------
    The precision recall boundaries
    """
    # dictionary holding contours and their status (matched/not matched)
    contours = {}
    gt_contours = {}
    # find horizontal contours for segmentation
    seg_hx, seg_hy = np.where(partition[:-1, :] != partition[1:, :])
    # find vertical contours for segmentation
    seg_vx, seg_vy = np.where(partition[:, :-1] != partition[:, 1:])
    # the third number reflects:
    #   0/1: horizontal/vertical contour
    # the fourth number reflects:
    #   0/1: segmentation/groundtruth contour
    for px, py in zip(seg_hx, seg_hy):
        contours[(px, py, 0, 0)] = 0
    for px, py in zip(seg_vx, seg_vy):
        contours[(px, py, 1, 0)] = 0
    # find horizontal contours for groundtruth
    seg_hx, seg_hy = np.where(groundtruth[:-1, :] != groundtruth[1:, :])
    # find vertical contours for groundtruth
    seg_vx, seg_vy = np.where(groundtruth[:, :-1] != groundtruth[:, 1:])
    for px, py in zip(seg_hx, seg_hy):
        gt_contours[(px, py, 0, 1)] = 0
    for px, py in zip(seg_vx, seg_vy):
        gt_contours[(px, py, 1, 1)] = 0
    # create a graph matching segmentation contours to groundtruth contours
    bipartite = nx.Graph()
    bipartite.add_nodes_from(contours)
    bipartite.add_nodes_from(gt_contours)
    diagonal = sqrt(partition.shape[0]**2 + partition.shape[1]**2)
    # maximum distance to search for
    D = int(tolerance * diagonal)
    for contour in contours:
        px = contour[0]
        py = contour[1]
        # find groundtruth contours around a neighborhood
        for x in range(px - D, px + D + 1):
            for y in range(py - D, py + D + 1):
                hcontour = (x, y, 0, 1)
                vcontour = (x, y, 1, 1)
                # add an edge if a contour is found
                if hcontour in bipartite:
                    bipartite.add_edge(contour, hcontour)
                if vcontour in bipartite:
                    # BUG FIX: this branch previously added `hcontour`, so
                    # vertical groundtruth contours were never linked.
                    bipartite.add_edge(contour, vcontour)
    # perform a matching
    # NOTE(review): the /2 below assumes each match is reported twice (the
    # old networkx dict API); modern networkx returns a set with each edge
    # once -- verify against the installed networkx version.
    matches = nx.max_weight_matching(bipartite)
    print("Contours {0} and {1} matches {2}".format(len(contours),
        len(gt_contours), len(matches)))
    # find precision/recall values
    true_positives = len(matches)/2
    false_positives = len(contours) - len(matches)/2
    false_negatives = len(gt_contours) - len(matches)/2
    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)
    return precision, recall
def explained_variation(img, partition):
    """ Computes the explained variation defined as:
            sum over voxels (\mu_i - \mu) / (\voxel - \mu)
        where \mu is the video mean and \mu_i is the region mean
        Returns the ratio of within-region squared error to the
        total squared error around the global mean color
    """
    # partition labels
    seg_labels = np.unique(partition)
    dimensions = img.shape
    # compute the per-channel global color mean
    mu = np.zeros(dimensions[-1])
    # create an array to accumulate the squared error per voxel
    mse = np.zeros(dimensions[:-1])
    for i in range(dimensions[-1]):
        mu[i] = np.mean(img[..., i])
        mse += (img[..., i] - mu[i])**2
    # total squared error around the global mean
    mse_error = np.sum(mse)
    # squared error of each region around its own mean color
    mse_reg = 0
    for segment in seg_labels:
        coords = np.where(partition == segment)
        mu_i = np.mean(img[coords], axis=0)
        mse_reg += np.sum((img[coords] - mu_i)**2)
return mse_reg / mse_error | [
"numpy.sum",
"numpy.count_nonzero",
"math.sqrt",
"numpy.argmax",
"numpy.zeros",
"networkx.max_weight_matching",
"numpy.where",
"networkx.Graph",
"numpy.mean",
"numpy.unique"
] | [((977, 997), 'numpy.unique', 'np.unique', (['partition'], {}), '(partition)\n', (986, 997), True, 'import numpy as np\n'), ((2655, 2675), 'numpy.unique', 'np.unique', (['partition'], {}), '(partition)\n', (2664, 2675), True, 'import numpy as np\n'), ((4112, 4159), 'numpy.where', 'np.where', (['(partition[:-1, :] != partition[1:, :])'], {}), '(partition[:-1, :] != partition[1:, :])\n', (4120, 4159), True, 'import numpy as np\n'), ((4228, 4275), 'numpy.where', 'np.where', (['(partition[:, :-1] != partition[:, 1:])'], {}), '(partition[:, :-1] != partition[:, 1:])\n', (4236, 4275), True, 'import numpy as np\n'), ((4643, 4694), 'numpy.where', 'np.where', (['(groundtruth[:-1, :] != groundtruth[1:, :])'], {}), '(groundtruth[:-1, :] != groundtruth[1:, :])\n', (4651, 4694), True, 'import numpy as np\n'), ((4762, 4813), 'numpy.where', 'np.where', (['(groundtruth[:, :-1] != groundtruth[:, 1:])'], {}), '(groundtruth[:, :-1] != groundtruth[:, 1:])\n', (4770, 4813), True, 'import numpy as np\n'), ((5174, 5184), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5182, 5184), True, 'import networkx as nx\n'), ((5282, 5337), 'math.sqrt', 'sqrt', (['(partition.shape[0] ** 2 + partition.shape[1] ** 2)'], {}), '(partition.shape[0] ** 2 + partition.shape[1] ** 2)\n', (5286, 5337), False, 'from math import sqrt\n'), ((6048, 6081), 'networkx.max_weight_matching', 'nx.max_weight_matching', (['bipartite'], {}), '(bipartite)\n', (6070, 6081), True, 'import networkx as nx\n'), ((6794, 6814), 'numpy.unique', 'np.unique', (['partition'], {}), '(partition)\n', (6803, 6814), True, 'import numpy as np\n'), ((6881, 6905), 'numpy.zeros', 'np.zeros', (['dimensions[-1]'], {}), '(dimensions[-1])\n', (6889, 6905), True, 'import numpy as np\n'), ((6963, 6988), 'numpy.zeros', 'np.zeros', (['dimensions[:-1]'], {}), '(dimensions[:-1])\n', (6971, 6988), True, 'import numpy as np\n'), ((7139, 7150), 'numpy.sum', 'np.sum', (['mse'], {}), '(mse)\n', (7145, 7150), True, 'import numpy as np\n'), ((1056, 
1090), 'numpy.count_nonzero', 'np.count_nonzero', (['(partition == s_i)'], {}), '(partition == s_i)\n', (1072, 1090), True, 'import numpy as np\n'), ((1228, 1251), 'numpy.unique', 'np.unique', (['segmentation'], {}), '(segmentation)\n', (1237, 1251), True, 'import numpy as np\n'), ((2788, 2811), 'numpy.unique', 'np.unique', (['segmentation'], {}), '(segmentation)\n', (2797, 2811), True, 'import numpy as np\n'), ((7042, 7062), 'numpy.mean', 'np.mean', (['img[..., i]'], {}), '(img[..., i])\n', (7049, 7062), True, 'import numpy as np\n'), ((7249, 7279), 'numpy.where', 'np.where', (['(partition == segment)'], {}), '(partition == segment)\n', (7257, 7279), True, 'import numpy as np\n'), ((7295, 7323), 'numpy.mean', 'np.mean', (['img[coords]'], {'axis': '(0)'}), '(img[coords], axis=0)\n', (7302, 7323), True, 'import numpy as np\n'), ((7343, 7376), 'numpy.sum', 'np.sum', (['((img[coords] - mu_i) ** 2)'], {}), '((img[coords] - mu_i) ** 2)\n', (7349, 7376), True, 'import numpy as np\n'), ((1403, 1440), 'numpy.count_nonzero', 'np.count_nonzero', (['(segmentation == g_i)'], {}), '(segmentation == g_i)\n', (1419, 1440), True, 'import numpy as np\n'), ((3043, 3069), 'numpy.where', 'np.where', (['(partition == s_k)'], {}), '(partition == s_k)\n', (3051, 3069), True, 'import numpy as np\n'), ((3274, 3297), 'numpy.argmax', 'np.argmax', (['intersection'], {}), '(intersection)\n', (3283, 3297), True, 'import numpy as np\n'), ((1560, 1620), 'numpy.count_nonzero', 'np.count_nonzero', (['((g_i == segmentation) * (partition == s_i))'], {}), '((g_i == segmentation) * (partition == s_i))\n', (1576, 1620), True, 'import numpy as np\n')] |
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class MultiEnvEvaluator:
    """Evaluate genomes by running their networks over a batch of gym-style
    environments and averaging the accumulated rewards."""

    def __init__(self, make_net, activate_net, batch_size=1, max_env_steps=None, make_env=None, envs=None):
        # Either reuse the provided environments or build `batch_size` fresh ones.
        self.envs = envs if envs is not None else [make_env() for _ in range(batch_size)]
        self.make_net = make_net
        self.activate_net = activate_net
        self.batch_size = batch_size
        self.max_env_steps = max_env_steps

    def eval_genome(self, genome, config, debug=False):
        """Run `genome`'s network in every environment until all finish (or the
        step budget runs out) and return the mean total reward."""
        net = self.make_net(genome, config, self.batch_size)
        fitnesses = np.zeros(self.batch_size)
        states = [env.reset() for env in self.envs]
        dones = [False] * self.batch_size
        step_num = 0
        while True:
            step_num += 1
            # Stop once the step budget is exhausted.
            if self.max_env_steps is not None and step_num == self.max_env_steps:
                break
            extra = {"debug": True, "step_num": step_num} if debug else {}
            actions = self.activate_net(net, states, **extra)
            assert len(actions) == len(self.envs)
            for idx, (env, action) in enumerate(zip(self.envs, actions)):
                if dones[idx]:
                    continue
                state, reward, finished, _ = env.step(action)
                fitnesses[idx] += reward
                # Keep the last valid state once an environment terminates.
                if not finished:
                    states[idx] = state
                dones[idx] = finished
            if all(dones):
                break
        return sum(fitnesses) / len(fitnesses)
| [
"numpy.zeros"
] | [((1191, 1216), 'numpy.zeros', 'np.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (1199, 1216), True, 'import numpy as np\n')] |
import os
import sys
import xml.etree.ElementTree as eT
from argparse import ArgumentParser
import json
from json import JSONEncoder
import numpy as np
import config
# CLI: the GraphML file to convert is supplied via -f/--filename (required).
parser = ArgumentParser()
parser.add_argument('-f', '--filename', dest='filename', required=True)
args = parser.parse_args()
class NumpyArrayEncoder(JSONEncoder):
    """JSON encoder that renders numpy arrays as plain nested lists."""

    def default(self, obj):
        # Anything that is not an ndarray falls back to the stock encoder
        # (which raises TypeError for unsupported types).
        if not isinstance(obj, np.ndarray):
            return JSONEncoder.default(self, obj)
        return obj.tolist()
def get_room_type(room_id, _graph):
    """Return the 'roomType' data value of the node with id `room_id`.

    Returns None implicitly when no matching node/data element exists.
    """
    for node in _graph.findall(namespace + 'node'):
        if node.get('id') != room_id:
            continue
        for datum in node.findall(namespace + 'data'):
            if datum.get('key') == 'roomType':
                return datum.text
# Sort the configured type codes so each room/edge type always maps to the
# same one-hot index across runs.
room_types_sorted = sorted(config.room_type_codes.keys())
edge_types_sorted = sorted(config.edge_type_codes.keys())
def one_hot_vector(ind, size):
    """Return a list of `size` ints that is 1 at index `ind` and 0 elsewhere.

    An `ind` outside [0, size) yields an all-zero vector (same as the
    original append loop, which never matched such an index).
    """
    # Comprehension replaces the manual append loop — identical output.
    return [1 if position == ind else 0 for position in range(size)]
def get_vector(entity_type, category):
    """Encode a room or edge type as a one-hot vector.

    All vectors are len(room_types_sorted) long — even for edge types —
    so that Tensorflow receives arrays of a uniform length.
    """
    key = str(entity_type).upper()
    if category == 'room':
        index = room_types_sorted.index(key)
        # return one_hot_vector(index, len(room_types_sorted))
    else:
        index = edge_types_sorted.index(key)
        # return one_hot_vector(index, len(edge_types_sorted))
    return one_hot_vector(index, len(room_types_sorted))
def get_empty_connection():
    """Return a 3 x len(room_types_sorted) nested list of zeros.

    This is the 'no connection' placeholder used for room pairs with no edge.
    """
    width = len(room_types_sorted)
    # Each row is an independent list so later mutation cannot alias.
    return [[0.0] * width for _ in range(3)]
# Namespace prefix required by ElementTree for every GraphML tag lookup.
namespace = '{http://graphml.graphdrawing.org/xmlns}'
# Resolve the input file relative to the directory containing this script.
dirname = os.path.dirname(os.path.realpath(sys.argv[0]))
filename = args.filename
full_filename = dirname + '/' + filename
tree = eT.parse(full_filename)
root = tree.getroot()
# The first child of the GraphML root is the graph element.
graph = root[0]
room_ids = [room.get('id') for room in graph.findall(namespace + 'node')]
# NOTE(review): the connection map is hard-coded to 20x20 — graphs with more
# than 20 rooms are silently truncated, smaller ones are zero-padded. Confirm
# this matches the downstream model's expected input size.
length = 20 # len(room_ids)
connmap = []
# Build one entry per ordered (from, to) room pair.
for i in range(length):
    id_from = ''
    try:
        id_from = room_ids[i]
    except IndexError:
        pass  # padding row: fewer than `length` rooms in the graph
    for j in range(length):
        # initialize with 'no connection'
        connection = get_empty_connection()
        id_to = ''
        try:
            id_to = room_ids[j]
        except IndexError:
            pass  # padding column
        if id_from != id_to and not (id_from == '' and id_to == ''):
            # Look for a directed edge from id_from to id_to; if found, encode
            # (source room type, target room type, edge type) as three vectors.
            for edge in graph.findall(namespace + 'edge'):
                source_id = edge.get('source')
                target_id = edge.get('target')
                if id_from == source_id and id_to == target_id:
                    source_vector = get_vector(get_room_type(room_ids[i], graph), 'room')
                    target_vector = get_vector(get_room_type(target_id, graph), 'room')
                    edge_vector = get_vector(edge.find(namespace + 'data').text, 'edge')
                    connection = [source_vector, target_vector, edge_vector]
        connmap.append(connection)
assert len(connmap) == length * length
# print(connmap)
connmap_arr = np.array(connmap) # .reshape((length, length))
# print(len(connmap_arr[0]))
numpyData = {"array": connmap_arr}
encodedNumpyData = json.dumps(numpyData, cls=NumpyArrayEncoder)
# NOTE(review): json.dump of an already-encoded JSON string double-encodes it,
# so the output file contains one quoted JSON string. This matches the
# json.loads decode path shown in the comments below — confirm it is intended.
with open(full_filename + '_onehot.json', 'w') as onehot_json_file:
    json.dump(encodedNumpyData, onehot_json_file)
# print("Printing JSON serialized NumPy array")
# print(encodedNumpyData)
#
# # Deserialization
# print("Decode JSON serialized NumPy array")
# decodedArrays = json.loads(encodedNumpyData)
#
# finalNumpyArray = np.asarray(decodedArrays["array"])
# print("NumPy Array")
# print(finalNumpyArray)
| [
"xml.etree.ElementTree.parse",
"json.dump",
"argparse.ArgumentParser",
"config.edge_type_codes.keys",
"os.path.realpath",
"json.dumps",
"numpy.array",
"config.room_type_codes.keys",
"json.JSONEncoder.default"
] | [((178, 194), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (192, 194), False, 'from argparse import ArgumentParser\n'), ((1912, 1935), 'xml.etree.ElementTree.parse', 'eT.parse', (['full_filename'], {}), '(full_filename)\n', (1920, 1935), True, 'import xml.etree.ElementTree as eT\n'), ((3168, 3185), 'numpy.array', 'np.array', (['connmap'], {}), '(connmap)\n', (3176, 3185), True, 'import numpy as np\n'), ((3301, 3345), 'json.dumps', 'json.dumps', (['numpyData'], {'cls': 'NumpyArrayEncoder'}), '(numpyData, cls=NumpyArrayEncoder)\n', (3311, 3345), False, 'import json\n'), ((797, 826), 'config.room_type_codes.keys', 'config.room_type_codes.keys', ([], {}), '()\n', (824, 826), False, 'import config\n'), ((855, 884), 'config.edge_type_codes.keys', 'config.edge_type_codes.keys', ([], {}), '()\n', (882, 884), False, 'import config\n'), ((1808, 1837), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1824, 1837), False, 'import os\n'), ((3418, 3463), 'json.dump', 'json.dump', (['encodedNumpyData', 'onehot_json_file'], {}), '(encodedNumpyData, onehot_json_file)\n', (3427, 3463), False, 'import json\n'), ((449, 479), 'json.JSONEncoder.default', 'JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (468, 479), False, 'from json import JSONEncoder\n')] |
import math
from typing import Optional, Tuple
import numpy as np
from squad.constants import HALF_PI, AngleType, Leg
from .base import BodyParameters
from .core import coord_rotate_xyz
def _foot_xyz_hip_frame(
    hip_theta: float,
    femur_theta: float,
    leg_theta: float,
    body_params: Optional[BodyParameters] = None,
    *,
    angle_type: AngleType = AngleType.DEGREES,
) -> Tuple[float, float, float]:
    """Compute the foot coordinates relative to the hip joint.

    The returned tuple is already reordered for the body's coordinate
    system: (Z, X, Y) of the hip-local computation.
    """
    params = BodyParameters() if body_params is None else body_params
    # Normalize all joint angles to radians, applying the fixed 90-degree
    # femur/leg offsets.
    if angle_type == AngleType.DEGREES:
        theta_h = math.radians(hip_theta)
        theta_f = math.radians(femur_theta + 90.0)
        theta_l = math.radians(leg_theta - 90.0)
    else:
        theta_h = hip_theta
        theta_f = femur_theta + HALF_PI
        theta_l = leg_theta - HALF_PI
    # Trigonometric terms shared by several coordinate expressions.
    sin_h = math.sin(theta_h)
    cos_h = math.cos(theta_h)
    cos_f = math.cos(theta_f)
    cos_fl = math.cos(theta_f + theta_l)
    x_hip = (
        (params.l_leg * sin_h * cos_fl)
        + (params.l_femur * sin_h * cos_f)
        + (params.l_hip * cos_h)
    )
    y_hip = (
        -(params.l_leg * cos_h * cos_fl)
        - (params.l_femur * cos_h * cos_f)
        + (params.l_hip * sin_h)
    )
    z_hip = -(params.l_leg * math.sin(theta_f + theta_l)) - (
        params.l_femur * math.sin(theta_f)
    )
    return z_hip, x_hip, y_hip
def hip_xyz(
    leg: Leg,
    roll: float,
    pitch: float,
    yaw: float,
    body_params: Optional[BodyParameters] = None,
    *,
    angle_type: AngleType = AngleType.DEGREES,
) -> Tuple[float, float, float]:
    """Compute the hip origin for `leg` given the body orientation.

    Parameters
    ----------
    leg : Leg
        Which leg's hip to locate.
    roll : float
        Main-body roll angle.
    pitch : float
        Main-body pitch angle.
    yaw : float
        Main-body yaw angle.
    body_params : BodyParameters, optional
        Robot body geometry (defaults to the configured values).
    angle_type : AngleType, default=AngleType.DEGREES
        Units of the given angles.

    Returns
    -------
    Tuple[float, float, float]
        Hip X, Y and Z in the body's coordinate frame.
    """
    params = BodyParameters() if body_params is None else body_params
    # Pitch and yaw are negated to match the rotation convention used by
    # coord_rotate_xyz below.
    if angle_type == AngleType.DEGREES:
        rot_roll = math.radians(roll)
        rot_pitch = math.radians(-pitch)
        rot_yaw = math.radians(-yaw)
    else:
        rot_roll = roll
        rot_pitch = -pitch
        rot_yaw = -yaw
    # Legs with index > 2 get a negative X offset (presumably the rear pair);
    # even-indexed legs get a negative Y offset — TODO confirm leg numbering.
    offset_x = -params.l_body / 2.0 if leg > 2 else params.l_body / 2.0
    offset_y = -params.w_body / 2.0 if leg % 2 == 0 else params.w_body / 2.0
    # Rotate the center-of-mass-adjusted hip offset by the body orientation.
    return coord_rotate_xyz(
        offset_x + params.cm_dx,
        offset_y + params.cm_dy,
        params.cm_dz,
        rot_roll,
        rot_pitch,
        rot_yaw,
        angle_type=AngleType.RADIANS,
    )
def hip_pos(
    leg: Leg,
    orientation: np.ndarray,
    body_params: Optional[BodyParameters] = None,
    *,
    angle_type: AngleType = AngleType.DEGREES,
) -> np.ndarray:
    """Vector form of :func:`hip_xyz`.

    Parameters
    ----------
    leg : Leg
        Which leg's hip to locate.
    orientation : np.ndarray
        Body orientation given as (roll, pitch, yaw).
    body_params : BodyParameters, optional
        Robot body geometry (defaults to the configured values).
    angle_type : AngleType, default=AngleType.DEGREES
        Units of the orientation angles.

    Returns
    -------
    np.ndarray
        Hip position (X, Y, Z) in the body's coordinate frame.
    """
    coords = hip_xyz(
        leg,
        orientation[0],
        orientation[1],
        orientation[2],
        body_params=body_params,
        angle_type=angle_type,
    )
    return np.array(coords)
def foot_xyz(
    leg: Leg,
    hip_theta: float,
    femur_theta: float,
    leg_theta: float,
    roll: float = 0.0,
    pitch: float = 0.0,
    yaw: float = 0.0,
    body_params: Optional[BodyParameters] = None,
    *,
    angle_type: AngleType = AngleType.DEGREES,
) -> Tuple[float, float, float]:
    """Locate the foot of `leg` in the main body frame.

    Parameters
    ----------
    leg : Leg
        The leg whose foot position is computed.
    hip_theta : float
        Rotation angle of the hip joint.
    femur_theta : float
        Rotation angle of the femur joint.
    leg_theta : float
        Rotation angle of the leg joint.
    roll : float, default=0.0
        Current main-body roll.
    pitch : float, default=0.0
        Current main-body pitch.
    yaw : float, default=0.0
        Current main-body yaw.
    body_params : BodyParameters, optional
        Robot body geometry (defaults to the configured values).
    angle_type : AngleType, default=AngleType.DEGREES
        Units of all given angles.

    Returns
    -------
    Tuple[float, float, float]
        Foot X, Y and Z in the body's coordinate frame.
    """
    params = BodyParameters() if body_params is None else body_params
    if angle_type == AngleType.DEGREES:
        hip_rad, femur_rad, leg_rad, roll_rad, pitch_rad, yaw_rad = (
            math.radians(v)
            for v in (hip_theta, femur_theta, leg_theta, roll, pitch, yaw)
        )
    else:
        hip_rad, femur_rad, leg_rad = hip_theta, femur_theta, leg_theta
        roll_rad, pitch_rad, yaw_rad = roll, pitch, yaw
    # Hip origin in the body frame.
    hip_x, hip_y, hip_z = hip_xyz(
        leg,
        roll_rad,
        pitch_rad,
        yaw_rad,
        body_params=params,
        angle_type=AngleType.RADIANS,
    )
    # Foot position relative to that hip.
    foot_x, foot_y, foot_z = _foot_xyz_hip_frame(
        hip_rad,
        femur_rad,
        leg_rad,
        body_params=params,
        angle_type=AngleType.RADIANS,
    )
    # Even-indexed legs have their Y coordinate mirrored (presumably the
    # opposite side of the body) — TODO confirm leg numbering convention.
    if leg % 2 == 0:
        foot_y = -foot_y
    return (hip_x + foot_x, hip_y + foot_y, hip_z + foot_z)
def foot_pos(
    leg: Leg,
    thetas: np.ndarray,
    orientation: Optional[np.ndarray] = None,
    body_params: Optional[BodyParameters] = None,
    *,
    angle_type: AngleType = AngleType.DEGREES,
) -> np.ndarray:
    """Vector form of :func:`foot_xyz`.

    Parameters
    ----------
    leg : Leg
        The leg whose foot position is computed.
    thetas : np.ndarray
        Joint angles given as (hip, femur, leg).
    orientation : np.ndarray, optional
        Body orientation (roll, pitch, yaw); defaults to level, i.e. zeros.
    body_params : BodyParameters, optional
        Robot body geometry (defaults to the configured values).
    angle_type : AngleType, default=AngleType.DEGREES
        Units of all given angles.

    Returns
    -------
    np.ndarray
        Foot position (X, Y, Z) in the body's coordinate frame.
    """
    orn = np.array([0.0, 0.0, 0.0]) if orientation is None else orientation
    return np.array(
        foot_xyz(
            leg,
            thetas[0],
            thetas[1],
            thetas[2],
            roll=orn[0],
            pitch=orn[1],
            yaw=orn[2],
            body_params=body_params,
            angle_type=angle_type,
        )
    )
def leg_servo_to_knee_angle(
    servo_theta: float,
    body_params: Optional[BodyParameters] = None,
    *,
    angle_type: AngleType = AngleType.DEGREES,
) -> float:
    """Converts the given leg servo angle to the corresponding knee
    joint angle.

    Parameters
    ----------
    servo_theta : float
        The servo angle to convert to the corresponding knee joint
        angle.
    body_params : BodyParameters, optional
        The parameters describing the robot body (if not provided then
        the default values from the configuration are used).
    angle_type : AngleType, default=AngleType.DEGREES
        The type/units the `alpha`, `beta`, and `gamma` angles are given
        in, either ``DEGREES`` (default) or ``RADIANS``.

    Returns
    -------
    float
        The corresponding knee angle based on the given `servo_angle`.
    """
    b_ps = body_params if body_params is not None else BodyParameters()
    # Shift the servo angle by 90 degrees and work in radians throughout.
    if angle_type == AngleType.DEGREES:
        ts = math.radians(servo_theta + 90.0)
    else:
        ts = servo_theta + HALF_PI
    # - Pre-compute trig values for speed
    s_ts = math.sin(ts)
    c_ts = math.cos(ts)
    # - Compute angle
    # NOTE(review): the following appears to solve the push-rod linkage
    # geometry — beta2 is a law-of-cosines expansion over the rod-arm and
    # rod-femur links; phi_opp applies the law of cosines again for the
    # rod/rod-leg pair, and phi_1/phi_2 look like law-of-sines terms.
    # Confirm against the mechanism drawing.
    h_adj = math.atan2(-b_ps.h_rod_femur, b_ps.l_rod_femur)
    beta2 = (
        (b_ps.l_rod_arm ** 2)
        + (b_ps.l_rod_femur ** 2)
        - (2.0 * b_ps.l_rod_arm * b_ps.l_rod_femur * c_ts)
    )
    beta = beta2 ** 0.5
    phi_opp = math.acos(
        (beta2 - (b_ps.l_rod_leg ** 2) - (b_ps.l_rod ** 2))
        / (-2.0 * b_ps.l_rod_leg * b_ps.l_rod)
    )
    phi_1 = math.asin((b_ps.l_rod_arm * s_ts) / beta)
    phi_2 = math.asin((b_ps.l_rod * math.sin(phi_opp)) / beta)
    ret = HALF_PI - (phi_1 + phi_2 - h_adj)
    # Convert back to the caller's units.
    if angle_type == AngleType.DEGREES:
        return math.degrees(ret)
    return ret
| [
"math.asin",
"math.atan2",
"math.radians",
"math.sin",
"math.acos",
"numpy.array",
"math.cos",
"math.degrees"
] | [((892, 905), 'math.sin', 'math.sin', (['r_h'], {}), '(r_h)\n', (900, 905), False, 'import math\n'), ((916, 929), 'math.cos', 'math.cos', (['r_h'], {}), '(r_h)\n', (924, 929), False, 'import math\n'), ((940, 953), 'math.cos', 'math.cos', (['r_f'], {}), '(r_f)\n', (948, 953), False, 'import math\n'), ((965, 984), 'math.cos', 'math.cos', (['(r_f + r_l)'], {}), '(r_f + r_l)\n', (973, 984), False, 'import math\n'), ((9821, 9833), 'math.sin', 'math.sin', (['ts'], {}), '(ts)\n', (9829, 9833), False, 'import math\n'), ((9845, 9857), 'math.cos', 'math.cos', (['ts'], {}), '(ts)\n', (9853, 9857), False, 'import math\n'), ((9893, 9940), 'math.atan2', 'math.atan2', (['(-b_ps.h_rod_femur)', 'b_ps.l_rod_femur'], {}), '(-b_ps.h_rod_femur, b_ps.l_rod_femur)\n', (9903, 9940), False, 'import math\n'), ((10124, 10226), 'math.acos', 'math.acos', (['((beta2 - b_ps.l_rod_leg ** 2 - b_ps.l_rod ** 2) / (-2.0 * b_ps.l_rod_leg *\n b_ps.l_rod))'], {}), '((beta2 - b_ps.l_rod_leg ** 2 - b_ps.l_rod ** 2) / (-2.0 * b_ps.\n l_rod_leg * b_ps.l_rod))\n', (10133, 10226), False, 'import math\n'), ((10261, 10300), 'math.asin', 'math.asin', (['(b_ps.l_rod_arm * s_ts / beta)'], {}), '(b_ps.l_rod_arm * s_ts / beta)\n', (10270, 10300), False, 'import math\n'), ((620, 643), 'math.radians', 'math.radians', (['hip_theta'], {}), '(hip_theta)\n', (632, 643), False, 'import math\n'), ((658, 690), 'math.radians', 'math.radians', (['(femur_theta + 90.0)'], {}), '(femur_theta + 90.0)\n', (670, 690), False, 'import math\n'), ((705, 735), 'math.radians', 'math.radians', (['(leg_theta - 90.0)'], {}), '(leg_theta - 90.0)\n', (717, 735), False, 'import math\n'), ((2760, 2778), 'math.radians', 'math.radians', (['roll'], {}), '(roll)\n', (2772, 2778), False, 'import math\n'), ((2793, 2813), 'math.radians', 'math.radians', (['(-pitch)'], {}), '(-pitch)\n', (2805, 2813), False, 'import math\n'), ((2828, 2846), 'math.radians', 'math.radians', (['(-yaw)'], {}), '(-yaw)\n', (2840, 2846), False, 'import math\n'), ((6265, 
6288), 'math.radians', 'math.radians', (['hip_theta'], {}), '(hip_theta)\n', (6277, 6288), False, 'import math\n'), ((6303, 6328), 'math.radians', 'math.radians', (['femur_theta'], {}), '(femur_theta)\n', (6315, 6328), False, 'import math\n'), ((6343, 6366), 'math.radians', 'math.radians', (['leg_theta'], {}), '(leg_theta)\n', (6355, 6366), False, 'import math\n'), ((6381, 6399), 'math.radians', 'math.radians', (['roll'], {}), '(roll)\n', (6393, 6399), False, 'import math\n'), ((6414, 6433), 'math.radians', 'math.radians', (['pitch'], {}), '(pitch)\n', (6426, 6433), False, 'import math\n'), ((6448, 6465), 'math.radians', 'math.radians', (['yaw'], {}), '(yaw)\n', (6460, 6465), False, 'import math\n'), ((8346, 8371), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (8354, 8371), True, 'import numpy as np\n'), ((9689, 9721), 'math.radians', 'math.radians', (['(servo_theta + 90.0)'], {}), '(servo_theta + 90.0)\n', (9701, 9721), False, 'import math\n'), ((10466, 10483), 'math.degrees', 'math.degrees', (['ret'], {}), '(ret)\n', (10478, 10483), False, 'import math\n'), ((1315, 1328), 'math.sin', 'math.sin', (['r_f'], {}), '(r_f)\n', (1323, 1328), False, 'import math\n'), ((1276, 1295), 'math.sin', 'math.sin', (['(r_f + r_l)'], {}), '(r_f + r_l)\n', (1284, 1295), False, 'import math\n'), ((10339, 10356), 'math.sin', 'math.sin', (['phi_opp'], {}), '(phi_opp)\n', (10347, 10356), False, 'import math\n')] |
import base64
import gym
import io
import numpy as np
from typing import Dict
import zlib
from ray.rllib.utils.annotations import DeveloperAPI
def _serialize_ndarray(array: np.ndarray) -> str:
    """Pack a numpy ndarray into a Base64 string of its zlib-compressed bytes.

    numpy.save() is used instead of pickling to ensure compatibility.

    Args:
        array: numpy ndarray.

    Returns:
        b64 escaped string.
    """
    with io.BytesIO() as buf:
        np.save(buf, array)
        raw = buf.getvalue()
    return base64.b64encode(zlib.compress(raw)).decode("ascii")
def _deserialize_ndarray(b64_string: str) -> np.ndarray:
    """Unpack a Base64 string produced by ``_serialize_ndarray``.

    The unescaped, decompressed bytes are assumed to be npy format.

    Args:
        b64_string: Base64 escaped string.

    Returns:
        numpy ndarray.
    """
    compressed = base64.b64decode(b64_string)
    payload = zlib.decompress(compressed)
    return np.load(io.BytesIO(payload))
@DeveloperAPI
def gym_space_to_dict(space: gym.spaces.Space) -> Dict:
    """Serialize a gym Space into a JSON-serializable dict.

    Args:
        space: gym.spaces.Space

    Returns:
        Serialized JSON-compatible dict.

    Raises:
        ValueError: if the space type is not supported.
    """
    # Box / Discrete / MultiDiscrete are leaves; Tuple / Dict recurse.
    if isinstance(space, gym.spaces.Box):
        return {
            "space": "box",
            "low": _serialize_ndarray(space.low),
            "high": _serialize_ndarray(space.high),
            "shape": space._shape,  # shape is a tuple.
            "dtype": space.dtype.str,
        }
    if isinstance(space, gym.spaces.Discrete):
        serialized = {
            "space": "discrete",
            "n": space.n,
        }
        # "start" (an offset) is a relatively new Discrete space feature.
        if hasattr(space, "start"):
            serialized["start"] = space.start
        return serialized
    if isinstance(space, gym.spaces.MultiDiscrete):
        return {
            "space": "multi-discrete",
            "nvec": _serialize_ndarray(space.nvec),
            "dtype": space.dtype.str,
        }
    if isinstance(space, gym.spaces.Tuple):
        return {
            "space": "tuple",
            "spaces": [gym_space_to_dict(sub) for sub in space.spaces],
        }
    if isinstance(space, gym.spaces.Dict):
        return {
            "space": "dict",
            "spaces": {key: gym_space_to_dict(sub) for key, sub in space.spaces.items()},
        }
    raise ValueError("Unknown space type for serialization, ", type(space))
@DeveloperAPI
def gym_space_from_dict(d: Dict) -> gym.spaces.Space:
    """De-serialize a dict (from ``gym_space_to_dict``) into a gym Space.

    Args:
        d: serialized space dict.

    Returns:
        De-serialized gym space.

    Raises:
        ValueError: if the "space" tag is not recognized.
    """

    def _strip(spec: Dict) -> Dict:
        """Drop the type tag and materialize the dtype before using as kwargs."""
        del spec["space"]
        if "dtype" in spec:
            spec["dtype"] = np.dtype(spec["dtype"])
        return spec

    space_type = d["space"]
    if space_type == "box":
        d["low"] = _deserialize_ndarray(d["low"])
        d["high"] = _deserialize_ndarray(d["high"])
        return gym.spaces.Box(**_strip(d))
    if space_type == "discrete":
        return gym.spaces.Discrete(**_strip(d))
    if space_type == "multi-discrete":
        d["nvec"] = _deserialize_ndarray(d["nvec"])
        return gym.spaces.MultiDiscrete(**_strip(d))
    if space_type == "tuple":
        # Tuple/Dict keep their "space" tag intact, matching the original.
        return gym.spaces.Tuple(spaces=[gym_space_from_dict(sp) for sp in d["spaces"]])
    if space_type == "dict":
        return gym.spaces.Dict(
            spaces={k: gym_space_from_dict(sp) for k, sp in d["spaces"].items()}
        )
    raise ValueError("Unknown space type for de-serialization, ", space_type)
| [
"io.BytesIO",
"numpy.save",
"numpy.dtype",
"base64.b64decode",
"gym.spaces.Tuple",
"gym.spaces.Dict"
] | [((456, 468), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (466, 468), False, 'import io\n'), ((473, 492), 'numpy.save', 'np.save', (['buf', 'array'], {}), '(buf, array)\n', (480, 492), True, 'import numpy as np\n'), ((3900, 3931), 'gym.spaces.Tuple', 'gym.spaces.Tuple', ([], {'spaces': 'spaces'}), '(spaces=spaces)\n', (3916, 3931), False, 'import gym\n'), ((4074, 4104), 'gym.spaces.Dict', 'gym.spaces.Dict', ([], {'spaces': 'spaces'}), '(spaces=spaces)\n', (4089, 4104), False, 'import gym\n'), ((3150, 3170), 'numpy.dtype', 'np.dtype', (["d['dtype']"], {}), "(d['dtype'])\n", (3158, 3170), True, 'import numpy as np\n'), ((891, 919), 'base64.b64decode', 'base64.b64decode', (['b64_string'], {}), '(b64_string)\n', (907, 919), False, 'import base64\n')] |
import os
import numpy as np
from capreolus import ConfigOption, Dependency, evaluator
from capreolus.sampler import PredSampler
from capreolus.searcher import Searcher
from capreolus.task import Task
from capreolus.utils.loginit import get_logger
logger = get_logger(__name__)
@Task.register
class ReRerankTask(Task):
    """Three-stage ranking pipeline: first-stage retrieval (``rank``) followed
    by two successive re-ranking stages (``rerank1``, then ``rerank2`` over the
    top ``topn`` results of the second stage).
    """
    module_name = "rererank"
    config_spec = [
        ConfigOption("fold", "s1", "fold to run"),
        ConfigOption("optimize", "map", "metric to maximize on the dev set"),
        ConfigOption("topn", 100, "number of stage two results to rerank"),
    ]
    dependencies = [
        Dependency(
            key="benchmark", module="benchmark", name="robust04.yang19", provide_this=True, provide_children=["collection"]
        ),
        Dependency(key="rank", module="task", name="rank", provide_this=True),
        Dependency(key="rerank1", module="task", name="rerank"),
        Dependency(key="rerank2", module="task", name="rerank"),
    ]
    commands = ["train", "evaluate", "traineval"] + Task.help_commands
    default_command = "describe"
    def traineval(self):
        """Run training followed by evaluation."""
        self.train()
        self.evaluate()
    def train(self):
        """Run the full pipeline for the configured fold and return the
        third-stage (rerank2) results."""
        fold = self.config["fold"]
        logger.debug("results path: %s", self.get_results_path())
        # Stage 1: first-stage retrieval.
        self.rank.search()
        rank_results = self.rank.evaluate()
        best_search_run_path = rank_results["path"][fold]
        best_search_run = Searcher.load_trec_run(best_search_run_path)
        # Stage 2: re-rank the first-stage run (including the train split).
        second_stage_results = self.rerank1.rerank_run(best_search_run, self.rerank1.get_results_path(), include_train=True)
        # Keep only the topn highest-scoring docs per query for stage 3.
        # NOTE(review): qids appearing in more than one split would overwrite
        # each other here — confirm the splits have disjoint qids.
        second_stage_topn = {
            qid: dict(sorted(docids.items(), key=lambda x: x[1], reverse=True)[: self.config["topn"]])
            for split in ("train", "dev", "test")
            for qid, docids in second_stage_results[split].items()
        }
        # Stage 3: final re-ranking.
        third_stage_results = self.rerank2.rerank_run(second_stage_topn, self.get_results_path())
        return third_stage_results
    def evaluate(self):
        """Evaluate test-split predictions for the configured fold, then report
        metrics averaged over every fold whose predictions exist on disk."""
        fold = self.config["fold"]
        train_output_path = self.get_results_path()
        test_output_path = train_output_path / "pred" / "test" / "best"
        logger.debug("results path: %s", train_output_path)
        if os.path.exists(test_output_path):
            # Reuse cached predictions when present.
            test_preds = Searcher.load_trec_run(test_output_path)
        else:
            # NOTE(review): this branch references self.reranker, which is not
            # among this task's declared dependencies (rerank1/rerank2). It
            # looks copied from the single-stage rerank task — confirm it runs.
            self.rank.search()
            rank_results = self.rank.evaluate()
            best_search_run_path = rank_results["path"][fold]
            best_search_run = Searcher.load_trec_run(best_search_run_path)
            docids = set(docid for querydocs in best_search_run.values() for docid in querydocs)
            self.reranker.extractor.preprocess(
                qids=best_search_run.keys(), docids=docids, topics=self.benchmark.topics[self.benchmark.query_type]
            )
            self.reranker.build_model()
            self.reranker.searcher_scores = best_search_run
            self.reranker.trainer.load_best_model(self.reranker, train_output_path)
            test_run = {
                qid: docs for qid, docs in best_search_run.items() if qid in self.benchmark.folds[fold]["predict"]["test"]
            }
            test_dataset = PredSampler()
            test_dataset.prepare(test_run, self.benchmark.qrels, self.reranker.extractor)
            test_preds = self.reranker.trainer.predict(self.reranker, test_dataset, test_output_path)
        metrics = evaluator.eval_runs(test_preds, self.benchmark.qrels, evaluator.DEFAULT_METRICS, self.benchmark.relevance_level)
        logger.info("rerank: fold=%s test metrics: %s", fold, metrics)
        print("\ncomputing metrics across all folds")
        avg = {}
        found = 0  # NOTE(review): incremented below but never read
        for fold in self.benchmark.folds:  # shadows the outer `fold` variable
            # TODO fix by using multiple Tasks
            from pathlib import Path
            # Map this fold's prediction path onto the sibling fold's directory.
            pred_path = Path(test_output_path.as_posix().replace("fold-" + self.config["fold"], "fold-" + fold))
            if not os.path.exists(pred_path):
                print("\tfold=%s results are missing and will not be included" % fold)
                continue
            found += 1
            preds = Searcher.load_trec_run(pred_path)
            metrics = evaluator.eval_runs(preds, self.benchmark.qrels, evaluator.DEFAULT_METRICS, self.benchmark.relevance_level)
            for metric, val in metrics.items():
                avg.setdefault(metric, []).append(val)
        # Mean of each metric across the folds that were found.
        avg = {k: np.mean(v) for k, v in avg.items()}
        logger.info("rerank: average cross-validated metrics when choosing iteration based on '%s':", self.config["optimize"])
        for metric, score in sorted(avg.items()):
            logger.info("%25s: %0.4f", metric, score)
| [
"capreolus.ConfigOption",
"capreolus.utils.loginit.get_logger",
"capreolus.Dependency",
"capreolus.sampler.PredSampler",
"os.path.exists",
"capreolus.searcher.Searcher.load_trec_run",
"capreolus.evaluator.eval_runs",
"numpy.mean"
] | [((260, 280), 'capreolus.utils.loginit.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (270, 280), False, 'from capreolus.utils.loginit import get_logger\n'), ((381, 422), 'capreolus.ConfigOption', 'ConfigOption', (['"""fold"""', '"""s1"""', '"""fold to run"""'], {}), "('fold', 's1', 'fold to run')\n", (393, 422), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((432, 500), 'capreolus.ConfigOption', 'ConfigOption', (['"""optimize"""', '"""map"""', '"""metric to maximize on the dev set"""'], {}), "('optimize', 'map', 'metric to maximize on the dev set')\n", (444, 500), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((510, 576), 'capreolus.ConfigOption', 'ConfigOption', (['"""topn"""', '(100)', '"""number of stage two results to rerank"""'], {}), "('topn', 100, 'number of stage two results to rerank')\n", (522, 576), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((613, 740), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""benchmark"""', 'module': '"""benchmark"""', 'name': '"""robust04.yang19"""', 'provide_this': '(True)', 'provide_children': "['collection']"}), "(key='benchmark', module='benchmark', name='robust04.yang19',\n provide_this=True, provide_children=['collection'])\n", (623, 740), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((768, 837), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""rank"""', 'module': '"""task"""', 'name': '"""rank"""', 'provide_this': '(True)'}), "(key='rank', module='task', name='rank', provide_this=True)\n", (778, 837), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((847, 902), 'capreolus.Dependency', 'Dependency', ([], {'key': '"""rerank1"""', 'module': '"""task"""', 'name': '"""rerank"""'}), "(key='rerank1', module='task', name='rerank')\n", (857, 902), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((912, 967), 'capreolus.Dependency', 'Dependency', ([], 
{'key': '"""rerank2"""', 'module': '"""task"""', 'name': '"""rerank"""'}), "(key='rerank2', module='task', name='rerank')\n", (922, 967), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((1430, 1474), 'capreolus.searcher.Searcher.load_trec_run', 'Searcher.load_trec_run', (['best_search_run_path'], {}), '(best_search_run_path)\n', (1452, 1474), False, 'from capreolus.searcher import Searcher\n'), ((2251, 2283), 'os.path.exists', 'os.path.exists', (['test_output_path'], {}), '(test_output_path)\n', (2265, 2283), False, 'import os\n'), ((3458, 3575), 'capreolus.evaluator.eval_runs', 'evaluator.eval_runs', (['test_preds', 'self.benchmark.qrels', 'evaluator.DEFAULT_METRICS', 'self.benchmark.relevance_level'], {}), '(test_preds, self.benchmark.qrels, evaluator.\n DEFAULT_METRICS, self.benchmark.relevance_level)\n', (3477, 3575), False, 'from capreolus import ConfigOption, Dependency, evaluator\n'), ((2310, 2350), 'capreolus.searcher.Searcher.load_trec_run', 'Searcher.load_trec_run', (['test_output_path'], {}), '(test_output_path)\n', (2332, 2350), False, 'from capreolus.searcher import Searcher\n'), ((2536, 2580), 'capreolus.searcher.Searcher.load_trec_run', 'Searcher.load_trec_run', (['best_search_run_path'], {}), '(best_search_run_path)\n', (2558, 2580), False, 'from capreolus.searcher import Searcher\n'), ((3232, 3245), 'capreolus.sampler.PredSampler', 'PredSampler', ([], {}), '()\n', (3243, 3245), False, 'from capreolus.sampler import PredSampler\n'), ((4174, 4207), 'capreolus.searcher.Searcher.load_trec_run', 'Searcher.load_trec_run', (['pred_path'], {}), '(pred_path)\n', (4196, 4207), False, 'from capreolus.searcher import Searcher\n'), ((4230, 4341), 'capreolus.evaluator.eval_runs', 'evaluator.eval_runs', (['preds', 'self.benchmark.qrels', 'evaluator.DEFAULT_METRICS', 'self.benchmark.relevance_level'], {}), '(preds, self.benchmark.qrels, evaluator.DEFAULT_METRICS,\n self.benchmark.relevance_level)\n', (4249, 4341), False, 'from capreolus 
import ConfigOption, Dependency, evaluator\n'), ((4460, 4470), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (4467, 4470), True, 'import numpy as np\n'), ((3991, 4016), 'os.path.exists', 'os.path.exists', (['pred_path'], {}), '(pred_path)\n', (4005, 4016), False, 'import os\n')] |
#!/usr/bin/env python3
import numpy as np
import pickle
from scipy.spatial.transform import Rotation
def load_poses(path):
    """Load object and camera world poses from ``<path>/ep_data.pkl``.

    The pickle is expected to hold 'obj_world_pose' (a 7-vector: xyz followed
    by a quaternion) and 'cam_pose' (a sequence of such 7-vectors). Stored
    quaternions appear to be (w, x, y, z) — they are rotated into scipy's
    (x, y, z, w) order before use.

    Returns:
        Tuple ``(obj_pose, cam_poses)``: a 4x4 homogeneous transform for the
        object and an (N, 4, 4) array of camera transforms.
    """
    # Context manager closes the handle even on error (the original leaked it).
    with open(path + '/ep_data.pkl', 'rb') as f:
        data = pickle.load(f)

    def pose_to_mat(pose):
        """Convert a 7-vector (xyz + wxyz quaternion) into a 4x4 matrix."""
        mat = np.eye(4)
        quat = list(pose[3:])
        # (w, x, y, z) -> (x, y, z, w) for scipy.
        quat = [quat[1], quat[2], quat[3], quat[0]]
        # as_matrix() replaces as_dcm(), which was removed in SciPy 1.6;
        # the extra pi rotation about Y matches the original convention.
        mat[:3, :3] = (
            Rotation.from_quat(quat) * Rotation.from_euler('xyz', [0, np.pi, 0])
        ).as_matrix()
        mat[:3, 3] = pose[:3]
        return mat

    poses = [pose_to_mat(data['obj_world_pose'])]
    poses.extend(pose_to_mat(p) for p in data['cam_pose'])
    gt_mat = np.stack(poses, axis=0)
    return gt_mat[0], gt_mat[1:]
def save_poses(obj_pose, cam_poses, path):
    """Write noisy object poses relative to each camera to <path>/obj_det_poses.pkl.

    For every camera pose the object pose is expressed in that camera's frame,
    perturbed with Gaussian noise on both rotation and translation, and stored
    as a 7-vector [x, y, z, qw, qx, qy, qz] under the key '<index>.png'.
    """
    data = {}
    for ind, cam_pose in enumerate(cam_poses):
        # Object pose expressed in the camera frame.
        diff = np.linalg.inv(cam_pose) @ obj_pose
        rot = diff[:3, :3]
        trans = diff[:3, 3]
        # Simulate detector noise: a small random rotation composed on the
        # left, plus translation jitter.  as_matrix()/from_matrix() replace
        # as_dcm()/from_dcm(), which were removed in SciPy >= 1.6.
        rot = Rotation.from_euler('xyz', np.random.normal(0, 0.1, [3])).as_matrix() @ rot
        trans = trans + np.random.normal(0, 0.05, [3])
        quat = Rotation.from_matrix(rot).as_quat()
        # Reorder quaternion from scipy's (x, y, z, w) to (w, x, y, z) storage.
        quat = quat[[3, 0, 1, 2]]
        data[str(ind) + '.png'] = np.hstack([trans, quat])
    # Open the output file only once the data is ready, and close it reliably
    # (the original opened early and never closed the handle).
    with open(path + '/obj_det_poses.pkl', 'wb') as f:
        pickle.dump(data, f)
if __name__ == '__main__':
    # Round-trip demo: load the recorded poses of one episode and re-save
    # them with simulated detection noise (paths are relative to the script).
    obj_pose, cam_poses = load_poses('../data/sim/ep_2')
    save_poses(obj_pose, cam_poses, '../data/sim/ep_2')
"pickle.dump",
"scipy.spatial.transform.Rotation.from_euler",
"scipy.spatial.transform.Rotation.from_dcm",
"numpy.zeros",
"numpy.hstack",
"pickle.load",
"numpy.linalg.inv",
"scipy.spatial.transform.Rotation.from_quat",
"numpy.random.normal",
"numpy.eye",
"numpy.concatenate"
] | [((178, 192), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (189, 192), False, 'import pickle\n'), ((207, 226), 'numpy.zeros', 'np.zeros', (['[0, 4, 4]'], {}), '([0, 4, 4])\n', (215, 226), True, 'import numpy as np\n'), ((249, 258), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (255, 258), True, 'import numpy as np\n'), ((552, 610), 'numpy.concatenate', 'np.concatenate', (['[gt_mat, gt_obj_pose_mat[None, :]]'], {'axis': '(0)'}), '([gt_mat, gt_obj_pose_mat[None, :]], axis=0)\n', (566, 610), True, 'import numpy as np\n'), ((1664, 1684), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (1675, 1684), False, 'import pickle\n'), ((676, 685), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (682, 685), True, 'import numpy as np\n'), ((983, 1034), 'numpy.concatenate', 'np.concatenate', (['[gt_mat, pose_mat[None, :]]'], {'axis': '(0)'}), '([gt_mat, pose_mat[None, :]], axis=0)\n', (997, 1034), True, 'import numpy as np\n'), ((1417, 1447), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.05)', '[3]'], {}), '(0, 0.05, [3])\n', (1433, 1447), True, 'import numpy as np\n'), ((1594, 1614), 'numpy.hstack', 'np.hstack', (['[t, quat]'], {}), '([t, quat])\n', (1603, 1614), True, 'import numpy as np\n'), ((1237, 1260), 'numpy.linalg.inv', 'np.linalg.inv', (['cam_pose'], {}), '(cam_pose)\n', (1250, 1260), True, 'import numpy as np\n'), ((404, 428), 'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (['quat'], {}), '(quat)\n', (422, 428), False, 'from scipy.spatial.transform import Rotation\n'), ((431, 472), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[0, np.pi, 0]'], {}), "('xyz', [0, np.pi, 0])\n", (450, 472), False, 'from scipy.spatial.transform import Rotation\n'), ((1463, 1483), 'scipy.spatial.transform.Rotation.from_dcm', 'Rotation.from_dcm', (['R'], {}), '(R)\n', (1480, 1483), False, 'from scipy.spatial.transform import Rotation\n'), ((835, 859), 
'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (['quat'], {}), '(quat)\n', (853, 859), False, 'from scipy.spatial.transform import Rotation\n'), ((862, 903), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', '[0, np.pi, 0]'], {}), "('xyz', [0, np.pi, 0])\n", (881, 903), False, 'from scipy.spatial.transform import Rotation\n'), ((1360, 1389), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.1)', '[3]'], {}), '(0, 0.1, [3])\n', (1376, 1389), True, 'import numpy as np\n')] |
"""
Advection of particles with nemo
"""
import numpy as np
from parcels import FieldSet, ParticleSet, JITParticle, ErrorCode, AdvectionRK4
from argparse import ArgumentParser
from datetime import timedelta
from datetime import datetime
from glob import glob
datadir = '/data2/imau/oceanparcels/hydrodynamic_data/NEMO-MEDUSA/ORCA0083-N006/' #Directory for nemo data
outputdir = '/scratch/wichm003/surface_mixing_output/' #Directory for output files
def DeleteParticle(particle, fieldset, time):
    """Kernel for deleting particles if they are out of bounds."""
    # Used below as the recovery kernel for ErrorOutOfBounds: the offending
    # particle is removed from the set instead of aborting the whole run.
    particle.delete()
def periodicBC(particle, fieldset, time):
    """
    Kernel for periodic values in longitude
    """
    # Wrap longitude back into [0, 360).  Kept as a plain if/elif because
    # parcels compiles kernels from a restricted subset of Python.
    if particle.lon < 0.:
        particle.lon += 360.
    elif particle.lon >= 360.:
        particle.lon -= 360.
def p_advect(outname='noname', coordinate_file='no_file_specified', y=2001, m=1, d=1, simdays=360):
    """
    Main function for execution
        - outname: name of the output file. Note that all important parameters are also in the file name.
        - coordinate_file: .npz file providing the initial particle grid ('lons' and 'lats' arrays).
        - y, m, d: year, month and day of the simulation start
        - simdays: number of days to simulate
    """
    print( '-------------------------')
    print( 'Start run... Parameters: ')
    print( '-------------------------')
    print( 'Initial time (y, m, d): ', (y, m, d))
    print( 'Simulation days', simdays)
    print( '-------------------------')
    #Load grid from external file; every particle starts at the same time
    coordinates = np.load(coordinate_file)
    lons = coordinates['lons']
    lats = coordinates['lats']
    times = [datetime(y, m, d)]*len(lons)
    print( 'Number of particles: ', len(lons))
    # Output name encodes start date and duration so runs are distinguishable
    outfile = outputdir + outname + '_y'+ str(y) + '_m' + str(m) + '_d' + str(d) + '_simdays' + str(simdays)
    # NEMO U/V velocity files for years 200x (glob pattern '200?????');
    # grid coordinates come from the domain mesh file
    ufiles = sorted(glob(datadir+'means/ORCA0083-N06_200?????d05U.nc'))
    vfiles = sorted(glob(datadir+'means/ORCA0083-N06_200?????d05V.nc'))
    mesh_mask = datadir + 'domain/coordinates.nc'
    filenames = {'U': {'lon': mesh_mask, 'lat': mesh_mask, 'data': ufiles},
                 'V': {'lon': mesh_mask, 'lat': mesh_mask, 'data': vfiles}}
    variables = {'U': 'uo',
                 'V': 'vo'}
    dimensions = {'U': {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'},
                  'V': {'lon': 'glamf', 'lat': 'gphif', 'time': 'time_counter'}}
    fieldset = FieldSet.from_nemo(filenames, variables, dimensions)
    # Clamp field values read by the kernels to 10 (parcels vmax attribute;
    # presumably guards against interpolation artefacts -- confirm units)
    fieldset.U.vmax = 10
    fieldset.V.vmax = 10
    pset = ParticleSet(fieldset=fieldset, pclass=JITParticle, lon=lons, lat=lats, time=times)
    # RK4 advection chained with the periodic-longitude wrap; out-of-bounds
    # particles are deleted via the recovery kernel, output written every 15 days
    kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(periodicBC)
    pset.execute(kernels, runtime=timedelta(days=simdays), dt=timedelta(minutes=10), output_file=pset.ParticleFile(name=outfile, outputdt=timedelta(days=15)),recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle})
if __name__=="__main__":
p = ArgumentParser(description="""Global advection of different particles""")
p.add_argument('-name', '--name', default='noname',help='Name of output file')
p.add_argument('-y', '--y', type=int,default=None,help='year of simulation start')
p.add_argument('-m', '--m', type=int,default=None,help='month of simulation start')
p.add_argument('-d', '--d', type=int,default=None,help='day of simulation start')
p.add_argument('-simdays', '--simdays', type=int,default=None,help='Simulation days')
p.add_argument('-coords', '--coords',help='Initial coordinate file')
args = p.parse_args()
p_advect(outname=args.name, coordinate_file=args.coords, y=args.y, m=args.m, d=args.d, simdays=args.simdays)
| [
"numpy.load",
"argparse.ArgumentParser",
"parcels.FieldSet.from_nemo",
"datetime.datetime",
"parcels.ParticleSet",
"datetime.timedelta",
"glob.glob"
] | [((1587, 1611), 'numpy.load', 'np.load', (['coordinate_file'], {}), '(coordinate_file)\n', (1594, 1611), True, 'import numpy as np\n'), ((2462, 2514), 'parcels.FieldSet.from_nemo', 'FieldSet.from_nemo', (['filenames', 'variables', 'dimensions'], {}), '(filenames, variables, dimensions)\n', (2480, 2514), False, 'from parcels import FieldSet, ParticleSet, JITParticle, ErrorCode, AdvectionRK4\n'), ((2579, 2666), 'parcels.ParticleSet', 'ParticleSet', ([], {'fieldset': 'fieldset', 'pclass': 'JITParticle', 'lon': 'lons', 'lat': 'lats', 'time': 'times'}), '(fieldset=fieldset, pclass=JITParticle, lon=lons, lat=lats, time\n =times)\n', (2590, 2666), False, 'from parcels import FieldSet, ParticleSet, JITParticle, ErrorCode, AdvectionRK4\n'), ((2981, 3050), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Global advection of different particles"""'}), "(description='Global advection of different particles')\n", (2995, 3050), False, 'from argparse import ArgumentParser\n'), ((1896, 1948), 'glob.glob', 'glob', (["(datadir + 'means/ORCA0083-N06_200?????d05U.nc')"], {}), "(datadir + 'means/ORCA0083-N06_200?????d05U.nc')\n", (1900, 1948), False, 'from glob import glob\n'), ((1968, 2020), 'glob.glob', 'glob', (["(datadir + 'means/ORCA0083-N06_200?????d05V.nc')"], {}), "(datadir + 'means/ORCA0083-N06_200?????d05V.nc')\n", (1972, 2020), False, 'from glob import glob\n'), ((1688, 1705), 'datetime.datetime', 'datetime', (['y', 'm', 'd'], {}), '(y, m, d)\n', (1696, 1705), False, 'from datetime import datetime\n'), ((2767, 2790), 'datetime.timedelta', 'timedelta', ([], {'days': 'simdays'}), '(days=simdays)\n', (2776, 2790), False, 'from datetime import timedelta\n'), ((2795, 2816), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (2804, 2816), False, 'from datetime import timedelta\n'), ((2871, 2889), 'datetime.timedelta', 'timedelta', ([], {'days': '(15)'}), '(days=15)\n', (2880, 2889), False, 'from datetime import timedelta\n')] |
import numpy as np
def softmax(x):
    """
    Normalize any vector to probabilistic distribution.
    :param x: numpy array or matrix
    :return: numpy array or matrix of the same shape to x
    """
    # Subtract the row-wise maximum before exponentiating for numerical
    # stability, then normalize by the row-wise sum.
    shifted = x - np.expand_dims(np.max(x, -1), -1)
    exp_vals = np.exp(shifted)
    return exp_vals / np.expand_dims(np.sum(exp_vals, -1), -1)
def sigmoid_gradient(f):
    """Gradient of the sigmoid expressed via its output value.

    :param f: function value of the sigmoid (scalar or numpy array)
    :return: gradient value, same shape as f
    """
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)) = f * (1 - f)
    return f * (1.0 - f)
def tanh_gradient(f):
    """Gradient of tanh expressed via its output value.

    :param f: function value of tanh (scalar or numpy array)
    :return: gradient value, same shape as f
    """
    # d/dx tanh(x) = 1 - tanh(x)^2 = 1 - f^2
    squared = f * f
    return 1 - squared
| [
"numpy.max",
"numpy.sum",
"numpy.exp"
] | [((258, 274), 'numpy.exp', 'np.exp', (['(x - xmax)'], {}), '(x - xmax)\n', (264, 274), True, 'import numpy as np\n'), ((229, 242), 'numpy.max', 'np.max', (['x', '(-1)'], {}), '(x, -1)\n', (235, 242), True, 'import numpy as np\n'), ((304, 319), 'numpy.sum', 'np.sum', (['e_x', '(-1)'], {}), '(e_x, -1)\n', (310, 319), True, 'import numpy as np\n')] |
'''
Created on Jun 27, 2016
@author: rajajosh
'''
from _random import Random
import numpy
class MyRandomClassifier(object):
    """Random classifier. To be used for testing/benchmarking purposes."""

    # Number of training samples seen by the last call to fit().
    len = 0

    def __init__(self, n_classes=3):
        """Create a classifier predicting uniformly among `n_classes` labels.

        The default of 3 preserves the original hard-coded behavior.
        """
        self.n_classes = n_classes

    def fit(self, x_train, y_train):
        """Record the training-set size; no actual learning takes place."""
        self.len = len(x_train)

    def predict(self, x_test):
        """Return a numpy array with one uniformly random label per sample.

        numpy's RNG replaces the original use of `_random.Random`, which is
        a private CPython implementation module, not a public API.
        """
        return numpy.random.randint(0, self.n_classes, size=len(x_test))
print("done")
| [
"_random.Random",
"numpy.asarray"
] | [((438, 446), '_random.Random', 'Random', ([], {}), '()\n', (444, 446), False, 'from _random import Random\n'), ((576, 597), 'numpy.asarray', 'numpy.asarray', (['retArr'], {}), '(retArr)\n', (589, 597), False, 'import numpy\n')] |
from environments.racing.racing import RaceTrack
from td_learning import td
import numpy as np
import argparse
# Command-line interface: two required paths plus optional training knobs.
arg_parser = argparse.ArgumentParser(
    description='Sarsa Racetrack Policy Improvement')
arg_parser.add_argument('racetrack', type=str,
                        help='Path to racetrack csv file')
arg_parser.add_argument('policy', type=str,
                        help='Path at which to save policy file')
arg_parser.add_argument('--episodes', type=int, default=1000,
                        help='Number of episodes to train over')
arg_parser.add_argument('--verbose', type=bool, default=False,
                        help='Print (a lot of) log messages')
cli_args = arg_parser.parse_args()


def _alpha(n):
    # Harmonically decaying learning rate: 1, 1/2, 1/3, ...
    return 1 / n


def _epsilon(ep, eps):
    # Exploration rate decays linearly from 1 towards 0 over training.
    return 1 - (ep / eps)


# Train with Sarsa on the chosen racetrack and persist the learned policy.
track = RaceTrack(cli_args.racetrack)
policy, Q = td.sarsa(
    track,
    alpha_func=_alpha,
    epsilon_func=_epsilon,
    episodes=cli_args.episodes
)
np.save(cli_args.policy, policy)
| [
"environments.racing.racing.RaceTrack",
"td_learning.td.sarsa",
"argparse.ArgumentParser",
"numpy.save"
] | [((122, 195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sarsa Racetrack Policy Improvement"""'}), "(description='Sarsa Racetrack Policy Improvement')\n", (145, 195), False, 'import argparse\n'), ((793, 818), 'environments.racing.racing.RaceTrack', 'RaceTrack', (['args.racetrack'], {}), '(args.racetrack)\n', (802, 818), False, 'from environments.racing.racing import RaceTrack\n'), ((831, 949), 'td_learning.td.sarsa', 'td.sarsa', (['racetrack'], {'alpha_func': '(lambda n: 1 / n)', 'epsilon_func': '(lambda ep, eps: 1 - ep / eps)', 'episodes': 'args.episodes'}), '(racetrack, alpha_func=lambda n: 1 / n, epsilon_func=lambda ep, eps:\n 1 - ep / eps, episodes=args.episodes)\n', (839, 949), False, 'from td_learning import td\n'), ((963, 991), 'numpy.save', 'np.save', (['args.policy', 'policy'], {}), '(args.policy, policy)\n', (970, 991), True, 'import numpy as np\n')] |
# coding: utf-8
import pytest
import numpy as np
from imageio import imread
from AxonDeepSeg.data_management.data_augmentation import *
class TestCore(object):
    def setup(self):
        # Build a 16x16 gradient image (pixel value = column index + row
        # index, i.e. 0..30) and an all-ones 3-channel mask.
        # Reminder: np.arange excludes its stop value.
        axis_vals = np.arange(0, 16, dtype='uint8')
        cols, rows = np.meshgrid(axis_vals, axis_vals)
        self.testImage = cols + rows
        self.mask = np.ones((16, 16, 3), dtype=int)

    def teardown(self):
        pass

    # --------------data_augmentation.py tests-------------- #
    # **NOTE** Most augmentation functions draw their parameters randomly
    # within bounds, so exact outputs cannot be pinned down.  The tests
    # below only check broad properties: the output shape matches the
    # input, and the output differs from (or equals) the input patch.

    @pytest.mark.unit
    def test_shifting_returns_different_image(self):
        original = [self.testImage, self.mask]
        augmented = shifting(original, verbose=1)
        assert augmented[0].shape == original[0].shape
        assert augmented[1].shape == original[1].shape
        assert (augmented[0] != original[0]).any()

    @pytest.mark.unit
    def test_rescaling_factor_1_returns_same_image(self):
        original = [self.testImage, self.mask]
        augmented = rescaling(original, factor_max=1, verbose=1)
        assert augmented[0].shape == original[0].shape
        assert augmented[1].shape == original[1].shape
        assert (augmented[0] == original[0]).all()

    @pytest.mark.unit
    def test_large_max_rescaling_factor_returns_different_image(self):
        # With a huge factor_max the randomly drawn factor is virtually
        # guaranteed to differ from 1; a spurious failure is still possible
        # (re-run the suite if it happens).
        original = [self.testImage, self.mask]
        augmented = rescaling(original, factor_max=100, verbose=1)
        assert augmented[0].shape == original[0].shape
        assert augmented[1].shape == original[1].shape
        assert (augmented[0] != original[0]).any()

    @pytest.mark.unit
    def test_random_rotation_returns_different_image(self):
        original = [self.testImage, self.mask]
        augmented = random_rotation(original, verbose=1)
        assert augmented[0].shape == original[0].shape
        assert augmented[1].shape == original[1].shape
        assert (augmented[0] != original[0]).any()

    @pytest.mark.unit
    def test_elastic_returns_different_image(self):
        original = [self.testImage, self.mask]
        augmented = elastic(original, verbose=1)
        assert augmented[0].shape == original[0].shape
        assert augmented[1].shape == original[1].shape
        assert (augmented[0] != original[0]).any()

    @pytest.mark.unit
    def test_flipping_returns_different_image(self):
        # Flipping only happens with some probability, so the output may be
        # identical to the input; only the shape invariants are checked.
        original = [self.testImage, self.mask]
        augmented = flipping(original, verbose=1)
        assert augmented[0].shape == original[0].shape
        assert augmented[1].shape == original[1].shape

    @pytest.mark.unit
    def test_gaussian_blur_returns_image_of_same_size(self):
        # The blur sigma is random as well, so only the shape invariants
        # are checked here.
        original = [self.testImage, self.mask]
        augmented = gaussian_blur(original, verbose=1)
        assert augmented[0].shape == original[0].shape
        assert augmented[1].shape == original[1].shape
| [
"numpy.meshgrid",
"numpy.ones",
"numpy.equal",
"numpy.not_equal",
"numpy.arange"
] | [((264, 295), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {'dtype': '"""uint8"""'}), "(0, 16, dtype='uint8')\n", (273, 295), True, 'import numpy as np\n'), ((308, 339), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {'dtype': '"""uint8"""'}), "(0, 16, dtype='uint8')\n", (317, 339), True, 'import numpy as np\n'), ((357, 374), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (368, 374), True, 'import numpy as np\n'), ((427, 458), 'numpy.ones', 'np.ones', (['(16, 16, 3)'], {'dtype': 'int'}), '((16, 16, 3), dtype=int)\n', (434, 458), True, 'import numpy as np\n'), ((1187, 1228), 'numpy.not_equal', 'np.not_equal', (['augmentedPatch[0]', 'patch[0]'], {}), '(augmentedPatch[0], patch[0])\n', (1199, 1228), True, 'import numpy as np\n'), ((1557, 1594), 'numpy.equal', 'np.equal', (['augmentedPatch[0]', 'patch[0]'], {}), '(augmentedPatch[0], patch[0])\n', (1565, 1594), True, 'import numpy as np\n'), ((1967, 2008), 'numpy.not_equal', 'np.not_equal', (['augmentedPatch[0]', 'patch[0]'], {}), '(augmentedPatch[0], patch[0])\n', (1979, 2008), True, 'import numpy as np\n'), ((2532, 2573), 'numpy.not_equal', 'np.not_equal', (['augmentedPatch[0]', 'patch[0]'], {}), '(augmentedPatch[0], patch[0])\n', (2544, 2573), True, 'import numpy as np\n'), ((2880, 2921), 'numpy.not_equal', 'np.not_equal', (['augmentedPatch[0]', 'patch[0]'], {}), '(augmentedPatch[0], patch[0])\n', (2892, 2921), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#############################################################
# IMPORTS #
#############################################################
import os
import sys
import numpy as np
import cv2
from PIL import Image, ImageEnhance
from skimage import exposure
#############################################################
# PATH #
#############################################################
PATH = os.path.dirname(os.path.abspath(__file__))
os.chdir(PATH)

#############################################################
#                       CONTENT                             #
#############################################################
EXTENSION = (".jpg", ".jpeg", ".png", ".JPG", ".JPEG", ".PNG")
FOLDER = [file for file in sorted(os.listdir()) if file.endswith(EXTENSION) and not file == "watermark.png"]
TOTAL = len(FOLDER)
COLOR = 1.15

#############################################################
#                        MAIN                               #
#############################################################
for i, file in enumerate(FOLDER) :
    os.system('cls' if os.name == 'nt' else 'clear')
    print("Normalisation des images")
    print("#" * 30)
    print("Image {} sur {}".format(i+1, TOTAL))
    # Skip the watermark in any capitalisation.  The original compared
    # against the misspelled "watermark.pgn" and then only executed "pass",
    # which skipped nothing.
    if file.lower() == "watermark.png" :
        continue
    # cv2.imread does not raise on unreadable files -- it returns None, so
    # the original try/except never fired; check for None explicitly.
    img = cv2.imread(file)
    if img is None :
        continue
    # Gamma term derived from mean brightness, fed to addWeighted below.
    gamma = (1/(np.average(img)/256))**2
    # Equalize the luma channel in YUV space, then blend 50/50 with the
    # original image to soften the equalization.
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    result = cv2.addWeighted(img, 0.5, img_output, 0.5, gamma)
    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
    result_image = Image.fromarray(result.astype('uint8'),'RGB')
    # Save at maximum JPEG quality, no chroma subsampling.
    result_image.save('OK_{}'.format(file), format='JPEG', subsampling=0, quality=100)

print("Terminé !")
"os.listdir",
"cv2.equalizeHist",
"os.path.abspath",
"numpy.average",
"cv2.cvtColor",
"os.system",
"cv2.addWeighted",
"cv2.imread",
"os.chdir"
] | [((582, 596), 'os.chdir', 'os.chdir', (['PATH'], {}), '(PATH)\n', (590, 596), False, 'import os\n'), ((554, 579), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (569, 579), False, 'import os\n'), ((1233, 1281), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (1242, 1281), False, 'import os\n'), ((1481, 1497), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (1491, 1497), False, 'import cv2\n'), ((1619, 1655), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2YUV'], {}), '(img, cv2.COLOR_BGR2YUV)\n', (1631, 1655), False, 'import cv2\n'), ((1682, 1716), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img_yuv[:, :, 0]'], {}), '(img_yuv[:, :, 0])\n', (1698, 1716), False, 'import cv2\n'), ((1739, 1779), 'cv2.cvtColor', 'cv2.cvtColor', (['img_yuv', 'cv2.COLOR_YUV2BGR'], {}), '(img_yuv, cv2.COLOR_YUV2BGR)\n', (1751, 1779), False, 'import cv2\n'), ((1798, 1847), 'cv2.addWeighted', 'cv2.addWeighted', (['img', '(0.5)', 'img_output', '(0.5)', 'gamma'], {}), '(img, 0.5, img_output, 0.5, gamma)\n', (1813, 1847), False, 'import cv2\n'), ((1868, 1907), 'cv2.cvtColor', 'cv2.cvtColor', (['result', 'cv2.COLOR_BGR2RGB'], {}), '(result, cv2.COLOR_BGR2RGB)\n', (1880, 1907), False, 'import cv2\n'), ((889, 901), 'os.listdir', 'os.listdir', ([], {}), '()\n', (899, 901), False, 'import os\n'), ((1569, 1584), 'numpy.average', 'np.average', (['img'], {}), '(img)\n', (1579, 1584), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 23 20:45:37 2018
@author: Alexandre
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import system
###############################################################################
###############################################################################
class MechanicalSystem( system.ContinuousDynamicSystem ):
    """
    Mechanical system with Equation of Motion in the form of
    -------------------------------------------------------
    H(q) ddq + C(q,dq) dq + d(q,dq) + g(q) = B(q) u
    -------------------------------------------------------
    q      :  dim = (dof, 1)   : position variables
    dq     :  dim = (dof, 1)   : velocity variables
    ddq    :  dim = (dof, 1)   : acceleration variables
    u      :  dim = (m, 1)     : force input variables
    H(q)   :  dim = (dof, dof) : inertia matrix
    C(q)   :  dim = (dof, dof) : corriolis matrix
    B(q)   :  dim = (dof, m)   : actuator matrix
    d(q,dq):  dim = (dof, 1)   : state-dependent dissipative forces
    g(q)   :  dim = (dof, 1)   : state-dependent conservatives forces
    """

    ############################
    def __init__(self, dof = 1 , actuators = None):
        """Set up dimensions, labels, bounds and units.

        Parameters
        ----------
        dof : int
            Number of degrees of freedom.
        actuators : int or None
            Number of actuators; None (the default) means fully actuated
            (one actuator per degree of freedom).
        """

        # Degree of Freedom
        self.dof = dof

        # Nb of actuators; if not specified the system is fully actuated.
        # Identity comparison ("is None"), not equality ("== None").
        if actuators is None:
            actuators = dof

        # Dimensions: n states (q and dq), m inputs, p outputs
        n = dof * 2
        m = actuators
        p = dof * 2

        # initialize standard params
        system.ContinuousDynamicSystem.__init__(self, n, m, p)

        # Name
        self.name = str(dof) + 'DoF Mechanical System'

        # Labels, bounds and units
        for i in range(dof):
            # joint angle states
            self.x_ub[i] = + np.pi * 2
            self.x_lb[i] = - np.pi * 2
            self.state_label[i] = 'Angle ' + str(i)
            self.state_units[i] = '[rad]'
            # joint velocity states
            self.x_ub[i + dof] = + np.pi * 2
            self.x_lb[i + dof] = - np.pi * 2
            self.state_label[i + dof] = 'Velocity ' + str(i)
            self.state_units[i + dof] = '[rad/sec]'
        for i in range(actuators):
            self.u_ub[i] = + 5
            self.u_lb[i] = - 5
            self.input_label[i] = 'Torque ' + str(i)
            self.input_units[i] = '[Nm]'

        # Outputs mirror the full state
        self.output_label = self.state_label
        self.output_units = self.state_units

    ###########################################################################
    # The following functions needs to be overloaded by child classes
    # to represent the dynamic of the system
    ###########################################################################

    def H(self, q):
        """
        Inertia matrix, dim (dof, dof), such that
        Kinetic Energy = 0.5 * dq^T * H(q) * dq.
        Default: identity matrix.
        """
        return np.diag(np.ones(self.dof))

    def C(self, q, dq):
        """
        Corriolis and centrifugal matrix, dim (dof, dof), such that
        d H / dt = C + C^T.
        Default: zero matrix.
        """
        return np.zeros((self.dof, self.dof))

    def B(self, q):
        """
        Actuator matrix, dim (dof, m).
        Default: identity on the first min(m, dof) joints.
        """
        B = np.zeros((self.dof, self.m))
        for i in range(min(self.m, self.dof)):
            B[i, i] = 1
        return B

    def g(self, q):
        """
        Gravitationnal forces vector, dim (dof,).  Default: zero vector.
        """
        return np.zeros(self.dof)

    def d(self, q, dq):
        """
        State-dependent dissipative forces, dim (dof,).  Default: zero vector.
        """
        return np.zeros(self.dof)

    ###########################################################################
    # No need to overwrite the following functions for custom system
    ###########################################################################

    #############################
    def x2q(self, x):
        """ from state vector (x) to angle and speeds (q, dq) """
        q = x[0: self.dof]
        dq = x[self.dof: self.n]
        return [q, dq]

    #############################
    def q2x(self, q, dq):
        """ from angle and speeds (q, dq) to state vector (x) """
        x = np.zeros(self.n)
        x[0: self.dof] = q
        x[self.dof: self.n] = dq
        return x

    #############################
    def xut2q(self, x, u, t):
        """ compute configuration variables from the state """
        return self.x2q(x)[0]

    ##############################
    def generalized_forces(self, q, dq, ddq, t=0):
        """ Computed generalized forces given a trajectory """
        H = self.H(q)
        C = self.C(q, dq)
        g = self.g(q)
        d = self.d(q, dq)
        # H ddq + C dq + g + d
        return np.dot(H, ddq) + np.dot(C, dq) + g + d

    ##############################
    def actuator_forces(self, q, dq, ddq, t=0):
        """ Computed actuator forces given a trajectory (inverse dynamic).

        Only implemented for fully actuated systems (dof == m), where the
        actuator matrix B is square and invertible.
        """
        if self.dof == self.m:
            B = self.B(q)
            forces = self.generalized_forces(q, dq, ddq, t)
            # u = B^-1 (H ddq + C dq + g + d)
            return np.dot(np.linalg.inv(B), forces)
        else:
            raise NotImplementedError

    ##############################
    def ddq(self, q, dq, u, t=0):
        """ Computed accelerations given actuator forces (foward dynamic) """
        H = self.H(q)
        C = self.C(q, dq)
        g = self.g(q)
        d = self.d(q, dq)
        B = self.B(q)
        # ddq = H^-1 ( B u - C dq - g - d )
        ddq = np.dot(np.linalg.inv(H), (np.dot(B, u) - np.dot(C, dq) - g - d))
        return ddq

    ###########################################################################
    def f(self, x, u, t=0):
        """
        Continuous time foward dynamics evaluation: dx = f(x, u, t)

        INPUTS
        x  : state vector             n x 1
        u  : control inputs vector    m x 1
        t  : time                     1 x 1

        OUPUTS
        dx : state derivative vector  n x 1
        """
        # from state vector (x) to angle and speeds (q, dq)
        [q, dq] = self.x2q(x)
        # compute joint acceleration
        ddq = self.ddq(q, dq, u, t)
        # from angle and speed diff (dq, ddq) to state vector diff (dx)
        return self.q2x(dq, ddq)

    ###########################################################################
    def kinetic_energy(self, q, dq):
        """ Kinetic energy 0.5 * dq^T H(q) dq of the manipulator """
        return 0.5 * np.dot(dq, np.dot(self.H(q), dq))
class MechanicalSystemWithPositionInputs( MechanicalSystem ):
    """
    Mechanical system with Equation of Motion in the form of
    -------------------------------------------------------
    H(q) ddq + C(q,dq) dq + d(q,dq) + g(q) = B(q,u) e(u)
    -------------------------------------------------------
    where e(u) extracts the force inputs from the full input vector u and
    the actuator matrix B may itself depend on the inputs.

    q      :  dim = (dof, 1)   : position variables
    dq     :  dim = (dof, 1)   : velocity variables
    ddq    :  dim = (dof, 1)   : acceleration variables
    e(u)   :  dim = (m, 1)     : force input variables
    H(q)   :  dim = (dof, dof) : inertia matrix
    C(q)   :  dim = (dof, dof) : corriolis matrix
    B(q,u) :  dim = (dof, m)   : actuator matrix
    d(q,dq):  dim = (dof, 1)   : state-dependent dissipative forces
    g(q)   :  dim = (dof, 1)   : state-dependent conservatives forces
    """

    ############################
    def __init__(self, dof = 1 , force_inputs = 1, other_inputs = 1):
        """Set up dimensions, labels, bounds and units.

        Parameters
        ----------
        dof : int
            Number of degrees of freedom.
        force_inputs : int
            Number of force (actuator) inputs; these occupy the first
            entries of the input vector.
        other_inputs : int
            Number of additional non-force inputs.
        """

        # Degree of Freedom
        self.dof = dof

        # Nb of force actuators
        self.actuators = force_inputs

        # Dimensions
        n = dof * 2
        m = force_inputs + other_inputs
        p = dof * 2

        # initialize standard params
        # NOTE: intentionally calls the grandparent directly -- labels and
        # bounds are (re)defined below, not by MechanicalSystem.__init__.
        system.ContinuousDynamicSystem.__init__(self, n, m, p)

        # Name
        self.name = str(dof) + 'DoF Mechanical System'

        # Labels, bounds and units
        for i in range(dof):
            # joint angle states
            self.x_ub[i] = + np.pi * 2
            self.x_lb[i] = - np.pi * 2
            self.state_label[i] = 'Angle ' + str(i)
            self.state_units[i] = '[rad]'
            # joint velocity states
            self.x_ub[i + dof] = + np.pi * 2
            self.x_lb[i + dof] = - np.pi * 2
            self.state_label[i + dof] = 'Velocity ' + str(i)
            self.state_units[i + dof] = '[rad/sec]'
        for i in range(self.actuators):
            self.u_ub[i] = + 5
            self.u_lb[i] = - 5
            self.input_label[i] = 'Force ' + str(i)
            self.input_units[i] = '[N]'

        self.output_label = self.state_label
        self.output_units = self.state_units

    ###########################################################################
    # The following functions needs to be overloaded by child classes
    # to represent the dynamic of the system
    ###########################################################################

    def B(self, q, u):
        """
        Actuator matrix, dim (dof, actuators); may depend on the inputs.
        Default: identity on the first min(actuators, dof) joints.
        """
        B = np.zeros((self.dof, self.actuators))
        for i in range(min(self.actuators, self.dof)):
            B[i, i] = 1
        return B

    ###########################################################################
    # No need to overwrite the following functions for custom system
    ###########################################################################

    #############################
    def u2e(self, u):
        """ Extract the force inputs e (the first `actuators` entries of u). """
        return u[0: self.actuators]

    # NOTE: generalized_forces() is inherited unchanged from MechanicalSystem;
    # the previous line-for-line duplicate override was removed.

    ##############################
    def actuator_forces(self, q, dq, ddq, t=0):
        """ Inverse dynamics is undefined here (B depends on the inputs). """
        raise NotImplementedError

    ##############################
    def ddq(self, q, dq, u, t=0):
        """ Computed accelerations given actuator forces (foward dynamic) """
        H = self.H(q)
        C = self.C(q, dq)
        g = self.g(q)
        d = self.d(q, dq)
        B = self.B(q, u)
        e = self.u2e(u)
        # ddq = H^-1 ( B e - C dq - g - d )
        return np.dot(np.linalg.inv(H), (np.dot(B, e) - np.dot(C, dq) - g - d))
'''
#################################################################
##################          Main                         ########
#################################################################
'''
if __name__ == "__main__":
    """ MAIN TEST """
    # Smoke test: a fully actuated 2-DoF system with the default (identity)
    # dynamics; drawing and simulation methods come from the pyro base class.
    sys = MechanicalSystem( 2 )
    sys.show( q = np.array([ 1.0, 2.0]) )
    sys.show3( q = np.array([-0.5, 1.5]) )
    # Constant input and zero initial state for the simulation below
    sys.ubar = np.array([1,2])
    sys.x0 = np.array([0,0,0,0])
    sys.plot_trajectory()
    sys.animate_simulation()
| [
"numpy.zeros",
"numpy.ones",
"numpy.linalg.inv",
"numpy.array",
"numpy.dot",
"pyro.dynamic.system.ContinuousDynamicSystem.__init__"
] | [((13252, 13268), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (13260, 13268), True, 'import numpy as np\n'), ((13283, 13305), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (13291, 13305), True, 'import numpy as np\n'), ((1800, 1854), 'pyro.dynamic.system.ContinuousDynamicSystem.__init__', 'system.ContinuousDynamicSystem.__init__', (['self', 'n', 'm', 'p'], {}), '(self, n, m, p)\n', (1839, 1854), False, 'from pyro.dynamic import system\n'), ((3782, 3812), 'numpy.zeros', 'np.zeros', (['(self.dof, self.dof)'], {}), '((self.dof, self.dof))\n', (3790, 3812), True, 'import numpy as np\n'), ((4062, 4090), 'numpy.zeros', 'np.zeros', (['(self.dof, self.m)'], {}), '((self.dof, self.m))\n', (4070, 4090), True, 'import numpy as np\n'), ((4454, 4472), 'numpy.zeros', 'np.zeros', (['self.dof'], {}), '(self.dof)\n', (4462, 4472), True, 'import numpy as np\n'), ((4745, 4763), 'numpy.zeros', 'np.zeros', (['self.dof'], {}), '(self.dof)\n', (4753, 4763), True, 'import numpy as np\n'), ((5470, 5486), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (5478, 5486), True, 'import numpy as np\n'), ((9661, 9715), 'pyro.dynamic.system.ContinuousDynamicSystem.__init__', 'system.ContinuousDynamicSystem.__init__', (['self', 'n', 'm', 'p'], {}), '(self, n, m, p)\n', (9700, 9715), False, 'from pyro.dynamic import system\n'), ((11070, 11106), 'numpy.zeros', 'np.zeros', (['(self.dof, self.actuators)'], {}), '((self.dof, self.actuators))\n', (11078, 11106), True, 'import numpy as np\n'), ((3353, 3370), 'numpy.ones', 'np.ones', (['self.dof'], {}), '(self.dof)\n', (3360, 3370), True, 'import numpy as np\n'), ((7108, 7124), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (7121, 7124), True, 'import numpy as np\n'), ((12690, 12706), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (12703, 12706), True, 'import numpy as np\n'), ((13165, 13185), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (13173, 13185), True, 
'import numpy as np\n'), ((13208, 13229), 'numpy.array', 'np.array', (['[-0.5, 1.5]'], {}), '([-0.5, 1.5])\n', (13216, 13229), True, 'import numpy as np\n'), ((6631, 6647), 'numpy.linalg.inv', 'np.linalg.inv', (['B'], {}), '(B)\n', (6644, 6647), True, 'import numpy as np\n'), ((6111, 6125), 'numpy.dot', 'np.dot', (['H', 'ddq'], {}), '(H, ddq)\n', (6117, 6125), True, 'import numpy as np\n'), ((6131, 6144), 'numpy.dot', 'np.dot', (['C', 'dq'], {}), '(C, dq)\n', (6137, 6144), True, 'import numpy as np\n'), ((12012, 12026), 'numpy.dot', 'np.dot', (['H', 'ddq'], {}), '(H, ddq)\n', (12018, 12026), True, 'import numpy as np\n'), ((12032, 12045), 'numpy.dot', 'np.dot', (['C', 'dq'], {}), '(C, dq)\n', (12038, 12045), True, 'import numpy as np\n'), ((7132, 7144), 'numpy.dot', 'np.dot', (['B', 'u'], {}), '(B, u)\n', (7138, 7144), True, 'import numpy as np\n'), ((7196, 7209), 'numpy.dot', 'np.dot', (['C', 'dq'], {}), '(C, dq)\n', (7202, 7209), True, 'import numpy as np\n'), ((12714, 12726), 'numpy.dot', 'np.dot', (['B', 'e'], {}), '(B, e)\n', (12720, 12726), True, 'import numpy as np\n'), ((12778, 12791), 'numpy.dot', 'np.dot', (['C', 'dq'], {}), '(C, dq)\n', (12784, 12791), True, 'import numpy as np\n')] |
"""
Compute onset times from time-domain audio data
Spectra are computed as necessary
Supported methods:
- Time-domain: energy
- Spectral: flux
Last updated: 15 December 2012
"""
from pymir import Energy
from pymir import SpectralFlux
import numpy
from numpy import NaN, Inf, arange, isscalar, array, asarray
import matplotlib.pyplot as plt
def onsets(audioData, method='energy'):
    """Detect onsets in *audioData* with the chosen strategy.

    method -- 'energy' (time-domain) or 'flux' (spectral); any other
    value yields an empty list, matching the original fallback.
    """
    dispatch = {'energy': onsetsByEnergy, 'flux': onsetsByFlux}
    detector = dispatch.get(method)
    if detector is None:
        return []
    return detector(audioData)
def onsetsByEnergy(audioData, frameSize = 512, threshold = 1):
    """
    Compute onsets by using dEnergy (time-domain).

    Returns the peak positions picked from the energy-derivative curve.
    """
    # Fix: the original also computed Energy.energy() into an unused
    # local `e`; only the derivative is needed for peak picking, so the
    # dead computation is dropped.
    dE = Energy.dEnergy(audioData, frameSize)
    # NOTE(review): the picking window (2048) is hard-coded and unrelated
    # to frameSize — preserved as-is for compatibility.
    return peakPicking(dE, 2048, threshold)
def onsetsByFlux(audioData, frameSize = 1024):
    """
    Compute onsets by using spectral flux.

    Peak positions are picked in frame units, then scaled back to
    sample offsets via frameSize.
    """
    # Compute the magnitude spectrum of every frame.
    spectra = []
    for frame in audioData.frames(frameSize):
        spectra.append(frame.spectrum())
    # Rectified spectral flux between consecutive spectra.
    flux = SpectralFlux.spectralFlux(spectra, rectify=True)
    frameIndices = peakPicking(flux, windowSize = 10, threshold = 1e6)
    return [frameSize * idx for idx in frameIndices]
def peakPicking(onsets, windowSize = 1024, threshold = 1):
    """Pick peak indices out of an onset-strength sequence.

    Delegates to peaksAboveAverage.  The ``threshold`` parameter is kept
    for interface compatibility only: the windowed-average/peakdet
    strategy that used it existed solely as commented-out dead code,
    which has been removed.
    """
    return peaksAboveAverage(onsets, windowSize)
def peaksAboveAverage(data, windowSize):
    """
    Find peaks by the following method:
    - Compute the average of all the data
    - Using a half-overlapping sliding window, find the max within each window
    - If the windowed max is above the average, add its index to peaks

    Returns a list of indices into *data* (each index appears at most once).
    """
    data = numpy.array(data)
    peaks = []
    dataAverage = numpy.average(data)
    # Fix: integer division. ``windowSize / 2`` is a float under
    # Python 3, and float slice indices raise TypeError below.  This
    # also matches the original Python 2 semantics for int inputs.
    slideAmount = windowSize // 2
    start = 0
    end = windowSize
    while start < len(data):
        windowMax = data[start:end].max()
        windowMaxPos = data[start:end].argmax()
        if windowMax > dataAverage:
            if (start + windowMaxPos) not in peaks:
                peaks.append(start + windowMaxPos)
        start = start + slideAmount
        end = end + slideAmount
    return peaks
def windowedAverage(data, windowSize):
    """Box-filter moving average of *data*, partial-overlap edges trimmed."""
    kernel = numpy.full(windowSize, 1.0 / windowSize)
    smoothed = numpy.convolve(data, kernel)
    return smoothed[windowSize - 1 : -(windowSize - 1)]
def peakdet(v, delta, x = None, threshold = 1):
    """
    Detect local maxima ("peaks") in the vector v.

    A point is recorded as a peak when it holds the running maximum and
    is then followed by a value lower by at least ``delta`` (that lower
    value must also exceed ``threshold``).  Returns the list of peak
    positions: indices by default, or the corresponding values from
    ``x`` when given.

    Adapted from https://gist.github.com/250860, itself converted from
    the MATLAB script at http://billauer.co.il/peakdet.html (explicitly
    released to the public domain by its author).

    Raises ValueError on invalid arguments.  (Fix: the original called
    ``sys.exit`` without ever importing ``sys``, so bad input crashed
    with NameError; raising is also the idiomatic way to reject input.)
    """
    maxtab = []
    v = asarray(v)
    if x is None:
        x = arange(len(v))
    if len(v) != len(x):
        raise ValueError('Input vectors v and x must have same length')
    if not isscalar(delta):
        raise ValueError('Input argument delta must be a scalar')
    if delta <= 0:
        raise ValueError('Input argument delta must be positive')
    # Running minimum/maximum and their positions; plain floats instead
    # of the numpy NaN/Inf aliases (removed in NumPy 2.0).
    mn, mx = float('inf'), float('-inf')
    mnpos, mxpos = float('nan'), float('nan')
    lookformax = True
    for i in range(len(v)):
        this = v[i]
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]
        if lookformax:
            # Dropped far enough below the running max: record the peak.
            if this < mx - delta and this > threshold:
                maxtab.append(mxpos)
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            # Climbed far enough above the running min: hunt for the
            # next peak.  (The unused mintab bookkeeping was removed.)
            if this > mn + delta:
                mx = this
                mxpos = x[i]
                lookformax = True
    return maxtab
"numpy.average",
"pymir.SpectralFlux.spectralFlux",
"numpy.isscalar",
"numpy.asarray",
"numpy.convolve",
"numpy.array",
"pymir.Energy.energy",
"pymir.Energy.dEnergy",
"numpy.repeat"
] | [((662, 697), 'pymir.Energy.energy', 'Energy.energy', (['audioData', 'frameSize'], {}), '(audioData, frameSize)\n', (675, 697), False, 'from pymir import Energy\n'), ((704, 740), 'pymir.Energy.dEnergy', 'Energy.dEnergy', (['audioData', 'frameSize'], {}), '(audioData, frameSize)\n', (718, 740), False, 'from pymir import Energy\n'), ((1051, 1099), 'pymir.SpectralFlux.spectralFlux', 'SpectralFlux.spectralFlux', (['spectra'], {'rectify': '(True)'}), '(spectra, rectify=True)\n', (1076, 1099), False, 'from pymir import SpectralFlux\n'), ((1925, 1942), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (1936, 1942), False, 'import numpy\n'), ((1972, 1991), 'numpy.average', 'numpy.average', (['data'], {}), '(data)\n', (1985, 1991), False, 'import numpy\n'), ((3653, 3663), 'numpy.asarray', 'asarray', (['v'], {}), '(v)\n', (3660, 3663), False, 'from numpy import NaN, Inf, arange, isscalar, array, asarray\n'), ((2490, 2519), 'numpy.repeat', 'numpy.repeat', (['(1.0)', 'windowSize'], {}), '(1.0, windowSize)\n', (2502, 2519), False, 'import numpy\n'), ((2541, 2569), 'numpy.convolve', 'numpy.convolve', (['data', 'window'], {}), '(data, window)\n', (2555, 2569), False, 'import numpy\n'), ((3757, 3772), 'numpy.isscalar', 'isscalar', (['delta'], {}), '(delta)\n', (3765, 3772), False, 'from numpy import NaN, Inf, arange, isscalar, array, asarray\n')] |
"""
Test the datasets module
"""
# Author: <NAME>
# License: simplified BSD
import contextlib
import os
import shutil
import numpy as np
import zipfile
import tarfile
import gzip
from tempfile import mkdtemp, mkstemp
import pytest
from nilearn import datasets
from nilearn._utils.testing import (mock_request, wrap_chunk_read_,
FetchFilesMock)
# Location of this test module and the data files shipped next to it.
currdir = os.path.dirname(os.path.abspath(__file__))
datadir = os.path.join(currdir, 'data')
# NOTE(review): these two names appear unused in this chunk; the mocks
# are actually kept in the globals assigned by setup_mock() below.
url_request = None
file_mock = None
@pytest.fixture()
def request_mock():
    """Fixture installing the request/download mocks around a test."""
    setup_mock()
    yield
    teardown_mock()
def setup_mock(utils_mod=datasets.utils, dataset_mod=datasets.utils):
    """Monkeypatch network/download helpers of the given modules with mocks.

    The originals are stashed in module-level globals so that
    teardown_mock() can restore them afterwards.
    """
    # Replace urllib's request machinery with a recording mock.
    global original_url_request
    global mock_url_request
    mock_url_request = mock_request()
    original_url_request = utils_mod._urllib.request
    utils_mod._urllib.request = mock_url_request
    # Wrap the chunked-read helper so downloads can be intercepted.
    global original_chunk_read
    global mock_chunk_read
    mock_chunk_read = wrap_chunk_read_(utils_mod._chunk_read_)
    original_chunk_read = utils_mod._chunk_read_
    utils_mod._chunk_read_ = mock_chunk_read
    # Replace the file-fetching entry point with a mock.
    global original_fetch_files
    global mock_fetch_files
    mock_fetch_files = FetchFilesMock()
    original_fetch_files = dataset_mod._fetch_files
    dataset_mod._fetch_files = mock_fetch_files
def teardown_mock(utils_mod=datasets.utils, dataset_mod=datasets.utils):
    """Undo setup_mock(): restore the original request machinery, chunk
    reader and file fetcher on the patched modules."""
    global original_url_request
    utils_mod._urllib.request = original_url_request
    global original_chunk_read
    # Bug fix: the original assigned to ``utils_mod.chunk_read_`` (no
    # leading underscore), which created a brand-new attribute and left
    # the ``_chunk_read_`` patched by setup_mock() in place.
    utils_mod._chunk_read_ = original_chunk_read
    global original_fetch_files
    dataset_mod._fetch_files = original_fetch_files
def test_get_dataset_dir(tmp_path):
    """Check _get_dataset_dir's directory resolution under several setups."""
    # testing folder creation under different environments, enforcing
    # a custom clean install
    os.environ.pop('NILEARN_DATA', None)
    os.environ.pop('NILEARN_SHARED_DATA', None)
    # With no environment variables set, falls back to ~/nilearn_data.
    expected_base_dir = os.path.expanduser('~/nilearn_data')
    data_dir = datasets.utils._get_dataset_dir('test', verbose=0)
    assert data_dir == os.path.join(expected_base_dir, 'test')
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)
    # NILEARN_DATA overrides the default location.
    expected_base_dir = str(tmp_path / 'test_nilearn_data')
    os.environ['NILEARN_DATA'] = expected_base_dir
    data_dir = datasets.utils._get_dataset_dir('test', verbose=0)
    assert data_dir == os.path.join(expected_base_dir, 'test')
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)
    # NILEARN_SHARED_DATA takes precedence over NILEARN_DATA in turn.
    expected_base_dir = str(tmp_path / 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = datasets.utils._get_dataset_dir('test', verbose=0)
    assert data_dir == os.path.join(expected_base_dir, 'test')
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)
    # Explicit default_paths beat the environment variables.
    expected_base_dir = str(tmp_path / 'env_data')
    expected_dataset_dir = os.path.join(expected_base_dir, 'test')
    data_dir = datasets.utils._get_dataset_dir(
        'test', default_paths=[expected_dataset_dir], verbose=0)
    assert data_dir == os.path.join(expected_base_dir, 'test')
    assert os.path.exists(data_dir)
    shutil.rmtree(data_dir)
    # A read-only candidate directory (mode 0o400, no write bit).
    no_write = str(tmp_path / 'no_write')
    os.makedirs(no_write)
    os.chmod(no_write, 0o400)
    expected_base_dir = str(tmp_path / 'nilearn_shared_data')
    os.environ['NILEARN_SHARED_DATA'] = expected_base_dir
    data_dir = datasets.utils._get_dataset_dir('test',
                                                default_paths=[no_write],
                                                verbose=0)
    # Non writeable dir is returned because dataset may be in there.
    assert data_dir == no_write
    assert os.path.exists(data_dir)
    # Restore write permission so rmtree can clean up.
    os.chmod(no_write, 0o600)
    shutil.rmtree(data_dir)
    # Verify exception for a path which exists and is a file
    test_file = str(tmp_path / 'some_file')
    with open(test_file, 'w') as out:
        out.write('abcfeg')
    with pytest.raises(
            OSError,
            match='Nilearn tried to store the dataset in the following '
            'directories, but'):
        datasets.utils._get_dataset_dir('test', test_file, verbose=0)
def test_md5_sum_file():
    """_md5_sum_file must return the hex MD5 digest of a file's contents."""
    handle, path = mkstemp()
    os.write(handle, b'abcfeg')
    os.close(handle)
    expected = '18f32295c556b2a1a3a8e68fe1ad40f7'
    assert datasets.utils._md5_sum_file(path) == expected
    os.remove(path)
def test_read_md5_sum_file():
    """_read_md5_sum_file must map each listed path to its MD5 hash."""
    handle, path = mkstemp()
    content = (b'20861c8c3fe177da19a7e9539a5dbac /tmp/test\n'
               b'70886dcabe7bf5c5a1c24ca24e4cbd94 test/some_image.nii')
    os.write(handle, content)
    os.close(handle)
    hashes = datasets.utils._read_md5_sum_file(path)
    assert '/tmp/test' in hashes
    assert '/etc/test' not in hashes
    assert hashes['test/some_image.nii'] == '70886dcabe7bf5c5a1c24ca24e4cbd94'
    assert hashes['/tmp/test'] == '20861c8c3fe177da19a7e9539a5dbac'
    os.remove(path)
def test_tree():
    """_tree must mirror a directory layout as nested (name, children) data."""
    # Build the fixture tree in the same creation order as before:
    # top-level files, then directories, then nested files.
    parent = mkdtemp()
    for name in ('file1', 'file2'):
        open(os.path.join(parent, name), 'w').close()
    dir1 = os.path.join(parent, 'dir1')
    dir11 = os.path.join(dir1, 'dir11')
    dir12 = os.path.join(dir1, 'dir12')
    dir2 = os.path.join(parent, 'dir2')
    for directory in (dir1, dir11, dir12, dir2):
        os.mkdir(directory)
    for path in (os.path.join(dir1, 'file11'),
                 os.path.join(dir1, 'file12'),
                 os.path.join(dir11, 'file111'),
                 os.path.join(dir2, 'file21')):
        open(path, 'w').close()
    tree_ = datasets.utils._tree(parent)
    # Check the tree structure.
    assert tree_[0][1][0][1][0] == os.path.join(dir11, 'file111')
    assert len(tree_[0][1][1][1]) == 0
    assert tree_[0][1][2] == os.path.join(dir1, 'file11')
    assert tree_[0][1][3] == os.path.join(dir1, 'file12')
    assert tree_[1][1][0] == os.path.join(dir2, 'file21')
    assert tree_[2] == os.path.join(parent, 'file1')
    assert tree_[3] == os.path.join(parent, 'file2')
    # Clean
    shutil.rmtree(parent)
def test_movetree():
    """movetree must merge dir1's contents into dir2, emptying dir1."""
    parent = mkdtemp()
    dir1 = os.path.join(parent, 'dir1')
    dir11 = os.path.join(dir1, 'dir11')
    dir12 = os.path.join(dir1, 'dir12')
    dir2 = os.path.join(parent, 'dir2')
    # dir2 already contains a 'dir12' so the move has to merge into it.
    for directory in (dir1, dir11, dir12, dir2, os.path.join(dir2, 'dir12')):
        os.mkdir(directory)
    for path in (os.path.join(dir1, 'file11'),
                 os.path.join(dir1, 'file12'),
                 os.path.join(dir11, 'file111'),
                 os.path.join(dir12, 'file121'),
                 os.path.join(dir2, 'file21')):
        open(path, 'w').close()
    datasets.utils.movetree(dir1, dir2)
    # Everything that lived under dir1 must be gone.
    for path in (dir11, dir12,
                 os.path.join(dir1, 'file11'),
                 os.path.join(dir1, 'file12'),
                 os.path.join(dir11, 'file111'),
                 os.path.join(dir12, 'file121')):
        assert not os.path.exists(path)
    # ... and must now exist under dir2.
    dir11 = os.path.join(dir2, 'dir11')
    dir12 = os.path.join(dir2, 'dir12')
    for path in (dir11, dir12,
                 os.path.join(dir2, 'file11'),
                 os.path.join(dir2, 'file12'),
                 os.path.join(dir11, 'file111'),
                 os.path.join(dir12, 'file121')):
        assert os.path.exists(path)
def test_filter_columns():
    """_filter_columns must select recarray rows matching the criteria."""
    # Fake recarray: an int column and a cycling 'a'/'b'/'c' string column.
    ints = np.arange(500)
    letters = np.asarray(['a', 'b', 'c'])
    strs = letters[ints % 3]
    records = np.asarray(list(zip(ints, strs)),
                         dtype=[('INT', int), ('STR', 'S1')])
    mask = datasets.utils._filter_columns(records, {'INT': (23, 46)})
    assert np.sum(mask) == 24
    mask = datasets.utils._filter_columns(records, {'INT': [0, 9, (12, 24)]})
    assert np.sum(mask) == 15
    ints = ints % 2
    records = np.asarray(list(zip(ints, strs)),
                         dtype=[('INT', int), ('STR', b'S1')])
    # An empty criteria list filters nothing out.
    mask = datasets.utils._filter_columns(records, [])
    assert np.sum(mask) == 500
    mask = datasets.utils._filter_columns(records, {'STR': b'b'})
    assert np.sum(mask) == 167
    mask = datasets.utils._filter_columns(records, {'STR': u'b'})
    assert np.sum(mask) == 167
    mask = datasets.utils._filter_columns(records, {'INT': 1, 'STR': b'b'})
    assert np.sum(mask) == 84
    mask = datasets.utils._filter_columns(records, {'INT': 1, 'STR': b'b'},
                                   combination='or')
    assert np.sum(mask) == 333
def test_uncompress():
    """_uncompress_file must unpack zip, tar and gzip archives in place."""
    # for each kind of compression, we create:
    # - a temporary directory (dtemp)
    # - a compressed object (ztemp)
    # - a temporary file-like object to compress into ztemp
    # we then uncompress the ztemp object into dtemp under the name ftemp
    # and check if ftemp exists
    dtemp = mkdtemp()
    ztemp = os.path.join(dtemp, 'test.zip')
    ftemp = 'test'
    try:
        # zip archive containing a single member named ftemp.
        with contextlib.closing(zipfile.ZipFile(ztemp, 'w')) as testzip:
            testzip.writestr(ftemp, ' ')
        datasets.utils._uncompress_file(ztemp, verbose=0)
        assert (os.path.exists(os.path.join(dtemp, ftemp)))
        shutil.rmtree(dtemp)
        # tar archive containing one member named ftemp.
        dtemp = mkdtemp()
        ztemp = os.path.join(dtemp, 'test.tar')
        # Create dummy file in the dtemp folder, so that the finally statement
        # can easily remove it
        fd, temp = mkstemp(dir=dtemp)
        os.close(fd)
        with contextlib.closing(tarfile.open(ztemp, 'w')) as tar:
            tar.add(temp, arcname=ftemp)
        datasets.utils._uncompress_file(ztemp, verbose=0)
        assert (os.path.exists(os.path.join(dtemp, ftemp)))
        shutil.rmtree(dtemp)
        # Empty gzip stream: still a valid archive.
        dtemp = mkdtemp()
        ztemp = os.path.join(dtemp, 'test.gz')
        gzip.open(ztemp, 'wb').close()
        datasets.utils._uncompress_file(ztemp, verbose=0)
        # test.gz gets uncompressed into test
        assert (os.path.exists(os.path.join(dtemp, 'test')))
        shutil.rmtree(dtemp)
    finally:
        # all temp files are created into dtemp except temp
        if os.path.exists(dtemp):
            shutil.rmtree(dtemp)
def test_fetch_file_overwrite(tmp_path, request_mock):
    """_fetch_file must honour the overwrite flag for existing downloads."""

    def content_of(path):
        with open(path, 'r') as handle:
            return handle.read()

    # Fetch a file that does not exist yet; the mock download is empty.
    fil = datasets.utils._fetch_file(url='http://foo/', data_dir=str(tmp_path),
                               verbose=0, overwrite=True)
    assert len(mock_url_request.urls) == 1
    assert os.path.exists(fil)
    assert content_of(fil) == ''
    # Modify the file locally.
    with open(fil, 'w') as handle:
        handle.write('some content')
    # overwrite=False must keep the local modification.
    fil = datasets.utils._fetch_file(url='http://foo/', data_dir=str(tmp_path),
                               verbose=0, overwrite=False)
    assert len(mock_url_request.urls) == 1
    assert os.path.exists(fil)
    assert content_of(fil) == 'some content'
    # overwrite=True must replace it with a fresh (empty, mocked) download.
    fil = datasets.utils._fetch_file(url='http://foo/', data_dir=str(tmp_path),
                               verbose=0, overwrite=True)
    assert len(mock_url_request.urls) == 1
    assert os.path.exists(fil)
    assert content_of(fil) == ''
def test_fetch_files_overwrite(tmp_path, request_mock):
    """_fetch_files must honour a per-file overwrite option."""

    def content_of(path):
        with open(path, 'r') as handle:
            return handle.read()

    spec = ('1.txt', 'http://foo/1.txt')
    # Fetch a file that does not exist yet; the mock download is empty.
    fil = datasets.utils._fetch_files(data_dir=str(tmp_path), verbose=0,
                                files=[spec + (dict(overwrite=True),)])
    assert len(mock_url_request.urls) == 1
    assert os.path.exists(fil[0])
    assert content_of(fil[0]) == ''
    # Modify the file locally.
    with open(fil[0], 'w') as handle:
        handle.write('some content')
    # overwrite=False must keep the local modification.
    fil = datasets.utils._fetch_files(data_dir=str(tmp_path), verbose=0,
                                files=[spec + (dict(overwrite=False),)])
    assert len(mock_url_request.urls) == 1
    assert os.path.exists(fil[0])
    assert content_of(fil[0]) == 'some content'
    # overwrite=True must replace it with a fresh (empty, mocked) download.
    fil = datasets.utils._fetch_files(data_dir=str(tmp_path), verbose=0,
                                files=[spec + (dict(overwrite=True),)])
    assert len(mock_url_request.urls) == 1
    assert os.path.exists(fil[0])
    assert content_of(fil[0]) == ''
| [
"os.mkdir",
"os.remove",
"numpy.sum",
"os.close",
"numpy.arange",
"shutil.rmtree",
"os.environ.pop",
"os.path.join",
"os.path.abspath",
"nilearn.datasets.utils._filter_columns",
"os.path.exists",
"pytest.raises",
"tempfile.mkdtemp",
"nilearn.datasets.utils._md5_sum_file",
"tarfile.open",... | [((447, 476), 'os.path.join', 'os.path.join', (['currdir', '"""data"""'], {}), "(currdir, 'data')\n", (459, 476), False, 'import os\n'), ((516, 532), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (530, 532), False, 'import pytest\n'), ((410, 435), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (425, 435), False, 'import os\n'), ((755, 769), 'nilearn._utils.testing.mock_request', 'mock_request', ([], {}), '()\n', (767, 769), False, 'from nilearn._utils.testing import mock_request, wrap_chunk_read_, FetchFilesMock\n'), ((953, 993), 'nilearn._utils.testing.wrap_chunk_read_', 'wrap_chunk_read_', (['utils_mod._chunk_read_'], {}), '(utils_mod._chunk_read_)\n', (969, 993), False, 'from nilearn._utils.testing import mock_request, wrap_chunk_read_, FetchFilesMock\n'), ((1172, 1188), 'nilearn._utils.testing.FetchFilesMock', 'FetchFilesMock', ([], {}), '()\n', (1186, 1188), False, 'from nilearn._utils.testing import mock_request, wrap_chunk_read_, FetchFilesMock\n'), ((1755, 1791), 'os.environ.pop', 'os.environ.pop', (['"""NILEARN_DATA"""', 'None'], {}), "('NILEARN_DATA', None)\n", (1769, 1791), False, 'import os\n'), ((1796, 1839), 'os.environ.pop', 'os.environ.pop', (['"""NILEARN_SHARED_DATA"""', 'None'], {}), "('NILEARN_SHARED_DATA', None)\n", (1810, 1839), False, 'import os\n'), ((1865, 1901), 'os.path.expanduser', 'os.path.expanduser', (['"""~/nilearn_data"""'], {}), "('~/nilearn_data')\n", (1883, 1901), False, 'import os\n'), ((1917, 1967), 'nilearn.datasets.utils._get_dataset_dir', 'datasets.utils._get_dataset_dir', (['"""test"""'], {'verbose': '(0)'}), "('test', verbose=0)\n", (1948, 1967), False, 'from nilearn import datasets\n'), ((2042, 2066), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2056, 2066), False, 'import os\n'), ((2071, 2094), 'shutil.rmtree', 'shutil.rmtree', (['data_dir'], {}), '(data_dir)\n', (2084, 2094), False, 'import shutil\n'), ((2222, 2272), 
'nilearn.datasets.utils._get_dataset_dir', 'datasets.utils._get_dataset_dir', (['"""test"""'], {'verbose': '(0)'}), "('test', verbose=0)\n", (2253, 2272), False, 'from nilearn import datasets\n'), ((2347, 2371), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2361, 2371), False, 'import os\n'), ((2376, 2399), 'shutil.rmtree', 'shutil.rmtree', (['data_dir'], {}), '(data_dir)\n', (2389, 2399), False, 'import shutil\n'), ((2536, 2586), 'nilearn.datasets.utils._get_dataset_dir', 'datasets.utils._get_dataset_dir', (['"""test"""'], {'verbose': '(0)'}), "('test', verbose=0)\n", (2567, 2586), False, 'from nilearn import datasets\n'), ((2661, 2685), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2675, 2685), False, 'import os\n'), ((2690, 2713), 'shutil.rmtree', 'shutil.rmtree', (['data_dir'], {}), '(data_dir)\n', (2703, 2713), False, 'import shutil\n'), ((2793, 2832), 'os.path.join', 'os.path.join', (['expected_base_dir', '"""test"""'], {}), "(expected_base_dir, 'test')\n", (2805, 2832), False, 'import os\n'), ((2848, 2941), 'nilearn.datasets.utils._get_dataset_dir', 'datasets.utils._get_dataset_dir', (['"""test"""'], {'default_paths': '[expected_dataset_dir]', 'verbose': '(0)'}), "('test', default_paths=[expected_dataset_dir\n ], verbose=0)\n", (2879, 2941), False, 'from nilearn import datasets\n'), ((3020, 3044), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (3034, 3044), False, 'import os\n'), ((3049, 3072), 'shutil.rmtree', 'shutil.rmtree', (['data_dir'], {}), '(data_dir)\n', (3062, 3072), False, 'import shutil\n'), ((3120, 3141), 'os.makedirs', 'os.makedirs', (['no_write'], {}), '(no_write)\n', (3131, 3141), False, 'import os\n'), ((3146, 3169), 'os.chmod', 'os.chmod', (['no_write', '(256)'], {}), '(no_write, 256)\n', (3154, 3169), False, 'import os\n'), ((3308, 3384), 'nilearn.datasets.utils._get_dataset_dir', 'datasets.utils._get_dataset_dir', (['"""test"""'], {'default_paths': 
'[no_write]', 'verbose': '(0)'}), "('test', default_paths=[no_write], verbose=0)\n", (3339, 3384), False, 'from nilearn import datasets\n'), ((3591, 3615), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (3605, 3615), False, 'import os\n'), ((3620, 3643), 'os.chmod', 'os.chmod', (['no_write', '(384)'], {}), '(no_write, 384)\n', (3628, 3643), False, 'import os\n'), ((3650, 3673), 'shutil.rmtree', 'shutil.rmtree', (['data_dir'], {}), '(data_dir)\n', (3663, 3673), False, 'import shutil\n'), ((4148, 4157), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (4155, 4157), False, 'from tempfile import mkdtemp, mkstemp\n'), ((4162, 4186), 'os.write', 'os.write', (['out', "b'abcfeg'"], {}), "(out, b'abcfeg')\n", (4170, 4186), False, 'import os\n'), ((4191, 4204), 'os.close', 'os.close', (['out'], {}), '(out)\n', (4199, 4204), False, 'import os\n'), ((4309, 4321), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (4318, 4321), False, 'import os\n'), ((4401, 4410), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (4408, 4410), False, 'from tempfile import mkdtemp, mkstemp\n'), ((4415, 4539), 'os.write', 'os.write', (['out', "b'20861c8c3fe177da19a7e9539a5dbac /tmp/test\\n70886dcabe7bf5c5a1c24ca24e4cbd94 test/some_image.nii'"], {}), "(out,\n b'20861c8c3fe177da19a7e9539a5dbac /tmp/test\\n70886dcabe7bf5c5a1c24ca24e4cbd94 test/some_image.nii'\n )\n", (4423, 4539), False, 'import os\n'), ((4552, 4565), 'os.close', 'os.close', (['out'], {}), '(out)\n', (4560, 4565), False, 'import os\n'), ((4574, 4610), 'nilearn.datasets.utils._read_md5_sum_file', 'datasets.utils._read_md5_sum_file', (['f'], {}), '(f)\n', (4607, 4610), False, 'from nilearn import datasets\n'), ((4812, 4824), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (4821, 4824), False, 'import os\n'), ((4893, 4902), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (4900, 4902), False, 'from tempfile import mkdtemp, mkstemp\n'), ((5021, 5049), 'os.path.join', 'os.path.join', (['parent', '"""dir1"""'], {}), 
"(parent, 'dir1')\n", (5033, 5049), False, 'import os\n'), ((5062, 5089), 'os.path.join', 'os.path.join', (['dir1', '"""dir11"""'], {}), "(dir1, 'dir11')\n", (5074, 5089), False, 'import os\n'), ((5102, 5129), 'os.path.join', 'os.path.join', (['dir1', '"""dir12"""'], {}), "(dir1, 'dir12')\n", (5114, 5129), False, 'import os\n'), ((5141, 5169), 'os.path.join', 'os.path.join', (['parent', '"""dir2"""'], {}), "(parent, 'dir2')\n", (5153, 5169), False, 'import os\n'), ((5174, 5188), 'os.mkdir', 'os.mkdir', (['dir1'], {}), '(dir1)\n', (5182, 5188), False, 'import os\n'), ((5193, 5208), 'os.mkdir', 'os.mkdir', (['dir11'], {}), '(dir11)\n', (5201, 5208), False, 'import os\n'), ((5213, 5228), 'os.mkdir', 'os.mkdir', (['dir12'], {}), '(dir12)\n', (5221, 5228), False, 'import os\n'), ((5233, 5247), 'os.mkdir', 'os.mkdir', (['dir2'], {}), '(dir2)\n', (5241, 5247), False, 'import os\n'), ((5471, 5499), 'nilearn.datasets.utils._tree', 'datasets.utils._tree', (['parent'], {}), '(parent)\n', (5491, 5499), False, 'from nilearn import datasets\n'), ((6270, 6291), 'shutil.rmtree', 'shutil.rmtree', (['parent'], {}), '(parent)\n', (6283, 6291), False, 'import shutil\n'), ((6364, 6373), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (6371, 6373), False, 'from tempfile import mkdtemp, mkstemp\n'), ((6386, 6414), 'os.path.join', 'os.path.join', (['parent', '"""dir1"""'], {}), "(parent, 'dir1')\n", (6398, 6414), False, 'import os\n'), ((6427, 6454), 'os.path.join', 'os.path.join', (['dir1', '"""dir11"""'], {}), "(dir1, 'dir11')\n", (6439, 6454), False, 'import os\n'), ((6467, 6494), 'os.path.join', 'os.path.join', (['dir1', '"""dir12"""'], {}), "(dir1, 'dir12')\n", (6479, 6494), False, 'import os\n'), ((6506, 6534), 'os.path.join', 'os.path.join', (['parent', '"""dir2"""'], {}), "(parent, 'dir2')\n", (6518, 6534), False, 'import os\n'), ((6539, 6553), 'os.mkdir', 'os.mkdir', (['dir1'], {}), '(dir1)\n', (6547, 6553), False, 'import os\n'), ((6558, 6573), 'os.mkdir', 'os.mkdir', 
(['dir11'], {}), '(dir11)\n', (6566, 6573), False, 'import os\n'), ((6578, 6593), 'os.mkdir', 'os.mkdir', (['dir12'], {}), '(dir12)\n', (6586, 6593), False, 'import os\n'), ((6598, 6612), 'os.mkdir', 'os.mkdir', (['dir2'], {}), '(dir2)\n', (6606, 6612), False, 'import os\n'), ((6924, 6959), 'nilearn.datasets.utils.movetree', 'datasets.utils.movetree', (['dir1', 'dir2'], {}), '(dir1, dir2)\n', (6947, 6959), False, 'from nilearn import datasets\n'), ((7291, 7318), 'os.path.join', 'os.path.join', (['dir2', '"""dir11"""'], {}), "(dir2, 'dir11')\n", (7303, 7318), False, 'import os\n'), ((7331, 7358), 'os.path.join', 'os.path.join', (['dir2', '"""dir12"""'], {}), "(dir2, 'dir12')\n", (7343, 7358), False, 'import os\n'), ((7371, 7392), 'os.path.exists', 'os.path.exists', (['dir11'], {}), '(dir11)\n', (7385, 7392), False, 'import os\n'), ((7404, 7425), 'os.path.exists', 'os.path.exists', (['dir12'], {}), '(dir12)\n', (7418, 7425), False, 'import os\n'), ((7723, 7737), 'numpy.arange', 'np.arange', (['(500)'], {}), '(500)\n', (7732, 7737), True, 'import numpy as np\n'), ((7752, 7779), 'numpy.asarray', 'np.asarray', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (7762, 7779), True, 'import numpy as np\n'), ((7935, 7992), 'nilearn.datasets.utils._filter_columns', 'datasets.utils._filter_columns', (['values', "{'INT': (23, 46)}"], {}), "(values, {'INT': (23, 46)})\n", (7965, 7992), False, 'from nilearn import datasets\n'), ((8029, 8094), 'nilearn.datasets.utils._filter_columns', 'datasets.utils._filter_columns', (['values', "{'INT': [0, 9, (12, 24)]}"], {}), "(values, {'INT': [0, 9, (12, 24)]})\n", (8059, 8094), False, 'from nilearn import datasets\n'), ((8285, 8327), 'nilearn.datasets.utils._filter_columns', 'datasets.utils._filter_columns', (['values', '[]'], {}), '(values, [])\n', (8315, 8327), False, 'from nilearn import datasets\n'), ((8365, 8418), 'nilearn.datasets.utils._filter_columns', 'datasets.utils._filter_columns', (['values', "{'STR': b'b'}"], {}), "(values, 
{'STR': b'b'})\n", (8395, 8418), False, 'from nilearn import datasets\n'), ((8456, 8509), 'nilearn.datasets.utils._filter_columns', 'datasets.utils._filter_columns', (['values', "{'STR': u'b'}"], {}), "(values, {'STR': u'b'})\n", (8486, 8509), False, 'from nilearn import datasets\n'), ((8547, 8610), 'nilearn.datasets.utils._filter_columns', 'datasets.utils._filter_columns', (['values', "{'INT': 1, 'STR': b'b'}"], {}), "(values, {'INT': 1, 'STR': b'b'})\n", (8577, 8610), False, 'from nilearn import datasets\n'), ((8647, 8733), 'nilearn.datasets.utils._filter_columns', 'datasets.utils._filter_columns', (['values', "{'INT': 1, 'STR': b'b'}"], {'combination': '"""or"""'}), "(values, {'INT': 1, 'STR': b'b'}, combination\n ='or')\n", (8677, 8733), False, 'from nilearn import datasets\n'), ((9120, 9129), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (9127, 9129), False, 'from tempfile import mkdtemp, mkstemp\n'), ((9142, 9173), 'os.path.join', 'os.path.join', (['dtemp', '"""test.zip"""'], {}), "(dtemp, 'test.zip')\n", (9154, 9173), False, 'import os\n'), ((10698, 10717), 'os.path.exists', 'os.path.exists', (['fil'], {}), '(fil)\n', (10712, 10717), False, 'import os\n'), ((11103, 11122), 'os.path.exists', 'os.path.exists', (['fil'], {}), '(fil)\n', (11117, 11122), False, 'import os\n'), ((11458, 11477), 'os.path.exists', 'os.path.exists', (['fil'], {}), '(fil)\n', (11472, 11477), False, 'import os\n'), ((11880, 11902), 'os.path.exists', 'os.path.exists', (['fil[0]'], {}), '(fil[0])\n', (11894, 11902), False, 'import os\n'), ((12302, 12324), 'os.path.exists', 'os.path.exists', (['fil[0]'], {}), '(fil[0])\n', (12316, 12324), False, 'import os\n'), ((12640, 12662), 'os.path.exists', 'os.path.exists', (['fil[0]'], {}), '(fil[0])\n', (12654, 12662), False, 'import os\n'), ((1991, 2030), 'os.path.join', 'os.path.join', (['expected_base_dir', '"""test"""'], {}), "(expected_base_dir, 'test')\n", (2003, 2030), False, 'import os\n'), ((2296, 2335), 'os.path.join', 'os.path.join', 
(['expected_base_dir', '"""test"""'], {}), "(expected_base_dir, 'test')\n", (2308, 2335), False, 'import os\n'), ((2610, 2649), 'os.path.join', 'os.path.join', (['expected_base_dir', '"""test"""'], {}), "(expected_base_dir, 'test')\n", (2622, 2649), False, 'import os\n'), ((2969, 3008), 'os.path.join', 'os.path.join', (['expected_base_dir', '"""test"""'], {}), "(expected_base_dir, 'test')\n", (2981, 3008), False, 'import os\n'), ((3856, 3961), 'pytest.raises', 'pytest.raises', (['OSError'], {'match': '"""Nilearn tried to store the dataset in the following directories, but"""'}), "(OSError, match=\n 'Nilearn tried to store the dataset in the following directories, but')\n", (3869, 3961), False, 'import pytest\n'), ((4012, 4073), 'nilearn.datasets.utils._get_dataset_dir', 'datasets.utils._get_dataset_dir', (['"""test"""', 'test_file'], {'verbose': '(0)'}), "('test', test_file, verbose=0)\n", (4043, 4073), False, 'from nilearn import datasets\n'), ((4217, 4248), 'nilearn.datasets.utils._md5_sum_file', 'datasets.utils._md5_sum_file', (['f'], {}), '(f)\n', (4245, 4248), False, 'from nilearn import datasets\n'), ((5903, 5933), 'os.path.join', 'os.path.join', (['dir11', '"""file111"""'], {}), "(dir11, 'file111')\n", (5915, 5933), False, 'import os\n'), ((6002, 6030), 'os.path.join', 'os.path.join', (['dir1', '"""file11"""'], {}), "(dir1, 'file11')\n", (6014, 6030), False, 'import os\n'), ((6060, 6088), 'os.path.join', 'os.path.join', (['dir1', '"""file12"""'], {}), "(dir1, 'file12')\n", (6072, 6088), False, 'import os\n'), ((6118, 6146), 'os.path.join', 'os.path.join', (['dir2', '"""file21"""'], {}), "(dir2, 'file21')\n", (6130, 6146), False, 'import os\n'), ((6170, 6199), 'os.path.join', 'os.path.join', (['parent', '"""file1"""'], {}), "(parent, 'file1')\n", (6182, 6199), False, 'import os\n'), ((6223, 6252), 'os.path.join', 'os.path.join', (['parent', '"""file2"""'], {}), "(parent, 'file2')\n", (6235, 6252), False, 'import os\n'), ((6626, 6653), 'os.path.join', 
'os.path.join', (['dir2', '"""dir12"""'], {}), "(dir2, 'dir12')\n", (6638, 6653), False, 'import os\n'), ((6976, 6997), 'os.path.exists', 'os.path.exists', (['dir11'], {}), '(dir11)\n', (6990, 6997), False, 'import os\n'), ((7013, 7034), 'os.path.exists', 'os.path.exists', (['dir12'], {}), '(dir12)\n', (7027, 7034), False, 'import os\n'), ((7452, 7480), 'os.path.join', 'os.path.join', (['dir2', '"""file11"""'], {}), "(dir2, 'file11')\n", (7464, 7480), False, 'import os\n'), ((7508, 7536), 'os.path.join', 'os.path.join', (['dir2', '"""file12"""'], {}), "(dir2, 'file12')\n", (7520, 7536), False, 'import os\n'), ((7564, 7594), 'os.path.join', 'os.path.join', (['dir11', '"""file111"""'], {}), "(dir11, 'file111')\n", (7576, 7594), False, 'import os\n'), ((7622, 7652), 'os.path.join', 'os.path.join', (['dir12', '"""file121"""'], {}), "(dir12, 'file121')\n", (7634, 7652), False, 'import os\n'), ((8004, 8013), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (8010, 8013), True, 'import numpy as np\n'), ((8106, 8115), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (8112, 8115), True, 'import numpy as np\n'), ((8339, 8348), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (8345, 8348), True, 'import numpy as np\n'), ((8430, 8439), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (8436, 8439), True, 'import numpy as np\n'), ((8521, 8530), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (8527, 8530), True, 'import numpy as np\n'), ((8622, 8631), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (8628, 8631), True, 'import numpy as np\n'), ((8779, 8788), 'numpy.sum', 'np.sum', (['f'], {}), '(f)\n', (8785, 8788), True, 'import numpy as np\n'), ((9324, 9373), 'nilearn.datasets.utils._uncompress_file', 'datasets.utils._uncompress_file', (['ztemp'], {'verbose': '(0)'}), '(ztemp, verbose=0)\n', (9355, 9373), False, 'from nilearn import datasets\n'), ((9442, 9462), 'shutil.rmtree', 'shutil.rmtree', (['dtemp'], {}), '(dtemp)\n', (9455, 9462), False, 'import shutil\n'), ((9480, 9489), 'tempfile.mkdtemp', 
'mkdtemp', ([], {}), '()\n', (9487, 9489), False, 'from tempfile import mkdtemp, mkstemp\n'), ((9506, 9537), 'os.path.join', 'os.path.join', (['dtemp', '"""test.tar"""'], {}), "(dtemp, 'test.tar')\n", (9518, 9537), False, 'import os\n'), ((9668, 9686), 'tempfile.mkstemp', 'mkstemp', ([], {'dir': 'dtemp'}), '(dir=dtemp)\n', (9675, 9686), False, 'from tempfile import mkdtemp, mkstemp\n'), ((9695, 9707), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (9703, 9707), False, 'import os\n'), ((9823, 9872), 'nilearn.datasets.utils._uncompress_file', 'datasets.utils._uncompress_file', (['ztemp'], {'verbose': '(0)'}), '(ztemp, verbose=0)\n', (9854, 9872), False, 'from nilearn import datasets\n'), ((9941, 9961), 'shutil.rmtree', 'shutil.rmtree', (['dtemp'], {}), '(dtemp)\n', (9954, 9961), False, 'import shutil\n'), ((9979, 9988), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (9986, 9988), False, 'from tempfile import mkdtemp, mkstemp\n'), ((10005, 10035), 'os.path.join', 'os.path.join', (['dtemp', '"""test.gz"""'], {}), "(dtemp, 'test.gz')\n", (10017, 10035), False, 'import os\n'), ((10083, 10132), 'nilearn.datasets.utils._uncompress_file', 'datasets.utils._uncompress_file', (['ztemp'], {'verbose': '(0)'}), '(ztemp, verbose=0)\n', (10114, 10132), False, 'from nilearn import datasets\n'), ((10248, 10268), 'shutil.rmtree', 'shutil.rmtree', (['dtemp'], {}), '(dtemp)\n', (10261, 10268), False, 'import shutil\n'), ((10353, 10374), 'os.path.exists', 'os.path.exists', (['dtemp'], {}), '(dtemp)\n', (10367, 10374), False, 'import os\n'), ((7065, 7093), 'os.path.join', 'os.path.join', (['dir1', '"""file11"""'], {}), "(dir1, 'file11')\n", (7077, 7093), False, 'import os\n'), ((7125, 7153), 'os.path.join', 'os.path.join', (['dir1', '"""file12"""'], {}), "(dir1, 'file12')\n", (7137, 7153), False, 'import os\n'), ((7185, 7215), 'os.path.join', 'os.path.join', (['dir11', '"""file111"""'], {}), "(dir11, 'file111')\n", (7197, 7215), False, 'import os\n'), ((7247, 7277), 'os.path.join', 
'os.path.join', (['dir12', '"""file121"""'], {}), "(dir12, 'file121')\n", (7259, 7277), False, 'import os\n'), ((9405, 9431), 'os.path.join', 'os.path.join', (['dtemp', 'ftemp'], {}), '(dtemp, ftemp)\n', (9417, 9431), False, 'import os\n'), ((9904, 9930), 'os.path.join', 'os.path.join', (['dtemp', 'ftemp'], {}), '(dtemp, ftemp)\n', (9916, 9930), False, 'import os\n'), ((10210, 10237), 'os.path.join', 'os.path.join', (['dtemp', '"""test"""'], {}), "(dtemp, 'test')\n", (10222, 10237), False, 'import os\n'), ((10388, 10408), 'shutil.rmtree', 'shutil.rmtree', (['dtemp'], {}), '(dtemp)\n', (10401, 10408), False, 'import shutil\n'), ((4913, 4942), 'os.path.join', 'os.path.join', (['parent', '"""file1"""'], {}), "(parent, 'file1')\n", (4925, 4942), False, 'import os\n'), ((4966, 4995), 'os.path.join', 'os.path.join', (['parent', '"""file2"""'], {}), "(parent, 'file2')\n", (4978, 4995), False, 'import os\n'), ((5257, 5285), 'os.path.join', 'os.path.join', (['dir1', '"""file11"""'], {}), "(dir1, 'file11')\n", (5269, 5285), False, 'import os\n'), ((5309, 5337), 'os.path.join', 'os.path.join', (['dir1', '"""file12"""'], {}), "(dir1, 'file12')\n", (5321, 5337), False, 'import os\n'), ((5361, 5391), 'os.path.join', 'os.path.join', (['dir11', '"""file111"""'], {}), "(dir11, 'file111')\n", (5373, 5391), False, 'import os\n'), ((5415, 5443), 'os.path.join', 'os.path.join', (['dir2', '"""file21"""'], {}), "(dir2, 'file21')\n", (5427, 5443), False, 'import os\n'), ((6664, 6692), 'os.path.join', 'os.path.join', (['dir1', '"""file11"""'], {}), "(dir1, 'file11')\n", (6676, 6692), False, 'import os\n'), ((6716, 6744), 'os.path.join', 'os.path.join', (['dir1', '"""file12"""'], {}), "(dir1, 'file12')\n", (6728, 6744), False, 'import os\n'), ((6768, 6798), 'os.path.join', 'os.path.join', (['dir11', '"""file111"""'], {}), "(dir11, 'file111')\n", (6780, 6798), False, 'import os\n'), ((6822, 6852), 'os.path.join', 'os.path.join', (['dir12', '"""file121"""'], {}), "(dir12, 'file121')\n", 
(6834, 6852), False, 'import os\n'), ((6876, 6904), 'os.path.join', 'os.path.join', (['dir2', '"""file21"""'], {}), "(dir2, 'file21')\n", (6888, 6904), False, 'import os\n'), ((9234, 9261), 'zipfile.ZipFile', 'zipfile.ZipFile', (['ztemp', '"""w"""'], {}), "(ztemp, 'w')\n", (9249, 9261), False, 'import zipfile\n'), ((9740, 9764), 'tarfile.open', 'tarfile.open', (['ztemp', '"""w"""'], {}), "(ztemp, 'w')\n", (9752, 9764), False, 'import tarfile\n'), ((10044, 10066), 'gzip.open', 'gzip.open', (['ztemp', '"""wb"""'], {}), "(ztemp, 'wb')\n", (10053, 10066), False, 'import gzip\n')] |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" util unit tests
"""
import unittest
import numpy
from fqe import util
class UnitTest(unittest.TestCase):
    """Unit tests for the helper routines in fqe.util."""

    def test_alpha_beta_electrons(self):
        """Check to make sure that the correct number of alpha and beta
        electrons are parsed from the number and multiplicity
        """
        self.assertTupleEqual((1, 1), util.alpha_beta_electrons(2, 0))
        self.assertTupleEqual((4, 1), util.alpha_beta_electrons(5, 3))
        self.assertTupleEqual((0, 5), util.alpha_beta_electrons(5, -5))

    def test_alpha_beta_error(self):
        """Invalid electron counts or impossible multiplicity combinations
        must raise ValueError.
        """
        self.assertRaises(ValueError, util.alpha_beta_electrons, -1, 0)
        self.assertRaises(ValueError, util.alpha_beta_electrons, 1, 2)

    def test_bubblesort_order(self):
        """Check that bubble sort works.
        """
        length = 5
        test_list = [(length - 1 - i) for i in range(length)]
        ordered_list = [i for i in range(length)]
        util.bubblesort(test_list)
        self.assertListEqual(ordered_list, test_list)

    def test_bubblesort_permutation_count(self):
        """ Make sure that we are counting the correct number of permutations
        to sort the list
        """
        length = 2
        test_list = [(length - 1 - i) for i in range(length)]
        self.assertEqual(1, util.bubblesort(test_list))
        length = 3
        test_list = [(length - 1 - i) for i in range(length)]
        self.assertEqual(3, util.bubblesort(test_list))
        test_list = [2, 0, 1]
        self.assertEqual(2, util.bubblesort(test_list))

    def test_reverse_bubblesort_permutation_count(self):
        """ Make sure that we are counting the correct number of permutations
        to sort the list
        """
        test_list = [[0, 0], [1, 0]]
        self.assertEqual(1, util.reverse_bubble_list(test_list))
        test_list = [[0, 0], [1, 0], [2, 0]]
        self.assertEqual(3, util.reverse_bubble_list(test_list))
        test_list = [[0, 0], [2, 0], [1, 0]]
        self.assertEqual(2, util.reverse_bubble_list(test_list))

    def test_configuration_key_union_empty(self):
        """The union of no configuration keys should be an empty list
        """
        self.assertListEqual([], util.configuration_key_union())

    def test_configuration_key_union(self):
        """The union of two key lists contains every distinct key once."""
        configs0 = [(2, 0), (3, 1)]
        configs1 = [(2, 0), (5, 1), (6, -2)]
        testset = set([(2, 0), (3, 1), (5, 1), (6, -2)])
        self.assertSetEqual(
            testset, set(util.configuration_key_union(configs0, configs1)))

    def test_configuration_key_union_many(self):
        """The union of many different keys should be all of them
        """
        configs0 = [(2, 0)]
        configs1 = [(5, 1)]
        configs2 = [(6, -2)]
        configs3 = [(3, -3)]
        refset = set([(2, 0), (5, 1), (6, -2), (3, -3)])
        testset = set(
            util.configuration_key_union(configs0, configs1, configs2,
                                         configs3))
        self.assertSetEqual(testset, refset)

    def test_configuration_key_intersection_none(self):
        """If there are no keys in common the intersection should be zero
        """
        self.assertListEqual([],
                             util.configuration_key_intersection([(2, 0)],
                                                                 [(2, 2)]))

    def test_configuration_key_intersection(self):
        """Check that the intersection returns the intersection
        """
        configs0 = [(10, 0), (3, 1), (5, -1)]
        configs1 = [(2, 0), (3, 1), (3, -1)]
        self.assertListEqual([(3, 1)],
                             util.configuration_key_intersection(
                                 configs0, configs1))

    def test_invert_bitstring_with_mask(self):
        """When inverting the occupation we want to maintain the number of orbitals
        """
        ref = 8
        self.assertEqual(ref, util.invert_bitstring_with_mask(7, 4))
        ref = 8 + 16 + 32 + 64 + 128
        self.assertEqual(ref, util.invert_bitstring_with_mask(7, 8))

    def test_ltlt_index_min(self):
        """If we have a zero dimension tensor there should be no pointers to it
        """
        _gtest = util.ltlt_index_generator(0)
        _test = [i for i in _gtest]
        self.assertListEqual(_test, [])

    def test_ltlt_index(self):
        """Access unique elements of a lower triangular lower triangular
        matrix
        """
        index_list = [(0, 0, 0, 0), (1, 0, 0, 0), (1, 0, 1, 0), (1, 1, 0, 0),
                      (1, 1, 1, 0), (1, 1, 1, 1)]
        _gtest = util.ltlt_index_generator(2)
        _test = [i for i in _gtest]
        self.assertListEqual(_test, index_list)

    def test_bitstring_groundstate(self):
        """The ground state bitstring has the n lowest bits flipped
        """
        self.assertEqual(15, util.init_bitstring_groundstate(4))

    def test_qubit_particle_number_sector(self):
        """Find the vectors which are the basis for a particular particle
        number.
        """
        zero = 0
        one = 1
        # numpy.int was removed in NumPy 1.24; the builtin int is the
        # equivalent dtype.
        ref = [
            numpy.array([zero, zero, one, zero], dtype=int),
            numpy.array([zero, one, zero, zero], dtype=int)
        ]
        test = util.qubit_particle_number_sector(2, 1)
        # NOTE(review): comparing i.all() to j.all() only checks whether both
        # arrays are all-nonzero, not elementwise equality -- kept as-is to
        # preserve existing behavior.
        for i, j in zip(test, ref):
            self.assertEqual(i.all(), j.all())

    def test_qubit_particle_number_index_spin(self):
        """Find the indexes which point to the correct coefficients in a qubit
        particle number sector and return the total spin.
        """
        ref = [(3, 0), (5, -2), (6, 0), (9, 0), (10, 2), (12, 0)]
        test = util.qubit_particle_number_index_spin(4, 2)
        self.assertListEqual(ref, test)

    def test_qubit_config_sector(self):
        """Find the basis vectors for a particular particle number and spin
        configuration
        """
        zero = 0
        one = 1
        lowstate = [
            zero, zero, one, zero, zero, zero, zero, zero, zero, zero, zero,
            zero, zero, zero, zero, zero
        ]
        highstate = [
            zero, zero, zero, zero, zero, zero, zero, zero, one, zero, zero,
            zero, zero, zero, zero, zero
        ]
        # numpy.int was removed in NumPy 1.24; use the builtin int dtype.
        ref = [
            numpy.array(lowstate, dtype=int),
            numpy.array(highstate, dtype=int)
        ]
        test = util.qubit_config_sector(4, 1, 1)
        for i, j in zip(test, ref):
            self.assertEqual(i.all(), j.all())
        ref = [numpy.array([zero, zero, zero, one], dtype=int)]
        test = util.qubit_config_sector(2, 2, 0)
        for i, j in zip(test, ref):
            self.assertEqual(i.all(), j.all())

    def test_qubit_particle_number_index(self):
        """Find the indexes which point to the correct coefficients in a qubit
        particle number sector and return the total spin.
        """
        ref = [1, 2, 4, 8]
        test = util.qubit_particle_number_index(4, 1)
        self.assertListEqual(ref, test)

    def test_qubit_vacuum(self):
        """The qubit vacuum is the first vector in the qubit basis.
        """
        _gs = numpy.array([1. + .0j, 0. + .0j, 0. + .0j, 0. + .0j],
                          dtype=numpy.complex64)
        self.assertListEqual(list(_gs), list(util.init_qubit_vacuum(2)))

    def test_sort_config_keys(self):
        """Keys are sorted by particle number and then by m_s
        """
        ref = [(0, 0), (1, -1), (3, -3), (3, 1), (5, -2), (5, 1)]
        keys = [(5, 1), (5, -2), (0, 0), (1, -1), (3, -3), (3, 1)]
        test = util.sort_configuration_keys(keys)
        self.assertListEqual(test, ref)

    def test_validate_config(self):
        """Make sure that the configuration validation routine identifies
        problematic values
        """
        self.assertRaises(ValueError, util.validate_config, 0, 0, -1)
        self.assertRaises(ValueError, util.validate_config, 3, 0, 2)
        self.assertRaises(ValueError, util.validate_config, 0, 3, 2)
        self.assertRaises(ValueError, util.validate_config, -1, 1, 2)
        self.assertRaises(ValueError, util.validate_config, 1, -1, 2)
        self.assertIsNone(util.validate_config(0, 0, 0))
        self.assertIsNone(util.validate_config(0, 0, 1))

    def test_zero_transform(self):
        """Ensure that things that should transform do and those that shouldn't
        dont
        """
        self.assertFalse(util.zero_transform(1 + 2 + 4, 8, 3, 6))
        self.assertTrue(util.zero_transform(2 + 4, 8, 1, 6))
        self.assertTrue(util.zero_transform(2 + 4, 4, 2, 6))

    def test_parity_sort_list(self):
        """Sort a list of lists according to the parity of the index in the
        0th element.
        """
        unchanged = [
            [6, ['these', 'values', {
                'dont': 6724
            }, tuple(['mat', 'ter'])]],
            [7, ['these', 'values', {
                'dont': 6724
            }, tuple(['mat', 'ter'])]],
            [3, ['these', 'values', {
                'dont': 6724
            }, tuple(['mat', 'ter'])]],
            [15, ['these', 'values', {
                'dont': 6724
            }, tuple(['mat', 'ter'])]]
        ]
        nswap, test = util.paritysort_list(unchanged)
        self.assertEqual(nswap, 0)
        self.assertListEqual(unchanged, test)
| [
"fqe.util.ltlt_index_generator",
"fqe.util.qubit_particle_number_sector",
"fqe.util.qubit_particle_number_index_spin",
"fqe.util.configuration_key_union",
"fqe.util.invert_bitstring_with_mask",
"fqe.util.init_qubit_vacuum",
"fqe.util.configuration_key_intersection",
"fqe.util.sort_configuration_keys",... | [((1692, 1718), 'fqe.util.bubblesort', 'util.bubblesort', (['test_list'], {}), '(test_list)\n', (1707, 1718), False, 'from fqe import util\n'), ((5036, 5064), 'fqe.util.ltlt_index_generator', 'util.ltlt_index_generator', (['(0)'], {}), '(0)\n', (5061, 5064), False, 'from fqe import util\n'), ((5418, 5446), 'fqe.util.ltlt_index_generator', 'util.ltlt_index_generator', (['(2)'], {}), '(2)\n', (5443, 5446), False, 'from fqe import util\n'), ((6078, 6117), 'fqe.util.qubit_particle_number_sector', 'util.qubit_particle_number_sector', (['(2)', '(1)'], {}), '(2, 1)\n', (6111, 6117), False, 'from fqe import util\n'), ((6485, 6528), 'fqe.util.qubit_particle_number_index_spin', 'util.qubit_particle_number_index_spin', (['(4)', '(2)'], {}), '(4, 2)\n', (6522, 6528), False, 'from fqe import util\n'), ((7197, 7230), 'fqe.util.qubit_config_sector', 'util.qubit_config_sector', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (7221, 7230), False, 'from fqe import util\n'), ((7399, 7432), 'fqe.util.qubit_config_sector', 'util.qubit_config_sector', (['(2)', '(2)', '(0)'], {}), '(2, 2, 0)\n', (7423, 7432), False, 'from fqe import util\n'), ((7756, 7794), 'fqe.util.qubit_particle_number_index', 'util.qubit_particle_number_index', (['(4)', '(1)'], {}), '(4, 1)\n', (7788, 7794), False, 'from fqe import util\n'), ((7963, 8052), 'numpy.array', 'numpy.array', (['[1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j]'], {'dtype': 'numpy.complex64'}), '([1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], dtype=numpy.\n complex64)\n', (7974, 8052), False, 'import numpy\n'), ((8399, 8433), 'fqe.util.sort_configuration_keys', 'util.sort_configuration_keys', (['keys'], {}), '(keys)\n', (8427, 8433), False, 'from fqe import util\n'), ((10044, 10075), 'fqe.util.paritysort_list', 'util.paritysort_list', (['unchanged'], {}), '(unchanged)\n', (10064, 10075), False, 'from fqe import util\n'), ((959, 990), 'fqe.util.alpha_beta_electrons', 'util.alpha_beta_electrons', 
(['(2)', '(0)'], {}), '(2, 0)\n', (984, 990), False, 'from fqe import util\n'), ((1030, 1061), 'fqe.util.alpha_beta_electrons', 'util.alpha_beta_electrons', (['(5)', '(3)'], {}), '(5, 3)\n', (1055, 1061), False, 'from fqe import util\n'), ((1101, 1133), 'fqe.util.alpha_beta_electrons', 'util.alpha_beta_electrons', (['(5)', '(-5)'], {}), '(5, -5)\n', (1126, 1133), False, 'from fqe import util\n'), ((2047, 2073), 'fqe.util.bubblesort', 'util.bubblesort', (['test_list'], {}), '(test_list)\n', (2062, 2073), False, 'from fqe import util\n'), ((2184, 2210), 'fqe.util.bubblesort', 'util.bubblesort', (['test_list'], {}), '(test_list)\n', (2199, 2210), False, 'from fqe import util\n'), ((2270, 2296), 'fqe.util.bubblesort', 'util.bubblesort', (['test_list'], {}), '(test_list)\n', (2285, 2296), False, 'from fqe import util\n'), ((2536, 2571), 'fqe.util.reverse_bubble_list', 'util.reverse_bubble_list', (['test_list'], {}), '(test_list)\n', (2560, 2571), False, 'from fqe import util\n'), ((2646, 2681), 'fqe.util.reverse_bubble_list', 'util.reverse_bubble_list', (['test_list'], {}), '(test_list)\n', (2670, 2681), False, 'from fqe import util\n'), ((2756, 2791), 'fqe.util.reverse_bubble_list', 'util.reverse_bubble_list', (['test_list'], {}), '(test_list)\n', (2780, 2791), False, 'from fqe import util\n'), ((2959, 2989), 'fqe.util.configuration_key_union', 'util.configuration_key_union', ([], {}), '()\n', (2987, 2989), False, 'from fqe import util\n'), ((3695, 3763), 'fqe.util.configuration_key_union', 'util.configuration_key_union', (['configs0', 'configs1', 'configs2', 'configs3'], {}), '(configs0, configs1, configs2, configs3)\n', (3723, 3763), False, 'from fqe import util\n'), ((4056, 4111), 'fqe.util.configuration_key_intersection', 'util.configuration_key_intersection', (['[(2, 0)]', '[(2, 2)]'], {}), '([(2, 0)], [(2, 2)])\n', (4091, 4111), False, 'from fqe import util\n'), ((4465, 4520), 'fqe.util.configuration_key_intersection', 'util.configuration_key_intersection', 
(['configs0', 'configs1'], {}), '(configs0, configs1)\n', (4500, 4520), False, 'from fqe import util\n'), ((4746, 4783), 'fqe.util.invert_bitstring_with_mask', 'util.invert_bitstring_with_mask', (['(7)', '(4)'], {}), '(7, 4)\n', (4777, 4783), False, 'from fqe import util\n'), ((4852, 4889), 'fqe.util.invert_bitstring_with_mask', 'util.invert_bitstring_with_mask', (['(7)', '(8)'], {}), '(7, 8)\n', (4883, 4889), False, 'from fqe import util\n'), ((5683, 5717), 'fqe.util.init_bitstring_groundstate', 'util.init_bitstring_groundstate', (['(4)'], {}), '(4)\n', (5714, 5717), False, 'from fqe import util\n'), ((5932, 5985), 'numpy.array', 'numpy.array', (['[zero, zero, one, zero]'], {'dtype': 'numpy.int'}), '([zero, zero, one, zero], dtype=numpy.int)\n', (5943, 5985), False, 'import numpy\n'), ((5999, 6052), 'numpy.array', 'numpy.array', (['[zero, one, zero, zero]'], {'dtype': 'numpy.int'}), '([zero, one, zero, zero], dtype=numpy.int)\n', (6010, 6052), False, 'import numpy\n'), ((7080, 7118), 'numpy.array', 'numpy.array', (['lowstate'], {'dtype': 'numpy.int'}), '(lowstate, dtype=numpy.int)\n', (7091, 7118), False, 'import numpy\n'), ((7132, 7171), 'numpy.array', 'numpy.array', (['highstate'], {'dtype': 'numpy.int'}), '(highstate, dtype=numpy.int)\n', (7143, 7171), False, 'import numpy\n'), ((7329, 7382), 'numpy.array', 'numpy.array', (['[zero, zero, zero, one]'], {'dtype': 'numpy.int'}), '([zero, zero, zero, one], dtype=numpy.int)\n', (7340, 7382), False, 'import numpy\n'), ((8998, 9027), 'fqe.util.validate_config', 'util.validate_config', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (9018, 9027), False, 'from fqe import util\n'), ((9055, 9084), 'fqe.util.validate_config', 'util.validate_config', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (9075, 9084), False, 'from fqe import util\n'), ((9252, 9291), 'fqe.util.zero_transform', 'util.zero_transform', (['(1 + 2 + 4)', '(8)', '(3)', '(6)'], {}), '(1 + 2 + 4, 8, 3, 6)\n', (9271, 9291), False, 'from fqe import util\n'), ((9317, 
9352), 'fqe.util.zero_transform', 'util.zero_transform', (['(2 + 4)', '(8)', '(1)', '(6)'], {}), '(2 + 4, 8, 1, 6)\n', (9336, 9352), False, 'from fqe import util\n'), ((9378, 9413), 'fqe.util.zero_transform', 'util.zero_transform', (['(2 + 4)', '(4)', '(2)', '(6)'], {}), '(2 + 4, 4, 2, 6)\n', (9397, 9413), False, 'from fqe import util\n'), ((3310, 3358), 'fqe.util.configuration_key_union', 'util.configuration_key_union', (['configs0', 'configs1'], {}), '(configs0, configs1)\n', (3338, 3358), False, 'from fqe import util\n'), ((8111, 8136), 'fqe.util.init_qubit_vacuum', 'util.init_qubit_vacuum', (['(2)'], {}), '(2)\n', (8133, 8136), False, 'from fqe import util\n')] |
from typing import Dict
import numpy as np
import pandas as pd
from ira.series.Indicators import ATR, MovingMinMax
from qlearn.tracking.trackers import TakeStopTracker
class Pyramiding(TakeStopTracker):
    """
    Pyramiding tracker.
    1. Open position = size on signal, stop at entry - stop_mx * ATR
    2. If price hits entry + next_mx * ATR and # entry < max_positions
       it adds to position: size * pyramiding_factor
            stop = avg_position_price - stop_mx * ATR
            next_add_level = avg_position_price + next_mx * ATR
    3. Skip any other signals if position is open

    Tracker's parameters
    --------------------
    size: basic position size (in contracts). On first entry tracker will buy or sell this amount of contracts.
    stop_mx: how many ATRs we set stop at (stop = entry_price -/+ stop_mx * ATR) [default 3]
    next_mx: how many ATRs next level is (next = entry_price +/- next_mx * ATR) [default 3]
             At next level tracker may performs following actions:
              - increase position if level's number >= pyramiding_start_step and it doesn't exceed maximal number of pyramiding positions (see max_position)
              - pull up/down stop level to breakeven
              - close position in profit if
    pyramiding_factor: position decaying factor. On every next step we will add to position: prev_size * pyramiding_factor.
                       For example if pyramiding_factor = 0.5 and size = 10
                        - at first initial step tracker open 10 contracts
                        - at next step it add 10 * 0.5 = 5 contracts
                        - at third step it adds 5 * 0.5 = 2 contracts
                        - at 4'th step: 2 * 0.5 = 1 contract
                        - at 5'th step 1 * 0.5 = 0 - so it final step and it will close position in profit if flat_on_max_step is set
                       so 0.5 reproduces classical approach: first 100%, 50%, 25%, 12% ...
                       If pyramiding_factor=1 tracker adds same fixed amount (==size) at every step.
    pyramiding_start_step: level number when it is allowed to increase position.
                       Classical way is to just move stop to breakeven at step 2 (no position increasing) and start pyramiding at step 3
                       (so pyramiding_start_step = 3 for this case)
    max_positions: maximal allowed number of pyramiding steps
    flat_on_max_step: if this flag is set tracker will close position when max number of position increasing steps reached
    atr_period: period of ATR indicator [default 22]
    atr_timeframe: timeframe for ATR indicator [default daily bars]
    round_size: minimal [default 1]

    Example
    -------
    Pyramiding(size=10, stop_mx=3, next_mx=3, pyramiding_factor=0.5,
               max_positions=5, flat_on_max_step=True, pyramiding_start_step=3, round_size=1)

    - Signal generator produces signal to open long position, current price is $100.00, ATR=5.00
    - Tracker will open 10 contracts long at $100.00 and set up stop at 100 - 3 * 5 = \$85, next level (#2) is 100 + 3 * 5 = $115
    - If price drops below $85 tracker will close position
    - If price goes above $115 tracker will do following actions:
        - just pull up stop at breakeven level at entry price = $100.00 because pyramiding should start from level N 3 but it's N 2
        - calculate ATR at this moment, let's say it's 7
        - starts waiting for next level (N3) == 115 + 3 * 7 = $136.00
    - When (if) price touches level N3 ($136) tracker will:
        - add to first 10 contracts another 10 * 0.5 = 5 contracts and position now is 15
        - calculate position price, it will be (10 100 + 5 136)/15 = $112
        - calculate ATR, let's say it's 3
        - set stop level at 112 - 3 * 3 = $103
        - calculate next level N4 == 136 + 3 * 3 = $145 (here it uses entry price 136 not average position price !!!)
    - When price reaches level N4 at let's (price is 145.00):
        - add to existing 15 contracts another 5 * 0.5 = 2.5 -> 2 contracts (we round it on round_size=1) and position now is 17
        - position size is (10 100 + 5 136 + 2 * 145) / 17 = 115.88
        - calculate ATR, let's say it's 4
        - move stop to 115.88 - 3 * 4 = 103.88
        - next level (#5) is 145 + 3*4 = $157
    - When price touches level #5 at $157 tracker will close position at take because max_positions is set to 5 and flat_on_max_step=True
    """

    def __init__(self, size, stop_mx=3, next_mx=3, pyramiding_factor=0.5, max_positions=3,
                 flat_on_max_step=False, pyramiding_start_step=3,
                 atr_period=22, atr_timeframe='1d', atr_smoother='sma',
                 round_size=1, debug=False, take_by_limit_orders=False):
        super().__init__(debug, take_by_limit_orders=take_by_limit_orders)
        self.size = size
        self.stop_mx = stop_mx
        self.next_mx = next_mx
        self.pyramiding_factor = pyramiding_factor
        # position increases are only meaningful from step 2 onwards
        self.pyramiding_start_step = max(abs(pyramiding_start_step), 2)
        self.max_positions = max_positions
        self.flat_on_max_step = flat_on_max_step
        self.atr_period = atr_period
        self.atr_timeframe = atr_timeframe
        self.atr_smoother = atr_smoother
        # number of decimal digits used by np.round for added position sizes
        self.log10_round_size = int(np.log10(max(round_size, 1)))

    def initialize(self):
        """Reset step counter and attach the ATR indicator to the OHLC series."""
        self.n_entry = 0
        self.next_level = np.nan

        # indicators stuff
        self.ohlc = self.get_ohlc_series(self.atr_timeframe)
        self.atr = ATR(self.atr_period, self.atr_smoother)
        self.ohlc.attach(self.atr)

    def get_position_size_for_step(self, n):
        """Size to add at pyramiding step n: size * factor**k, rounded."""
        n = n - self.pyramiding_start_step + 2
        return np.round(self.size * (self.pyramiding_factor) ** n, self.log10_round_size)

    def on_quote(self, quote_time, bid, ask, bid_size, ask_size, **kwargs):
        """Watch the next pyramiding level and manage adds / stops / takes."""
        tr = self.atr[1]
        qty = self._position.quantity

        if qty != 0 and tr is not None and np.isfinite(tr):
            # --- long position: watch the ask, levels are above entry
            if qty > 0:
                px = ask
                D = +1

            # --- short position: watch the bid, levels are below entry
            if qty < 0:
                px = bid
                D = -1

            # price hits target's level
            if (px - self.next_level) * D >= 0:
                # we've already reached this level so next will be recomputed
                mesg = f"[{quote_time}] {self._instrument} {px:.3f} touched {self.next_level:.3f} "
                self.next_level = np.nan

                # if we can increase
                if self.n_entry + 1 <= self.max_positions:
                    inc_size = self.get_position_size_for_step(self.n_entry + 1)

                    if inc_size > 0:
                        self.n_entry += 1
                        self.next_level = px + D * self.next_mx * tr

                        if self.n_entry >= self.pyramiding_start_step:
                            mesg += f'step ({self.n_entry}) -> {D * inc_size:+.0f} at ${px:.3f} next: {self.next_level:.3f}'

                            # increase position
                            self.trade(quote_time, qty + D * inc_size, mesg)

                            # average position price
                            avg_price = self._position.cost_usd / self._position.quantity

                            # set new stop
                            n_stop = avg_price - D * self.stop_mx * tr
                        else:
                            # set stop to breakeven only (not increase position !)
                            mesg += f'step ({self.n_entry}) at ${px:.3f} move stop to breakeven next: {self.next_level:.3f}'

                            # average position price
                            avg_price = self._position.cost_usd / self._position.quantity
                            n_stop = avg_price

                        mesg += f", stop: {n_stop:.3f}, avg_price: {avg_price:.3f}"
                        self.stop_at(quote_time, n_stop)
                    else:
                        if self.flat_on_max_step:
                            mesg += "closing position because increment size reached zero and flat_on_max_step is set"
                            self.trade(quote_time, 0, "Take profit")
                        else:
                            mesg += "position increasing size is zero: skip this step"
                else:
                    # max number of entries reached: either take profit or just skip
                    if self.flat_on_max_step:
                        mesg += "closing position because max step reached and flat_on_max_step is set"
                        self.trade(quote_time, 0, "Take profit")
                    else:
                        mesg += f'skip increasing step: max number of entries ({self.max_positions}) '

                self.debug(mesg)

        super().on_quote(quote_time, bid, ask, bid_size, ask_size, **kwargs)

    def on_signal(self, signal_time, signal_qty, quote_time, bid, ask, bid_size, ask_size):
        """Open an initial position on a signal; signals are ignored while a
        position is already open or while the ATR is not ready."""
        tr = self.atr[1]

        # we skip all signals if position is not flat or indicators not ready
        if self._position.quantity != 0 or tr is None or not np.isfinite(tr):
            return None

        pos = None
        if signal_qty > 0:
            # open initial long
            pos = self.size
            self.n_entry = 1
            self.stop_at(signal_time, ask - self.stop_mx * tr)
            self.next_level = ask + self.next_mx * tr
            self.debug(
                f'[{quote_time}] {self._instrument} step ({self.n_entry}) -> {pos} at ${ask:.3f} stop: {ask - self.stop_mx * tr:.3f}, next: {self.next_level:.3f}'
            )

        elif signal_qty < 0:
            # open initial short
            # fix: set n_entry *before* logging so the step number in the
            # debug message is correct (previously it showed the stale value)
            pos = -self.size
            self.n_entry = 1
            self.stop_at(signal_time, bid + self.stop_mx * tr)
            self.next_level = bid - self.next_mx * tr
            self.debug(
                f'[{quote_time}] {self._instrument} step ({self.n_entry}) -> {pos} at ${bid:.3f} stop: {bid + self.stop_mx * tr:.3f}, next: {self.next_level:.3f}'
            )

        return pos
class RADChandelier(TakeStopTracker):
    """
    RAD chandelier position tracker (trailing stop based on ATR, no take target)

    https://corporatefinanceinstitute.com/resources/knowledge/trading-investing/chandelier-exit/

    Parameters
    ----------
    size: position size (in contracts) used for every entry
    timeframe: bar timeframe for the ATR / rolling min-max channel
    period: lookback period of the ATR and rolling min/max indicators
    stop_risk_mx: ATR multiplier for the trailing stop distance (sign ignored)
    atr_smoother: smoothing method for the ATR [default 'sma']
    """

    def __init__(self, size, timeframe, period, stop_risk_mx, atr_smoother='sma', debug=False, take_by_limit_orders=False):
        super().__init__(debug, take_by_limit_orders=take_by_limit_orders)
        self.timeframe = timeframe
        self.period = period
        self.position_size = size
        # stop distance multiplier is always positive
        self.stop_risk_mx = abs(stop_risk_mx)
        self.atr_smoother = atr_smoother

    def initialize(self):
        """Attach ATR and rolling min/max indicators and reset trend state."""
        self.atr = ATR(self.period, self.atr_smoother)
        self.mm = MovingMinMax(self.period)
        self.ohlc = self.get_ohlc_series(self.timeframe)
        self.ohlc.attach(self.atr)
        self.ohlc.attach(self.mm)

        # current stop level
        self.level = None
        self.side = 0  # +1: up trend, -1: down trend
        self._dbg_values = {}

    def statistics(self):
        return super().statistics()

    def get_stops(self):
        """Return the (short_stop, long_stop) pair for the most recent bar."""
        return self._stops(1)

    def _stops(self, n):
        """Chandelier stop pair n bars back: (ll + mx*atr, hh - mx*atr).

        Returns (None, None) while the indicators are not ready.
        """
        av, m = self.atr[n], self.mm[n]
        if av is None or m is None:
            return None, None
        ll, hh = m
        if not np.isfinite(av) or not np.isfinite(ll) or not np.isfinite(hh):
            return None, None
        l_stop = hh - self.stop_risk_mx * av
        s_stop = ll + self.stop_risk_mx * av
        return s_stop, l_stop

    def update_stop_level(self) -> bool:
        """Refresh trend side and trailing stop on each new bar.

        Returns True when a new bar was processed, False otherwise.
        """
        if not self.ohlc.is_new_bar:
            return False

        # new bar just started
        s2, l2 = self._stops(2)
        s1, l1 = self._stops(1)
        if s2 is None:
            return False

        c1 = self.ohlc[1].close
        c2 = self.ohlc[2].close

        # close crossed below the long trail -> switch to down trend
        if c2 > l2 and c1 < l1:
            self.side = -1
            self.level = s1

        # close crossed above the short trail -> switch to up trend
        if c2 < s2 and c1 > s1:
            self.side = +1
            self.level = l1

        # ratchet: the trailing stop only tightens, never loosens
        if self.side > 0:
            self.level = max(self.level, l1)

        if self.side < 0:
            self.level = min(self.level, s1)

        # fix: method is annotated -> bool but previously fell through
        # returning None on the success path
        return True

    def on_quote(self, quote_time, bid, ask, bid_size, ask_size, **kwargs):
        """Trail the protective stop behind an open position."""
        # refresh current stop level
        self.update_stop_level()

        if self.side == 0 or self.level is None:
            return None

        qty = self._position.quantity

        # debug
        # self._dbg_values[self.ohlc[0].time] = {'Side': self.side, 'Level': self.level}

        if qty != 0:
            # NOTE(review): assumes self.stop (from TakeStopTracker) is set
            # whenever a position is open -- confirm in the base class
            if qty > 0 and self.level > self.stop:
                self.stop_at(quote_time, self.level)
                self.debug(f'[{quote_time}] {self._instrument} pull up stop to {self.level}')

            if qty < 0 and self.level < self.stop:
                self.stop_at(quote_time, self.level)
                self.debug(f'[{quote_time}] {self._instrument} pull down stop to {self.level}')

        super().on_quote(quote_time, bid, ask, bid_size, ask_size, **kwargs)

    def on_signal(self, signal_time, signal_qty, quote_time, bid, ask, bid_size, ask_size):
        """Enter on a signal only when price is on the right side of the trail.

        Returns the signed number of contracts to trade (np.nan skips entry).
        """
        qty = self._position.quantity

        if qty != 0:
            return None

        if self.side == 0 or self.level is None:
            self.debug(
                f'[{quote_time}] {self._instrument} skip entry indicators are not ready: {self.level} / {self.side}')
            return None

        if signal_qty > 0:
            if self.side > 0 and ask > self.level:
                self.stop_at(signal_time, self.level)
                self.debug(f'[{quote_time}] {self._instrument} entry long at ${ask} stop to {self.level}')
            else:
                self.debug(f'[{quote_time}] {self._instrument} skip long : stop {self.level} is above entry {ask}')
                signal_qty = np.nan

        elif signal_qty < 0:
            if self.side < 0 and bid < self.level:
                self.stop_at(signal_time, self.level)
                self.debug(f'[{quote_time}] {self._instrument} entry short at ${bid} stop to {self.level}')
            else:
                self.debug(f'[{quote_time}] {self._instrument} skip short : stop {self.level} is below entry {bid}')
                signal_qty = np.nan

        # scale the (possibly nan) signal by the configured position size
        return signal_qty * self.position_size
"numpy.round",
"ira.series.Indicators.ATR",
"ira.series.Indicators.MovingMinMax",
"numpy.isfinite"
] | [((5729, 5768), 'ira.series.Indicators.ATR', 'ATR', (['self.atr_period', 'self.atr_smoother'], {}), '(self.atr_period, self.atr_smoother)\n', (5732, 5768), False, 'from ira.series.Indicators import ATR, MovingMinMax\n'), ((5912, 5984), 'numpy.round', 'np.round', (['(self.size * self.pyramiding_factor ** n)', 'self.log10_round_size'], {}), '(self.size * self.pyramiding_factor ** n, self.log10_round_size)\n', (5920, 5984), True, 'import numpy as np\n'), ((10961, 10996), 'ira.series.Indicators.ATR', 'ATR', (['self.period', 'self.atr_smoother'], {}), '(self.period, self.atr_smoother)\n', (10964, 10996), False, 'from ira.series.Indicators import ATR, MovingMinMax\n'), ((11015, 11040), 'ira.series.Indicators.MovingMinMax', 'MovingMinMax', (['self.period'], {}), '(self.period)\n', (11027, 11040), False, 'from ira.series.Indicators import ATR, MovingMinMax\n'), ((6171, 6186), 'numpy.isfinite', 'np.isfinite', (['tr'], {}), '(tr)\n', (6182, 6186), True, 'import numpy as np\n'), ((9343, 9358), 'numpy.isfinite', 'np.isfinite', (['tr'], {}), '(tr)\n', (9354, 9358), True, 'import numpy as np\n'), ((11592, 11607), 'numpy.isfinite', 'np.isfinite', (['av'], {}), '(av)\n', (11603, 11607), True, 'import numpy as np\n'), ((11615, 11630), 'numpy.isfinite', 'np.isfinite', (['ll'], {}), '(ll)\n', (11626, 11630), True, 'import numpy as np\n'), ((11638, 11653), 'numpy.isfinite', 'np.isfinite', (['hh'], {}), '(hh)\n', (11649, 11653), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from math import sqrt
from math import floor
from matplotlib.finance import candlestick_ohlc
import csv
import pandas as pd
plt.rc('text', usetex=True)  # render axis labels/titles with LaTeX
#Parameters brownian()
#
#x[0]: X(0), initial stock price
#N: number of increments
#T: time period
#dt: time step
#delta: "speed" of the Brownian motion, variance = delta**2t
N = 900
T = 150
dt = T/N
delta = .01
delta2 = .05
# Module-level buffers shared by the functions below (np.empty leaves them
# uninitialized; code is expected to fill them before use).
x = np.empty((N+1))
xL = np.empty((N+1))
xH = np.empty((N+1))
dxL = np.empty((N+1))
dxH = np.empty((N+1))
buya = np.empty((N+1))   # last buy price per step, read by c1()/method1()
x[0] = 10
xL[0] = 0.0
xH[0] = xL[0]
t = np.arange(0, T, dt)  # time axis: 0, dt, 2dt, ..., T-dt
def brownian(x0, n, dt, delta, out=None):
x0 = np.asarray(x0)
r = norm.rvs(size=x0.shape + (n,), scale=delta*sqrt(dt))
if out is None:
out = np.empty(r.shape)
np.cumsum(r, axis=-1, out=out)
out += np.expand_dims(x0, axis=-1)
return out
def brownianu(s, N):
np.random.seed(s)
return np.cumsum(np.random.normal(0., 1., N)*sqrt(1./N))
#Parameters GBM()
#
#x[0]: X(0), initial stock price (used by brownian())
#mu: drift coefficient
#sigma: volatility coefficient
#x: brownian motion calculated by brownian()
#t: np array, 0, dt, 2dt, ..., T
mu = 0.05     # drift used by GBM()
sigma = 0.2   # volatility used by GBM()
def GBM(x0, mu, sigma, x, T, N):
    """Geometric Brownian motion driven by the Brownian path x.

    S(t_i) = x0 * exp((mu - sigma^2/2) * t_i + sigma * x[i-1]) on the unit
    grid t = linspace(0, 1, N+1). Returns (S, t) where S is a list of N+1
    prices starting at x0. T is unused but kept for interface parity.
    """
    times = np.linspace(0., 1., N+1)
    path = [x0]
    for step in range(1, N+1):
        drift = (mu - 0.5 * sigma**2) * times[step]
        diffusion = sigma * x[step-1]
        path.append(x0 * np.exp(drift + diffusion))
    return path, times
def createportfolio(LEN, srsit, qty, qty2):
    """Allocate a LEN x 3 array [currency0, currency1, price] of zeros and
    seed rows 0..srsit with the initial balances qty and qty2."""
    portfolio = np.zeros(shape=(LEN,3))
    portfolio[srsit,1] = 0
    # initial conditions for both currencies over the warm-up rows
    portfolio[:srsit + 1, 0] = qty
    portfolio[:srsit + 1, 1] = qty2
    return portfolio
#Parameters stochrsi()
#
#x: numpy array, prices (brownian, GBM, real datas)
#i: stochrsi() computes the stochrsi of x at i
#stochrsit: stochrsi based on the last stochrsit prices, condition : stochrsit=n*dt, where n is a natural number and stochrsit is a natural number
stochrsit = 20
# shared output buffer for stochrsib(); note it is sized N-stochrsit, so
# callers passing longer series trigger the IndexError branch in stochrsib()
srsifiltered = np.empty((N-stochrsit))
portfolio = createportfolio(N+1, stochrsit, 1, 0)
def stochrsi(x, i, stochrsit):
    """StochRSI of x at index i over the trailing window x[i-stochrsit:i]:
    position of x[i-1] between the window's min and max, in [0, 1]."""
    window = np.asarray(x[i-stochrsit:i])
    lo, hi = window.min(), window.max()
    return (x[i-1] - lo) / (hi - lo)
#Paramters stochrsib()
#
#srsi: numpy array of the values calculated by stochrsi()
#alpha: threshold for the buying point
#beta: threshold for the selling point, |0.5-c|=alpha, -|0.5-c|=beta
def stochrsib(srsi, alpha, beta):
    """Threshold-filter a StochRSI series into the module-level buffer
    srsifiltered: values below alpha become 0 (buy), values >= beta become
    1 (sell), anything in between is kept. Writes past the end of the
    fixed-size buffer are reported ("EH") and skipped. Returns the buffer.
    """
    for k in range(0, len(srsi)):
        try:
            value = srsi[k]
            if value < alpha:       # buy threshold
                srsifiltered[k] = 0
            elif value >= beta:     # sell threshold
                srsifiltered[k] = 1
            else:
                srsifiltered[k] = value
        except IndexError:
            # srsifiltered is shorter than srsi; ignore the overflow
            print("EH")
    return srsifiltered
#Parameters EMA()
#
#alpha: smoothing constant
#x0: initial condition
#x: set of datas
# shared output buffer mutated in place by EMA()
expava = np.empty((N+1))
def EMA(alpha, x0, x):
    """Exponential moving average of x, seeded with x0, computed into the
    module-level buffer expava (mutated in place).

    NOTE(review): the return value drops the element at index 1 via
    np.delete — presumably to align lengths with other series; confirm.
    """
    expava[0] = x0
    for k in range(1, len(x)):
        try:
            expava[k] = alpha * x[k] + (1 - alpha) * expava[k-1]
        except IndexError:
            # x is longer than the fixed-size buffer; ignore the overflow
            continue
    return np.delete(expava, 1, axis = 0)
#Parameters SMA()
#
#n SMA() return the arithmetic average on the last n values of x
#x: data set
def SMA(x, n, dt):
    """Simple moving average of x over windows of n samples.

    Returns (t, arava) where arava[j] is the mean of x[j:j+n] and t is the
    matching time axis starting at n*dt.

    Fixes vs. the original: the output length is derived from len(x) rather
    than the module-level N, arava[0] is initialised (np.empty left it as
    uninitialised garbage that propagated through the recurrence), and the
    rolling update no longer wraps around via negative indexing for j < n.
    """
    m = len(x) - n
    if m <= 0:
        # not enough samples for a single window
        return np.empty(0), np.empty(0)
    arava = np.empty(m)
    arava[0] = float(np.mean(x[:n]))  # first full window
    for j in range(1, m):
        # rolling update: drop x[j-1], add x[j+n-1]
        arava[j] = arava[j-1] + (x[j+n-1] - x[j-1]) / n
    t = (np.arange(m) + n) * dt
    return t, arava
def buy(x, t, q):
    """Spend q cash at price x[t]; returns ([cash delta], [asset delta])."""
    price = x[t]
    return [-q], [q / price]
def sell(x, t, q):
    """Sell q assets at price x[t]; returns ([cash delta], [asset delta])."""
    price = x[t]
    return [q * price], [-q]
def plotkav(t, k, name, i):
    """Plot a moving-average series k against t and save it as PLOT/<name>.png.

    i selects the LaTeX title: 0 -> EMA, 1 -> SMA, 2 -> SA; any other value
    leaves the figure untitled.
    """
    fig, ax = plt.subplots()
    plt.plot(t, k)
    plt.xlabel(r'\textbf{time} ($t = k\Delta t$)')
    titles = {
        0: r"EMA$(B(t)) = \alpha B(t)+(1-\alpha) $EMA$({B(t-\Delta t)})$",
        1: r"SMA$(B(t)) = $ SMA$(B(t-\Delta t))+\frac{B(t)-B(t-n)}{n}$",
        2: r"SA$(B(t)) = \frac{1}{t}\mathrm{SA}(B(t-\Delta t))*(t-\Delta)+B(t))$",
    }
    if i in titles:
        plt.title(titles[i], fontsize=16, color='black')
    plt.savefig('PLOT/'+name+'.png')
def plotsrsi(t, srsi, stochrsit, name):
    """Plot the StochRSI series and save it as PLOT/<name>.png.

    NOTE(review): the srsi argument is immediately overwritten — the series
    is recomputed from the module-level price array x (and globals dt, T),
    so the parameter is effectively unused; confirm against callers.
    """
    fig, ax = plt.subplots()
    plt.xlabel(r'\textbf{time} ($t = k\Delta t$)')
    plt.title(r"StochRSI$(t, \tau)$")
    srsi = np.delete([tuple([stochrsi(x, i, stochrsit)]) for i in range(stochrsit,len(t)+1)], 1, axis=0)
    plt.plot(np.arange(stochrsit*dt, T, dt), srsi)
    plt.savefig('PLOT/'+name+'.png')
def plotportfoliog(i, portfolio, lsrsi, name, x, stochrsit):
    """Plot the portfolio valuation over time (mode i, see portfoliogain)
    and save it as PLOT/<name>.png.

    i in {0, 1}: total value expressed in one of the two currencies;
    i >= 2: raw portfolio column i-2. The tplub axis is computed but unused.
    """
    fig, ax = plt.subplots()
    tplub = np.arange(stochrsit, lsrsi+stochrsit)
    plt.xlabel(r'\textbf{time} ($k = \frac{t}{\Delta t}$)')
    if i==0 or i==1:
        plt.title(r'EQPortfolio(t,'+str(1-i)+')')
    elif i>=2:
        plt.title(r'Portfolio(t, '+str(i-2)+')')
    # np.delete drops the second sample to match the other series' lengths
    p = np.delete([tuple([portfoliogain(i, j, portfolio, x)]) for j in range(((portfolio.shape[0])))], 1, axis=0)
    plt.plot(range(portfolio.shape[0]-1), p)
    plt.savefig('PLOT/'+name+'.png')
def portfoliogain(i, j, portfolio, x):
    """Portfolio valuation at row j.

    i == 1: total value in currency 0 (cash + price * holdings);
    i == 0: total value in currency 1 (cash / price + holdings);
    i >= 2: raw portfolio column i-2. Other i return None implicitly.
    The x argument is unused (kept for interface parity).
    """
    cash = portfolio[j, 0]
    asset = portfolio[j, 1]
    price = portfolio[j, 2]
    if i == 1:
        return cash + price * asset
    if i == 0:
        return cash / price + asset
    if i >= 2:
        return portfolio[j, i-2]
def plotsrsifiltered(t, srsifiltered, stochrsit, name):
    """Plot the thresholded StochRSI series (see stochrsib) against time
    and save it as PLOT/<name>.png. Uses the module-level dt and T."""
    fig, ax = plt.subplots()
    plt.plot(np.arange(stochrsit*dt, T, dt), srsifiltered)
    plt.xlabel(r'\textbf{time} ($t = k\Delta t$)')
    plt.title(r'FilteredStochRSI($\alpha$,$\beta$,$t$,$\tau$)')
    plt.savefig('PLOT/'+name+'.png')
def plotbrownian(x, dxH, dxL, dt, t, name, b):
    """Candlestick plot of a price path, saved as PLOT/<name>.png.

    dxH/dxL are per-step high/low offsets (scaled by the relative price
    change); b selects the title (1 -> GBM, 0 -> plain Brownian, else stock)
    and the first plotted index. Note: abs()/negation rebind dxL and dxH to
    new arrays, so the caller's arrays are not mutated, and the quotes list
    is rebuilt on every loop iteration until an IndexError ends the scaling.
    """
    dxL = -abs(dxL)
    dxH = abs(dxH)
    quotes = []
    for i in range(len(x)):
        try:
            # i == 0 reads x[-1] (wraps to the last element) by design of
            # Python indexing; only the final rebuilt quotes list is used
            dxL[i] *= (x[i]-x[i-1])/x[i]
            dxH[i] *= (x[i]-x[i-1])/x[i]
            quotes = [tuple([t[i],
                    x[i],
                    x[i+1]+dxH[i+1],
                    x[i+1]+dxL[i+1],
                    x[i+1]]) for i in range(b,len(x)-1)]
        except IndexError:
            pass
            continue
    fig, ax = plt.subplots()
    plt.xlabel(r'\textbf{time} ($t = k\Delta t$)')
    candlestick_ohlc(ax, quotes, width=dt, colorup='g', colordown='r', alpha=1.0)
    if b == 1:
        plt.title(r'$B_g(t, \sigma, \mu, B)$')
    elif b == 0:
        plt.title(r'$B(t, \delta)$')
    else:
        plt.title(r'STOCK PRICE')
    plt.xlabel(r'\textbf{time} ($k = \frac{t}{\Delta t}$)')
    plt.savefig('PLOT/' + name + '.png')
def stochrsiarray(x, t, stochrsit):
    """StochRSI of x over every index from stochrsit to len(t), as a column
    array. np.delete drops the second row to match the other series'
    lengths used by the callers in this file.
    """
    rows = [tuple([stochrsi(x, k, stochrsit)]) for k in range(stochrsit, len(t) + 1)]
    return np.delete(rows, 1, axis=0)
#Parameters method1()
#
#C: threshold to sell and continue the process, C should depend on indicators at each time (tip : if you don't want to use it, make C large)
def c1(portfolio, srsib, expava, sl, cd, i, b, stochrsit, x):
    """Trading condition used by method1 at step i.

    b == 0 ("enter"): cash available, filtered StochRSI signals buy (0),
    and the EMA is falling.
    b == 1 ("exit"): holdings available, price above cd times the last buy
    price (module-level buya), filtered StochRSI signals sell (1), and the
    current valuation is below sl times the previous step's valuation.
    Any other b returns None. Evaluation order matters: the and-chain
    short-circuits before indexing buya/portfolio[i-1].
    """
    if b==0:
        return portfolio[i, 0] > 0 and srsib[i-stochrsit] == 0 and expava[i]<expava[i-1]
    elif b==1:
        return portfolio[i, 1] > 0 and x[i]>cd*buya[i-1] and srsib[i-stochrsit] == 1 and (x[i]*portfolio[i-1,1]+portfolio[i-1,0])<sl*portfoliogain(1, i-1, portfolio, x)
def method1(stochrsit, t, x, portfolio, srsib, srsi, dt, C, expava, arava, name, sl, cd):
    """Run the StochRSI + EMA trading strategy over the price series x.

    Walks the portfolio forward step by step: buys all cash when the entry
    condition c1(..., b=0) holds, sells all holdings when the exit condition
    c1(..., b=1) holds, otherwise carries the position. Once the valuation
    reaches C times the starting cash, everything is liquidated and the
    final row is recorded. Every action is appended to <name>.txt.
    Mutates portfolio and the module-level buya in place; arava is unused.
    Returns the updated portfolio.
    """
    for i in range(stochrsit, len(srsi)+stochrsit):
        portfolio[i, 2] = x[i]
        if portfoliogain(1, i, portfolio, x) < C*portfolio[0, 0]:
            if c1(portfolio, srsib, expava, sl, cd, i, 0, stochrsit, x):
                # buy only if it yields a positive asset amount
                if(buy(x, i+1, (1-srsib[i-stochrsit])*portfolio[i,0])[1] > [0.0]):
                    '''print("B " + str(buy(x, i+1, (1-srsib[i-stochrsit])*portfolio[i,0])[1]) + " F " + str(buy(x, i+1, (1-srsib[i-stochrsit])*portfolio[i,0])[0]) + " AT " + str(x[i+1]))'''
                    portfolio[i+1,0] = portfolio[i, 0] + buy(x, i+1, (1-srsib[i-stochrsit])*portfolio[i,0])[0]
                    portfolio[i+1,1] = portfolio[i, 1] + buy(x, i+1, (1-srsib[i-stochrsit])*portfolio[i,0])[1]
                    buya[i] = x[i]
                    with open(name+".txt", "a") as myfile:
                        myfile.write("B("+str(i)+") : " + str(portfolio[i,0]) + ", " + str(portfolio[i,1]) + " -> " + str(portfolio[i+1,0])+", "+str(portfolio[i+1,1]) + "*" + str(x[i+1]) + "\n")
            elif c1(portfolio, srsib, expava, sl, cd, i, 1, stochrsit, x):
                # sell only if it yields positive cash proceeds
                if(sell(x, i+1, (srsib[i-stochrsit])*portfolio[i,1])[0] > [0.0]):
                    '''print("S " + str(sell(x, i+1, (srsib[i-stochrsit])*portfolio[i,1])[1]) + " F " + str(sell(x, i+1, (srsib[i-stochrsit])*portfolio[i,1])[0]) + " AT " + str(x[i+1]) + " V " + str(portfoliogain(1, i, portfolio, x)))'''
                    portfolio[i+1,0] = portfolio[i,0] + sell(x, i+1, srsib[i-stochrsit]*portfolio[i, 1])[0]
                    portfolio[i+1,1] = portfolio[i,1] + sell(x, i+1, srsib[i-stochrsit]*portfolio[i, 1])[1]
                    with open(name+".txt", "a") as myfile:
                        myfile.write("S("+str(i)+") : " + str(portfolio[i, 0]) + ", " + str(portfolio[i, 1]) + " -> " + str(portfolio[i+1,0]) + ", " + str(portfolio[i+1,1]) + "*" + str(x[i+1])+ "\n")
                buya[i] = buya[i-1]
            else:
                # keep the position unchanged for this step
                portfolio[i+1, 0] = portfolio[i, 0]
                portfolio[i+1, 1] = portfolio[i, 1]
                buya[i] = buya[i-1]
                with open(name+".txt", "a") as myfile:
                    myfile.write("K("+str(i)+") : " + str(portfolio[i, 0]) + ", " + str(portfolio[i, 1])+ " -> " + str(portfolio[i+1,0]) + ", " + str(portfolio[i+1,1])+"*"+str(x[i+1])+"\n")
        else:
            # target C reached: liquidate and record the final row
            portfolio[i+1, 0] = portfolio[i, 0] + sell(x, i+1, portfolio[i, 1])[0]
            portfolio[i+1, 1] = portfolio[i, 1] + sell(x, i+1, portfolio[i, 1])[1]
            portfolio[len(srsi)+stochrsit, 0] = portfolio[i + 1, 0]
            portfolio[len(srsi)+stochrsit, 1] = portfolio[i + 1, 1]
            C += 0.0
            # NOTE(review): reassigning i does NOT break out of a Python for
            # loop; the next iteration restores i from range(), so the walk
            # continues after liquidation — confirm this is intended.
            i = len(srsi)+stochrsit
    portfolio[len(srsi)+stochrsit,2]=x[len(srsi)+stochrsit]
    return portfolio
def game(x, N, dt, delta, t, portfolio, dxH, dxL, j):
    """Interactive trading game on a freshly simulated GBM path.

    Regenerates the Brownian driver into x, builds a GBM price path, plots
    it, then walks it step by step prompting the user: "b" buys all cash,
    "s" sells all holdings, anything else holds. Finally plots the
    portfolio valuation.

    Fix: plotportfoliog takes six arguments (i, portfolio, lsrsi, name, x,
    stochrsit); the original call passed only five and raised TypeError.
    The stochrsit value only feeds an unused axis inside plotportfoliog,
    so 0 is passed.
    """
    brownian(x[0], N, dt, delta, out=x[1:])
    xgbm = GBM(x[0], mu, sigma, x, T, N)[0]
    plotbrownian(xgbm, dxH*0, dxL*0, dt, t, 'GBMGAME'+str(j), 1)
    portfolio[len(xgbm)-1, 2] = xgbm[len(xgbm)-1]
    for i in range(1, len(xgbm)-1):
        portfolio[i, 2] = xgbm[i]
        print(portfoliogain(1, i, portfolio, xgbm))
        print("P " + str(xgbm[i]))
        command = input("?")
        portfolio[i, 2] = xgbm[i]
        if(command=="b"):
            # buy with the full cash balance at the next price
            portfolio[i+1,0] = portfolio[i, 0] + buy(xgbm, i+1, portfolio[i,0])[0]
            portfolio[i+1,1] = portfolio[i, 1] + buy(xgbm, i+1, portfolio[i,0])[1]
        elif(command=="s"):
            # sell the full holdings at the next price
            portfolio[i+1,0] = portfolio[i,0] + sell(xgbm, i+1, (portfolio[i, 1]))[0]
            portfolio[i+1,1] = portfolio[i,1] + sell(xgbm, i+1, (portfolio[i, 1]))[1]
        else:
            portfolio[i+1, 0] = portfolio[i, 0]
            portfolio[i+1, 1] = portfolio[i, 1]
    plotportfoliog(1, portfolio, len(portfolio), 'PFGAME'+str(j), x, 0)
def get_data_csv(filename):
    """Read the price column (index 1) from a CSV file, skipping the header
    row and any row whose second field is not a number."""
    prices = []
    with open(filename, 'r') as handle:
        reader = csv.reader(handle)
        next(reader)  # header row
        for record in reader:
            try:
                prices.append(float(record[1]))
            except ValueError:
                continue
    return prices
def BestRatio(x):
    """Best-case multiplicative return from trading every local extremum of x.

    Multiplies the ratio by each local maximum (sell) and divides it by each
    local minimum (buy), comparing x[k] against its immediate neighbours.
    If an odd number of extrema fired, the open position is closed at the
    final price.

    Fix: the minimum branch originally compared x[k] against x[i-1] and
    x[i+1], reading the stale module-level loop variable i instead of k.
    """
    s = 1
    p = 0
    for k in range(1, len(x)-1):
        if(x[k]>x[k-1] and x[k]>x[k+1]):
            s = s*x[k]
            p += 1
        elif(x[k]<x[k-1] and x[k]<x[k+1]):  # was x[i-1]/x[i+1]: stale global i
            s = s/x[k]
            p += 1
    if p/2 != floor(p/2):
        # odd number of trades: close out at the last price
        s = s/x[len(x)-1]
    return s
def runBrownian(x, N, dt, delta, dxL, delta2, dxH, t, i, portfolio, alpha, beta, C, sl, cd, smoothCE, smoothCA):
    """Simulate one GBM price path and run the method1 strategy on it.

    Regenerates the Brownian driver into x, builds a GBM path, computes the
    (filtered) StochRSI and EMA indicators, trades via method1, plots the
    resulting valuation, and saves the path to xgbm<i>.csv. Mutates x and
    portfolio in place; the many triple-quoted blocks are disabled variants
    kept for experimentation. Returns None.
    """
    k = 0
    brownian(x[0], N, dt, delta, out=x[1:])
    '''brownian(dxL[0], N, dt, delta2, out=dxL[1:])
    brownian(dxH[0], N, dt, delta2, out=dxH[1:])'''
    '''plotbrownian(x, dxH*0, dxL*0, dt, t, 'B'+str(i), 0)'''
    xgbm = GBM(x[0], mu, sigma, x, T, N)[0]
    plotbrownian(xgbm, dxH*0, dxL*0, dt, t, 'GBM'+str(i), 1)
    """srsi = stochrsiarray(x, t, stochrsit)
    srsib = stochrsib(srsi, alpha, beta)"""
    srsig = stochrsiarray(xgbm, t, stochrsit)
    srsigb = stochrsib(srsig, alpha, beta)
    expava = EMA(smoothCE, x[0], x)
    expavag = EMA(smoothCE, xgbm[1], xgbm)
    """plotkav(t, expavag, 'EXPAVGB'+str(i), 0)
    plotkav(t, expava, 'EXPAVB'+str(i), 0)"""
    """arava = SMA(x, 10, dt)"""
    """plotkav(arava[0], arava[1], 'ARAVAB'+str(i), 1)"""
    aravag = SMA(xgbm, smoothCA, dt)
    """plotkav(aravag[0], aravag[1], 'ARAVAGB'+str(i), 1)
    plotsrsifiltered(t, srsigb, stochrsit, 'SRSIGF'+str(i))
    plotsrsi(t, srsig, stochrsit, 'plotsrsig'+str(i))
    plotsrsifiltered(t, srsib, stochrsit, 'SRSIF'+str(i))
    plotsrsi(t, srsi, stochrsit, 'plotsrsi'+str(i))"""
    """print("RUNBM"+str(i))"""
    '''portfolio = method1(stochrsit, t, x, portfolio, srsib, srsi, dt, C, expava, 0, "run"+str(i), sl, cd)'''
    """if(portfoliogain(1, len(srsi)+stochrsit, portfolio, x)>portfolio[stochrsit,0]):
        k += 1"""
    '''plotportfoliog(1, portfolio, len(srsi), 'PF0BMEQ'+str(i), x, stochrsit)'''
    """plotportfoliog(0, portfolio, len(srsi), 'PF1BMEQ'+str(i), x, stochrsit)"""
    print("RUNGBM"+str(i))
    portfolio2 = method1(stochrsit, t, xgbm, portfolio, srsigb, srsig, dt, C, expavag, 0, "rung"+str(i), sl, cd)
    """if(portfoliogain(1, len(srsi)+stochrsit, portfolio2, x)>portfolio2[stochrsit,0]):
        k += 1"""
    """plotportfoliog(0, portfolio2, len(srsi), 'PF1GBMEQ'+str(i), x, stochrsit)"""
    plotportfoliog(1, portfolio2, len(srsig), 'PF0GBMEQ'+str(i), x, stochrsit)
    """plotportfoliog(2, portfolio, len(srsi), 'PF0BM'+str(i), x, stochrsit)
    plotportfoliog(2, portfolio2, len(srsi), 'PF0GBM'+str(i), x, stochrsit)"""
    save_data_csv(xgbm, "xgbm"+str(i)+".csv")
def runBacktest(srsit, C, sl, cd, i, data, qty, qty2, alpha, beta, smoothCE, plot):
    """Backtest the method1 strategy on prices loaded from CSV file *data*.

    Builds a fresh portfolio with balances qty/qty2, computes the filtered
    StochRSI and EMA indicators on the loaded prices, trades via method1
    (logging to run<data><i>.txt), and optionally (plot == 1) saves the
    candlestick and valuation plots. Uses the module-level dxH, dxL and x
    for plotting. Returns None.
    """
    pdata = get_data_csv(data)
    portfolioBt = createportfolio(len(pdata), srsit, qty, qty2)
    t = np.arange(0, len(pdata)-1, 1)
    srsip = stochrsiarray(pdata, t , srsit)
    srsibp = stochrsib(srsip, alpha, beta)
    emap = EMA(smoothCE, pdata[1], pdata)
    portfolioBt = method1(srsit, t, pdata, portfolioBt, srsibp, srsip, 1, C, emap, 0, "run"+str(data)+str(i), sl, cd)
    if plot==1:
        plotbrownian(pdata, dxH*0, dxL*0, 1, t, str(data)+str(i), 2)
        plotportfoliog(1, portfolioBt, len(srsip), 'PF0'+str(data)+str(i), x, 2)
def save_data_csv(x, filename):
df = pd.DataFrame(x)
df.to_csv(filename, header=None)
def LearningCSL(srsit, cd, i, data, qty, qty2, alpha, beta, smoothCE): #averaging C & sl on brownian motions of same volatility etc, is a good strategy to approximate the min & max.
    """Estimate the take-profit (C) and stop-loss (sl) ratios for a data set.

    Runs method1 with an unreachable target (C=100) and no stop (sl=0),
    then scans the resulting valuations relative to the valuation at the
    warm-up row, returning (max ratio, min ratio).

    NOTE(review): the baseline row uses the module-level stochrsit, not the
    srsit parameter, and portfoliogain receives the filename *data* where a
    price series is expected (harmless for mode 1, which ignores it) —
    confirm both are intended.
    """
    C = 0
    sl = 1
    pdata = get_data_csv(data)
    portfolioBt = createportfolio(len(pdata), srsit, qty, qty2)
    t = np.arange(0, len(pdata)-1, 1)
    srsip = stochrsiarray(pdata, t , srsit)
    srsibp = stochrsib(srsip, alpha, beta)
    emap = EMA(smoothCE, pdata[1], pdata)
    portfolioBt = method1(srsit, t, pdata, portfolioBt, srsibp, srsip, 1, 100, emap, 0, "run"+str(data)+str(i), 0, cd) #C=100,sl=0, it wont reach it.
    d = portfoliogain(1, stochrsit, portfolioBt, data)
    for i in range(srsit, len(portfolioBt)):
        r = portfoliogain(1, i, portfolioBt, data)/d
        if r>C:
            C = r
        if r<sl:
            sl = r
    return C,sl
# Driver: simulate n GBM paths (each also saved to xgbm<i>.csv), learn the
# average take-profit / stop-loss ratios from them, then backtest with
# those learned ratios.
n = 10
for i in range(0, n):
    runBrownian(x, N, dt, delta, dxL, delta2, dxH, t, i, portfolio, 0.4, 0.6, 1.1, 1.01, 0.95, 0.1, 10)
    """game(x, N, dt, delta, t, portfolio, dxH, dxL, i)"""
"""runBacktest(2, 1.01, 1.005, 0.95, 0, "GOOG.csv", 1, 0, 0.4, 0.6)
runBacktest(5, 1.01, 1.005, 0.95, 0, "AAPL.csv", 1, 0, 0.4, 0.6)
runBacktest(10, 1.32, 1.05, 0.99, 0, "FB.csv", 1, 0, 0.4, 0.6)
runBacktest(10, 1.5, 1.005, 0.99, 0, "GOOG2.csv", 1, 0, 0.4, 0.6)
runBacktest(10, 1.2, 1.05, 0.999, 0, "CDI.PA.csv", 1, 0, 0.4, 0.6)
runBacktest(2, 1.01, 1.005, 0.95, 0, "xgbm0.csv", 1, 0, 0.4, 0.6)"""
# Accumulate the learned (C, sl) over all simulated paths.
C = 0
sl = 0
Csl = []
for i in range(0, n):
    Csl = LearningCSL(20, 1.05, 0, "xgbm"+str(i)+".csv", 1, 0, 0.4, 0.6, 0.1)
    C += Csl[0]
    sl += Csl[1]
C = C/n
sl = sl/n #averaging CSL on brownian motions
# Back off the learned extremes slightly before reusing them.
epsilon = 0.001
for i in range(0, n):
    runBacktest(20, C-epsilon, 1.05, sl+epsilon, 0, "xgbm"+str(i)+".csv", 1, 0, 0.4, 0.6, 0.1, 0)
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"numpy.amin",
"csv.reader",
"numpy.empty",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"pandas.DataFrame",
"numpy.cumsum",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.finance.candlestick_ohlc",
... | [((205, 232), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (211, 232), True, 'import matplotlib.pyplot as plt\n'), ((495, 510), 'numpy.empty', 'np.empty', (['(N + 1)'], {}), '(N + 1)\n', (503, 510), True, 'import numpy as np\n'), ((516, 531), 'numpy.empty', 'np.empty', (['(N + 1)'], {}), '(N + 1)\n', (524, 531), True, 'import numpy as np\n'), ((537, 552), 'numpy.empty', 'np.empty', (['(N + 1)'], {}), '(N + 1)\n', (545, 552), True, 'import numpy as np\n'), ((559, 574), 'numpy.empty', 'np.empty', (['(N + 1)'], {}), '(N + 1)\n', (567, 574), True, 'import numpy as np\n'), ((581, 596), 'numpy.empty', 'np.empty', (['(N + 1)'], {}), '(N + 1)\n', (589, 596), True, 'import numpy as np\n'), ((604, 619), 'numpy.empty', 'np.empty', (['(N + 1)'], {}), '(N + 1)\n', (612, 619), True, 'import numpy as np\n'), ((660, 679), 'numpy.arange', 'np.arange', (['(0)', 'T', 'dt'], {}), '(0, T, dt)\n', (669, 679), True, 'import numpy as np\n'), ((2177, 2200), 'numpy.empty', 'np.empty', (['(N - stochrsit)'], {}), '(N - stochrsit)\n', (2185, 2200), True, 'import numpy as np\n'), ((3080, 3095), 'numpy.empty', 'np.empty', (['(N + 1)'], {}), '(N + 1)\n', (3088, 3095), True, 'import numpy as np\n'), ((733, 747), 'numpy.asarray', 'np.asarray', (['x0'], {}), '(x0)\n', (743, 747), True, 'import numpy as np\n'), ((865, 895), 'numpy.cumsum', 'np.cumsum', (['r'], {'axis': '(-1)', 'out': 'out'}), '(r, axis=-1, out=out)\n', (874, 895), True, 'import numpy as np\n'), ((907, 934), 'numpy.expand_dims', 'np.expand_dims', (['x0'], {'axis': '(-1)'}), '(x0, axis=-1)\n', (921, 934), True, 'import numpy as np\n'), ((976, 993), 'numpy.random.seed', 'np.random.seed', (['s'], {}), '(s)\n', (990, 993), True, 'import numpy as np\n'), ((1358, 1386), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(N + 1)'], {}), '(0.0, 1.0, N + 1)\n', (1369, 1386), True, 'import numpy as np\n'), ((1623, 1647), 'numpy.zeros', 'np.zeros', ([], {'shape': '(LEN, 3)'}), 
'(shape=(LEN, 3))\n', (1631, 1647), True, 'import numpy as np\n'), ((2292, 2319), 'numpy.amax', 'np.amax', (['x[i - stochrsit:i]'], {}), '(x[i - stochrsit:i])\n', (2299, 2319), True, 'import numpy as np\n'), ((2327, 2354), 'numpy.amin', 'np.amin', (['x[i - stochrsit:i]'], {}), '(x[i - stochrsit:i])\n', (2334, 2354), True, 'import numpy as np\n'), ((3313, 3341), 'numpy.delete', 'np.delete', (['expava', '(1)'], {'axis': '(0)'}), '(expava, 1, axis=0)\n', (3322, 3341), True, 'import numpy as np\n'), ((3493, 3508), 'numpy.empty', 'np.empty', (['(N - n)'], {}), '(N - n)\n', (3501, 3508), True, 'import numpy as np\n'), ((3517, 3541), 'numpy.arange', 'np.arange', (['(n * dt)', 'T', 'dt'], {}), '(n * dt, T, dt)\n', (3526, 3541), True, 'import numpy as np\n'), ((3775, 3789), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3787, 3789), True, 'import matplotlib.pyplot as plt\n'), ((3794, 3808), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'k'], {}), '(t, k)\n', (3802, 3808), True, 'import matplotlib.pyplot as plt\n'), ((3813, 3860), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{time} ($t = k\\\\Delta t$)"""'], {}), "('\\\\textbf{time} ($t = k\\\\Delta t$)')\n", (3823, 3860), True, 'import matplotlib.pyplot as plt\n'), ((4275, 4311), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('PLOT/' + name + '.png')"], {}), "('PLOT/' + name + '.png')\n", (4286, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4363, 4377), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4375, 4377), True, 'import matplotlib.pyplot as plt\n'), ((4382, 4429), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{time} ($t = k\\\\Delta t$)"""'], {}), "('\\\\textbf{time} ($t = k\\\\Delta t$)')\n", (4392, 4429), True, 'import matplotlib.pyplot as plt\n'), ((4433, 4466), 'matplotlib.pyplot.title', 'plt.title', (['"""StochRSI$(t, \\\\tau)$"""'], {}), "('StochRSI$(t, \\\\tau)$')\n", (4442, 4466), True, 'import matplotlib.pyplot as plt\n'), ((4627, 4663), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["('PLOT/' + name + '.png')"], {}), "('PLOT/' + name + '.png')\n", (4638, 4663), True, 'import matplotlib.pyplot as plt\n'), ((4736, 4750), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4748, 4750), True, 'import matplotlib.pyplot as plt\n'), ((4763, 4802), 'numpy.arange', 'np.arange', (['stochrsit', '(lsrsi + stochrsit)'], {}), '(stochrsit, lsrsi + stochrsit)\n', (4772, 4802), True, 'import numpy as np\n'), ((4805, 4862), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{time} ($k = \\\\frac{t}{\\\\Delta t}$)"""'], {}), "('\\\\textbf{time} ($k = \\\\frac{t}{\\\\Delta t}$)')\n", (4815, 4862), True, 'import matplotlib.pyplot as plt\n'), ((5159, 5195), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('PLOT/' + name + '.png')"], {}), "('PLOT/' + name + '.png')\n", (5170, 5195), True, 'import matplotlib.pyplot as plt\n'), ((5514, 5528), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5526, 5528), True, 'import matplotlib.pyplot as plt\n'), ((5592, 5639), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{time} ($t = k\\\\Delta t$)"""'], {}), "('\\\\textbf{time} ($t = k\\\\Delta t$)')\n", (5602, 5639), True, 'import matplotlib.pyplot as plt\n'), ((5643, 5704), 'matplotlib.pyplot.title', 'plt.title', (['"""FilteredStochRSI($\\\\alpha$,$\\\\beta$,$t$,$\\\\tau$)"""'], {}), "('FilteredStochRSI($\\\\alpha$,$\\\\beta$,$t$,$\\\\tau$)')\n", (5652, 5704), True, 'import matplotlib.pyplot as plt\n'), ((5707, 5743), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('PLOT/' + name + '.png')"], {}), "('PLOT/' + name + '.png')\n", (5718, 5743), True, 'import matplotlib.pyplot as plt\n'), ((6249, 6263), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6261, 6263), True, 'import matplotlib.pyplot as plt\n'), ((6268, 6315), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{time} ($t = k\\\\Delta t$)"""'], {}), "('\\\\textbf{time} ($t = k\\\\Delta t$)')\n", (6278, 6315), 
True, 'import matplotlib.pyplot as plt\n'), ((6319, 6396), 'matplotlib.finance.candlestick_ohlc', 'candlestick_ohlc', (['ax', 'quotes'], {'width': 'dt', 'colorup': '"""g"""', 'colordown': '"""r"""', 'alpha': '(1.0)'}), "(ax, quotes, width=dt, colorup='g', colordown='r', alpha=1.0)\n", (6335, 6396), False, 'from matplotlib.finance import candlestick_ohlc\n'), ((6625, 6661), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('PLOT/' + name + '.png')"], {}), "('PLOT/' + name + '.png')\n", (6636, 6661), True, 'import matplotlib.pyplot as plt\n'), ((14702, 14717), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (14714, 14717), True, 'import pandas as pd\n'), ((843, 860), 'numpy.empty', 'np.empty', (['r.shape'], {}), '(r.shape)\n', (851, 860), True, 'import numpy as np\n'), ((3883, 3991), 'matplotlib.pyplot.title', 'plt.title', (['"""EMA$(B(t)) = \\\\alpha B(t)+(1-\\\\alpha) $EMA$({B(t-\\\\Delta t)})$"""'], {'fontsize': '(16)', 'color': '"""black"""'}), "('EMA$(B(t)) = \\\\alpha B(t)+(1-\\\\alpha) $EMA$({B(t-\\\\Delta t)})$',\n fontsize=16, color='black')\n", (3892, 3991), True, 'import matplotlib.pyplot as plt\n'), ((4585, 4617), 'numpy.arange', 'np.arange', (['(stochrsit * dt)', 'T', 'dt'], {}), '(stochrsit * dt, T, dt)\n', (4594, 4617), True, 'import numpy as np\n'), ((5542, 5574), 'numpy.arange', 'np.arange', (['(stochrsit * dt)', 'T', 'dt'], {}), '(stochrsit * dt, T, dt)\n', (5551, 5574), True, 'import numpy as np\n'), ((6420, 6459), 'matplotlib.pyplot.title', 'plt.title', (['"""$B_g(t, \\\\sigma, \\\\mu, B)$"""'], {}), "('$B_g(t, \\\\sigma, \\\\mu, B)$')\n", (6429, 6459), True, 'import matplotlib.pyplot as plt\n'), ((11303, 11319), 'csv.reader', 'csv.reader', (['csvf'], {}), '(csvf)\n', (11313, 11319), False, 'import csv\n'), ((11752, 11764), 'math.floor', 'floor', (['(p / 2)'], {}), '(p / 2)\n', (11757, 11764), False, 'from math import floor\n'), ((1015, 1044), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', 'N'], {}), '(0.0, 1.0, N)\n', 
(1031, 1044), True, 'import numpy as np\n'), ((1043, 1056), 'math.sqrt', 'sqrt', (['(1.0 / N)'], {}), '(1.0 / N)\n', (1047, 1056), False, 'from math import sqrt\n'), ((4019, 4123), 'matplotlib.pyplot.title', 'plt.title', (['"""SMA$(B(t)) = $ SMA$(B(t-\\\\Delta t))+\\\\frac{B(t)-B(t-n)}{n}$"""'], {'fontsize': '(16)', 'color': '"""black"""'}), "('SMA$(B(t)) = $ SMA$(B(t-\\\\Delta t))+\\\\frac{B(t)-B(t-n)}{n}$',\n fontsize=16, color='black')\n", (4028, 4123), True, 'import matplotlib.pyplot as plt\n'), ((6484, 6512), 'matplotlib.pyplot.title', 'plt.title', (['"""$B(t, \\\\delta)$"""'], {}), "('$B(t, \\\\delta)$')\n", (6493, 6512), True, 'import matplotlib.pyplot as plt\n'), ((6531, 6555), 'matplotlib.pyplot.title', 'plt.title', (['"""STOCK PRICE"""'], {}), "('STOCK PRICE')\n", (6540, 6555), True, 'import matplotlib.pyplot as plt\n'), ((6565, 6622), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\textbf{time} ($k = \\\\frac{t}{\\\\Delta t}$)"""'], {}), "('\\\\textbf{time} ($k = \\\\frac{t}{\\\\Delta t}$)')\n", (6575, 6622), True, 'import matplotlib.pyplot as plt\n'), ((799, 807), 'math.sqrt', 'sqrt', (['dt'], {}), '(dt)\n', (803, 807), False, 'from math import sqrt\n'), ((1529, 1546), 'numpy.exp', 'np.exp', (['(dr + diff)'], {}), '(dr + diff)\n', (1535, 1546), True, 'import numpy as np\n'), ((4152, 4274), 'matplotlib.pyplot.title', 'plt.title', (['"""SA$(B(t)) = \\\\frac{1}{t}\\\\mathrm{SA}(B(t-\\\\Delta t))*(t-\\\\Delta)+B(t))$"""'], {'fontsize': '(16)', 'color': '"""black"""'}), "(\n 'SA$(B(t)) = \\\\frac{1}{t}\\\\mathrm{SA}(B(t-\\\\Delta t))*(t-\\\\Delta)+B(t))$',\n fontsize=16, color='black')\n", (4161, 4274), True, 'import matplotlib.pyplot as plt\n')] |
# coding: utf-8
# # Deep Learning & Art: Neural Style Transfer
#
# Welcome to the second assignment of this week. In this assignment, you will learn about Neural Style Transfer. This algorithm was created by Gatys et al. (2015) (https://arxiv.org/abs/1508.06576).
#
# **In this assignment, you will:**
# - Implement the neural style transfer algorithm
# - Generate novel artistic images using your algorithm
#
# Most of the algorithms you've studied optimize a cost function to get a set of parameter values. In Neural Style Transfer, you'll optimize a cost function to get pixel values!
# In[1]:
import os
import sys
import scipy.io
import scipy.misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from nst_utils import *
import numpy as np
import tensorflow as tf
# Jupyter magic: render matplotlib figures inline in the notebook output.
get_ipython().magic('matplotlib inline')
# ## 1 - Problem Statement
#
# Neural Style Transfer (NST) is one of the most fun techniques in deep learning. As seen below, it merges two images, namely, a "content" image (C) and a "style" image (S), to create a "generated" image (G). The generated image G combines the "content" of the image C with the "style" of image S.
#
# In this example, you are going to generate an image of the Louvre museum in Paris (content image C), mixed with a painting by <NAME>, a leader of the impressionist movement (style image S).
# <img src="images/louvre_generated.png" style="width:750px;height:200px;">
#
# Let's see how you can do this.
# ## 2 - Transfer Learning
#
# Neural Style Transfer (NST) uses a previously trained convolutional network, and builds on top of that. The idea of using a network trained on a different task and applying it to a new task is called transfer learning.
#
# Following the original NST paper (https://arxiv.org/abs/1508.06576), we will use the VGG network. Specifically, we'll use VGG-19, a 19-layer version of the VGG network. This model has already been trained on the very large ImageNet database, and thus has learned to recognize a variety of low level features (at the earlier layers) and high level features (at the deeper layers).
#
# Run the following code to load parameters from the VGG model. This may take a few seconds.
# In[2]:
# Load the pretrained VGG-19 weights (from nst_utils) into a dict of
# layer-name -> tensor; printing shows the available layer keys.
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
print(model)
# The model is stored in a python dictionary where each variable name is the key and the corresponding value is a tensor containing that variable's value. To run an image through this network, you just have to feed the image to the model. In TensorFlow, you can do so using the [tf.assign](https://www.tensorflow.org/api_docs/python/tf/assign) function. In particular, you will use the assign function like this:
# ```python
# model["input"].assign(image)
# ```
# This assigns the image as an input to the model. After this, if you want to access the activations of a particular layer, say layer `4_2` when the network is run on this image, you would run a TensorFlow session on the correct tensor `conv4_2`, as follows:
# ```python
# sess.run(model["conv4_2"])
# ```
# ## 3 - Neural Style Transfer
#
# We will build the NST algorithm in three steps:
#
# - Build the content cost function $J_{content}(C,G)$
# - Build the style cost function $J_{style}(S,G)$
# - Put it together to get $J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$.
#
# ### 3.1 - Computing the content cost
#
# In our running example, the content image C will be the picture of the Louvre Museum in Paris. Run the code below to see a picture of the Louvre.
# In[3]:
# Content image C: the Louvre photograph, shown inline.
content_image = scipy.misc.imread("images/louvre.jpg")
imshow(content_image)
# The content image (C) shows the Louvre museum's pyramid surrounded by old Paris buildings, against a sunny sky with a few clouds.
#
# ** 3.1.1 - How do you ensure the generated image G matches the content of the image C?**
#
# As we saw in lecture, the earlier (shallower) layers of a ConvNet tend to detect lower-level features such as edges and simple textures, and the later (deeper) layers tend to detect higher-level features such as more complex textures as well as object classes.
#
# We would like the "generated" image G to have similar content as the input image C. Suppose you have chosen some layer's activations to represent the content of an image. In practice, you'll get the most visually pleasing results if you choose a layer in the middle of the network--neither too shallow nor too deep. (After you have finished this exercise, feel free to come back and experiment with using different layers, to see how the results vary.)
#
# So, suppose you have picked one particular hidden layer to use. Now, set the image C as the input to the pretrained VGG network, and run forward propagation. Let $a^{(C)}$ be the hidden layer activations in the layer you had chosen. (In lecture, we had written this as $a^{[l](C)}$, but here we'll drop the superscript $[l]$ to simplify the notation.) This will be a $n_H \times n_W \times n_C$ tensor. Repeat this process with the image G: Set G as the input, and run forward propagation. Let $a^{(G)}$ be the corresponding hidden layer activation. We will define the content cost function as:
#
# $$J_{content}(C,G) = \frac{1}{4 \times n_H \times n_W \times n_C}\sum _{ \text{all entries}} (a^{(C)} - a^{(G)})^2\tag{1} $$
#
# Here, $n_H, n_W$ and $n_C$ are the height, width and number of channels of the hidden layer you have chosen, and appear in a normalization term in the cost. For clarity, note that $a^{(C)}$ and $a^{(G)}$ are the volumes corresponding to a hidden layer's activations. In order to compute the cost $J_{content}(C,G)$, it might also be convenient to unroll these 3D volumes into a 2D matrix, as shown below. (Technically this unrolling step isn't needed to compute $J_{content}$, but it will be good practice for when you do need to carry out a similar operation later for computing the style cost $J_{style}$.)
#
# <img src="images/NST_LOSS.png" style="width:800px;height:400px;">
#
# **Exercise:** Compute the "content cost" using TensorFlow.
#
# **Instructions**: The 3 steps to implement this function are:
# 1. Retrieve dimensions from a_G:
# - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`
# 2. Unroll a_C and a_G as explained in the picture above
# - If you are stuck, take a look at [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape).
# 3. Compute the content cost:
# - If you are stuck, take a look at [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract).
# In[8]:
# GRADED FUNCTION: compute_content_cost
def compute_content_cost(a_C, a_G):
    """
    Compute the content cost between images C and G (equation 1).
    Arguments:
    a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations for the content image C
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations for the generated image G
    Returns:
    J_content -- scalar tensor: normalized sum of squared activation differences
    """
    ### START CODE HERE ###
    # Dimensions of the chosen hidden layer (≈1 line)
    _, height, width, channels = a_G.get_shape().as_list()
    # "Unroll" a_C and a_G (≈2 lines)
    content_act = tf.transpose(a_C)
    generated_act = tf.transpose(a_G)
    # Normalized sum of squared differences over all entries (≈1 line)
    norm_factor = 4 * width * height * channels
    J_content = (1 / norm_factor) * tf.reduce_sum(tf.pow(content_act - generated_act, 2))
    ### END CODE HERE ###
    return J_content
# In[9]:
# Sanity-check compute_content_cost on small random activations.
tf.reset_default_graph()
with tf.Session() as test:
    # Fixed seed so the printed value matches the expected output (6.76559).
    tf.set_random_seed(1)
    a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    J_content = compute_content_cost(a_C, a_G)
    print("J_content = " + str(J_content.eval()))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **J_content**
# </td>
# <td>
# 6.76559
# </td>
# </tr>
#
# </table>
# <font color='blue'>
# **What you should remember**:
# - The content cost takes a hidden layer activation of the neural network, and measures how different $a^{(C)}$ and $a^{(G)}$ are.
# - When we minimize the content cost later, this will help make sure $G$ has similar content as $C$.
# ### 3.2 - Computing the style cost
#
# For our running example, we will use the following style image:
# In[10]:
# Load the style reference image (a Monet painting) and display it.
style_image = scipy.misc.imread("images/monet_800600.jpg")
imshow(style_image)
# This painting was painted in the style of *[impressionism](https://en.wikipedia.org/wiki/Impressionism)*.
#
# Let's see how you can now define a "style" cost function $J_{style}(S,G)$.
# ### 3.2.1 - Style matrix
#
# The style matrix is also called a "Gram matrix." In linear algebra, the Gram matrix G of a set of vectors $(v_{1},\dots ,v_{n})$ is the matrix of dot products, whose entries are ${\displaystyle G_{ij} = v_{i}^T v_{j} = np.dot(v_{i}, v_{j}) }$. In other words, $G_{ij}$ compares how similar $v_i$ is to $v_j$: If they are highly similar, you would expect them to have a large dot product, and thus for $G_{ij}$ to be large.
#
# Note that there is an unfortunate collision in the variable names used here. We are following common terminology used in the literature, but $G$ is used to denote the Style matrix (or Gram matrix) as well as to denote the generated image $G$. We will try to make sure which $G$ we are referring to is always clear from the context.
#
# In NST, you can compute the Style matrix by multiplying the "unrolled" filter matrix with their transpose:
#
# <img src="images/NST_GM.png" style="width:900px;height:300px;">
#
# The result is a matrix of dimension $(n_C,n_C)$ where $n_C$ is the number of filters. The value $G_{ij}$ measures how similar the activations of filter $i$ are to the activations of filter $j$.
#
# One important part of the gram matrix is that the diagonal elements such as $G_{ii}$ also measures how active filter $i$ is. For example, suppose filter $i$ is detecting vertical textures in the image. Then $G_{ii}$ measures how common vertical textures are in the image as a whole: If $G_{ii}$ is large, this means that the image has a lot of vertical texture.
#
# By capturing the prevalence of different types of features ($G_{ii}$), as well as how much different features occur together ($G_{ij}$), the Style matrix $G$ measures the style of an image.
#
# **Exercise**:
# Using TensorFlow, implement a function that computes the Gram matrix of a matrix A. The formula is: The gram matrix of A is $G_A = AA^T$. If you are stuck, take a look at [Hint 1](https://www.tensorflow.org/api_docs/python/tf/matmul) and [Hint 2](https://www.tensorflow.org/api_docs/python/tf/transpose).
# In[11]:
# GRADED FUNCTION: gram_matrix
def gram_matrix(A):
    """
    Compute the Gram (style) matrix G_A = A·Aᵀ.
    Argument:
    A -- activations unrolled to shape (n_C, n_H*n_W)
    Returns:
    GA -- (n_C, n_C) matrix of filter-to-filter dot products
    """
    ### START CODE HERE ### (≈1 line)
    A_transposed = tf.transpose(A)
    GA = tf.matmul(A, A_transposed)
    ### END CODE HERE ###
    return GA
# In[12]:
# Sanity-check gram_matrix on a small random matrix.
tf.reset_default_graph()
with tf.Session() as test:
    # Fixed seed so the printed matrix matches the expected output.
    tf.set_random_seed(1)
    A = tf.random_normal([3, 2*1], mean=1, stddev=4)
    GA = gram_matrix(A)
    print("GA = " + str(GA.eval()))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **GA**
# </td>
# <td>
# [[ 6.42230511 -4.42912197 -2.09668207] <br>
# [ -4.42912197 19.46583748 19.56387138] <br>
# [ -2.09668207 19.56387138 20.6864624 ]]
# </td>
# </tr>
#
# </table>
# ### 3.2.2 - Style cost
# After generating the Style matrix (Gram matrix), your goal will be to minimize the distance between the Gram matrix of the "style" image S and that of the "generated" image G. For now, we are using only a single hidden layer $a^{[l]}$, and the corresponding style cost for this layer is defined as:
#
# $$J_{style}^{[l]}(S,G) = \frac{1}{4 \times {n_C}^2 \times (n_H \times n_W)^2} \sum _{i=1}^{n_C}\sum_{j=1}^{n_C}(G^{(S)}_{ij} - G^{(G)}_{ij})^2\tag{2} $$
#
# where $G^{(S)}$ and $G^{(G)}$ are respectively the Gram matrices of the "style" image and the "generated" image, computed using the hidden layer activations for a particular hidden layer in the network.
#
# **Exercise**: Compute the style cost for a single layer.
#
# **Instructions**: The 3 steps to implement this function are:
# 1. Retrieve dimensions from the hidden layer activations a_G:
# - To retrieve dimensions from a tensor X, use: `X.get_shape().as_list()`
# 2. Unroll the hidden layer activations a_S and a_G into 2D matrices, as explained in the picture above.
# - You may find [Hint1](https://www.tensorflow.org/versions/r1.3/api_docs/python/tf/transpose) and [Hint2](https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/reshape) useful.
# 3. Compute the Style matrix of the images S and G. (Use the function you had previously written.)
# 4. Compute the Style cost:
# - You may find [Hint3](https://www.tensorflow.org/api_docs/python/tf/reduce_sum), [Hint4](https://www.tensorflow.org/api_docs/python/tf/square) and [Hint5](https://www.tensorflow.org/api_docs/python/tf/subtract) useful.
# In[13]:
# GRADED FUNCTION: compute_layer_style_cost
def compute_layer_style_cost(a_S, a_G):
    """
    Compute the style cost for a single hidden layer (equation 2).
    Arguments:
    a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations for the style image S
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations for the generated image G
    Returns:
    J_style_layer -- scalar tensor: normalized squared Frobenius distance between Gram matrices
    """
    ### START CODE HERE ###
    # Dimensions of the layer's activation volume (≈1 line)
    _, height, width, channels = a_G.get_shape().as_list()
    pixels = height * width
    # Unroll both volumes into (n_C, n_H*n_W) matrices (≈2 lines)
    a_S = tf.transpose(tf.reshape(a_S, [pixels, channels]))
    a_G = tf.transpose(tf.reshape(a_G, [pixels, channels]))
    # Gram matrices for the style and generated images (≈2 lines)
    GS = gram_matrix(a_S)
    GG = gram_matrix(a_G)
    # Normalized squared difference of the Gram matrices (≈1 line)
    scale = 1. / (4 * channels**2 * pixels**2)
    J_style_layer = scale * tf.reduce_sum(tf.pow(GS - GG, 2))
    ### END CODE HERE ###
    return J_style_layer
# In[14]:
# Sanity-check compute_layer_style_cost on small random activations.
tf.reset_default_graph()
with tf.Session() as test:
    # Fixed seed so the printed value matches the expected output (9.19028).
    tf.set_random_seed(1)
    a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    J_style_layer = compute_layer_style_cost(a_S, a_G)
    print("J_style_layer = " + str(J_style_layer.eval()))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **J_style_layer**
# </td>
# <td>
# 9.19028
# </td>
# </tr>
#
# </table>
# ### 3.2.3 Style Weights
#
# So far you have captured the style from only one layer. We'll get better results if we "merge" style costs from several different layers. After completing this exercise, feel free to come back and experiment with different weights to see how it changes the generated image $G$. But for now, this is a pretty reasonable default:
# In[15]:
# Per-layer style weights lambda^[l]: (VGG layer name, coefficient) pairs.
# Equal weights across five conv layers; the coefficients sum to 1.0.
STYLE_LAYERS = [
    ('conv1_1', 0.2),
    ('conv2_1', 0.2),
    ('conv3_1', 0.2),
    ('conv4_1', 0.2),
    ('conv5_1', 0.2)]
# You can combine the style costs for different layers as follows:
#
# $$J_{style}(S,G) = \sum_{l} \lambda^{[l]} J^{[l]}_{style}(S,G)$$
#
# where the values for $\lambda^{[l]}$ are given in `STYLE_LAYERS`.
#
# We've implemented a compute_style_cost(...) function. It simply calls your `compute_layer_style_cost(...)` several times, and weights their results using the values in `STYLE_LAYERS`. Read over it to make sure you understand what it's doing.
#
# <!--
# 2. Loop over (layer_name, coeff) from STYLE_LAYERS:
# a. Select the output tensor of the current layer. As an example, to call the tensor from the "conv1_1" layer you would do: out = model["conv1_1"]
# b. Get the style of the style image from the current layer by running the session on the tensor "out"
# c. Get a tensor representing the style of the generated image from the current layer. It is just "out".
# d. Now that you have both styles. Use the function you've implemented above to compute the style_cost for the current layer
# e. Add (style_cost x coeff) of the current layer to overall style cost (J_style)
# 3. Return J_style, which should now be the sum of the (style_cost x coeff) for each layer.
# !-->
#
# In[16]:
def compute_style_cost(model, STYLE_LAYERS):
    """
    Compute the overall style cost as a weighted sum over several VGG layers.
    Arguments:
    model -- our tensorflow model (dict of layer name -> output tensor)
    STYLE_LAYERS -- list of (layer_name, coefficient) pairs selecting which
                    layers contribute to the style cost and by how much
    Returns:
    J_style -- scalar tensor: sum of coeff * per-layer style cost (equation 2)
    """
    # Accumulate the weighted per-layer costs.
    J_style = 0
    for layer_name, coeff in STYLE_LAYERS:
        # Output tensor of the currently selected layer.
        layer_output = model[layer_name]
        # a_S: concrete activations for the style image S, evaluated NOW
        # (the session currently has S assigned as the model input).
        a_S = sess.run(layer_output)
        # a_G: the same tensor left symbolic. It is evaluated later, once the
        # generated image G has been assigned as the model input, so at run
        # time it yields G's activations at this layer.
        a_G = layer_output
        # Weighted contribution of this layer to the overall style cost.
        J_style += coeff * compute_layer_style_cost(a_S, a_G)
    return J_style
# **Note**: In the inner-loop of the for-loop above, `a_G` is a tensor and hasn't been evaluated yet. It will be evaluated and updated at each iteration when we run the TensorFlow graph in model_nn() below.
#
# <!--
# How do you choose the coefficients for each layer? The deeper layers capture higher-level concepts, and the features in the deeper layers are less localized in the image relative to each other. So if you want the generated image to softly follow the style image, try choosing larger weights for deeper layers and smaller weights for the first layers. In contrast, if you want the generated image to strongly follow the style image, try choosing smaller weights for deeper layers and larger weights for the first layers
# !-->
#
#
# <font color='blue'>
# **What you should remember**:
# - The style of an image can be represented using the Gram matrix of a hidden layer's activations. However, we get even better results combining this representation from multiple different layers. This is in contrast to the content representation, where usually using just a single hidden layer is sufficient.
# - Minimizing the style cost will cause the image $G$ to follow the style of the image $S$.
# </font color='blue'>
#
#
# ### 3.3 - Defining the total cost to optimize
# Finally, let's create a cost function that minimizes both the style and the content cost. The formula is:
#
# $$J(G) = \alpha J_{content}(C,G) + \beta J_{style}(S,G)$$
#
# **Exercise**: Implement the total cost function which includes both the content cost and the style cost.
# In[17]:
# GRADED FUNCTION: total_cost
def total_cost(J_content, J_style, alpha = 10, beta = 40):
    """
    Combine content and style costs into the single objective J(G).
    Arguments:
    J_content -- content cost J_content(C, G)
    J_style -- style cost J_style(S, G)
    alpha -- hyperparameter weighting the content term
    beta -- hyperparameter weighting the style term
    Returns:
    J -- total cost: alpha * J_content + beta * J_style
    """
    ### START CODE HERE ### (≈1 line)
    content_term = alpha * J_content
    style_term = beta * J_style
    J = content_term + style_term
    ### END CODE HERE ###
    return J
# In[18]:
# Sanity-check total_cost with scalar pseudo-costs (no tensors needed).
tf.reset_default_graph()
with tf.Session() as test:
    # Fixed numpy seed so the printed value matches the expected output.
    np.random.seed(3)
    J_content = np.random.randn()
    J_style = np.random.randn()
    J = total_cost(J_content, J_style)
    print("J = " + str(J))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **J**
# </td>
# <td>
# 35.34667875478276
# </td>
# </tr>
#
# </table>
# <font color='blue'>
# **What you should remember**:
# - The total cost is a linear combination of the content cost $J_{content}(C,G)$ and the style cost $J_{style}(S,G)$
# - $\alpha$ and $\beta$ are hyperparameters that control the relative weighting between content and style
# ## 4 - Solving the optimization problem
# Finally, let's put everything together to implement Neural Style Transfer!
#
#
# Here's what the program will have to do:
# <font color='purple'>
#
# 1. Create an Interactive Session
# 2. Load the content image
# 3. Load the style image
# 4. Randomly initialize the image to be generated
# 5. Load the VGG16 model
# 7. Build the TensorFlow graph:
# - Run the content image through the VGG16 model and compute the content cost
# - Run the style image through the VGG16 model and compute the style cost
# - Compute the total cost
# - Define the optimizer and the learning rate
# 8. Initialize the TensorFlow graph and run it for a large number of iterations, updating the generated image at every step.
#
# </font>
# Lets go through the individual steps in detail.
# You've previously implemented the overall cost $J(G)$. We'll now set up TensorFlow to optimize this with respect to $G$. To do so, your program has to reset the graph and use an "[Interactive Session](https://www.tensorflow.org/api_docs/python/tf/InteractiveSession)". Unlike a regular session, the "Interactive Session" installs itself as the default session to build a graph. This allows you to run variables without constantly needing to refer to the session object, which simplifies the code.
#
# Lets start the interactive session.
# In[19]:
# Reset the graph
# Reset the default graph before building the NST computation.
tf.reset_default_graph()
# Start interactive session: installs itself as the default session so
# tensors can be .eval()'d / run without passing the session around.
sess = tf.InteractiveSession()
# Let's load, reshape, and normalize our "content" image (the Louvre museum picture):
# In[20]:
# Load the content image and reshape/normalize it for the VGG input.
content_image = scipy.misc.imread("images/louvre_small.jpg")
content_image = reshape_and_normalize_image(content_image)
# Let's load, reshape and normalize our "style" image (Claude Monet's painting):
# In[21]:
# Load the style image and reshape/normalize it for the VGG input.
style_image = scipy.misc.imread("images/monet.jpg")
style_image = reshape_and_normalize_image(style_image)
# Now, we initialize the "generated" image as a noisy image created from the content_image. By initializing the pixels of the generated image to be mostly noise but still slightly correlated with the content image, this will help the content of the "generated" image more rapidly match the content of the "content" image. (Feel free to look in `nst_utils.py` to see the details of `generate_noise_image(...)`; to do so, click "File-->Open..." at the upper-left corner of this Jupyter notebook.)
# In[22]:
# Initialize G as noise weakly correlated with the content image, which
# speeds up matching the content during optimization.
generated_image = generate_noise_image(content_image)
imshow(generated_image[0])
# Next, as explained in part (2), let's load the VGG16 model.
# In[23]:
# Load the pretrained VGG weights (note: the .mat file is the 19-layer VGG).
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
# To get the program to compute the content cost, we will now assign `a_C` and `a_G` to be the appropriate hidden layer activations. We will use layer `conv4_2` to compute the content cost. The code below does the following:
#
# 1. Assign the content image to be the input to the VGG model.
# 2. Set a_C to be the tensor giving the hidden layer activation for layer "conv4_2".
# 3. Set a_G to be the tensor giving the hidden layer activation for the same layer.
# 4. Compute the content cost using a_C and a_G.
# In[24]:
# Assign the content image to be the input of the VGG model.
sess.run(model['input'].assign(content_image))
# Select the output tensor of layer conv4_2
out = model['conv4_2']
# Set a_C to be the hidden layer activation from the layer we have selected
# (evaluated now, while the content image is the model input).
a_C = sess.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# **Note**: At this point, a_G is a tensor and hasn't been evaluated. It will be evaluated and updated at each iteration when we run the Tensorflow graph in model_nn() below.
# In[25]:
# Assign the input of the model to be the "style" image
sess.run(model['input'].assign(style_image))
# Compute the style cost (weighted sum over the layers in STYLE_LAYERS).
J_style = compute_style_cost(model, STYLE_LAYERS)
# **Exercise**: Now that you have J_content and J_style, compute the total cost J by calling `total_cost()`. Use `alpha = 10` and `beta = 40`.
# In[26]:
### START CODE HERE ### (1 line)
# Total objective: J = alpha * J_content + beta * J_style.
J = total_cost(J_content, J_style, alpha = 10, beta = 40)
### END CODE HERE ###
# You'd previously learned how to set up the Adam optimizer in TensorFlow. Lets do that here, using a learning rate of 2.0. [See reference](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
# In[27]:
# define optimizer (1 line) -- Adam with learning rate 2.0
optimizer = tf.train.AdamOptimizer(2.0)
# define train_step (1 line) -- minimizes J w.r.t. the image pixels
train_step = optimizer.minimize(J)
# **Exercise**: Implement the model_nn() function which initializes the variables of the tensorflow graph, assigns the input image (initial generated image) as the input of the VGG16 model and runs the train_step for a large number of steps.
# In[28]:
def model_nn(sess, input_image, num_iterations = 200):
    """Run the NST optimization loop and return the final generated image.

    Relies on module-level graph objects: model, train_step, J, J_content,
    J_style. Saves a snapshot to "output/<i>.png" every 20 iterations and
    the final image to "output/generated_image.jpg".
    """
    # Initialize global variables (you need to run the session on the initializer)
    ### START CODE HERE ### (1 line)
    sess.run(tf.global_variables_initializer())
    ### END CODE HERE ###
    # Run the noisy input image (initial generated image) through the model. Use assign().
    ### START CODE HERE ### (1 line)
    sess.run(model['input'].assign(input_image))
    ### END CODE HERE ###
    for step in range(num_iterations):
        # One optimizer step on the image pixels to minimize the total cost.
        ### START CODE HERE ### (1 line)
        sess.run(train_step)
        ### END CODE HERE ###
        # Read the updated image back out of the model's input variable.
        ### START CODE HERE ### (1 line)
        generated_image = sess.run(model['input'])
        ### END CODE HERE ###
        # Report progress and checkpoint the image every 20 iterations.
        if step % 20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(step) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))
            # save current generated image in the "/output" directory
            save_image("output/" + str(step) + ".png", generated_image)
    # save last generated image
    save_image('output/generated_image.jpg', generated_image)
    return generated_image
# Run the following cell to generate an artistic image. It should take about 3min on CPU for every 20 iterations but you start observing attractive results after ≈140 iterations. Neural Style Transfer is generally trained using GPUs.
# In[29]:
# Run the optimization (about 3 min per 20 iterations on CPU).
model_nn(sess, generated_image)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **Iteration 0 : **
# </td>
# <td>
# total cost = 5.05035e+09 <br>
# content cost = 7877.67 <br>
# style cost = 1.26257e+08
# </td>
# </tr>
#
# </table>
# You're done! After running this, in the upper bar of the notebook click on "File" and then "Open". Go to the "/output" directory to see all the saved images. Open "generated_image" to see the generated image! :)
#
# You should see something the image presented below on the right:
#
# <img src="images/louvre_generated.png" style="width:800px;height:300px;">
#
# We didn't want you to wait too long to see an initial result, and so had set the hyperparameters accordingly. To get the best looking results, running the optimization algorithm longer (and perhaps with a smaller learning rate) might work better. After completing and submitting this assignment, we encourage you to come back and play more with this notebook, and see if you can generate even better looking images.
# Here are few other examples:
#
# - The beautiful ruins of the ancient city of Persepolis (Iran) with the style of Van Gogh (The Starry Night)
# <img src="images/perspolis_vangogh.png" style="width:750px;height:300px;">
#
# - The tomb of Cyrus the great in Pasargadae with the style of a Ceramic Kashi from Ispahan.
# <img src="images/pasargad_kashi.png" style="width:750px;height:300px;">
#
# - A scientific study of a turbulent fluid with the style of a abstract blue fluid painting.
# <img src="images/circle_abstract.png" style="width:750px;height:300px;">
# ## 5 - Test with your own image (Optional/Ungraded)
# Finally, you can also rerun the algorithm on your own images!
#
# To do so, go back to part 4 and change the content image and style image with your own pictures. In detail, here's what you should do:
#
# 1. Click on "File -> Open" in the upper tab of the notebook
# 2. Go to "/images" and upload your images (requirement: (WIDTH = 300, HEIGHT = 225)), rename them "my_content.png" and "my_style.png" for example.
# 3. Change the code in part (3.4) from :
# ```python
# content_image = scipy.misc.imread("images/louvre.jpg")
# style_image = scipy.misc.imread("images/claude-monet.jpg")
# ```
# to:
# ```python
# content_image = scipy.misc.imread("images/my_content.jpg")
# style_image = scipy.misc.imread("images/my_style.jpg")
# ```
# 4. Rerun the cells (you may need to restart the Kernel in the upper tab of the notebook).
#
# You can also tune your hyperparameters:
# - Which layers are responsible for representing the style? STYLE_LAYERS
# - How many iterations do you want to run the algorithm? num_iterations
# - What is the relative weighting between content and style? alpha/beta
# ## 6 - Conclusion
#
# Great job on completing this assignment! You are now able to use Neural Style Transfer to generate artistic images. This is also your first time building a model in which the optimization algorithm updates the pixel values rather than the neural network's parameters. Deep learning has many different types of models and this is only one of them!
#
# <font color='blue'>
# What you should remember:
# - Neural Style Transfer is an algorithm that given a content image C and a style image S can generate an artistic image
# - It uses representations (hidden layer activations) based on a pretrained ConvNet.
# - The content cost function is computed using one hidden layer's activations.
# - The style cost function for one layer is computed using the Gram matrix of that layer's activations. The overall style cost function is obtained using several hidden layers.
# - Optimizing the total cost function results in synthesizing new images.
#
#
#
# This was the final programming exercise of this course. Congratulations--you've finished all the programming exercises of this course on Convolutional Networks! We hope to also see you in Course 5, on Sequence models!
#
# ### References:
#
# The Neural Style Transfer algorithm was due to Gatys et al. (2015). <NAME> and Github user "log0" also have highly readable write-ups from which we drew inspiration. The pre-trained network used in this implementation is a VGG network, which is due to Simonyan and Zisserman (2015). Pre-trained weights were from the work of the MathConvNet team.
#
# - <NAME>, <NAME>, <NAME>, (2015). A Neural Algorithm of Artistic Style (https://arxiv.org/abs/1508.06576)
# - <NAME>, Convolutional neural networks for artistic style transfer. https://harishnarayanan.org/writing/artistic-style-transfer/
# - Log0, TensorFlow Implementation of "A Neural Algorithm of Artistic Style". http://www.chioka.in/tensorflow-implementation-neural-algorithm-of-artistic-style
# - <NAME> and <NAME> (2015). Very deep convolutional networks for large-scale image recognition (https://arxiv.org/pdf/1409.1556.pdf)
# - MatConvNet. http://www.vlfeat.org/matconvnet/pretrained/
#
# In[ ]:
| [
"numpy.random.seed",
"numpy.random.randn",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.imshow",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.Session",
"tensorflow.pow",
"tensorflow.transpose",
"tensorflow.set_random_seed",
"tensorflow.random_normal",
"t... | [((3646, 3667), 'matplotlib.pyplot.imshow', 'imshow', (['content_image'], {}), '(content_image)\n', (3652, 3667), False, 'from matplotlib.pyplot import imshow\n'), ((7742, 7766), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7764, 7766), True, 'import tensorflow as tf\n'), ((8691, 8710), 'matplotlib.pyplot.imshow', 'imshow', (['style_image'], {}), '(style_image)\n', (8697, 8710), False, 'from matplotlib.pyplot import imshow\n'), ((11310, 11334), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (11332, 11334), True, 'import tensorflow as tf\n'), ((14544, 14568), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (14566, 14568), True, 'import tensorflow as tf\n'), ((20414, 20438), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (20436, 20438), True, 'import tensorflow as tf\n'), ((22499, 22523), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (22521, 22523), True, 'import tensorflow as tf\n'), ((22560, 22583), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (22581, 22583), True, 'import tensorflow as tf\n'), ((23569, 23595), 'matplotlib.pyplot.imshow', 'imshow', (['generated_image[0]'], {}), '(generated_image[0])\n', (23575, 23595), False, 'from matplotlib.pyplot import imshow\n'), ((25833, 25860), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(2.0)'], {}), '(2.0)\n', (25855, 25860), True, 'import tensorflow as tf\n'), ((7469, 7486), 'tensorflow.transpose', 'tf.transpose', (['a_C'], {}), '(a_C)\n', (7481, 7486), True, 'import tensorflow as tf\n'), ((7506, 7523), 'tensorflow.transpose', 'tf.transpose', (['a_G'], {}), '(a_G)\n', (7518, 7523), True, 'import tensorflow as tf\n'), ((7773, 7785), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7783, 7785), True, 'import tensorflow as tf\n'), ((7799, 7820), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], 
{}), '(1)\n', (7817, 7820), True, 'import tensorflow as tf\n'), ((7831, 7879), 'tensorflow.random_normal', 'tf.random_normal', (['[1, 4, 4, 3]'], {'mean': '(1)', 'stddev': '(4)'}), '([1, 4, 4, 3], mean=1, stddev=4)\n', (7847, 7879), True, 'import tensorflow as tf\n'), ((7890, 7938), 'tensorflow.random_normal', 'tf.random_normal', (['[1, 4, 4, 3]'], {'mean': '(1)', 'stddev': '(4)'}), '([1, 4, 4, 3], mean=1, stddev=4)\n', (7906, 7938), True, 'import tensorflow as tf\n'), ((11341, 11353), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11351, 11353), True, 'import tensorflow as tf\n'), ((11367, 11388), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (11385, 11388), True, 'import tensorflow as tf\n'), ((11397, 11443), 'tensorflow.random_normal', 'tf.random_normal', (['[3, 2 * 1]'], {'mean': '(1)', 'stddev': '(4)'}), '([3, 2 * 1], mean=1, stddev=4)\n', (11413, 11443), True, 'import tensorflow as tf\n'), ((14575, 14587), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (14585, 14587), True, 'import tensorflow as tf\n'), ((14601, 14622), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (14619, 14622), True, 'import tensorflow as tf\n'), ((14633, 14681), 'tensorflow.random_normal', 'tf.random_normal', (['[1, 4, 4, 3]'], {'mean': '(1)', 'stddev': '(4)'}), '([1, 4, 4, 3], mean=1, stddev=4)\n', (14649, 14681), True, 'import tensorflow as tf\n'), ((14692, 14740), 'tensorflow.random_normal', 'tf.random_normal', (['[1, 4, 4, 3]'], {'mean': '(1)', 'stddev': '(4)'}), '([1, 4, 4, 3], mean=1, stddev=4)\n', (14708, 14740), True, 'import tensorflow as tf\n'), ((20445, 20457), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (20455, 20457), True, 'import tensorflow as tf\n'), ((20471, 20488), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (20485, 20488), True, 'import numpy as np\n'), ((20505, 20522), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (20520, 20522), True, 'import numpy as 
np\n'), ((20541, 20558), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (20556, 20558), True, 'import numpy as np\n'), ((11235, 11250), 'tensorflow.transpose', 'tf.transpose', (['A'], {}), '(A)\n', (11247, 11250), True, 'import tensorflow as tf\n'), ((14136, 14169), 'tensorflow.reshape', 'tf.reshape', (['a_S', '[n_H * n_W, n_C]'], {}), '(a_S, [n_H * n_W, n_C])\n', (14146, 14169), True, 'import tensorflow as tf\n'), ((14192, 14225), 'tensorflow.reshape', 'tf.reshape', (['a_G', '[n_H * n_W, n_C]'], {}), '(a_G, [n_H * n_W, n_C])\n', (14202, 14225), True, 'import tensorflow as tf\n'), ((26375, 26408), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (26406, 26408), True, 'import tensorflow as tf\n'), ((7638, 7676), 'tensorflow.pow', 'tf.pow', (['(a_C_unrolled - a_G_unrolled)', '(2)'], {}), '(a_C_unrolled - a_G_unrolled, 2)\n', (7644, 7676), True, 'import tensorflow as tf\n'), ((14448, 14466), 'tensorflow.pow', 'tf.pow', (['(GS - GG)', '(2)'], {}), '(GS - GG, 2)\n', (14454, 14466), True, 'import tensorflow as tf\n')] |
"""
This file is part of the repo: https://github.com/tencent-ailab/hifi3dface
If you find the code useful, please cite our paper:
"High-Fidelity 3D Digital Human Creation from RGB-D Selfies."
<NAME>*, <NAME>*, <NAME>*, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
arXiv: https://arxiv.org/abs/2010.05562
Copyright (c) [2020] [Tencent AI Lab]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
""" utility functions used in multiple scripts """
import cv2
import numpy as np
import skimage.io
import tensorflow as tf
import os
import random
from absl import logging
from PIL import Image
from utils.LP import LaplacianPyramid as LP
from utils.tf_LP import TF_LaplacianPyramid as tf_LP
from utils.const import *
class Utils(object):
    """Helper utilities used during training only."""

    @staticmethod
    def create_photo_loss_mask_from_seg(seg, glassframe):
        """Build a photometric-loss weight mask from face-parsing channels.

        Uses skin, nose, eyeglass, left/right brow and upper/lower lip
        channels. The eyeglass region is half-weighted and the glass frame
        itself is excluded so frame pixels don't pollute the photo loss.
        Returns the mask with a trailing channel axis added.
        """
        skin = seg[:, :, :, SEG_SKIN]
        nose = seg[:, :, :, SEG_NOSE] * 1.0
        # Down-weight eyeglass lens area; zero out the frame pixels.
        eyeglass = seg[:, :, :, SEG_EYEG] * (1 - glassframe) * 0.5
        left_brow = seg[:, :, :, SEG_LBROW] * 1.0
        right_brow = seg[:, :, :, SEG_RBROW] * 1.0
        upper_lip = seg[:, :, :, SEG_ULIP] * 1.0
        lower_lip = seg[:, :, :, SEG_LLIP] * 1.0
        mask = skin + nose + eyeglass + left_brow + right_brow + upper_lip + lower_lip
        return tf.expand_dims(mask, axis=-1)
def tf_detect_glassframe(rgb_img, seg_img):
    """Return a mask for the eyeglass frame.

    NOTE(review): the gradient/brightness heuristic below is currently
    disabled -- the function just returns ``seg_img`` unchanged.  The edge
    map ``G``, the box-blur ``kernel`` and the HSV conversion are dead code
    kept from an earlier experiment (see the commented-out lines).
    """
    # input range [0,1]
    # Finite differences of the segmentation along x and y (zero-padded
    # back to the original size).
    dX = seg_img[:, :, 1:] - seg_img[:, :, :-1]
    dX = tf.pad(tensor=dX, paddings=((0, 0), (0, 0), (0, 1)), mode="constant")
    dY = seg_img[:, 1:, :] - seg_img[:, :-1, :]
    dY = tf.pad(tensor=dY, paddings=((0, 0), (0, 1), (0, 0)), mode="constant")
    # Gradient magnitude thresholded into a binary edge map (unused below).
    G = tf.sqrt(tf.square(dX) + tf.square(dY))
    G = tf.compat.v1.where(tf.greater(G, 0.1), tf.ones_like(G), tf.zeros_like(G))
    G = tf.expand_dims(G, axis=3)
    k = 10
    # Normalised k x k box-blur kernel (unused below).
    kernel = np.ones((k, k), np.float32) / (k * k)
    kernel = tf.reshape(kernel, [k, k, 1, 1])
    # from tf_LP import TF_LaplacianPyramid as tf_LP
    # mask = tf_LP.conv_depthwise(G, kernel, strides=[1, 1, 1, 1], padding="SAME")
    # mask = tf.where(tf.greater(mask, 0.01), tf.ones_like(mask), tf.zeros_like(mask))
    # mask = tf.squeeze(mask)
    # convert rgb to hsv
    hsv_img = tf_rgb_to_hsv(rgb_img)
    # Brightness (value) channel -- unused since v_mask is commented out.
    v_img = hsv_img[:, :, :, 2]
    # v_mask = tf.where(tf.less(v_img, 0.6), tf.ones_like(v_img), tf.zeros_like(v_img))
    # glassframe = v_mask * mask * seg_img
    glassframe = seg_img
    return glassframe
def tf_rgb_to_hsv(rgb_image):
    """Convert a batched NHWC RGB tensor to HSV.

    Returns hue in degrees [0, 360), saturation and value in [0, 1].

    NOTE(review): the input is divided by 255, so this assumes
    ``rgb_image`` is in [0, 255]; the caller `tf_detect_glassframe`
    claims [0, 1] input -- confirm which convention is intended.

    NOTE(review): the hue branch conditions overlap on ties (e.g. grey
    pixels where r == g == b), so their constant offsets are summed there;
    hue is only meaningful where delta > 0.
    """
    rgb_norm_image = rgb_image / 255.0
    batch_size, image_height, image_width, n_channel = rgb_image.get_shape().as_list()
    r_img = tf.strided_slice(
        rgb_norm_image, [0, 0, 0, 0], [batch_size, image_height, image_width, 1]
    )
    g_img = tf.strided_slice(
        rgb_norm_image, [0, 0, 0, 1], [batch_size, image_height, image_width, 2]
    )
    b_img = tf.strided_slice(
        rgb_norm_image, [0, 0, 0, 2], [batch_size, image_height, image_width, 3]
    )
    c_max = tf.reduce_max(input_tensor=rgb_norm_image, axis=3, keepdims=True)
    c_min = tf.reduce_min(input_tensor=rgb_norm_image, axis=3, keepdims=True)
    # EPS keeps the divisions finite where c_max == c_min (grey pixels).
    EPS = 1e-8
    # Piecewise hue definition; each branch is masked to its own region
    # and the masked branches are summed.
    h_branch2 = 60 * tf.compat.v1.div(g_img - b_img, c_max - c_min + EPS)
    h_branch3 = 60 * tf.compat.v1.div(g_img - b_img, c_max - c_min + EPS) + 360
    h_branch4 = 60 * tf.compat.v1.div(b_img - r_img, c_max - c_min + EPS) + 120
    h_branch5 = 60 * tf.compat.v1.div(r_img - g_img, c_max - c_min + EPS) + 240
    h2 = tf.compat.v1.where(
        tf.logical_and(
            tf.equal(c_max, r_img),
            tf.logical_or(tf.equal(g_img, b_img), tf.greater(g_img, b_img)),
        ),
        h_branch2,
        tf.zeros_like(h_branch2),
    )
    h3 = tf.compat.v1.where(
        tf.logical_and(tf.equal(c_max, r_img), tf.less(g_img, b_img)),
        h_branch3,
        tf.zeros_like(h_branch3),
    )
    h4 = tf.compat.v1.where(tf.equal(c_max, g_img), h_branch4, tf.zeros_like(h_branch4))
    h5 = tf.compat.v1.where(tf.equal(c_max, b_img), h_branch5, tf.zeros_like(h_branch5))
    h = h2 + h3 + h4 + h5
    # Saturation: 1 - c_min / c_max for coloured pixels, 0 where
    # c_max == 0 (pure black).  BUGFIX: the original selected s_branch2
    # exactly when c_max == 0, i.e. the condition was inverted and s was
    # forced to zero for every non-black pixel.
    s_branch2 = 1 - tf.compat.v1.div(c_min, c_max + EPS)
    s = tf.compat.v1.where(tf.equal(c_max, 0), tf.zeros_like(s_branch2), s_branch2)
    v = c_max
    hsv_img = tf.concat([h, s, v], axis=3)
    return hsv_img
def tf_rgb_to_yuv(rgb_image):
    """Convert a batched NHWC RGB tensor to YUV (analog-TV style constants:
    Y = 0.3R + 0.59G + 0.11B, U = 0.493(B-Y), V = 0.877(R-Y))."""
    batch_size, image_height, image_width, n_channel = rgb_image.get_shape().as_list()

    def channel(index):
        # Extract a single channel as an N x H x W x 1 tensor.
        return tf.strided_slice(
            rgb_image,
            [0, 0, 0, index],
            [batch_size, image_height, image_width, index + 1],
        )

    red, green, blue = channel(0), channel(1), channel(2)
    luma = 0.3 * red + 0.59 * green + 0.11 * blue
    chroma_u = (blue - luma) * 0.493
    chroma_v = (red - luma) * 0.877
    return tf.concat([luma, chroma_u, chroma_v], axis=3)
def tf_yuv_to_rgb(yuv_image):
    """Convert a batched NHWC YUV tensor back to RGB (inverse of
    ``tf_rgb_to_yuv``)."""
    batch_size, image_height, image_width, n_channel = yuv_image.get_shape().as_list()

    def channel(index):
        # Extract a single channel as an N x H x W x 1 tensor.
        return tf.strided_slice(
            yuv_image,
            [0, 0, 0, index],
            [batch_size, image_height, image_width, index + 1],
        )

    luma, chroma_u, chroma_v = channel(0), channel(1), channel(2)
    red = luma + 1.14 * chroma_v
    green = luma - 0.39 * chroma_u - 0.58 * chroma_v
    blue = luma + 2.03 * chroma_u
    return tf.concat([red, green, blue], axis=3)
def tf_blend_uv(
    base_uv,
    face_uv,
    face_mask,
    match_color=False,
    times=5,
):
    """Blend ``face_uv`` into ``base_uv`` with Laplacian-pyramid blending.

    base_uv, face_uv: 4-D (batch, H, W, C) float tensors; shape[1] is used
        as the UV resolution for scaling the blur kernel.
    face_mask: 4-D tensor, ~1 where the face texture should dominate.
    match_color: if True, shift base_uv's masked per-image YUV mean/std to
        match face_uv's before blending (colour-tone transfer).
    times: number of pyramid levels.

    Returns the blended UV map clipped to [0, 1].
    """
    # when match_color=True, use color tone
    assert len(base_uv.get_shape().as_list()) == 4
    assert len(face_uv.get_shape().as_list()) == 4
    uv_size = base_uv.get_shape().as_list()[1]
    # Separable box blur of the mask (k x 1 then 1 x k); kernel length
    # scales with resolution -- 31 px at a 2048 UV map.
    k_blur = int(31 * uv_size / 2048)
    kernel_blur = np.ones((k_blur, 1), dtype=np.float32) / k_blur
    kernel_blur_row = np.reshape(kernel_blur, (k_blur, 1, 1, 1))
    kernel_blur_row = tf.constant(kernel_blur_row, name="kernel_blur_row")
    kernel_blur_col = np.reshape(kernel_blur, (1, k_blur, 1, 1))
    kernel_blur_col = tf.constant(kernel_blur_col, name="kernel_blur_col")
    face_mask_blur = face_mask
    face_mask_blur = tf.nn.conv2d(
        input=face_mask_blur, filters=kernel_blur_row, strides=[1, 1, 1, 1], padding="SAME"
    )
    face_mask_blur = tf.nn.conv2d(
        input=face_mask_blur, filters=kernel_blur_col, strides=[1, 1, 1, 1], padding="SAME"
    )
    # Soften the mask edge; multiplying by the original mask keeps the
    # exterior strictly zero.
    face_mask = face_mask_blur * face_mask
    if match_color:
        # select color from patch
        base_uv_yuv = tf_rgb_to_yuv(base_uv)
        face_uv_yuv = tf_rgb_to_yuv(face_uv)
        # Masked per-image mean of each YUV channel.
        sum_base = tf.reduce_sum(input_tensor=base_uv_yuv * face_mask, axis=(1, 2), keepdims=True)
        sum_face = tf.reduce_sum(input_tensor=face_uv_yuv * face_mask, axis=(1, 2), keepdims=True)
        sum_cnt = tf.reduce_sum(input_tensor=face_mask, axis=(1, 2), keepdims=True)
        mu_base = sum_base / sum_cnt
        mu_face = sum_face / sum_cnt
        # Masked second moments -> per-channel standard deviations.
        sum_base_sq = tf.reduce_sum(
            input_tensor=tf.square(base_uv_yuv) * face_mask, axis=(1, 2), keepdims=True
        )
        sum_face_sq = tf.reduce_sum(
            input_tensor=tf.square(face_uv_yuv) * face_mask, axis=(1, 2), keepdims=True
        )
        mu_base_sq = sum_base_sq / sum_cnt
        mu_face_sq = sum_face_sq / sum_cnt
        std_base = tf.sqrt((mu_base_sq - tf.square(mu_base)))
        std_face = tf.sqrt((mu_face_sq - tf.square(mu_face)))
        # Whiten base statistics, then re-colour with the face statistics.
        base_uv_yuv = (base_uv_yuv - mu_base) / std_base * std_face + mu_face
        base_uv = tf_yuv_to_rgb(base_uv_yuv)
    # Pre-composite so out-of-mask pyramid content already agrees.
    face_uv = face_uv * face_mask + base_uv * (1 - face_mask)
    pyramids1 = tf_LP.buildLaplacianPyramids(base_uv, times)
    pyramids2 = tf_LP.buildLaplacianPyramids(face_uv, times)
    mask_list = tf_LP.downSamplePyramids(face_mask, times)
    blend_pyramids = []
    # Per-level linear blend, then collapse the pyramid.
    for j in range(len(pyramids1)):
        mask = tf.clip_by_value(mask_list[j], 0.0, 1.0)
        blend_pyramids.append(pyramids1[j] * (1 - mask) + pyramids2[j] * mask)
    cur_uv = tf_LP.reconstruct(blend_pyramids)
    cur_uv = tf.clip_by_value(cur_uv, 0.0, 1)
    return cur_uv
def blend_uv(
    base_uv,
    face_uv,
    face_mask,
    match_color=False,
    times=5,
):
    """NumPy/OpenCV counterpart of ``tf_blend_uv``: Laplacian-pyramid blend
    of ``face_uv`` into ``base_uv``.

    Inputs are H x W x C arrays; the result is float32 clipped to [0, 1].
    When ``match_color`` is True, base_uv's YUV statistics inside the mask
    are shifted to match face_uv's before blending.
    """
    # when is_normal=True, do not use color tone
    base_uv = base_uv.astype(np.float32)
    face_uv = face_uv.astype(np.float32)
    face_mask = face_mask.astype(np.float32)
    # contract the face mask a little and blur it
    k_contract = 5
    kernel_contract = np.ones((k_contract, 1), dtype=np.float32) / k_contract
    face_mask_contract = face_mask
    # Separable box blur repeated (count scales with resolution), then
    # thresholding everything below 1 to 0 erodes the mask inward.
    for i in range(int(5 * face_mask.shape[0] / 2048)):
        face_mask_contract = cv2.filter2D(face_mask_contract, -1, kernel_contract)
        face_mask_contract = cv2.filter2D(
            face_mask_contract, -1, np.transpose(kernel_contract)
        )
    face_mask_contract[face_mask_contract < 1] = 0
    # so that the blending edges are smoothy
    k_blur = 41 * base_uv.shape[0] // 2048
    kernel_blur = np.ones((k_blur, 1), dtype=np.float32) / k_blur
    face_mask_blur = face_mask_contract
    for i in range(int(5 * face_mask.shape[0] / 2048)):
        face_mask_blur = cv2.filter2D(face_mask_blur, -1, kernel_blur)  # * face_mask
        face_mask_blur = cv2.filter2D(
            face_mask_blur, -1, np.transpose(kernel_blur)
        )  # * face_mask
    face_mask_blend = face_mask_contract * face_mask_blur
    if match_color:
        # select color from patch
        # NOTE(review): only `skimage.io` is imported at module level but
        # `skimage.color` is used here -- confirm it resolves (newer
        # scikit-image lazy-loads submodules; older versions may not).
        base_uv_yuv = skimage.color.convert_colorspace(base_uv, "rgb", "yuv")
        face_uv_yuv = skimage.color.convert_colorspace(face_uv, "rgb", "yuv")
        # Statistics are taken over pixels where the first mask channel
        # is confidently set.
        is_valid = face_mask[:, :, 0] > 0.5
        mu_base = np.mean(base_uv_yuv[is_valid], axis=0, keepdims=True)  # part
        mu_face = np.mean(face_uv_yuv[is_valid], axis=0, keepdims=True)  # core
        std_base = np.std(base_uv_yuv[is_valid], axis=0, keepdims=True)
        std_face = np.std(face_uv_yuv[is_valid], axis=0, keepdims=True)
        # Whiten base statistics, then re-colour with the face statistics.
        base_uv_yuv = (base_uv_yuv - mu_base) / std_base * std_face + mu_face
        base_uv_cvt = skimage.color.convert_colorspace(base_uv_yuv, "yuv", "rgb")
        base_uv = np.clip(base_uv_cvt, 0, 1)
    # Pre-composite so out-of-mask pyramid content already agrees.
    face_uv = face_uv * face_mask + base_uv * (1 - face_mask)
    pyramids1 = LP.buildLaplacianPyramids(base_uv, times)
    pyramids2 = LP.buildLaplacianPyramids(face_uv, times)
    mask_list = LP.downSamplePyramids(face_mask_blend, times)
    blend_pyramids = []
    # Per-level linear blend, then collapse the pyramid.
    for j in range(len(pyramids1)):
        mask = np.clip(mask_list[j], 0, 1)
        blend_pyramids.append(pyramids1[j] * (1 - mask) + pyramids2[j] * mask)
    cur_uv = LP.reconstruct(blend_pyramids)
    cur_uv = np.clip(cur_uv, 0, 1)
    return cur_uv
| [
"utils.tf_LP.TF_LaplacianPyramid.downSamplePyramids",
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.reshape",
"tensorflow.zeros_like",
"numpy.clip",
"numpy.ones",
"utils.LP.LaplacianPyramid.buildLaplacianPyramids",
"numpy.mean",
"tensorflow.nn.conv2d",
"tensorflow.reduce_max",... | [((2493, 2562), 'tensorflow.pad', 'tf.pad', ([], {'tensor': 'dX', 'paddings': '((0, 0), (0, 0), (0, 1))', 'mode': '"""constant"""'}), "(tensor=dX, paddings=((0, 0), (0, 0), (0, 1)), mode='constant')\n", (2499, 2562), True, 'import tensorflow as tf\n'), ((2620, 2689), 'tensorflow.pad', 'tf.pad', ([], {'tensor': 'dY', 'paddings': '((0, 0), (0, 1), (0, 0))', 'mode': '"""constant"""'}), "(tensor=dY, paddings=((0, 0), (0, 1), (0, 0)), mode='constant')\n", (2626, 2689), True, 'import tensorflow as tf\n'), ((2827, 2852), 'tensorflow.expand_dims', 'tf.expand_dims', (['G'], {'axis': '(3)'}), '(G, axis=3)\n', (2841, 2852), True, 'import tensorflow as tf\n'), ((2929, 2961), 'tensorflow.reshape', 'tf.reshape', (['kernel', '[k, k, 1, 1]'], {}), '(kernel, [k, k, 1, 1])\n', (2939, 2961), True, 'import tensorflow as tf\n'), ((3660, 3754), 'tensorflow.strided_slice', 'tf.strided_slice', (['rgb_norm_image', '[0, 0, 0, 0]', '[batch_size, image_height, image_width, 1]'], {}), '(rgb_norm_image, [0, 0, 0, 0], [batch_size, image_height,\n image_width, 1])\n', (3676, 3754), True, 'import tensorflow as tf\n'), ((3777, 3871), 'tensorflow.strided_slice', 'tf.strided_slice', (['rgb_norm_image', '[0, 0, 0, 1]', '[batch_size, image_height, image_width, 2]'], {}), '(rgb_norm_image, [0, 0, 0, 1], [batch_size, image_height,\n image_width, 2])\n', (3793, 3871), True, 'import tensorflow as tf\n'), ((3894, 3988), 'tensorflow.strided_slice', 'tf.strided_slice', (['rgb_norm_image', '[0, 0, 0, 2]', '[batch_size, image_height, image_width, 3]'], {}), '(rgb_norm_image, [0, 0, 0, 2], [batch_size, image_height,\n image_width, 3])\n', (3910, 3988), True, 'import tensorflow as tf\n'), ((4075, 4140), 'tensorflow.reduce_max', 'tf.reduce_max', ([], {'input_tensor': 'rgb_norm_image', 'axis': '(3)', 'keepdims': '(True)'}), '(input_tensor=rgb_norm_image, axis=3, keepdims=True)\n', (4088, 4140), True, 'import tensorflow as tf\n'), ((4153, 4218), 'tensorflow.reduce_min', 'tf.reduce_min', 
([], {'input_tensor': 'rgb_norm_image', 'axis': '(3)', 'keepdims': '(True)'}), '(input_tensor=rgb_norm_image, axis=3, keepdims=True)\n', (4166, 4218), True, 'import tensorflow as tf\n'), ((4277, 4297), 'tensorflow.zeros_like', 'tf.zeros_like', (['c_max'], {}), '(c_max)\n', (4290, 4297), True, 'import tensorflow as tf\n'), ((5384, 5412), 'tensorflow.concat', 'tf.concat', (['[h, s, v]'], {'axis': '(3)'}), '([h, s, v], axis=3)\n', (5393, 5412), True, 'import tensorflow as tf\n'), ((5559, 5648), 'tensorflow.strided_slice', 'tf.strided_slice', (['rgb_image', '[0, 0, 0, 0]', '[batch_size, image_height, image_width, 1]'], {}), '(rgb_image, [0, 0, 0, 0], [batch_size, image_height,\n image_width, 1])\n', (5575, 5648), True, 'import tensorflow as tf\n'), ((5667, 5756), 'tensorflow.strided_slice', 'tf.strided_slice', (['rgb_image', '[0, 0, 0, 1]', '[batch_size, image_height, image_width, 2]'], {}), '(rgb_image, [0, 0, 0, 1], [batch_size, image_height,\n image_width, 2])\n', (5683, 5756), True, 'import tensorflow as tf\n'), ((5775, 5864), 'tensorflow.strided_slice', 'tf.strided_slice', (['rgb_image', '[0, 0, 0, 2]', '[batch_size, image_height, image_width, 3]'], {}), '(rgb_image, [0, 0, 0, 2], [batch_size, image_height,\n image_width, 3])\n', (5791, 5864), True, 'import tensorflow as tf\n'), ((6024, 6052), 'tensorflow.concat', 'tf.concat', (['[Y, U, V]'], {'axis': '(3)'}), '([Y, U, V], axis=3)\n', (6033, 6052), True, 'import tensorflow as tf\n'), ((6201, 6290), 'tensorflow.strided_slice', 'tf.strided_slice', (['yuv_image', '[0, 0, 0, 0]', '[batch_size, image_height, image_width, 1]'], {}), '(yuv_image, [0, 0, 0, 0], [batch_size, image_height,\n image_width, 1])\n', (6217, 6290), True, 'import tensorflow as tf\n'), ((6309, 6398), 'tensorflow.strided_slice', 'tf.strided_slice', (['yuv_image', '[0, 0, 0, 1]', '[batch_size, image_height, image_width, 2]'], {}), '(yuv_image, [0, 0, 0, 1], [batch_size, image_height,\n image_width, 2])\n', (6325, 6398), True, 'import tensorflow as 
tf\n'), ((6417, 6506), 'tensorflow.strided_slice', 'tf.strided_slice', (['yuv_image', '[0, 0, 0, 2]', '[batch_size, image_height, image_width, 3]'], {}), '(yuv_image, [0, 0, 0, 2], [batch_size, image_height,\n image_width, 3])\n', (6433, 6506), True, 'import tensorflow as tf\n'), ((6654, 6682), 'tensorflow.concat', 'tf.concat', (['[R, G, B]'], {'axis': '(3)'}), '([R, G, B], axis=3)\n', (6663, 6682), True, 'import tensorflow as tf\n'), ((7123, 7165), 'numpy.reshape', 'np.reshape', (['kernel_blur', '(k_blur, 1, 1, 1)'], {}), '(kernel_blur, (k_blur, 1, 1, 1))\n', (7133, 7165), True, 'import numpy as np\n'), ((7188, 7240), 'tensorflow.constant', 'tf.constant', (['kernel_blur_row'], {'name': '"""kernel_blur_row"""'}), "(kernel_blur_row, name='kernel_blur_row')\n", (7199, 7240), True, 'import tensorflow as tf\n'), ((7264, 7306), 'numpy.reshape', 'np.reshape', (['kernel_blur', '(1, k_blur, 1, 1)'], {}), '(kernel_blur, (1, k_blur, 1, 1))\n', (7274, 7306), True, 'import numpy as np\n'), ((7329, 7381), 'tensorflow.constant', 'tf.constant', (['kernel_blur_col'], {'name': '"""kernel_blur_col"""'}), "(kernel_blur_col, name='kernel_blur_col')\n", (7340, 7381), True, 'import tensorflow as tf\n'), ((7435, 7537), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'face_mask_blur', 'filters': 'kernel_blur_row', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=face_mask_blur, filters=kernel_blur_row, strides=[1, 1, \n 1, 1], padding='SAME')\n", (7447, 7537), True, 'import tensorflow as tf\n'), ((7568, 7670), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', ([], {'input': 'face_mask_blur', 'filters': 'kernel_blur_col', 'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(input=face_mask_blur, filters=kernel_blur_col, strides=[1, 1, \n 1, 1], padding='SAME')\n", (7580, 7670), True, 'import tensorflow as tf\n'), ((8911, 8955), 'utils.tf_LP.TF_LaplacianPyramid.buildLaplacianPyramids', 'tf_LP.buildLaplacianPyramids', (['base_uv', 'times'], {}), '(base_uv, times)\n', (8939, 
8955), True, 'from utils.tf_LP import TF_LaplacianPyramid as tf_LP\n'), ((8972, 9016), 'utils.tf_LP.TF_LaplacianPyramid.buildLaplacianPyramids', 'tf_LP.buildLaplacianPyramids', (['face_uv', 'times'], {}), '(face_uv, times)\n', (9000, 9016), True, 'from utils.tf_LP import TF_LaplacianPyramid as tf_LP\n'), ((9033, 9075), 'utils.tf_LP.TF_LaplacianPyramid.downSamplePyramids', 'tf_LP.downSamplePyramids', (['face_mask', 'times'], {}), '(face_mask, times)\n', (9057, 9075), True, 'from utils.tf_LP import TF_LaplacianPyramid as tf_LP\n'), ((9286, 9319), 'utils.tf_LP.TF_LaplacianPyramid.reconstruct', 'tf_LP.reconstruct', (['blend_pyramids'], {}), '(blend_pyramids)\n', (9303, 9319), True, 'from utils.tf_LP import TF_LaplacianPyramid as tf_LP\n'), ((9333, 9365), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['cur_uv', '(0.0)', '(1)'], {}), '(cur_uv, 0.0, 1)\n', (9349, 9365), True, 'import tensorflow as tf\n'), ((11512, 11553), 'utils.LP.LaplacianPyramid.buildLaplacianPyramids', 'LP.buildLaplacianPyramids', (['base_uv', 'times'], {}), '(base_uv, times)\n', (11537, 11553), True, 'from utils.LP import LaplacianPyramid as LP\n'), ((11570, 11611), 'utils.LP.LaplacianPyramid.buildLaplacianPyramids', 'LP.buildLaplacianPyramids', (['face_uv', 'times'], {}), '(face_uv, times)\n', (11595, 11611), True, 'from utils.LP import LaplacianPyramid as LP\n'), ((11628, 11673), 'utils.LP.LaplacianPyramid.downSamplePyramids', 'LP.downSamplePyramids', (['face_mask_blend', 'times'], {}), '(face_mask_blend, times)\n', (11649, 11673), True, 'from utils.LP import LaplacianPyramid as LP\n'), ((11869, 11899), 'utils.LP.LaplacianPyramid.reconstruct', 'LP.reconstruct', (['blend_pyramids'], {}), '(blend_pyramids)\n', (11883, 11899), True, 'from utils.LP import LaplacianPyramid as LP\n'), ((11913, 11934), 'numpy.clip', 'np.clip', (['cur_uv', '(0)', '(1)'], {}), '(cur_uv, 0, 1)\n', (11920, 11934), True, 'import numpy as np\n'), ((2316, 2345), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask'], {'axis': 
'(-1)'}), '(mask, axis=-1)\n', (2330, 2345), True, 'import tensorflow as tf\n'), ((2764, 2782), 'tensorflow.greater', 'tf.greater', (['G', '(0.1)'], {}), '(G, 0.1)\n', (2774, 2782), True, 'import tensorflow as tf\n'), ((2784, 2799), 'tensorflow.ones_like', 'tf.ones_like', (['G'], {}), '(G)\n', (2796, 2799), True, 'import tensorflow as tf\n'), ((2801, 2817), 'tensorflow.zeros_like', 'tf.zeros_like', (['G'], {}), '(G)\n', (2814, 2817), True, 'import tensorflow as tf\n'), ((2878, 2905), 'numpy.ones', 'np.ones', (['(k, k)', 'np.float32'], {}), '((k, k), np.float32)\n', (2885, 2905), True, 'import numpy as np\n'), ((4319, 4371), 'tensorflow.compat.v1.div', 'tf.compat.v1.div', (['(g_img - b_img)', '(c_max - c_min + EPS)'], {}), '(g_img - b_img, c_max - c_min + EPS)\n', (4335, 4371), True, 'import tensorflow as tf\n'), ((4817, 4841), 'tensorflow.zeros_like', 'tf.zeros_like', (['h_branch2'], {}), '(h_branch2)\n', (4830, 4841), True, 'import tensorflow as tf\n'), ((4976, 5000), 'tensorflow.zeros_like', 'tf.zeros_like', (['h_branch3'], {}), '(h_branch3)\n', (4989, 5000), True, 'import tensorflow as tf\n'), ((5036, 5058), 'tensorflow.equal', 'tf.equal', (['c_max', 'g_img'], {}), '(c_max, g_img)\n', (5044, 5058), True, 'import tensorflow as tf\n'), ((5071, 5095), 'tensorflow.zeros_like', 'tf.zeros_like', (['h_branch4'], {}), '(h_branch4)\n', (5084, 5095), True, 'import tensorflow as tf\n'), ((5125, 5147), 'tensorflow.equal', 'tf.equal', (['c_max', 'b_img'], {}), '(c_max, b_img)\n', (5133, 5147), True, 'import tensorflow as tf\n'), ((5160, 5184), 'tensorflow.zeros_like', 'tf.zeros_like', (['h_branch5'], {}), '(h_branch5)\n', (5173, 5184), True, 'import tensorflow as tf\n'), ((5233, 5269), 'tensorflow.compat.v1.div', 'tf.compat.v1.div', (['c_min', '(c_max + EPS)'], {}), '(c_min, c_max + EPS)\n', (5249, 5269), True, 'import tensorflow as tf\n'), ((5297, 5315), 'tensorflow.equal', 'tf.equal', (['c_max', '(0)'], {}), '(c_max, 0)\n', (5305, 5315), True, 'import tensorflow as tf\n'), 
((5328, 5352), 'tensorflow.zeros_like', 'tf.zeros_like', (['s_branch2'], {}), '(s_branch2)\n', (5341, 5352), True, 'import tensorflow as tf\n'), ((7053, 7091), 'numpy.ones', 'np.ones', (['(k_blur, 1)'], {'dtype': 'np.float32'}), '((k_blur, 1), dtype=np.float32)\n', (7060, 7091), True, 'import numpy as np\n'), ((7888, 7967), 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': '(base_uv_yuv * face_mask)', 'axis': '(1, 2)', 'keepdims': '(True)'}), '(input_tensor=base_uv_yuv * face_mask, axis=(1, 2), keepdims=True)\n', (7901, 7967), True, 'import tensorflow as tf\n'), ((7987, 8066), 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': '(face_uv_yuv * face_mask)', 'axis': '(1, 2)', 'keepdims': '(True)'}), '(input_tensor=face_uv_yuv * face_mask, axis=(1, 2), keepdims=True)\n', (8000, 8066), True, 'import tensorflow as tf\n'), ((8085, 8150), 'tensorflow.reduce_sum', 'tf.reduce_sum', ([], {'input_tensor': 'face_mask', 'axis': '(1, 2)', 'keepdims': '(True)'}), '(input_tensor=face_mask, axis=(1, 2), keepdims=True)\n', (8098, 8150), True, 'import tensorflow as tf\n'), ((9152, 9192), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['mask_list[j]', '(0.0)', '(1.0)'], {}), '(mask_list[j], 0.0, 1.0)\n', (9168, 9192), True, 'import tensorflow as tf\n'), ((9749, 9791), 'numpy.ones', 'np.ones', (['(k_contract, 1)'], {'dtype': 'np.float32'}), '((k_contract, 1), dtype=np.float32)\n', (9756, 9791), True, 'import numpy as np\n'), ((9925, 9978), 'cv2.filter2D', 'cv2.filter2D', (['face_mask_contract', '(-1)', 'kernel_contract'], {}), '(face_mask_contract, -1, kernel_contract)\n', (9937, 9978), False, 'import cv2\n'), ((10256, 10294), 'numpy.ones', 'np.ones', (['(k_blur, 1)'], {'dtype': 'np.float32'}), '((k_blur, 1), dtype=np.float32)\n', (10263, 10294), True, 'import numpy as np\n'), ((10425, 10470), 'cv2.filter2D', 'cv2.filter2D', (['face_mask_blur', '(-1)', 'kernel_blur'], {}), '(face_mask_blur, -1, kernel_blur)\n', (10437, 10470), False, 'import cv2\n'), 
((10940, 10993), 'numpy.mean', 'np.mean', (['base_uv_yuv[is_valid]'], {'axis': '(0)', 'keepdims': '(True)'}), '(base_uv_yuv[is_valid], axis=0, keepdims=True)\n', (10947, 10993), True, 'import numpy as np\n'), ((11020, 11073), 'numpy.mean', 'np.mean', (['face_uv_yuv[is_valid]'], {'axis': '(0)', 'keepdims': '(True)'}), '(face_uv_yuv[is_valid], axis=0, keepdims=True)\n', (11027, 11073), True, 'import numpy as np\n'), ((11101, 11153), 'numpy.std', 'np.std', (['base_uv_yuv[is_valid]'], {'axis': '(0)', 'keepdims': '(True)'}), '(base_uv_yuv[is_valid], axis=0, keepdims=True)\n', (11107, 11153), True, 'import numpy as np\n'), ((11173, 11225), 'numpy.std', 'np.std', (['face_uv_yuv[is_valid]'], {'axis': '(0)', 'keepdims': '(True)'}), '(face_uv_yuv[is_valid], axis=0, keepdims=True)\n', (11179, 11225), True, 'import numpy as np\n'), ((11405, 11431), 'numpy.clip', 'np.clip', (['base_uv_cvt', '(0)', '(1)'], {}), '(base_uv_cvt, 0, 1)\n', (11412, 11431), True, 'import numpy as np\n'), ((11749, 11776), 'numpy.clip', 'np.clip', (['mask_list[j]', '(0)', '(1)'], {}), '(mask_list[j], 0, 1)\n', (11756, 11776), True, 'import numpy as np\n'), ((2706, 2719), 'tensorflow.square', 'tf.square', (['dX'], {}), '(dX)\n', (2715, 2719), True, 'import tensorflow as tf\n'), ((2722, 2735), 'tensorflow.square', 'tf.square', (['dY'], {}), '(dY)\n', (2731, 2735), True, 'import tensorflow as tf\n'), ((4393, 4445), 'tensorflow.compat.v1.div', 'tf.compat.v1.div', (['(g_img - b_img)', '(c_max - c_min + EPS)'], {}), '(g_img - b_img, c_max - c_min + EPS)\n', (4409, 4445), True, 'import tensorflow as tf\n'), ((4473, 4525), 'tensorflow.compat.v1.div', 'tf.compat.v1.div', (['(b_img - r_img)', '(c_max - c_min + EPS)'], {}), '(b_img - r_img, c_max - c_min + EPS)\n', (4489, 4525), True, 'import tensorflow as tf\n'), ((4553, 4605), 'tensorflow.compat.v1.div', 'tf.compat.v1.div', (['(r_img - g_img)', '(c_max - c_min + EPS)'], {}), '(r_img - g_img, c_max - c_min + EPS)\n', (4569, 4605), True, 'import tensorflow as 
tf\n'), ((4678, 4700), 'tensorflow.equal', 'tf.equal', (['c_max', 'r_img'], {}), '(c_max, r_img)\n', (4686, 4700), True, 'import tensorflow as tf\n'), ((4901, 4923), 'tensorflow.equal', 'tf.equal', (['c_max', 'r_img'], {}), '(c_max, r_img)\n', (4909, 4923), True, 'import tensorflow as tf\n'), ((4925, 4946), 'tensorflow.less', 'tf.less', (['g_img', 'b_img'], {}), '(g_img, b_img)\n', (4932, 4946), True, 'import tensorflow as tf\n'), ((10058, 10087), 'numpy.transpose', 'np.transpose', (['kernel_contract'], {}), '(kernel_contract)\n', (10070, 10087), True, 'import numpy as np\n'), ((10557, 10582), 'numpy.transpose', 'np.transpose', (['kernel_blur'], {}), '(kernel_blur)\n', (10569, 10582), True, 'import numpy as np\n'), ((4728, 4750), 'tensorflow.equal', 'tf.equal', (['g_img', 'b_img'], {}), '(g_img, b_img)\n', (4736, 4750), True, 'import tensorflow as tf\n'), ((4752, 4776), 'tensorflow.greater', 'tf.greater', (['g_img', 'b_img'], {}), '(g_img, b_img)\n', (4762, 4776), True, 'import tensorflow as tf\n'), ((8624, 8642), 'tensorflow.square', 'tf.square', (['mu_base'], {}), '(mu_base)\n', (8633, 8642), True, 'import tensorflow as tf\n'), ((8686, 8704), 'tensorflow.square', 'tf.square', (['mu_face'], {}), '(mu_face)\n', (8695, 8704), True, 'import tensorflow as tf\n'), ((8288, 8310), 'tensorflow.square', 'tf.square', (['base_uv_yuv'], {}), '(base_uv_yuv)\n', (8297, 8310), True, 'import tensorflow as tf\n'), ((8423, 8445), 'tensorflow.square', 'tf.square', (['face_uv_yuv'], {}), '(face_uv_yuv)\n', (8432, 8445), True, 'import tensorflow as tf\n')] |
# python 2.7
from __future__ import absolute_import, division, print_function
import io
from math import floor
import numpy as np
from time import sleep, time
from os.path import join
from control.motor import Motor
from control import policy
class Controller(object):
    """High-level motion controller mapping sensor observations to wheel
    PWM commands through a selectable control policy."""

    def __init__(self):
        # 'slient' is the Motor constructor's own (misspelled) keyword;
        # it cannot be renamed here without changing the Motor API.
        self.motor = Motor(slient=True)
        self._start_time = time()
        self.is_recording = False
        # Pre-computed feedback-gain matrices for the ADP policies.
        self.K_im_traj = np.load('./control/K_traj_IM_VI.npy')
        self.K_coupled = np.load('./control/coupled_k/0221.npy')
        self.dis_sum = 0  # running integral of the lateral error
        self.z = np.zeros(2)  # internal state of the coupled controller
        self.threshold = 500
        self.init_record()

    def init_record(self):
        """Reset the decision counter and the in-memory trace buffer."""
        self.is_recording = True
        self.counter = 1
        self.record = []

    def finish_control(self):
        """Stop both motors and announce shutdown."""
        # Fixed typo in the status message (was 'contorller: stop').
        print('controller: stop')
        self.motor.motor_stop()

    def make_decision_with_policy(self, policy_type, *args):
        """ Make decision with different policies.
        @param policy_type
            1: ADP
            2: pure pursuit
            3: Car following with ADP
            4: proportional car following
            5: Coupled Car Following Controller
        Remaining positional args are the policy-specific observations.
        """
        if policy_type == 1:  # ADP
            assert len(args) == 2, 'args should be exactly 2'
            cur_K = -self.K_im_traj[-1]
            distance_2_tan, radian_at_tan = args
            self.dis_sum += distance_2_tan
            pwm_l_new, pwm_r_new = policy.adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K)
        elif policy_type == 2:  # pure pursuit
            l_d, sin_alpha = args
            amp = 150
            pwm_l_new, pwm_r_new = policy.pure_pursuit(l_d, sin_alpha, amp)
        elif policy_type == 3:  # Car following with ADP
            assert len(args) == 3, 'args should be exactly 3'
            cur_K = -self.K_im_traj[-1]
            distance_2_tan, radian_at_tan, estimated_dis = args
            self.dis_sum += distance_2_tan
            # Checkpoint the trace every 100 decisions so a crash loses
            # at most 100 samples.
            if self.is_recording and self.counter % 100 == 0:
                np.save('./.out/record', self.record)
            pwm_l_new, pwm_r_new = policy.car_following_with_adp(distance_2_tan, radian_at_tan, self.dis_sum, cur_K, estimated_dis, self.record)
            print(self.counter)
            self.counter += 1
        elif policy_type == 4:  # proportional car following
            K = 0.5
            dis2car, = args
            pwm_l_new, pwm_r_new = policy.car_following(dis2car, K)
        elif policy_type == 5:  # coupled car following
            d_arc, d_curve, theta = args
            pwm_l_new, pwm_r_new = policy.adp_coupled_car_following(d_arc, d_curve, theta, self.z, self.K_coupled)
        else:
            # Unknown policy: command zero speed rather than keep the
            # previous PWM values.
            pwm_l_new, pwm_r_new = 0, 0
            print('Policy Not Found')
        self.motor.motor_set_new_speed(pwm_l_new, pwm_r_new)

    def start(self):
        """Spin the motors up."""
        self.motor.motor_startup()

    def cleanup(self):
        """Release the motor GPIO/driver resources."""
        self.motor.motor_cleanup()

    def collision_avoid(self, start_time):
        """ Hardcoded collision avoidance behavior.

        Time-scripted open-loop manoeuvre relative to ``start_time``:
        swerve one way (<3 s), counter-steer (<5.7 s), straighten
        (<6.3 s), then return control to the caller.
        """
        while True:
            cur_time = time() - start_time
            if cur_time < 3:
                self.motor.motor_set_new_speed(100, 9)
                print('ob2', time() - start_time)
            elif cur_time < 5.7:
                self.motor.motor_set_new_speed(15, 98)
                print('ob3', time() - start_time)
            elif cur_time < 6.3:
                self.motor.motor_set_new_speed(100, 20)
                print('ob4', time() - start_time)
            else:
                print('obchange', time() - start_time)
                break
| [
"numpy.load",
"control.policy.adp",
"numpy.save",
"control.policy.adp_coupled_car_following",
"control.motor.Motor",
"numpy.zeros",
"time.time",
"control.policy.car_following_with_adp",
"control.policy.car_following",
"control.policy.pure_pursuit"
] | [((317, 335), 'control.motor.Motor', 'Motor', ([], {'slient': '(True)'}), '(slient=True)\n', (322, 335), False, 'from control.motor import Motor\n'), ((363, 369), 'time.time', 'time', ([], {}), '()\n', (367, 369), False, 'from time import sleep, time\n'), ((429, 466), 'numpy.load', 'np.load', (['"""./control/K_traj_IM_VI.npy"""'], {}), "('./control/K_traj_IM_VI.npy')\n", (436, 466), True, 'import numpy as np\n'), ((492, 531), 'numpy.load', 'np.load', (['"""./control/coupled_k/0221.npy"""'], {}), "('./control/coupled_k/0221.npy')\n", (499, 531), True, 'import numpy as np\n'), ((574, 585), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (582, 585), True, 'import numpy as np\n'), ((1424, 1486), 'control.policy.adp', 'policy.adp', (['distance_2_tan', 'radian_at_tan', 'self.dis_sum', 'cur_K'], {}), '(distance_2_tan, radian_at_tan, self.dis_sum, cur_K)\n', (1434, 1486), False, 'from control import policy\n'), ((1625, 1665), 'control.policy.pure_pursuit', 'policy.pure_pursuit', (['l_d', 'sin_alpha', 'amp'], {}), '(l_d, sin_alpha, amp)\n', (1644, 1665), False, 'from control import policy\n'), ((3009, 3015), 'time.time', 'time', ([], {}), '()\n', (3013, 3015), False, 'from time import sleep, time\n'), ((2083, 2196), 'control.policy.car_following_with_adp', 'policy.car_following_with_adp', (['distance_2_tan', 'radian_at_tan', 'self.dis_sum', 'cur_K', 'estimated_dis', 'self.record'], {}), '(distance_2_tan, radian_at_tan, self.dis_sum,\n cur_K, estimated_dis, self.record)\n', (2112, 2196), False, 'from control import policy\n'), ((2010, 2047), 'numpy.save', 'np.save', (['"""./.out/record"""', 'self.record'], {}), "('./.out/record', self.record)\n", (2017, 2047), True, 'import numpy as np\n'), ((2369, 2401), 'control.policy.car_following', 'policy.car_following', (['dis2car', 'K'], {}), '(dis2car, K)\n', (2389, 2401), False, 'from control import policy\n'), ((3142, 3148), 'time.time', 'time', ([], {}), '()\n', (3146, 3148), False, 'from time import sleep, time\n'), 
((2509, 2588), 'control.policy.adp_coupled_car_following', 'policy.adp_coupled_car_following', (['d_arc', 'd_curve', 'theta', 'self.z', 'self.K_coupled'], {}), '(d_arc, d_curve, theta, self.z, self.K_coupled)\n', (2541, 2588), False, 'from control import policy\n'), ((3280, 3286), 'time.time', 'time', ([], {}), '()\n', (3284, 3286), False, 'from time import sleep, time\n'), ((3419, 3425), 'time.time', 'time', ([], {}), '()\n', (3423, 3425), False, 'from time import sleep, time\n'), ((3494, 3500), 'time.time', 'time', ([], {}), '()\n', (3498, 3500), False, 'from time import sleep, time\n')] |
import scipy.special
import scipy.integrate
import numpy as np
import numpy.polynomial
def legendre_series(f, n):
    r"""
    Expand ..math:`f(x)` in Legendre polynomials, keeping the coefficients
    of ..math:`P_0(x) \dotsc P_{n-1}(x)` (i.e. up to but excluding
    ..math:`P_n(x)`).

    The returned object is callable and evaluates the truncated series at
    given values of ..math:`x`.
    """
    if n < 1:
        raise ValueError("'n' must be at least 1.")

    def coefficient(k):
        # Inner product <f, P_k> on [-1, 1] via adaptive quadrature,
        # weighted by the (k + 1/2) normalisation factor.
        inner, _ = scipy.integrate.quad(
            lambda x: scipy.special.eval_legendre(k, x) * f(x), -1, 1
        )
        return (k + 0.5) * inner

    coefficients = np.array([coefficient(k) for k in range(n)])
    return np.polynomial.legendre.Legendre(coefficients)
def taylor_coefficient(f, k, a=15):
    r"""
    Return the ..math:`k`th Taylor coefficient of ..math:`f(x)` about
    ..math:`x_0 = 0` (the ..math:`k = 0` term is the constant).

    ``a`` controls the radius of the sampling contour and should normally
    be left at its default.
    """
    if k == 0:
        return f(0)
    # Contour-integration trick: sample f on a tiny circle of radius
    # exp(-a/k) and combine the imaginary parts.  This is numerically far
    # better behaved than derivatives + factorials.
    radius = np.exp(-a / k)
    total = 0
    for m in range(1, k + 1):
        node = radius * np.exp(1j * np.pi * (0.5 - m) / k)
        total += (-1) ** m * np.imag(f(node))
    return np.exp(a) / k * total
def taylor_series(f, n, a=15):
    r"""
    Build the truncated Taylor expansion of ..math:`f(x)` about
    ..math:`x_0 = 0` containing the terms ..math:`x^0 \dotsc x^{n-1}`
    (up to but excluding ..math:`x^n`).

    The returned object is callable and evaluates the truncated series at
    given values of ..math:`x`.
    """
    if n < 1:
        raise ValueError("'n' must be at least 1.")
    # Collect the coefficients of x^0 .. x^(n-1) one at a time.
    coefficients = []
    for k in range(n):
        coefficients.append(taylor_coefficient(f, k, a))
    return np.polynomial.Polynomial(coefficients)
class fourier_series:
    r"""
    First ..math:`n` terms of the Fourier series of ..math:`f(x)` mapped to
    the period ..math:`[-1, 1)`.

    Terms are "numbered" in the order
    ..math::
        a_0, b_1, a_1, b_2, a_2, \dotsc
    by analogy to Taylor series: the constant first, then the lowest-order
    odd term, the next-lowest even term, and so on.

    Instances are callable and evaluate the truncated series at ``x``.

    Bug fix: both sine loops previously ran ``range(1, self._n_b)`` and so
    never computed nor used the highest sine coefficient ``b[self._n_b]``
    (for ``n == 2`` the ``b_1`` term was dropped entirely). They now run to
    ``self._n_b`` inclusive.
    """
    def __init__(self, f, n):
        if n < 1:
            raise ValueError("'n' must be at least 1.")
        # Split the n terms between cosines (a_0..a_{n_a-1}) and sines
        # (b_1..b_{n_b}), cosines getting the extra term when n is odd.
        self._n_a = (n + 1) // 2
        self._n_b = n - self._n_a
        self.a = np.empty((self._n_a,), dtype=np.float64)
        # To keep the labelling clear I store the `b[0] = 0` too.
        self.b = np.empty((self._n_b + 1,), dtype=np.float64)
        self.a[0] = 0.5 * scipy.integrate.quad(f, -1, 1)[0]
        self.b[0] = 0
        def cosint(x, k): return f(x) * np.cos(k*np.pi*x)
        def sinint(x, k): return f(x) * np.sin(k*np.pi*x)
        for k in range(1, self._n_a):
            self.a[k] = scipy.integrate.quad(cosint, -1, 1, args=(k,))[0]
        for k in range(1, self._n_b + 1):
            self.b[k] = scipy.integrate.quad(sinint, -1, 1, args=(k,))[0]
    def __call__(self, xs):
        out = np.zeros_like(xs)
        for k in range(self._n_a):
            out += self.a[k] * np.cos(k*np.pi * xs)
        for k in range(1, self._n_b + 1):
            out += self.b[k] * np.sin(k*np.pi * xs)
        return out
def high_order_polynomial(x):
    """Evaluate a fixed degree-12 test polynomial at ``x``."""
    coefficients = [
        -0.0372875, 0.674885, 1.34898, -12.652, -7.15369, 57.7268,
        8.73373, -104.258, 10.0257, 79.9955, -21.4594, -21.5587, 8.38861,
    ]
    poly = np.polynomial.Polynomial(coefficients)
    return poly(x)
def logistic(x):
    """Logistic sigmoid with steepness factor 5: 1 / (1 + exp(-5x))."""
    z = np.exp(-5*x)
    return 1 / (1 + z)
def lorentzian(x, c=0.2):
    """Sharply peaked test function centred at 0 with width parameter ``c``.

    NOTE(review): the denominator is ``c*pi + (x/c)**2``, which differs from
    the canonical Cauchy/Lorentzian ``c / (pi*(c**2 + x**2))`` — presumably
    intentional for this test suite; confirm if reused elsewhere.
    """
    denominator = c*np.pi + (x/c)**2
    return 1 / denominator
# Test functions to expand, keyed by the base name of the output ".dat" file.
_FS = {
    'polynomial': high_order_polynomial,
    'logistic': logistic,
    'lorentzian': lorentzian,
}
# Series-expansion constructors; each maps (f, n) to a callable approximation.
_SERIES = {
    'taylor': taylor_series,
    'legendre': legendre_series,
    'fourier': fourier_series,
}
if __name__ == '__main__':
    # Expansion orders to compare: 5 terms ("low") and 13 terms ("high").
    orders = [('low', 5), ('high', 13)]
    series = [taylor_series, legendre_series, fourier_series]
    xs = np.linspace(-1, 1, 201)
    # Column layout: x, f(x), then one column per (order, series) combination.
    out = np.empty((len(xs), 2+len(orders)*len(_SERIES)), dtype=np.float64)
    fmt = " ".join(["{:+10.6e}"] * out.shape[1])
    for name, f in _FS.items():
        out[:, 0] = xs
        out[:, 1] = [f(x) for x in xs]
        # ptr walks the approximation columns, starting after x and f(x).
        ptr = 2
        for order_name, order in orders:
            for s in series:
                out[:, ptr] = s(f, order)(xs)
                ptr += 1
        # One whitespace-separated table per test function, e.g. "logistic.dat".
        with open(name + ".dat", "w") as outf:
            for line in out:
                print(fmt.format(*line), file=outf)
| [
"numpy.zeros_like",
"numpy.empty",
"numpy.polynomial.Polynomial",
"numpy.sin",
"numpy.exp",
"numpy.linspace",
"numpy.cos"
] | [((1600, 1614), 'numpy.exp', 'np.exp', (['(-a / k)'], {}), '(-a / k)\n', (1606, 1614), True, 'import numpy as np\n'), ((4546, 4569), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(201)'], {}), '(-1, 1, 201)\n', (4557, 4569), True, 'import numpy as np\n'), ((3003, 3043), 'numpy.empty', 'np.empty', (['(self._n_a,)'], {'dtype': 'np.float64'}), '((self._n_a,), dtype=np.float64)\n', (3011, 3043), True, 'import numpy as np\n'), ((3127, 3171), 'numpy.empty', 'np.empty', (['(self._n_b + 1,)'], {'dtype': 'np.float64'}), '((self._n_b + 1,), dtype=np.float64)\n', (3135, 3171), True, 'import numpy as np\n'), ((3637, 3654), 'numpy.zeros_like', 'np.zeros_like', (['xs'], {}), '(xs)\n', (3650, 3654), True, 'import numpy as np\n'), ((3894, 4049), 'numpy.polynomial.Polynomial', 'np.polynomial.Polynomial', (['[-0.0372875, 0.674885, 1.34898, -12.652, -7.15369, 57.7268, 8.73373, -\n 104.258, 10.0257, 79.9955, -21.4594, -21.5587, 8.38861]'], {}), '([-0.0372875, 0.674885, 1.34898, -12.652, -7.15369,\n 57.7268, 8.73373, -104.258, 10.0257, 79.9955, -21.4594, -21.5587, 8.38861])\n', (3918, 4049), True, 'import numpy as np\n'), ((1624, 1633), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (1630, 1633), True, 'import numpy as np\n'), ((4111, 4125), 'numpy.exp', 'np.exp', (['(-5 * x)'], {}), '(-5 * x)\n', (4117, 4125), True, 'import numpy as np\n'), ((3294, 3315), 'numpy.cos', 'np.cos', (['(k * np.pi * x)'], {}), '(k * np.pi * x)\n', (3300, 3315), True, 'import numpy as np\n'), ((3352, 3373), 'numpy.sin', 'np.sin', (['(k * np.pi * x)'], {}), '(k * np.pi * x)\n', (3358, 3373), True, 'import numpy as np\n'), ((3721, 3743), 'numpy.cos', 'np.cos', (['(k * np.pi * xs)'], {}), '(k * np.pi * xs)\n', (3727, 3743), True, 'import numpy as np\n'), ((3811, 3833), 'numpy.sin', 'np.sin', (['(k * np.pi * xs)'], {}), '(k * np.pi * xs)\n', (3817, 3833), True, 'import numpy as np\n'), ((1679, 1715), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * (0.5 - n) / k)'], {}), '(1.0j * np.pi * (0.5 - n) / k)\n', 
(1685, 1715), True, 'import numpy as np\n')] |
# %% [markdown]
# # Libraries and Data
# %%
from zipfile import ZipFile
import altair as alt
import numpy as np
import pandas as pd
# Altair caps embedded data at 5000 rows by default; this dataset is far larger.
alt.data_transformers.disable_max_rows()
# %% [markdown]
# After downloading the dataset, we can load the train set from the zip file.
#
# However, for faster reloads, we stored it in a feather file.
#
# You can comment/uncomment the specific rows for each use case.
# %%
def first_load(save_feather=False):
    """Read the `click` and `hour` columns of the train set from the zip
    archive; optionally cache them to a feather file for faster reloads.
    """
    archive = ZipFile("./data/avazu-ctr-prediction.zip")
    frame = pd.read_csv(
        archive.open("train.gz"), compression="gzip", usecols=["click", "hour"]
    )
    if save_feather:
        # Persist and round-trip to verify the cache is faithful.
        frame.to_feather("./data/train.feather")
        assert frame.equals(pd.read_feather("./data/train.feather"))
    return frame
train = first_load(save_feather=True)
# Load from feather
# train = pd.read_feather("./data/train.feather")
# %% [markdown]
# We validate data features. Because of the size of the data set,
# we rely on numerical operations for a faster process in exchange for some
# intelligibility.
# %%
# `hour` is encoded as the integer YYMMDDHH; the arithmetic below peels off
# year (14), month (10), day (1-31) and hour (0-23) digits.
assert all(train["click"].unique() == [0, 1]), "Invalid `click` values."
assert all(train["hour"] // 1e6 == 14), "Invalid year data"
assert all((train["hour"] - 14e6) // 1e4 == 10), "Invalid month data"
assert all(
    ((train["hour"] - 14e6 - 10e4) // 1e2).isin(range(1, 31 + 1))
), "Invalid day data"
assert all(((train["hour"] - 14e6 - 10e4) % 1e2).isin(range(24))), "Invalid hour data"
# %%
# Transform the datetimeformat to pandas datetime
train["dthour"] = pd.to_datetime(train["hour"], format="%y%m%d%H")
assert (
    train["hour"].astype(str).str[-2:].astype(int) == train["dthour"].dt.hour
).all(), "Hour transformation do not match"
train = train.set_index("dthour").drop(columns="hour")
# %%
# Validate transformation results
# Consecutive timestamp deltas must be 0 or exactly one hour.
# NOTE(review): the f-string below has no placeholders — a plain string would do.
assert all(
    pd.Series(train.index).diff().iloc[1:].dt.total_seconds().unique() / 60**2
    == [
        0,
        1,
    ]
), f"Incorrect timestamp deltas"
assert train.index.is_monotonic, "Timestamp is not monotonic."
# %% [markdown]
# # Data aggregation
#
# - We assume that it is meaningful to use all the ads in a single
# group and to plot them all onto the same time series.
# - We assume that a row in the dataset stands for an 'impression' and,
# therefore, we can get hourly CTRs by dividing the number of clicks with
# the number of total impressions within that hour.
#
#
# %% [markdown]
# The number of records varies a lot by each hour.
# %%
# `display` is the notebook/IPython rich-output helper (assumed notebook context).
display(train.groupby(pd.Grouper(freq="h")).size())
alt.Chart(
    train.groupby(pd.Grouper(freq="h")).size().rename("records").reset_index()
).mark_line().encode(x="dthour:T", y="records:Q").properties(
    title="Number of Records", width=600, height=150
)
# %%
# Aggregate per hour: each row of `train` counts as one impression.
hourly = pd.DataFrame()
hourly["clicks"] = train.resample("H")["click"].sum()
hourly["impressions"] = train.resample("H")["click"].count()
display(
    hourly[["clicks", "impressions"]]
    .describe()
    .loc[["mean", "std", "min", "max"], :]
    .style.format(precision=0, thousands=" ")
)
# %% [markdown]
# Average CTR is around 17% with some considerable deviation
# between ~10% and ~22%.
# %%
hourly["CTR"] = train.resample("H")["click"].mean()
# Impression-weighted overall CTR (not the mean of hourly CTRs).
mean = hourly["clicks"].sum() / hourly["impressions"].sum()
# BUG FIX: the divisor was `hourly.size`, which is rows * columns (3x the
# number of hours once CTR is added) and understated the std by sqrt(3);
# divide by the number of hourly observations instead.
std = np.sqrt(
    (((hourly["clicks"] / hourly["impressions"] - mean) ** 2).sum() / len(hourly))
)
display(
    pd.DataFrame(
        {
            "CTR": {
                "mean": mean,
                "std": std,
                "min": hourly["CTR"].min(),
                "max": hourly["CTR"].max(),
            }
        }
    ).loc[["mean", "std", "min", "max"], :]
)
line = alt.Chart(hourly.reset_index()).mark_line().encode(x="dthour:T", y="CTR:Q")
points = (
    alt.Chart(hourly.reset_index())
    .mark_point()
    .encode(
        x="dthour:T",
        y=alt.Y("CTR:Q", scale=alt.Scale(zero=False)),
        tooltip=["dthour", "CTR"],
    )
)
(line + points).properties(title="CTR", width=600, height=150)
# %% [markdown]
# # Outlier detection
#
# As the data contains only a single weekend, we cannot tell too much about the weekly
# patterns.
#
#
# ## Assumptions
#
# - We do this for retrospective analysis, and therefore we can use a
# centered moving window.
# - We do not have a specific use case, so we can experiment with different
# window sizes. For in-day outliers we can set it to 6H, while for in-week
# or in-month outliers we can set it to 3D, 7D, etc.
#
#
# ## Calculation
#
# Because CTR is already an aggregate metric, we need to calculate the
# rolling metrics from the original `clicks` column.
# %%
def rolling_metrics(hourly, window):
    """Add centered rolling CTR mean/std columns, keyed by `window`, to `hourly`.

    Expects numeric columns `clicks`, `impressions` and `CTR` (and optionally a
    `dthour` column to promote to the index). Mutates `hourly` in place, adding
    `{window}-mean`, `{window}-std` and intermediate helper columns, and
    returns it.
    """
    try:
        hourly.set_index("dthour", inplace=True)
    except KeyError:
        print("`dthour` is already an index")
    # Rolling CTR mean as ratio-of-sums, so low-traffic hours are not
    # over-weighted. BUG FIX: column selection on a Rolling object must use a
    # list — the previous bare `["clicks", "impressions"]` was tuple indexing,
    # which is invalid/deprecated in pandas.
    hourly[f"{window}-mean"] = (
        hourly.rolling(window, center=True)[["clicks", "impressions"]]
        .sum()
        .apply(lambda x: x["clicks"] / x["impressions"], axis=1)
    )
    # Rolling CTR std around the rolling mean.
    hourly["squared_error"] = (hourly["CTR"] - hourly[f"{window}-mean"]) ** 2
    hourly[f"{window}-squared_error"] = (
        hourly["squared_error"].rolling(window, center=True).sum()
    )
    # Actual observation count per window (edges hold fewer than `window`).
    hourly["hours_in_window"] = (
        hourly.rolling(window, center=True).apply(lambda x: x.size).iloc[:, 0]
    )
    hourly[f"{window}-std"] = np.sqrt(
        hourly[f"{window}-squared_error"] / hourly["hours_in_window"]
    )
    return hourly
# %%
def define_outliers(hourly, window, n_std=1.5):
    """Flag hours whose CTR lies more than `n_std` rolling standard deviations
    from the rolling mean.

    Requires the `{window}-mean` / `{window}-std` columns from
    `rolling_metrics`. Adds `top`, `bottom` and boolean `outlier` columns and
    returns the mutated frame. `n_std` defaults to the original 1.5 band.

    NOTE: the original applied `.astype(bool)` to only the second comparison
    because of operator precedence; comparisons are already boolean, so the
    cast was a no-op and is dropped here.
    """
    band = hourly[f"{window}-std"] * n_std
    hourly["top"] = hourly[f"{window}-mean"] + band
    hourly["bottom"] = hourly[f"{window}-mean"] - band
    hourly["outlier"] = (hourly["CTR"] > hourly["top"]) | (
        hourly["CTR"] < hourly["bottom"]
    )
    return hourly
# %%
def plot_outliers(hourly):
    """Scatter hourly CTR coloured by outlier flag, with the rolling mean line
    and the tolerance band overlaid, then display the layered chart.

    NOTE(review): relies on the module-level `window` global for the
    `{window}-mean` column name — pass it as a parameter if this is reused.
    """
    try:
        hourly.reset_index("dthour", inplace=True)
    except KeyError:
        print("`dthour` is already a column")
    points = (
        alt.Chart(hourly)
        .mark_point()
        .encode(
            x="dthour:T",
            y=alt.Y("CTR:Q", scale=alt.Scale(zero=False)),
            color=alt.Color("outlier:N"),
            tooltip=["dthour:T", "CTR", "outlier"],
        )
    )
    lines = alt.layer(
        alt.Chart(hourly)
        .mark_line(opacity=0.5, color="grey")
        .encode(x="dthour:T", y="CTR:Q"),
        alt.Chart(hourly)
        .mark_line(opacity=0.5, color="red")
        .encode(x="dthour:T", y=f"{window}-mean:Q"),
        alt.Chart(hourly)
        .mark_area(opacity=0.2)
        .encode(x="dthour:T", y="top:Q", y2="bottom:Q"),
    )
    (points + lines).properties(title="CTR Outliers", width=600, height=150).display()
# %%
# Run the outlier pipeline with a one-day centered window.
window = "1D"
hourly = rolling_metrics(hourly, window)
hourly = define_outliers(hourly, window)
# Spot-check a sample of the computed columns before plotting.
display(
    hourly.loc[
        :, ["CTR", f"{window}-mean", f"{window}-std", "top", "bottom", "outlier"]
    ].sample(10)
)
plot_outliers(hourly)
# %% [markdown]
# # Possible improvements
#
# - For a more programmatic identification, use an error/distance metric
# to measure the distance of the outliers from the rest of the samples
# - Examine the relationship between CTR and change of total impressions
# - Combine smaller and bigger windows
| [
"pandas.DataFrame",
"zipfile.ZipFile",
"altair.Chart",
"pandas.read_feather",
"pandas.to_datetime",
"altair.data_transformers.disable_max_rows",
"altair.Color",
"pandas.Grouper",
"pandas.Series",
"altair.Scale",
"numpy.sqrt"
] | [((134, 174), 'altair.data_transformers.disable_max_rows', 'alt.data_transformers.disable_max_rows', ([], {}), '()\n', (172, 174), True, 'import altair as alt\n'), ((1594, 1642), 'pandas.to_datetime', 'pd.to_datetime', (["train['hour']"], {'format': '"""%y%m%d%H"""'}), "(train['hour'], format='%y%m%d%H')\n", (1608, 1642), True, 'import pandas as pd\n'), ((2807, 2821), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2819, 2821), True, 'import pandas as pd\n'), ((482, 524), 'zipfile.ZipFile', 'ZipFile', (['"""./data/avazu-ctr-prediction.zip"""'], {}), "('./data/avazu-ctr-prediction.zip')\n", (489, 524), False, 'from zipfile import ZipFile\n'), ((5375, 5445), 'numpy.sqrt', 'np.sqrt', (["(hourly[f'{window}-squared_error'] / hourly['hours_in_window'])"], {}), "(hourly[f'{window}-squared_error'] / hourly['hours_in_window'])\n", (5382, 5445), True, 'import numpy as np\n'), ((778, 817), 'pandas.read_feather', 'pd.read_feather', (['"""./data/train.feather"""'], {}), "('./data/train.feather')\n", (793, 817), True, 'import pandas as pd\n'), ((6163, 6185), 'altair.Color', 'alt.Color', (['"""outlier:N"""'], {}), "('outlier:N')\n", (6172, 6185), True, 'import altair as alt\n'), ((2554, 2574), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""h"""'}), "(freq='h')\n", (2564, 2574), True, 'import pandas as pd\n'), ((3903, 3924), 'altair.Scale', 'alt.Scale', ([], {'zero': '(False)'}), '(zero=False)\n', (3912, 3924), True, 'import altair as alt\n'), ((6003, 6020), 'altair.Chart', 'alt.Chart', (['hourly'], {}), '(hourly)\n', (6012, 6020), True, 'import altair as alt\n'), ((6121, 6142), 'altair.Scale', 'alt.Scale', ([], {'zero': '(False)'}), '(zero=False)\n', (6130, 6142), True, 'import altair as alt\n'), ((6287, 6304), 'altair.Chart', 'alt.Chart', (['hourly'], {}), '(hourly)\n', (6296, 6304), True, 'import altair as alt\n'), ((6401, 6418), 'altair.Chart', 'alt.Chart', (['hourly'], {}), '(hourly)\n', (6410, 6418), True, 'import altair as alt\n'), ((6525, 6542), 
'altair.Chart', 'alt.Chart', (['hourly'], {}), '(hourly)\n', (6534, 6542), True, 'import altair as alt\n'), ((1886, 1908), 'pandas.Series', 'pd.Series', (['train.index'], {}), '(train.index)\n', (1895, 1908), True, 'import pandas as pd\n'), ((2613, 2633), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""h"""'}), "(freq='h')\n", (2623, 2633), True, 'import pandas as pd\n')] |
'''
from functions import *
'''
import pickle
import os.path
import numpy as np
from FNN import FNN
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
OPTIMIZERS = [tf.train.GradientDescentOptimizer, tf.train.AdamOptimizer,\
tf.train.ProximalGradientDescentOptimizer, tf.train.RMSPropOptimizer]
# Flappy-bird physics constants (pixels per frame).
HORIZONTAL_VELOCITY = 4
FLAP_VELOCITY = -9
MAX_VELOCITY = 10
# Depth of the action-sequence rollout used by the policies.
STEPS = 4
# Feature bounds [[max_x, max_y, max_v], [min_x, min_y, min_v]] for scaling.
MAX_MIN = [[200, 387, 10],[0, -225, -9]]
SCALER = MinMaxScaler(feature_range=(-1,1))
SCALER.fit(MAX_MIN)
# Grid resolution for discretising continuous positions into states.
VERTICAL_ACCURACY_OF_STATES = 5
HORIZONTAL_ACCURACY_OF_STATES = 5
# Training targets for tanh-activated outputs.
# NOTE(review): TANH_POSITIVE is tanh(1) (~0.76) while TANH_NEGATIVE is a raw
# -1 rather than -tanh(1) — confirm the asymmetry is intentional.
TANH_POSITIVE = np.tanh(1)
TANH_NEGATIVE = -1
# Output-vector slots and integer action encodings.
FLAP_POSITION = 0
NOT_FLAP_POSITION = 1
FLAP = 1
NOT_FLAP = 0
NETWORKS_COUNT = 5
'''
from init_train_functions import *
q = get_q_values("../models/QLearning/q__")
'''
def get_q_values(file_name):
    """Unpickle and return the Q-table stored at `file_name`.

    Returns None (implicitly, as before) when the path is not a regular file.
    """
    if not os.path.isfile(file_name):
        return None
    with open(file_name, 'rb') as f:
        return pickle.load(f)
'''
from init_train_functions import *
q = get_q_values("../models/QLearning/q__")
positive_and_negative_values(q)
'''
def positive_and_negative_values(q):
    """Summarise the Q-table's value distribution.

    Skips the 'iterations' bookkeeping entry, tallies positive (>= 0) and
    negative (< 0) action values, prints a summary and returns
    ``{"positive": {...}, "negative": {...}}`` with count/sum/average keys.
    Averages are reported as 0 when a bucket is empty (previously this
    raised ZeroDivisionError).
    """
    positive_sum = 0
    positive = 0
    negative_sum = 0
    negative = 0
    for dic in q:
        if 'iterations' == dic:
            continue
        for val in q[dic]:
            if q[dic][val] < 0:
                negative_sum += q[dic][val]
                negative += 1
            else:
                positive_sum += q[dic][val]
                positive += 1
    # Guard the averages against empty buckets.
    positive_avg = positive_sum / positive if positive else 0
    negative_avg = negative_sum / negative if negative else 0
    result = {"positive": {"count": positive, "sum": positive_sum, "average": positive_avg},\
        "negative": {"count": negative, "sum": negative_sum, "average": negative_avg} }
    print("Positive: " + str(result["positive"]["count"]) + " with sum: " + str(result["positive"]["sum"])\
        + " average: " + str(result["positive"]["average"]))
    print("Negative: " + str(result["negative"]["count"]) + " with sum: " + str(result["negative"]["sum"])\
        + " average: " + str(result["negative"]["average"]))
    return result
'''
from init_train_functions import *
q = get_q_values("../models/QLearning/q__")
max_min_value(q)
'''
def max_min_value(q):
    """Print and return the (max, min) action values stored in the Q-table.

    Skips the 'iterations' bookkeeping entry. Now returns the pair so
    callers can use the extremes programmatically (previously the function
    only printed them); existing callers that ignored the return value are
    unaffected.
    """
    maxx = float("-inf")
    minn = float("inf")
    for dic in q:
        if 'iterations' == dic:
            continue
        for val in q[dic]:
            if q[dic][val] > maxx:
                maxx = q[dic][val]
            if q[dic][val] < minn:
                minn = q[dic][val]
    print("Max: " + str(maxx))
    print("Min: " + str(minn))
    return maxx, minn
def __prepare_for_training(x, y, v):
    """Scale one (x, y, velocity) sample into the [-1, 1] feature space."""
    scaled = SCALER.transform([[x, y, v]])
    return scaled[0].tolist()
'''
from init_train_functions import *
q = get_q_values("../models/QLearning/q__")
get_data_for_training(q)
'''
def get_data_for_training(q):
    """Build (inputs, targets) training pairs from an annotated Q-table.

    Skips the 'iterations' entry, failure states (first element 'f') and
    states with horizontal distance > 200. Each input is the scaled state;
    each target marks the stored decision's slot with TANH_POSITIVE and the
    other slot with TANH_NEGATIVE.

    Dead code removed: the unused `action_bool` local and an unused
    `__q_function` lookup (pure dict read whose result was discarded).
    """
    inn = []
    outt = []
    for dic in q:
        if 'iterations' == dic or dic[0] == 'f' or dic[0] > 200:
            continue
        action = q[dic]["action"]
        # The complementary action gets the negative target.
        not_action = FLAP if action == NOT_FLAP else NOT_FLAP
        inn.append(__prepare_for_training(dic[0], dic[1], dic[2]))
        tmp_out = [0]*2
        tmp_out[__get_out_index(action)] = TANH_POSITIVE
        tmp_out[__get_out_index(not_action)] = TANH_NEGATIVE
        outt.append(tmp_out)
    return inn, outt
def __get_bool_of_action(action):
    """Map the integer action encoding to a bool (1 -> True, else False)."""
    # `True if x else False` is redundant; the comparison already yields a bool.
    return action == 1
def __get_out_index(action):
    """Output-vector slot for an action: flap -> FLAP_POSITION, else NOT_FLAP_POSITION."""
    if action == FLAP:
        return FLAP_POSITION
    return NOT_FLAP_POSITION
'''
from init_train_functions import *
q = get_q_values("../models/QLearning/q__")
save_decisions(q, "q")
'''
def save_decisions(q, file_name):
    """Annotate every playable Q-table state with the rollout-policy action,
    then pickle the table to `file_name`.
    """
    for state in q:
        # Skip bookkeeping and failure entries.
        if state == 'iterations' or state[0] == 'f':
            continue
        chosen = __get_action_by_policy(__q_function, q, state[0], state[1], state[2])
        q[state]["action"] = chosen
    with open(file_name, 'wb') as handle:
        pickle.dump(q, handle, pickle.HIGHEST_PROTOCOL)
    print('values saved')
'''
from init_train_functions import *
q = get_q_values("q")
inn, outt = get_data_for_training(q)
count = len(inn)
networks = [FNN(OPTIMIZERS[i % len(OPTIMIZERS)], i) for i in range(NETWORKS_COUNT)]
train_networks(networks, inn[:count], outt[:count], 10)
'''
def train_networks(networks, data_in, data_out, train_count):
    """Run `train_count` training steps on each network, persist each one,
    and return the (mutated) list of networks.
    """
    for idx, network in enumerate(networks):
        print("start training network " + str(idx))
        network.train_step(data_in, data_out, train_count)
        network.save()
        print("finished training network " + str(idx))
    return networks
'''
from init_train_functions import *
q = get_q_values("q")
inn, outt = get_data_for_training(q)
count = len(inn)
networks = [FNN(OPTIMIZERS[i % len(OPTIMIZERS)], i) for i in range(NETWORKS_COUNT)]
networks = train_networks(networks, inn[:count], outt[:count], 100)
compare_computation(networks, q, 100)
'''
def compare_computation(networks, q, size):
    """Compare the Q-table's stored decisions against the network ensemble's
    decisions for up to `size` + 1 playable states and print the tallies.
    Mismatching states are printed as they are found.
    """
    same_output = 0
    different_output = 0
    processed = 0
    for state in q:
        if state == 'iterations' or state[0] == 'f' or state[0] > 200:
            continue
        q_action = q[state]["action"]
        n_action = __get_n_action(networks, state[0], state[1], state[2])
        if q_action == n_action:
            same_output += 1
        else:
            different_output += 1
            print(state)
        if processed == size:
            break
        processed += 1
    print("Same: " + str(same_output))
    print("Different: " + str(different_output))
def __q_function(q, x, y, v, a):
    """Look up the stored Q-value for (state, action); 0 for unseen pairs."""
    state = __get_state(x, y, v)
    flapped = a == 1
    if state in q and flapped in q[state]:
        return q[state][flapped]
    return 0
def __n_function(n, x, y, v):
    """Scale the state and return the network's raw output vector for it."""
    features = SCALER.transform([[x, y, v]])[0].tolist()
    return n.predict([features])[0]
# Mutable scratch state shared by __get_action_by_policy and
# __generate_variations; reset at the start of each policy call. Not thread-safe.
actual_variation = None
actual_sum = None
variations = None
def __get_action_by_policy(function, q_or_net, x, y, v):
    """Choose flap (1) or no-flap (0) by enumerating every STEPS-deep action
    sequence from the current state and taking the first action of the
    best-scoring sequence. Equal-scoring sequences keep a flap decision over
    a no-flap one (the original tie rule is preserved).

    Bug fix: `dict.iteritems()` is Python 2 only and raises AttributeError on
    Python 3 (which the rest of this module targets); use `dict.items()`.
    """
    global actual_variation, actual_sum, variations
    actual_variation = ""
    actual_sum = 0
    variations = {}
    __generate_variations(0, x, y, v, function, q_or_net)
    # Renamed from `max` to avoid shadowing the builtin.
    best_score = float("-inf")
    action_to_take = 0
    for key, value in variations.items():
        if best_score <= value:
            if best_score == value and action_to_take == 0:
                continue
            best_score = value
            action_to_take = int(key[0])
    return action_to_take
def __generate_variations(idx, xdif, ydif, vel, function, q_or_net):
    # Depth-first enumeration of all 2**STEPS action sequences, accumulating
    # each sequence's total Q-value into the module-level `variations` dict
    # keyed by the "10.."-style action string. Uses the shared globals as a
    # running path/score stack; every branch undoes its mutation on the way out.
    global actual_variation, actual_sum, variations
    if idx == STEPS:
        variations[actual_variation] = actual_sum
        return
    # Branch 1: flap — vertical velocity resets to FLAP_VELOCITY.
    q_value = function(q_or_net, xdif, ydif, vel, 1)
    actual_sum += q_value
    actual_variation += "1"
    __generate_variations(idx + 1, xdif - HORIZONTAL_VELOCITY, ydif - FLAP_VELOCITY, FLAP_VELOCITY, function, q_or_net)
    actual_variation = actual_variation[:-1]
    actual_sum -= q_value
    # Branch 0: no flap — gravity increases velocity, capped at MAX_VELOCITY.
    q_value = function(q_or_net, xdif, ydif, vel, 0)
    actual_sum += q_value
    actual_variation += "0"
    new_velocity = min(vel + 1, MAX_VELOCITY)
    # NOTE(review): the literal 4 here presumably means HORIZONTAL_VELOCITY,
    # as in the flap branch — confirm before unifying.
    __generate_variations(idx + 1, xdif - 4, ydif - new_velocity, new_velocity, function, q_or_net)
    actual_variation = actual_variation[:-1]
    actual_sum -= q_value
def __get_q_action(q, x, y, v):
    # Convenience wrapper: rollout policy driven by Q-table lookups.
    return __get_action_by_policy(__q_function, q, x, y, v)
def __get_network_action(network, x, y, v):
    """Return one network's (flap_score, no_flap_score) pair for the state."""
    prediction = __n_function(network, x, y, v)
    return prediction[FLAP_POSITION], prediction[NOT_FLAP_POSITION]
def __get_n_action(networks, x, y, v):
    """Majority vote across the ensemble; a network whose two outputs share
    the same sign is treated as ambiguous and abstains. Ties favour no-flap.
    """
    flap_votes = 0
    stay_votes = 0
    for network in networks:
        flap_score, stay_score = __get_network_action(network, x, y, v)
        ambiguous = (flap_score < 0 and stay_score < 0) or (flap_score > 0 and stay_score > 0)
        if ambiguous:
            continue
        if flap_score <= stay_score:
            stay_votes += 1
        else:
            flap_votes += 1
    return NOT_FLAP if stay_votes >= flap_votes else FLAP
def __state_round(num, base):
    """Snap `num` (truncated to int first) to the nearest multiple of `base`."""
    quotient = float(int(num)) / base
    return int(base * round(quotient))
def __get_state(xdif, ydif, vel):
    """Discretise a continuous (x, y, velocity) sample into a state tuple."""
    return (
        __state_round(xdif, HORIZONTAL_ACCURACY_OF_STATES),
        __state_round(ydif, VERTICAL_ACCURACY_OF_STATES),
        vel,
    )
| [
"pickle.load",
"pickle.dump",
"sklearn.preprocessing.MinMaxScaler",
"numpy.tanh"
] | [((440, 475), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (452, 475), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((579, 589), 'numpy.tanh', 'np.tanh', (['(1)'], {}), '(1)\n', (586, 589), True, 'import numpy as np\n'), ((3528, 3570), 'pickle.dump', 'pickle.dump', (['q', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(q, f, pickle.HIGHEST_PROTOCOL)\n', (3539, 3570), False, 'import pickle\n'), ((886, 900), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (897, 900), False, 'import pickle\n')] |
"""Find the maximum firing rate of the somas as driven by bias
Set soma offset bias to max
Set soma refractory bias to max (minimum refractory)
Iterate through the somas, and collect spikes
Plot results
"""
import os
from time import sleep
import argparse
import numpy as np
import matplotlib.pyplot as plt
from pystorm.hal import HAL
from pystorm.hal.hal import parse_hal_spikes
from pystorm.hal.neuromorph import graph
from pystorm.PyDriver import bddriver as bd
from utils.exp import clear_spikes
from utils.file_io import load_txt_data, set_data_dir
# Shadows the imported HAL class with the singleton instance used below.
HAL = HAL()
CORE = 0
MAX_NEURONS = 4096
# DAC bias counts; per the module docstring these are the maximum settings.
BIAS_REF = 1024
BIAS_OFFSET = 1024
# Spike timestamps arrive in nanoseconds; TIME_SCALE converts to seconds.
TIME_SCALE = 1E-9
NEURONS = 4096
# Seconds of recording per soma, and idle time between runs.
RUN_TIME = 0.1
INTER_RUN_TIME = 0.1
DATA_DIR = set_data_dir(__file__)
def parse_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description='Characterize the soma max firing rates')
    parser.add_argument("-r", action="store_true", dest="use_saved_data", help='reuse cached data')
    return parser.parse_args()
def build_net():
    """Build and map a 1-D HAL network with a single all-zero tap pool; return the pool."""
    dim = 1
    taps = np.zeros((MAX_NEURONS, dim))
    network = graph.Network("net")
    pool = network.create_pool("pool", taps)
    HAL.map(network)
    return pool
def set_analog(hal):
    """Sets the soma config bits and the bias currents.

    Every soma gets gain 1 and a positive offset with multiplier 3, starting
    disabled; the refractory and offset DACs are then set to the bias counts
    (per the module docstring, the maximum settings). Flushes at the end so
    the whole configuration lands on the hardware together.
    """
    for nrn_idx in range(MAX_NEURONS):
        hal.driver.SetSomaGain(CORE, nrn_idx, bd.bdpars.SomaGainId.ONE)
        hal.driver.SetSomaOffsetSign(CORE, nrn_idx, bd.bdpars.SomaOffsetSignId.POSITIVE)
        hal.driver.SetSomaOffsetMultiplier(CORE, nrn_idx, bd.bdpars.SomaOffsetMultiplierId.THREE)
        hal.driver.SetSomaEnableStatus(CORE, nrn_idx, bd.bdpars.SomaStatusId.DISABLED)
    hal.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_REF, BIAS_REF)
    hal.driver.SetDACCount(CORE, bd.bdpars.BDHornEP.DAC_SOMA_OFFSET, BIAS_OFFSET)
    hal.flush()
def set_hal(hal):
    """Sets the HAL settings that remain constant throughout the experiment:
    output recording stays disabled (spike recording is toggled per-soma)."""
    hal.disable_output_recording(flush=True)
def toggle_hal(nrn_idx):
    """Enable one soma, record its spike traffic for RUN_TIME seconds, then
    disable it again.

    The neuron index is translated to its AER address before the enable call.
    """
    # clear queues
    aer_nrn_idx = HAL.driver.BDPars.GetSomaAERAddr(nrn_idx)
    HAL.driver.SetSomaEnableStatus(CORE, aer_nrn_idx, bd.bdpars.SomaStatusId.ENABLED)
    # Fine upstream time resolution while recording.
    HAL.set_time_resolution(upstream_ns=10000)
    HAL.start_traffic(flush=False)
    HAL.enable_spike_recording(flush=True)
    sleep(RUN_TIME)
    HAL.driver.SetSomaEnableStatus(CORE, aer_nrn_idx, bd.bdpars.SomaStatusId.DISABLED)
    HAL.stop_traffic(flush=False)
    HAL.disable_spike_recording(flush=True)
    # Back to coarse resolution between runs.
    HAL.set_time_resolution(upstream_ns=10000000)
    HAL.flush()
def measure_soma_max_rate(pool, nrn_idx):
    """Drive a single soma and estimate its maximum firing rate in Hz.

    Records spikes for RUN_TIME seconds via `toggle_hal`, then divides the
    inter-spike count by the elapsed time between first and last spike.
    Dead code removed: a no-op `soma_spikes = soma_spikes` self-assignment
    and commented-out debug prints.
    """
    clear_spikes(HAL, INTER_RUN_TIME)
    toggle_hal(nrn_idx)
    hal_spikes = parse_hal_spikes(HAL.get_spikes())
    # Column 0 is taken as the spike timestamp; TIME_SCALE (1e-9) converts it
    # to seconds, so timestamps are presumably in ns — confirm against HAL.
    soma_spikes = np.array(hal_spikes[pool][nrn_idx])[:, 0]
    soma_spikes -= soma_spikes[0]
    # n intervals between n+1 spikes.
    n_spks = len(soma_spikes)-1
    time_period = (soma_spikes[-1]- soma_spikes[0])*TIME_SCALE
    max_rate = n_spks/time_period
    clear_spikes(HAL, INTER_RUN_TIME)
    return max_rate
def plot_max_rates(max_rates):
    """Plot the data: per-soma scatter, a 2D heatmap and a histogram of the
    max firing rates, each saved as a PDF under DATA_DIR."""
    neurons = len(max_rates)
    # 1-D scatter: rate vs soma index.
    fig_1d = plt.figure()
    plt.plot(max_rates, 'o', markersize=1)
    plt.xlim(0, neurons-1)
    plt.xlabel("Soma Index")
    plt.ylabel("Max Firing Rate (Hz)")
    # Reshape assumes `neurons` is a perfect square (the soma grid).
    max_rates_2d = max_rates.reshape((int(np.sqrt(neurons)), -1))
    fig_2d_heatmap = plt.figure()
    ims = plt.imshow(max_rates_2d)
    plt.colorbar(ims)
    plt.xlabel("Soma X Coordinate")
    plt.ylabel("Soma Y Coordinate")
    plt.title("Max Firing Rate (Hz)")
    # Histogram with summary statistics in the title.
    fig_hist = plt.figure()
    bins = min(max(10, neurons), 80)
    max_rates_mean = np.mean(max_rates)
    max_rates_median = np.median(max_rates)
    max_rates_min = np.min(max_rates)
    max_rates_max = np.max(max_rates)
    plt.hist(max_rates, bins=bins)
    plt.axvline(max_rates_mean, color="k", label="mean")
    plt.axvline(max_rates_median, color="r", label="median")
    plt.xlabel("Max firing Rate (Hz)")
    plt.ylabel("Counts")
    plt.title("Mean:{:,.0f} Median:{:,.0f} Min:{:,.0f} Max:{:,.0f}".format(
        max_rates_mean, max_rates_median, max_rates_min, max_rates_max))
    plt.legend()
    fig_1d.savefig(DATA_DIR + "nrn_idx_vs_max_rate.pdf")
    fig_2d_heatmap.savefig(DATA_DIR + "2d_heatmap.pdf")
    fig_hist.savefig(DATA_DIR + "histogram.pdf")
def check_soma_max_rates(parsed_args):
    """Run the soma max-firing-rate characterization, or replot cached data
    when `-r` was given; always plots, prints and shows the results.
    """
    if parsed_args.use_saved_data:
        max_rates = load_txt_data(DATA_DIR + "max_rates.txt")
    else:
        pool = build_net()
        set_analog(HAL)
        set_hal(HAL)
        max_rates = np.zeros(NEURONS)
        for idx in range(NEURONS):
            max_rates[idx] = measure_soma_max_rate(pool, idx)
        np.savetxt(DATA_DIR + "max_rates.txt", max_rates)
    plot_max_rates(max_rates)
    print("Max firing rates:")
    print(max_rates)
    plt.show()
if __name__ == "__main__":
check_soma_max_rates(parse_args())
| [
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"pystorm.hal.HAL.stop_traffic",
"pystorm.hal.HAL",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.axvline",
"pystorm.hal.HAL.driver.BDPars.GetSomaAERAddr",
"utils.file_io.set_data_dir",
"matplotlib.pyplot.imshow",
"numpy.savetxt... | [((564, 569), 'pystorm.hal.HAL', 'HAL', ([], {}), '()\n', (567, 569), False, 'from pystorm.hal import HAL\n'), ((716, 738), 'utils.file_io.set_data_dir', 'set_data_dir', (['__file__'], {}), '(__file__)\n', (728, 738), False, 'from utils.file_io import load_txt_data, set_data_dir\n'), ((810, 887), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Characterize the soma max firing rates"""'}), "(description='Characterize the soma max firing rates')\n", (833, 887), False, 'import argparse\n'), ((1133, 1161), 'numpy.zeros', 'np.zeros', (['(MAX_NEURONS, dim)'], {}), '((MAX_NEURONS, dim))\n', (1141, 1161), True, 'import numpy as np\n'), ((1172, 1192), 'pystorm.hal.neuromorph.graph.Network', 'graph.Network', (['"""net"""'], {}), "('net')\n", (1185, 1192), False, 'from pystorm.hal.neuromorph import graph\n'), ((1244, 1256), 'pystorm.hal.HAL.map', 'HAL.map', (['net'], {}), '(net)\n', (1251, 1256), False, 'from pystorm.hal import HAL\n'), ((2155, 2196), 'pystorm.hal.HAL.driver.BDPars.GetSomaAERAddr', 'HAL.driver.BDPars.GetSomaAERAddr', (['nrn_idx'], {}), '(nrn_idx)\n', (2187, 2196), False, 'from pystorm.hal import HAL\n'), ((2201, 2287), 'pystorm.hal.HAL.driver.SetSomaEnableStatus', 'HAL.driver.SetSomaEnableStatus', (['CORE', 'aer_nrn_idx', 'bd.bdpars.SomaStatusId.ENABLED'], {}), '(CORE, aer_nrn_idx, bd.bdpars.SomaStatusId.\n ENABLED)\n', (2231, 2287), False, 'from pystorm.hal import HAL\n'), ((2287, 2329), 'pystorm.hal.HAL.set_time_resolution', 'HAL.set_time_resolution', ([], {'upstream_ns': '(10000)'}), '(upstream_ns=10000)\n', (2310, 2329), False, 'from pystorm.hal import HAL\n'), ((2334, 2364), 'pystorm.hal.HAL.start_traffic', 'HAL.start_traffic', ([], {'flush': '(False)'}), '(flush=False)\n', (2351, 2364), False, 'from pystorm.hal import HAL\n'), ((2369, 2407), 'pystorm.hal.HAL.enable_spike_recording', 'HAL.enable_spike_recording', ([], {'flush': '(True)'}), '(flush=True)\n', (2395, 2407), False, 'from pystorm.hal import 
HAL\n'), ((2412, 2427), 'time.sleep', 'sleep', (['RUN_TIME'], {}), '(RUN_TIME)\n', (2417, 2427), False, 'from time import sleep\n'), ((2432, 2519), 'pystorm.hal.HAL.driver.SetSomaEnableStatus', 'HAL.driver.SetSomaEnableStatus', (['CORE', 'aer_nrn_idx', 'bd.bdpars.SomaStatusId.DISABLED'], {}), '(CORE, aer_nrn_idx, bd.bdpars.SomaStatusId.\n DISABLED)\n', (2462, 2519), False, 'from pystorm.hal import HAL\n'), ((2519, 2548), 'pystorm.hal.HAL.stop_traffic', 'HAL.stop_traffic', ([], {'flush': '(False)'}), '(flush=False)\n', (2535, 2548), False, 'from pystorm.hal import HAL\n'), ((2553, 2592), 'pystorm.hal.HAL.disable_spike_recording', 'HAL.disable_spike_recording', ([], {'flush': '(True)'}), '(flush=True)\n', (2580, 2592), False, 'from pystorm.hal import HAL\n'), ((2597, 2642), 'pystorm.hal.HAL.set_time_resolution', 'HAL.set_time_resolution', ([], {'upstream_ns': '(10000000)'}), '(upstream_ns=10000000)\n', (2620, 2642), False, 'from pystorm.hal import HAL\n'), ((2647, 2658), 'pystorm.hal.HAL.flush', 'HAL.flush', ([], {}), '()\n', (2656, 2658), False, 'from pystorm.hal import HAL\n'), ((2771, 2804), 'utils.exp.clear_spikes', 'clear_spikes', (['HAL', 'INTER_RUN_TIME'], {}), '(HAL, INTER_RUN_TIME)\n', (2783, 2804), False, 'from utils.exp import clear_spikes\n'), ((3332, 3365), 'utils.exp.clear_spikes', 'clear_spikes', (['HAL', 'INTER_RUN_TIME'], {}), '(HAL, INTER_RUN_TIME)\n', (3344, 3365), False, 'from utils.exp import clear_spikes\n'), ((3485, 3497), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3495, 3497), True, 'import matplotlib.pyplot as plt\n'), ((3502, 3540), 'matplotlib.pyplot.plot', 'plt.plot', (['max_rates', '"""o"""'], {'markersize': '(1)'}), "(max_rates, 'o', markersize=1)\n", (3510, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3569), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(neurons - 1)'], {}), '(0, neurons - 1)\n', (3553, 3569), True, 'import matplotlib.pyplot as plt\n'), ((3572, 3596), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""Soma Index"""'], {}), "('Soma Index')\n", (3582, 3596), True, 'import matplotlib.pyplot as plt\n'), ((3601, 3635), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Max Firing Rate (Hz)"""'], {}), "('Max Firing Rate (Hz)')\n", (3611, 3635), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3734, 3736), True, 'import matplotlib.pyplot as plt\n'), ((3747, 3771), 'matplotlib.pyplot.imshow', 'plt.imshow', (['max_rates_2d'], {}), '(max_rates_2d)\n', (3757, 3771), True, 'import matplotlib.pyplot as plt\n'), ((3776, 3793), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ims'], {}), '(ims)\n', (3788, 3793), True, 'import matplotlib.pyplot as plt\n'), ((3798, 3829), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Soma X Coordinate"""'], {}), "('Soma X Coordinate')\n", (3808, 3829), True, 'import matplotlib.pyplot as plt\n'), ((3834, 3865), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Soma Y Coordinate"""'], {}), "('Soma Y Coordinate')\n", (3844, 3865), True, 'import matplotlib.pyplot as plt\n'), ((3870, 3903), 'matplotlib.pyplot.title', 'plt.title', (['"""Max Firing Rate (Hz)"""'], {}), "('Max Firing Rate (Hz)')\n", (3879, 3903), True, 'import matplotlib.pyplot as plt\n'), ((3920, 3932), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3930, 3932), True, 'import matplotlib.pyplot as plt\n'), ((3991, 4009), 'numpy.mean', 'np.mean', (['max_rates'], {}), '(max_rates)\n', (3998, 4009), True, 'import numpy as np\n'), ((4033, 4053), 'numpy.median', 'np.median', (['max_rates'], {}), '(max_rates)\n', (4042, 4053), True, 'import numpy as np\n'), ((4074, 4091), 'numpy.min', 'np.min', (['max_rates'], {}), '(max_rates)\n', (4080, 4091), True, 'import numpy as np\n'), ((4112, 4129), 'numpy.max', 'np.max', (['max_rates'], {}), '(max_rates)\n', (4118, 4129), True, 'import numpy as np\n'), ((4134, 4164), 'matplotlib.pyplot.hist', 'plt.hist', (['max_rates'], {'bins': 'bins'}), 
'(max_rates, bins=bins)\n', (4142, 4164), True, 'import matplotlib.pyplot as plt\n'), ((4169, 4221), 'matplotlib.pyplot.axvline', 'plt.axvline', (['max_rates_mean'], {'color': '"""k"""', 'label': '"""mean"""'}), "(max_rates_mean, color='k', label='mean')\n", (4180, 4221), True, 'import matplotlib.pyplot as plt\n'), ((4226, 4282), 'matplotlib.pyplot.axvline', 'plt.axvline', (['max_rates_median'], {'color': '"""r"""', 'label': '"""median"""'}), "(max_rates_median, color='r', label='median')\n", (4237, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4287, 4321), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Max firing Rate (Hz)"""'], {}), "('Max firing Rate (Hz)')\n", (4297, 4321), True, 'import matplotlib.pyplot as plt\n'), ((4326, 4346), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (4336, 4346), True, 'import matplotlib.pyplot as plt\n'), ((4500, 4512), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4510, 4512), True, 'import matplotlib.pyplot as plt\n'), ((5247, 5257), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5255, 5257), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2879), 'pystorm.hal.HAL.get_spikes', 'HAL.get_spikes', ([], {}), '()\n', (2877, 2879), False, 'from pystorm.hal import HAL\n'), ((3093, 3128), 'numpy.array', 'np.array', (['hal_spikes[pool][nrn_idx]'], {}), '(hal_spikes[pool][nrn_idx])\n', (3101, 3128), True, 'import numpy as np\n'), ((4831, 4872), 'utils.file_io.load_txt_data', 'load_txt_data', (["(DATA_DIR + 'max_rates.txt')"], {}), "(DATA_DIR + 'max_rates.txt')\n", (4844, 4872), False, 'from utils.file_io import load_txt_data, set_data_dir\n'), ((4975, 4992), 'numpy.zeros', 'np.zeros', (['NEURONS'], {}), '(NEURONS)\n', (4983, 4992), True, 'import numpy as np\n'), ((5110, 5159), 'numpy.savetxt', 'np.savetxt', (["(DATA_DIR + 'max_rates.txt')", 'max_rates'], {}), "(DATA_DIR + 'max_rates.txt', max_rates)\n", (5120, 5159), True, 'import numpy as np\n'), ((3679, 3695), 
'numpy.sqrt', 'np.sqrt', (['neurons'], {}), '(neurons)\n', (3686, 3695), True, 'import numpy as np\n')] |
import autoaim
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import time
# Devices
if torch.cuda.is_available():
device = torch.device('cuda')
print('Device: GPU.')
else:
device = torch.device('cpu')
print('Device: CPU.')
# device = torch.device('cpu')
# Dataset
def preprocess(t, h):
# shuffling
r = torch.randperm(t.size(0))
t = t[r, :]
# GIVE ME MORE!!
_ = t[:, :-1]
t = torch.cat((_, t[:, -1:]), 1)
return t
def load(filename):
header, data = autoaim.helpers.read_csv(filename)
data = torch.Tensor(data).to(device)
data = preprocess(data,header)
x = data[:, :-1]
y = data[:, -1:]
return x, y, header
x_train, y_train, header = load('test_lamp_train.csv')
x_test, y_test, _ = load('test_lamp_test.csv')
train_dataset_size = x_train.size(0)
test_dataset_size = x_test.size(0)
input_size = x_train.size(1)
output_size = 1
print('====== Input ======')
print('train_dataset_size: {}'.format(train_dataset_size))
print('test_dataset_size: {}'.format(test_dataset_size))
print('input_size: {}'.format(input_size))
# Model
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(input_size, output_size)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x):
y_pred = self.sigmoid(self.linear(x))
return y_pred
# Training loop
@autoaim.helpers.time_this
def train(learning_rate, epoch_num):
# Loss and optimizer
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Train loop
print('====== Config ======')
print('learning_rate: {}'.format(learning_rate))
print('epoch_num: {}'.format(epoch_num))
for epoch in range(epoch_num):
# Forward pass
y_pred = model(x_train)
loss = criterion(y_pred, y_train)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch == 0 or (epoch+1) % (epoch_num/10) == 0:
y_pred = model(x_test)
loss_test = criterion(y_pred, y_test)
print("Epoch: [{!s:6}/{!s:6}], Loss: {:.2f}, Test loss: {:.2f}"
.format(epoch+1, epoch_num, loss, loss_test))
def analyse(x_anls, y_anls, threshold):
# Predict
y_pred = model(x_anls)
# Convert to numpy array
x_anls, y_anls, y_pred = (t.numpy() for t in [x_anls, y_anls, y_pred])
# Sort
_1, _2 = np.where(y_anls == 1)[0], np.where(y_anls == 0)[0]
x_anls, y_anls, y_pred = (np.concatenate(
(t[_1, :], t[_2, :])) for t in (x_anls, y_anls, y_pred))
# Distribution
print('Data Distribution')
x = np.arange(0, x_anls.shape[0], dtype=int)
# x_anls = np.arange(0, 40, dtype=int)
plt.plot(x, y_pred[x, :], 'bo', label='Predict')
plt.plot(x, y_anls[x, :], 'ro', label='Data')
plt.legend()
plt.show()
# ROC
print('ROC')
num_positive = len(np.where(y_anls == 1)[0])
num_negative = len(np.where(y_anls == 0)[0])
_ = np.where(y_pred >= threshold)[0]
num_true_positive = len(np.where(y_anls[_, :] == 1)[0])
num_false_positive = len(np.where(y_anls[_, :] == 0)[0])
_ = np.where(y_pred < threshold)[0]
num_false_negative = len(np.where(y_anls[_, :] == 1)[0])
num_true_negative = len(np.where(y_anls[_, :] == 0)[0])
print('true positive: {}'.format(num_true_positive))
print('false positive: {}'.format(num_false_positive))
print('true negative: {}'.format(num_true_negative))
print('false negative: {}\n'.format(num_false_negative))
# Weight
x = np.linspace(0, 1)
w = [wi.data.cpu() for wi in model.parameters()]
w = torch.cat((w[0][0], w[1])).numpy()
print('Weight')
b = w[-1]
for i in range(input_size):
a = w[i]
y = a*x+b
plt.plot(x, (y-y.min())/(y.max()-y.min()), linestyle='-')
plt.plot(x_anls[:, i], y_pred, 'bo', label='Predict')
plt.plot(x_anls[:, i], y_anls, 'ro', label='Data')
plt.legend()
plt.show()
_1, _2 = i % (len(header) - 1), int((i+1)/len(header)+1)
print('w[{}] {} #{}: {}'.format(i, header[_1], _2, w[i]))
# Save
def save(filename):
dataloader = autoaim.DataLoader()
autoaim.helpers.new_csv(filename, autoaim.aimmat.enabled_props)
w = [wi.data.cpu() for wi in model.parameters()]
w = torch.cat((w[0][0], w[1])).numpy()
autoaim.helpers.append_csv(filename, w)
def test(n):
# CPU
start_time = time.time()
a = torch.ones(n, n)
for _ in range(1000):
a += a
elapsed_time = time.time() - start_time
print('CPU time = ', elapsed_time)
# GPU
start_time = time.time()
b = torch.ones(n, n).cuda()
for _ in range(1000):
b += b
elapsed_time = time.time() - start_time
print('GPU time = ', elapsed_time)
if __name__ == '__main__':
test(2048)
model = Model().to(device)
train(0.01, 10000)
with torch.no_grad():
# x_test, y_test,*_ = load('test.csv', 0)
save('weight.csv')
# analyse(x_test, y_test, 0.5)
| [
"autoaim.helpers.read_csv",
"torch.cat",
"numpy.arange",
"torch.device",
"torch.no_grad",
"torch.ones",
"torch.nn.BCELoss",
"torch.Tensor",
"numpy.linspace",
"torch.nn.Linear",
"autoaim.DataLoader",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"torch.cuda.is_available",
"autoaim... | [((186, 211), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (209, 211), False, 'import torch\n'), ((226, 246), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (238, 246), False, 'import torch\n'), ((292, 311), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (304, 311), False, 'import torch\n'), ((518, 546), 'torch.cat', 'torch.cat', (['(_, t[:, -1:])', '(1)'], {}), '((_, t[:, -1:]), 1)\n', (527, 546), False, 'import torch\n'), ((601, 635), 'autoaim.helpers.read_csv', 'autoaim.helpers.read_csv', (['filename'], {}), '(filename)\n', (625, 635), False, 'import autoaim\n'), ((1616, 1628), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1626, 1628), True, 'import torch.nn as nn\n'), ((2810, 2850), 'numpy.arange', 'np.arange', (['(0)', 'x_anls.shape[0]'], {'dtype': 'int'}), '(0, x_anls.shape[0], dtype=int)\n', (2819, 2850), True, 'import numpy as np\n'), ((2898, 2946), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_pred[x, :]', '"""bo"""'], {'label': '"""Predict"""'}), "(x, y_pred[x, :], 'bo', label='Predict')\n", (2906, 2946), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2996), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_anls[x, :]', '"""ro"""'], {'label': '"""Data"""'}), "(x, y_anls[x, :], 'ro', label='Data')\n", (2959, 2996), True, 'import matplotlib.pyplot as plt\n'), ((3001, 3013), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3011, 3013), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3028), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3026, 3028), True, 'import matplotlib.pyplot as plt\n'), ((3734, 3751), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (3745, 3751), True, 'import numpy as np\n'), ((4353, 4373), 'autoaim.DataLoader', 'autoaim.DataLoader', ([], {}), '()\n', (4371, 4373), False, 'import autoaim\n'), ((4378, 4441), 'autoaim.helpers.new_csv', 'autoaim.helpers.new_csv', (['filename', 
'autoaim.aimmat.enabled_props'], {}), '(filename, autoaim.aimmat.enabled_props)\n', (4401, 4441), False, 'import autoaim\n'), ((4542, 4581), 'autoaim.helpers.append_csv', 'autoaim.helpers.append_csv', (['filename', 'w'], {}), '(filename, w)\n', (4568, 4581), False, 'import autoaim\n'), ((4624, 4635), 'time.time', 'time.time', ([], {}), '()\n', (4633, 4635), False, 'import time\n'), ((4644, 4660), 'torch.ones', 'torch.ones', (['n', 'n'], {}), '(n, n)\n', (4654, 4660), False, 'import torch\n'), ((4814, 4825), 'time.time', 'time.time', ([], {}), '()\n', (4823, 4825), False, 'import time\n'), ((1314, 1354), 'torch.nn.Linear', 'torch.nn.Linear', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (1329, 1354), False, 'import torch\n'), ((1378, 1396), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (1394, 1396), False, 'import torch\n'), ((2670, 2706), 'numpy.concatenate', 'np.concatenate', (['(t[_1, :], t[_2, :])'], {}), '((t[_1, :], t[_2, :]))\n', (2684, 2706), True, 'import numpy as np\n'), ((3163, 3192), 'numpy.where', 'np.where', (['(y_pred >= threshold)'], {}), '(y_pred >= threshold)\n', (3171, 3192), True, 'import numpy as np\n'), ((3325, 3353), 'numpy.where', 'np.where', (['(y_pred < threshold)'], {}), '(y_pred < threshold)\n', (3333, 3353), True, 'import numpy as np\n'), ((4023, 4076), 'matplotlib.pyplot.plot', 'plt.plot', (['x_anls[:, i]', 'y_pred', '"""bo"""'], {'label': '"""Predict"""'}), "(x_anls[:, i], y_pred, 'bo', label='Predict')\n", (4031, 4076), True, 'import matplotlib.pyplot as plt\n'), ((4085, 4135), 'matplotlib.pyplot.plot', 'plt.plot', (['x_anls[:, i]', 'y_anls', '"""ro"""'], {'label': '"""Data"""'}), "(x_anls[:, i], y_anls, 'ro', label='Data')\n", (4093, 4135), True, 'import matplotlib.pyplot as plt\n'), ((4144, 4156), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4154, 4156), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4173, 4175), 
True, 'import matplotlib.pyplot as plt\n'), ((4721, 4732), 'time.time', 'time.time', ([], {}), '()\n', (4730, 4732), False, 'import time\n'), ((4918, 4929), 'time.time', 'time.time', ([], {}), '()\n', (4927, 4929), False, 'import time\n'), ((5090, 5105), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5103, 5105), False, 'import torch\n'), ((647, 665), 'torch.Tensor', 'torch.Tensor', (['data'], {}), '(data)\n', (659, 665), False, 'import torch\n'), ((2589, 2610), 'numpy.where', 'np.where', (['(y_anls == 1)'], {}), '(y_anls == 1)\n', (2597, 2610), True, 'import numpy as np\n'), ((2615, 2636), 'numpy.where', 'np.where', (['(y_anls == 0)'], {}), '(y_anls == 0)\n', (2623, 2636), True, 'import numpy as np\n'), ((3080, 3101), 'numpy.where', 'np.where', (['(y_anls == 1)'], {}), '(y_anls == 1)\n', (3088, 3101), True, 'import numpy as np\n'), ((3129, 3150), 'numpy.where', 'np.where', (['(y_anls == 0)'], {}), '(y_anls == 0)\n', (3137, 3150), True, 'import numpy as np\n'), ((3224, 3251), 'numpy.where', 'np.where', (['(y_anls[_, :] == 1)'], {}), '(y_anls[_, :] == 1)\n', (3232, 3251), True, 'import numpy as np\n'), ((3285, 3312), 'numpy.where', 'np.where', (['(y_anls[_, :] == 0)'], {}), '(y_anls[_, :] == 0)\n', (3293, 3312), True, 'import numpy as np\n'), ((3386, 3413), 'numpy.where', 'np.where', (['(y_anls[_, :] == 1)'], {}), '(y_anls[_, :] == 1)\n', (3394, 3413), True, 'import numpy as np\n'), ((3446, 3473), 'numpy.where', 'np.where', (['(y_anls[_, :] == 0)'], {}), '(y_anls[_, :] == 0)\n', (3454, 3473), True, 'import numpy as np\n'), ((3813, 3839), 'torch.cat', 'torch.cat', (['(w[0][0], w[1])'], {}), '((w[0][0], w[1]))\n', (3822, 3839), False, 'import torch\n'), ((4503, 4529), 'torch.cat', 'torch.cat', (['(w[0][0], w[1])'], {}), '((w[0][0], w[1]))\n', (4512, 4529), False, 'import torch\n'), ((4834, 4850), 'torch.ones', 'torch.ones', (['n', 'n'], {}), '(n, n)\n', (4844, 4850), False, 'import torch\n')] |
from typing import Dict, Tuple, Optional, List
import os
from abc import ABCMeta, abstractmethod
import numpy as np
import json
from scipy.sparse import csr_matrix, dok_matrix
import panqec
from panqec.bpauli import bcommute, get_effective_error
from panqec import bsparse
os.environ['PANQEC_ROOT_DIR'] = os.path.dirname(panqec.__file__)
Operator = Dict[Tuple, str] # Coordinate to pauli ('X', 'Y' or 'Z')
class StabilizerCode(metaclass=ABCMeta):
"""Abstract class for generic stabilizer codes (CSS or not)
Any subclass should override the following four methods:
- get_qubit_coordinates() to define all the coordinates in the lattice
that contain qubits
- get_stabilizer_coordinates() to define all the coordinates in the lattice
that contain stabilizers
- qubit_axis(location) to return the axis of a qubit at a given location
(when qubit have an orientation in space, for instance when they are
edges)
Using only those methods, a StabilizerCode will then automatically create
the corresponding parity-check matrix (in self.stabilizers) and can be used
to make a visualization in the GUI or calculate thresholds.
"""
X_AXIS = 0
Y_AXIS = 1
Z_AXIS = 2
def __init__(
self, L_x: int,
L_y: Optional[int] = None,
L_z: Optional[int] = None,
deformed_axis: Optional[str] = None
):
"""Constructor for the StabilizerCode class
Parameters
----------
L_x : int
Dimension of the lattice in the x direction (or in all directions
if L_y and L_z are not given)
L_y: int, optional
Dimension of the lattice in the y direction
L_z: int, optional
Dimension of the lattice in the z direction
deformed_axis: str, optional
If given, will determine whether to apply a Clifford deformation on
this axis.
The axis is a string in ['x', 'y', 'z'].
Can be used to easily create codes such as the XZZX surface code
(arXiv: 2009.07851)
"""
if L_y is None:
L_y = L_x
if L_z is None:
L_z = L_x
self._deformed_axis = deformed_axis
self._size: Tuple
if self.dimension == 2:
self._size = (L_x, L_y)
else:
self._size = (L_x, L_y, L_z)
self._qubit_coordinates: List = []
self._stabilizer_coordinates: List[Tuple] = []
self._qubit_index: Dict[Tuple, int] = {}
self._stabilizer_index: Dict[Tuple, int] = {}
self._stabilizer_matrix = bsparse.empty_row(2*self.n)
self._Hx = bsparse.empty_row(self.n)
self._Hz = bsparse.empty_row(self.n)
self._logicals_x: Optional[np.ndarray] = None
self._logicals_z: Optional[np.ndarray] = None
self._is_css: Optional[bool] = None
self._x_indices: Optional[np.ndarray] = None
self._z_indices: Optional[np.ndarray] = None
self._d: Optional[int] = None
self._stabilizer_types: Optional[List[str]] = None
self.colormap = {'red': '0xFF4B3E',
'blue': '0x48BEFF',
'green': '0x058C42',
'pink': '0xffbcbc',
'white': '0xf2f2fc',
'gold': '0xf1c232',
'coral': '0xFA824C',
'light-yellow': '0xFAFAC6',
'salmon': '0xe79e90',
'light-orange': '0xFA824C',
'orange': '0xfa7921'}
@property
@abstractmethod
def dimension(self) -> int:
"""Dimension of the code (usually 2 or 3)"""
@property
@abstractmethod
def label(self) -> str:
"""Label uniquely identifying a code, including its lattice dimensions
Example: 'Toric 3D {Lx}x{Ly}x{Lz}'
"""
@property
def id(self) -> str:
"""Returns a string identifying the class (usually the code name)"""
return self.__class__.__name__
@property
def n(self) -> int:
"""Number of physical qubits"""
return len(self.qubit_coordinates)
@property
def k(self) -> int:
"""Number of logical qubits"""
return self.logicals_x.shape[0]
@property
def d(self) -> int:
"""Distance of the code"""
if self._d is None:
weights_z = np.sum(
np.logical_or(
self.logicals_z[:, :self.n],
self.logicals_z[:, self.n:]
),
axis=1
)
weights_x = np.sum(
np.logical_or(
self.logicals_x[:, :self.n],
self.logicals_x[:, self.n:]
), axis=1
)
self._d = min(np.min(weights_x), np.min(weights_z))
return self._d
@property
def qubit_coordinates(self) -> List[Tuple]:
"""List of all the coordinates that contain a qubit"""
if len(self._qubit_coordinates) == 0:
self._qubit_coordinates = self.get_qubit_coordinates()
return self._qubit_coordinates
@property
def stabilizer_coordinates(self) -> List[Tuple]:
"""List of all the coordinates that contain a stabilizer"""
if len(self._stabilizer_coordinates) == 0:
self._stabilizer_coordinates = self.get_stabilizer_coordinates()
return self._stabilizer_coordinates
@property
def qubit_index(self) -> Dict[Tuple, int]:
"""Dictionary that assigns an index to a given qubit location"""
if len(self._qubit_index) == 0:
self._qubit_index = {
loc: i for i, loc in enumerate(self.qubit_coordinates)
}
return self._qubit_index
@property
def stabilizer_index(self) -> Dict[Tuple, int]:
"""Dictionary that assigns an index to a given stabilizer location"""
if len(self._stabilizer_index) == 0:
self._stabilizer_index = {
loc: i for i, loc in enumerate(self.stabilizer_coordinates)
}
return self._stabilizer_index
@property
def n_stabilizers(self) -> int:
"""Number of stabilizer generators"""
return len(self.stabilizer_index)
@property
def logicals_x(self) -> np.ndarray:
"""Logical X operator, as a k x 2n sparse matrix in the binary
symplectic format, where k is the number of logical X operators,
and n the number of qubits.
"""
if self._logicals_x is None:
logical_ops = self.get_logicals_x()
k = len(logical_ops)
self._logicals_x = np.zeros((k, 2*self.n), dtype='uint8')
for i, logical_op in enumerate(logical_ops):
self._logicals_x[i] = self.to_bsf(logical_op)
return self._logicals_x
@property
def logicals_z(self) -> np.ndarray:
"""Logical Z operators in the binary symplectic format.
It is a sparse matrix of dimension k x 2n, where k is the number
of Z logicals and n the number of qubits.
"""
if self._logicals_z is None:
logical_ops = self.get_logicals_z()
k = len(logical_ops)
self._logicals_z = np.zeros((k, 2*self.n), dtype='uint8')
for i, logical_op in enumerate(logical_ops):
self._logicals_z[i] = self.to_bsf(logical_op)
return self._logicals_z
@property
def is_css(self) -> bool:
"""Determines if a code is CSS, i.e. if it has separate X
and Z stabilizers
"""
if self._is_css is None:
self._is_css = not np.any(
np.logical_and(self.x_indices, self.z_indices)
)
return self._is_css
@property
def stabilizer_matrix(self) -> csr_matrix:
"""Parity-check matrix of the code in the binary symplectic format.
It is a sparse matrix of dimension k x 2n, where k is the total number
of stabilizers and n the number of qubits
"""
if bsparse.is_empty(self._stabilizer_matrix):
sparse_dict: Dict = dict()
self._stabilizer_matrix = dok_matrix(
(self.n_stabilizers, 2*self.n),
dtype='uint8'
)
for i_stab, stabilizer_location in enumerate(
self.stabilizer_coordinates
):
stabilizer_op = self.get_stabilizer(
stabilizer_location, deformed_axis=self._deformed_axis
)
for qubit_location in stabilizer_op.keys():
if stabilizer_op[qubit_location] in ['X', 'Y']:
i_qubit = self.qubit_index[qubit_location]
if (i_stab, i_qubit) in sparse_dict.keys():
sparse_dict[(i_stab, i_qubit)] += 1
else:
sparse_dict[(i_stab, i_qubit)] = 1
if stabilizer_op[qubit_location] in ['Y', 'Z']:
i_qubit = self.n + self.qubit_index[qubit_location]
if (i_stab, i_qubit) in sparse_dict.keys():
sparse_dict[(i_stab, i_qubit)] += 1
else:
sparse_dict[(i_stab, i_qubit)] = 1
self._stabilizer_matrix._update(sparse_dict)
self._stabilizer_matrix = self._stabilizer_matrix.tocsr()
self._stabilizer_matrix.data %= 2
return self._stabilizer_matrix
@property
def size(self) -> Tuple:
"""Dimensions of the lattice."""
return self._size
@property
def Hx(self) -> csr_matrix:
"""Parity-check matrix corresponding to the X stabilizers of the code.
It is a sparse matrix of dimension k x n, where k is the number of
X stabilizers and n the number of qubits.
Works only for CSS codes.
"""
if not self.is_css:
raise ValueError("Impossible to extract Hz: the code is not CSS")
if self._Hx.shape[0] == 0:
H = self.stabilizer_matrix[:, :self.n]
self._Hx = H[self.x_indices]
return self._Hx
@property
def Hz(self) -> csr_matrix:
"""Parity-check matrix corresponding to the Z stabilizers of the code.
It is a sparse matrix of dimension k x n, where k is the number of
Z stabilizers and n the number of qubits.
Works only for CSS codes.
"""
if not self.is_css:
raise ValueError("Impossible to extract Hz: the code is not CSS")
if self._Hz.shape[0] == 0:
H = self.stabilizer_matrix[:, self.n:]
self._Hz = H[self.z_indices]
return self._Hz
@property
def x_indices(self) -> np.ndarray:
"""Indices of the X stabilizers in the parity-check matrix,
as a boolean array s.t. x_indices[i] is True if stabilizer H[i]
only contain X operators and False otherwise"""
if self._x_indices is None:
Hx = self.stabilizer_matrix[:, :self.n]
self._x_indices = (Hx.getnnz(1) > 0)
return self._x_indices
@property
def z_indices(self) -> np.ndarray:
"""Indices of the Z stabilizers in the parity-check matrix,
as a boolean array s.t. z_indices[i] is True if stabilizer H[i]
only contain Z operators and False otherwise"""
if self._z_indices is None:
Hz = self.stabilizer_matrix[:, self.n:]
self._z_indices = (Hz.getnnz(1) > 0)
return self._z_indices
def in_codespace(self, error: np.ndarray) -> bool:
"""Check whether or not a given error is in the codespace,
i.e. whether it has a zero syndrome or not.
Parameters
----------
error: np.ndarray
Error as an array of size 2n (where n is the number of qubits)
in the binary symplectic format
Returns
-------
bool
Whether or not the error is in the codespace
"""
return bool(np.all(bcommute(self.stabilizer_matrix, error) == 0))
def logical_errors(self, error: np.ndarray) -> np.ndarray:
"""Return the logical errors, as an array of size 2k
(where k is the number of logicals), such that each component is
1 if and only if it anticommutes with the corresponding logical.
By convention, the first k indices correspond to the X logicals
and the last k to the the Z logicals
Parameters
----------
error: np.ndarray
Error as an array of size 2n (where n is the number of qubits)
in the binary symplectic format
Returns
-------
logical_errors: np.ndarray
Array of size 2k (where k is the number of logicals)
indicating whether the error commute with each X and Z logical.
"""
return get_effective_error(
error, self.logicals_x, self.logicals_z
)
def is_logical_error(self, error) -> bool:
"""Check whether or not a given error is in the codespace,
i.e. whether it has a zero syndrome or not.
Parameters
----------
error: np.ndarray
Error as an array of size 2n (where n is the number of qubits)
in the binary symplectic format
Returns
-------
bool
Whether or not the error is in the codespace
"""
return bool(np.any(self.logical_errors(error) != 0))
def extract_x_syndrome(self, syndrome: np.ndarray) -> np.ndarray:
"""For CSS codes only. Returns the part of the syndrome that
corresponds to X stabilizers.
Parameters
----------
syndrome: np.ndarray
Syndrome as a sparse row of dimension 1xm, where m is the number
of stabilizers.
Returns
-------
x_syndrome: np.ndarray
Syndrome reduced to X stabilizers
"""
return syndrome[self.x_indices]
def extract_z_syndrome(self, syndrome: np.ndarray) -> np.ndarray:
"""For CSS codes only. Returns the part of the syndrome that
corresponds to Z stabilizers.
Parameters
----------
syndrome: np.ndarray
Syndrome as a sparse row of dimension 1xm, where m is the number
of stabilizers.
Returns
-------
z_syndrome: np.ndarray
Syndrome reduced to X stabilizers
"""
return syndrome[self.z_indices]
def to_bsf(self, operator: Operator) -> np.ndarray:
"""Convert an operator (given as a dictionary qubit_location -> pauli)
to an array in the binary symplectic format.
Parameters
----------
operator: Dict[Tuple, str]
Operator given as a dictionary that assigns a Pauli operator
('X', 'Y' or 'Z') to each qubit location in its support
Returns
-------
bsf_operator: np.ndarray
Array of dimension 2n in the binary symplectic format
(where n is the number of qubits)
"""
bsf_operator = np.zeros(2*self.n, dtype=np.uint)
for qubit_location in operator.keys():
if operator[qubit_location] in ['X', 'Y']:
bsf_operator[self.qubit_index[qubit_location]] += 1
if operator[qubit_location] in ['Y', 'Z']:
bsf_operator[self.n + self.qubit_index[qubit_location]] += 1
return bsf_operator
def from_bsf(self, bsf_operator: np.ndarray) -> Operator:
"""Convert an operator given as a sparse row in the binary
symplectic format to a dictionary qubit_location -> pauli.
Parameters
----------
bsf_operator: np.ndarray
Array of dimension (1, 2n) in the binary symplectic format
(where n is the number of qubits)
Returns
-------
operator: Dict[Tuple, str]
Operator given as a dictionary that assigns a Pauli operator
('X', 'Y' or 'Z') to each qubit location in its support
"""
assert (
bsf_operator.shape[0] == 1 or len(bsf_operator.shape) == 1
), "Can only take one operator at a time."
operator = dict()
if len(bsf_operator.shape) == 1:
cols = bsf_operator.nonzero()[0]
else:
rows, cols = bsf_operator.nonzero()
for col in cols:
if col < self.n:
location = self.qubit_coordinates[col]
operator[location] = 'X'
else:
location = self.qubit_coordinates[col - self.n]
if location in operator.keys():
operator[location] = 'Y'
else:
operator[location] = 'Z'
return operator
def measure_syndrome(self, error: np.ndarray) -> np.ndarray:
"""Noiseless syndrome corresponding to a given Pauli error.
Parameters
----------
error: np.ndarray
Error given as an array of dimension 2n in the binary
symplectic format.
Returns
-------
syndrome: np.ndarray
Syndrome, as an array of dimension m (where m is the number
of stabilizers)
"""
return bcommute(self.stabilizer_matrix, error)
def is_stabilizer(self, location: Tuple, stab_type: str = None):
"""Returns whether a given location in the coordinate system
corresponds to a stabilizer or not
"""
_is_stabilizer = (
(location in self.stabilizer_index) and
(stab_type is None or self.stabilizer_type(location) == stab_type)
)
return _is_stabilizer
def is_qubit(self, location: Tuple):
"""Returns whether a given location in the coordinate system
corresponds to a qubit or not. It is done by checking that the input
location is a key in the dictionary `self.qubit_index`.
Parameters
----------
location : Tuple
Location as a tuple of coordinates
Returns
-------
Bool
Whether the location is a qubit in the coordinate system.
"""
return location in self.qubit_index
@abstractmethod
def get_qubit_coordinates(self) -> List[Tuple]:
"""Give the list of all the qubit coordinates, in a coordinate system
that should contain both the qubits and the stabilizers.
This function is used to set the attributes `self.qubit_coordinates`
and `self.qubit_index`.
Returns
-------
qubit_coordinates: List[Tuple]
List of coordinates
"""
@abstractmethod
def get_stabilizer_coordinates(self) -> List[Tuple]:
"""Create list of stabilizer coordinates, in a coordinate system
that should contain both the qubits and the stabilizers.
This function is used to set the attributes
`self.stabilizer_coordinates` and `self.stabilizer_index`.
"""
@abstractmethod
def qubit_axis(self, location: Tuple) -> str:
""" Return the orientation of a qubit sitting at given location
(as a string representing the axis 'x', 'y' or 'z').
Useful when qubits have an orientation in space, for instance when
they are edges, to help establish the visual representation of the
code in the GUI, to simplify the construction of stabilizers,
and to create Clifford deformations.
Parameters
----------
location: Tuple
Location of the qubit in the coordinate system.
Returns
-------
axis: str
Either 'x', 'y' or 'z', depending on the orientation axis of the
qubit.
"""
@abstractmethod
def stabilizer_type(self, location: Tuple) -> str:
""" Returns the type of a stabilizer sitting at a given location.
E.g. 'vertex' or 'face' in toric codes
"""
@abstractmethod
def get_stabilizer(
self, location: Tuple, deformed_axis: str = None
) -> Operator:
""" Returns a stabilizer, formatted as dictionary that assigns a Pauli
operator ('X', 'Y' or 'Z') to each qubit location in the support of
the stabilizer.
For example, for a vertex stabilizer in the 2D toric code, we could
have
`get_stabilizer((1,1)) -> {(1,0): 'X', (0, 1): 'X', (2, 1): 'X',
(1, 2): 'X'}`
Parameters
----------
location: Tuple
Location of the stabilizer in the coordinate system
deformed_axis: str, optional
If given, represents an axis ('x', 'y' or 'z') that we want to
Clifford-deform, by applying a Clifford transformation to all the
qubits oriented along the given axis
(e.g. `deformed_axis='x'` in the 2D toric code could give an
XZZX surface code, where the transformation Pauli X <-> Z
has been applied to all the vertical qubits of the code)
Returns
-------
stabilizer: Dict[Tuple, str]
Dictionary that assigns a Pauli operator ('X', 'Y' or 'Z') to each
qubit location in the support of the stabilizer
"""
@abstractmethod
def get_logicals_x(self) -> List[Operator]:
"""Returns the list of logical X operators, where each operator is a
dictionary that assigns a Pauli operator ('X', 'Y' or 'Z') to each
qubit location in its support.
Returns
-------
logicals: List[Dict[Tuple, str]]
List of dictionaries, where each dictionary assign a Pauli
operator ('X', 'Y' or 'Z') to each qubit location in the support
of the logical operator.
"""
@abstractmethod
def get_logicals_z(self) -> List[Operator]:
"""Returns the list of logical Z operators, where each operator is a
dictionary that assigns a Pauli operator ('X', 'Y' or 'Z') to each
qubit location in its support.
Returns
-------
logicals: List[Dict[Tuple, str]]
List of dictionaries, where each dictionary assign a Pauli
operator ('X', 'Y' or 'Z') to each qubit location in the support
of the logical operator.
"""
def stabilizer_representation(self,
location: Tuple,
rotated_picture=False,
json_file=None) -> Dict:
"""Returns a dictionary of visualization parameters for the input
stabilizer, that can be used by the web visualizer.
It should contain 4 keys:
- 'type': the type of stabilizer, e.g. 'vertex'
- 'location': [x, y, z],
- 'object': the type of object to use for visualization, e.g. 'sphere'
- 'params': a dictionary of parameters for the chosen object
Parameters
----------
location: Tuple
Coordinates of the stabilizer
rotated_picture: bool
For codes that have a rotated picture, can be used to differentiate
the two types visualizations
json_file: str
File with the initial configuration for the code
Returns
-------
representation: Dict
Dictionary to send to the GUI
"""
if json_file is None:
json_file = os.path.join(
os.environ['PANQEC_ROOT_DIR'], 'codes', 'gui-config.json'
)
stab_type = self.stabilizer_type(location)
with open(json_file, 'r') as f:
data = json.load(f)
code_name = self.id
picture = 'rotated' if rotated_picture else 'kitaev'
representation = data[code_name]['stabilizers'][picture][stab_type]
representation['type'] = stab_type
representation['location'] = location
for activation in ['activated', 'deactivated']:
color_name = representation['color'][activation]
representation['color'][activation] = self.colormap[color_name]
return representation
def qubit_representation(self,
location: Tuple,
rotated_picture=False,
json_file=None) -> Dict:
"""Returns a dictionary of visualization parameters for the input
qubit, that can be used by the web visualizer.
- 'location': [x, y, z],
- 'object': the type of object to use for visualization, e.g. 'sphere'
- 'params': a dictionary of parameters for the chosen object
Parameters
----------
location: Tuple
Coordinates of the qubit
rotated_picture: bool
For codes that have a rotated picture, can be used to differentiate
the two types visualizations
json_file: str
File with the initial configuration for the code
Returns
-------
representation: Dict
Dictionary to send to the GUI
"""
if json_file is None:
json_file = os.path.join(
os.environ['PANQEC_ROOT_DIR'], 'codes', 'gui-config.json'
)
with open(json_file, 'r') as f:
data = json.load(f)
code_name = self.id
# if self.id == 'MyToric3DCode':
# print(data)
# print()
# print()
# print(data[code_name])
# print()
# print(data[code_name]['qubits'])
# print(data[code_name]['qubits'][picture])
picture = 'rotated' if rotated_picture else 'kitaev'
representation = data[code_name]['qubits'][picture]
representation['params']['axis'] = self.qubit_axis(location)
representation['location'] = location
for pauli in ['I', 'X', 'Y', 'Z']:
color_name = representation['color'][pauli]
representation['color'][pauli] = self.colormap[color_name]
return representation
def type_index(self, stab_type: str) -> Dict[Tuple, int]:
"""Dictionary of locations and indices for given stabilizer type.
Parameters
----------
stab_type: str
Stabilizer type ot index.
Returns
-------
index: Dict[Tuple, int]
Dictionary of qubit indices for each stabilizer location that
matches the given type.
"""
return {
location: index
for index, location in enumerate(self.stabilizer_coordinates)
if self.stabilizer_type(location) == stab_type
}
@property
def stabilizer_types(self):
if self._stabilizer_types is None:
self._stabilizer_types = list(set(
self.stabilizer_type(location)
for location in self.stabilizer_coordinates
))
return self._stabilizer_types
def site(self, operator: Operator, pauli: str, location: Tuple) -> None:
"""Apply a Pauli on operator at site location.
Note that the operator is a (mutable) dict.
Parameters
----------
operator: Operator
Operator in dictionary representation.
pauli: str
Pauli to apply.
"""
product_map = {
('X', 'Y'): 'Z',
('X', 'Z'): 'Y',
('Y', 'X'): 'Z',
('Y', 'Z'): 'X',
('Z', 'X'): 'Y',
('Z', 'Y'): 'X',
}
if location in operator:
if operator[location] == pauli:
operator.pop(location)
else:
operator[location] = product_map[(operator[location], pauli)]
else:
operator[location] = pauli
| [
"json.load",
"panqec.bsparse.is_empty",
"numpy.logical_and",
"os.path.dirname",
"panqec.bsparse.empty_row",
"numpy.zeros",
"numpy.min",
"panqec.bpauli.get_effective_error",
"numpy.logical_or",
"panqec.bpauli.bcommute",
"scipy.sparse.dok_matrix",
"os.path.join"
] | [((307, 339), 'os.path.dirname', 'os.path.dirname', (['panqec.__file__'], {}), '(panqec.__file__)\n', (322, 339), False, 'import os\n'), ((2628, 2657), 'panqec.bsparse.empty_row', 'bsparse.empty_row', (['(2 * self.n)'], {}), '(2 * self.n)\n', (2645, 2657), False, 'from panqec import bsparse\n'), ((2675, 2700), 'panqec.bsparse.empty_row', 'bsparse.empty_row', (['self.n'], {}), '(self.n)\n', (2692, 2700), False, 'from panqec import bsparse\n'), ((2720, 2745), 'panqec.bsparse.empty_row', 'bsparse.empty_row', (['self.n'], {}), '(self.n)\n', (2737, 2745), False, 'from panqec import bsparse\n'), ((8157, 8198), 'panqec.bsparse.is_empty', 'bsparse.is_empty', (['self._stabilizer_matrix'], {}), '(self._stabilizer_matrix)\n', (8173, 8198), False, 'from panqec import bsparse\n'), ((13073, 13133), 'panqec.bpauli.get_effective_error', 'get_effective_error', (['error', 'self.logicals_x', 'self.logicals_z'], {}), '(error, self.logicals_x, self.logicals_z)\n', (13092, 13133), False, 'from panqec.bpauli import bcommute, get_effective_error\n'), ((15329, 15364), 'numpy.zeros', 'np.zeros', (['(2 * self.n)'], {'dtype': 'np.uint'}), '(2 * self.n, dtype=np.uint)\n', (15337, 15364), True, 'import numpy as np\n'), ((17521, 17560), 'panqec.bpauli.bcommute', 'bcommute', (['self.stabilizer_matrix', 'error'], {}), '(self.stabilizer_matrix, error)\n', (17529, 17560), False, 'from panqec.bpauli import bcommute, get_effective_error\n'), ((6752, 6792), 'numpy.zeros', 'np.zeros', (['(k, 2 * self.n)'], {'dtype': '"""uint8"""'}), "((k, 2 * self.n), dtype='uint8')\n", (6760, 6792), True, 'import numpy as np\n'), ((7347, 7387), 'numpy.zeros', 'np.zeros', (['(k, 2 * self.n)'], {'dtype': '"""uint8"""'}), "((k, 2 * self.n), dtype='uint8')\n", (7355, 7387), True, 'import numpy as np\n'), ((8277, 8336), 'scipy.sparse.dok_matrix', 'dok_matrix', (['(self.n_stabilizers, 2 * self.n)'], {'dtype': '"""uint8"""'}), "((self.n_stabilizers, 2 * self.n), dtype='uint8')\n", (8287, 8336), False, 'from scipy.sparse 
import csr_matrix, dok_matrix\n'), ((23707, 23778), 'os.path.join', 'os.path.join', (["os.environ['PANQEC_ROOT_DIR']", '"""codes"""', '"""gui-config.json"""'], {}), "(os.environ['PANQEC_ROOT_DIR'], 'codes', 'gui-config.json')\n", (23719, 23778), False, 'import os\n'), ((23921, 23933), 'json.load', 'json.load', (['f'], {}), '(f)\n', (23930, 23933), False, 'import json\n'), ((25419, 25490), 'os.path.join', 'os.path.join', (["os.environ['PANQEC_ROOT_DIR']", '"""codes"""', '"""gui-config.json"""'], {}), "(os.environ['PANQEC_ROOT_DIR'], 'codes', 'gui-config.json')\n", (25431, 25490), False, 'import os\n'), ((25581, 25593), 'json.load', 'json.load', (['f'], {}), '(f)\n', (25590, 25593), False, 'import json\n'), ((4482, 4553), 'numpy.logical_or', 'np.logical_or', (['self.logicals_z[:, :self.n]', 'self.logicals_z[:, self.n:]'], {}), '(self.logicals_z[:, :self.n], self.logicals_z[:, self.n:])\n', (4495, 4553), True, 'import numpy as np\n'), ((4698, 4769), 'numpy.logical_or', 'np.logical_or', (['self.logicals_x[:, :self.n]', 'self.logicals_x[:, self.n:]'], {}), '(self.logicals_x[:, :self.n], self.logicals_x[:, self.n:])\n', (4711, 4769), True, 'import numpy as np\n'), ((4877, 4894), 'numpy.min', 'np.min', (['weights_x'], {}), '(weights_x)\n', (4883, 4894), True, 'import numpy as np\n'), ((4896, 4913), 'numpy.min', 'np.min', (['weights_z'], {}), '(weights_z)\n', (4902, 4913), True, 'import numpy as np\n'), ((7776, 7822), 'numpy.logical_and', 'np.logical_and', (['self.x_indices', 'self.z_indices'], {}), '(self.x_indices, self.z_indices)\n', (7790, 7822), True, 'import numpy as np\n'), ((12216, 12255), 'panqec.bpauli.bcommute', 'bcommute', (['self.stabilizer_matrix', 'error'], {}), '(self.stabilizer_matrix, error)\n', (12224, 12255), False, 'from panqec.bpauli import bcommute, get_effective_error\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on April 9, 2013
@author: alfoa
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
import os
import numpy as np
import xarray as xr
from ..utils import InputData, InputTypes, xmlUtils, mathUtils
from .Database import DateBase
class NetCDF(DateBase):
    """
    Stores data in netCDF format
    """

    @classmethod
    def getInputSpecification(cls):
        """
        Method to get a reference to a class that specifies the input data for
        class cls.
        @ In, cls, the class for which we are retrieving the specification
        @ Out, spec, InputData.ParameterInput, class to use for
          specifying input of cls.
        """
        spec = super(NetCDF, cls).getInputSpecification()
        spec.description = r"""File storage format based on NetCDF4 protocol, which is natively compatible
        with xarray DataSets used in RAVEN DataObjects."""
        return spec

    def __init__(self):
        """
        Constructor
        @ In, runInfoDict, dict, info from RunInfo block
        @ Out, None
        """
        super().__init__()
        self.printTag = 'DATABASE-NetCDF' # For printing verbosity labels
        self._format = 'netcdf4' # writing format for disk (xarray "engine" argument)
        self._extension = '.nc'  # file extension associated with this database type

    def saveDataToFile(self, source):
        """
        Saves the given data as database to file.
        @ In, source, DataObjects.DataObject, object to write to file
        @ Out, None
        """
        ds, meta = source.getData()
        # we actually just tell the DataSet to write out as netCDF
        path = self.get_fullpath()
        # TODO set up to use dask for on-disk operations
        # convert metadata into writeable (netCDF attributes only take plain types,
        # so XML metadata is stored as prettified strings)
        for key, xml in meta.items():
            ds.attrs[key] = xmlUtils.prettify(xml.getRoot())
        # get rid of "object" types, which netCDF cannot serialize
        for var in ds:
            if ds[var].dtype == np.dtype(object):
                # is it a string?
                if mathUtils.isAString(ds[var].values[0]):
                    ds[var] = ds[var].astype(str)
        # is there existing data? Read it in and merge it, if so
        # -> we've already wiped the file in initializeDatabase if it's in write mode
        if os.path.isfile(path):
            exists = xr.load_dataset(path)
            if 'RAVEN_sample_ID' in exists:
                # shift new sample IDs so they continue after the stored ones
                floor = int(exists['RAVEN_sample_ID'].values[-1]) + 1
                new = ds['RAVEN_sample_ID'].values + floor
                ds = ds.assign_coords(RAVEN_sample_ID=new)
            # NOTE order matters! This preserves the sampling order in which data was inserted
            # into this database
            ds = xr.concat((exists, ds), 'RAVEN_sample_ID')
        # if this is open somewhere else, we can't write to it
        # TODO is there a way to check if it's writable? I can't find one ...
        try:
            ds.to_netcdf(path, engine=self._format)
        except PermissionError:
            self.raiseAnError(PermissionError, f'NetCDF file "{path}" denied RAVEN permission to write! Is it open in another program?')

    def loadIntoData(self, target):
        """
        Loads this database into the target data object
        @ In, target, DataObjects.DataObjet, object to write data into
        @ Out, None
        """
        # the main data
        # NOTE: DO NOT use open_dataset unless you wrap it in a "with xr.open_dataset(f) as ds"!
        # -> open_dataset does NOT close the file object after loading!
        # -> however, load_dataset fully loads the ds into memory and closes the file.
        ds = xr.load_dataset(self.get_fullpath(), engine=self._format)
        # the meta data, convert from string to xml (inverse of saveDataToFile)
        meta = dict((key, xmlUtils.staticFromString(val)) for key, val in ds.attrs.items())
        # set D.O. properties
        target.setData(ds, meta)

    def addRealization(self, rlz):
        """
        Adds a "row" (or "sample") to this database.
        This is the method to add data to this database.
        Note that rlz can include many more variables than this database actually wants.
        Before actually adding the realization, data is formatted for this data object.
        @ In, rlz, dict, {var:val} format where
          "var" is the variable name as a string,
          "val" is either a float or a np.ndarray of values.
        @ Out, None
        """
        # apparently we're storing samples!
        # -> do we already have data present?
        path = self.get_fullpath()
        if os.path.isfile(path):
            # load data as 100 sample chunks, lazily (not into memory)
            # -> using the argument "chunks" triggers the lazy loading using dask
            # existing = xr.open_dataset(path, chunks={'RAVEN_sample_ID': 100}) # TODO user option
            existing = True
            with xr.open_dataset(path) as ds: # autocloses at end of scope
                # continue sample numbering after the last stored sample
                counter = int(ds.RAVEN_sample_ID.values[-1]) + 1
            else:
                existing = None
                counter = 0
        # NOTE(review): the else above belongs to the if, as reconstructed below.
        # create DS from realization # TODO make a feature of the Realization object
        indexMap = rlz.get('_indexMap', [{}])[0]
        # all index (dimension) variable names used by any variable in this realization
        indices = list(set().union(*(set(x) for x in indexMap.values())))
        # verbose but slower
        xarrs = {}
        for var in rlz:
            # skip bookkeeping entries and the index variables themselves
            if var == '_indexMap' or var in indices + ['SampledVars', 'SampledVarsPb', 'crowDist', 'SamplerType']:
                continue
            # honor the configured variable filter, if any
            if self.variables is not None and var not in self.variables:
                continue
            vals = rlz[var]
            dims = indexMap.get(var, [])
            # dimensionless value stored as a length-1 array: unwrap to a scalar
            if not dims and len(vals) == 1:
                vals = vals[0]
            coords = dict((idx, rlz[idx]) for idx in indexMap.get(var, []))
            xarrs[var] = xr.DataArray(vals, dims=dims, coords=coords).expand_dims(dim={'RAVEN_sample_ID': [counter]})
        rlzDS = xr.Dataset(xarrs)
        if existing:
            with xr.open_dataset(path) as ds: # autocloses at end of scope
                # after research, best approach is concatenating xr.DataSet along RAVEN_sample_ID dim
                new = xr.concat((ds, rlzDS), dim='RAVEN_sample_ID')
        else:
            new = rlzDS
        new.to_netcdf(path) # TODO would appending instead of writing work for new samples? I doubt it.
| [
"numpy.dtype",
"xarray.open_dataset",
"xarray.concat",
"xarray.Dataset",
"os.path.isfile",
"xarray.DataArray",
"xarray.load_dataset"
] | [((2878, 2898), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2892, 2898), False, 'import os\n'), ((5029, 5049), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (5043, 5049), False, 'import os\n'), ((6261, 6278), 'xarray.Dataset', 'xr.Dataset', (['xarrs'], {}), '(xarrs)\n', (6271, 6278), True, 'import xarray as xr\n'), ((2915, 2936), 'xarray.load_dataset', 'xr.load_dataset', (['path'], {}), '(path)\n', (2930, 2936), True, 'import xarray as xr\n'), ((3271, 3313), 'xarray.concat', 'xr.concat', (['(exists, ds)', '"""RAVEN_sample_ID"""'], {}), "((exists, ds), 'RAVEN_sample_ID')\n", (3280, 3313), True, 'import xarray as xr\n'), ((2593, 2609), 'numpy.dtype', 'np.dtype', (['object'], {}), '(object)\n', (2601, 2609), True, 'import numpy as np\n'), ((5318, 5339), 'xarray.open_dataset', 'xr.open_dataset', (['path'], {}), '(path)\n', (5333, 5339), True, 'import xarray as xr\n'), ((6307, 6328), 'xarray.open_dataset', 'xr.open_dataset', (['path'], {}), '(path)\n', (6322, 6328), True, 'import xarray as xr\n'), ((6473, 6518), 'xarray.concat', 'xr.concat', (['(ds, rlzDS)'], {'dim': '"""RAVEN_sample_ID"""'}), "((ds, rlzDS), dim='RAVEN_sample_ID')\n", (6482, 6518), True, 'import xarray as xr\n'), ((6156, 6200), 'xarray.DataArray', 'xr.DataArray', (['vals'], {'dims': 'dims', 'coords': 'coords'}), '(vals, dims=dims, coords=coords)\n', (6168, 6200), True, 'import xarray as xr\n')] |
import pytest
import numpy as np
from msibi.potentials import tail_correction, mie, alpha_array
def test_tail_correction():
    """The smoothed potential must be exactly zero at the outermost point."""
    step = 0.05
    radii = np.arange(0, 2.5, step)
    potential = mie(radii, 1, 1)
    smoothed = tail_correction(radii, potential, r_switch=2.25)
    assert smoothed[-1] == 0.0
assert smooth_V[-1] == 0.0
def test_calc_alpha_array():
    """A linear alpha ramp spans alpha0 down to zero; unknown forms raise."""
    alpha0 = 1.0
    step = 0.1
    radii = np.arange(0, 2.5, step)
    alpha = alpha_array(alpha0, radii, 'linear')
    assert alpha[0] == alpha0
    assert alpha[-1] == 0.0
    with pytest.raises(ValueError):
        alpha = alpha_array(alpha0, radii, 'margaret-thatcher')
| [
"pytest.raises",
"numpy.arange",
"msibi.potentials.alpha_array",
"msibi.potentials.tail_correction",
"msibi.potentials.mie"
] | [((149, 170), 'numpy.arange', 'np.arange', (['(0)', '(2.5)', 'dr'], {}), '(0, 2.5, dr)\n', (158, 170), True, 'import numpy as np\n'), ((179, 191), 'msibi.potentials.mie', 'mie', (['r', '(1)', '(1)'], {}), '(r, 1, 1)\n', (182, 191), False, 'from msibi.potentials import tail_correction, mie, alpha_array\n'), ((207, 243), 'msibi.potentials.tail_correction', 'tail_correction', (['r', 'V'], {'r_switch': '(2.25)'}), '(r, V, r_switch=2.25)\n', (222, 243), False, 'from msibi.potentials import tail_correction, mie, alpha_array\n'), ((344, 365), 'numpy.arange', 'np.arange', (['(0)', '(2.5)', 'dr'], {}), '(0, 2.5, dr)\n', (353, 365), True, 'import numpy as np\n'), ((398, 426), 'msibi.potentials.alpha_array', 'alpha_array', (['alpha0', 'r', 'form'], {}), '(alpha0, r, form)\n', (409, 426), False, 'from msibi.potentials import tail_correction, mie, alpha_array\n'), ((526, 551), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (539, 551), False, 'import pytest\n'), ((569, 597), 'msibi.potentials.alpha_array', 'alpha_array', (['alpha0', 'r', 'form'], {}), '(alpha0, r, form)\n', (580, 597), False, 'from msibi.potentials import tail_correction, mie, alpha_array\n')] |
#!/usr/bin/env python
from __future__ import print_function
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Empty
from nav_msgs.msg import Odometry
from simple_pid import PID
import numpy as np
from time import sleep
import sys
class CoordinateSystem:
    """PID-based position controller driven over ROS topics.

    Subscribes to ``odom`` for pose feedback and publishes velocity commands
    on ``cmd_vel``; takeoff/land/flattrim are triggered via the matching
    Empty-message topics (the topic names suggest a drone platform).
    """

    def __init__(self, speed=0.5, turnSpeed=1):
        """Set up publishers/subscribers and controller state.

        speed: clamp for the per-axis position PID outputs.
        turnSpeed: clamp for the rotation PID output.
        """
        # Latest odometry-derived state; populated by updatePosition().
        self.position = None
        # NOTE(review): self.theta is never assigned after this -- RotMat()
        # reads it and would fail if called. Confirm intended source of theta.
        self.theta = None
        self.velocity = None
        self.odomSubs = rospy.Subscriber('odom', Odometry, self.updatePosition)
        # +1/-1 flag flipped when a rotation completes; it multiplies the PID
        # outputs so commands stay consistent after turning around.
        self.inverted = 1
        # One PID controller per axis (x, y, z); created in takeoff().
        self.pid = list()
        # Position PID gains.
        self.Kp = 0.4
        self.Ki = 0
        self.Kd = 1
        # Tolerance on the orientation error used to finish a rotation.
        self.rotError = 0.001
        # Rotation PID gains.
        self.KpT = 1
        self.KiT = 0
        self.KdT = 0
        self.frequency = 10  # control-loop rate in Hz
        self.rate = rospy.Rate(self.frequency)
        # Commanded twist as [vx, vy, vz, wz].
        self.pose = np.asarray([0,0,0,0])
        self.speed = speed
        self.turnSpeed = turnSpeed
        # High-level state machine: "LAND" / "AIR" / "ROTATING".
        self.state = "LAND"
        self.posePub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        self.takeoffPub = rospy.Publisher('takeoff', Empty, queue_size=1)
        self.flattrimPub = rospy.Publisher('flattrim', Empty, queue_size=1)
        self.landPub = rospy.Publisher('land', Empty, queue_size=1)
        self.empty_msg = Empty()
        self.twist = Twist()

    def updatePosition(self, data):
        """Odometry callback: cache pose/velocity, then run one control step."""
        position = data.pose.pose.position
        orientation = data.pose.pose.orientation
        linear = data.twist.twist.linear
        angular = data.twist.twist.angular
        self.position = np.array([position.x, position.y, position.z])
        self.orientation = np.array([orientation.x,orientation.y,orientation.z])
        self.velocity = np.array([linear.x,linear.y,linear.z,angular.z])
        # NOTE(review): the quaternion w component is used directly as the
        # rotation feedback signal below -- confirm this is intentional.
        self.zTheta = orientation.w
        self.PIDMove()

    def publishTwist(self):
        """Publish self.pose ([vx, vy, vz, wz]) as a Twist on cmd_vel."""
        self.twist.linear.x = self.pose[0]
        self.twist.linear.y = self.pose[1]
        self.twist.linear.z = self.pose[2]
        self.twist.angular.x = 0
        self.twist.angular.y = 0
        self.twist.angular.z = self.pose[3]
        self.posePub.publish(self.twist)

    def RotMat(self):
        """Rotation matrix about z for angle self.theta.

        NOTE(review): self.theta is initialized to None and never updated
        anywhere in this class, so calling this would raise.
        """
        t = self.theta
        return np.asarray([
            [np.cos(t), -np.sin(t), 0],
            [np.sin(t), np.cos(t), 0],
            [0, 0, 1]
        ])

    def rotate(self):
        """Turn to the opposite orientation, blocking until done.

        Creates a PID on the orientation signal (setpoint toggles between
        0 and 1), then waits for PIDMove() -- driven by odometry callbacks --
        to flip self.inverted.
        """
        prev = self.inverted
        setpoint = 0 if prev==1 else 1
        self.pidTheta = PID(self.KpT, self.KiT, self.KdT, setpoint=setpoint, output_limits=(-self.turnSpeed,self.turnSpeed))
        self.state = "ROTATING"
        while(self.inverted==prev):
            self.rate.sleep()
        print("Done Rotating!")
        self.state = "AIR"

    def PIDMove(self):
        """One control step, dispatched on self.state (runs inside the odom callback)."""
        error = np.zeros(3)
        newPose = np.zeros(4)
        if(self.state == "AIR"):
            # Per-axis position hold: feed current position into each PID.
            for i in range(3):
                error[i] = newPose[i] = self.pid[i](self.position[i])
            self.pose = self.inverted*newPose
            if self.inverted == -1:
                self.pose[2] = -self.pose[2] #SO Z stays the same
            # print("Velocity: %s"%np.linalg.norm(error))
            self.publishTwist()
        elif(self.state == "ROTATING"):
            speed = self.pidTheta(self.zTheta)
            newPose[3] = speed
            self.pose = newPose.copy()
            self.publishTwist()
            sp = self.pidTheta.setpoint
            act = self.zTheta
            print("Goal is %s.\nActual is %s\nDifference: %s"%(sp, act, abs(sp-act)))
            # Rotation finished: flip the inversion flag, which unblocks rotate().
            if(abs(sp - act)< self.rotError):
                self.inverted *= -1

    def setPIDDestiny(self, destiny):
        """Retarget the x/y/z PID setpoints to the given position."""
        for i in range(3):
            self.pid[i].setpoint = destiny[i]

    def moveTo(self, destiny): #NEEDS TO IMPLEMENT ROTATION
        """Fly toward destiny ([x, y, z]) and block until within 0.1 units."""
        destiny = np.asarray(destiny)
        print("Current position: %s"%self.position)
        print("Destiny is %s"%destiny)
        self.rate.sleep()
        self.setPIDDestiny(destiny)
        err = np.linalg.norm(destiny-self.position)
        while (err > 0.1):
            self.rate.sleep()
            err = np.linalg.norm(destiny-self.position)
            # print("\nDistance: %s"%err)
        print("Arrived to destiny")

    def takeoff(self):
        """Take off, then create one clamped PID per axis holding the hover position."""
        self.takeoffPub.publish(self.empty_msg)
        sleep(8)  # wait before enabling control so position feedback is meaningful
        destiny = self.position.copy()
        for i in range(3):
            self.pid.append(PID(self.Kp, self.Ki, self.Kd, setpoint=destiny[i], output_limits=(-self.speed,self.speed)))
        self.state = "AIR"

    def land(self):
        """Land; the command is published repeatedly so it is not lost."""
        self.state = "LAND"
        for i in range(100):
            self.landPub.publish(self.empty_msg)

    def flattrim(self):
        """Send the flat-trim calibration command and wait briefly."""
        self.flattrimPub.publish(self.empty_msg)
        sleep(2)
"simple_pid.PID",
"rospy.Subscriber",
"numpy.asarray",
"numpy.zeros",
"rospy.Publisher",
"rospy.Rate",
"std_msgs.msg.Empty",
"geometry_msgs.msg.Twist",
"time.sleep",
"numpy.sin",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos"
] | [((435, 490), 'rospy.Subscriber', 'rospy.Subscriber', (['"""odom"""', 'Odometry', 'self.updatePosition'], {}), "('odom', Odometry, self.updatePosition)\n", (451, 490), False, 'import rospy\n'), ((749, 775), 'rospy.Rate', 'rospy.Rate', (['self.frequency'], {}), '(self.frequency)\n', (759, 775), False, 'import rospy\n'), ((805, 829), 'numpy.asarray', 'np.asarray', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (815, 829), True, 'import numpy as np\n'), ((958, 1005), 'rospy.Publisher', 'rospy.Publisher', (['"""cmd_vel"""', 'Twist'], {'queue_size': '(1)'}), "('cmd_vel', Twist, queue_size=1)\n", (973, 1005), False, 'import rospy\n'), ((1033, 1080), 'rospy.Publisher', 'rospy.Publisher', (['"""takeoff"""', 'Empty'], {'queue_size': '(1)'}), "('takeoff', Empty, queue_size=1)\n", (1048, 1080), False, 'import rospy\n'), ((1108, 1156), 'rospy.Publisher', 'rospy.Publisher', (['"""flattrim"""', 'Empty'], {'queue_size': '(1)'}), "('flattrim', Empty, queue_size=1)\n", (1123, 1156), False, 'import rospy\n'), ((1180, 1224), 'rospy.Publisher', 'rospy.Publisher', (['"""land"""', 'Empty'], {'queue_size': '(1)'}), "('land', Empty, queue_size=1)\n", (1195, 1224), False, 'import rospy\n'), ((1259, 1266), 'std_msgs.msg.Empty', 'Empty', ([], {}), '()\n', (1264, 1266), False, 'from std_msgs.msg import Empty\n'), ((1288, 1295), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (1293, 1295), False, 'from geometry_msgs.msg import Twist\n'), ((1534, 1580), 'numpy.array', 'np.array', (['[position.x, position.y, position.z]'], {}), '([position.x, position.y, position.z])\n', (1542, 1580), True, 'import numpy as np\n'), ((1608, 1663), 'numpy.array', 'np.array', (['[orientation.x, orientation.y, orientation.z]'], {}), '([orientation.x, orientation.y, orientation.z])\n', (1616, 1663), True, 'import numpy as np\n'), ((1686, 1737), 'numpy.array', 'np.array', (['[linear.x, linear.y, linear.z, angular.z]'], {}), '([linear.x, linear.y, linear.z, angular.z])\n', (1694, 1737), True, 'import numpy as 
np\n'), ((2423, 2529), 'simple_pid.PID', 'PID', (['self.KpT', 'self.KiT', 'self.KdT'], {'setpoint': 'setpoint', 'output_limits': '(-self.turnSpeed, self.turnSpeed)'}), '(self.KpT, self.KiT, self.KdT, setpoint=setpoint, output_limits=(-self.\n turnSpeed, self.turnSpeed))\n', (2426, 2529), False, 'from simple_pid import PID\n'), ((2721, 2732), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2729, 2732), True, 'import numpy as np\n'), ((2751, 2762), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (2759, 2762), True, 'import numpy as np\n'), ((3767, 3786), 'numpy.asarray', 'np.asarray', (['destiny'], {}), '(destiny)\n', (3777, 3786), True, 'import numpy as np\n'), ((3954, 3993), 'numpy.linalg.norm', 'np.linalg.norm', (['(destiny - self.position)'], {}), '(destiny - self.position)\n', (3968, 3993), True, 'import numpy as np\n'), ((4263, 4271), 'time.sleep', 'sleep', (['(8)'], {}), '(8)\n', (4268, 4271), False, 'from time import sleep\n'), ((4695, 4703), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (4700, 4703), False, 'from time import sleep\n'), ((4067, 4106), 'numpy.linalg.norm', 'np.linalg.norm', (['(destiny - self.position)'], {}), '(destiny - self.position)\n', (4081, 4106), True, 'import numpy as np\n'), ((4366, 4463), 'simple_pid.PID', 'PID', (['self.Kp', 'self.Ki', 'self.Kd'], {'setpoint': 'destiny[i]', 'output_limits': '(-self.speed, self.speed)'}), '(self.Kp, self.Ki, self.Kd, setpoint=destiny[i], output_limits=(-self.\n speed, self.speed))\n', (4369, 4463), False, 'from simple_pid import PID\n'), ((2191, 2200), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2197, 2200), True, 'import numpy as np\n'), ((2231, 2240), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2237, 2240), True, 'import numpy as np\n'), ((2242, 2251), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2248, 2251), True, 'import numpy as np\n'), ((2203, 2212), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2209, 2212), True, 'import numpy as np\n')] |
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import unittest
from pathlib import Path
import sys
from tensorflow import keras
import tensorflow as tf
import numpy as np
sys.path.append(str(Path(__file__).absolute().parent.parent))
from model.model_editor import ModelEditor
class CopyModelWeightsTest(unittest.TestCase):
    """Tests that ModelEditor reproduces the original model's weights."""

    def test_copy_model_with_weights(self):
        """An identity edit (user_fn returning None) yields identical predictions."""
        # example usage
        initial_model = keras.applications.resnet50.ResNet50(
            weights='imagenet', input_shape=(224, 224, 3), classes=1000)
        model_editor = ModelEditor(initial_model)
        copied_model = model_editor.update_model_with_func(user_fn=lambda current_layer, layer_inputs: None)
        # test
        np.random.seed(0)
        image_0 = np.zeros((1, 224, 224, 3))
        image_1 = np.random.random((1, 224, 224, 3)) * 10
        assert((initial_model.predict(image_0) == copied_model.predict(image_0)).all())
        assert((initial_model.predict(image_1) == copied_model.predict(image_1)).all())

    # test for copy weights when some of the layers has changed
    def test_copy_not_all_weights(self):
        """Unchanged layers keep their weights even when new layers are inserted."""
        # example usage
        def add_dummy_layers_fn(current_layer, layer_input):
            # NOTE: `model_editor` is a late-bound closure -- it is assigned
            # below, before update_model_with_func invokes this callback.
            if isinstance(current_layer, keras.layers.InputLayer):
                layer_output = DummyLayer()(layer_input)
                return layer_output
            if current_layer.name == model_editor.model.layers[-1].name:
                layer_output = current_layer(layer_input)
                layer_output = DummyLayer()(layer_output)
                return layer_output
        initial_model = keras.applications.resnet50.ResNet50(
            weights='imagenet', input_shape=(224, 224, 3), classes=1000)
        model_editor = ModelEditor(initial_model)
        copied_model = model_editor.update_model_with_func(user_fn=add_dummy_layers_fn)
        # test
        np.random.seed(0)
        image_0 = np.zeros((1, 224, 224, 3))
        image_1 = np.random.random((1, 224, 224, 3)) * 10
        # Two DummyLayers inserted: one after the input, one after the last layer.
        assert(len(copied_model.layers) == len(initial_model.layers) + 2)
        # DummyLayer is the identity, so predictions must be unchanged.
        assert((initial_model.predict(image_0) == copied_model.predict(image_0)).all())
        assert((initial_model.predict(image_1) == copied_model.predict(image_1)).all())
class ReplaceInputTest(unittest.TestCase):
    """Tests inserting layers directly after the model's input."""

    def test_preappend_layers(self):
        """Layers returned for the InputLayer are prepended to the network."""
        # example usage
        def preappend_layers_fn(current_layer, sub_input):
            if isinstance(current_layer, keras.layers.InputLayer):
                sub_output = keras.layers.MaxPooling2D(name='added1')(sub_input)
                sub_output_1 = keras.layers.Conv2D(filters=32, kernel_size=3, name='added2')(sub_output)
                sub_output_2 = keras.layers.Conv2D(filters=32, kernel_size=3, name='added3')(sub_output)
                sub_output = keras.layers.Add(name='added4')([sub_output_1, sub_output_2])
                return sub_output
        initial_model = initial_model_1()
        model_editor = ModelEditor(initial_model)
        # Only the resulting architecture is compared, so weights are not copied.
        modified_model = model_editor.update_model_with_func(preappend_layers_fn, copy_weights=False)
        # test
        expected_model = expected_model_1()
        assert_same_config(self, modified_model, expected_model)
class ReplaceMiddleLayersTest(unittest.TestCase):
    """Tests swapping layers in the middle of a model, matched by type."""

    def test_replace_layers_by_name(self):
        """MaxPooling2D -> BatchNormalization; Flatten -> MaxPooling2D + Flatten."""
        # example usage
        def middle_layers_fn(current_layer, sub_input):
            if isinstance(current_layer, keras.layers.MaxPooling2D):
                sub_output = keras.layers.BatchNormalization()(sub_input)
                return sub_output
            if isinstance(current_layer, keras.layers.Flatten):
                sub_output = keras.layers.MaxPooling2D()(sub_input)
                sub_output = keras.layers.Flatten()(sub_output)
                return sub_output
        initial_model = initial_model_1()
        model_editor = ModelEditor(initial_model)
        modified_model = model_editor.update_model_with_func(middle_layers_fn)
        # test
        expected_model = expected_model_2()
        assert_same_config(self, modified_model, expected_model)
class ReplaceLastLayerTest(unittest.TestCase):
    """Tests appending a layer after the model's last layer."""

    def test_append_layer(self):
        """A Dense(10) head is appended after the original last layer."""
        # example usage
        def append_layer_fn(current_layer, sub_input):
            # NOTE: `model_editor` is a late-bound closure -- it is assigned
            # below, before update_model_with_func invokes this callback.
            if current_layer.name == model_editor.model.layers[-1].name:
                sub_output = current_layer(sub_input)
                sub_output = keras.layers.Dense(10)(sub_output)
                return sub_output
        initial_model = initial_model_1()
        model_editor = ModelEditor(initial_model)
        modified_model = model_editor.update_model_with_func(append_layer_fn)
        # test
        expected_model = expected_model_3()
        assert_same_config(self, modified_model, expected_model)
class EdgeCasesTest(unittest.TestCase):
    """Identity edits must round-trip models that use multi-output / multi-input layers."""

    def test_layer_with_two_outputs(self):
        """A custom layer producing two outputs survives an identity edit."""
        base_model = initial_model_2()
        editor = ModelEditor(base_model)
        rebuilt_model = editor.update_model_with_func(user_fn=lambda current_layer, layer_inputs: None)
        assert_same_config(self, rebuilt_model, base_model)

    def test_layer_multi_outputs_inputs(self):
        """A model with two inputs and two outputs survives an identity edit."""
        base_model = initial_model_3()
        editor = ModelEditor(base_model)
        rebuilt_model = editor.update_model_with_func(user_fn=lambda current_layer, layer_inputs: None)
        assert_same_config(self, rebuilt_model, base_model)
# assert that model have the same configuration
def assert_same_config(self, modified_model, expected_model):
    """Assert both models have the same layer count and per-layer configs.

    Layer names are excluded from the comparison, since inserted/copied
    layers get auto-generated names.
    """
    self.assertEqual(
        len(modified_model.layers),
        len(expected_model.layers),
        'number of layers in modified model is not the same as in expected model',
    )
    for got_layer, want_layer in zip(modified_model.layers, expected_model.layers):
        got_config = got_layer.get_config()
        want_config = want_layer.get_config()
        got_config.pop('name')
        want_config.pop('name')
        self.assertEqual(
            got_config,
            want_config,
            f'failed on the following layers: {got_layer.name} != {want_layer.name}',
        )
# custom Layers for tests
class DummyLayer(keras.layers.Layer):
    """Identity layer: passes its input through unchanged (test marker)."""

    def __init__(self, **kwargs):
        super(DummyLayer, self).__init__()

    def call(self, input_tensor):
        # No transformation at all -- only the layer's presence matters.
        return input_tensor
class MyTwoOutputLayer(keras.layers.Layer):
    """Custom layer returning two tensors: relu(conv(x)) and relu(bn(conv(x)))."""

    def __init__(self, **kwargs):
        super(MyTwoOutputLayer, self).__init__()
        self.conv2a = keras.layers.Conv2D(32, (1, 1))
        self.bn2a = keras.layers.BatchNormalization()

    def call(self, input_tensor):
        conv_out = self.conv2a(input_tensor)
        bn_out = self.bn2a(conv_out)
        return tf.nn.relu(conv_out), tf.nn.relu(bn_out)
# Toy examples to test ModelManipulator (all expected models are editied initial_model_1)
def initial_model_1():
    """Toy model: input -> maxpool -> two parallel convs -> add -> flatten."""
    inputs = keras.Input(shape=(32, 32, 3))
    pooled = keras.layers.MaxPooling2D()(inputs)
    branch_a = keras.layers.Conv2D(filters=32, kernel_size=3)(pooled)
    branch_b = keras.layers.Conv2D(filters=32, kernel_size=3)(pooled)
    merged = keras.layers.Add()([branch_a, branch_b])
    outputs = keras.layers.Flatten()(merged)
    return keras.Model(inputs, outputs)
def initial_model_2():
    """Toy model exercising a custom layer that yields two outputs."""
    inputs = keras.Input(shape=(32, 32, 3))
    out_a, out_b = MyTwoOutputLayer()(inputs)
    out_a = keras.layers.Conv2D(filters=32, kernel_size=3)(out_a)
    out_b = keras.layers.Conv2D(filters=32, kernel_size=3)(out_b)
    merged = keras.layers.Add()([out_a, out_b])
    return keras.Model(inputs, merged)
def initial_model_3():
    """Toy model with two inputs and two outputs."""
    in_a = keras.Input(shape=(32, 32, 3))
    in_b = keras.Input(shape=(32, 32, 3))
    conv_a = keras.layers.Conv2D(filters=32, kernel_size=3)(in_a)
    conv_b = keras.layers.Conv2D(filters=32, kernel_size=3)(in_b)
    merged = keras.layers.Add()([conv_a, conv_b])
    out_a = keras.layers.Conv2D(filters=32, kernel_size=3)(merged)
    out_b = keras.layers.Conv2D(filters=32, kernel_size=3)(merged)
    return keras.Model(inputs=[in_a, in_b], outputs=[out_a, out_b])
def expected_model_1():
    """Reference for ReplaceInputTest: pool -> (conv, conv) -> add, twice, then flatten."""
    inputs = keras.Input(shape=(32, 32, 3))
    x = keras.layers.MaxPooling2D()(inputs)
    left = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    right = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    x = keras.layers.Add()([left, right])
    x = keras.layers.MaxPooling2D()(x)
    left = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    right = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    x = keras.layers.Add()([left, right])
    outputs = keras.layers.Flatten()(x)
    return keras.Model(inputs, outputs)
def expected_model_2():
    """Reference for ReplaceMiddleLayersTest: batchnorm -> (conv, conv) -> add -> pool -> flatten."""
    inputs = keras.Input(shape=(32, 32, 3))
    x = keras.layers.BatchNormalization()(inputs)
    left = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    right = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    x = keras.layers.Add()([left, right])
    x = keras.layers.MaxPooling2D()(x)
    outputs = keras.layers.Flatten()(x)
    return keras.Model(inputs, outputs)
def expected_model_3():
    """Reference for ReplaceLastLayerTest: initial_model_1 plus a Dense(10) head."""
    inputs = keras.Input(shape=(32, 32, 3))
    x = keras.layers.MaxPooling2D()(inputs)
    left = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    right = keras.layers.Conv2D(filters=32, kernel_size=3)(x)
    x = keras.layers.Add()([left, right])
    x = keras.layers.Flatten()(x)
    outputs = keras.layers.Dense(10)(x)
    return keras.Model(inputs, outputs)
| [
"tensorflow.nn.relu",
"numpy.random.seed",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"numpy.zeros",
"model.mod... | [((7060, 7090), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (7071, 7090), False, 'from tensorflow import keras\n'), ((7454, 7492), 'tensorflow.keras.Model', 'keras.Model', (['model_input', 'model_output'], {}), '(model_input, model_output)\n', (7465, 7492), False, 'from tensorflow import keras\n'), ((7536, 7566), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (7547, 7566), False, 'from tensorflow import keras\n'), ((7780, 7807), 'tensorflow.keras.Model', 'keras.Model', (['model_input', 'x'], {}), '(model_input, x)\n', (7791, 7807), False, 'from tensorflow import keras\n'), ((7853, 7883), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (7864, 7883), False, 'from tensorflow import keras\n'), ((7904, 7934), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (7915, 7934), False, 'from tensorflow import keras\n'), ((8243, 8311), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[model_input_1, model_input_2]', 'outputs': '[x1, x2]'}), '(inputs=[model_input_1, model_input_2], outputs=[x1, x2])\n', (8254, 8311), False, 'from tensorflow import keras\n'), ((8356, 8386), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (8367, 8386), False, 'from tensorflow import keras\n'), ((9047, 9085), 'tensorflow.keras.Model', 'keras.Model', (['model_input', 'model_output'], {}), '(model_input, model_output)\n', (9058, 9085), False, 'from tensorflow import keras\n'), ((9130, 9160), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (9141, 9160), False, 'from tensorflow import keras\n'), ((9591, 9629), 'tensorflow.keras.Model', 'keras.Model', (['model_input', 'model_output'], {}), '(model_input, model_output)\n', (9602, 9629), False, 'from tensorflow import keras\n'), 
((9674, 9704), 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (9685, 9704), False, 'from tensorflow import keras\n'), ((10124, 10162), 'tensorflow.keras.Model', 'keras.Model', (['model_input', 'model_output'], {}), '(model_input, model_output)\n', (10135, 10162), False, 'from tensorflow import keras\n'), ((429, 531), 'tensorflow.keras.applications.resnet50.ResNet50', 'keras.applications.resnet50.ResNet50', ([], {'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)', 'classes': '(1000)'}), "(weights='imagenet', input_shape=(224, \n 224, 3), classes=1000)\n", (465, 531), False, 'from tensorflow import keras\n'), ((563, 589), 'model.model_editor.ModelEditor', 'ModelEditor', (['initial_model'], {}), '(initial_model)\n', (574, 589), False, 'from model.model_editor import ModelEditor\n'), ((723, 740), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (737, 740), True, 'import numpy as np\n'), ((759, 785), 'numpy.zeros', 'np.zeros', (['(1, 224, 224, 3)'], {}), '((1, 224, 224, 3))\n', (767, 785), True, 'import numpy as np\n'), ((1622, 1724), 'tensorflow.keras.applications.resnet50.ResNet50', 'keras.applications.resnet50.ResNet50', ([], {'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)', 'classes': '(1000)'}), "(weights='imagenet', input_shape=(224, \n 224, 3), classes=1000)\n", (1658, 1724), False, 'from tensorflow import keras\n'), ((1756, 1782), 'model.model_editor.ModelEditor', 'ModelEditor', (['initial_model'], {}), '(initial_model)\n', (1767, 1782), False, 'from model.model_editor import ModelEditor\n'), ((1895, 1912), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1909, 1912), True, 'import numpy as np\n'), ((1931, 1957), 'numpy.zeros', 'np.zeros', (['(1, 224, 224, 3)'], {}), '((1, 224, 224, 3))\n', (1939, 1957), True, 'import numpy as np\n'), ((2981, 3007), 'model.model_editor.ModelEditor', 'ModelEditor', (['initial_model'], {}), '(initial_model)\n', (2992, 3007), False, 
'from model.model_editor import ModelEditor\n'), ((3884, 3910), 'model.model_editor.ModelEditor', 'ModelEditor', (['initial_model'], {}), '(initial_model)\n', (3895, 3910), False, 'from model.model_editor import ModelEditor\n'), ((4568, 4594), 'model.model_editor.ModelEditor', 'ModelEditor', (['initial_model'], {}), '(initial_model)\n', (4579, 4594), False, 'from model.model_editor import ModelEditor\n'), ((4973, 4999), 'model.model_editor.ModelEditor', 'ModelEditor', (['initial_model'], {}), '(initial_model)\n', (4984, 4999), False, 'from model.model_editor import ModelEditor\n'), ((5368, 5394), 'model.model_editor.ModelEditor', 'ModelEditor', (['initial_model'], {}), '(initial_model)\n', (5379, 5394), False, 'from model.model_editor import ModelEditor\n'), ((6693, 6724), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(32)', '(1, 1)'], {}), '(32, (1, 1))\n', (6712, 6724), False, 'from tensorflow import keras\n'), ((6745, 6778), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (6776, 6778), False, 'from tensorflow import keras\n'), ((7110, 7137), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {}), '()\n', (7135, 7137), False, 'from tensorflow import keras\n'), ((7172, 7218), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (7191, 7218), False, 'from tensorflow import keras\n'), ((7254, 7300), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (7273, 7300), False, 'from tensorflow import keras\n'), ((7334, 7352), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (7350, 7352), False, 'from tensorflow import keras\n'), ((7406, 7428), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (7426, 7428), False, 'from tensorflow import keras\n'), ((7621, 7667), 
'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (7640, 7667), False, 'from tensorflow import keras\n'), ((7681, 7727), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (7700, 7727), False, 'from tensorflow import keras\n'), ((7740, 7758), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (7756, 7758), False, 'from tensorflow import keras\n'), ((7944, 7990), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (7963, 7990), False, 'from tensorflow import keras\n'), ((8015, 8061), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (8034, 8061), False, 'from tensorflow import keras\n'), ((8085, 8103), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (8101, 8103), False, 'from tensorflow import keras\n'), ((8123, 8169), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (8142, 8169), False, 'from tensorflow import keras\n'), ((8182, 8228), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (8201, 8228), False, 'from tensorflow import keras\n'), ((8406, 8433), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {}), '()\n', (8431, 8433), False, 'from tensorflow import keras\n'), ((8468, 8514), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (8487, 8514), False, 'from tensorflow import keras\n'), ((8550, 8596), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 
'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (8569, 8596), False, 'from tensorflow import keras\n'), ((8630, 8648), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (8646, 8648), False, 'from tensorflow import keras\n'), ((8702, 8729), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {}), '()\n', (8727, 8729), False, 'from tensorflow import keras\n'), ((8765, 8811), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (8784, 8811), False, 'from tensorflow import keras\n'), ((8847, 8893), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (8866, 8893), False, 'from tensorflow import keras\n'), ((8927, 8945), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (8943, 8945), False, 'from tensorflow import keras\n'), ((8999, 9021), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (9019, 9021), False, 'from tensorflow import keras\n'), ((9180, 9213), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (9211, 9213), False, 'from tensorflow import keras\n'), ((9248, 9294), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (9267, 9294), False, 'from tensorflow import keras\n'), ((9330, 9376), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (9349, 9376), False, 'from tensorflow import keras\n'), ((9410, 9428), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (9426, 9428), False, 'from tensorflow import keras\n'), ((9482, 9509), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {}), '()\n', (9507, 9509), False, 'from tensorflow import 
keras\n'), ((9543, 9565), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (9563, 9565), False, 'from tensorflow import keras\n'), ((9724, 9751), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {}), '()\n', (9749, 9751), False, 'from tensorflow import keras\n'), ((9786, 9832), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (9805, 9832), False, 'from tensorflow import keras\n'), ((9868, 9914), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), '(filters=32, kernel_size=3)\n', (9887, 9914), False, 'from tensorflow import keras\n'), ((9948, 9966), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {}), '()\n', (9964, 9966), False, 'from tensorflow import keras\n'), ((10020, 10042), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (10040, 10042), False, 'from tensorflow import keras\n'), ((10076, 10098), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), '(10)\n', (10094, 10098), False, 'from tensorflow import keras\n'), ((804, 838), 'numpy.random.random', 'np.random.random', (['(1, 224, 224, 3)'], {}), '((1, 224, 224, 3))\n', (820, 838), True, 'import numpy as np\n'), ((1976, 2010), 'numpy.random.random', 'np.random.random', (['(1, 224, 224, 3)'], {}), '((1, 224, 224, 3))\n', (1992, 2010), True, 'import numpy as np\n'), ((6895, 6909), 'tensorflow.nn.relu', 'tf.nn.relu', (['x1'], {}), '(x1)\n', (6905, 6909), True, 'import tensorflow as tf\n'), ((6911, 6925), 'tensorflow.nn.relu', 'tf.nn.relu', (['x2'], {}), '(x2)\n', (6921, 6925), True, 'import tensorflow as tf\n'), ((2528, 2568), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {'name': '"""added1"""'}), "(name='added1')\n", (2553, 2568), False, 'from tensorflow import keras\n'), ((2611, 2672), 'tensorflow.keras.layers.Conv2D', 
'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'name': '"""added2"""'}), "(filters=32, kernel_size=3, name='added2')\n", (2630, 2672), False, 'from tensorflow import keras\n'), ((2716, 2777), 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'name': '"""added3"""'}), "(filters=32, kernel_size=3, name='added3')\n", (2735, 2777), False, 'from tensorflow import keras\n'), ((2819, 2850), 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {'name': '"""added4"""'}), "(name='added4')\n", (2835, 2850), False, 'from tensorflow import keras\n'), ((3509, 3542), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (3540, 3542), False, 'from tensorflow import keras\n'), ((3681, 3708), 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', ([], {}), '()\n', (3706, 3708), False, 'from tensorflow import keras\n'), ((3749, 3771), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), '()\n', (3769, 3771), False, 'from tensorflow import keras\n'), ((4433, 4455), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), '(10)\n', (4451, 4455), False, 'from tensorflow import keras\n'), ((202, 216), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (206, 216), False, 'from pathlib import Path\n')] |
def set_seed(seed):
    """Seed every available random number generator for reproducibility.

    Always seeds the stdlib ``random`` module; NumPy and PyTorch are
    seeded only when they are installed, so the function stays safe to
    call in minimal environments.

    Parameters
    ----------
    seed : int
        Seed value forwarded to each library's seeding function.
    """
    import random
    random.seed(seed)
    # Optional dependencies: only a missing module is tolerated. The
    # original bare ``except`` also hid real errors (e.g. an invalid
    # seed type); ``ImportError`` keeps the intended best-effort only.
    try:
        import numpy as np
        np.random.seed(seed)
    except ImportError:
        pass
    try:
        import torch
        torch.manual_seed(seed)
    except ImportError:
        pass
| [
"torch.manual_seed",
"numpy.random.seed",
"random.seed"
] | [((42, 59), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (53, 59), False, 'import random\n'), ((109, 129), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (123, 129), True, 'import numpy as np\n'), ((198, 221), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (215, 221), False, 'import torch\n')] |
# Copyright (c) 2018-2019, NVIDIA CORPORATION.
import pytest
import numpy as np
import pandas as pd
import nvstrings
from utils import assert_eq
# One test invocation per regex below; each pattern exercises a different
# regex feature (classes, escapes, anchors, word boundaries, newlines).
@pytest.mark.parametrize('pattern', ['\\d',
                                     '\\w+',
                                     '\\s',
                                     '\\S',
                                     '^.*\\\\.*$',
                                     '[1-5]+',
                                     '[a-h]+',
                                     '[A-H]+',
                                     '\n',
                                     'b.\\s*\n',
                                     '.*c',
                                     '\\d\\d:\\d\\d:\\d\\d',
                                     '\\d\\d?:\\d\\d?:\\d\\d?',
                                     '[Hh]ello [Ww]orld',
                                     '\\bworld\\b'
                                     ])
def test_contains(pattern):
    """nvstrings.contains must agree with pandas Series.str.contains."""
    # Inputs chosen to hit every pattern above: digits, words, whitespace,
    # backslashes, time-like strings, hello-world variants and newlines.
    s = [
        '5',
        'hej',
        '\t \n',
        '12345',
        '\\',
        'd',
        'c:\\Tools',
        '+27',
        '1c2',
        '1C2',
        '0:00:0',
        '0:0:00',
        '00:0:0',
        '00:00:0',
        '00:0:00',
        '0:00:00',
        '00:00:00',
        'Hello world !',
        'Hello world! ',
        'Hello worldcup !',
        '0123456789',
        '1C2',
        'Xaa',
        'abcdefghxxx',
        'ABCDEFGH',
        'abcdefgh',
        'abc def',
        'abc\ndef',
        'aa\r\nbb\r\ncc\r\n\r\n',
        'abcabc'
    ]
    pstrs = pd.Series(s)
    nvstrs = nvstrings.to_device(s)
    got = nvstrs.contains(pattern)
    expected = pstrs.str.contains(pattern).values
    assert_eq(got, expected)
@pytest.mark.parametrize('find', ["@\\S+", "(?:@|https?://)\\S+"])
@pytest.mark.parametrize('replace', ["***", ""])
def test_replace(find, replace):
    """nvstrings.replace must agree with pandas Series.str.replace."""
    strings = [
        "hello @abc @def world",
        "The quick brown @fox jumps",
        "over the",
        "lazy @dog",
        "hello http://www.world.com I'm here @home",
    ]
    device_strings = nvstrings.to_device(strings)
    result = device_strings.replace(find, replace)
    reference = pd.Series(strings).str.replace(find, replace).values
    assert_eq(result, reference)
@pytest.mark.parametrize('pattern', ['[hH]', '[bB][aA]'])
def test_match(pattern):
    """nvstrings.match must agree with pandas Series.str.match (incl. nulls)."""
    strings = ["hello", "and héllo", None, ""]
    reference = pd.Series(strings).str.match(pattern).values
    result = nvstrings.to_device(strings).match(pattern)
    assert_eq(result, reference)
@pytest.mark.parametrize('pattern', ['a', '[aA]'])
def test_count(pattern):
    """nvstrings.count must agree with pandas Series.str.count."""
    strings = ["hello", "and héllo", 'this was empty', ""]
    reference = pd.Series(strings).str.count(pattern).values
    result = nvstrings.to_device(strings).count(pattern)
    assert_eq(result, reference)
def test_findall():
    """Column 0 of findall holds the first match (or None) per string."""
    strings = ["hello", "and héllo", 'this was empty', ""]
    result = nvstrings.to_device(strings).findall('[aA]')[0]
    assert_eq(result, [None, 'a', 'a', None])
def test_findall_record():
    """findall_record returns one list of matches per input string."""
    strings = ["hello", "and héllo", 'this was empty', "", 'another']
    result = nvstrings.to_device(strings).findall_record('[aA]')
    expected = [[], ['a'], ['a'], [], ['a']]
    for actual, wanted in zip(result, expected):
        assert actual.to_host() == wanted
def test_extract():
    """extract splits flight codes into letter and number capture columns."""
    strings = ['ALA-PEK Flight:HU7934', 'HKT-PEK Flight:CA822',
               'FRA-PEK Flight:LA8769', 'FRA-PEK Flight:LH7332', '', None,
               'Flight:ZZ']
    result = nvstrings.to_device(strings).extract(r'Flight:([A-Z]+)(\d+)')
    # Non-matching inputs (empty, null, digit-less) yield None in both groups.
    expected = np.array([['HU', '7934'],
                         ['CA', '822'],
                         ['LA', '8769'],
                         ['LH', '7332'],
                         [None, None],
                         [None, None],
                         [None, None]])
    for column in range(2):
        assert_eq(result[column], expected[:, column])
def test_extract_record():
    """extract_record yields one (letters, digits) row per input string."""
    strings = ['ALA-PEK Flight:HU7934', 'HKT-PEK Flight:CA822',
               'FRA-PEK Flight:LA8769', 'FRA-PEK Flight:LH7332', '', None,
               'Flight:ZZ']
    result = nvstrings.to_device(strings).extract_record(r'Flight:([A-Z]+)(\d+)')
    # Non-matching inputs (empty, null, digit-less) yield None in both groups.
    expected = np.array([['HU', '7934'],
                         ['CA', '822'],
                         ['LA', '8769'],
                         ['LH', '7332'],
                         [None, None],
                         [None, None],
                         [None, None]])
    for row, wanted in zip(result, expected):
        assert_eq(row, wanted)
# Stacked parametrize: every (find, replace) combination runs as one test.
@pytest.mark.parametrize('find', ['(\\d)(\\d)',
                                  '(\\d)(\\d)',
                                  '(\\d)(\\d)',
                                  '(\\d)(\\d)',
                                  "([a-z])-([a-z])",
                                  "([a-z])-([a-zé])",
                                  "([a-z])-([a-z])",
                                  "([a-z])-([a-zé])"
                                  ])
@pytest.mark.parametrize('replace', [
    '\\1-\\2',
    'V\\2-\\1',
    # Group 3 does not exist in any 'find' pattern above, so pandas raises;
    # these combinations are therefore marked as expected failures.
    pytest.param('V\\1-\\3', marks=[pytest.mark.xfail(
        reason='Pandas fails with this backreference group 3')]),
    pytest.param('V\\3-\\2', marks=[pytest.mark.xfail(
        reason='Pandas fails with this backreference group 3')]),
    "\\1 \\2",
    "\\2 \\1",
    "X\\1+\\2Z",
    "X\\1+\\2Z"
])
def test_replace_with_backrefs(find, replace):
    """replace_with_backrefs must match pandas str.replace with group refs."""
    # Mix of digit pairs, dash-joined words (incl. non-ASCII), empty and null.
    s = ["A543", "Z756", "", None, 'tést-string', 'two-thréé four-fivé',
         'abcd-éfgh', 'tést-string-again']
    pstrs = pd.Series(s)
    nvstrs = nvstrings.to_device(s)
    got = nvstrs.replace_with_backrefs(find, replace)
    expected = pstrs.str.replace(find, replace).values
    assert_eq(got, expected)
# Very long literal patterns; the second is slightly longer than the first
# to probe the boundary where the regex engine may switch code paths.
@pytest.mark.parametrize('pattern', [
    "hello @abc @def world The quick brown @fox jumps over the lazy @dog hello http://www.world.com I'm here @home",
    "hello @abc @def world The quick brown @fox jumps over the lazy @dog hello http://www.world.com I'm here @home zzzz"
])
def test_contains_large_regex(pattern):
    """Large literal regex patterns must still match pandas' behavior."""
    # First string matches the shorter pattern exactly; the long digit and
    # alphabet strings never match.
    s = ["hello @abc @def world The quick brown @fox jumps over the lazy @dog hello http://www.world.com I'm here @home", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890", "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"]
    pstrs = pd.Series(s)
    strs = nvstrings.to_device(s)
    got = strs.contains(pattern)
    expected = pstrs.str.contains(pattern)
    assert_eq(got, expected)
| [
"utils.assert_eq",
"numpy.array",
"pandas.Series",
"pytest.mark.parametrize",
"pytest.mark.xfail",
"nvstrings.to_device"
] | [((150, 383), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pattern"""', "['\\\\d', '\\\\w+', '\\\\s', '\\\\S', '^.*\\\\\\\\.*$', '[1-5]+', '[a-h]+', '[A-H]+',\n '\\n', 'b.\\\\s*\\n', '.*c', '\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d',\n '\\\\d\\\\d?:\\\\d\\\\d?:\\\\d\\\\d?', '[Hh]ello [Ww]orld', '\\\\bworld\\\\b']"], {}), "('pattern', ['\\\\d', '\\\\w+', '\\\\s', '\\\\S',\n '^.*\\\\\\\\.*$', '[1-5]+', '[a-h]+', '[A-H]+', '\\n', 'b.\\\\s*\\n', '.*c',\n '\\\\d\\\\d:\\\\d\\\\d:\\\\d\\\\d', '\\\\d\\\\d?:\\\\d\\\\d?:\\\\d\\\\d?', '[Hh]ello [Ww]orld',\n '\\\\bworld\\\\b'])\n", (173, 383), False, 'import pytest\n'), ((1722, 1787), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""find"""', "['@\\\\S+', '(?:@|https?://)\\\\S+']"], {}), "('find', ['@\\\\S+', '(?:@|https?://)\\\\S+'])\n", (1745, 1787), False, 'import pytest\n'), ((1789, 1836), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""replace"""', "['***', '']"], {}), "('replace', ['***', ''])\n", (1812, 1836), False, 'import pytest\n'), ((2201, 2257), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pattern"""', "['[hH]', '[bB][aA]']"], {}), "('pattern', ['[hH]', '[bB][aA]'])\n", (2224, 2257), False, 'import pytest\n'), ((2572, 2621), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pattern"""', "['a', '[aA]']"], {}), "('pattern', ['a', '[aA]'])\n", (2595, 2621), False, 'import pytest\n'), ((4780, 4955), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""find"""', "['(\\\\d)(\\\\d)', '(\\\\d)(\\\\d)', '(\\\\d)(\\\\d)', '(\\\\d)(\\\\d)', '([a-z])-([a-z])',\n '([a-z])-([a-zé])', '([a-z])-([a-z])', '([a-z])-([a-zé])']"], {}), "('find', ['(\\\\d)(\\\\d)', '(\\\\d)(\\\\d)', '(\\\\d)(\\\\d)',\n '(\\\\d)(\\\\d)', '([a-z])-([a-z])', '([a-z])-([a-zé])', '([a-z])-([a-z])',\n '([a-z])-([a-zé])'])\n", (4803, 4955), False, 'import pytest\n'), ((5965, 6251), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pattern"""', '["hello @abc @def world The quick brown 
@fox jumps over the lazy @dog hello http://www.world.com I\'m here @home"\n ,\n "hello @abc @def world The quick brown @fox jumps over the lazy @dog hello http://www.world.com I\'m here @home zzzz"\n ]'], {}), '(\'pattern\', [\n "hello @abc @def world The quick brown @fox jumps over the lazy @dog hello http://www.world.com I\'m here @home"\n ,\n "hello @abc @def world The quick brown @fox jumps over the lazy @dog hello http://www.world.com I\'m here @home zzzz"\n ])\n', (5988, 6251), False, 'import pytest\n'), ((1556, 1568), 'pandas.Series', 'pd.Series', (['s'], {}), '(s)\n', (1565, 1568), True, 'import pandas as pd\n'), ((1582, 1604), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (1601, 1604), False, 'import nvstrings\n'), ((1694, 1718), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (1703, 1718), False, 'from utils import assert_eq\n'), ((2025, 2037), 'pandas.Series', 'pd.Series', (['s'], {}), '(s)\n', (2034, 2037), True, 'import pandas as pd\n'), ((2051, 2073), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (2070, 2073), False, 'import nvstrings\n'), ((2173, 2197), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (2182, 2197), False, 'from utils import assert_eq\n'), ((2412, 2424), 'pandas.Series', 'pd.Series', (['s'], {}), '(s)\n', (2421, 2424), True, 'import pandas as pd\n'), ((2438, 2460), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (2457, 2460), False, 'import nvstrings\n'), ((2544, 2568), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (2553, 2568), False, 'from utils import assert_eq\n'), ((2788, 2800), 'pandas.Series', 'pd.Series', (['s'], {}), '(s)\n', (2797, 2800), True, 'import pandas as pd\n'), ((2814, 2836), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (2833, 2836), False, 'import nvstrings\n'), ((2920, 2944), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), 
'(got, expected)\n', (2929, 2944), False, 'from utils import assert_eq\n'), ((3054, 3076), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (3073, 3076), False, 'import nvstrings\n'), ((3156, 3180), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (3165, 3180), False, 'from utils import assert_eq\n'), ((3308, 3330), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (3327, 3330), False, 'import nvstrings\n'), ((3716, 3738), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (3735, 3738), False, 'import nvstrings\n'), ((3788, 3908), 'numpy.array', 'np.array', (["[['HU', '7934'], ['CA', '822'], ['LA', '8769'], ['LH', '7332'], [None, None\n ], [None, None], [None, None]]"], {}), "([['HU', '7934'], ['CA', '822'], ['LA', '8769'], ['LH', '7332'], [\n None, None], [None, None], [None, None]])\n", (3796, 3908), True, 'import numpy as np\n'), ((4058, 4091), 'utils.assert_eq', 'assert_eq', (['got[0]', 'expected[:, 0]'], {}), '(got[0], expected[:, 0])\n', (4067, 4091), False, 'from utils import assert_eq\n'), ((4096, 4129), 'utils.assert_eq', 'assert_eq', (['got[1]', 'expected[:, 1]'], {}), '(got[1], expected[:, 1])\n', (4105, 4129), False, 'from utils import assert_eq\n'), ((4359, 4381), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (4378, 4381), False, 'import nvstrings\n'), ((4438, 4558), 'numpy.array', 'np.array', (["[['HU', '7934'], ['CA', '822'], ['LA', '8769'], ['LH', '7332'], [None, None\n ], [None, None], [None, None]]"], {}), "([['HU', '7934'], ['CA', '822'], ['LA', '8769'], ['LH', '7332'], [\n None, None], [None, None], [None, None]])\n", (4446, 4558), True, 'import numpy as np\n'), ((5775, 5787), 'pandas.Series', 'pd.Series', (['s'], {}), '(s)\n', (5784, 5787), True, 'import pandas as pd\n'), ((5801, 5823), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (5820, 5823), False, 'import nvstrings\n'), ((5937, 5961), 'utils.assert_eq', 
'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (5946, 5961), False, 'from utils import assert_eq\n'), ((6691, 6703), 'pandas.Series', 'pd.Series', (['s'], {}), '(s)\n', (6700, 6703), True, 'import pandas as pd\n'), ((6715, 6737), 'nvstrings.to_device', 'nvstrings.to_device', (['s'], {}), '(s)\n', (6734, 6737), False, 'import nvstrings\n'), ((6818, 6842), 'utils.assert_eq', 'assert_eq', (['got', 'expected'], {}), '(got, expected)\n', (6827, 6842), False, 'from utils import assert_eq\n'), ((4743, 4776), 'utils.assert_eq', 'assert_eq', (['got[i]', 'expected[i, :]'], {}), '(got[i], expected[i, :])\n', (4752, 4776), False, 'from utils import assert_eq\n'), ((5326, 5398), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Pandas fails with this backreference group 3"""'}), "(reason='Pandas fails with this backreference group 3')\n", (5343, 5398), False, 'import pytest\n'), ((5448, 5520), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Pandas fails with this backreference group 3"""'}), "(reason='Pandas fails with this backreference group 3')\n", (5465, 5520), False, 'import pytest\n')] |
import numpy as np
from autodiff.dualmat import DualMat2D
def test_dualmat_lift() -> None:
    """Lifting a scalar a yields the 2x2 diagonal matrix diag(a, a)."""
    lifted = DualMat2D.lift(-3)
    # pylint: disable=C0326
    expected = np.array([[-3, 0], [0, -3]], dtype=int)
    np.testing.assert_equal(lifted.mat, expected)
def test_dualmat_init() -> None:
    """from_vals puts the first value on the diagonal, the second above it."""
    dual = DualMat2D.from_vals(-3, 1)
    # pylint: disable=C0326
    expected = np.array([[-3, 1], [0, -3]], dtype=int)
    np.testing.assert_equal(dual.mat, expected)
def test_dualmat_add() -> None:
    """Dual addition is componentwise: (-3, 1) + (-5, 2) == (-8, 3)."""
    total = DualMat2D.from_vals(-3, 1) + DualMat2D.from_vals(-5, 2)
    reference = DualMat2D.from_vals(-8, 3)
    # pylint: disable=C0326
    refmat = np.array([[-8, 3], [0, -8]], dtype=int)
    np.testing.assert_equal(total.first, reference.first)
    np.testing.assert_equal(total.second, reference.second)
    np.testing.assert_equal(total.mat, refmat)
    np.testing.assert_equal(reference.mat, refmat)
def test_dualmat_sub() -> None:
    """Dual subtraction is componentwise: (-3, 1) - (-5, 2) == (2, -1)."""
    difference = DualMat2D.from_vals(-3, 1) - DualMat2D.from_vals(-5, 2)
    reference = DualMat2D.from_vals(2, -1)
    # pylint: disable=C0326
    refmat = np.array([[2, -1], [0, 2]], dtype=int)
    np.testing.assert_equal(difference.first, reference.first)
    np.testing.assert_equal(difference.second, reference.second)
    np.testing.assert_equal(difference.mat, refmat)
    np.testing.assert_equal(reference.mat, refmat)
def test_dualmat_mul() -> None:
    """Dual product rule: (a, b) * (c, d) == (a*c, a*d + b*c)."""
    product = DualMat2D.from_vals(-3, 1) * DualMat2D.from_vals(-5, 2)
    reference = DualMat2D.from_vals(15, -11)
    # pylint: disable=C0326
    refmat = np.array([[15, -11], [0, 15]], dtype=int)
    np.testing.assert_equal(product.first, reference.first)
    np.testing.assert_equal(product.second, reference.second)
    np.testing.assert_equal(product.mat, refmat)
    np.testing.assert_equal(reference.mat, refmat)
def test_dualmat_div() -> None:
    """Dual quotient rule: (a, b) / (c, d) == (a/c, (b*c - a*d)/c**2)."""
    quotient = DualMat2D.from_vals(-3, 1) / DualMat2D.from_vals(-5, 2)
    reference = DualMat2D.from_vals(0.6, 0.04)
    # pylint: disable=C0326
    refmat = np.array([[0.60, 0.04], [0.00, 0.60]])
    # Floating point division: compare approximately, not exactly.
    np.testing.assert_almost_equal(quotient.first, reference.first)
    np.testing.assert_almost_equal(quotient.second, reference.second)
    np.testing.assert_almost_equal(quotient.mat, refmat)
    np.testing.assert_almost_equal(reference.mat, refmat)
| [
"numpy.testing.assert_almost_equal",
"autodiff.dualmat.DualMat2D.lift",
"autodiff.dualmat.DualMat2D.from_vals",
"numpy.array",
"numpy.testing.assert_equal"
] | [((113, 130), 'autodiff.dualmat.DualMat2D.lift', 'DualMat2D.lift', (['a'], {}), '(a)\n', (127, 130), False, 'from autodiff.dualmat import DualMat2D\n'), ((167, 206), 'numpy.array', 'np.array', (['[[-3, 0], [0, -3]]'], {'dtype': 'int'}), '([[-3, 0], [0, -3]], dtype=int)\n', (175, 206), True, 'import numpy as np\n'), ((211, 244), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['b.mat', 'c'], {}), '(b.mat, c)\n', (234, 244), True, 'import numpy as np\n'), ((337, 376), 'numpy.array', 'np.array', (['[[-3, 1], [0, -3]]'], {'dtype': 'int'}), '([[-3, 1], [0, -3]], dtype=int)\n', (345, 376), True, 'import numpy as np\n'), ((385, 410), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['a', 'b'], {}), '(a, b)\n', (404, 410), False, 'from autodiff.dualmat import DualMat2D\n'), ((415, 448), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['d.mat', 'c'], {}), '(d.mat, c)\n', (438, 448), True, 'import numpy as np\n'), ((491, 517), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-3)', '(1)'], {}), '(-3, 1)\n', (510, 517), False, 'from autodiff.dualmat import DualMat2D\n'), ((526, 552), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-5)', '(2)'], {}), '(-5, 2)\n', (545, 552), False, 'from autodiff.dualmat import DualMat2D\n'), ((562, 588), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-8)', '(3)'], {}), '(-8, 3)\n', (581, 588), False, 'from autodiff.dualmat import DualMat2D\n'), ((630, 669), 'numpy.array', 'np.array', (['[[-8, 3], [0, -8]]'], {'dtype': 'int'}), '([[-8, 3], [0, -8]], dtype=int)\n', (638, 669), True, 'import numpy as np\n'), ((688, 730), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.first', 'c_.first'], {}), '(c.first, c_.first)\n', (711, 730), True, 'import numpy as np\n'), ((735, 779), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.second', 'c_.second'], {}), '(c.second, c_.second)\n', (758, 779), True, 'import numpy as np\n'), ((784, 
822), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.mat', 'refmat'], {}), '(c.mat, refmat)\n', (807, 822), True, 'import numpy as np\n'), ((827, 866), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c_.mat', 'refmat'], {}), '(c_.mat, refmat)\n', (850, 866), True, 'import numpy as np\n'), ((909, 935), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-3)', '(1)'], {}), '(-3, 1)\n', (928, 935), False, 'from autodiff.dualmat import DualMat2D\n'), ((944, 970), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-5)', '(2)'], {}), '(-5, 2)\n', (963, 970), False, 'from autodiff.dualmat import DualMat2D\n'), ((980, 1006), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(2)', '(-1)'], {}), '(2, -1)\n', (999, 1006), False, 'from autodiff.dualmat import DualMat2D\n'), ((1048, 1086), 'numpy.array', 'np.array', (['[[2, -1], [0, 2]]'], {'dtype': 'int'}), '([[2, -1], [0, 2]], dtype=int)\n', (1056, 1086), True, 'import numpy as np\n'), ((1105, 1147), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.first', 'c_.first'], {}), '(c.first, c_.first)\n', (1128, 1147), True, 'import numpy as np\n'), ((1152, 1196), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.second', 'c_.second'], {}), '(c.second, c_.second)\n', (1175, 1196), True, 'import numpy as np\n'), ((1201, 1239), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.mat', 'refmat'], {}), '(c.mat, refmat)\n', (1224, 1239), True, 'import numpy as np\n'), ((1244, 1283), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c_.mat', 'refmat'], {}), '(c_.mat, refmat)\n', (1267, 1283), True, 'import numpy as np\n'), ((1326, 1352), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-3)', '(1)'], {}), '(-3, 1)\n', (1345, 1352), False, 'from autodiff.dualmat import DualMat2D\n'), ((1361, 1387), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-5)', '(2)'], {}), '(-5, 2)\n', (1380, 
1387), False, 'from autodiff.dualmat import DualMat2D\n'), ((1397, 1425), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(15)', '(-11)'], {}), '(15, -11)\n', (1416, 1425), False, 'from autodiff.dualmat import DualMat2D\n'), ((1467, 1508), 'numpy.array', 'np.array', (['[[15, -11], [0, 15]]'], {'dtype': 'int'}), '([[15, -11], [0, 15]], dtype=int)\n', (1475, 1508), True, 'import numpy as np\n'), ((1527, 1569), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.first', 'c_.first'], {}), '(c.first, c_.first)\n', (1550, 1569), True, 'import numpy as np\n'), ((1574, 1618), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.second', 'c_.second'], {}), '(c.second, c_.second)\n', (1597, 1618), True, 'import numpy as np\n'), ((1623, 1661), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c.mat', 'refmat'], {}), '(c.mat, refmat)\n', (1646, 1661), True, 'import numpy as np\n'), ((1666, 1705), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['c_.mat', 'refmat'], {}), '(c_.mat, refmat)\n', (1689, 1705), True, 'import numpy as np\n'), ((1748, 1774), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-3)', '(1)'], {}), '(-3, 1)\n', (1767, 1774), False, 'from autodiff.dualmat import DualMat2D\n'), ((1783, 1809), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(-5)', '(2)'], {}), '(-5, 2)\n', (1802, 1809), False, 'from autodiff.dualmat import DualMat2D\n'), ((1819, 1849), 'autodiff.dualmat.DualMat2D.from_vals', 'DualMat2D.from_vals', (['(0.6)', '(0.04)'], {}), '(0.6, 0.04)\n', (1838, 1849), False, 'from autodiff.dualmat import DualMat2D\n'), ((1891, 1926), 'numpy.array', 'np.array', (['[[0.6, 0.04], [0.0, 0.6]]'], {}), '([[0.6, 0.04], [0.0, 0.6]])\n', (1899, 1926), True, 'import numpy as np\n'), ((1948, 1997), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['c.first', 'c_.first'], {}), '(c.first, c_.first)\n', (1978, 1997), True, 'import numpy as np\n'), ((2002, 
2053), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['c.second', 'c_.second'], {}), '(c.second, c_.second)\n', (2032, 2053), True, 'import numpy as np\n'), ((2058, 2103), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['c.mat', 'refmat'], {}), '(c.mat, refmat)\n', (2088, 2103), True, 'import numpy as np\n'), ((2108, 2154), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['c_.mat', 'refmat'], {}), '(c_.mat, refmat)\n', (2138, 2154), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import struct
import numpy as np
class WeightReader:
    """Sequential reader for darknet-format YOLO weight files.

    The darknet format is a small integer header followed by a flat stream of
    little-endian float32 values; weights are consumed in order via an
    internal ``offset`` cursor.
    """
    def __init__(self, weight_file):
        # Parse the header: three int32 fields (major, minor, revision).
        with open(weight_file, 'rb') as w_f:
            major, = struct.unpack('i', w_f.read(4))
            minor, = struct.unpack('i', w_f.read(4))
            _, = struct.unpack('i', w_f.read(4))
            # Version >= 0.2 files store a 64-bit "images seen" counter,
            # older files a 32-bit one; skip it either way.
            if (major*10 + minor) >= 2 and major < 1000 and minor < 1000:
                w_f.read(8)
            else:
                w_f.read(4)
            # Everything after the header is raw float32 weight data.
            binary = w_f.read()
        self.offset = 0
        self.all_weights = np.frombuffer(binary, dtype='float32')
    def load_weights(self, model, skip_detect_layer=False):
        """Copy weights into *model* layer by layer, in darknet order.

        When *skip_detect_layer* is True the three YOLO detection heads
        (layers 81, 93, 105) are skipped, so pretrained backbone weights can
        be loaded into a model with a different number of classes.
        """
        # 81 93 105
        for i in range(model.num_layers):
            if skip_detect_layer and i in [81, 93, 105]:
                skip_size = self._skip(i)
                self._read_bytes(skip_size)
                continue
            # Batch-norm / bias parameters are stored before the conv kernel.
            suffixes = ["beta", "gamma", "moving_mean", "moving_variance", "bias"]
            for suffix in suffixes:
                variables = model.get_variables(layer_idx=i, suffix=suffix)
                if variables:
                    self._load_1d_var(variables[0])
            variables = model.get_variables(layer_idx=i, suffix="kernel")
            if variables:
                self._load_4d_var(variables[0])
        print(self.offset)  # 62001757
    def _skip(self, i):
        # Size (in floats) of a detection head: bias (255) plus a
        # 1x1 conv kernel of shape (in_channels x 255).
        if i == 81:
            skip_size = 255 + 1024*255
        elif i == 93:
            skip_size = 255 + 512*255
        elif i == 105:
            skip_size = 255 + 256*255
        else:
            skip_size = 0
        return skip_size
    def _read_bytes(self, size):
        # Advance the cursor and return the consumed slice of floats.
        self.offset = self.offset + size
        return self.all_weights[self.offset-size:self.offset]
    def _load_1d_var(self, variable):
        # 1-D parameters (bias, batch-norm stats) are stored as-is.
        size = np.prod(variable.shape)
        value = self._read_bytes(size)  # bias
        variable.assign(value)
    def _load_4d_var(self, variable):
        # Conv kernels are stored in darknet order (out, in, h, w); reshape
        # and transpose to the framework's (h, w, in, out) layout.
        size = np.prod(variable.shape)
        value = self._read_bytes(size)  # scale
        value = value.reshape(list(reversed(variable.shape)))
        value = value.transpose([2, 3, 1, 0])
        variable.assign(value)
if __name__ == '__main__':
    # Ad-hoc smoke test: exercises both the manual WeightReader path and the
    # model's own darknet loader. Requires the yolo package and the weight
    # file on disk; not run when this module is imported.
    from yolo.net.yolonet import Yolonet
    from yolo import YOLOV3_WEIGHTS
    yolonet = Yolonet(18)
    reader = WeightReader(YOLOV3_WEIGHTS)
    reader.load_weights(yolonet, True)
    yolonet.load_darknet_params(YOLOV3_WEIGHTS,
                                skip_detect_layer=True)
    # Same check with a different head size, with and without skipping the
    # detection layers.
    yolonet = Yolonet(255)
    yolonet.load_darknet_params(YOLOV3_WEIGHTS,
                                skip_detect_layer=True)
    yolonet.load_darknet_params(YOLOV3_WEIGHTS,
                                skip_detect_layer=False)
| [
"numpy.frombuffer",
"yolo.net.yolonet.Yolonet",
"numpy.prod"
] | [((2378, 2389), 'yolo.net.yolonet.Yolonet', 'Yolonet', (['(18)'], {}), '(18)\n', (2385, 2389), False, 'from yolo.net.yolonet import Yolonet\n'), ((2598, 2610), 'yolo.net.yolonet.Yolonet', 'Yolonet', (['(255)'], {}), '(255)\n', (2605, 2610), False, 'from yolo.net.yolonet import Yolonet\n'), ((578, 616), 'numpy.frombuffer', 'np.frombuffer', (['binary'], {'dtype': '"""float32"""'}), "(binary, dtype='float32')\n", (591, 616), True, 'import numpy as np\n'), ((1876, 1899), 'numpy.prod', 'np.prod', (['variable.shape'], {}), '(variable.shape)\n', (1883, 1899), True, 'import numpy as np\n'), ((2037, 2060), 'numpy.prod', 'np.prod', (['variable.shape'], {}), '(variable.shape)\n', (2044, 2060), True, 'import numpy as np\n')] |
# DEPENDENCIES
import random
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ObjectProperty
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.label import Label
import numpy as np
import itertools
# CUSTOM MODULES
from globals import config_dict
# SUPPORT FUNCTIONS
def get_text_dict(max_number:int=config_dict['Tasks']['Sudoku']['base_size']**2):
    """
    Build the label-cycling map: each button text maps to its successor,
    with the highest number wrapping to '-' and '-' wrapping to '1'.
    """
    cycle = {str(value): str(value + 1) for value in range(1, max_number)}
    cycle['-'] = '1'
    cycle[str(max_number)] = '-'
    return cycle
# SUPPORT CLASSES
class NumberButton(Button):
    """
    A grid-cell button whose label cycles through the number sequence
    (and '-') each time it is tapped.
    """
    config_dict = config_dict
    text_dict = get_text_dict(max_number=config_dict['Tasks']['Sudoku']['base_size']**2)
    def __init__(self, row:int, column:int, number:int, **kwargs):
        super(NumberButton, self).__init__(**kwargs)
        self.number = number
        self.column = column
        self.row = row
        self.text = str(number)
    def use_button(self, instance):
        """
        Press handler: advance the label to the next value in the cycle.
        """
        self.text = self.text_dict[self.text]
    def disable_button(self):
        """
        Turn off interactability for this cell.
        """
        self.disabled = True
# MAIN WIDGET
class Sudoku(GridLayout):
    """Sudoku game widget: generates a valid board, hides some cells and
    validates the player's solution."""
    config_dict = config_dict
    # Fraction of cells that start empty.
    empty_rate = config_dict['Tasks']['Sudoku']['empty_rate']
    board = ObjectProperty(None)
    widget_board = ObjectProperty(None)
    task_id = config_dict['Tasks']['Sudoku']['task_id']
    def __init__(self, base_size:int=config_dict['Tasks']['Sudoku']['base_size'], **kwargs):
        super(Sudoku, self).__init__(**kwargs)
        # base_size is the block edge (3 for classic sudoku); side_size is
        # the full board edge (9).
        self.base_size = base_size
        self.side_size = self.base_size**2
        self.generate_board()
        self.add_number_buttons()
        self.clear_some()
        self.disable_rest()
    def building_pattern(self, r:int, c:int):
        """
        Pattern for a baseline valid solution
        """
        return (self.base_size*(r%self.base_size)+r//self.base_size+c)%self.side_size
    def shuffle(self, s:list):
        """
        Return a random permutation of *s* (used to randomize rows, columns
        and numbers of the valid base pattern).
        """
        return random.sample(s,len(s))
    def generate_board(self):
        """
        Generate a list of lists where each sublist is a row of the game matrix.
        """
        r_base = range(self.base_size)
        # Shuffle rows/columns within each block and the blocks themselves,
        # which preserves sudoku validity.
        rows = [g*self.base_size + r for g in self.shuffle(r_base) for r in self.shuffle(r_base)]
        cols = [g*self.base_size + c for g in self.shuffle(r_base) for c in self.shuffle(r_base)]
        nums = self.shuffle(s=range(1, self.base_size**2 + 1))
        self.board = [[nums[self.building_pattern(r=r,c=c)] for c in cols] for r in rows] # build board using randomised building pattern
    def add_number_buttons(self):
        """
        Add Number button to game board GridLayout.
        """
        add_rowspace, add_colspace = False, False
        for i in range(len(self.board)): # for each row i of the board
            # Insert an empty spacer row between sudoku blocks.
            add_rowspace = ((i+1)%self.base_size == 1 and i != 0)
            if add_rowspace:
                [self.widget_board.add_widget(Label(text='')) for _ in range(self.side_size + self.base_size - 1)] # add sudoku row spacing
            for j in range(len(self.board[i])): # for each index j in row i of the board
                # Insert an empty spacer column between sudoku blocks.
                add_colspace = ((j+1)%self.base_size == 1 and j != 0)
                if add_colspace:
                    self.widget_board.add_widget(Label(text='')) # add sudoku spacing
                button = NumberButton(row=i, column=j, number=self.board[i][j])
                button.bind(on_release=button.use_button)
                self.widget_board.add_widget(button)
    def clear_some(self):
        """
        Sets some of the widget texts to '-'
        """
        squares = self.side_size**2
        empties = round(squares * self.empty_rate)
        # Pick `empties` distinct cell positions and blank them out.
        for p in random.sample(range(squares),empties):
            r, c = p//self.side_size, p%self.side_size
            buttons = [w for w in self.widget_board.children if isinstance(w, NumberButton)]
            button = [b for b in buttons if (b.row == r and b.column == c)][0] # find button in row r and column c
            button.text = '-'
    def disable_rest(self):
        """
        Disable buttons that are not modifiable. Also make them look different.
        """
        button_list = [w for w in self.widget_board.children if isinstance(w, NumberButton)]
        short_button_list = [b for b in button_list if b.text != '-']
        for button in short_button_list:
            button.disabled = True
    def check_solution(self):
        """
        Look at board widgets, check if the solution is valid or not. Could be different from generated board but still valid.
        Empty field treated as 0.
        :return: 'Correct' if solution is correct, else return 'Incorrect'
        :rtype: str
        """
        button_list = [w for w in self.widget_board.children if isinstance(w, NumberButton)]
        solution_board = [[int(b.text) if b.text != '-' else 0 for b in button_list if b.row == i] for i in range(self.side_size)] # a nested list of integers
        rows = [set(row) for row in solution_board] # rows as sets
        columns = [set(row) for row in list(np.transpose(np.asarray(solution_board)))] # columns as sets
        block_indices = [[i for i in range(self.side_size) if i//self.base_size==j] for j in range(self.base_size)] # define block index groups
        # All ordered pairs of index groups plus the diagonal pairs cover
        # every block of the grid exactly once.
        blocks = list(itertools.permutations(block_indices, 2)) + [(i,i) for i in block_indices]
        blocks = [set.union(*[set([solution_board[i][j] for j in t[1]]) for i in t[0]]) for t in blocks] # through tuples through lists, construct sets of block elements
        val = self.validate_solution(rows=rows, columns=columns, blocks=blocks)
        if val:
            self.disable_buttons()
            return 'Correct'
        else:
            return 'Incorrect, check again when done.'
    def validate_solution(self, rows:list, columns:list, blocks:list):
        """
        Compare columns, rows and blocks to reference_set.
        :param rows: A list of sets of row elements.
        :type rows: list
        :param columns: A list of sets of column elements.
        :type columns: list
        :param blocks: A list of sets of block elements.
        :type blocks: list
        :return: True if solution is corect, False otherwise.
        :rtype: bool
        """
        reference_set = set(list(range(1,self.side_size+1))) # a set of numbers a row, column and block has to contain
        for i in range(self.side_size): # go through rows, columns and blocks and compare elements with
            if (rows[i] != reference_set or columns[i] != reference_set or blocks[i] != reference_set):
                return False
        return True
    def disable_buttons(self):
        """
        Disable all buttons.
        """
        [w.disable_button() for w in self.widget_board.children if isinstance(w, NumberButton)]
| [
"kivy.uix.label.Label",
"itertools.permutations",
"numpy.asarray",
"kivy.properties.ObjectProperty"
] | [((1587, 1607), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (1601, 1607), False, 'from kivy.properties import ObjectProperty\n'), ((1628, 1648), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (1642, 1648), False, 'from kivy.properties import ObjectProperty\n'), ((5812, 5852), 'itertools.permutations', 'itertools.permutations', (['block_indices', '(2)'], {}), '(block_indices, 2)\n', (5834, 5852), False, 'import itertools\n'), ((3399, 3413), 'kivy.uix.label.Label', 'Label', ([], {'text': '""""""'}), "(text='')\n", (3404, 3413), False, 'from kivy.uix.label import Label\n'), ((3738, 3752), 'kivy.uix.label.Label', 'Label', ([], {'text': '""""""'}), "(text='')\n", (3743, 3752), False, 'from kivy.uix.label import Label\n'), ((5596, 5622), 'numpy.asarray', 'np.asarray', (['solution_board'], {}), '(solution_board)\n', (5606, 5622), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import math
import urllib
import io
from PIL import Image
def deg2num(lat_deg, lon_deg, zoom):
    """Convert WGS84 coordinates to slippy-map tile indices at *zoom*."""
    tiles_per_axis = 2.0 ** zoom
    phi = math.radians(lat_deg)
    x_index = int((lon_deg + 180.0) / 360.0 * tiles_per_axis)
    # Web-Mercator projection of the latitude, mapped into [0, n) tiles.
    mercator_y = math.log(math.tan(phi) + (1 / math.cos(phi)))
    y_index = int((1.0 - mercator_y / math.pi) / 2.0 * tiles_per_axis)
    return (x_index, y_index)
def num2deg(xtile, ytile, zoom):
    """Convert slippy-map tile indices back to the tile's NW-corner (lat, lon)."""
    tiles_per_axis = 2.0 ** zoom
    lon = xtile / tiles_per_axis * 360.0 - 180.0
    # Invert the web-Mercator projection for the latitude.
    phi = math.atan(math.sinh(math.pi * (1 - 2 * ytile / tiles_per_axis)))
    return (math.degrees(phi), lon)
def getImageCluster( lon_deg,lat_deg, delta_long, delta_lat,zoom,style,printlog,imgsavepath,apikey = '',access_token = '',styleid = 'cjrewwj3l2dwt2tptkiu09scd'):
    '''
    Download all map tiles covering the given lon/lat extent at *zoom* and
    stitch them into a single PIL Image. Tiles are cached on disk under
    ``imgsavepath/tileimg`` and fetched concurrently with one thread per tile.

    apikey - openstreetmap token
    access_token - mapbox token
    '''
    # Pick the tile-server URL template for the requested style (1-6 are
    # OSM-based servers, 7 is mapbox).
    if style == 1:
        smurl = r'https://a.tile.thunderforest.com/cycle/{0}/{1}/{2}.png?apikey='+apikey
    if style == 2:
        smurl = r'https://a.tile.thunderforest.com/transport/{0}/{1}/{2}.png?apikey='+apikey
    if style == 3:
        smurl = r'https://tile-b.openstreetmap.fr/hot/{0}/{1}/{2}.png'
    if style == 4:
        smurl = r'https://tiles.wmflabs.org/bw-mapnik/{0}/{1}/{2}.png'
    if style == 5:
        smurl = r'http://a.tile.stamen.com/toner/{0}/{1}/{2}.png'
    if style == 6:
        smurl = r'http://c.tile.stamen.com/watercolor/{0}/{1}/{2}.png'
    if style == 7:
        # Map the friendly names to concrete mapbox style ids.
        if styleid == 'dark':
            styleid = 'cjetnd20i1vbi2qqxbh0by7p8'
        if styleid == 'light':
            styleid = 'cjrewwj3l2dwt2tptkiu09scd'
        smurl = r'https://api.mapbox.com/styles/v1/ni1o1/'+styleid+r'/tiles/256/{0}/{1}/{2}?&access_token='+access_token
    else:
        styleid = ''
    # Tile-index bounds of the requested extent (y grows southwards).
    xmin, ymax =deg2num(lat_deg, lon_deg, zoom)
    xmax, ymin =deg2num(lat_deg + delta_lat, lon_deg + delta_long, zoom)
    def get_img(smurl,zoom, xtile, ytile,imgsize,imgsavepath):
        # Worker: load one tile from the disk cache or download it, then
        # paste it into the shared Cluster image (closure over Cluster).
        import os
        filename = str(style)+str(styleid)+'-'+str(zoom)+'-'+str(xtile)+'-'+str(ytile)+'-'+str(imgsize)+'.png'
        def savefig(filename,tile):
            # Best-effort write of the tile into the cache directory.
            # NOTE(review): uses a Windows-style '\\' path separator —
            # presumably Windows-only; verify before using elsewhere.
            try:
                if 'tileimg' in os.listdir(imgsavepath):
                    if filename in os.listdir(imgsavepath+'tileimg'):
                        pass
                    else:
                        tile.save(imgsavepath+'tileimg\\'+filename)
                        print('figsaved:'+filename)
                else:
                    os.mkdir(imgsavepath+'tileimg')
            except:
                pass
        def loadfig(filename):
            # Return the cached tile image, or None on a cache miss / error.
            try:
                if 'tileimg' in os.listdir(imgsavepath):
                    if filename in os.listdir(imgsavepath+'tileimg'):
                        tile = Image.open(imgsavepath+'tileimg\\'+filename)
                        return tile
                    else:
                        return None
                else:
                    os.mkdir(imgsavepath+'tileimg')
                    return None
            except:
                return None
        tile = loadfig(filename)
        if tile is None:
            try:
                # Retry the download up to 10 times before giving up.
                t = 0
                while t<10:
                    try:
                        imgurl=smurl.format(zoom, xtile, ytile)
                        #print("Opening: " + imgurl)
                        imgstr = urllib.request.urlopen(imgurl,timeout = 6).read()
                        tile = Image.open(io.BytesIO(imgstr))
                        savefig(filename,tile)
                        Cluster.paste(tile, box=((xtile-xmin)*imgsize , (ytile-ymin)*imgsize))
                        t = 10
                    except:
                        if printlog:
                            print('Get map tile failed, retry ',t)
                        t += 1
            except:
                print("Couldn't download image")
                tile = None
        else:
            Cluster.paste(tile, box=((xtile-xmin)*imgsize , (ytile-ymin)*imgsize))
    imgsize = 256
    import threading
    threads = []
    # Target image big enough for the whole tile grid.
    Cluster = Image.new('RGB',((xmax-xmin+1)*imgsize-1,(ymax-ymin+1)*imgsize-1))
    for xtile in range(xmin, xmax+1):
        for ytile in range(ymin, ymax+1):
            threads.append(threading.Thread(target=get_img,args = (smurl,zoom, xtile, ytile,imgsize,imgsavepath)))
    for t in threads:
        t.setDaemon(True)
        t.start()
    for t in threads:
        t.join()
    threads.clear()
    return Cluster
def plot_map(plt,bounds,zoom,style,imgsavepath = 'C:\\',printlog = False,apikey = '',access_token = '',styleid = 'dark'):
    '''
    Download the tiles covering *bounds* and render them on *plt*.

    bounds -- plotting extent [lon1, lat1, lon2, lat2] (wgs1984)
    zoom -- slippy-map zoom level
    style -- 1-6 select openstreetmap-based styles, 7 selects mapbox
    styleid -- mapbox style ("dark", "light" or a custom id) when style is 7
    imgsavepath -- directory used to cache downloaded tiles
    '''
    import os
    try:
        os.listdir(imgsavepath)
    except:
        print('imgsavepath do not exist, your tile map will not save')
    lon_min, lat_min, lon_max, lat_max = bounds
    stitched = getImageCluster(lon_min, lat_min, lon_max - lon_min, lat_max - lat_min,
                               zoom, style, printlog=printlog, imgsavepath=imgsavepath,
                               apikey=apikey, access_token=access_token, styleid=styleid)
    # Snap the displayed extent to the outer edges of the fetched tile grid.
    x_a, y_a = deg2num(lat_min, lon_min, zoom)
    x_b, y_b = deg2num(lat_max, lon_max, zoom)
    lat_bottom, lon_left = num2deg(x_a, y_a + 1, zoom)
    lat_top, lon_right = num2deg(x_b + 1, y_b, zoom)
    plt.imshow(np.asarray(stitched), extent=(lon_left, lon_right, lat_bottom, lat_top))
def plotscale(ax,bounds,textcolor = 'k',textsize = 8,compasssize = 1,accuracy = 'auto',rect=[0.1,0.1]):
    # draw a distance scale bar and a north arrow on the map axes
    import math
    # delimit the extent used to place the scale bar
    lon1 = bounds[0]
    lat1 = bounds[1]
    lon2 = bounds[2]
    lat2 = bounds[3]
    latStart = min(lat1, lat2);
    lonStart = min(lon1, lon2);
    if accuracy == 'auto':
        # pick a round bar length (metres) proportional to the lon extent
        accuracy = (int((lon2-lon1)/0.0003/1000+0.5)*1000)
    # rect gives the bar anchor as fractions of the extent
    a,c=rect
    b = 1-a
    d = 1-c
    alon,alat = (b*lon1+a*lon2)/(a+b),(d*lat1+c*lat2)/(c+d)
    # convert the bar length from metres to degrees of longitude at this latitude
    deltaLon = accuracy * 360 / (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360));
    # draw the scale bar as four alternating black/white segments
    from shapely.geometry import Polygon
    import geopandas as gpd
    scale = gpd.GeoDataFrame({'color':[(0,0,0),(1,1,1),(0,0,0),(1,1,1)],'geometry':
    [Polygon([(alon,alat),(alon+deltaLon,alat),(alon+deltaLon,alat+deltaLon*0.4),(alon,alat+deltaLon*0.4)]),
    Polygon([(alon+deltaLon,alat),(alon+2*deltaLon,alat),(alon+2*deltaLon,alat+deltaLon*0.4),(alon+deltaLon,alat+deltaLon*0.4)]),
    Polygon([(alon+2*deltaLon,alat),(alon+4*deltaLon,alat),(alon+4*deltaLon,alat+deltaLon*0.4),(alon+2*deltaLon,alat+deltaLon*0.4)]),
    Polygon([(alon+4*deltaLon,alat),(alon+8*deltaLon,alat),(alon+8*deltaLon,alat+deltaLon*0.4),(alon+4*deltaLon,alat+deltaLon*0.4)])
    ]})
    scale.plot(ax = ax,edgecolor= (0,0,0,1),facecolor = scale['color'],lw = 0.6)
    # annotate the distances (km) at each segment boundary
    ax.annotate(str(int(accuracy/1000)),color = textcolor,size = textsize,xy=(alon+deltaLon,alat+deltaLon*0.2), xytext=(-textsize*3/5,textsize/1.5), textcoords='offset points')
    ax.annotate(str(int(2*accuracy/1000)),color = textcolor,size = textsize,xy=(alon+2*deltaLon,alat+deltaLon*0.2), xytext=(-textsize*3/5,textsize/1.5), textcoords='offset points')
    ax.annotate(str(int(4*accuracy/1000)),color = textcolor,size = textsize,xy=(alon+4*deltaLon,alat+deltaLon*0.2), xytext=(-textsize*3/5,textsize/1.5), textcoords='offset points')
    ax.annotate(str(int(8*accuracy/1000)),color = textcolor,size = textsize,xy=(alon+8*deltaLon,alat+deltaLon*0.2), xytext=(-textsize*3/5,textsize/1.5), textcoords='offset points')
    ax.annotate('KM',size = textsize,color = textcolor,xy=(alon+8*deltaLon,alat+deltaLon*0.1), xytext=(textsize*2/5,-textsize/5), textcoords='offset points')
    # draw the north arrow to the left of the scale bar
    deltaLon = compasssize*deltaLon
    alon = alon-deltaLon
    compass = gpd.GeoDataFrame({'color':[(0,0,0),(1,1,1)],'geometry':
    [Polygon([[alon,alat],[alon,alat+deltaLon],[alon+1/2*deltaLon,alat-1/2*deltaLon]]),
    Polygon([[alon,alat],[alon,alat+deltaLon],[alon-1/2*deltaLon,alat-1/2*deltaLon]])]})
    compass.plot(ax= ax, edgecolor= (0,0,0,1),facecolor = compass['color'],lw = 0.6)
ax.annotate('N',color = textcolor,size = textsize,xy=[alon,alat+deltaLon], xytext=(-textsize*2/5,textsize/2), textcoords='offset points') | [
"threading.Thread",
"PIL.Image.new",
"os.mkdir",
"io.BytesIO",
"shapely.geometry.Polygon",
"math.radians",
"math.tan",
"numpy.asarray",
"urllib.request.urlopen",
"PIL.Image.open",
"math.cos",
"math.sinh",
"math.degrees",
"os.listdir"
] | [((163, 184), 'math.radians', 'math.radians', (['lat_deg'], {}), '(lat_deg)\n', (175, 184), False, 'import math\n'), ((561, 582), 'math.degrees', 'math.degrees', (['lat_rad'], {}), '(lat_rad)\n', (573, 582), False, 'import math\n'), ((4172, 4260), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '((xmax - xmin + 1) * imgsize - 1, (ymax - ymin + 1) * imgsize - 1)'], {}), "('RGB', ((xmax - xmin + 1) * imgsize - 1, (ymax - ymin + 1) *\n imgsize - 1))\n", (4181, 4260), False, 'from PIL import Image\n'), ((504, 544), 'math.sinh', 'math.sinh', (['(math.pi * (1 - 2 * ytile / n))'], {}), '(math.pi * (1 - 2 * ytile / n))\n', (513, 544), False, 'import math\n'), ((5206, 5229), 'os.listdir', 'os.listdir', (['imgsavepath'], {}), '(imgsavepath)\n', (5216, 5229), False, 'import os\n'), ((5754, 5767), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (5764, 5767), True, 'import numpy as np\n'), ((6393, 6432), 'math.cos', 'math.cos', (['((lat1 + lat2) * math.pi / 360)'], {}), '((lat1 + lat2) * math.pi / 360)\n', (6401, 6432), False, 'import math\n'), ((4351, 4443), 'threading.Thread', 'threading.Thread', ([], {'target': 'get_img', 'args': '(smurl, zoom, xtile, ytile, imgsize, imgsavepath)'}), '(target=get_img, args=(smurl, zoom, xtile, ytile, imgsize,\n imgsavepath))\n', (4367, 4443), False, 'import threading\n'), ((6614, 6740), 'shapely.geometry.Polygon', 'Polygon', (['[(alon, alat), (alon + deltaLon, alat), (alon + deltaLon, alat + deltaLon *\n 0.4), (alon, alat + deltaLon * 0.4)]'], {}), '([(alon, alat), (alon + deltaLon, alat), (alon + deltaLon, alat + \n deltaLon * 0.4), (alon, alat + deltaLon * 0.4)])\n', (6621, 6740), False, 'from shapely.geometry import Polygon\n'), ((6723, 6883), 'shapely.geometry.Polygon', 'Polygon', (['[(alon + deltaLon, alat), (alon + 2 * deltaLon, alat), (alon + 2 * deltaLon,\n alat + deltaLon * 0.4), (alon + deltaLon, alat + deltaLon * 0.4)]'], {}), '([(alon + deltaLon, alat), (alon + 2 * deltaLon, alat), (alon + 2 *\n deltaLon, alat + deltaLon * 
0.4), (alon + deltaLon, alat + deltaLon * 0.4)]\n )\n', (6730, 6883), False, 'from shapely.geometry import Polygon\n'), ((6854, 7023), 'shapely.geometry.Polygon', 'Polygon', (['[(alon + 2 * deltaLon, alat), (alon + 4 * deltaLon, alat), (alon + 4 *\n deltaLon, alat + deltaLon * 0.4), (alon + 2 * deltaLon, alat + deltaLon *\n 0.4)]'], {}), '([(alon + 2 * deltaLon, alat), (alon + 4 * deltaLon, alat), (alon + \n 4 * deltaLon, alat + deltaLon * 0.4), (alon + 2 * deltaLon, alat + \n deltaLon * 0.4)])\n', (6861, 7023), False, 'from shapely.geometry import Polygon\n'), ((6989, 7158), 'shapely.geometry.Polygon', 'Polygon', (['[(alon + 4 * deltaLon, alat), (alon + 8 * deltaLon, alat), (alon + 8 *\n deltaLon, alat + deltaLon * 0.4), (alon + 4 * deltaLon, alat + deltaLon *\n 0.4)]'], {}), '([(alon + 4 * deltaLon, alat), (alon + 8 * deltaLon, alat), (alon + \n 8 * deltaLon, alat + deltaLon * 0.4), (alon + 4 * deltaLon, alat + \n deltaLon * 0.4)])\n', (6996, 7158), False, 'from shapely.geometry import Polygon\n'), ((8249, 8354), 'shapely.geometry.Polygon', 'Polygon', (['[[alon, alat], [alon, alat + deltaLon], [alon + 1 / 2 * deltaLon, alat - 1 /\n 2 * deltaLon]]'], {}), '([[alon, alat], [alon, alat + deltaLon], [alon + 1 / 2 * deltaLon, \n alat - 1 / 2 * deltaLon]])\n', (8256, 8354), False, 'from shapely.geometry import Polygon\n'), ((8337, 8442), 'shapely.geometry.Polygon', 'Polygon', (['[[alon, alat], [alon, alat + deltaLon], [alon - 1 / 2 * deltaLon, alat - 1 /\n 2 * deltaLon]]'], {}), '([[alon, alat], [alon, alat + deltaLon], [alon - 1 / 2 * deltaLon, \n alat - 1 / 2 * deltaLon]])\n', (8344, 8442), False, 'from shapely.geometry import Polygon\n'), ((2219, 2242), 'os.listdir', 'os.listdir', (['imgsavepath'], {}), '(imgsavepath)\n', (2229, 2242), False, 'import os\n'), ((2538, 2571), 'os.mkdir', 'os.mkdir', (["(imgsavepath + 'tileimg')"], {}), "(imgsavepath + 'tileimg')\n", (2546, 2571), False, 'import os\n'), ((2696, 2719), 'os.listdir', 'os.listdir', (['imgsavepath'], {}), 
'(imgsavepath)\n', (2706, 2719), False, 'import os\n'), ((3014, 3047), 'os.mkdir', 'os.mkdir', (["(imgsavepath + 'tileimg')"], {}), "(imgsavepath + 'tileimg')\n", (3022, 3047), False, 'import os\n'), ((2280, 2315), 'os.listdir', 'os.listdir', (["(imgsavepath + 'tileimg')"], {}), "(imgsavepath + 'tileimg')\n", (2290, 2315), False, 'import os\n'), ((2757, 2792), 'os.listdir', 'os.listdir', (["(imgsavepath + 'tileimg')"], {}), "(imgsavepath + 'tileimg')\n", (2767, 2792), False, 'import os\n'), ((2824, 2872), 'PIL.Image.open', 'Image.open', (["(imgsavepath + 'tileimg\\\\' + filename)"], {}), "(imgsavepath + 'tileimg\\\\' + filename)\n", (2834, 2872), False, 'from PIL import Image\n'), ((3531, 3549), 'io.BytesIO', 'io.BytesIO', (['imgstr'], {}), '(imgstr)\n', (3541, 3549), False, 'import io\n'), ((287, 304), 'math.tan', 'math.tan', (['lat_rad'], {}), '(lat_rad)\n', (295, 304), False, 'import math\n'), ((3438, 3479), 'urllib.request.urlopen', 'urllib.request.urlopen', (['imgurl'], {'timeout': '(6)'}), '(imgurl, timeout=6)\n', (3460, 3479), False, 'import urllib\n'), ((312, 329), 'math.cos', 'math.cos', (['lat_rad'], {}), '(lat_rad)\n', (320, 329), False, 'import math\n')] |
'''
Title : Concatenate
Subdomain : Numpy
Domain : Python
Author : codeperfectplus
Created : 7 May 2020
'''
import numpy as np

# First line: row counts of the two matrices and the shared column count.
n_rows_a, n_rows_b, _n_cols = map(int, input().split())
# Read each matrix as integer rows, then stack them vertically.
matrix_a = np.array([input().split() for _ in range(n_rows_a)], int)
matrix_b = np.array([input().split() for _ in range(n_rows_b)], int)
print(np.concatenate((matrix_a, matrix_b), axis=0))
| [
"numpy.concatenate"
] | [((299, 339), 'numpy.concatenate', 'np.concatenate', (['(array1, array2)'], {'axis': '(0)'}), '((array1, array2), axis=0)\n', (313, 339), True, 'import numpy as np\n')] |
from styx_msgs.msg import TrafficLight
import os
import cv2
import numpy as np
import rospy
import tensorflow as tf
from datetime import datetime
class TLClassifier(object):
def __init__(self, model_name):
#TODO load classifier
self.first_call = True
# load frozen model
cwd = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(cwd, "model_trained/{}".format(model_name))
self.frozen_graph = tf.Graph()
with self.frozen_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
# Tensors from frozen_graph
self.image_tensor = self.frozen_graph.get_tensor_by_name('image_tensor:0')
# Boxes, Scores and Classes
self.detection_boxes = self.frozen_graph.get_tensor_by_name('detection_boxes:0')
self.detection_scores = self.frozen_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.frozen_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.frozen_graph.get_tensor_by_name('num_detections:0')
# create tensorflow session for detection
self.sess = tf.Session(graph=self.frozen_graph)
# Model was trained to detect traffic lights with color
self.category_dict = {
1: 'green',
2: 'yellow',
3: 'red',
4: 'none'
}
# create output image directory
self.out_dir = 'images'
if not os.path.exists(self.out_dir):
os.mkdir(self.out_dir)
def to_image_coords(self, boxes, height, width):
"""
The original box coordinate output is normalized, i.e [0, 1], so it converts back to the original coordinate.
"""
box_coords = np.zeros_like(boxes)
box_coords[:, 0] = boxes[:, 0] * height
box_coords[:, 1] = boxes[:, 1] * width
box_coords[:, 2] = boxes[:, 2] * height
box_coords[:, 3] = boxes[:, 3] * width
return box_coords
def draw_boxes(self, image, boxes, classes, scores):
"""Draw bounding boxes on the image"""
for i in range(len(boxes)):
top, left, bot, right = boxes[i, ...]
cv2.rectangle(image, (left, top), (right, bot), (255,0,0), 3)
text = self.category_dict[classes[i]] + ': ' + str(int(scores[i]*100)) + '%'
cv2.putText(image , text, (left, int(top - 5)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (200,0,0), 1, cv2.LINE_AA)
def filter_boxes(self, min_score, boxes, scores, classes):
"""Return boxes with a confidence >= `min_score`"""
n = len(classes)
idxs = []
for i in range(n):
if scores[i] >= min_score:
idxs.append(i)
filtered_boxes = boxes[idxs, ...]
filtered_scores = scores[idxs, ...]
filtered_classes = classes[idxs, ...]
return filtered_boxes, filtered_scores, filtered_classes
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#TODO implement light color prediction
# Prepare the input
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
(im_width, im_height, _) = image_rgb.shape
image_np = np.expand_dims(image_rgb, axis=0)
# Prediction
with self.frozen_graph.as_default():
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores,
self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_np})
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
# Thresholds
min_score_threshold = 0.2
boxes, scores, classes = self.filter_boxes(min_score_threshold, boxes, scores, classes)
# Output the image
output_images = False # make this True to output inference images
if output_images:
image = np.dstack((image[:, :, 2], image[:, :, 1], image[:, :, 0]))
width, height = image.shape[1], image.shape[0]
box_coords = self.to_image_coords(boxes, height, width)
self.draw_boxes(image, box_coords, classes, scores)
timestr = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
filename = os.path.join(self.out_dir, 'image_' + timestr + '.jpg')
im_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imwrite(filename, im_bgr)
if len(scores)>0:
this_class = int(classes[np.argmax(scores)])
else:
this_class = 4
if self.first_call:
self.start_time = rospy.get_time()
self.first_call = False
now = rospy.get_time()
duration = round(now - self.start_time, 1)
rospy.loginfo("{} secs - ### {}:{} ### classes: {}, scores: {}".format(duration, this_class, self.category_dict[this_class], classes, scores))
if this_class == 1:
return TrafficLight.GREEN
elif this_class == 2:
return TrafficLight.RED # Return RED for YELLOW as well
elif this_class == 3:
return TrafficLight.RED
return TrafficLight.GREEN # Return GREEN for UNKNOWN | [
"os.mkdir",
"numpy.argmax",
"datetime.datetime.utcnow",
"cv2.rectangle",
"os.path.join",
"numpy.zeros_like",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.exists",
"rospy.get_time",
"tensorflow.GraphDef",
"numpy.dstack",
"os.path.realpath",
"tensorflow.Session",
"tensorflow.gfile.GFile",
"te... | [((467, 477), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (475, 477), True, 'import tensorflow as tf\n'), ((1405, 1440), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.frozen_graph'}), '(graph=self.frozen_graph)\n', (1415, 1440), True, 'import tensorflow as tf\n'), ((2011, 2031), 'numpy.zeros_like', 'np.zeros_like', (['boxes'], {}), '(boxes)\n', (2024, 2031), True, 'import numpy as np\n'), ((3602, 3640), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (3614, 3640), False, 'import cv2\n'), ((3711, 3744), 'numpy.expand_dims', 'np.expand_dims', (['image_rgb'], {'axis': '(0)'}), '(image_rgb, axis=0)\n', (3725, 3744), True, 'import numpy as np\n'), ((4070, 4087), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (4080, 4087), True, 'import numpy as np\n'), ((4105, 4123), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (4115, 4123), True, 'import numpy as np\n'), ((5257, 5273), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (5271, 5273), False, 'import rospy\n'), ((333, 359), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (349, 359), False, 'import os\n'), ((550, 563), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (561, 563), True, 'import tensorflow as tf\n'), ((1729, 1757), 'os.path.exists', 'os.path.exists', (['self.out_dir'], {}), '(self.out_dir)\n', (1743, 1757), False, 'import os\n'), ((1771, 1793), 'os.mkdir', 'os.mkdir', (['self.out_dir'], {}), '(self.out_dir)\n', (1779, 1793), False, 'import os\n'), ((2460, 2523), 'cv2.rectangle', 'cv2.rectangle', (['image', '(left, top)', '(right, bot)', '(255, 0, 0)', '(3)'], {}), '(image, (left, top), (right, bot), (255, 0, 0), 3)\n', (2473, 2523), False, 'import cv2\n'), ((4487, 4546), 'numpy.dstack', 'np.dstack', (['(image[:, :, 2], image[:, :, 1], image[:, :, 0])'], {}), '((image[:, :, 2], image[:, :, 1], image[:, :, 0]))\n', (4496, 4546), True, 'import numpy as np\n'), 
((4840, 4895), 'os.path.join', 'os.path.join', (['self.out_dir', "('image_' + timestr + '.jpg')"], {}), "(self.out_dir, 'image_' + timestr + '.jpg')\n", (4852, 4895), False, 'import os\n'), ((4917, 4955), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (4929, 4955), False, 'import cv2\n'), ((4968, 4997), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'im_bgr'], {}), '(filename, im_bgr)\n', (4979, 4997), False, 'import cv2\n'), ((5190, 5206), 'rospy.get_time', 'rospy.get_time', ([], {}), '()\n', (5204, 5206), False, 'import rospy\n'), ((581, 613), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model_path', '"""rb"""'], {}), "(model_path, 'rb')\n", (595, 613), True, 'import tensorflow as tf\n'), ((747, 789), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (766, 789), True, 'import tensorflow as tf\n'), ((4142, 4161), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (4152, 4161), True, 'import numpy as np\n'), ((5062, 5079), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (5071, 5079), True, 'import numpy as np\n'), ((4761, 4778), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4776, 4778), False, 'from datetime import datetime\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import sys
import torch
import numpy as np
import argparse
import re
import os
import torch.distributed as dist
from contextlib import contextmanager
def word_tokenize(sent):
    """Lower-case *sent* and split it into word and punctuation tokens.

    Any non-string input yields an empty list.
    """
    if not isinstance(sent, str):
        return []
    token_pattern = re.compile(r'[\w]+|[.,!?;|]')
    return token_pattern.findall(sent.lower())
def str2bool(v):
    """Parse common textual truthy/falsy spellings into a bool.

    Booleans pass through unchanged; unrecognised strings raise
    ``argparse.ArgumentTypeError`` so this can be used as an argparse type.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {"yes", "true", "t", "y", "1"}:
        return True
    if lowered in {"no", "false", "f", "n", "0"}:
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def setuplogging():
    """Configure the root logger to emit to stdout at the package LOG_LEVEL,
    replacing any handlers installed earlier."""
    from .world import LOG_LEVEL
    root_logger = logging.getLogger()
    root_logger.setLevel(LOG_LEVEL)
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(LOG_LEVEL)
    stream_handler.setFormatter(
        logging.Formatter("[%(levelname)s %(asctime)s] %(message)s"))
    # Drop any pre-existing handlers so messages are not duplicated.
    if root_logger.hasHandlers():
        root_logger.handlers.clear()
    root_logger.addHandler(stream_handler)
def init_process(rank, world_size):
    """Join the NCCL process group as *rank* and pin this process to its GPU.

    Sets the rendezvous endpoint through environment variables, then seeds
    torch so every worker starts from identical weights.
    """
    os.environ.update({
        'MASTER_ADDR': 'localhost',
        'MASTER_PORT': '12355',
        'RANK': str(rank),
    })
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    torch.cuda.set_device(rank)
    # Identical seed in every process -> identical initial model parameters.
    torch.manual_seed(42)
def cleanup_process():
    """Tear down the default torch.distributed process group."""
    dist.destroy_process_group()
def get_device():
    """Return the CUDA device for this process (from the RANK env var), else CPU."""
    if not torch.cuda.is_available():
        return torch.device('cpu')
    local_rank = os.environ.get("RANK", 0)
    return torch.device('cuda', int(local_rank))
def get_barrier(dist_training):
    """Return a synchronisation callable: dist.barrier when distributed, else a no-op."""
    if dist_training:
        return dist.barrier
    return lambda: None
@contextmanager
def only_on_main_process(local_rank, barrier):
    """
    Context manager that lets only the main process run the body.

    Non-main ranks wait on *barrier* before the body and yield False; the
    main rank yields True and releases everyone by calling *barrier* after
    the body (rank 0 only — rank -1 means non-distributed, no barrier).
    """
    is_main = local_rank in (-1, 0)
    if not is_main:
        barrier()
    yield is_main
    if local_rank == 0:
        barrier()
def init_world_size(world_size):
    """Resolve the requested world size; -1 means "use every visible GPU"."""
    available = torch.cuda.device_count()
    assert world_size <= available
    return available if world_size == -1 else world_size
def init_config(args, Configclass):
    """Build a model config from *Configclass* and sync it with *args*.

    Counts one text segment per title/abstract attribute and
    ``args.body_seg_num`` segments for the body; bus connections are only
    enabled when more than one segment exists.
    """
    cfg = Configclass.from_pretrained(
        args.config_name if args.config_name else args.model_name_or_path,
        output_hidden_states=True)
    seg_num = 0
    for attribute in args.news_attributes:
        if attribute in ('title', 'abstract'):
            seg_num += 1
        elif attribute == 'body':
            seg_num += args.body_seg_num
    args.seg_num = seg_num
    args.bus_num = seg_num if (seg_num > 1 and args.bus_connection) else 0
    cfg.bus_num = args.bus_num
    cfg.hidden_size = args.word_embedding_dim
    cfg.num_hidden_layers = args.bert_layer_hidden
    return args, cfg
def warmup_linear(args, step):
    """Linear LR warmup to 1.0 over ``warmup_step``, then linear decay floored at 1e-4."""
    if step <= args.warmup_step:
        return step / args.warmup_step
    remaining = (args.schedule_step - step) / (args.schedule_step - args.warmup_step)
    return max(1e-4, remaining)
def dump_args(args):
    """Log every public (non-underscore) attribute of *args*."""
    for arg in dir(args):
        if arg.startswith("_"):
            continue
        logging.info(f"args[{arg}]={getattr(args, arg)}")
def check_args_environment(args):
    """Force GPU-related options off when CUDA is unavailable.

    Args:
        args: namespace-like object with a boolean ``enable_gpu`` attribute.

    Returns:
        The same ``args`` object, with ``enable_gpu`` set to False when
        ``torch.cuda.is_available()`` is False.
    """
    cuda_available = torch.cuda.is_available()
    if not cuda_available:
        logging.warning("Cuda is not available, "
                        "related options will be disabled")
    # Use logical `and` instead of bitwise `&`: both operands are booleans,
    # `and` short-circuits and cannot produce surprising int results.
    args.enable_gpu = cuda_available and args.enable_gpu
    return args
def acc(y_true, y_hat):
    """Classification accuracy of logits *y_hat* against integer labels *y_true*."""
    predictions = torch.argmax(y_hat, dim=-1)
    correct = torch.sum(y_true == predictions)
    total = y_true.shape[0]
    return correct.data.float() * 1.0 / total
def dcg_score(y_true, y_score, k=10):
    """Discounted cumulative gain of *y_true* ranked by descending *y_score*, top *k*."""
    ranking = np.argsort(y_score)[::-1][:k]
    relevance = np.take(y_true, ranking)
    gains = 2 ** relevance - 1
    # Log2 position discount: positions are 1-based, discount log2(i + 1).
    discounts = np.log2(np.arange(len(relevance)) + 2)
    return np.sum(gains / discounts)
def ndcg_score(y_true, y_score, k=10):
    """Normalized DCG: the achieved DCG divided by the ideal (self-ranked) DCG."""
    ideal = dcg_score(y_true, y_true, k)
    achieved = dcg_score(y_true, y_score, k)
    return achieved / ideal
def mrr_score(y_true, y_score):
    """Reciprocal-rank score of the relevant items, normalised by their count."""
    ranking = np.argsort(y_score)[::-1]
    relevance = np.take(y_true, ranking)
    reciprocal = relevance / (np.arange(len(relevance)) + 1)
    return np.sum(reciprocal) / np.sum(y_true)
def ctr_score(y_true, y_score, k=1):
    """Mean label value among the top-*k* items ranked by descending *y_score*."""
    top_k = np.argsort(y_score)[::-1][:k]
    return np.mean(np.take(y_true, top_k))
def latest_checkpoint(directory):
    """Return the path of the highest-numbered checkpoint in *directory*.

    File names are expected to look like ``name-<step>.<ext>``; the integer
    between the last '-' and the last '.' is used as the step number.

    Args:
        directory: directory to scan.

    Returns:
        Full path of the checkpoint with the largest step, or None when the
        directory does not exist or contains no checkpoint-like files.
    """
    if not os.path.exists(directory):
        return None
    all_checkpoints = {}
    for fname in os.listdir(directory):
        # Robustness fix: skip files that do not match the expected pattern
        # (the original dict comprehension raised ValueError/IndexError on
        # e.g. "events.out" or extension-less files).
        try:
            step = int(fname.split('.')[-2].split('-')[-1])
        except (ValueError, IndexError):
            continue
        all_checkpoints[step] = fname
    if not all_checkpoints:
        return None
    return os.path.join(directory,
                        all_checkpoints[max(all_checkpoints.keys())])
def get_checkpoint(directory, ckpt_name):
    """Return the full path of *ckpt_name* inside *directory*, or None if absent."""
    ckpt_path = os.path.join(directory, ckpt_name)
    return ckpt_path if os.path.exists(ckpt_path) else None
| [
"numpy.sum",
"torch.argmax",
"torch.cuda.device_count",
"logging.Formatter",
"numpy.argsort",
"numpy.mean",
"torch.device",
"os.path.join",
"argparse.ArgumentTypeError",
"logging.warning",
"os.path.exists",
"torch.cuda.set_device",
"torch.manual_seed",
"logging.StreamHandler",
"torch.cud... | [((274, 303), 're.compile', 're.compile', (['"""[\\\\w]+|[.,!?;|]"""'], {}), "('[\\\\w]+|[.,!?;|]')\n", (284, 303), False, 'import re\n'), ((758, 777), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (775, 777), False, 'import logging\n'), ((917, 950), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (938, 950), False, 'import logging\n'), ((999, 1059), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s %(asctime)s] %(message)s"""'], {}), "('[%(levelname)s %(asctime)s] %(message)s')\n", (1016, 1059), False, 'import logging\n'), ((1381, 1446), 'torch.distributed.init_process_group', 'dist.init_process_group', (['"""nccl"""'], {'rank': 'rank', 'world_size': 'world_size'}), "('nccl', rank=rank, world_size=world_size)\n", (1404, 1446), True, 'import torch.distributed as dist\n'), ((1452, 1479), 'torch.cuda.set_device', 'torch.cuda.set_device', (['rank'], {}), '(rank)\n', (1473, 1479), False, 'import torch\n'), ((1614, 1635), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (1631, 1635), False, 'import torch\n'), ((1664, 1692), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (1690, 1692), True, 'import torch.distributed as dist\n'), ((1719, 1744), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1742, 1744), False, 'import torch\n'), ((1857, 1876), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1869, 1876), False, 'import torch\n'), ((3864, 3891), 'torch.argmax', 'torch.argmax', (['y_hat'], {'dim': '(-1)'}), '(y_hat, dim=-1)\n', (3876, 3891), False, 'import torch\n'), ((3928, 3954), 'torch.sum', 'torch.sum', (['(y_true == y_hat)'], {}), '(y_true == y_hat)\n', (3937, 3954), False, 'import torch\n'), ((4086, 4112), 'numpy.take', 'np.take', (['y_true', 'order[:k]'], {}), '(y_true, order[:k])\n', (4093, 4112), True, 'import numpy as np\n'), ((4202, 4227), 'numpy.sum', 'np.sum', (['(gains / 
discounts)'], {}), '(gains / discounts)\n', (4208, 4227), True, 'import numpy as np\n'), ((4462, 4484), 'numpy.take', 'np.take', (['y_true', 'order'], {}), '(y_true, order)\n', (4469, 4484), True, 'import numpy as np\n'), ((4673, 4699), 'numpy.take', 'np.take', (['y_true', 'order[:k]'], {}), '(y_true, order[:k])\n', (4680, 4699), True, 'import numpy as np\n'), ((4711, 4726), 'numpy.mean', 'np.mean', (['y_true'], {}), '(y_true)\n', (4718, 4726), True, 'import numpy as np\n'), ((5150, 5184), 'os.path.join', 'os.path.join', (['directory', 'ckpt_name'], {}), '(directory, ckpt_name)\n', (5162, 5184), False, 'import os\n'), ((5192, 5217), 'os.path.exists', 'os.path.exists', (['ckpt_path'], {}), '(ckpt_path)\n', (5206, 5217), False, 'import os\n'), ((1767, 1792), 'os.environ.get', 'os.environ.get', (['"""RANK"""', '(0)'], {}), "('RANK', 0)\n", (1781, 1792), False, 'import os\n'), ((2407, 2432), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2430, 2432), False, 'import torch\n'), ((2444, 2469), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2467, 2469), False, 'import torch\n'), ((3606, 3631), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3629, 3631), False, 'import torch\n'), ((3641, 3715), 'logging.warning', 'logging.warning', (['"""Cuda is not available, related options will be disabled"""'], {}), "('Cuda is not available, related options will be disabled')\n", (3656, 3715), False, 'import logging\n'), ((3767, 3792), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3790, 3792), False, 'import torch\n'), ((4047, 4066), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (4057, 4066), True, 'import numpy as np\n'), ((4423, 4442), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (4433, 4442), True, 'import numpy as np\n'), ((4549, 4565), 'numpy.sum', 'np.sum', (['rr_score'], {}), '(rr_score)\n', (4555, 4565), True, 'import numpy as np\n'), 
((4568, 4582), 'numpy.sum', 'np.sum', (['y_true'], {}), '(y_true)\n', (4574, 4582), True, 'import numpy as np\n'), ((4634, 4653), 'numpy.argsort', 'np.argsort', (['y_score'], {}), '(y_score)\n', (4644, 4653), True, 'import numpy as np\n'), ((4774, 4799), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (4788, 4799), False, 'import os\n'), ((637, 690), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (663, 690), False, 'import argparse\n'), ((4910, 4931), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (4920, 4931), False, 'import os\n')] |
"""
template_wt
Functions needed to generate a wind turbines
Notes:
To load this library: import cases.templates.template_wt as template_wt
"""
import pandas as pd
import numpy as np
import scipy.interpolate as scint
import math
import sys
import sharpy.utils.generate_cases as gc
import sharpy.utils.algebra as algebra
import sharpy.utils.h5utils as h5
from sharpy.utils.constants import deg2rad
######################################################################
# AUX FUNCTIONS
######################################################################
def create_node_radial_pos_from_elem_centres(root_elem_centres_tip, num_node, num_elem, num_node_elem):
    """
    create_node_radial_pos_from_elem_centres

    Build node and element radial positions of a blade from element centres.

    Args:
        root_elem_centres_tip (np.array): blade root position, element-centre
            positions, and blade tip position (in that order)
        num_node (int): number of nodes
        num_elem (int): number of elements
        num_node_elem (int): number of nodes per element

    Returns:
        node_r (np.array): radial position of the nodes
        elem_r (np.array): radial position of the elements

    Notes:
        Radial positions are measured from the hub centre, in the rotation plane.
    """
    # Element centres are everything between the root and tip entries.
    elem_r = root_elem_centres_tip[1:-1]
    node_r = np.zeros((num_node, ),)
    # Root, last element centre and tip map directly onto end nodes.
    node_r[0] = root_elem_centres_tip[0]
    node_r[-2] = root_elem_centres_tip[-2]
    node_r[-1] = root_elem_centres_tip[-1]
    for ielem in range(num_elem - 1):
        base = ielem * (num_node_elem - 1)
        # Mid node of the element sits at its centre; the shared node between
        # consecutive elements sits halfway between their centres.
        node_r[base + 1] = elem_r[ielem]
        node_r[base + 2] = 0.5 * (elem_r[ielem] + elem_r[ielem + 1])
    return node_r, elem_r
def create_blade_coordinates(num_node, node_r, node_y, node_z):
    """
    create_blade_coordinates

    Assemble the SHARPy node-coordinate array of a blade, combining the
    radial position with in-plane and out-of-plane offsets.

    Args:
        num_node (int): number of nodes
        node_r (np.array): radial position of the nodes
        node_y (np.array): displacement of each point IN the rotation plane
        node_z (np.array): displacement of each point OUT OF the rotation plane

    Returns:
        coordinates (np.array): (num_node, 3) array of node coordinates
    """
    coordinates = np.zeros((num_node, 3),)
    for axis, values in enumerate((node_r, node_y, node_z)):
        coordinates[:, axis] = values
    return coordinates
######################################################################
# FROM excel type02
######################################################################
def rotor_from_excel_type02(chord_panels,
                            rotation_velocity,
                            pitch_deg,
                            excel_file_name='database_excel_type02.xlsx',
                            excel_sheet_parameters='parameters',
                            excel_sheet_structural_blade='structural_blade',
                            excel_sheet_discretization_blade='discretization_blade',
                            excel_sheet_aero_blade='aero_blade',
                            excel_sheet_airfoil_info='airfoil_info',
                            excel_sheet_airfoil_coord='airfoil_coord',
                            m_distribution='uniform',
                            h5_cross_sec_prop=None,
                            n_points_camber=100,
                            tol_remove_points=1e-3,
                            user_defined_m_distribution_type=None,
                            camber_effect_on_twist=False,
                            wsp=0.,
                            dt=0.):
    """
    rotor_from_excel_type02

    Generate the full rotor (all blades, pitched, coned and tilted) of a wind
    turbine from an excel database type02

    Args:
        chord_panels (int): Number of panels on the blade surface in the chord direction
        rotation_velocity (float): Rotation velocity of the rotor
        pitch_deg (float): pitch angle in degrees
        excel_file_name (str): path of the excel database
        excel_sheet_parameters (str): sheet with global turbine parameters
        excel_sheet_structural_blade (str): sheet with blade structural properties
        excel_sheet_discretization_blade (str): sheet with the radial discretisation
        excel_sheet_aero_blade (str): sheet with blade aerodynamic properties
        excel_sheet_airfoil_info (str): sheet with airfoil names and thicknesses
        excel_sheet_airfoil_coord (str): sheet with airfoil coordinates
        m_distribution (str): chordwise panel distribution type
        h5_cross_sec_prop (str): h5 containing mass and stiffness matrices along the blade.
        n_points_camber (int): number of points to define the camber of the airfoil
        tol_remove_points (float): maximum distance to remove adjacent points
        user_defined_m_distribution_type (string): type of distribution of the chordwise panels when 'm_distribution' == 'user_defined'
        camber_effect_on_twist (bool): When true plain airfoils are used and the blade is twisted and preloaded based on thin airfoil theory
        wsp (float): wind speed (It may be needed for discretisation purposes)
        dt (float): time step (It may be needed for discretisation purposes)

    Returns:
        rotor (sharpy.utils.generate_cases.AeroelasticInfromation): Aeroelastic information of the rotor

    Note:
        - h5_cross_sec_prop is a path to a h5 containing the following groups:
            - str_prop: with:
                - K: list of 6x6 stiffness matrices
                - M: list of 6x6 mass matrices
                - radius: radial location (including hub) of K and M matrices
        - when h5_cross_sec_prop is not None, mass and stiffness properties are
          interpolated at BlFract location specified in "excel_sheet_structural_blade"
    """
    ######################################################################
    ## BLADE
    ######################################################################
    blade = gc.AeroelasticInformation()
    ######################################################################
    ### STRUCTURE
    ######################################################################
    # Read blade structural information from excel file
    rR_structural = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'rR')
    OutPElAxis = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'OutPElAxis')
    InPElAxis = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'InPElAxis')
    ElAxisAftLEc = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'ElAxisAftLEc')
    StrcTwst = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'StrcTwst')*deg2rad
    BMassDen = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'BMassDen')
    FlpStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlpStff')
    EdgStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'EdgStff')
    FlapEdgeStiff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlapEdgeStiff')
    GJStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'GJStff')
    EAStff = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'EAStff')
    FlpIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlpIner')
    EdgIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'EdgIner')
    FlapEdgeIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'FlapEdgeIner')
    PrebendRef = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'PrebendRef')
    PreswpRef = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'PreswpRef')
    OutPcg = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'OutPcg')
    InPcg = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_blade, 'InPcg')
    # Blade parameters
    TipRad = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'TipRad')
    # HubRad = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'HubRad')
    # Discretization points
    rR = gc.read_column_sheet_type01(excel_file_name, excel_sheet_discretization_blade, 'rR')
    # Interpolate excel variables into the correct locations
    # Geometry
    # NOTE(review): when the discretisation starts inboard of the first
    # structural station, prepend a constant-extrapolated station at rR = 0
    # to every structural column so np.interp has data covering the range.
    if rR[0] < rR_structural[0]:
        rR_structural = np.concatenate((np.array([0.]), rR_structural),)
        OutPElAxis = np.concatenate((np.array([OutPElAxis[0]]), OutPElAxis),)
        InPElAxis = np.concatenate((np.array([InPElAxis[0]]), InPElAxis),)
        ElAxisAftLEc = np.concatenate((np.array([ElAxisAftLEc[0]]), ElAxisAftLEc),)
        StrcTwst = np.concatenate((np.array([StrcTwst[0]]), StrcTwst),)
        BMassDen = np.concatenate((np.array([BMassDen[0]]), BMassDen),)
        FlpStff = np.concatenate((np.array([FlpStff[0]]), FlpStff),)
        EdgStff = np.concatenate((np.array([EdgStff[0]]), EdgStff),)
        FlapEdgeStiff = np.concatenate((np.array([FlapEdgeStiff[0]]), FlapEdgeStiff),)
        GJStff = np.concatenate((np.array([GJStff[0]]), GJStff),)
        EAStff = np.concatenate((np.array([EAStff[0]]), EAStff),)
        FlpIner = np.concatenate((np.array([FlpIner[0]]), FlpIner),)
        EdgIner = np.concatenate((np.array([EdgIner[0]]), EdgIner),)
        FlapEdgeIner = np.concatenate((np.array([FlapEdgeIner[0]]), FlapEdgeIner),)
        PrebendRef = np.concatenate((np.array([PrebendRef[0]]), PrebendRef),)
        PreswpRef = np.concatenate((np.array([PreswpRef[0]]), PreswpRef),)
        OutPcg = np.concatenate((np.array([OutPcg[0]]), OutPcg),)
        InPcg = np.concatenate((np.array([InPcg[0]]), InPcg),)
    # Base parameters
    # NOTE(review): hard-coded switch — only the "excel stations as nodes"
    # branch below is ever taken.
    use_excel_struct_as_elem = False
    if use_excel_struct_as_elem:
        blade.StructuralInformation.num_node_elem = 3
        blade.StructuralInformation.num_elem = len(rR) - 2
        blade.StructuralInformation.compute_basic_num_node()
        node_r, elem_r = create_node_radial_pos_from_elem_centres(rR*TipRad,
                                                                  blade.StructuralInformation.num_node,
                                                                  blade.StructuralInformation.num_elem,
                                                                  blade.StructuralInformation.num_node_elem)
    else:
        # Use excel struct as nodes
        # Check the number of nodes
        blade.StructuralInformation.num_node_elem = 3
        blade.StructuralInformation.num_node = len(rR)
        if ((len(rR) - 1) % (blade.StructuralInformation.num_node_elem - 1)) == 0:
            blade.StructuralInformation.num_elem = int((len(rR) - 1)/(blade.StructuralInformation.num_node_elem - 1))
            node_r = rR*TipRad
            # Mid nodes (every second node) are the element centres.
            elem_rR = rR[1::2] + 0.
            elem_r = rR[1::2]*TipRad + 0.
        else:
            print("ERROR: Cannot build ", blade.StructuralInformation.num_node_elem, "-noded elements from ", blade.StructuralInformation.num_node, "nodes")
    node_y = np.interp(rR, rR_structural, InPElAxis) + np.interp(rR, rR_structural, PreswpRef)
    node_z = -np.interp(rR, rR_structural, OutPElAxis) - np.interp(rR, rR_structural, PrebendRef)
    # NOTE(review): twist sign is flipped here — presumably to match the
    # SHARPy structural twist convention; confirm against generate_cases.
    node_twist = -1.0*np.interp(rR, rR_structural, StrcTwst)
    coordinates = create_blade_coordinates(blade.StructuralInformation.num_node, node_r, node_y, node_z)
    if h5_cross_sec_prop is None:
        # Stiffness
        elem_EA = np.interp(elem_rR, rR_structural, EAStff)
        elem_EIy = np.interp(elem_rR, rR_structural, FlpStff)
        elem_EIz = np.interp(elem_rR, rR_structural, EdgStff)
        elem_EIyz = np.interp(elem_rR, rR_structural, FlapEdgeStiff)
        elem_GJ = np.interp(elem_rR, rR_structural, GJStff)
        # Stiffness: estimate unknown properties
        print('WARNING: The poisson cofficient is assumed equal to 0.3')
        print('WARNING: Cross-section area is used as shear area')
        poisson_coef = 0.3
        elem_GAy = elem_EA/2.0/(1.0+poisson_coef)
        elem_GAz = elem_EA/2.0/(1.0+poisson_coef)
        # Inertia
        elem_pos_cg_B = np.zeros((blade.StructuralInformation.num_elem, 3),)
        elem_pos_cg_B[:, 1] = np.interp(elem_rR, rR_structural, InPcg)
        elem_pos_cg_B[:, 2] = -np.interp(elem_rR, rR_structural, OutPcg)
        elem_mass_per_unit_length = np.interp(elem_rR, rR_structural, BMassDen)
        elem_mass_iner_y = np.interp(elem_rR, rR_structural, FlpIner)
        elem_mass_iner_z = np.interp(elem_rR, rR_structural, EdgIner)
        elem_mass_iner_yz = np.interp(elem_rR, rR_structural, FlapEdgeIner)
        # Inertia: estimate unknown properties
        print('WARNING: Using perpendicular axis theorem to compute the inertia around xB')
        elem_mass_iner_x = elem_mass_iner_y + elem_mass_iner_z
        # Generate blade structural properties
        blade.StructuralInformation.create_mass_db_from_vector(elem_mass_per_unit_length, elem_mass_iner_x, elem_mass_iner_y, elem_mass_iner_z, elem_pos_cg_B, elem_mass_iner_yz)
        blade.StructuralInformation.create_stiff_db_from_vector(elem_EA, elem_GAy, elem_GAz, elem_GJ, elem_EIy, elem_EIz, elem_EIyz)
    else:
        # read Mass/Stiffness from database
        cross_prop = h5.readh5(h5_cross_sec_prop).str_prop
        # create mass_db/stiffness_db (interpolate at mid-node of each element)
        # node_r[1::2] selects the mid node of every 3-noded element.
        blade.StructuralInformation.mass_db = scint.interp1d(
            cross_prop.radius, cross_prop.M, kind='cubic', copy=False, assume_sorted=True, axis=0,
            bounds_error=False, fill_value='extrapolate')(node_r[1::2])
        blade.StructuralInformation.stiffness_db = scint.interp1d(
            cross_prop.radius, cross_prop.K, kind='cubic', copy=False, assume_sorted=True, axis=0,
            bounds_error=False, fill_value='extrapolate')(node_r[1::2])
    blade.StructuralInformation.generate_1to1_from_vectors(
        num_node_elem=blade.StructuralInformation.num_node_elem,
        num_node=blade.StructuralInformation.num_node,
        num_elem=blade.StructuralInformation.num_elem,
        coordinates=coordinates,
        stiffness_db=blade.StructuralInformation.stiffness_db,
        mass_db=blade.StructuralInformation.mass_db,
        frame_of_reference_delta='y_AFoR',
        vec_node_structural_twist=node_twist,
        num_lumped_mass=0)
    # Boundary conditions
    # 1 marks the clamped root node, -1 the free tip node.
    blade.StructuralInformation.boundary_conditions = np.zeros((blade.StructuralInformation.num_node), dtype=int)
    blade.StructuralInformation.boundary_conditions[0] = 1
    blade.StructuralInformation.boundary_conditions[-1] = -1
    ######################################################################
    ### AERODYNAMICS
    ######################################################################
    # Read blade aerodynamic information from excel file
    rR_aero = gc.read_column_sheet_type01(excel_file_name, excel_sheet_aero_blade, 'rR')
    chord_aero = gc.read_column_sheet_type01(excel_file_name, excel_sheet_aero_blade, 'BlChord')
    thickness_aero = gc.read_column_sheet_type01(excel_file_name, excel_sheet_aero_blade, 'BlThickness')
    pure_airfoils_names = gc.read_column_sheet_type01(excel_file_name, excel_sheet_airfoil_info, 'Name')
    pure_airfoils_thickness = gc.read_column_sheet_type01(excel_file_name, excel_sheet_airfoil_info, 'Thickness')
    node_ElAxisAftLEc = np.interp(node_r, rR_structural*TipRad, ElAxisAftLEc)
    # Read coordinates of the pure airfoils
    n_pure_airfoils = len(pure_airfoils_names)
    pure_airfoils_camber = np.zeros((n_pure_airfoils, n_points_camber, 2),)
    xls = pd.ExcelFile(excel_file_name)
    excel_db = pd.read_excel(xls, sheet_name=excel_sheet_airfoil_coord)
    for iairfoil in range(n_pure_airfoils):
        # Look for the NaN
        # Each airfoil column is padded with NaN after its last coordinate;
        # icoord ends up one past the final valid row.
        icoord = 2
        while(not(math.isnan(excel_db["%s_x" % pure_airfoils_names[iairfoil]][icoord]))):
            icoord += 1
            if(icoord == len(excel_db["%s_x" % pure_airfoils_names[iairfoil]])):
                break
        # Compute the camber of the airfoils at the defined chord points
        pure_airfoils_camber[iairfoil, :, 0], pure_airfoils_camber[iairfoil, :, 1] = gc.get_airfoil_camber(excel_db["%s_x" % pure_airfoils_names[iairfoil]][2:icoord],
                                                                                                           excel_db["%s_y" % pure_airfoils_names[iairfoil]][2:icoord],
                                                                                                           n_points_camber)
    # Basic variables
    surface_distribution = np.zeros((blade.StructuralInformation.num_elem), dtype=int)
    # Interpolate in the correct positions
    node_chord = np.interp(node_r, rR_aero*TipRad, chord_aero)
    # Define the nodes with aerodynamic properties
    # Look for the first element that is goint to be aerodynamic
    first_aero_elem = 0
    while (elem_r[first_aero_elem] <= rR_aero[0]*TipRad):
        first_aero_elem += 1
    first_aero_node = first_aero_elem*(blade.StructuralInformation.num_node_elem - 1)
    # Nodes inboard of the first aerodynamic station carry no aero surface.
    aero_node = np.zeros((blade.StructuralInformation.num_node,), dtype=bool)
    aero_node[first_aero_node:] = np.ones((blade.StructuralInformation.num_node-first_aero_node,), dtype=bool)
    # Define the airfoil at each stage
    # airfoils = blade.AerodynamicInformation.interpolate_airfoils_camber(pure_airfoils_camber,excel_aero_r, node_r, n_points_camber)
    node_thickness = np.interp(node_r, rR_aero*TipRad, thickness_aero)
    airfoils = blade.AerodynamicInformation.interpolate_airfoils_camber_thickness(pure_airfoils_camber, pure_airfoils_thickness, node_thickness, n_points_camber)
    airfoil_distribution = np.linspace(0, blade.StructuralInformation.num_node - 1, blade.StructuralInformation.num_node, dtype=int)
    # User defined m distribution
    if (m_distribution == 'user_defined') and (user_defined_m_distribution_type == 'last_geometric'):
        # Build a per-node chordwise panel distribution whose last (trailing
        # edge) panel matches the wake step vrel*dt, growing geometrically
        # towards the leading edge.
        blade_nodes = blade.StructuralInformation.num_node
        udmd_by_nodes = np.zeros((blade_nodes, chord_panels[0] + 1))
        for inode in range(blade_nodes):
            r = np.linalg.norm(blade.StructuralInformation.coordinates[inode, :])
            vrel = np.sqrt(rotation_velocity**2*r**2 + wsp**2)
            last_length = vrel*dt/node_chord[inode]
            last_length = np.minimum(last_length, 0.5)
            if last_length <= 0.5:
                ratio = gc.get_factor_geometric_progression(last_length, 1., chord_panels)
                udmd_by_nodes[inode, -1] = 1.
                udmd_by_nodes[inode, 0] = 0.
                for im in range(chord_panels[0] - 1, 0, -1):
                    udmd_by_nodes[inode, im] = udmd_by_nodes[inode, im + 1] - last_length
                    last_length *= ratio
                # Check
                # NOTE(review): sys.error does not exist in the stdlib — this
                # branch would raise AttributeError if it ever fires.
                if (np.diff(udmd_by_nodes[inode, :]) < 0.).any():
                    sys.error("ERROR in the panel discretization of the blade in node %d" % (inode))
            else:
                print("ERROR: cannot match the last panel size for node:", inode)
                udmd_by_nodes[inode, :] = np.linspace(0, 1, chord_panels + 1)
    else:
        udmd_by_nodes = None
    node_twist = np.zeros_like(node_chord)
    if camber_effect_on_twist:
        # Replace cambered airfoils by flat plates: twist the blade by the
        # zero-lift angle and preload the equivalent quarter-chord moment.
        print("WARNING: The steady applied Mx should be manually multiplied by the density")
        for inode in range(blade.StructuralInformation.num_node):
            node_twist[inode] = gc.get_aoacl0_from_camber(airfoils[inode, :, 0], airfoils[inode, :, 1])
            mu0 = gc.get_mu0_from_camber(airfoils[inode, :, 0], airfoils[inode, :, 1])
            r = np.linalg.norm(blade.StructuralInformation.coordinates[inode, :])
            vrel = np.sqrt(rotation_velocity**2*r**2 + wsp**2)
            # dr: half the distance between neighbouring nodes (one-sided at
            # the root and tip) — the span associated with this node.
            if inode == 0:
                dr = 0.5*np.linalg.norm(blade.StructuralInformation.coordinates[1, :] - blade.StructuralInformation.coordinates[0, :])
            elif inode == len(blade.StructuralInformation.coordinates[:, 0]) - 1:
                dr = 0.5*np.linalg.norm(blade.StructuralInformation.coordinates[-1, :] - blade.StructuralInformation.coordinates[-2, :])
            else:
                dr = 0.5*np.linalg.norm(blade.StructuralInformation.coordinates[inode + 1, :] - blade.StructuralInformation.coordinates[inode - 1, :])
            moment_factor = 0.5*vrel**2*node_chord[inode]**2*dr
            # print("node", inode, "mu0", mu0, "CMc/4", 2.*mu0 + np.pi/2*node_twist[inode])
            blade.StructuralInformation.app_forces[inode, 3] = (2.*mu0 + np.pi/2*node_twist[inode])*moment_factor
            # Flatten the camber line: the plain airfoil replaces it.
            airfoils[inode, :, 1] *= 0.
    # Write SHARPy format
    blade.AerodynamicInformation.create_aerodynamics_from_vec(blade.StructuralInformation,
                                                              aero_node,
                                                              node_chord,
                                                              node_twist,
                                                              np.pi*np.ones_like(node_chord),
                                                              chord_panels,
                                                              surface_distribution,
                                                              m_distribution,
                                                              node_ElAxisAftLEc,
                                                              airfoil_distribution,
                                                              airfoils,
                                                              udmd_by_nodes)
    ######################################################################
    ## ROTOR
    ######################################################################
    # Read from excel file
    numberOfBlades = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'NumBl')
    tilt = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'ShftTilt')*deg2rad
    cone = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'Cone')*deg2rad
    # pitch = gc.read_column_sheet_type01(excel_file_name, excel_sheet_rotor, 'Pitch')*deg2rad
    # Apply pitch
    blade.StructuralInformation.rotate_around_origin(np.array([1., 0., 0.]), -pitch_deg*deg2rad)
    # Apply coning
    blade.StructuralInformation.rotate_around_origin(np.array([0., 1., 0.]), -cone)
    # Build the whole rotor
    # Copy the first blade and rotate the copies evenly around the shaft axis.
    rotor = blade.copy()
    for iblade in range(numberOfBlades-1):
        blade2 = blade.copy()
        blade2.StructuralInformation.rotate_around_origin(np.array([0., 0., 1.]), (iblade + 1)*(360.0/numberOfBlades)*deg2rad)
        rotor.assembly(blade2)
        blade2 = None
    rotor.remove_duplicated_points(tol_remove_points)
    # Apply tilt
    rotor.StructuralInformation.rotate_around_origin(np.array([0., 1., 0.]), tilt)
    return rotor
def generate_from_excel_type02(chord_panels,
                               rotation_velocity,
                               pitch_deg,
                               excel_file_name='database_excel_type02.xlsx',
                               excel_sheet_parameters='parameters',
                               excel_sheet_structural_blade='structural_blade',
                               excel_sheet_discretization_blade='discretization_blade',
                               excel_sheet_aero_blade='aero_blade',
                               excel_sheet_airfoil_info='airfoil_info',
                               excel_sheet_airfoil_coord='airfoil_coord',
                               excel_sheet_structural_tower='structural_tower',
                               m_distribution='uniform',
                               h5_cross_sec_prop=None,
                               n_points_camber=100,
                               tol_remove_points=1e-3,
                               user_defined_m_distribution_type=None,
                               wsp=0.,
                               dt=0.):
    """
    generate_from_excel_type02

    Generate a complete multibody wind turbine (rotor + tower + overhang +
    nacelle lumped mass) from an excel database according to OpenFAST inputs.

    Args:
        chord_panels (int): Number of panels on the blade surface in the chord direction
        rotation_velocity (float): Rotation velocity of the rotor
        pitch_deg (float): pitch angle in degrees
        excel_file_name (str): path of the excel database
        excel_sheet_parameters (str): sheet with the global turbine parameters
        excel_sheet_structural_blade (str): sheet with the blade structural properties
        excel_sheet_discretization_blade (str): sheet with the blade discretization
        excel_sheet_aero_blade (str): sheet with the blade aerodynamic properties
        excel_sheet_airfoil_info (str): sheet with the airfoil information
        excel_sheet_airfoil_coord (str): sheet with the airfoil coordinates
        excel_sheet_structural_tower (str): sheet with the tower structural properties
        m_distribution (str): distribution of the chordwise panels
        h5_cross_sec_prop (str): path to a h5 file with the blade cross-section properties
        n_points_camber (int): number of points to define the camber of the airfoil
        tol_remove_points (float): maximum distance to remove adjacent points
        user_defined_m_distribution_type (string): type of distribution of the chordwise panels when 'm_distribution' == 'user_defined'
        wsp (float): wind speed (It may be needed for discretisation purposes)
        dt (float): time step (It may be needed for discretisation purposes)

    Returns:
        wt (sharpy.utils.generate_cases.AeroelasticInformation): Aeroelastic information of the wind turbine
        LC (list): list of all the Lagrange constraints needed in the cases (sharpy.utils.generate_cases.LagrangeConstraint)
        MB (list): list of the multibody information of each body (sharpy.utils.generate_cases.BodyInformation)
    """
    # Build the rotor (blades pitched, coned, replicated and tilted).
    # BUG FIX: forward the caller-supplied ``wsp`` and ``dt`` instead of the
    # hard-coded zeros the original passed, which silently discarded these
    # arguments.
    rotor = rotor_from_excel_type02(chord_panels,
                                    rotation_velocity,
                                    pitch_deg,
                                    excel_file_name=excel_file_name,
                                    excel_sheet_parameters=excel_sheet_parameters,
                                    excel_sheet_structural_blade=excel_sheet_structural_blade,
                                    excel_sheet_discretization_blade=excel_sheet_discretization_blade,
                                    excel_sheet_aero_blade=excel_sheet_aero_blade,
                                    excel_sheet_airfoil_info=excel_sheet_airfoil_info,
                                    excel_sheet_airfoil_coord=excel_sheet_airfoil_coord,
                                    m_distribution=m_distribution,
                                    h5_cross_sec_prop=h5_cross_sec_prop,
                                    n_points_camber=n_points_camber,
                                    tol_remove_points=tol_remove_points,
                                    user_defined_m_distribution_type=user_defined_m_distribution_type,
                                    wsp=wsp,
                                    dt=dt)

    ######################################################################
    ## TOWER
    ######################################################################
    # Tower distributed properties from the excel database (OpenFAST naming).
    HtFract = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'HtFract')
    TMassDen = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TMassDen')
    TwFAStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwFAStif')
    TwSSStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwSSStif')
    # TODO: variables to be defined
    TwGJStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwGJStif')
    TwEAStif = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwEAStif')
    TwFAIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwFAIner')
    TwSSIner = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwSSIner')
    TwFAcgOf = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwFAcgOf')
    TwSScgOf = gc.read_column_sheet_type01(excel_file_name, excel_sheet_structural_tower, 'TwSScgOf')

    # Define the TOWER: height fractions scaled to actual elevations.
    TowerHt = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'TowerHt')
    Elevation = TowerHt*HtFract

    tower = gc.AeroelasticInformation()
    tower.StructuralInformation.num_elem = len(Elevation) - 2
    tower.StructuralInformation.num_node_elem = 3
    tower.StructuralInformation.compute_basic_num_node()

    # Interpolate excel variables into the correct locations
    node_r, elem_r = create_node_radial_pos_from_elem_centres(Elevation,
                                                    tower.StructuralInformation.num_node,
                                                    tower.StructuralInformation.num_elem,
                                                    tower.StructuralInformation.num_node_elem)
    # Stiffness
    elem_EA = np.interp(elem_r, Elevation, TwEAStif)
    elem_EIz = np.interp(elem_r, Elevation, TwSSStif)
    elem_EIy = np.interp(elem_r, Elevation, TwFAStif)
    elem_GJ = np.interp(elem_r, Elevation, TwGJStif)
    # Stiffness: estimate unknown properties
    print('WARNING: The poisson cofficient is assumed equal to 0.3')
    print('WARNING: Cross-section area is used as shear area')
    poisson_coef = 0.3
    elem_GAy = elem_EA/2.0/(1.0+poisson_coef)
    elem_GAz = elem_EA/2.0/(1.0+poisson_coef)
    # Inertia
    elem_mass_per_unit_length = np.interp(elem_r, Elevation, TMassDen)
    elem_mass_iner_y = np.interp(elem_r, Elevation, TwFAIner)
    elem_mass_iner_z = np.interp(elem_r, Elevation, TwSSIner)
    # TODO: check yz axis and Flap-edge
    elem_pos_cg_B = np.zeros((tower.StructuralInformation.num_elem, 3),)
    elem_pos_cg_B[:, 1] = np.interp(elem_r, Elevation, TwSScgOf)
    elem_pos_cg_B[:, 2] = np.interp(elem_r, Elevation, TwFAcgOf)
    # Inertia: estimate unknown properties
    print('WARNING: Using perpendicular axis theorem to compute the inertia around xB')
    elem_mass_iner_x = elem_mass_iner_y + elem_mass_iner_z

    # Create the tower
    tower.StructuralInformation.create_mass_db_from_vector(elem_mass_per_unit_length, elem_mass_iner_x, elem_mass_iner_y, elem_mass_iner_z, elem_pos_cg_B)
    tower.StructuralInformation.create_stiff_db_from_vector(elem_EA, elem_GAy, elem_GAz, elem_GJ, elem_EIy, elem_EIz)

    coordinates = np.zeros((tower.StructuralInformation.num_node, 3),)
    coordinates[:, 0] = node_r

    tower.StructuralInformation.generate_1to1_from_vectors(
            num_node_elem=tower.StructuralInformation.num_node_elem,
            num_node=tower.StructuralInformation.num_node,
            num_elem=tower.StructuralInformation.num_elem,
            coordinates=coordinates,
            stiffness_db=tower.StructuralInformation.stiffness_db,
            mass_db=tower.StructuralInformation.mass_db,
            frame_of_reference_delta='y_AFoR',
            vec_node_structural_twist=np.zeros((tower.StructuralInformation.num_node,),),
            num_lumped_mass=1)

    # Clamp the tower base.
    tower.StructuralInformation.boundary_conditions = np.zeros((tower.StructuralInformation.num_node), dtype=int)
    tower.StructuralInformation.boundary_conditions[0] = 1

    # Read overhang and nacelle properties from excel file
    overhang_len = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'overhang')
    NacelleMass = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'NacMass')

    # Lump the nacelle mass at the tower-top node.
    tower.StructuralInformation.lumped_mass_nodes = np.array([tower.StructuralInformation.num_node - 1], dtype=int)
    tower.StructuralInformation.lumped_mass = np.array([NacelleMass], dtype=float)

    # The tower carries no aerodynamic surface.
    tower.AerodynamicInformation.set_to_zero(tower.StructuralInformation.num_node_elem,
                                             tower.StructuralInformation.num_node,
                                             tower.StructuralInformation.num_elem)

    # Assembly overhang with the tower.
    # NOTE: ``tilt`` is in radians from here on (deg2rad applied at read time).
    tilt = gc.read_column_sheet_type01(excel_file_name, excel_sheet_parameters, 'ShftTilt')*deg2rad

    overhang = gc.AeroelasticInformation()
    overhang.StructuralInformation.num_node = 3
    overhang.StructuralInformation.num_node_elem = 3
    overhang.StructuralInformation.compute_basic_num_elem()
    node_pos = np.zeros((overhang.StructuralInformation.num_node, 3), )
    node_pos[:, 0] += tower.StructuralInformation.coordinates[-1, 0]
    # BUG FIX: ``tilt`` is already in radians (converted when read above); the
    # original multiplied by deg2rad a second time inside sin/cos, which is
    # inconsistent with how ``tilt`` is used elsewhere in this file.
    node_pos[:, 0] += np.linspace(0., overhang_len*np.sin(tilt), overhang.StructuralInformation.num_node)
    node_pos[:, 2] = np.linspace(0., -overhang_len*np.cos(tilt), overhang.StructuralInformation.num_node)
    # TODO: change the following by real values
    # Same properties as the last element of the tower
    print("WARNING: Using the structural properties of the last tower section for the overhang")
    oh_mass_per_unit_length = tower.StructuralInformation.mass_db[-1, 0, 0]
    oh_mass_iner = tower.StructuralInformation.mass_db[-1, 3, 3]
    oh_EA = tower.StructuralInformation.stiffness_db[-1, 0, 0]
    oh_GA = tower.StructuralInformation.stiffness_db[-1, 1, 1]
    oh_GJ = tower.StructuralInformation.stiffness_db[-1, 3, 3]
    oh_EI = tower.StructuralInformation.stiffness_db[-1, 4, 4]

    overhang.StructuralInformation.generate_uniform_sym_beam(node_pos,
                                                             oh_mass_per_unit_length,
                                                             oh_mass_iner,
                                                             oh_EA,
                                                             oh_GA,
                                                             oh_GJ,
                                                             oh_EI,
                                                             num_node_elem=3,
                                                             y_BFoR='y_AFoR',
                                                             num_lumped_mass=0)
    # Overhang tip (-1) is the connection point to the rotor body.
    overhang.StructuralInformation.boundary_conditions = np.zeros((overhang.StructuralInformation.num_node), dtype=int)
    overhang.StructuralInformation.boundary_conditions[-1] = -1
    overhang.AerodynamicInformation.set_to_zero(overhang.StructuralInformation.num_node_elem,
                                                overhang.StructuralInformation.num_node,
                                                overhang.StructuralInformation.num_elem)

    tower.assembly(overhang)
    tower.remove_duplicated_points(tol_remove_points)

    ######################################################################
    ##  WIND TURBINE
    ######################################################################
    # Assembly the whole case
    wt = tower.copy()
    hub_position = tower.StructuralInformation.coordinates[-1, :]
    rotor.StructuralInformation.coordinates += hub_position
    wt.assembly(rotor)

    # Redefine the body numbers: tower+overhang -> body 0, rotor -> body 1.
    wt.StructuralInformation.body_number *= 0
    wt.StructuralInformation.body_number[tower.StructuralInformation.num_elem:wt.StructuralInformation.num_elem] += 1

    ######################################################################
    ## MULTIBODY
    ######################################################################
    # Hinge between the rotor FoR and the tower tip, spinning at constant
    # velocity around the shaft axis (xB).
    LC1 = gc.LagrangeConstraint()
    LC1.behaviour = 'hinge_node_FoR_constant_vel'
    LC1.node_in_body = tower.StructuralInformation.num_node - 1
    LC1.body = 0
    LC1.body_FoR = 1
    LC1.rot_axisB = np.array([1., 0., 0.0])
    LC1.rot_vel = -rotation_velocity

    LC = []
    LC.append(LC1)

    # Define the multibody information for the tower and the rotor
    MB1 = gc.BodyInformation()
    MB1.body_number = 0
    MB1.FoR_position = np.zeros((6,),)
    MB1.FoR_velocity = np.zeros((6,),)
    MB1.FoR_acceleration = np.zeros((6,),)
    MB1.FoR_movement = 'prescribed'
    MB1.quat = np.array([1.0, 0.0, 0.0, 0.0])

    MB2 = gc.BodyInformation()
    MB2.body_number = 1
    MB2.FoR_position = np.array([rotor.StructuralInformation.coordinates[0, 0], rotor.StructuralInformation.coordinates[0, 1], rotor.StructuralInformation.coordinates[0, 2], 0.0, 0.0, 0.0])
    MB2.FoR_velocity = np.array([0., 0., 0., 0., 0., rotation_velocity])
    MB2.FoR_acceleration = np.zeros((6,),)
    MB2.FoR_movement = 'free'
    # Rotor FoR tilted about y to align the spin axis with the shaft.
    MB2.quat = algebra.euler2quat(np.array([0.0, tilt, 0.0]))

    MB = []
    MB.append(MB1)
    MB.append(MB2)

    ######################################################################
    ## RETURN
    ######################################################################
    return wt, LC, MB
| [
"sys.error",
"numpy.ones",
"sharpy.utils.generate_cases.AeroelasticInformation",
"sharpy.utils.generate_cases.get_mu0_from_camber",
"numpy.sin",
"numpy.linalg.norm",
"numpy.interp",
"scipy.interpolate.interp1d",
"sharpy.utils.h5utils.readh5",
"sharpy.utils.generate_cases.read_column_sheet_type01",... | [((1550, 1571), 'numpy.zeros', 'np.zeros', (['(num_node,)'], {}), '((num_node,))\n', (1558, 1571), True, 'import numpy as np\n'), ((2477, 2500), 'numpy.zeros', 'np.zeros', (['(num_node, 3)'], {}), '((num_node, 3))\n', (2485, 2500), True, 'import numpy as np\n'), ((5920, 5947), 'sharpy.utils.generate_cases.AeroelasticInformation', 'gc.AeroelasticInformation', ([], {}), '()\n', (5945, 5947), True, 'import sharpy.utils.generate_cases as gc\n'), ((6193, 6278), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""rR"""'], {}), "(excel_file_name, excel_sheet_structural_blade, 'rR'\n )\n", (6220, 6278), True, 'import sharpy.utils.generate_cases as gc\n'), ((6291, 6383), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""OutPElAxis"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'OutPElAxis')\n", (6318, 6383), True, 'import sharpy.utils.generate_cases as gc\n'), ((6396, 6487), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""InPElAxis"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'InPElAxis')\n", (6423, 6487), True, 'import sharpy.utils.generate_cases as gc\n'), ((6503, 6597), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""ElAxisAftLEc"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'ElAxisAftLEc')\n", (6530, 6597), True, 'import sharpy.utils.generate_cases as gc\n'), ((6719, 6809), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""BMassDen"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 
'BMassDen')\n", (6746, 6809), True, 'import sharpy.utils.generate_cases as gc\n'), ((6820, 6909), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""FlpStff"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'FlpStff')\n", (6847, 6909), True, 'import sharpy.utils.generate_cases as gc\n'), ((6920, 7009), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""EdgStff"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'EdgStff')\n", (6947, 7009), True, 'import sharpy.utils.generate_cases as gc\n'), ((7026, 7121), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""FlapEdgeStiff"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'FlapEdgeStiff')\n", (7053, 7121), True, 'import sharpy.utils.generate_cases as gc\n'), ((7131, 7219), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""GJStff"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'GJStff')\n", (7158, 7219), True, 'import sharpy.utils.generate_cases as gc\n'), ((7229, 7317), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""EAStff"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'EAStff')\n", (7256, 7317), True, 'import sharpy.utils.generate_cases as gc\n'), ((7328, 7417), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""FlpIner"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'FlpIner')\n", (7355, 7417), True, 'import sharpy.utils.generate_cases as gc\n'), ((7428, 7517), 
'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""EdgIner"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'EdgIner')\n", (7455, 7517), True, 'import sharpy.utils.generate_cases as gc\n'), ((7533, 7627), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""FlapEdgeIner"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'FlapEdgeIner')\n", (7560, 7627), True, 'import sharpy.utils.generate_cases as gc\n'), ((7641, 7733), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""PrebendRef"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'PrebendRef')\n", (7668, 7733), True, 'import sharpy.utils.generate_cases as gc\n'), ((7746, 7837), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""PreswpRef"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'PreswpRef')\n", (7773, 7837), True, 'import sharpy.utils.generate_cases as gc\n'), ((7847, 7935), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""OutPcg"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'OutPcg')\n", (7874, 7935), True, 'import sharpy.utils.generate_cases as gc\n'), ((7944, 8031), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""InPcg"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'InPcg')\n", (7971, 8031), True, 'import sharpy.utils.generate_cases as gc\n'), ((8065, 8143), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 
'excel_sheet_parameters', '"""TipRad"""'], {}), "(excel_file_name, excel_sheet_parameters, 'TipRad')\n", (8092, 8143), True, 'import sharpy.utils.generate_cases as gc\n'), ((8276, 8364), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_discretization_blade', '"""rR"""'], {}), "(excel_file_name,\n excel_sheet_discretization_blade, 'rR')\n", (8303, 8364), True, 'import sharpy.utils.generate_cases as gc\n'), ((14578, 14635), 'numpy.zeros', 'np.zeros', (['blade.StructuralInformation.num_node'], {'dtype': 'int'}), '(blade.StructuralInformation.num_node, dtype=int)\n', (14586, 14635), True, 'import numpy as np\n'), ((15001, 15075), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_aero_blade', '"""rR"""'], {}), "(excel_file_name, excel_sheet_aero_blade, 'rR')\n", (15028, 15075), True, 'import sharpy.utils.generate_cases as gc\n'), ((15093, 15172), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_aero_blade', '"""BlChord"""'], {}), "(excel_file_name, excel_sheet_aero_blade, 'BlChord')\n", (15120, 15172), True, 'import sharpy.utils.generate_cases as gc\n'), ((15194, 15281), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_aero_blade', '"""BlThickness"""'], {}), "(excel_file_name, excel_sheet_aero_blade,\n 'BlThickness')\n", (15221, 15281), True, 'import sharpy.utils.generate_cases as gc\n'), ((15305, 15383), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_airfoil_info', '"""Name"""'], {}), "(excel_file_name, excel_sheet_airfoil_info, 'Name')\n", (15332, 15383), True, 'import sharpy.utils.generate_cases as gc\n'), ((15414, 15501), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', 
(['excel_file_name', 'excel_sheet_airfoil_info', '"""Thickness"""'], {}), "(excel_file_name, excel_sheet_airfoil_info,\n 'Thickness')\n", (15441, 15501), True, 'import sharpy.utils.generate_cases as gc\n'), ((15523, 15578), 'numpy.interp', 'np.interp', (['node_r', '(rR_structural * TipRad)', 'ElAxisAftLEc'], {}), '(node_r, rR_structural * TipRad, ElAxisAftLEc)\n', (15532, 15578), True, 'import numpy as np\n'), ((15697, 15744), 'numpy.zeros', 'np.zeros', (['(n_pure_airfoils, n_points_camber, 2)'], {}), '((n_pure_airfoils, n_points_camber, 2))\n', (15705, 15744), True, 'import numpy as np\n'), ((15756, 15785), 'pandas.ExcelFile', 'pd.ExcelFile', (['excel_file_name'], {}), '(excel_file_name)\n', (15768, 15785), True, 'import pandas as pd\n'), ((15801, 15857), 'pandas.read_excel', 'pd.read_excel', (['xls'], {'sheet_name': 'excel_sheet_airfoil_coord'}), '(xls, sheet_name=excel_sheet_airfoil_coord)\n', (15814, 15857), True, 'import pandas as pd\n'), ((16747, 16804), 'numpy.zeros', 'np.zeros', (['blade.StructuralInformation.num_elem'], {'dtype': 'int'}), '(blade.StructuralInformation.num_elem, dtype=int)\n', (16755, 16804), True, 'import numpy as np\n'), ((16868, 16915), 'numpy.interp', 'np.interp', (['node_r', '(rR_aero * TipRad)', 'chord_aero'], {}), '(node_r, rR_aero * TipRad, chord_aero)\n', (16877, 16915), True, 'import numpy as np\n'), ((17244, 17305), 'numpy.zeros', 'np.zeros', (['(blade.StructuralInformation.num_node,)'], {'dtype': 'bool'}), '((blade.StructuralInformation.num_node,), dtype=bool)\n', (17252, 17305), True, 'import numpy as np\n'), ((17340, 17418), 'numpy.ones', 'np.ones', (['(blade.StructuralInformation.num_node - first_aero_node,)'], {'dtype': 'bool'}), '((blade.StructuralInformation.num_node - first_aero_node,), dtype=bool)\n', (17347, 17418), True, 'import numpy as np\n'), ((17613, 17664), 'numpy.interp', 'np.interp', (['node_r', '(rR_aero * TipRad)', 'thickness_aero'], {}), '(node_r, rR_aero * TipRad, thickness_aero)\n', (17622, 17664), True, 
'import numpy as np\n'), ((17853, 17963), 'numpy.linspace', 'np.linspace', (['(0)', '(blade.StructuralInformation.num_node - 1)', 'blade.StructuralInformation.num_node'], {'dtype': 'int'}), '(0, blade.StructuralInformation.num_node - 1, blade.\n StructuralInformation.num_node, dtype=int)\n', (17864, 17963), True, 'import numpy as np\n'), ((19352, 19377), 'numpy.zeros_like', 'np.zeros_like', (['node_chord'], {}), '(node_chord)\n', (19365, 19377), True, 'import numpy as np\n'), ((21940, 22017), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_parameters', '"""NumBl"""'], {}), "(excel_file_name, excel_sheet_parameters, 'NumBl')\n", (21967, 22017), True, 'import sharpy.utils.generate_cases as gc\n'), ((27132, 27221), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""HtFract"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'HtFract')\n", (27159, 27221), True, 'import sharpy.utils.generate_cases as gc\n'), ((27233, 27323), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TMassDen"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TMassDen')\n", (27260, 27323), True, 'import sharpy.utils.generate_cases as gc\n'), ((27335, 27425), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwFAStif"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwFAStif')\n", (27362, 27425), True, 'import sharpy.utils.generate_cases as gc\n'), ((27437, 27527), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwSSStif"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwSSStif')\n", (27464, 27527), True, 
'import sharpy.utils.generate_cases as gc\n'), ((27575, 27665), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwGJStif"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwGJStif')\n", (27602, 27665), True, 'import sharpy.utils.generate_cases as gc\n'), ((27677, 27767), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwEAStif"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwEAStif')\n", (27704, 27767), True, 'import sharpy.utils.generate_cases as gc\n'), ((27779, 27869), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwFAIner"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwFAIner')\n", (27806, 27869), True, 'import sharpy.utils.generate_cases as gc\n'), ((27881, 27971), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwSSIner"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwSSIner')\n", (27908, 27971), True, 'import sharpy.utils.generate_cases as gc\n'), ((27983, 28073), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwFAcgOf"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwFAcgOf')\n", (28010, 28073), True, 'import sharpy.utils.generate_cases as gc\n'), ((28085, 28175), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_tower', '"""TwSScgOf"""'], {}), "(excel_file_name, excel_sheet_structural_tower,\n 'TwSScgOf')\n", (28112, 28175), True, 'import sharpy.utils.generate_cases as gc\n'), ((28210, 28289), 
'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_parameters', '"""TowerHt"""'], {}), "(excel_file_name, excel_sheet_parameters, 'TowerHt')\n", (28237, 28289), True, 'import sharpy.utils.generate_cases as gc\n'), ((28335, 28362), 'sharpy.utils.generate_cases.AeroelasticInformation', 'gc.AeroelasticInformation', ([], {}), '()\n', (28360, 28362), True, 'import sharpy.utils.generate_cases as gc\n'), ((28937, 28975), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwEAStif'], {}), '(elem_r, Elevation, TwEAStif)\n', (28946, 28975), True, 'import numpy as np\n'), ((28991, 29029), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwSSStif'], {}), '(elem_r, Elevation, TwSSStif)\n', (29000, 29029), True, 'import numpy as np\n'), ((29045, 29083), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwFAStif'], {}), '(elem_r, Elevation, TwFAStif)\n', (29054, 29083), True, 'import numpy as np\n'), ((29098, 29136), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwGJStif'], {}), '(elem_r, Elevation, TwGJStif)\n', (29107, 29136), True, 'import numpy as np\n'), ((29476, 29514), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TMassDen'], {}), '(elem_r, Elevation, TMassDen)\n', (29485, 29514), True, 'import numpy as np\n'), ((29538, 29576), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwFAIner'], {}), '(elem_r, Elevation, TwFAIner)\n', (29547, 29576), True, 'import numpy as np\n'), ((29600, 29638), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwSSIner'], {}), '(elem_r, Elevation, TwSSIner)\n', (29609, 29638), True, 'import numpy as np\n'), ((29699, 29750), 'numpy.zeros', 'np.zeros', (['(tower.StructuralInformation.num_elem, 3)'], {}), '((tower.StructuralInformation.num_elem, 3))\n', (29707, 29750), True, 'import numpy as np\n'), ((29778, 29816), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwSScgOf'], {}), '(elem_r, Elevation, TwSScgOf)\n', (29787, 
29816), True, 'import numpy as np\n'), ((29843, 29881), 'numpy.interp', 'np.interp', (['elem_r', 'Elevation', 'TwFAcgOf'], {}), '(elem_r, Elevation, TwFAcgOf)\n', (29852, 29881), True, 'import numpy as np\n'), ((30391, 30442), 'numpy.zeros', 'np.zeros', (['(tower.StructuralInformation.num_node, 3)'], {}), '((tower.StructuralInformation.num_node, 3))\n', (30399, 30442), True, 'import numpy as np\n'), ((31107, 31164), 'numpy.zeros', 'np.zeros', (['tower.StructuralInformation.num_node'], {'dtype': 'int'}), '(tower.StructuralInformation.num_node, dtype=int)\n', (31115, 31164), True, 'import numpy as np\n'), ((31307, 31392), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_parameters', '"""overhang"""'], {}), "(excel_file_name, excel_sheet_parameters, 'overhang'\n )\n", (31334, 31392), True, 'import sharpy.utils.generate_cases as gc\n'), ((31499, 31578), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_parameters', '"""NacMass"""'], {}), "(excel_file_name, excel_sheet_parameters, 'NacMass')\n", (31526, 31578), True, 'import sharpy.utils.generate_cases as gc\n'), ((31766, 31829), 'numpy.array', 'np.array', (['[tower.StructuralInformation.num_node - 1]'], {'dtype': 'int'}), '([tower.StructuralInformation.num_node - 1], dtype=int)\n', (31774, 31829), True, 'import numpy as np\n'), ((31876, 31912), 'numpy.array', 'np.array', (['[NacelleMass]'], {'dtype': 'float'}), '([NacelleMass], dtype=float)\n', (31884, 31912), True, 'import numpy as np\n'), ((32521, 32548), 'sharpy.utils.generate_cases.AeroelasticInformation', 'gc.AeroelasticInformation', ([], {}), '()\n', (32546, 32548), True, 'import sharpy.utils.generate_cases as gc\n'), ((32725, 32779), 'numpy.zeros', 'np.zeros', (['(overhang.StructuralInformation.num_node, 3)'], {}), '((overhang.StructuralInformation.num_node, 3))\n', (32733, 32779), True, 'import numpy as np\n'), ((34461, 
34521), 'numpy.zeros', 'np.zeros', (['overhang.StructuralInformation.num_node'], {'dtype': 'int'}), '(overhang.StructuralInformation.num_node, dtype=int)\n', (34469, 34521), True, 'import numpy as np\n'), ((35765, 35788), 'sharpy.utils.generate_cases.LagrangeConstraint', 'gc.LagrangeConstraint', ([], {}), '()\n', (35786, 35788), True, 'import sharpy.utils.generate_cases as gc\n'), ((35961, 35986), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (35969, 35986), True, 'import numpy as np\n'), ((36132, 36152), 'sharpy.utils.generate_cases.BodyInformation', 'gc.BodyInformation', ([], {}), '()\n', (36150, 36152), True, 'import sharpy.utils.generate_cases as gc\n'), ((36200, 36214), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (36208, 36214), True, 'import numpy as np\n'), ((36239, 36253), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (36247, 36253), True, 'import numpy as np\n'), ((36282, 36296), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (36290, 36296), True, 'import numpy as np\n'), ((36349, 36379), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (36357, 36379), True, 'import numpy as np\n'), ((36391, 36411), 'sharpy.utils.generate_cases.BodyInformation', 'gc.BodyInformation', ([], {}), '()\n', (36409, 36411), True, 'import sharpy.utils.generate_cases as gc\n'), ((36459, 36635), 'numpy.array', 'np.array', (['[rotor.StructuralInformation.coordinates[0, 0], rotor.StructuralInformation\n .coordinates[0, 1], rotor.StructuralInformation.coordinates[0, 2], 0.0,\n 0.0, 0.0]'], {}), '([rotor.StructuralInformation.coordinates[0, 0], rotor.\n StructuralInformation.coordinates[0, 1], rotor.StructuralInformation.\n coordinates[0, 2], 0.0, 0.0, 0.0])\n', (36467, 36635), True, 'import numpy as np\n'), ((36649, 36703), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, rotation_velocity]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, rotation_velocity])\n', (36657, 36703), True, 'import 
numpy as np\n'), ((36726, 36740), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (36734, 36740), True, 'import numpy as np\n'), ((6609, 6699), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_structural_blade', '"""StrcTwst"""'], {}), "(excel_file_name, excel_sheet_structural_blade,\n 'StrcTwst')\n", (6636, 6699), True, 'import sharpy.utils.generate_cases as gc\n'), ((11068, 11107), 'numpy.interp', 'np.interp', (['rR', 'rR_structural', 'InPElAxis'], {}), '(rR, rR_structural, InPElAxis)\n', (11077, 11107), True, 'import numpy as np\n'), ((11110, 11149), 'numpy.interp', 'np.interp', (['rR', 'rR_structural', 'PreswpRef'], {}), '(rR, rR_structural, PreswpRef)\n', (11119, 11149), True, 'import numpy as np\n'), ((11207, 11247), 'numpy.interp', 'np.interp', (['rR', 'rR_structural', 'PrebendRef'], {}), '(rR, rR_structural, PrebendRef)\n', (11216, 11247), True, 'import numpy as np\n'), ((11270, 11308), 'numpy.interp', 'np.interp', (['rR', 'rR_structural', 'StrcTwst'], {}), '(rR, rR_structural, StrcTwst)\n', (11279, 11308), True, 'import numpy as np\n'), ((11488, 11529), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'EAStff'], {}), '(elem_rR, rR_structural, EAStff)\n', (11497, 11529), True, 'import numpy as np\n'), ((11549, 11591), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'FlpStff'], {}), '(elem_rR, rR_structural, FlpStff)\n', (11558, 11591), True, 'import numpy as np\n'), ((11611, 11653), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'EdgStff'], {}), '(elem_rR, rR_structural, EdgStff)\n', (11620, 11653), True, 'import numpy as np\n'), ((11674, 11722), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'FlapEdgeStiff'], {}), '(elem_rR, rR_structural, FlapEdgeStiff)\n', (11683, 11722), True, 'import numpy as np\n'), ((11741, 11782), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'GJStff'], {}), '(elem_rR, rR_structural, GJStff)\n', 
(11750, 11782), True, 'import numpy as np\n'), ((12142, 12193), 'numpy.zeros', 'np.zeros', (['(blade.StructuralInformation.num_elem, 3)'], {}), '((blade.StructuralInformation.num_elem, 3))\n', (12150, 12193), True, 'import numpy as np\n'), ((12225, 12265), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'InPcg'], {}), '(elem_rR, rR_structural, InPcg)\n', (12234, 12265), True, 'import numpy as np\n'), ((12376, 12419), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'BMassDen'], {}), '(elem_rR, rR_structural, BMassDen)\n', (12385, 12419), True, 'import numpy as np\n'), ((12447, 12489), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'FlpIner'], {}), '(elem_rR, rR_structural, FlpIner)\n', (12456, 12489), True, 'import numpy as np\n'), ((12517, 12559), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'EdgIner'], {}), '(elem_rR, rR_structural, EdgIner)\n', (12526, 12559), True, 'import numpy as np\n'), ((12588, 12635), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'FlapEdgeIner'], {}), '(elem_rR, rR_structural, FlapEdgeIner)\n', (12597, 12635), True, 'import numpy as np\n'), ((16324, 16491), 'sharpy.utils.generate_cases.get_airfoil_camber', 'gc.get_airfoil_camber', (["excel_db['%s_x' % pure_airfoils_names[iairfoil]][2:icoord]", "excel_db['%s_y' % pure_airfoils_names[iairfoil]][2:icoord]", 'n_points_camber'], {}), "(excel_db['%s_x' % pure_airfoils_names[iairfoil]][2:\n icoord], excel_db['%s_y' % pure_airfoils_names[iairfoil]][2:icoord],\n n_points_camber)\n", (16345, 16491), True, 'import sharpy.utils.generate_cases as gc\n'), ((18179, 18223), 'numpy.zeros', 'np.zeros', (['(blade_nodes, chord_panels[0] + 1)'], {}), '((blade_nodes, chord_panels[0] + 1))\n', (18187, 18223), True, 'import numpy as np\n'), ((22029, 22114), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_parameters', '"""ShftTilt"""'], {}), "(excel_file_name, excel_sheet_parameters, 
'ShftTilt'\n )\n", (22056, 22114), True, 'import sharpy.utils.generate_cases as gc\n'), ((22129, 22205), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_parameters', '"""Cone"""'], {}), "(excel_file_name, excel_sheet_parameters, 'Cone')\n", (22156, 22205), True, 'import sharpy.utils.generate_cases as gc\n'), ((22381, 22406), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (22389, 22406), True, 'import numpy as np\n'), ((22498, 22523), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (22506, 22523), True, 'import numpy as np\n'), ((22962, 22987), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (22970, 22987), True, 'import numpy as np\n'), ((32318, 32403), 'sharpy.utils.generate_cases.read_column_sheet_type01', 'gc.read_column_sheet_type01', (['excel_file_name', 'excel_sheet_parameters', '"""ShftTilt"""'], {}), "(excel_file_name, excel_sheet_parameters, 'ShftTilt'\n )\n", (32345, 32403), True, 'import sharpy.utils.generate_cases as gc\n'), ((36806, 36832), 'numpy.array', 'np.array', (['[0.0, tilt, 0.0]'], {}), '([0.0, tilt, 0.0])\n', (36814, 36832), True, 'import numpy as np\n'), ((11164, 11204), 'numpy.interp', 'np.interp', (['rR', 'rR_structural', 'OutPElAxis'], {}), '(rR, rR_structural, OutPElAxis)\n', (11173, 11204), True, 'import numpy as np\n'), ((12297, 12338), 'numpy.interp', 'np.interp', (['elem_rR', 'rR_structural', 'OutPcg'], {}), '(elem_rR, rR_structural, OutPcg)\n', (12306, 12338), True, 'import numpy as np\n'), ((13274, 13302), 'sharpy.utils.h5utils.readh5', 'h5.readh5', (['h5_cross_sec_prop'], {}), '(h5_cross_sec_prop)\n', (13283, 13302), True, 'import sharpy.utils.h5utils as h5\n'), ((13439, 13590), 'scipy.interpolate.interp1d', 'scint.interp1d', (['cross_prop.radius', 'cross_prop.M'], {'kind': '"""cubic"""', 'copy': '(False)', 'assume_sorted': '(True)', 'axis': '(0)', 'bounds_error': 
'(False)', 'fill_value': '"""extrapolate"""'}), "(cross_prop.radius, cross_prop.M, kind='cubic', copy=False,\n assume_sorted=True, axis=0, bounds_error=False, fill_value='extrapolate')\n", (13453, 13590), True, 'import scipy.interpolate as scint\n'), ((13725, 13876), 'scipy.interpolate.interp1d', 'scint.interp1d', (['cross_prop.radius', 'cross_prop.K'], {'kind': '"""cubic"""', 'copy': '(False)', 'assume_sorted': '(True)', 'axis': '(0)', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(cross_prop.radius, cross_prop.K, kind='cubic', copy=False,\n assume_sorted=True, axis=0, bounds_error=False, fill_value='extrapolate')\n", (13739, 13876), True, 'import scipy.interpolate as scint\n'), ((15966, 16034), 'math.isnan', 'math.isnan', (["excel_db['%s_x' % pure_airfoils_names[iairfoil]][icoord]"], {}), "(excel_db['%s_x' % pure_airfoils_names[iairfoil]][icoord])\n", (15976, 16034), False, 'import math\n'), ((18281, 18346), 'numpy.linalg.norm', 'np.linalg.norm', (['blade.StructuralInformation.coordinates[inode, :]'], {}), '(blade.StructuralInformation.coordinates[inode, :])\n', (18295, 18346), True, 'import numpy as np\n'), ((18366, 18417), 'numpy.sqrt', 'np.sqrt', (['(rotation_velocity ** 2 * r ** 2 + wsp ** 2)'], {}), '(rotation_velocity ** 2 * r ** 2 + wsp ** 2)\n', (18373, 18417), True, 'import numpy as np\n'), ((18488, 18516), 'numpy.minimum', 'np.minimum', (['last_length', '(0.5)'], {}), '(last_length, 0.5)\n', (18498, 18516), True, 'import numpy as np\n'), ((19600, 19671), 'sharpy.utils.generate_cases.get_aoacl0_from_camber', 'gc.get_aoacl0_from_camber', (['airfoils[inode, :, 0]', 'airfoils[inode, :, 1]'], {}), '(airfoils[inode, :, 0], airfoils[inode, :, 1])\n', (19625, 19671), True, 'import sharpy.utils.generate_cases as gc\n'), ((19690, 19758), 'sharpy.utils.generate_cases.get_mu0_from_camber', 'gc.get_mu0_from_camber', (['airfoils[inode, :, 0]', 'airfoils[inode, :, 1]'], {}), '(airfoils[inode, :, 0], airfoils[inode, :, 1])\n', (19712, 19758), True, 
'import sharpy.utils.generate_cases as gc\n'), ((19775, 19840), 'numpy.linalg.norm', 'np.linalg.norm', (['blade.StructuralInformation.coordinates[inode, :]'], {}), '(blade.StructuralInformation.coordinates[inode, :])\n', (19789, 19840), True, 'import numpy as np\n'), ((19860, 19911), 'numpy.sqrt', 'np.sqrt', (['(rotation_velocity ** 2 * r ** 2 + wsp ** 2)'], {}), '(rotation_velocity ** 2 * r ** 2 + wsp ** 2)\n', (19867, 19911), True, 'import numpy as np\n'), ((21163, 21187), 'numpy.ones_like', 'np.ones_like', (['node_chord'], {}), '(node_chord)\n', (21175, 21187), True, 'import numpy as np\n'), ((22714, 22739), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (22722, 22739), True, 'import numpy as np\n'), ((30969, 31018), 'numpy.zeros', 'np.zeros', (['(tower.StructuralInformation.num_node,)'], {}), '((tower.StructuralInformation.num_node,))\n', (30977, 31018), True, 'import numpy as np\n'), ((32902, 32924), 'numpy.sin', 'np.sin', (['(tilt * deg2rad)'], {}), '(tilt * deg2rad)\n', (32908, 32924), True, 'import numpy as np\n'), ((33016, 33038), 'numpy.cos', 'np.cos', (['(tilt * deg2rad)'], {}), '(tilt * deg2rad)\n', (33022, 33038), True, 'import numpy as np\n'), ((8511, 8526), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8519, 8526), True, 'import numpy as np\n'), ((8581, 8606), 'numpy.array', 'np.array', (['[OutPElAxis[0]]'], {}), '([OutPElAxis[0]])\n', (8589, 8606), True, 'import numpy as np\n'), ((8658, 8682), 'numpy.array', 'np.array', (['[InPElAxis[0]]'], {}), '([InPElAxis[0]])\n', (8666, 8682), True, 'import numpy as np\n'), ((8736, 8763), 'numpy.array', 'np.array', (['[ElAxisAftLEc[0]]'], {}), '([ElAxisAftLEc[0]])\n', (8744, 8763), True, 'import numpy as np\n'), ((8816, 8839), 'numpy.array', 'np.array', (['[StrcTwst[0]]'], {}), '([StrcTwst[0]])\n', (8824, 8839), True, 'import numpy as np\n'), ((8888, 8911), 'numpy.array', 'np.array', (['[BMassDen[0]]'], {}), '([BMassDen[0]])\n', (8896, 8911), True, 'import numpy as 
np\n'), ((8959, 8981), 'numpy.array', 'np.array', (['[FlpStff[0]]'], {}), '([FlpStff[0]])\n', (8967, 8981), True, 'import numpy as np\n'), ((9028, 9050), 'numpy.array', 'np.array', (['[EdgStff[0]]'], {}), '([EdgStff[0]])\n', (9036, 9050), True, 'import numpy as np\n'), ((9103, 9131), 'numpy.array', 'np.array', (['[FlapEdgeStiff[0]]'], {}), '([FlapEdgeStiff[0]])\n', (9111, 9131), True, 'import numpy as np\n'), ((9183, 9204), 'numpy.array', 'np.array', (['[GJStff[0]]'], {}), '([GJStff[0]])\n', (9191, 9204), True, 'import numpy as np\n'), ((9249, 9270), 'numpy.array', 'np.array', (['[EAStff[0]]'], {}), '([EAStff[0]])\n', (9257, 9270), True, 'import numpy as np\n'), ((9316, 9338), 'numpy.array', 'np.array', (['[FlpIner[0]]'], {}), '([FlpIner[0]])\n', (9324, 9338), True, 'import numpy as np\n'), ((9385, 9407), 'numpy.array', 'np.array', (['[EdgIner[0]]'], {}), '([EdgIner[0]])\n', (9393, 9407), True, 'import numpy as np\n'), ((9459, 9486), 'numpy.array', 'np.array', (['[FlapEdgeIner[0]]'], {}), '([FlapEdgeIner[0]])\n', (9467, 9486), True, 'import numpy as np\n'), ((9541, 9566), 'numpy.array', 'np.array', (['[PrebendRef[0]]'], {}), '([PrebendRef[0]])\n', (9549, 9566), True, 'import numpy as np\n'), ((9618, 9642), 'numpy.array', 'np.array', (['[PreswpRef[0]]'], {}), '([PreswpRef[0]])\n', (9626, 9642), True, 'import numpy as np\n'), ((9690, 9711), 'numpy.array', 'np.array', (['[OutPcg[0]]'], {}), '([OutPcg[0]])\n', (9698, 9711), True, 'import numpy as np\n'), ((9755, 9775), 'numpy.array', 'np.array', (['[InPcg[0]]'], {}), '([InPcg[0]])\n', (9763, 9775), True, 'import numpy as np\n'), ((18576, 18643), 'sharpy.utils.generate_cases.get_factor_geometric_progression', 'gc.get_factor_geometric_progression', (['last_length', '(1.0)', 'chord_panels'], {}), '(last_length, 1.0, chord_panels)\n', (18611, 18643), True, 'import sharpy.utils.generate_cases as gc\n'), ((19259, 19294), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(chord_panels + 1)'], {}), '(0, 1, chord_panels + 
1)\n', (19270, 19294), True, 'import numpy as np\n'), ((19036, 19114), 'sys.error', 'sys.error', (["('ERROR in the panel discretization of the blade in node %d' % inode)"], {}), "('ERROR in the panel discretization of the blade in node %d' % inode)\n", (19045, 19114), False, 'import sys\n'), ((19956, 20070), 'numpy.linalg.norm', 'np.linalg.norm', (['(blade.StructuralInformation.coordinates[1, :] - blade.\n StructuralInformation.coordinates[0, :])'], {}), '(blade.StructuralInformation.coordinates[1, :] - blade.\n StructuralInformation.coordinates[0, :])\n', (19970, 20070), True, 'import numpy as np\n'), ((20173, 20289), 'numpy.linalg.norm', 'np.linalg.norm', (['(blade.StructuralInformation.coordinates[-1, :] - blade.\n StructuralInformation.coordinates[-2, :])'], {}), '(blade.StructuralInformation.coordinates[-1, :] - blade.\n StructuralInformation.coordinates[-2, :])\n', (20187, 20289), True, 'import numpy as np\n'), ((20328, 20457), 'numpy.linalg.norm', 'np.linalg.norm', (['(blade.StructuralInformation.coordinates[inode + 1, :] - blade.\n StructuralInformation.coordinates[inode - 1, :])'], {}), '(blade.StructuralInformation.coordinates[inode + 1, :] -\n blade.StructuralInformation.coordinates[inode - 1, :])\n', (20342, 20457), True, 'import numpy as np\n'), ((18970, 19002), 'numpy.diff', 'np.diff', (['udmd_by_nodes[inode, :]'], {}), '(udmd_by_nodes[inode, :])\n', (18977, 19002), True, 'import numpy as np\n')] |
import os
import math
import nrrd
import logging
import tifffile
import warnings
import numpy as np
from skimage import transform
from tqdm import tqdm
from natsort import natsorted
from concurrent.futures import ProcessPoolExecutor
from imlib.general.system import get_sorted_file_paths, get_num_processes
from .utils import scale_z, check_mem
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import nibabel as nib
def load_any(
    src_path,
    x_scaling_factor=1.0,
    y_scaling_factor=1.0,
    z_scaling_factor=1.0,
    anti_aliasing=True,
    load_parallel=False,
    sort_input_file=False,
    as_numpy=False,
    n_free_cpus=2,
):
    """
    Guess what kind of data *src_path* points at (directory of tiffs,
    text file of paths, tiff stack, nrrd or NifTI) and dispatch to the
    matching loader in this module.

    .. warning:: x and y scaling not used at the moment if loading a
        complete image

    :param str src_path: Path of a nifty file, tiff file, folder of tiff
        files, or text file containing a list of paths
    :param float x_scaling_factor: Scaling of the brain along x (applied
        on loading, before return)
    :param float y_scaling_factor: Scaling of the brain along y (applied
        on loading, before return)
    :param float z_scaling_factor: Scaling of the brain along z (applied
        on loading, before return)
    :param bool anti_aliasing: Whether to apply a Gaussian filter to
        smooth the image prior to down-scaling. It is crucial to filter
        when down-sampling the image to avoid aliasing artifacts.
    :param bool load_parallel: Load planes in parallel using
        multiprocessing for faster data loading
    :param bool sort_input_file: If the input is a file of paths,
        naturally sort it first
    :param bool as_numpy: Convert .nii data to an in-memory numpy array
        (rather than a memmap)
    :param int n_free_cpus: Number of cpu cores to leave free
    :return: The loaded brain
    :rtype: np.ndarray
    :raises NotImplementedError: If the data type cannot be guessed
    """
    src_path = str(src_path)

    # Guard-clause dispatch: the first matching form wins.
    if os.path.isdir(src_path):
        logging.debug("Data type is: directory of files")
        return load_from_folder(
            src_path,
            x_scaling_factor,
            y_scaling_factor,
            z_scaling_factor,
            anti_aliasing=anti_aliasing,
            file_extension=".tif",
            load_parallel=load_parallel,
            n_free_cpus=n_free_cpus,
        )
    if src_path.endswith(".txt"):
        logging.debug("Data type is: list of file paths")
        return load_img_sequence(
            src_path,
            x_scaling_factor,
            y_scaling_factor,
            z_scaling_factor,
            anti_aliasing=anti_aliasing,
            load_parallel=load_parallel,
            sort=sort_input_file,
            n_free_cpus=n_free_cpus,
        )
    if src_path.endswith((".tif", ".tiff")):
        logging.debug("Data type is: tif stack")
        return load_img_stack(
            src_path,
            x_scaling_factor,
            y_scaling_factor,
            z_scaling_factor,
            anti_aliasing=anti_aliasing,
        )
    if src_path.endswith(".nrrd"):
        logging.debug("Data type is: nrrd")
        return load_nrrd(src_path)
    if src_path.endswith((".nii", ".nii.gz")):
        logging.debug("Data type is: NifTI")
        return load_nii(src_path, as_array=True, as_numpy=as_numpy)
    raise NotImplementedError(
        "Could not guess data type for path {}".format(src_path)
    )
def load_nrrd(src_path):
    """Read an .nrrd image from disk and return its voxel data.

    The nrrd header is discarded; only the data array is returned.

    :param str src_path: The path of the image to be loaded
    :return: The loaded brain array
    :rtype: np.ndarray
    """
    data, _header = nrrd.read(str(src_path))
    return data
def load_img_stack(
    stack_path,
    x_scaling_factor,
    y_scaling_factor,
    z_scaling_factor,
    anti_aliasing=True,
):
    """
    Read a tiff stack into a numpy array, optionally rescaling it.

    :param str stack_path: The path of the image to be loaded
    :param float x_scaling_factor: Scaling of the brain along x (applied
        on loading, before return)
    :param float y_scaling_factor: Scaling of the brain along y (applied
        on loading, before return)
    :param float z_scaling_factor: Scaling of the brain along z (applied
        on loading, before return)
    :param bool anti_aliasing: Whether to apply a Gaussian filter to
        smooth the image prior to down-scaling. It is crucial to filter
        when down-sampling the image to avoid aliasing artifacts.
    :return: The loaded brain array
    :rtype: np.ndarray
    """
    stack_path = str(stack_path)
    logging.debug(f"Loading: {stack_path}")
    stack = tifffile.imread(stack_path)

    # Rescale plane by plane: resampling the whole 3D volume at once
    # (scipy etc.) needs too much RAM.
    if not (x_scaling_factor == y_scaling_factor == 1):
        logging.debug("Downsampling stack in X/Y")
        rescaled_planes = [
            transform.rescale(
                plane,
                (y_scaling_factor, x_scaling_factor),
                mode="constant",
                preserve_range=True,
                anti_aliasing=anti_aliasing,
            )
            for plane in tqdm(stack)
        ]
        logging.debug("Converting downsampled stack to array")
        stack = np.array(rescaled_planes)
    if stack.ndim == 3:
        # Move the plane axis last so the volume is indexed [y, x, z].
        stack = np.rollaxis(stack, 0, 3)
    if z_scaling_factor != 1:
        logging.debug("Downsampling stack in Z")
        stack = scale_z(stack, z_scaling_factor)
    return stack
def load_nii(src_path, as_array=False, as_numpy=False):
    """
    Load a brain from a nifti file.

    :param str src_path: The path to the nifty file on the filesystem
    :param bool as_array: Whether to convert the brain to a numpy array or
        keep it as a nifty object
    :param bool as_numpy: Whether to convert the image to a numpy array in
        memory (rather than a memmap)
    :return: The loaded brain (format depends on the above flags)
    """
    src_path = str(src_path)
    nii_img = nib.load(src_path)
    if as_array:
        # nibabel deprecated Nifti1Image.get_data() and removed it in
        # v5.0; np.asanyarray(img.dataobj) is the recommended replacement
        # that keeps the on-disk dtype (get_fdata() would always cast to
        # float64).
        image = np.asanyarray(nii_img.dataobj)
        if as_numpy:
            # Force a full in-memory copy instead of a memmap/proxy view.
            image = np.array(image)
        return image
    else:
        return nii_img
def load_from_folder(
    src_folder,
    x_scaling_factor,
    y_scaling_factor,
    z_scaling_factor,
    anti_aliasing=True,
    file_extension="",
    load_parallel=False,
    n_free_cpus=2,
):
    """
    Load a brain from a folder of 2D image files.

    Every file whose name contains *file_extension* is read in sorted
    order and assumed to belong to the same sample.

    :param str src_folder: Folder containing the image planes
    :param float x_scaling_factor: Scaling of the brain along x (applied
        on loading, before return)
    :param float y_scaling_factor: Scaling of the brain along y (applied
        on loading, before return)
    :param float z_scaling_factor: Scaling of the brain along z
    :param bool anti_aliasing: Whether to apply a Gaussian filter to
        smooth the image prior to down-scaling. It is crucial to filter
        when down-sampling the image to avoid aliasing artifacts.
    :param str file_extension: Substring that must be present in a file
        name for it to be considered part of the sample
    :param bool load_parallel: Use multiprocessing to speedup image loading
    :param int n_free_cpus: Number of cpu cores to leave free.
    :return: The loaded and scaled brain
    :rtype: np.ndarray
    """
    sorted_paths = get_sorted_file_paths(
        src_folder, file_extension=file_extension
    )
    return load_image_series(
        sorted_paths,
        x_scaling_factor,
        y_scaling_factor,
        z_scaling_factor,
        load_parallel=load_parallel,
        n_free_cpus=n_free_cpus,
        anti_aliasing=anti_aliasing,
    )
def load_img_sequence(
    img_sequence_file_path,
    x_scaling_factor,
    y_scaling_factor,
    z_scaling_factor,
    anti_aliasing=True,
    load_parallel=False,
    sort=False,
    n_free_cpus=2,
):
    """
    Load a brain from a text file listing one image path per line.

    :param str img_sequence_file_path: The path to the file containing the
        ordered list of image paths (one per line)
    :param float x_scaling_factor: Scaling of the brain along x (applied
        on loading, before return)
    :param float y_scaling_factor: Scaling of the brain along y (applied
        on loading, before return)
    :param float z_scaling_factor: Scaling of the brain along z
    :param bool anti_aliasing: Whether to apply a Gaussian filter to
        smooth the image prior to down-scaling. It is crucial to filter
        when down-sampling the image to avoid aliasing artifacts.
    :param bool load_parallel: Use multiprocessing to speedup image loading
    :param bool sort: Perform a natural sort of the file paths in the list
    :param int n_free_cpus: Number of cpu cores to leave free.
    :return: The loaded and scaled brain
    :rtype: np.ndarray
    """
    seq_file = str(img_sequence_file_path)
    with open(seq_file, "r") as in_file:
        paths = [line.strip() for line in in_file]
    if sort:
        paths = natsorted(paths)
    return load_image_series(
        paths,
        x_scaling_factor,
        y_scaling_factor,
        z_scaling_factor,
        load_parallel=load_parallel,
        n_free_cpus=n_free_cpus,
        anti_aliasing=anti_aliasing,
    )
def load_image_series(
    paths,
    x_scaling_factor,
    y_scaling_factor,
    z_scaling_factor,
    anti_aliasing=True,
    load_parallel=False,
    n_free_cpus=2,
):
    """
    Load a brain from an ordered list of 2D image paths.

    :param list paths: Ordered list of image paths
    :param float x_scaling_factor: Scaling of the brain along x (applied
        on loading, before return)
    :param float y_scaling_factor: Scaling of the brain along y (applied
        on loading, before return)
    :param float z_scaling_factor: Scaling of the brain along z
    :param bool anti_aliasing: Whether to apply a Gaussian filter to
        smooth the image prior to down-scaling. It is crucial to filter
        when down-sampling the image to avoid aliasing artifacts.
    :param bool load_parallel: Use multiprocessing to speedup image loading
    :param int n_free_cpus: Number of cpu cores to leave free.
    :return: The loaded and scaled brain
    :rtype: np.ndarray
    """
    # X/Y scaling happens while planes are read; Z scaling is applied
    # afterwards on the assembled volume.
    if load_parallel:
        img = threaded_load_from_sequence(
            paths,
            x_scaling_factor,
            y_scaling_factor,
            n_free_cpus=n_free_cpus,
            anti_aliasing=anti_aliasing,
        )
    else:
        img = load_from_paths_sequence(
            paths,
            x_scaling_factor,
            y_scaling_factor,
            anti_aliasing=anti_aliasing,
        )
    if z_scaling_factor != 1:
        img = scale_z(img, z_scaling_factor)
    return img
def threaded_load_from_sequence(
    paths_sequence,
    x_scaling_factor=1.0,
    y_scaling_factor=1.0,
    anti_aliasing=True,
    n_free_cpus=2,
):
    """
    Use multiprocessing to load a brain from a sequence of image paths.

    :param list paths_sequence: The sorted list of the planes paths on the
        filesystem
    :param float x_scaling_factor: The scaling of the brain along the x
        dimension (applied on loading before return)
    :param float y_scaling_factor: The scaling of the brain along the y
        dimension (applied on loading before return)
    :param bool anti_aliasing: Whether to apply a Gaussian filter to smooth
        the image prior to down-scaling. It is crucial to filter when
        down-sampling the image to avoid aliasing artifacts.
    :param int n_free_cpus: Number of cpu cores to leave free.
    :return: The loaded and scaled brain
    :rtype: np.ndarray
    """
    stacks = []
    n_processes = get_num_processes(min_free_cpu_cores=n_free_cpus)
    # WARNING: will not work with interactive interpreter.
    pool = ProcessPoolExecutor(max_workers=n_processes)
    # FIXME: should detect and switch to other method
    n_paths_per_subsequence = math.ceil(len(paths_sequence) / n_processes)
    for i in range(n_processes):
        start_idx = i * n_paths_per_subsequence
        if start_idx >= len(paths_sequence):
            break
        # Python slicing clamps an out-of-range end index, so the final
        # chunk simply takes whatever paths remain.  (The previous
        # implementation replaced the end index with -1 when it exceeded
        # the sequence length, which silently dropped the last plane.)
        sub_paths = paths_sequence[
            start_idx : start_idx + n_paths_per_subsequence
        ]
        process = pool.submit(
            load_from_paths_sequence,
            sub_paths,
            x_scaling_factor,
            y_scaling_factor,
            anti_aliasing=anti_aliasing,
        )
        stacks.append(process)
    # Concatenate the per-process sub-stacks along the plane (z) axis.
    stack = np.dstack([s.result() for s in stacks])
    return stack
def load_from_paths_sequence(
    paths_sequence,
    x_scaling_factor=1.0,
    y_scaling_factor=1.0,
    anti_aliasing=True,
):
    # TODO: Optimise - load threaded and process by batch
    """
    Single-core loading of a brain from a sequence of image paths.

    :param list paths_sequence: The sorted list of the planes paths on the
        filesystem
    :param float x_scaling_factor: Scaling of the brain along x (applied
        on loading, before return)
    :param float y_scaling_factor: Scaling of the brain along y (applied
        on loading, before return)
    :param bool anti_aliasing: Whether to apply a Gaussian filter to
        smooth the image prior to down-scaling. It is crucial to filter
        when down-sampling the image to avoid aliasing artifacts.
    :return: The loaded and scaled brain
    :rtype: np.ndarray
    """
    for plane_idx, plane_path in enumerate(
        tqdm(paths_sequence, desc="Loading images", unit="plane")
    ):
        plane = tifffile.imread(plane_path)
        if plane_idx == 0:
            # First plane fixes the output shape and dtype; check there
            # is enough memory for the full scaled volume up front.
            check_mem(
                plane.nbytes * x_scaling_factor * y_scaling_factor,
                len(paths_sequence),
            )
            # TEST: add test case for shape rounding
            volume = np.empty(
                (
                    int(round(plane.shape[0] * x_scaling_factor)),
                    int(round(plane.shape[1] * y_scaling_factor)),
                    len(paths_sequence),
                ),
                dtype=plane.dtype,
            )
        if x_scaling_factor != 1 or y_scaling_factor != 1:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                plane = transform.rescale(
                    plane,
                    (x_scaling_factor, y_scaling_factor),
                    mode="constant",
                    preserve_range=True,
                    anti_aliasing=anti_aliasing,
                )
        volume[:, :, plane_idx] = plane
    return volume
def generate_paths_sequence_file(
    input_folder,
    output_file_path,
    sort=True,
    prefix=None,
    suffix=None,
    match_string=None,
):
    """
    Write a text file listing, one per line, the image paths found by
    recursively walking *input_folder*.

    :param input_folder: Folder to walk for image files
    :param output_file_path: Destination text file
    :param bool sort: If True, naturally sort the collected paths
    :param str prefix: If given, keep only filenames starting with it
    :param str suffix: If given, keep only filenames ending with it
    :param str match_string: If given, keep only filenames containing it
    """
    input_folder = str(input_folder)
    paths = []
    for root, dirs, files in os.walk(input_folder):
        for filename in files:
            if prefix is not None and not filename.startswith(prefix):
                continue
            if suffix is not None and not filename.endswith(suffix):
                continue
            if match_string is not None and match_string not in filename:
                continue
            paths.append(os.path.join(root, filename))
    if sort:
        paths = natsorted(paths)
    with open(output_file_path, "w") as out_file:
        # writelines() does not add line separators, so terminate each
        # path explicitly -- load_img_sequence() expects one path per
        # line.  (Previously the paths were concatenated on one line.)
        out_file.writelines(path + "\n" for path in paths)
def get_size_image_from_file_paths(file_path, file_extension="tif"):
    """
    Determine the size of a 3D image stored as a series of 2D files,
    loading only the first plane rather than the whole image.

    :param str file_path: File containing file_paths in a text file,
        or as a list.
    :param str file_extension: Optional file extension (if a directory
        is passed)
    :return: Dict of image sizes
    """
    file_path = str(file_path)
    img_paths = get_sorted_file_paths(file_path, file_extension=file_extension)
    logging.debug(
        "Loading file: {} to check raw image size" "".format(img_paths[0])
    )
    # One plane is enough to know x and y; z is the number of planes.
    first_plane = load_any(img_paths[0])
    y_shape, x_shape = first_plane.shape
    return {"x": x_shape, "y": y_shape, "z": len(img_paths)}
| [
"tqdm.tqdm",
"logging.debug",
"warnings.simplefilter",
"nibabel.load",
"skimage.transform.rescale",
"os.path.isdir",
"concurrent.futures.ProcessPoolExecutor",
"imlib.general.system.get_sorted_file_paths",
"os.walk",
"warnings.catch_warnings",
"numpy.array",
"numpy.rollaxis",
"tifffile.imread... | [((354, 379), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (377, 379), False, 'import warnings\n'), ((385, 416), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (406, 416), False, 'import warnings\n'), ((2219, 2242), 'os.path.isdir', 'os.path.isdir', (['src_path'], {}), '(src_path)\n', (2232, 2242), False, 'import os\n'), ((3964, 3983), 'nrrd.read', 'nrrd.read', (['src_path'], {}), '(src_path)\n', (3973, 3983), False, 'import nrrd\n'), ((4928, 4967), 'logging.debug', 'logging.debug', (['f"""Loading: {stack_path}"""'], {}), "(f'Loading: {stack_path}')\n", (4941, 4967), False, 'import logging\n'), ((4980, 5007), 'tifffile.imread', 'tifffile.imread', (['stack_path'], {}), '(stack_path)\n', (4995, 5007), False, 'import tifffile\n'), ((6458, 6476), 'nibabel.load', 'nib.load', (['src_path'], {}), '(src_path)\n', (6466, 6476), True, 'import nibabel as nib\n'), ((8039, 8103), 'imlib.general.system.get_sorted_file_paths', 'get_sorted_file_paths', (['src_folder'], {'file_extension': 'file_extension'}), '(src_folder, file_extension=file_extension)\n', (8060, 8103), False, 'from imlib.general.system import get_sorted_file_paths, get_num_processes\n'), ((12681, 12730), 'imlib.general.system.get_num_processes', 'get_num_processes', ([], {'min_free_cpu_cores': 'n_free_cpus'}), '(min_free_cpu_cores=n_free_cpus)\n', (12698, 12730), False, 'from imlib.general.system import get_sorted_file_paths, get_num_processes\n'), ((12802, 12846), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'n_processes'}), '(max_workers=n_processes)\n', (12821, 12846), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((15873, 15894), 'os.walk', 'os.walk', (['input_folder'], {}), '(input_folder)\n', (15880, 15894), False, 'import os\n'), ((16846, 16909), 'imlib.general.system.get_sorted_file_paths', 'get_sorted_file_paths', (['file_path'], {'file_extension': 
'file_extension'}), '(file_path, file_extension=file_extension)\n', (16867, 16909), False, 'from imlib.general.system import get_sorted_file_paths, get_num_processes\n'), ((2252, 2301), 'logging.debug', 'logging.debug', (['"""Data type is: directory of files"""'], {}), "('Data type is: directory of files')\n", (2265, 2301), False, 'import logging\n'), ((5203, 5245), 'logging.debug', 'logging.debug', (['"""Downsampling stack in X/Y"""'], {}), "('Downsampling stack in X/Y')\n", (5216, 5245), False, 'import logging\n'), ((5628, 5682), 'logging.debug', 'logging.debug', (['"""Converting downsampled stack to array"""'], {}), "('Converting downsampled stack to array')\n", (5641, 5682), False, 'import logging\n'), ((5699, 5726), 'numpy.array', 'np.array', (['downsampled_stack'], {}), '(downsampled_stack)\n', (5707, 5726), True, 'import numpy as np\n'), ((5768, 5792), 'numpy.rollaxis', 'np.rollaxis', (['stack', '(0)', '(3)'], {}), '(stack, 0, 3)\n', (5779, 5792), True, 'import numpy as np\n'), ((9870, 9886), 'natsort.natsorted', 'natsorted', (['paths'], {}), '(paths)\n', (9879, 9886), False, 'from natsort import natsorted\n'), ((14577, 14634), 'tqdm.tqdm', 'tqdm', (['paths_sequence'], {'desc': '"""Loading images"""', 'unit': '"""plane"""'}), "(paths_sequence, desc='Loading images', unit='plane')\n", (14581, 14634), False, 'from tqdm import tqdm\n'), ((14656, 14674), 'tifffile.imread', 'tifffile.imread', (['p'], {}), '(p)\n', (14671, 14674), False, 'import tifffile\n'), ((16301, 16317), 'natsort.natsorted', 'natsorted', (['paths'], {}), '(paths)\n', (16310, 16317), False, 'from natsort import natsorted\n'), ((2654, 2703), 'logging.debug', 'logging.debug', (['"""Data type is: list of file paths"""'], {}), "('Data type is: list of file paths')\n", (2667, 2703), False, 'import logging\n'), ((5839, 5879), 'logging.debug', 'logging.debug', (['"""Downsampling stack in Z"""'], {}), "('Downsampling stack in Z')\n", (5852, 5879), False, 'import logging\n'), ((6570, 6585), 
'numpy.array', 'np.array', (['image'], {}), '(image)\n', (6578, 6585), True, 'import numpy as np\n'), ((3067, 3107), 'logging.debug', 'logging.debug', (['"""Data type is: tif stack"""'], {}), "('Data type is: tif stack')\n", (3080, 3107), False, 'import logging\n'), ((5349, 5490), 'skimage.transform.rescale', 'transform.rescale', (['stack[plane]', '(y_scaling_factor, x_scaling_factor)'], {'mode': '"""constant"""', 'preserve_range': '(True)', 'anti_aliasing': 'anti_aliasing'}), "(stack[plane], (y_scaling_factor, x_scaling_factor), mode=\n 'constant', preserve_range=True, anti_aliasing=anti_aliasing)\n", (5366, 5490), False, 'from skimage import transform\n'), ((15249, 15274), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (15272, 15274), False, 'import warnings\n'), ((15292, 15323), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (15313, 15323), False, 'import warnings\n'), ((15346, 15478), 'skimage.transform.rescale', 'transform.rescale', (['img', '(x_scaling_factor, y_scaling_factor)'], {'mode': '"""constant"""', 'preserve_range': '(True)', 'anti_aliasing': 'anti_aliasing'}), "(img, (x_scaling_factor, y_scaling_factor), mode=\n 'constant', preserve_range=True, anti_aliasing=anti_aliasing)\n", (15363, 15478), False, 'from skimage import transform\n'), ((16241, 16269), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (16253, 16269), False, 'import os\n'), ((3346, 3381), 'logging.debug', 'logging.debug', (['"""Data type is: nrrd"""'], {}), "('Data type is: nrrd')\n", (3359, 3381), False, 'import logging\n'), ((3473, 3509), 'logging.debug', 'logging.debug', (['"""Data type is: NifTI"""'], {}), "('Data type is: NifTI')\n", (3486, 3509), False, 'import logging\n')] |
from ...isa.inst import *
import numpy as np
import math
class _Vsenn_v(Inst):
    """Shared golden model for the unit-stride vector store instructions
    (vse8.v / vse16.v / vse32.v / vse64.v).

    NOTE(review): instances are indexed like dicts (``self['vl']``,
    ``self['rs1']``, ...), so ``Inst`` presumably exposes the decoded
    operands that way -- confirm against the ``Inst`` base class.
    """
    def golden(self):
        # A trapping store writes nothing: the expected memory image is
        # vl zero elements in the element type of rs1.
        if 'isExcept' in self:
            if self['isExcept'] > 0:
                return np.zeros(self['vl'], dtype=self['rs1'].dtype)
        # First element index actually written; defaults to 0.
        if 'start' in self:
            start = self['start']
        else:
            start = 0
        # Pre-existing memory contents; defaults to a zeroed buffer.
        if 'origin' in self:
            origin = self['origin']
        else:
            origin = np.zeros(self['vl'], dtype=self['rs1'].dtype)
        if 'offset' in self:
            # Shift the source data down by 'offset' BYTES: reinterpret
            # the copy as uint8 in place (via the dtype attribute), move
            # bytes [offset, vl*itemsize) to index 0, zero-fill the
            # vacated tail, then restore the original element dtype.
            # Statement order matters -- both slice assignments operate
            # on the byte view.
            rs1 = self['rs1'].copy()
            rs1.dtype = np.uint8
            mul = self['rs1'].itemsize // rs1.itemsize
            rs1[0: (self['vl']*mul-self['offset'])] = rs1[self['offset'] : self['vl']*mul]
            rs1[(self['vl']*mul-self['offset']): self['vl']*mul] = 0
            rs1.dtype = self['rs1'].dtype
        else:
            rs1 = self['rs1'].copy()
        # Blend the (possibly shifted) source with the original memory.
        # NOTE(review): self.masked() comes from Inst; assumed to apply
        # the mask operand element-wise -- confirm.
        res = self.masked(rs1, origin[0: self['vl']])
        # Commit only elements [start, vl) into the memory image.
        origin[start: self['vl']] = res[start: self['vl']]
        return origin
# Concrete unit-stride stores; the element width is encoded in the name
# and the shared behavior lives in _Vsenn_v.golden().
class Vse8_v(_Vsenn_v):
    name = 'vse8.v'
class Vse16_v(_Vsenn_v):
    name = 'vse16.v'
class Vse32_v(_Vsenn_v):
    name = 'vse32.v'
class Vse64_v(_Vsenn_v):
    name = 'vse64.v'
class Vse1_v(Inst):
    """Golden model for the vse1.v mask store instruction."""
    name = 'vse1.v'

    def golden(self):
        # One mask bit per element, so the stored length is ceil(vl / 8) bytes.
        byte_count = math.ceil(self['vl'] / 8)
        first = self['start'] if 'start' in self else 0
        out = np.zeros(byte_count, dtype=self['rs1'].dtype)
        # Copy only the active tail [first, byte_count) of the source.
        out[first:byte_count] = self['rs1'][first:byte_count]
        return out
| [
"numpy.zeros",
"math.ceil"
] | [((1312, 1337), 'math.ceil', 'math.ceil', (["(self['vl'] / 8)"], {}), "(self['vl'] / 8)\n", (1321, 1337), False, 'import math\n'), ((1457, 1498), 'numpy.zeros', 'np.zeros', (['newLen'], {'dtype': "self['rs1'].dtype"}), "(newLen, dtype=self['rs1'].dtype)\n", (1465, 1498), True, 'import numpy as np\n'), ((446, 491), 'numpy.zeros', 'np.zeros', (["self['vl']"], {'dtype': "self['rs1'].dtype"}), "(self['vl'], dtype=self['rs1'].dtype)\n", (454, 491), True, 'import numpy as np\n'), ((193, 238), 'numpy.zeros', 'np.zeros', (["self['vl']"], {'dtype': "self['rs1'].dtype"}), "(self['vl'], dtype=self['rs1'].dtype)\n", (201, 238), True, 'import numpy as np\n')] |
"""GANomaly
"""
# pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915
##
from collections import OrderedDict
import os
from re import I
import time
import numpy as np
from tqdm import tqdm
import torch.optim as optim
import torch.nn as nn
import torch.utils.data
import torchvision.utils as vutils
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from copy import deepcopy
from lib.models.networks import weights_init, define_G, define_D, get_scheduler
from lib.visualizer import Visualizer
from lib.loss import l2_loss
from lib.evaluate import roc, auprc, write_inference_result, get_performance
from sklearn.metrics import precision_recall_fscore_support, confusion_matrix
import json
#import wandb
def seed(seed_value):
    """ Seed every RNG used during training for reproducible runs.

    Arguments:
        seed_value {int} -- seed to apply; -1 means "leave RNGs unseeded"
    """
    # -1 is the sentinel for "do not seed anything".
    if seed_value == -1:
        return
    import random
    # Seed each RNG the training loop may draw from, in a fixed order.
    for sow in (random.seed, torch.manual_seed, torch.cuda.manual_seed,
                torch.cuda.manual_seed_all, np.random.seed):
        sow(seed_value)
    # Deterministic cuDNN kernels: trade autotuning speed for reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    print("set seed to {}".format(str(seed_value)))
class Skipganomaly:
    """Skip-GANomaly Class
    """
    @property
    def name(self): return 'skip-ganomaly'
    def __init__(self, opt, data=None):
        """Build the Skip-GANomaly model.

        Args:
            opt: Parsed options namespace (output paths, hyper-parameters,
                device, phase, resume checkpoint, ...).
            data: Object exposing ``train``/``valid``/``inference``
                dataloaders (optional).
        """
        # Initialize basic attributes.
        self.opt = opt
        self.visualizer = Visualizer(opt)
        self.data = data
        # Output directories per phase.
        self.trn_dir = os.path.join(self.opt.outf, self.opt.name, 'train')
        self.tst_dir = os.path.join(self.opt.outf, self.opt.name, 'test')
        self.inf_dir = os.path.join(self.opt.outf, self.opt.name, 'inference')
        self.device = torch.device("cuda:0" if self.opt.device != "cpu" else "cpu")
        # -- Misc attributes
        self.epoch = 0
        self.times = []
        self.total_steps = 0
        ##
        # Create and initialize networks from networks.py.
        self.netg = define_G(self.opt, norm='batch', use_dropout=False, init_type='normal')
        self.netd = define_D(self.opt, norm='batch', use_sigmoid=False, init_type='normal')
        ##
        # Resume training from an existing checkpoint directory, if given.
        if self.opt.resume != '':
            print("\nLoading pre-trained networks.")
            self.opt.iter = torch.load(os.path.join(self.opt.resume, 'netG.pth'))['epoch']
            self.netg.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netG.pth'))['state_dict'])
            self.netd.load_state_dict(torch.load(os.path.join(self.opt.resume, 'netD.pth'))['state_dict'])
            print("\tDone.\n")
        print(self.netg)
        print(self.netd)
        ##
        # Loss functions: adversarial (BCE), reconstruction (L1), latent (L2).
        self.l_adv = nn.BCELoss()
        self.l_con = nn.L1Loss()
        self.l_lat = l2_loss
        ##
        # Pre-allocate input/label tensors (resized in set_input).
        self.input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32, device=self.device)
        self.noise = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32, device=self.device)
        self.label = torch.empty(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.gt    = torch.empty(size=(opt.batchsize,), dtype=torch.long, device=self.device)
        self.fixed_input = torch.empty(size=(self.opt.batchsize, 3, self.opt.isize, self.opt.isize), dtype=torch.float32, device=self.device)
        # Soft targets for the discriminator: 1 = real, 0 = fake.
        self.real_label = torch.ones (size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        self.fake_label = torch.zeros(size=(self.opt.batchsize,), dtype=torch.float32, device=self.device)
        ##
        # Optimizers and LR schedulers are only needed when training.
        if self.opt.phase == "train":
            self.netg.train()
            self.netd.train()
            self.optimizers = []
            self.optimizer_d = optim.Adam(self.netd.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
            self.optimizer_g = optim.Adam(self.netg.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_d)
            self.optimizers.append(self.optimizer_g)
            self.schedulers = [get_scheduler(optimizer, opt) for optimizer in self.optimizers]
##
    def set_input(self, input:torch.Tensor, noise:bool=False):
        """ Set input and ground truth for the current batch.
        Args:
            input: Batch tuple; input[0] holds the images, input[1] the labels.
            noise (bool): If True, refresh ``self.noise`` with standard-normal noise.
        """
        with torch.no_grad():
            # resize_ + copy_ reuses the pre-allocated buffers for variable batch sizes.
            self.input.resize_(input[0].size()).copy_(input[0])
            self.gt.resize_(input[1].size()).copy_(input[1])
            self.label.resize_(input[1].size())
            # Add noise to the input.
            if noise: self.noise.data.copy_(torch.randn(self.noise.size()))
        # Copy the first batch as the fixed input (used for visualization).
        if self.total_steps == self.opt.batchsize:
            self.fixed_input.resize_(input[0].size()).copy_(input[0])
##
def get_errors(self):
""" Get netD and netG errors.
Returns:
[OrderedDict]: Dictionary containing errors.
"""
errors = OrderedDict([
('err_d', self.err_d),
('err_g', self.err_g),
('err_g_adv', self.err_g_adv),
('err_g_con', self.err_g_con),
('err_g_lat', self.err_g_lat)])
return errors
##
    def reinit_d(self):
        """ Re-initialize the weights of netD.

        Called from ``optimize_params`` when the discriminator loss collapses.
        """
        self.netd.apply(weights_init)
        print('Reloading d net')
##
    def get_current_images(self):
        """ Returns current images.
        Returns:
            (reals, fakes, fixed): the real inputs, their reconstructions,
            and the reconstruction of the fixed reference batch.
        """
        reals = self.input.data
        fakes = self.fake.data
        fixed = self.netg(self.fixed_input)[0].data
        return reals, fakes, fixed
##
def save_weights(self, epoch:int, is_best:bool=False):
"""Save netG and netD weights for the current epoch.
Args:
epoch ([int]): Current epoch number.
"""
name = self.opt.dataset if self.opt.dataset else self.opt.dataroot.split("/")[-1]
weight_dir = os.path.join(
self.opt.outf, name, 'train', 'weights')
if not os.path.exists(weight_dir):
os.makedirs(weight_dir)
if is_best:
torch.save({'epoch': epoch, 'state_dict': self.netg.state_dict()}, f'{weight_dir}/netG_best.pth')
torch.save({'epoch': epoch, 'state_dict': self.netd.state_dict()}, f'{weight_dir}/netD_best.pth')
else:
torch.save({'epoch': epoch, 'state_dict': self.netd.state_dict()}, f"{weight_dir}/netD_{epoch}.pth")
torch.save({'epoch': epoch, 'state_dict': self.netg.state_dict()}, f"{weight_dir}/netG_{epoch}.pth")
    def load_weights(self, epoch=None, is_best:bool=False, path=None):
        """ Load pre-trained weights of NetG and NetD.
        Keyword Arguments:
            epoch {int} -- Epoch to be loaded (default: {None})
            is_best {bool} -- Load the best epoch instead (default: {False})
            path {str} -- Directory containing the weight files; when None the
                default train/weights directory is used (default: {None})
        Raises:
            Exception -- neither an epoch nor is_best was provided
            IOError -- weight files could not be read
        """
        if epoch is None and is_best is False:
            raise Exception('Please provide epoch to be loaded or choose the best epoch.')
        if is_best:
            fname_g = f"netG_best.pth"
            fname_d = f"netD_best.pth"
        else:
            fname_g = f"netG_{epoch}.pth"
            fname_d = f"netD_{epoch}.pth"
        if path is None:
            name = self.opt.dataset if self.opt.dataset else self.opt.dataroot.split("/")[-1]
            path_g = f"{self.opt.outf}/{name}/train/weights/{fname_g}"
            path_d = f"{self.opt.outf}/{name}/train/weights/{fname_d}"
        else:
            path_g = path + "/" + fname_g
            path_d = path + "/" + fname_d
        # Load the weights of netg and netd.
        print('>> Loading weights...')
        if len(self.opt.gpu_ids) == 0:
            # CPU-only run: remap tensors saved on GPU back to host memory.
            weights_g = torch.load(path_g, map_location=lambda storage, loc: storage)['state_dict']
            weights_d = torch.load(path_d, map_location=lambda storage, loc: storage)['state_dict']
        else:
            weights_g = torch.load(path_g)['state_dict']
            weights_d = torch.load(path_d)['state_dict']
        try:
            # create new OrderedDict that does not contain the `module.`
            # prefix added by nn.DataParallel checkpoints
            new_weights_g = OrderedDict()
            new_weights_d = OrderedDict()
            for k, v in weights_g.items():
                name = k[7:] # remove `module.`
                new_weights_g[name] = v
            for k, v in weights_d.items():
                name = k[7:] # remove `module.`
                new_weights_d[name] = v
            # load params (stripped keys are only used on CPU-only runs)
            if len(self.opt.gpu_ids) == 0:
                weights_g = new_weights_g
                weights_d = new_weights_d
            self.netg.load_state_dict(weights_g)
            self.netd.load_state_dict(weights_d)
        except IOError:
            raise IOError("netG weights not found")
        print('   Done.')
    def forward(self):
        """Forward propagate through both networks."""
        self.forward_g()
        self.forward_d()
    def forward_g(self):
        """ Forward propagate through netG: reconstruct the (noised) input.
        """
        #TODO: Check, why noised input is used
        self.fake = self.netg(self.input + self.noise)
    def forward_d(self):
        """ Forward propagate through netD for real and reconstructed images,
        keeping both classification outputs and feature maps.
        """
        self.pred_real, self.feat_real = self.netd(self.input)
        self.pred_fake, self.feat_fake = self.netd(self.fake)
    def backward_g(self):
        """ Backpropagate netg: weighted adversarial + reconstruction +
        latent-feature losses.
        """
        self.err_g_adv = self.opt.w_adv * self.l_adv(self.pred_fake, self.real_label)
        self.err_g_con = self.opt.w_con * self.l_con(self.fake, self.input)
        self.err_g_lat = self.opt.w_lat * self.l_lat(self.feat_fake, self.feat_real) # should be named discriminator
        if self.opt.verbose:
            print(f'err_g_adv: {str(self.err_g_adv)}')
            print(f'err_g_con: {str(self.err_g_con)}')
            print(f'err_g_lat: {str(self.err_g_lat)}')
        self.err_g = self.err_g_adv + self.err_g_con + self.err_g_lat
        self.err_g.backward(retain_graph=True)
    def backward_d(self):
        """Backpropagate netD: BCE on fake and real predictions plus the
        shared latent loss."""
        # Fake
        #print(f'pref_fake: {str(self.pred_fake)}')
        #print(f'self.fake_label: {str(self.fake_label)}')
        #print(f'self.pred_real: {str(self.pred_real)}')
        #print(f'self.real_label: {str(self.real_label)}')
        self.err_d_fake = self.l_adv(self.pred_fake, self.fake_label)
        # Real
        # pred_real, feat_real = self.netd(self.input)
        self.err_d_real = self.l_adv(self.pred_real, self.real_label)
        # Combine losses.
        # TODO: According to https://github.com/samet-akcay/skip-ganomaly/issues/18#issue-728932038 ... Check if lat loss has to be negative in discriminator backprob
        if self.opt.verbose:
            print(f'err_d_real: {str(self.err_d_real)}')
            print(f'err_d_fake: {str(self.err_d_fake)}')
            print(f'err_g_lat: {str(self.err_g_lat)}')
        self.err_d = self.err_d_real + self.err_d_fake + self.err_g_lat
        self.err_d.backward(retain_graph=True)
    def update_netg(self):
        """ Update Generator Network: zero grads, then backprop its loss.
        """
        self.optimizer_g.zero_grad()
        self.backward_g()
    def update_netd(self):
        """ Update Discriminator Network: zero grads, then backprop its loss.
        """
        self.optimizer_d.zero_grad()
        self.backward_d()
##
    def optimize_params(self):
        """ Optimize netD and netG networks.

        One full step: forward both nets, backprop generator and
        discriminator losses, step both optimizers. If the discriminator
        loss collapses (< 1e-5) its weights are re-initialized.
        """
        self.forward()
        self.update_netg()
        self.update_netd()
        self.optimizer_g.step()
        self.optimizer_d.step()
        if self.err_d < 1e-5: self.reinit_d()
##
    def train_one_epoch(self):
        """ Train the model for one epoch over ``self.data.train``,
        periodically logging losses and images via the visualizer.
        """
        self.opt.phase = "train"
        self.netg.train()
        self.netd.train()
        epoch_iter = 0
        for data in tqdm(self.data.train, leave=False, total=len(self.data.train)):
            self.total_steps += self.opt.batchsize
            epoch_iter += self.opt.batchsize
            self.set_input(data)
            self.optimize_params()
            reals, fakes, fixed = self.get_current_images()
            errors = self.get_errors()
            if self.opt.display:
                self.visualizer.plot_current_errors(self.epoch, self.total_steps, errors)
                # Write images to tensorboard
                if self.total_steps % self.opt.save_image_freq == 0:
                    self.visualizer.display_current_images(reals, fakes, fixed, train_or_test="train", global_step=self.total_steps)
            if self.total_steps % self.opt.save_image_freq == 0:
                self.visualizer.save_current_images(self.epoch, reals, fakes, fixed)
        print(">> Training model %s. Epoch %d/%d" % (self.name, self.epoch+1, self.opt.niter))
##
    def train(self):
        """ Train the model for ``opt.niter`` epochs, evaluating after each
        epoch and checkpointing whenever the validation AUC improves.
        """
        ##
        # TRAIN
        self.total_steps = 0
        best_auc = 0
        # Train for niter epochs.
        print(f">> Training {self.name} on {self.opt.dataset} to detect anomalies")
        for self.epoch in range(self.opt.iter, self.opt.niter):
            self.train_one_epoch()
            res = self.test()
            # Keep the weights of the best-AUC epoch.
            if res['auc'] > best_auc:
                best_auc = res['auc']
                self.save_weights(self.epoch, is_best=True)
            self.visualizer.print_current_performance(res, best_auc)
        print(">> Training model %s.[Done]" % self.name)
##
def test(self, plot_hist=True):
""" Test GANomaly model.
Args:
data ([type]): Dataloader for the test set
Raises:
IOError: Model weights not found.
"""
self.netg.eval()
self.netd.eval()
with torch.no_grad():
# Load the weights of netg and netd.
if self.opt.path_to_weights is not None:
self.load_weights(path=self.opt.path_to_weights, is_best=True)
self.opt.phase = 'test'
# Create big error tensor for the test set.
self.an_scores = torch.zeros(size=(len(self.data.valid.dataset),), dtype=torch.float32, device=self.device)
self.gt_labels = torch.zeros(size=(len(self.data.valid.dataset),), dtype=torch.long, device=self.device)
print(" Testing %s" % self.name)
self.times = []
total_steps_test = 0
epoch_iter = 0
i = 0
for data in tqdm(self.data.valid, leave=False, total=len(self.data.valid)):
total_steps_test += self.opt.batchsize
epoch_iter += self.opt.batchsize
time_i = time.time()
# Forward - Pass
self.forward_for_testing(data)
# Calculate the anomaly score.
error = self.calculate_an_score()
time_o = time.time()
self.an_scores[i*self.opt.batchsize: i*self.opt.batchsize + error.size(0)] = error.reshape(error.size(0))
self.gt_labels[i*self.opt.batchsize: i*self.opt.batchsize + error.size(0)] = self.gt.reshape(error.size(0))
if self.opt.verbose:
print(f'an_scores: {str(self.an_scores)}')
self.times.append(time_o - time_i)
real, fake, fixed = self.get_current_images()
if self.epoch*len(self.data.valid)+total_steps_test % self.opt.save_image_freq == 0:
self.visualizer.display_current_images(real, fake, fixed, train_or_test="test", global_step=self.epoch*len(self.data.valid)+total_steps_test)
# Save test images.
if self.opt.save_test_images:
dst = os.path.join(self.opt.outf, self.opt.name, 'test', 'images')
if not os.path.isdir(dst): os.makedirs(dst)
#iterate over them (real) and write anomaly score and ground truth on filename
vutils.save_image(real, '%s/real_%03d.png' % (dst, i+1), normalize=True)
vutils.save_image(fake, '%s/fake_%03d.png' % (dst, i+1), normalize=True)
i = i + 1
# Measure inference time.
self.times = np.array(self.times)
self.times = np.mean(self.times * 1000)
# Scale error vector between [0, 1]
# self.an_scores = (self.an_scores - torch.min(self.an_scores))/(torch.max(self.an_scores) - torch.min(self.an_scores))
if self.opt.verbose:
print(f'scaled an_scores: {str(self.an_scores)}')
y_trues = self.gt_labels.cpu()
y_preds = self.an_scores.cpu()
# Create data frame for scores and labels.
performance, thresholds, y_preds_man, y_preds_auc= get_performance(y_trues=y_trues, y_preds=y_preds, manual_threshold=self.opt.decision_threshold)
with open(os.path.join(self.opt.outf, self.opt.phase +"_results.txt"), "a+") as f:
f.write(str(performance))
f.write("\n")
f.close()
self.visualizer.plot_histogram(y_trues=y_trues, y_preds=y_preds, threshold=performance["threshold"], save_path=os.path.join(self.opt.outf, "histogram_test"+str(self.epoch)+".png"), tag="Histogram_Test", global_step=self.epoch)
self.visualizer.plot_pr_curve(y_trues=y_trues, y_preds=y_preds, thresholds=thresholds, global_step=self.epoch, tag="PR_Curve_Test")
self.visualizer.plot_roc_curve(y_trues=y_trues, y_preds=y_preds, global_step=self.epoch, tag="ROC_Curve_Test", save_path=os.path.join(self.opt.outf, "roc_test"+str(self.epoch)+".png"))
self.visualizer.plot_current_conf_matrix(self.epoch, performance["conf_matrix"], tag="Confusion_Matrix_Test", save_path=os.path.join(self.opt.outf, self.opt.phase+"_conf_matrix.png"))
self.visualizer.plot_performance(self.epoch, 0, performance, tag="Performance_Test")
return performance
    def forward_for_testing(self, data):
        """Run one evaluation batch through netG and netD (no noise).

        Stores the reconstruction in ``self.fake`` and the discriminator
        feature maps in ``self.feat_real``/``self.feat_fake``; the
        classification outputs are discarded (only the features are needed
        by ``calculate_an_score``).
        """
        self.set_input(data)
        self.fake = self.netg(self.input)
        real_clas, self.feat_real = self.netd(self.input)
        fake_clas, self.feat_fake = self.netd(self.fake)
    def inference(self):
        """Run the model over the inference dataloader and report metrics.

        Loads the best weights, computes per-sample anomaly scores, prints
        timing, writes result files and plots under ``self.opt.outf`` and
        dumps per-file classifications as JSON.

        Returns:
            dict: Performance metrics from ``get_performance``.
        """
        self.netg.eval()
        self.netd.eval()
        with torch.no_grad():
            self.load_weights(path=self.opt.path_to_weights, is_best=True)
            # Create big error tensor for the test set.
            self.an_scores = torch.zeros(size=(len(self.data.inference.dataset),), dtype=torch.float32, device=self.device)
            self.gt_labels = torch.zeros(size=(len(self.data.inference.dataset),), dtype=torch.long, device=self.device)
            print("Starting Inference!")
            inf_time = None
            inf_times = []
            self.file_names = []
            for i, data in tqdm(enumerate(self.data.inference), leave=False, total=len(self.data.inference)):
                inf_start = time.time()
                # Forward - Pass
                self.forward_for_testing(data)
                # Calculate the anomaly score.
                error = self.calculate_an_score()
                inf_times.append(time.time()-inf_start)
                self.an_scores[i*self.opt.batchsize: i*self.opt.batchsize + error.size(0)] = error.reshape(error.size(0))
                self.gt_labels[i*self.opt.batchsize: i*self.opt.batchsize + error.size(0)] = self.gt.reshape(error.size(0))
                if self.opt.verbose:
                    print(f'an_scores: {str(self.an_scores)}')
                real, fake, fixed = self.get_current_images()
                if i % self.opt.save_image_freq == 0:
                    self.visualizer.display_current_images(real, fake, fixed, train_or_test="test_inference", global_step=i)
                # data[2] presumably carries the file name — verify against the dataset.
                self.file_names.append(data[2])
            # Measure inference time.
            # Scale error vector between [0, 1] TODO: does it work without normalizing?
            # self.an_scores = (self.an_scores - torch.min(self.an_scores))/(torch.max(self.an_scores) - torch.min(self.an_scores))
            if self.opt.verbose:
                print(f'scaled an_scores: {str(self.an_scores)}')
            y_trues = self.gt_labels.cpu()
            y_preds = self.an_scores.cpu()
            inf_time = sum(inf_times)
            print (f'Inference time: {inf_time} secs')
            print (f'Inference time / individual: {inf_time/len(y_trues)} secs')
            # Create data frame for scores and labels.
            performance, thresholds, y_preds_man, y_preds_auc = get_performance(y_trues=y_trues, y_preds=y_preds, manual_threshold=self.opt.decision_threshold)
            with open(os.path.join(self.opt.outf, self.opt.phase +"_results.txt"), "w") as f:
                f.write(str(performance))
                f.close()
            self.visualizer.plot_histogram(y_trues=y_trues, y_preds=y_preds, threshold=performance["threshold"], save_path=os.path.join(self.opt.outf, "histogram_inference.png"), tag="Histogram_Inference")
            self.visualizer.plot_pr_curve(y_trues=y_trues, y_preds=y_preds, thresholds=thresholds, global_step=1, tag="PR_Curve_Inference")
            self.visualizer.plot_roc_curve(y_trues=y_trues, y_preds=y_preds, global_step=1, tag="ROC_Curve_Inference", save_path=os.path.join(self.opt.outf, "roc_inference.png"))
            self.visualizer.plot_current_conf_matrix(1, performance["conf_matrix"], tag="Confusion_Matrix_Inference", save_path=os.path.join(self.opt.outf, self.opt.phase+"_conf_matrix.png"))
            # Extra plots/outputs when a manual decision threshold was supplied.
            if self.opt.decision_threshold:
                self.visualizer.plot_current_conf_matrix(2, performance["conf_matrix_man"], save_path=os.path.join(self.opt.outf, self.opt.phase+"_conf_matrix_man.png"))
                self.visualizer.plot_histogram(y_trues=y_trues, y_preds=y_preds, threshold=performance["manual_threshold"], global_step=2, save_path=os.path.join(self.opt.outf, "histogram_inference_man.png"), tag="Histogram_Inference")
                write_inference_result(file_names=self.file_names, y_trues=y_trues, y_preds=y_preds_man,outf=os.path.join(self.opt.outf, "classification_result_man.json"))
            self.visualizer.plot_performance(1, 0, performance, tag="Performance_Inference")
            write_inference_result(file_names=self.file_names, y_trues=y_trues, y_preds=y_preds_auc,outf=os.path.join(self.opt.outf, "classification_result.json"))
            ##
            # RETURN
            return performance
def calculate_an_score(self):
si = self.input.size()
sz = self.feat_real.size()
rec = (self.input - self.fake).view(si[0], si[1] * si[2] * si[3])
lat = (self.feat_real - self.feat_fake).view(sz[0], sz[1] * sz[2] * sz[3])
rec = torch.mean(torch.pow(rec, 2), dim=1)
lat = torch.mean(torch.pow(lat, 2), dim=1)
#print("lat", lat)
#print("rec", rec)
if self.opt.verbose:
print(f'rec: {str(rec)}')
print(f'lat: {str(lat)}')
error = 0.9*rec + 0.1*lat
return error | [
"numpy.random.seed",
"os.path.join",
"torch.nn.BCELoss",
"torch.nn.L1Loss",
"lib.models.networks.define_D",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"time.time",
"lib.visualizer.Visualizer",
"numpy.mean",
"random.seed",
"numpy.array",
"torchvision.utils.save_image",
"collections... | [((973, 996), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (984, 996), False, 'import random\n'), ((1117, 1143), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (1131, 1143), True, 'import numpy as np\n'), ((1518, 1533), 'lib.visualizer.Visualizer', 'Visualizer', (['opt'], {}), '(opt)\n', (1528, 1533), False, 'from lib.visualizer import Visualizer\n'), ((1582, 1633), 'os.path.join', 'os.path.join', (['self.opt.outf', 'self.opt.name', '"""train"""'], {}), "(self.opt.outf, self.opt.name, 'train')\n", (1594, 1633), False, 'import os\n'), ((1657, 1707), 'os.path.join', 'os.path.join', (['self.opt.outf', 'self.opt.name', '"""test"""'], {}), "(self.opt.outf, self.opt.name, 'test')\n", (1669, 1707), False, 'import os\n'), ((1731, 1786), 'os.path.join', 'os.path.join', (['self.opt.outf', 'self.opt.name', '"""inference"""'], {}), "(self.opt.outf, self.opt.name, 'inference')\n", (1743, 1786), False, 'import os\n'), ((2077, 2148), 'lib.models.networks.define_G', 'define_G', (['self.opt'], {'norm': '"""batch"""', 'use_dropout': '(False)', 'init_type': '"""normal"""'}), "(self.opt, norm='batch', use_dropout=False, init_type='normal')\n", (2085, 2148), False, 'from lib.models.networks import weights_init, define_G, define_D, get_scheduler\n'), ((2169, 2240), 'lib.models.networks.define_D', 'define_D', (['self.opt'], {'norm': '"""batch"""', 'use_sigmoid': '(False)', 'init_type': '"""normal"""'}), "(self.opt, norm='batch', use_sigmoid=False, init_type='normal')\n", (2177, 2240), False, 'from lib.models.networks import weights_init, define_G, define_D, get_scheduler\n'), ((2811, 2823), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (2821, 2823), True, 'import torch.nn as nn\n'), ((2845, 2856), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (2854, 2856), True, 'import torch.nn as nn\n'), ((5237, 5398), 'collections.OrderedDict', 'OrderedDict', (["[('err_d', self.err_d), ('err_g', self.err_g), 
('err_g_adv', self.err_g_adv\n ), ('err_g_con', self.err_g_con), ('err_g_lat', self.err_g_lat)]"], {}), "([('err_d', self.err_d), ('err_g', self.err_g), ('err_g_adv',\n self.err_g_adv), ('err_g_con', self.err_g_con), ('err_g_lat', self.\n err_g_lat)])\n", (5248, 5398), False, 'from collections import OrderedDict\n'), ((6241, 6294), 'os.path.join', 'os.path.join', (['self.opt.outf', 'name', '"""train"""', '"""weights"""'], {}), "(self.opt.outf, name, 'train', 'weights')\n", (6253, 6294), False, 'import os\n'), ((21162, 21262), 'lib.evaluate.get_performance', 'get_performance', ([], {'y_trues': 'y_trues', 'y_preds': 'y_preds', 'manual_threshold': 'self.opt.decision_threshold'}), '(y_trues=y_trues, y_preds=y_preds, manual_threshold=self.opt\n .decision_threshold)\n', (21177, 21262), False, 'from lib.evaluate import roc, auprc, write_inference_result, get_performance\n'), ((6323, 6349), 'os.path.exists', 'os.path.exists', (['weight_dir'], {}), '(weight_dir)\n', (6337, 6349), False, 'import os\n'), ((6363, 6386), 'os.makedirs', 'os.makedirs', (['weight_dir'], {}), '(weight_dir)\n', (6374, 6386), False, 'import os\n'), ((8596, 8609), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8607, 8609), False, 'from collections import OrderedDict\n'), ((8638, 8651), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8649, 8651), False, 'from collections import OrderedDict\n'), ((16672, 16692), 'numpy.array', 'np.array', (['self.times'], {}), '(self.times)\n', (16680, 16692), True, 'import numpy as np\n'), ((16718, 16744), 'numpy.mean', 'np.mean', (['(self.times * 1000)'], {}), '(self.times * 1000)\n', (16725, 16744), True, 'import numpy as np\n'), ((17233, 17333), 'lib.evaluate.get_performance', 'get_performance', ([], {'y_trues': 'y_trues', 'y_preds': 'y_preds', 'manual_threshold': 'self.opt.decision_threshold'}), '(y_trues=y_trues, y_preds=y_preds, manual_threshold=self.opt\n .decision_threshold)\n', (17248, 17333), False, 'from lib.evaluate import roc, 
auprc, write_inference_result, get_performance\n'), ((4287, 4316), 'lib.models.networks.get_scheduler', 'get_scheduler', (['optimizer', 'opt'], {}), '(optimizer, opt)\n', (4300, 4316), False, 'from lib.models.networks import weights_init, define_G, define_D, get_scheduler\n'), ((15075, 15086), 'time.time', 'time.time', ([], {}), '()\n', (15084, 15086), False, 'import time\n'), ((15292, 15303), 'time.time', 'time.time', ([], {}), '()\n', (15301, 15303), False, 'import time\n'), ((19492, 19503), 'time.time', 'time.time', ([], {}), '()\n', (19501, 19503), False, 'import time\n'), ((21276, 21336), 'os.path.join', 'os.path.join', (['self.opt.outf', "(self.opt.phase + '_results.txt')"], {}), "(self.opt.outf, self.opt.phase + '_results.txt')\n", (21288, 21336), False, 'import os\n'), ((21527, 21581), 'os.path.join', 'os.path.join', (['self.opt.outf', '"""histogram_inference.png"""'], {}), "(self.opt.outf, 'histogram_inference.png')\n", (21539, 21581), False, 'import os\n'), ((21871, 21919), 'os.path.join', 'os.path.join', (['self.opt.outf', '"""roc_inference.png"""'], {}), "(self.opt.outf, 'roc_inference.png')\n", (21883, 21919), False, 'import os\n'), ((22054, 22118), 'os.path.join', 'os.path.join', (['self.opt.outf', "(self.opt.phase + '_conf_matrix.png')"], {}), "(self.opt.outf, self.opt.phase + '_conf_matrix.png')\n", (22066, 22118), False, 'import os\n'), ((22936, 22993), 'os.path.join', 'os.path.join', (['self.opt.outf', '"""classification_result.json"""'], {}), "(self.opt.outf, 'classification_result.json')\n", (22948, 22993), False, 'import os\n'), ((2405, 2446), 'os.path.join', 'os.path.join', (['self.opt.resume', '"""netG.pth"""'], {}), "(self.opt.resume, 'netG.pth')\n", (2417, 2446), False, 'import os\n'), ((16152, 16212), 'os.path.join', 'os.path.join', (['self.opt.outf', 'self.opt.name', '"""test"""', '"""images"""'], {}), "(self.opt.outf, self.opt.name, 'test', 'images')\n", (16164, 16212), False, 'import os\n'), ((16417, 16491), 
'torchvision.utils.save_image', 'vutils.save_image', (['real', "('%s/real_%03d.png' % (dst, i + 1))"], {'normalize': '(True)'}), "(real, '%s/real_%03d.png' % (dst, i + 1), normalize=True)\n", (16434, 16491), True, 'import torchvision.utils as vutils\n'), ((16510, 16584), 'torchvision.utils.save_image', 'vutils.save_image', (['fake', "('%s/fake_%03d.png' % (dst, i + 1))"], {'normalize': '(True)'}), "(fake, '%s/fake_%03d.png' % (dst, i + 1), normalize=True)\n", (16527, 16584), True, 'import torchvision.utils as vutils\n'), ((17351, 17411), 'os.path.join', 'os.path.join', (['self.opt.outf', "(self.opt.phase + '_results.txt')"], {}), "(self.opt.outf, self.opt.phase + '_results.txt')\n", (17363, 17411), False, 'import os\n'), ((18259, 18323), 'os.path.join', 'os.path.join', (['self.opt.outf', "(self.opt.phase + '_conf_matrix.png')"], {}), "(self.opt.outf, self.opt.phase + '_conf_matrix.png')\n", (18271, 18323), False, 'import os\n'), ((22256, 22324), 'os.path.join', 'os.path.join', (['self.opt.outf', "(self.opt.phase + '_conf_matrix_man.png')"], {}), "(self.opt.outf, self.opt.phase + '_conf_matrix_man.png')\n", (22268, 22324), False, 'import os\n'), ((22469, 22527), 'os.path.join', 'os.path.join', (['self.opt.outf', '"""histogram_inference_man.png"""'], {}), "(self.opt.outf, 'histogram_inference_man.png')\n", (22481, 22527), False, 'import os\n'), ((22661, 22722), 'os.path.join', 'os.path.join', (['self.opt.outf', '"""classification_result_man.json"""'], {}), "(self.opt.outf, 'classification_result_man.json')\n", (22673, 22722), False, 'import os\n'), ((2506, 2547), 'os.path.join', 'os.path.join', (['self.opt.resume', '"""netG.pth"""'], {}), "(self.opt.resume, 'netG.pth')\n", (2518, 2547), False, 'import os\n'), ((2613, 2654), 'os.path.join', 'os.path.join', (['self.opt.resume', '"""netD.pth"""'], {}), "(self.opt.resume, 'netD.pth')\n", (2625, 2654), False, 'import os\n'), ((16240, 16258), 'os.path.isdir', 'os.path.isdir', (['dst'], {}), '(dst)\n', (16253, 16258), 
False, 'import os\n'), ((16260, 16276), 'os.makedirs', 'os.makedirs', (['dst'], {}), '(dst)\n', (16271, 16276), False, 'import os\n'), ((19733, 19744), 'time.time', 'time.time', ([], {}), '()\n', (19742, 19744), False, 'import time\n')] |
# Enable import from parent package
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import os
sys.path.append( os.path.dirname( os.path.dirname( os.path.abspath(__file__) ) ) )
import dataio, utils, training, loss_functions, modules, diff_operators
import torch
import numpy as np
import math
from torch.utils.data import DataLoader
import configargparse
import scipy.io as spio
logging_root = './logs'
# NOTE(review): presumably must match the angle scaling used at training time — confirm.
angle_alpha = 1.2
# Setting to plot
ckpt_path = './Deepreach_trained_checkpoints/air3D_ckpt.pth'
activation = 'sine'
times = [0.9]
# Assumes the ground-truth BRT time grid has 0.1 spacing; +1 for MATLAB's 1-based indexing.
time_indices_matlab = [int(time_to_plot/0.1) + 1 for time_to_plot in times]
thetas = [1.5863] # This theta is contained in the LS computation grid.
# Initialize and load the model
model = modules.SingleBVPNet(in_features=4, out_features=1, type=activation, mode='mlp',
                             final_layer_factor=1., hidden_features=512, num_hidden_layers=3)
model.cuda()
checkpoint = torch.load(ckpt_path)
# Checkpoints may store the state dict under 'model' or directly at top level.
# BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
# only the lookup failures are expected here.
try:
    model_weights = checkpoint['model']
except (KeyError, TypeError):
    model_weights = checkpoint
model.load_state_dict(model_weights)
model.eval()
# Load the ground truth BRS data
true_BRT_path = './Deepreach_trained_checkpoints/analytical_BRT_air3D.mat'
true_data = spio.loadmat(true_BRT_path)
# Accumulators for the ground-truth ('LS') and predicted ('siren') value functions.
val_functions = {}
val_functions['LS'] = []
val_functions['siren'] = []
# Define the validation function
def val_fn_BRS(model):
  """Compare the learned value function against the analytical BRT.

  For every requested (time, theta) slice, evaluates ``model`` on the
  ground-truth grid, unnormalizes the output and plots (a) the overlaid
  zero level sets, (b) the ground-truth value function and (c) the
  predicted one. Also appends both value-function arrays to the module
  level ``val_functions`` dict.

  Returns:
      (fig_brs, fig_valfunc_LS, fig_valfunc_siren, val_functions)
  """
  num_times = len(times)
  num_thetas = len(thetas)
  # Find matlab indices for theta slices (nearest grid point to each theta).
  theta_indices_matlab = []
  theta_values = true_data['gmat'][0, 0, :, 2]
  for i in range(num_thetas):
    theta_indices_matlab.append(np.argmin(abs(theta_values - thetas[i])))
  # Create figures
  fig_brs = plt.figure(figsize=(5*num_thetas, 5*num_times))
  fig_valfunc_LS = plt.figure(figsize=(5*num_thetas, 5*num_times))
  fig_valfunc_siren = plt.figure(figsize=(5*num_thetas, 5*num_times))
  # Start plotting the results
  for i in range(num_times):
    for j in range(num_thetas):
      # Flatten the (x, y, theta) grid slice into model input coordinates;
      # theta is normalized by angle_alpha * pi to match the network input range.
      state_coords = torch.tensor(np.reshape(true_data['gmat'][:, :, theta_indices_matlab[j], :], (-1, 3)), dtype=torch.float32)
      state_coords[:, 2] = state_coords[:, 2] / (angle_alpha * math.pi)
      time_coords = torch.ones(state_coords.shape[0], 1) * times[i]
      coords = torch.cat((time_coords, state_coords), dim=1)[None]
      # Compute the value function
      model_in = {'coords': coords.cuda()}
      model_out = model(model_in)
      # Detach outputs and reshape to the ground-truth slice shape
      valfunc = model_out['model_out'].detach().cpu().numpy()
      valfunc_true = true_data['data'][:, :, theta_indices_matlab[j], time_indices_matlab[i]]
      valfunc = np.reshape(valfunc, valfunc_true.shape)
      # Unnormalize the value function and gradients
      # NOTE(review): constants presumably match the training normalization — confirm.
      norm_to = 0.02
      mean = 0.25
      var = 0.5
      valfunc = (valfunc*var/norm_to) + mean
      ## Plot the zero level set
      # Fetch the BRS (sub-zero region, with a small tolerance)
      brs_predicted = (valfunc <= 0.001) * 1.
      brs_actual = (valfunc_true <= 0.001) * 1.
      # Plot predicted and analytical sets overlaid on the same axes
      ax = fig_brs.add_subplot(num_times, num_thetas, (j+1) + i*num_thetas)
      ax.set_title('t = %0.2f, theta = %0.2f' % (times[i], thetas[j]))
      s1 = ax.imshow(brs_predicted.T, cmap='bwr', origin='lower', vmin=-1., vmax=1., extent=(-1., 1., -1., 1.), interpolation='bilinear')
      s2 = ax.imshow(brs_actual.T, cmap='seismic', alpha=0.5, origin='lower', vmin=-1., vmax=1., extent=(-1., 1., -1., 1.), interpolation='bilinear')
      ## Plot the actual value function
      ax = fig_valfunc_LS.add_subplot(num_times, num_thetas, (j+1) + i*num_thetas)
      ax.set_title('t = %0.2f, theta = %0.2f' % (times[i], thetas[j]))
      s = ax.imshow(valfunc_true.T, cmap='bwr', origin='lower', extent=(-1., 1., -1., 1.), vmin=-0.25, vmax=1.2)
      fig_valfunc_LS.colorbar(s)
      ## Plot the predicted value function
      ax = fig_valfunc_siren.add_subplot(num_times, num_thetas, (j+1) + i*num_thetas)
      ax.set_title('t = %0.2f, theta = %0.2f' % (times[i], thetas[j]))
      s = ax.imshow(valfunc.T, cmap='bwr', origin='lower', extent=(-1., 1., -1., 1.), vmin=-0.25, vmax=1.2)
      fig_valfunc_siren.colorbar(s)
      ## Append the value functions
      val_functions['LS'].append(valfunc_true)
      val_functions['siren'].append(valfunc)
  return fig_brs, fig_valfunc_LS, fig_valfunc_siren, val_functions
# Run the validation and write all artifacts under the logging root.
fig_brs, fig_valfunc_LS, fig_valfunc_siren, val_functions = val_fn_BRS(model)
figure_outputs = [
    (fig_brs, 'Air3D_BRS_comparison.png'),
    (fig_valfunc_LS, 'Air3D_LS_valfunc.png'),
    (fig_valfunc_siren, 'Air3D_Siren_valfunc.png'),
]
for fig, fname in figure_outputs:
    fig.savefig(os.path.join(logging_root, fname))
# Export the raw value-function arrays for offline analysis.
spio.savemat(os.path.join(logging_root, 'Air3D_raw_valfuncs.mat'), val_functions)
| [
"torch.ones",
"os.path.abspath",
"scipy.io.loadmat",
"torch.load",
"torch.cat",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.reshape",
"os.path.join",
"modules.SingleBVPNet"
] | [((54, 75), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (68, 75), False, 'import matplotlib\n'), ((764, 915), 'modules.SingleBVPNet', 'modules.SingleBVPNet', ([], {'in_features': '(4)', 'out_features': '(1)', 'type': 'activation', 'mode': '"""mlp"""', 'final_layer_factor': '(1.0)', 'hidden_features': '(512)', 'num_hidden_layers': '(3)'}), "(in_features=4, out_features=1, type=activation, mode=\n 'mlp', final_layer_factor=1.0, hidden_features=512, num_hidden_layers=3)\n", (784, 915), False, 'import dataio, utils, training, loss_functions, modules, diff_operators\n'), ((965, 986), 'torch.load', 'torch.load', (['ckpt_path'], {}), '(ckpt_path)\n', (975, 986), False, 'import torch\n'), ((1238, 1265), 'scipy.io.loadmat', 'spio.loadmat', (['true_BRT_path'], {}), '(true_BRT_path)\n', (1250, 1265), True, 'import scipy.io as spio\n'), ((1736, 1787), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5 * num_thetas, 5 * num_times)'}), '(figsize=(5 * num_thetas, 5 * num_times))\n', (1746, 1787), True, 'import matplotlib.pyplot as plt\n'), ((1803, 1854), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5 * num_thetas, 5 * num_times)'}), '(figsize=(5 * num_thetas, 5 * num_times))\n', (1813, 1854), True, 'import matplotlib.pyplot as plt\n'), ((1873, 1924), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5 * num_thetas, 5 * num_times)'}), '(figsize=(5 * num_thetas, 5 * num_times))\n', (1883, 1924), True, 'import matplotlib.pyplot as plt\n'), ((4485, 4539), 'os.path.join', 'os.path.join', (['logging_root', '"""Air3D_BRS_comparison.png"""'], {}), "(logging_root, 'Air3D_BRS_comparison.png')\n", (4497, 4539), False, 'import os\n'), ((4564, 4614), 'os.path.join', 'os.path.join', (['logging_root', '"""Air3D_LS_valfunc.png"""'], {}), "(logging_root, 'Air3D_LS_valfunc.png')\n", (4576, 4614), False, 'import os\n'), ((4642, 4695), 'os.path.join', 'os.path.join', (['logging_root', '"""Air3D_Siren_valfunc.png"""'], {}), 
"(logging_root, 'Air3D_Siren_valfunc.png')\n", (4654, 4695), False, 'import os\n'), ((4710, 4762), 'os.path.join', 'os.path.join', (['logging_root', '"""Air3D_raw_valfuncs.mat"""'], {}), "(logging_root, 'Air3D_raw_valfuncs.mat')\n", (4722, 4762), False, 'import os\n'), ((181, 206), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((2679, 2718), 'numpy.reshape', 'np.reshape', (['valfunc', 'valfunc_true.shape'], {}), '(valfunc, valfunc_true.shape)\n', (2689, 2718), True, 'import numpy as np\n'), ((2048, 2120), 'numpy.reshape', 'np.reshape', (["true_data['gmat'][:, :, theta_indices_matlab[j], :]", '(-1, 3)'], {}), "(true_data['gmat'][:, :, theta_indices_matlab[j], :], (-1, 3))\n", (2058, 2120), True, 'import numpy as np\n'), ((2235, 2271), 'torch.ones', 'torch.ones', (['state_coords.shape[0]', '(1)'], {}), '(state_coords.shape[0], 1)\n', (2245, 2271), False, 'import torch\n'), ((2298, 2343), 'torch.cat', 'torch.cat', (['(time_coords, state_coords)'], {'dim': '(1)'}), '((time_coords, state_coords), dim=1)\n', (2307, 2343), False, 'import torch\n')] |
#!/usr/bin/env python
"""
ONS Address Index - Land Registry Data
======================================
A simple script to process land registry sales data.
The original data were downloaded on the 10th of November from:
https://data.gov.uk/dataset/land-registry-monthly-price-paid-data
Because the AddressBase used by the prototype is Epoch 39 (from April) it does not contain all
new builds with new postcodes. This script allows identifying those postcodes that do not exist
in the Epoch 39 AddressBase.
Running
-------
The script can be run from command line using CPython::
python landRegistryData.py
Requirements
------------
:requires: pandas
:requires: numpy
Author
------
:author: <NAME> (<EMAIL>)
Version
-------
:version: 0.2
:date: 18-Nov-2016
"""
import pandas as pd
import numpy as np
import os
# If the environment reports a bare "UTF-8" LC_CTYPE, replace it with a
# fully-qualified locale name — presumably to avoid locale-related errors
# in downstream libraries (common on macOS); verify if still needed.
if os.environ.get('LC_CTYPE', '') == 'UTF-8':
    os.environ['LC_CTYPE'] = 'en_US.UTF-8'
def loadData(filename='pp-monthly-update.csv', path='/Users/saminiemi/Projects/ONS/AddressIndex/data/'):
    """
    Read in the Land Registry testing data.

    The data were downloaded from:
    https://data.gov.uk/dataset/land-registry-monthly-price-paid-data
    The header was grabbed from:
    https://www.gov.uk/guidance/about-the-price-paid-data#explanations-of-column-headers-in-the-ppd

    :param filename: name of the CSV file holding the data
    :type filename: str
    :param path: location of the test data
    :type path: str

    :return: pandas dataframe of the data (no UPRNs)
    :rtype: pandas.DataFrame
    """
    # Column 2 holds the date of transfer; parse it to datetime on read.
    # ``infer_datetime_format`` was dropped: it is deprecated (and a no-op)
    # since pandas 2.0 and removed in pandas 3.0.
    df = pd.read_csv(path + filename, low_memory=False, parse_dates=[2, ])
    print('Found', len(df.index), 'addresses from the land registry sales data...')

    return df
def loadAddressBaseData(filename='AB.csv', path='/Users/saminiemi/Projects/ONS/AddressIndex/data/ADDRESSBASE/'):
    """
    Load a compressed version of the full AddressBase file.

    Only the postcode columns are read; where the PAF postcode is missing,
    the NAG postcode locator is used instead so that every row carries a
    usable postcode.

    :param filename: name of the file containing modified AddressBase
    :type filename: str
    :param path: location of the AddressBase combined data file
    :type path: str

    :return: pandas dataframe of the requested information
    :rtype: pandas.DataFrame
    """
    df = pd.read_csv(path + filename, usecols=['POSTCODE', 'POSTCODE_LOCATOR'])
    print('Found', len(df.index), 'addresses from AddressBase...')

    # Combine PAF and NAG information: fall back to the locator whenever the
    # PAF postcode is absent.
    df['POSTCODE'] = df['POSTCODE'].fillna(df['POSTCODE_LOCATOR'])

    return df
def testIfPostcodeExists(ab, landRegistry):
"""
A simple function to identify those postcodes that are present in the land registry data but
missing from AddressBase. Most of these are new buildings. One should consider removing these
from the testing of prototype matching.
:param ab: dataframe containing addressbase information
:type ab: pandas.DataFrame
:param landRegistry: dataframe containing land registry data
:type landRegistry: pandas.DataFrame
:return: None
"""
# find unique postcodes from AddressBase
ABpostcodes = np.unique(ab['POSTCODE'].values)
# those land registry postcodes that are not present in AddressBase are newbuilds
msk = landRegistry['Postcode'].isin(ABpostcodes)
# get those addresses that have a postcode in AB and identify missing postcodes
lr = landRegistry.loc[~msk]
missingPostcodes = np.unique(lr.loc[lr['Postcode'].notnull(), 'Postcode'].values)
print('Missing Postcodes:')
print(missingPostcodes)
print('In total', len(missingPostcodes), 'postcodes in sales data without AB counterpart')
print('In total', len(lr.index), 'addresses without counterparts')
# find those with postcode counterparts and save to a file
msk = ~landRegistry.Postcode.isin(missingPostcodes)
lr = landRegistry.ix[msk]
path = '/Users/saminiemi/Projects/ONS/AddressIndex/data/'
print('After removing postcodes without counterpart', len(lr.index), 'address remain...')
lr.to_csv(path + 'pp-monthly-update-Edited.csv', index=False)
# record also those without postcode counterpart
lr = landRegistry.ix[~msk]
print(len(lr.index), 'addresses without postcodes...')
lr.to_csv(path + 'pp-monthly-update-no-postcode.csv', index=False)
if __name__ == "__main__":
    # Script entry point: load AddressBase and the monthly land registry
    # sales records (default hard-coded paths), then split the sales data by
    # whether each postcode exists in AddressBase.
    ab = loadAddressBaseData()
    lr = loadData()

    testIfPostcodeExists(ab, lr)
| [
"os.environ.get",
"pandas.read_csv",
"numpy.unique"
] | [((836, 866), 'os.environ.get', 'os.environ.get', (['"""LC_CTYPE"""', '""""""'], {}), "('LC_CTYPE', '')\n", (850, 866), False, 'import os\n'), ((1567, 1662), 'pandas.read_csv', 'pd.read_csv', (['(path + filename)'], {'low_memory': '(False)', 'parse_dates': '[2]', 'infer_datetime_format': '(True)'}), '(path + filename, low_memory=False, parse_dates=[2],\n infer_datetime_format=True)\n', (1578, 1662), True, 'import pandas as pd\n'), ((2321, 2391), 'pandas.read_csv', 'pd.read_csv', (['(path + filename)'], {'usecols': "['POSTCODE', 'POSTCODE_LOCATOR']"}), "(path + filename, usecols=['POSTCODE', 'POSTCODE_LOCATOR'])\n", (2332, 2391), True, 'import pandas as pd\n'), ((3190, 3222), 'numpy.unique', 'np.unique', (["ab['POSTCODE'].values"], {}), "(ab['POSTCODE'].values)\n", (3199, 3222), True, 'import numpy as np\n')] |
import torch
from PIL import Image
import torchvision
import numpy as np
import utils
import json
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

IMG_SIZE=(300,300)

# manifest_path is defined but not referenced in this script.
manifest_path = "./data/safebooru-pic-meta/list.json"
label_path = "./safebooru-labels-dict.json"
model_path = "./safebooru-anime_vgg16.pth"

# Load the trained VGG16 model onto whichever device is active.
vgg16 = torch.load(model_path, map_location=torch.device(device))

with open(label_path,"r") as fp1:
    classes = json.load(fp1)

# The JSON stores label indices as strings; coerce them to ints and build
# the reverse index -> label lookup used when displaying predictions.
classes = {k:int(v) for k,v in classes.items()}
classes_rev = {int(v):k for k,v in classes.items()}
print(classes_rev)
def recognize_img(img_paths):
    """Classify a batch of image files with the global VGG16 model and
    display the predictions via ``utils.show_imgs``.

    :param img_paths: paths of the image files to classify
    """
    imgs = []
    for img_path in img_paths:
        # Use a context manager so the underlying file handle is closed
        # (a bare ``Image.open`` leaks the descriptor). ``np.array`` forces
        # the lazily-opened PIL image to be decoded before the file closes.
        with Image.open(img_path) as pil_img:
            resized = torchvision.transforms.functional.resize(img=pil_img, size=IMG_SIZE)
            imgs.append((np.array(resized) / 255.0).astype(np.float32))
    imgs = np.asarray(imgs)
    imgs_tensor = torch.tensor(imgs)
    imgs_tensor = imgs_tensor.to(device)
    # NHWC -> NCHW, the layout torchvision models expect.
    b = imgs_tensor.permute(0, 3, 1, 2)
    print(b.shape)
    # Inference only: disable autograd so no graph/activations are retained.
    with torch.no_grad():
        outputs = vgg16(b)
    outputs = torch.max(outputs, 1)[1].data.cpu().numpy()  # predicted class indices
    print(outputs)
    utils.show_imgs(imgs, real_labels=None, pred_labels=outputs, classes_rev=classes_rev)
# Classify four sample images (dd1.jpg .. dd4.jpg) from the etc_imgs folder.
img_paths = ["./data/etc_imgs/dd{}.jpg".format(i) for i in range(1,5)]
recognize_img(img_paths) | [
"utils.show_imgs",
"json.load",
"numpy.asarray",
"torchvision.transforms.functional.resize",
"PIL.Image.open",
"torch.cuda.is_available",
"numpy.array",
"torch.max",
"torch.device",
"torch.tensor"
] | [((444, 458), 'json.load', 'json.load', (['fp1'], {}), '(fp1)\n', (453, 458), False, 'import json\n'), ((884, 900), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (894, 900), True, 'import numpy as np\n'), ((957, 975), 'torch.tensor', 'torch.tensor', (['imgs'], {}), '(imgs)\n', (969, 975), False, 'import torch\n'), ((1203, 1293), 'utils.show_imgs', 'utils.show_imgs', (['imgs'], {'real_labels': 'None', 'pred_labels': 'outputs', 'classes_rev': 'classes_rev'}), '(imgs, real_labels=None, pred_labels=outputs, classes_rev=\n classes_rev)\n', (1218, 1293), False, 'import utils\n'), ((132, 157), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (155, 157), False, 'import torch\n'), ((374, 394), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (386, 394), False, 'import torch\n'), ((679, 699), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (689, 699), False, 'from PIL import Image\n'), ((714, 784), 'torchvision.transforms.functional.resize', 'torchvision.transforms.functional.resize', ([], {'img': 'img_bytes', 'size': 'IMG_SIZE'}), '(img=img_bytes, size=IMG_SIZE)\n', (754, 784), False, 'import torchvision\n'), ((808, 821), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (816, 821), True, 'import numpy as np\n'), ((1111, 1132), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (1120, 1132), False, 'import torch\n')] |
import operator
import builtins
import numpy as np
from .nodes import slice_op
from .base import Node, add, sub, mul, min_, max_, var_index, DEFAULT_SHAPES
from .util import _flatten_iterable
class GroupNode(Node):
    """A graph node that reduces ``input_node`` across one or more bounds.

    Subclasses (``sum``, ``prod``, ``min``, ``max``, ``argmin``, ``argmax``,
    ``bitreverse``) pick the concrete numpy reduction; this base class wires
    up the reduction domain/axes and evaluates the reduction lazily.
    """
    # numpy reductions that accept only an ``axis`` keyword (no ``initial``).
    builtin_np = ["sum", "prod", "amax", "amin", "argmin", "argmax"]
    # Scalar counterpart of each reduction, keyed by numpy function name or,
    # for ufunc ``reduce`` targets, by the subclass name.
    scalar_op_map = {"sum": operator.add, "prod": operator.mul, "amax": builtins.max, "amin": builtins.min, "argmin": min_, "argmax": max_, "bitreverse": lambda a, b: (a << 1) | (b & 1)}

    def __init__(self, target, bounds, input_node, **kwargs):
        """Store the reduction target and derive the domain/axes.

        Parameters
        ----------
        target : callable
            numpy reduction (e.g. ``np.sum``) or a ufunc ``reduce`` method.
        bounds :
            Reduction bounds the input is grouped over.
        input_node : Node
            Node whose values are reduced.
        """
        self.output_nodes = []
        target_name = f"{target.__module__}.{target.__name__}"
        # The reduction domain can be supplied explicitly; otherwise it is
        # derived from the input node's domain.
        if "domain" in kwargs:
            domain = tuple(kwargs.pop("domain")) if isinstance(kwargs["domain"], list) else kwargs.pop("domain")
        elif isinstance(input_node, Node):
            domain = input_node.domain.reduction_domain(_flatten_iterable(bounds))
        else:
            raise ValueError(f"Group operations unable to handle non node inputs currently: {input_node} - {target_name}")
        if "axes" in kwargs:
            axes = kwargs.pop("axes") if isinstance(kwargs["axes"], tuple) else tuple(kwargs.pop("axes"))
        else:
            axes = input_node.domain.compute_set_reduction_index(domain)
        super(GroupNode, self).__init__(bounds, input_node, target=target_name, domain=domain, axes=axes, **kwargs)
        self.target = target
        # ``np.frompyfunc(fn, 2, 1).reduce`` has __name__ == "reduce", so the
        # scalar op is keyed on the subclass name in that case.
        if self.target.__name__ == "reduce":
            self.scalar_target = self.scalar_op_map[self.__class__.__name__]
        else:
            self.scalar_target = self.scalar_op_map[self.target.__name__]
        self.input_node = input_node

    def __getitem__(self, key):
        """Index into the reduction output.

        For finalized shapes the pre-built output node is returned directly;
        otherwise a ``var_index`` node is created (or reused by name).
        """
        if isinstance(key, (tuple, list, np.ndarray)) and len(key) == 0:
            return self
        elif self.is_shape_finalized() and len(self.nodes) > 0:
            if isinstance(key, int):
                key = tuple([key])
            # Map the multi-dimensional key to a flat (C-order) offset.
            idx = np.ravel_multi_index(key, dims=self.shape, order='C')
            ret = self.output_nodes[idx]
            return ret
        else:
            # Build a stable name of the form ``var(k0, k1, ...)``.
            name = []
            if isinstance(key, Node):
                name.append(key.name)
            elif hasattr(key, "__len__") and not isinstance(key, str):
                for k in key:
                    if isinstance(k, Node):
                        name.append(k.name)
                    else:
                        name.append(k)
            else:
                name.append(key)
            name = f"{self.var.name}{tuple(name)}"
            if name in self.graph.nodes:
                return self.graph.nodes[name]
            elif isinstance(key, (list)):
                return var_index(self, key, name=name, graph=self.graph)
            elif isinstance(key, tuple):
                return var_index(self, list(key), name=name, graph=self.graph)
            else:
                return var_index(self, [key], name=name, graph=self.graph)

    @property
    def axes(self):
        """Axes of the computed set that are reduced over."""
        return self.kwargs["axes"]

    @property
    def domain(self):
        """Reduced domain of this node."""
        return self.kwargs["domain"]

    def _evaluate(self, bounds, input_res, **kwargs):
        """Apply the reduction to ``input_res`` and cache the output shape.

        Scalars are multiplied by the total bound size (reduction over a
        constant); arrays are reshaped to the computed set shape first.
        """
        sum_axes = self.axes
        if not hasattr(input_res, "__len__"):
            value = input_res * np.prod([len(bound) for bound in bounds])
        elif self.target.__name__ in self.builtin_np:
            value = self.target(input_res.reshape(self.args[1].domain.computed_set_shape), axis=sum_axes)
        else:
            # Custom ufunc reductions need a seed value, set by the subclass
            # (e.g. ``bitreverse.initial``).
            value = self.target(input_res.reshape(self.args[1].domain.computed_set_shape), axis=sum_axes, initial=self.initial)
        if len(value.shape) == 0:
            value = np.asarray([value])
        if not self.is_shape_finalized():
            self.shape = value.shape
        return value

    # ---- Arithmetic operators -------------------------------------------
    # Scalar-domain results lower to plain scalar ops; otherwise a
    # ``slice_op`` over the node's domain is emitted.
    def __add__(self, other):
        return slice_op(operator.add, self, other, graph=self.graph) if not self.domain.is_scalar else add(self, other, graph=self.graph)

    def __radd__(self, other):
        return slice_op(operator.add, other, self, graph=self.graph) if not self.domain.is_scalar else add(other, self, graph=self.graph)

    def __sub__(self, other):
        return slice_op(operator.sub, self, other, graph=self.graph) if not self.domain.is_scalar else sub(self, other, graph=self.graph)

    def __rsub__(self, other):
        return slice_op(operator.sub, other, self, graph=self.graph) if not self.domain.is_scalar else sub(other, self, graph=self.graph)

    def __pow__(self, other):
        return slice_op(builtins.pow, self, other, graph=self.graph)

    def __rpow__(self, other):
        return slice_op(builtins.pow, other, self, graph=self.graph)

    def __mul__(self, other):
        return slice_op(operator.mul, self, other, graph=self.graph) if not self.domain.is_scalar else mul(self, other, graph=self.graph)

    def __rmul__(self, other):
        return slice_op(operator.mul, other, self, graph=self.graph) if not self.domain.is_scalar else mul(other, self, graph=self.graph)

    def __truediv__(self, other):
        return slice_op(operator.truediv, self, other, graph=self.graph)

    def __rtruediv__(self, other):
        return slice_op(operator.truediv, other, self, graph=self.graph)

    def __floordiv__(self, other):
        return slice_op(operator.floordiv, self, other, graph=self.graph)

    def __rfloordiv__(self, other):
        return slice_op(operator.floordiv, other, self, graph=self.graph)

    def __mod__(self, other):
        return slice_op(operator.mod, self, other, graph=self.graph)

    def __rmod__(self, other):
        return slice_op(operator.mod, other, self, graph=self.graph)

    def __lshift__(self, other):
        return slice_op(operator.lshift, self, other, graph=self.graph)

    def __rlshift__(self, other):
        return slice_op(operator.lshift, other, self, graph=self.graph)

    def __rshift__(self, other):
        return slice_op(operator.rshift, self, other, graph=self.graph)

    def __rrshift__(self, other):
        return slice_op(operator.rshift, other, self, graph=self.graph)

    def __and__(self, other):
        return slice_op(operator.and_, self, other, graph=self.graph)

    def __rand__(self, other):
        return slice_op(operator.and_, other, self, graph=self.graph)

    def __or__(self, other):
        return slice_op(operator.or_, self, other, graph=self.graph)

    def __ror__(self, other):
        return slice_op(operator.or_, other, self, graph=self.graph)

    def __xor__(self, other):
        return slice_op(operator.xor, self, other, graph=self.graph)

    def __rxor__(self, other):
        return slice_op(operator.xor, other, self, graph=self.graph)

    # ---- Comparisons -----------------------------------------------------
    def __lt__(self, other):
        return slice_op(operator.lt, self, other, graph=self.graph)

    def __le__(self, other):
        # BUGFIX: this previously emitted ``operator.lt(other, self)``,
        # i.e. ``self > other``; ``<=`` must map to ``operator.le(self, other)``.
        return slice_op(operator.le, self, other, graph=self.graph)

    def __ne__(self, other):
        # NOTE(review): ``__ne__`` is elementwise but ``__eq__`` is not
        # overridden here, so inherited equality semantics still apply.
        return slice_op(operator.ne, self, other, graph=self.graph)

    def __gt__(self, other):
        return slice_op(operator.gt, self, other, graph=self.graph)

    def __ge__(self, other):
        return slice_op(operator.ge, self, other, graph=self.graph)

    def __repr__(self):
        return f"<group_{self.op_name} '{self.name}'>"
class sum(GroupNode):
    """Grouped summation of ``input_node`` over the reduction ``bounds``."""

    def __init__(self, bounds, input_node, **kwargs):
        super().__init__(np.sum, bounds, input_node, **kwargs)
class min(GroupNode):
    """Grouped minimum of ``input_node`` over the reduction ``bounds``."""

    def __init__(self, bounds, input_node, **kwargs):
        super().__init__(np.min, bounds, input_node, **kwargs)
class prod(GroupNode):
    """Grouped product of ``input_node`` over the reduction ``bounds``."""

    def __init__(self, bounds, input_node, **kwargs):
        super().__init__(np.prod, bounds, input_node, **kwargs)
class max(GroupNode):
    """Grouped maximum of ``input_node`` over the reduction ``bounds``."""

    def __init__(self, bounds, input_node, **kwargs):
        super().__init__(np.max, bounds, input_node, **kwargs)
class argmax(GroupNode):
    """Index of the grouped maximum of ``input_node`` over ``bounds``."""

    def __init__(self, bounds, input_node, **kwargs):
        super().__init__(np.argmax, bounds, input_node, **kwargs)
class argmin(GroupNode):
    """Index of the grouped minimum of ``input_node`` over ``bounds``."""

    def __init__(self, bounds, input_node, **kwargs):
        super().__init__(np.argmin, bounds, input_node, **kwargs)
class bitreverse(GroupNode):
    # Reduction that folds elements' low bits together: each step shifts the
    # accumulator left by one and ORs in the low bit of the next element.
    def __init__(self, bounds, input_node, **kwargs):
        shifter = lambda a, b: (a << 1) | (b & 1)
        # np.frompyfunc wraps the lambda as a ufunc so its ``reduce`` method
        # can serve as the reduction target.
        np_shifter = np.frompyfunc(shifter, 2, 1).reduce
        # Seed for the reduction; read by GroupNode._evaluate as ``initial``.
        self.initial = 0
        super(bitreverse, self).__init__(np_shifter, bounds, input_node, **kwargs) | [
"numpy.asarray",
"numpy.frompyfunc",
"numpy.ravel_multi_index"
] | [((3889, 3908), 'numpy.asarray', 'np.asarray', (['[value]'], {}), '([value])\n', (3899, 3908), True, 'import numpy as np\n'), ((9111, 9139), 'numpy.frompyfunc', 'np.frompyfunc', (['shifter', '(2)', '(1)'], {}), '(shifter, 2, 1)\n', (9124, 9139), True, 'import numpy as np\n'), ((2039, 2092), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['key'], {'dims': 'self.shape', 'order': '"""C"""'}), "(key, dims=self.shape, order='C')\n", (2059, 2092), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.